Dataset columns: python_code (string, 0 to 992k characters), repo_name (string, 8 to 46 characters), file_path (string, 5 to 162 characters). Each record below gives the file contents, the source repository, and the file path.
import importlib
import os

# Automatically import any Python files in the criterions/ directory.
for file in sorted(os.listdir(os.path.dirname(__file__))):
    if file.endswith(".py") and not file.startswith("_"):
        file_name = file[: file.find(".py")]
        importlib.import_module("criterions." + file_name)
APAC-SCALE-master
examples/fairseq/criterions/__init__.py
import torch
from torchscale.architecture.config import DecoderConfig
from torchscale.architecture.decoder import Decoder
from torchscale.component.embedding import PositionalEmbedding
from transformers import T5Tokenizer, CLIPProcessor, CLIPModel
from PIL import Image
from torch.nn import Embedding, Module
import bitsandbytes as bnb


class MagnetoTokenizer:
    def __init__(self):
        self.processor = CLIPProcessor.from_pretrained("laion/CLIP-ViT-L-14-laion2B-s32B-b82K")
        # T5 uses a SentencePiece tokenizer; reserve two special tokens for image boundaries.
        self.tokenizer = T5Tokenizer.from_pretrained(
            "t5-large",
            additional_special_tokens=["<image>", "</image>"],
            extra_ids=0,
            model_max_length=1984
        )
        self.im_idx, self.im_end_idx = self.tokenizer.convert_tokens_to_ids(["<image>", "</image>"])

    def tokenize_texts(self, texts):
        texts = self.tokenizer(texts, return_tensors="pt", padding=True, truncation=True).input_ids
        # Add image tokens to the text.
        image_tokens = torch.tensor([[self.im_idx, self.im_end_idx]] * texts.shape[0])
        return torch.cat([texts[:, 0:1], image_tokens, texts[:, 1:]], dim=1), texts

    def tokenize_images(self, images):
        return self.processor(images=images, return_tensors="pt").pixel_values

    def tokenize(self, sample):
        text_tokens, only_text_tokens = self.tokenize_texts(sample["target_text"])
        attention_mask = text_tokens != self.tokenizer.pad_token_id
        dummy_image_features = torch.ones((text_tokens.shape[0], 64))
        attention_mask = torch.cat([dummy_image_features, attention_mask], dim=1)
        return {
            'text_tokens': text_tokens,
            'images': self.tokenize_images(sample["image"]),
            'labels': only_text_tokens,
            'attention_mask': attention_mask
        }


class Magneto(Module):
    def __init__(self):
        self.clip_model
APAC-SCALE-master
examples/magneto/magneto.py
import time

import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from datasets import load_dataset
from datasets import Image
from torch.nn import CrossEntropyLoss
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import get_scheduler, default_data_collator, get_linear_schedule_with_warmup
from lion_pytorch import Lion
from rich.progress import Progress
from bitsandbytes.optim import AdamW8bit

from kosmos import Kosmos, KosmosTokenizer


def count_number_of_parameters(model, only_trainable: bool = True) -> int:
    if only_trainable:
        num_params: int = sum(p.numel() for p in model.parameters() if p.requires_grad)
    else:
        num_params: int = sum(p.numel() for p in model.parameters())
    return num_params


def prep_sample(sample):
    question = sample["question"]
    answer = sample["answer"].split("|!+")[1]
    explanation = sample["explanation"]
    text = f"Question: {question} Answer: {answer} Explanation: {explanation}"
    image = sample["image"]
    return {
        "image": image,
        "target_text": text
    }


def train(args):
    accelerator = Accelerator(
        mixed_precision="fp16"
    )

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    model = Kosmos()
    model = model.to(accelerator.device)

    optimizer = Lion(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)

    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=args.warmup_steps,
        num_training_steps=args.max_steps,
    )

    tokenizer = KosmosTokenizer()

    dataset = load_dataset("bjoernp/vqax", split="text")
    # dataset = dataset.cast_column("url", Image)
    dataset = dataset.map(prep_sample, num_proc=8)
    remove_columns = ['id', 'img_id', 'question', 'answer', 'explanation', 'none', 'image', 'target_text']
    dataset = dataset.map(tokenizer.tokenize, batched=True, batch_size=128, remove_columns=remove_columns)

    train_dataloader = DataLoader(
        dataset, collate_fn=default_data_collator, batch_size=args.batch_size, pin_memory=True
    )

    model, train_dataloader, optimizer, lr_scheduler = accelerator.prepare(
        model, train_dataloader, optimizer, lr_scheduler
    )
    model.train()
    accelerator.register_for_checkpointing(lr_scheduler)

    # Freeze CLIP except for its last encoder layer.
    model.clip_model.requires_grad_(False)
    model.clip_model.encoder.layers[-1].requires_grad_(True)

    accelerator.print(f"number of parameters: {count_number_of_parameters(model):,}")
    accelerator.print(f"number of trainable parameters: {count_number_of_parameters(model, only_trainable=True):,}")

    # Log model and optimizer parameters to wandb.
    accelerator.init_trackers(project_name="kosmos")

    train_loader = iter(train_dataloader)
    epoch_loss = 0
    total_loss = 0
    start_time = time.time()

    with Progress() as progress:
        task = progress.add_task("[red]Training...", total=args.max_steps)
        for step in range(0, args.max_steps):
            batch_start = time.time()
            batch = next(train_loader)
            outputs = model(**batch, self_attn_padding_mask=batch["attention_mask"])
            # Shift so that tokens < n predict n.
            outputs = torch.cat([outputs[:, :1], outputs[:, 67:]], dim=1).contiguous()
            # shift_logits = outputs[..., :-1, :].contiguous()
            # shift_labels = batch["labels"][..., 1:].contiguous()
            # Flatten the tokens.
            loss_fct = CrossEntropyLoss()
            one_hot_labels = torch.nn.functional.one_hot(batch["labels"][:, 1:], num_classes=32002).float()
            loss = loss_fct(outputs[:, :-1], one_hot_labels)

            epoch_loss += loss.detach().float()

            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()

            batch_end = time.time()
            logs = {
                "loss": loss.item(),
                "perplexity": torch.exp(loss).item(),
                "lr": lr_scheduler.get_last_lr()[0],
                "examples": args.batch_size * (step + 1),
                "examples_per_second": args.batch_size / (batch_end - batch_start),
            }
            if step % args.log_every == args.log_every - 1:
                accelerator.log(logs, step=step)
                progress.update(
                    task,
                    advance=1,
                    description=f"Step Loss: {loss.item():.5f} "
                                f"| Mean Loss: {(total_loss + epoch_loss) / step:.5f} "
                                f"| Mean PPL: {torch.exp((total_loss + epoch_loss) / step):.2f} "
                                f"| Examples: {args.batch_size * (step + 1)} "
                                f"| Examples/s: {args.batch_size / (batch_end - batch_start):.2f} "
                                f"| Elapsed: {time.strftime('%H:%M:%S', time.gmtime(time.time() - start_time))}"
                )

            if step % args.save_every == args.save_every - 1:
                train_epoch_loss = epoch_loss / args.save_every
                total_loss += epoch_loss
                epoch_loss = 0

                accelerator.log({
                    "train_ppl": torch.exp(train_epoch_loss),
                    "train_epoch_loss": train_epoch_loss,
                }, step=step)

                progress.print(f"Saving checkpoint at step {step}...")
                accelerator.save_state(
                    f"{args.checkpoint_dir}/checkpoint_at_step_{step}/")


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_dir", type=str, default="checkpoints")
    parser.add_argument("--learning_rate", type=float, default=1e-5)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--warmup_steps", type=int, default=0)
    parser.add_argument("--max_steps", type=int, default=100000)
    parser.add_argument("--batch_size", type=int, default=4)
    parser.add_argument("--log_every", type=int, default=1)
    parser.add_argument("--save_every", type=int, default=100)
    parser.add_argument("--seed", type=int, default=None)
    args = parser.parse_args()

    train(args)
APAC-SCALE-master
examples/kosmos/train_kosmos.py
import torch
from torchscale.architecture.config import DecoderConfig
from torchscale.architecture.decoder import Decoder
from torchscale.component.embedding import PositionalEmbedding
from transformers import T5Tokenizer, CLIPProcessor, CLIPModel
from flamingo_pytorch import PerceiverResampler
from PIL import Image
from torch.nn import Embedding, Module
import bitsandbytes as bnb


class KosmosTokenizer:
    def __init__(self):
        self.processor = CLIPProcessor.from_pretrained("laion/CLIP-ViT-L-14-laion2B-s32B-b82K")

        # T5 uses a SentencePiece tokenizer; reserve two special tokens for image boundaries.
        self.tokenizer = T5Tokenizer.from_pretrained(
            "t5-large",
            additional_special_tokens=["<image>", "</image>"],
            extra_ids=0,
            model_max_length=1984
        )
        self.im_idx, self.im_end_idx = self.tokenizer.convert_tokens_to_ids(["<image>", "</image>"])

    def tokenize_texts(self, texts):
        texts = self.tokenizer(texts, return_tensors="pt", padding=True, truncation=True).input_ids
        # Add image tokens to text as "<s> <image> </image> text </s>".
        image_tokens = torch.tensor([[self.im_idx, self.im_end_idx]] * texts.shape[0])
        return torch.cat([texts[:, 0:1], image_tokens, texts[:, 1:]], dim=1), texts

    def tokenize_images(self, images):
        return self.processor(images=images, return_tensors="pt").pixel_values

    def tokenize(self, sample):
        text_tokens, only_text_tokens = self.tokenize_texts(sample["target_text"])
        attention_mask = text_tokens != self.tokenizer.pad_token_id
        dummy_image_features = torch.ones((text_tokens.shape[0], 64))
        attention_mask = torch.cat([dummy_image_features, attention_mask], dim=1)
        return {
            "text_tokens": text_tokens,
            "images": self.tokenize_images(sample["image"]),
            "labels": only_text_tokens,
            "attention_mask": attention_mask,
        }


class Kosmos(Module):
    def __init__(self):
        super().__init__()
        # Instantiate the CLIP ViT vision tower.
        self.clip_model = CLIPModel.from_pretrained("laion/CLIP-ViT-L-14-laion2B-s32B-b82K").vision_model

        self.embed = bnb.nn.Embedding(
            32002,
            2048,
            padding_idx=1
        )
        self.embed_positions = PositionalEmbedding(
            2048,
            2048,
            1
        )

        self.output_projection = torch.nn.Linear(
            2048, 32002, bias=False
        )
        torch.nn.init.normal_(
            self.output_projection.weight, mean=0, std=2048**-0.5
        )

        # Decoder config.
        self.config = DecoderConfig(
            decoder_layers=24,
            decoder_embed_dim=2048,
            decoder_ffn_embed_dim=8192,
            decoder_attention_heads=32,
            dropout=0.1,
            activation_fn="flashattention",
            use_xmoe=True,
            attention_dropout=0.1,
            vocab_size=32002,
            subln=True,
            xpos_rel_pos=True,
            max_rel_pos=2048
        )
        self.decoder = Decoder(
            self.config,
            embed_tokens=self.embed,
            embed_positions=self.embed_positions,
            output_projection=self.output_projection
        )

        self.perceive = PerceiverResampler(
            dim=1024,
            depth=2,
            dim_head=64,
            heads=8,
            num_latents=64,
            num_media_embeds=257
        )

        self.image_proj = torch.nn.Linear(1024, 2048, bias=False)
        torch.nn.init.normal_(
            self.image_proj.weight, mean=0, std=2048**-0.5
        )

    def forward(self, text_tokens, images, **kwargs):
        images = self.clip_model(pixel_values=images)["last_hidden_state"]
        images = self.perceive(images).squeeze(1)
        images = self.image_proj(images)

        model_input = self.decoder.forward_embedding(text_tokens)[1]
        model_input = torch.cat([model_input[:, 0:2], images, model_input[:, 2:]], dim=1)
        model_input = self.decoder.forward_embedding(model_input, token_embedding=model_input)[0]

        return self.decoder(model_input, passed_x=model_input)[0]
APAC-SCALE-master
examples/kosmos/kosmos.py
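The Kosmos model and tokenizer above can be exercised end to end roughly as follows. This is a minimal sketch, assuming the classes defined in examples/kosmos/kosmos.py; the image path and caption are placeholders, and the expected output shape assumes the torchscale decoder returns per-token logits.

from PIL import Image
from kosmos import Kosmos, KosmosTokenizer

tokenizer = KosmosTokenizer()
model = Kosmos()

# One (image, caption) pair; "example.jpg" is a placeholder path.
sample = {
    "target_text": ["a photo of a dog playing in a park"],
    "image": [Image.open("example.jpg")],
}
batch = tokenizer.tokenize(sample)

# Forward pass with text tokens and CLIP pixel values; other batch keys go through **kwargs.
logits = model(
    text_tokens=batch["text_tokens"],
    images=batch["images"],
)
print(logits.shape)  # expected: (batch, sequence length, 32002 vocabulary logits)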
from exa import Inference

model = Inference(
    model_id="georgesung/llama2_7b_chat_uncensored",
    quantize=True
)

model.run("What is your name")
Exa-main
example.py
from exa.inference.gptq import GPTQInference
from exa.inference.hf import Inference
from exa.quant.main import Quantize
# from exa.inference.diffusion import Diffuse
Exa-main
exa/__init__.py
Exa-main
exa/quant/__init__.py
import logging
import time

from transformers import AutoModelForCausalLM, BitsAndBytesConfig


class Quantize:
    """
    Quantize provides a convenient way to load, quantize, and manage HuggingFace models,
    specifically designed for optimization.

    The primary goal of this class is to help users quantize HuggingFace models using the
    BitsAndBytes configuration to achieve faster inference times, and to either push the
    optimized models to HuggingFace's model hub or load them from it.

    Parameters:
    - model_id (str): Model identifier to be loaded from the HuggingFace hub.
    - bits (int, optional): Bit precision for quantization. Default is 4.
    - threshold (float, optional): Threshold for quantization. Default is 6.0.
    - skip_modules (list, optional): List of modules to skip during quantization. Default is None.
    - enable_fp32_cpu_offload (bool, optional): If True, offload the FP32 computations to CPU. Default is False.
    - has_fp16_weight (bool, optional): If True, indicates that the model has FP16 weights. Default is False.
    - compute_dtype (str, optional): Data type for computation after quantization. Default is None.
    - quant_type (str, optional): Type of quantization to apply. Default is "fp4".
    - use_double_quant (bool, optional): If True, applies double quantization. Default is False.
    - verbose (bool, optional): If True, provides more detailed logs. Default is False.
    """

    def __init__(
        self,
        model_id,
        bits: int = 4,
        threshold: float = 6.0,
        skip_modules=None,
        enable_fp32_cpu_offload=False,
        has_fp16_weight=False,
        compute_dtype=None,
        quant_type: str = "fp4",
        use_double_quant=False,
        verbose=False,
    ):
        super().__init__()
        self.model_id = model_id
        self.bits = bits
        self.threshold = threshold
        self.skip_modules = skip_modules
        self.enable_fp32_cpu_offload = enable_fp32_cpu_offload
        self.has_fp16_weight = has_fp16_weight
        self.compute_dtype = compute_dtype
        self.quant_type = quant_type
        self.use_double_quant = use_double_quant
        self.verbose = verbose
        self.tokenizer = None
        self.model = None
        self.logger = self._init_logger()

    def _init_logger(self):
        logger = logging.getLogger(__name__)
        logger.setLevel(logging.INFO if self.verbose else logging.ERROR)
        formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(formatter)
        logger.addHandler(console_handler)
        return logger

    def log_metadata(self, metadata):
        if self.verbose:
            for key, value in metadata.items():
                self.logger.info(f"{key}: {value}")

    def load_model(self):
        """
        Loads and quantizes the HuggingFace model using the provided parameters and configuration.
        """
        try:
            # Define a device map if CPU offload is enabled.
            device_map = None
            if self.enable_fp32_cpu_offload:
                device_map = {
                    "transformer.word_embeddings": 0,
                    "transformer.word_embeddings_layernorm": 0,
                    "lm_head": "cpu",
                    "transformer.h": 0,
                    "transformer.ln_f": 0,
                }

            # Quantization config.
            quantization_config = BitsAndBytesConfig(
                load_in_8bit=self.bits == 8,
                load_in_4bit=self.bits == 4,
                llm_int8_threshold=self.threshold,
                llm_int8_skip_modules=self.skip_modules,
                llm_int8_enable_fp32_cpu_offload=self.enable_fp32_cpu_offload,
                llm_int8_has_fp16_weight=self.has_fp16_weight,
                bnb_4bit_compute_dtype=self.compute_dtype,
                bnb_4bit_quant_type=self.quant_type,
                bnb_4bit_use_double_quant=self.use_double_quant,
            )

            start_time = time.time()

            # Load the quantized model.
            self.model = AutoModelForCausalLM.from_pretrained(
                self.model_id,
                device_map=device_map,
                quantization_config=quantization_config,
            )

            end_time = time.time()

            # Log metadata and metrics.
            metadata = {
                "Bits": self.bits,
                "Threshold": self.threshold,
                "FP32 CPU Offload": self.enable_fp32_cpu_offload,
                "Quantization Type": self.quant_type,
                "Time to Quantize (s)": round(end_time - start_time, 2)
            }
            self.log_metadata(metadata)
        except RuntimeError as error:
            self.logger.error(
                f"An error occurred while loading the model: {error}. "
                "Identify the root cause, then consult the documentation for common errors."
            )

    def push_to_hub(self, hub):
        """
        Pushes the quantized model and the associated tokenizer to the HuggingFace model hub.

        Parameters:
        - hub (str): Hub name or identifier to which the model and tokenizer should be pushed.
        """
        try:
            # Push the quantized model and tokenizer to the hub.
            if self.model is not None and self.tokenizer is not None:
                self.model.push_to_hub(hub)
                self.tokenizer.push_to_hub(hub)
            else:
                raise ValueError("Model and tokenizer must be loaded before pushing to the hub")
        except RuntimeError as error:
            self.logger.error(f"An error occurred while pushing to the hub: {error}")

    def load_from_hub(self, hub):
        """
        Loads a quantized model from the HuggingFace model hub.

        Parameters:
        - hub (str): Hub name or identifier from which the model should be loaded.
        """
        try:
            # Load a quantized model from the hub.
            if self.tokenizer is not None:
                self.model = AutoModelForCausalLM.from_pretrained(hub, device_map="auto")
            else:
                raise ValueError("Tokenizer must be loaded before loading the hub model")
        except RuntimeError as error:
            self.logger.error(f'An error occurred while loading from the hub: {error}')


# Usage:
# quantize = Quantize(
#     model_id="bigscience/bloom-1b7",
#     bits=8,
#     enable_fp32_cpu_offload=True,
# )
# quantize.load_model()
# quantize.push_to_hub("my model")
# quantize.load_from_hub('my model')
Exa-main
exa/quant/main.py
Exa-main
exa/utils/metric_logger.py
import logging from termcolor import colored class CustomFormatter(logging.Formatter): """ Custom logging formatter for color-coded logging. Provides a custom format for logs based on the level of logging. Each logging level has its own associated color to easily distinguish between different log messages. Attributes: format_mappings (dict): Mapping of logging levels to their associated color and format. ########### import logging from exa import CustomFormatter logging.basicConfig(level=logging.INFO) handler = logging.StreamHandler() handler.setFormatter(CustomFormatter()) logger = logging.getLogger("CustomFormatterExample") logger.addHandler(handler) logger.info("This is an info message.") logger.warning("This is a warning message.") logger.error("This is an error message.") logger.debug("This is a debug message.") """ format_mappings = { logging.DEBUG: {"color": "grey", "format": "%(asctime)s - %(levelname)s - %(message)s"}, logging.INFO: {"color": "green", "format": "%(asctime)s - %(levelname)s - %(message)s"}, logging.WARNING: {"color": "yellow", "format": "%(asctime)s - %(levelname)s - %(message)s"}, logging.ERROR: {"color": "red", "format": "%(asctime)s - %(levelname)s - %(message)s"}, } def format(self, record): """ Format the log message. Args: record (LogRecord): The record to be formatted. Returns: str: Formatted log message with the appropriate color. """ format_dict = self.format_mappings.get(record.levelno) log_message = super().format(record) return colored(log_message, format_dict["color"]) class ColoredLogger: """ Utility class for colored logging. Provides static methods to log messages with colors associated with their log levels. The actual coloring is handled by the CustomFormatter. from exa import ColoredLogger ColoredLogger.info("This is an info message from ColoredLogger.") ColoredLogger.warning("This is a warning message from ColoredLogger.") ColoredLogger.error("This is an error message from ColoredLogger.") ColoredLogger.debug("This is a debug message from ColoredLogger.") """ @staticmethod def info(msg): """ Log an info level message. Args: msg (str): The message to be logged. """ logger.info(msg) @staticmethod def warning(msg): """ Log a warning level message. Args: msg (str): The message to be logged. """ logger.warning(msg) @staticmethod def error(msg): """ Log an error level message. Args: msg (str): The message to be logged. """ logger.error(msg) @staticmethod def debug(msg): """ Log a debug level message. Args: msg (str): The message to be logged. """ logger.debug(msg) def log_method_calls(cls): """ Decorator to log method entry and exit for classes. When a method of a decorated class is called, this decorator logs the method's name upon entering and exiting the method. Useful for debugging and tracking the flow of the program. ``` from exa import log_metadata @log_metadata class MyClass: def say_hello(self): print("Hello from MyClass!") def say_goodbye(self): print("Goodbye from MyClass!") # Using MyClass with log_metadata decorator sample_instance = MyClass() sample_instance.say_hello() sample_instance.say_goodbye() ``` Args: cls (type): The class to be wrapped. Returns: type: Wrapped class with added logging functionality. """ class Wrapped(cls): """Wrapper class to intercept method calls and log them.""" def __init__(self, *args, **kwargs): self._logger = logging.getLogger(cls.__name__) super().__init__(*args, **kwargs) def __getattribute__(self, s): """ Intercept method calls to log them. 
Args: s (str): The name of the attribute or method to be accessed. Returns: Any: The wrapped method with added logging or the original attribute. """ attr = super().__getattribute__(s) if callable(attr): def wrapped(*args, **kwargs): self._logger.debug(colored(f'Entering {s} method', 'blue')) result = attr(*args, **kwargs) self._logger.debug(colored(f'Exiting {s} method', 'blue')) return result return wrapped else: return attr return Wrapped # Set up the logger logging.basicConfig(level=logging.INFO) handler = logging.StreamHandler() handler.setFormatter(CustomFormatter()) logger = logging.getLogger(__name__) logger.addHandler(handler)
Exa-main
exa/utils/custom_formatter.py
Exa-main
exa/utils/__init__.py
Exa-main
exa/utils/decoding_wrapper.py
import torch.distributed as dist
import torch.multiprocessing as mp
from accelerate import PartialState
from diffusers import DiffusionPipeline


class Diffuse:
    def __init__(
        self,
        model,
        dtype,
        use_safetensors=False,
        method="accelerate"
    ):
        self.method = method
        self.pipeline = DiffusionPipeline.from_pretrained(
            model,
            torch_dtype=dtype,
            use_safetensors=use_safetensors,
        )

        if self.method == "accelerate":
            self.distributed_state = PartialState()
            self.pipeline.to(self.distributed_state.device)
        elif self.method == "torch":
            pass

    def infer_with_accelerate(self, prompt):
        with self.distributed_state.split_between_processes(prompt) as prompt:
            result = self.pipeline(prompt).images[0]
            result.save(f"result_{self.distributed_state.process_index}.png")

    def infer_torch(self, rank, world_size, prompts):
        dist.init_process_group("nccl", rank=rank, world_size=world_size)
        self.pipeline.to(rank)
        prompt = prompts[rank]
        image = self.pipeline(prompt).images[0]
        try:
            image.save(f"./{'_'.join(prompt.split())}.png")
        except RuntimeError as error:
            print(f"Error trying to save the image {error}")

    def run(self, prompts, world_size=2):
        try:
            if self.method == "accelerate":
                self.infer_with_accelerate(prompts)
        except RuntimeError as error:
            print(f"Could not run inference on accelerate setup: {error}")

        # Error handling for the torch path.
        try:
            if self.method == "torch":
                mp.spawn(
                    self.infer_torch,
                    args=(world_size, prompts),
                    nprocs=world_size,
                    join=True
                )
        except RuntimeError as error:
            print(f"Could not run inference on torch setup: {error}")


prompts = ["a dog", "a cat"]

infer = Diffuse(method="accelerate")
infer.run(prompts)
Exa-main
exa/inference/diffusion.py
Exa-main
exa/inference/__init__.py
import logging import torch from torch.multiprocessing import set_start_method from torch.nn.parallel import DistributedDataParallel as DDP from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig #set up logging logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) class GPTQInference: def __init__( self, model_id, quantization_config_bits, quantization_config_dataset, max_length, verbose = False, distributed = False, ): self.model_id = model_id self.quantization_config_bits = quantization_config_bits self.quantization_config_dataset = quantization_config_dataset self.max_length = max_length self.verbose = verbose self.distributed = distributed if self.distributed: assert torch.cuda.device_count() > 1, "You need more than 1 gpu for distributed processing" set_start_method("spawn", force=True) self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") else: self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") self.tokenizer = AutoTokenizer.from_pretrained(self.model_id) self.quantization_config = GPTQConfig( bits=self.quantization_config_bits, dataset=quantization_config_dataset, tokenizer=self.tokenizer ) self.model = AutoModelForCausalLM.from_pretrained( self.model_id, device_map="auto", quantization_config=self.quantization_config ).to(self.device) if self.distributed: self.model = DDP( self.model, device_ids=[0], output_device=0, ) logger.info(f"Model loaded from {self.model_id} on {self.device}") def run( self, prompt: str, max_length: int = 500, ): max_length = self.max_length or max_length try: inputs = self.tokenizer.encode( prompt, return_tensors="pt" ).to(self.device) with torch.no_grad(): outputs = self.model.generate( inputs, max_length=max_length, do_sample=True ) return self.tokenizer.decode( outputs[0], skip_special_tokens=True ) except Exception as error: print(f"Error: {error} in inference mode, please change the inference logic or try again") raise def __del__(self): #free up resources torch.cuda.empty_cache()
Exa-main
exa/inference/gptq.py
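A minimal usage sketch for the GPTQInference class above; the model id, calibration dataset, and prompt are illustrative, not taken from the repository.

from exa.inference.gptq import GPTQInference

inference = GPTQInference(
    model_id="facebook/opt-125m",      # placeholder model id
    quantization_config_bits=4,
    quantization_config_dataset="c4",  # calibration dataset forwarded to GPTQConfig
    max_length=200,
)
print(inference.run("The benefits of 4-bit quantization are"))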
from abc import ABC, abstractmethod


class InferenceHandler(ABC):
    @abstractmethod
    def run(
        self,
        prompt_text=None,
        model=None,
        tokenizer=None,
        device=None,
        max_length=None
    ):
        pass


class DefaultInferenceHandler(InferenceHandler):
    def run(
        self,
        prompt_text,
        model,
        tokenizer,
        device,
        max_length
    ):
        inputs = tokenizer.encode(prompt_text, return_tensors="pt").to(device)
        outputs = model.generate(inputs, max_length=max_length, do_sample=True)
        return tokenizer.decode(outputs[0], skip_special_tokens=True)
Exa-main
exa/inference/base.py
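A hypothetical sketch of plugging DefaultInferenceHandler into a plain HuggingFace causal LM; the model id and prompt are illustrative and assume the generate-based handler above.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from exa.inference.base import DefaultInferenceHandler

device = "cuda" if torch.cuda.is_available() else "cpu"
tokenizer = AutoTokenizer.from_pretrained("gpt2")  # placeholder model id
model = AutoModelForCausalLM.from_pretrained("gpt2").to(device)

handler = DefaultInferenceHandler()
print(handler.run("Hello, world", model, tokenizer, device, max_length=40))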
import logging

import torch
from torch.nn.parallel import DistributedDataParallel as DDP
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig


class Inference:
    """
    A class for running inference on a given model.

    Attributes:
        model_id (str): The ID of the model.
        device (str): The device to run the model on (either 'cuda' or 'cpu').
        max_length (int): The maximum length of the output sequence.
    """

    def __init__(
        self,
        model_id: str,
        device: str = None,
        max_length: int = 20,
        quantize: bool = False,
        quantization_config: dict = None,
        verbose=False,
        # logger=None,
        distributed=False,
        decoding=False
    ):
        """
        Initialize the Inference object.

        Args:
            model_id (str): The ID of the model.
            device (str, optional): The device to run the model on. Defaults to 'cuda' if available.
            max_length (int, optional): The maximum length of the output sequence. Defaults to 20.
            quantize (bool, optional): Whether to use quantization. Defaults to False.
            quantization_config (dict, optional): The configuration for quantization.
            verbose (bool, optional): Whether to print verbose logs. Defaults to False.
            logger (logging.Logger, optional): The logger to use. Defaults to a basic logger.
        """
        self.logger = logging.getLogger(__name__)
        self.device = device if device else ('cuda' if torch.cuda.is_available() else 'cpu')
        self.model_id = model_id
        self.max_length = max_length
        self.verbose = verbose
        self.distributed = distributed
        self.decoding = decoding
        self.model, self.tokenizer = None, None

        if self.distributed:
            assert torch.cuda.device_count() > 1, "You need more than 1 gpu for distributed processing"

        bnb_config = None
        if quantize:
            if not quantization_config:
                quantization_config = {
                    'load_in_4bit': True,
                    'bnb_4bit_use_double_quant': True,
                    'bnb_4bit_quant_type': "nf4",
                    'bnb_4bit_compute_dtype': torch.bfloat16
                }
            bnb_config = BitsAndBytesConfig(**quantization_config)
        # Keep the dict so load_model() can rebuild the config later.
        self.quantization_config = quantization_config

        try:
            self.tokenizer = AutoTokenizer.from_pretrained(self.model_id)
            self.model = AutoModelForCausalLM.from_pretrained(
                self.model_id,
                quantization_config=bnb_config
            )
            # Note: no explicit .to(self.device) here; quantized weights are
            # placed on device by from_pretrained.
        except Exception as e:
            self.logger.error(f"Failed to load the model or the tokenizer: {e}")
            raise

    def load_model(self):
        if not self.model or not self.tokenizer:
            try:
                self.tokenizer = AutoTokenizer.from_pretrained(self.model_id)

                bnb_config = BitsAndBytesConfig(
                    **self.quantization_config
                ) if self.quantization_config else None

                self.model = AutoModelForCausalLM.from_pretrained(
                    self.model_id,
                    quantization_config=bnb_config
                ).to(self.device)

                if self.distributed:
                    self.model = DDP(self.model)
            except Exception as error:
                self.logger.error(f"Failed to load the model or the tokenizer: {error}")
                raise

    def run(
        self,
        prompt_text: str,
        max_length: int = None
    ):
        """
        Generate a response based on the prompt text.

        Args:
        - prompt_text (str): Text to prompt the model.
        - max_length (int): Maximum length of the response.

        Returns:
        - Generated text (str).
        """
        self.load_model()
        max_length = max_length if max_length else self.max_length

        try:
            inputs = self.tokenizer.encode(
                prompt_text,
                return_tensors="pt"
            ).to(self.device)

            if self.decoding:
                with torch.no_grad():
                    for _ in range(max_length):
                        output_sequence = []

                        outputs = self.model.generate(
                            inputs,
                            max_length=len(inputs) + 1,
                            do_sample=True
                        )
                        output_tokens = outputs[0][-1]
                        output_sequence.append(output_tokens.item())

                        # Print each token in real time.
                        print(self.tokenizer.decode(
                            [output_tokens],
                            skip_special_tokens=True),
                            end="",
                            flush=True
                        )
                        inputs = outputs
            else:
                with torch.no_grad():
                    outputs = self.model.generate(
                        inputs,
                        max_length=max_length,
                        do_sample=True
                    )

            del inputs
            return self.tokenizer.decode(outputs[0], skip_special_tokens=True)
        except Exception as e:
            self.logger.error(f"Failed to generate the text: {e}")
            raise
Exa-main
exa/inference/hf.py
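A minimal usage sketch for the Inference wrapper above, mirroring the pattern in example.py; the model id is a placeholder, and quantize=True can be passed as in that example.

from exa.inference.hf import Inference

model = Inference(
    model_id="gpt2",  # placeholder model id
    max_length=50,
)
print(model.run("Once upon a time"))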
Exa-main
exa/benchmarking/__init__.py
import time import psutil import os from termcolor import colored import platform class BenchmarkSuite: def __init__(self, llm_instance, backend, dtype): self.llm = llm_instance self.initial_memory = psutil.Process(os.getpid()).memory_info().rss self.history = [] self.backend = backend self.dtype = dtype def measure_memory(self): current_memory = psutil.Process(os.getpid()).memory_info().rss return (current_memory - self.initial_memory) / (1024 ** 2) # Convert to MB def measure_throughput(self, input_text): durations = [] for _ in range(5): # Adaptive sampling: running 5 times start_time = time.time() self.llm.run(input_text) durations.append(time.time() - start_time) avg_duration = sum(durations) / len(durations) return len(input_text.split()) / avg_duration def measure_energy(self): return None # Placeholder for actual energy measurement def system_metadata(self): return { 'OS': platform.system(), 'Processor': platform.processor(), 'Machine': platform.machine() } def benchmark(self, input_text, best_score, best_scored_llm, metrics_to_run=None): if not metrics_to_run: metrics_to_run = ['Memory', 'Throughput', 'Energy'] results = { 'Backend': self.backend, 'Dtype': self.dtype, 'Optimizations': self.llm.optimizations, 'Quantization': self.llm.quantization, 'Class': self.llm.class_name, 'Type': self.llm.type_name, 'System': self.system_metadata() } if 'Memory' in metrics_to_run: results['Memory (MB)'] = self.measure_memory() if 'Throughput' in metrics_to_run: results['Throughput (tokens/s)'] = self.measure_throughput(input_text) if 'Energy' in metrics_to_run: results['Energy (tokens/kWh)'] = self.measure_energy() results['Best Score (%)'] = best_score results['Best Scored LLM'] = best_scored_llm self.history.append(results) self.log_results(results) return results def log_results(self, results): print(colored("\n===== BENCHMARK RESULTS =====", 'blue')) for key, value in results.items(): if key in ['Throughput (tokens/s)', 'Best Score (%)']: print(colored(f"{key}: {value}", 'green')) elif key in ['Memory (MB)', 'Energy (tokens/kWh)']: print(colored(f"{key}: {value}", 'red')) else: print(f"{key}: {value}") print(colored("=============================\n", 'blue')) # llm_instance = LLM(backend="CPU", dtype="FP32", optimizations="Layer Norm", quantization="8-bit", class_name="Transformer", type_name="BERT") # benchmarker = BenchmarkSuite(llm_instance) # results = benchmarker.benchmark(input_text="This is a sample input text.", best_score=98.5, best_scored_llm="GPT-3", metrics_to_run=['Memory', 'Throughput'])
Exa-main
exa/benchmarking/main.py
import unittest import torch from unittest.mock import patch, Mock from transformers import PreTrainedTokenizerFast, PreTrainedModel from exa.inference.gptq import GPTQInference class TestGPTQInference(unittest.TestCase): def setUp(self): # Mocking some of the external dependencies to avoid actual calls self.mocked_tokenizer = Mock(spec=PreTrainedTokenizerFast) self.mocked_tokenizer.encode.return_value = torch.tensor([0, 1, 2]) self.mocked_tokenizer.decode.return_value = "decoded text" self.mocked_model = Mock(spec=PreTrainedModel) self.mocked_model.generate.return_value = torch.tensor([[0, 1, 2]]) patcher1 = patch('YOUR_MODULE_PATH.AutoTokenizer.from_pretrained', return_value=self.mocked_tokenizer) patcher2 = patch('YOUR_MODULE_PATH.AutoModelForCausalLM.from_pretrained', return_value=self.mocked_model) self.addCleanup(patcher1.stop) self.addCleanup(patcher2.stop) self.mocked_from_pretrained_tokenizer = patcher1.start() self.mocked_from_pretrained_model = patcher2.start() def test_initialization(self): inferer = GPTQInference( model_id='gpt-2', quantization_config_bits=8, quantization_config_dataset='wiki', max_length=100 ) self.assertTrue(inferer.verbose is False) self.assertTrue(inferer.distributed is False) self.mocked_from_pretrained_tokenizer.assert_called_once() self.mocked_from_pretrained_model.assert_called_once() def test_initialization_with_distributed(self): with self.assertRaises(AssertionError): GPTQInference( model_id='gpt-2', quantization_config_bits=8, quantization_config_dataset='wiki', max_length=100, distributed=True ) @patch('torch.cuda.device_count', return_value=2) def test_initialization_distributed_success(self, mocked_device_count): inferer = GPTQInference( model_id='gpt-2', quantization_config_bits=8, quantization_config_dataset='wiki', max_length=100, distributed=True ) self.assertTrue(inferer.distributed is True) def test_run(self): inferer = GPTQInference( model_id='gpt-2', quantization_config_bits=8, quantization_config_dataset='wiki', max_length=100 ) result = inferer.run("This is a test.") self.assertEqual(result, "decoded text") self.mocked_tokenizer.encode.assert_called_once_with("This is a test.", return_tensors="pt") self.mocked_model.generate.assert_called_once() def test_run_error(self): self.mocked_tokenizer.encode.side_effect = Exception("Tokenization failed") inferer = GPTQInference( model_id='gpt-2', quantization_config_bits=8, quantization_config_dataset='wiki', max_length=100 ) with self.assertRaises(Exception) as context: inferer.run("This is a test.") self.assertEqual(str(context.exception), "Tokenization failed") def test_del(self): inferer = GPTQInference( model_id='gpt-2', quantization_config_bits=8, quantization_config_dataset='wiki', max_length=100 ) with patch('torch.cuda.empty_cache') as mocked_empty_cache: del inferer mocked_empty_cache.assert_called_once() if __name__ == '__main__': unittest.main()
Exa-main
tests/gptqinference.py
import unittest from unittest.mock import MagicMock, patch from transformers import AutoModelForCausalLM from exa.quant.main import Quantize class TestQuantize(unittest.TestCase): def setUp(self): self.quantize = Quantize( model_id="bigscience/bloom-1b7", bits=8, enable_fp32_cpu_offload=True, ) @patch.object(AutoModelForCausalLM, 'from_pretrained') def test_load_model(self, mock_from_pretrained): mock_model = MagicMock() mock_from_pretrained.return_value = mock_model self.quantize.load_model() mock_from_pretrained.assert_called_once() self.assertEqual(self.quantize.model, mock_model) @patch.object(AutoModelForCausalLM, 'from_pretrained') def test_load_model_error(self, mock_from_pretrained): mock_from_pretrained.side_effect = RuntimeError('Test error') with self.assertRaises(RuntimeError): self.quantize.load_model() @patch.object(AutoModelForCausalLM, 'push_to_hub') def test_push_to_hub(self, mock_push_to_hub): mock_model = MagicMock() self.quantize.model = mock_model self.quantize.push_to_hub('test_hub') mock_push_to_hub.assert_called_once_with('test_hub') @patch.object(AutoModelForCausalLM, 'push_to_hub') def test_push_to_hub_error(self, mock_push_to_hub): mock_push_to_hub.side_effect = RuntimeError('Test error') with self.assertRaises(RuntimeError): self.quantize.push_to_hub('test_hub') @patch.object(AutoModelForCausalLM, 'from_pretrained') def test_load_from_hub(self, mock_from_pretrained): mock_model = MagicMock() mock_from_pretrained.return_value = mock_model self.quantize.load_from_hub('test_hub') mock_from_pretrained.assert_called_once_with('test_hub', device_map='auto') self.assertEqual(self.quantize.model, mock_model) @patch.object(AutoModelForCausalLM, 'from_pretrained') def test_load_from_hub_error(self, mock_from_pretrained): mock_from_pretrained.side_effect = RuntimeError('Test error') with self.assertRaises(RuntimeError): self.quantize.load_from_hub('test_hub') def test_init_logger(self): logger = self.quantize._init_logger() self.assertEqual(logger.level, 40) # 40 is the level for ERROR def test_log_metadata(self): with patch('logging.Logger.info') as mock_info: self.quantize.verbose = True self.quantize.log_metadata({'test': 'value'}) mock_info.assert_called_once_with('test: value') if __name__ == '__main__': unittest.main()
Exa-main
tests/quantize.py
import unittest from unittest.mock import Mock, patch import torch from transformers import PreTrainedModel, PreTrainedTokenizerFast from exa.inference.hf import Inference class TestInference(unittest.TestCase): def setUp(self): self.mocked_tokenizer = Mock(spec=PreTrainedTokenizerFast) self.mocked_tokenizer.encode.return_value = torch.tensor([0, 1, 2]) self.mocked_tokenizer.decode.return_value = "generated text" self.mocked_model = Mock(spec=PreTrainedModel) self.mocked_model.generate.return_value = torch.tensor([[0, 1, 2]]) self.tokenizer_patcher = patch('YOUR_MODULE_PATH.AutoTokenizer.from_pretrained', return_value=self.mocked_tokenizer) self.model_patcher = patch('YOUR_MODULE_PATH.AutoModelForCausalLM.from_pretrained', return_value=self.mocked_model) self.mocked_from_pretrained_tokenizer = self.tokenizer_patcher.start() self.mocked_from_pretrained_model = self.model_patcher.start() def tearDown(self): self.tokenizer_patcher.stop() self.model_patcher.stop() def test_default_initialization(self): inference = Inference('gpt-2') self.assertEqual(inference.model_id, 'gpt-2') self.assertEqual(inference.max_length, 20) self.assertEqual(inference.verbose, False) self.assertEqual(inference.distributed, False) self.assertEqual(inference.decoding, False) def test_model_loading(self): inference = Inference('gpt-2') inference.load_model() self.mocked_from_pretrained_tokenizer.assert_called_once() self.mocked_from_pretrained_model.assert_called_once() def test_gpu_device_assignment(self): with patch('torch.cuda.is_available', return_value=True): inference = Inference('gpt-2') self.assertEqual(inference.device, 'cuda') def test_default_quantization(self): inference = Inference('gpt-2', quantize=True) self.assertIsNotNone(inference.quantization_config) self.assertTrue(inference.quantization_config['load_in_4bit']) self.assertEqual(inference.quantization_config['bnb_4bit_compute_dtype'], torch.bfloat16) def test_custom_quantization(self): custom_config = { 'load_in_4bit': False, 'bnb_4bit_use_double_quant': False } inference = Inference('gpt-2', quantize=True, quantization_config=custom_config) self.assertFalse(inference.quantization_config['load_in_4bit']) self.assertFalse(inference.quantization_config['bnb_4bit_use_double_quant']) def test_text_generation_without_realtime_decoding(self): inference = Inference('gpt-2', decoding=False) result = inference.run("This is a test.") self.assertEqual(result, "generated text") def test_text_generation_with_realtime_decoding(self): inference = Inference('gpt-2', decoding=True) with patch('builtins.print') as mocked_print: result = inference.run("This is a test.") mocked_print.assert_called() self.assertEqual(result, "generated text") def test_distributed_processing_assertion(self): with patch('torch.cuda.device_count', return_value=1): with self.assertRaises(AssertionError): Inference('gpt-2', distributed=True) def test_error_during_model_loading(self): self.mocked_from_pretrained_tokenizer.side_effect = Exception("Failed to load tokenizer") with self.assertRaises(Exception): Inference('gpt-2') def test_text_generation_failure_handling(self): self.mocked_model.generate.side_effect = Exception("Failed to generate") inference = Inference('gpt-2') with self.assertRaises(Exception): inference.run("This is a test.") if __name__ == '__main__': unittest.main()
Exa-main
tests/inference.py
Exa-main
benchmarks/gptq_inference.py
Exa-main
benchmarks/inference.py
Exa-main
benchmarks/quant.py
from __future__ import annotations import ast import csv import inspect import os import subprocess import tempfile import threading import warnings from pathlib import Path from typing import TYPE_CHECKING, Any, Callable, Dict, Iterable, List, Tuple import matplotlib import matplotlib.pyplot as plt import numpy as np import PIL import PIL.Image import gradio from gradio import components, processing_utils, routes, utils from gradio.context import Context from gradio.documentation import document, set_documentation_group from gradio.flagging import CSVLogger if TYPE_CHECKING: # Only import for type checking (to avoid circular imports). from gradio.components import IOComponent CACHED_FOLDER = "gradio_cached_examples" LOG_FILE = "log.csv" def create_myexamples( examples: List[Any] | List[List[Any]] | str, inputs: IOComponent | List[IOComponent], outputs: IOComponent | List[IOComponent] | None = None, fn: Callable | None = None, cache_examples: bool = False, examples_per_page: int = 10, _api_mode: bool = False, label: str | None = None, elem_id: str | None = None, run_on_click: bool = False, preprocess: bool = True, postprocess: bool = True, batch: bool = False,): """Top-level synchronous function that creates Examples. Provided for backwards compatibility, i.e. so that gr.Examples(...) can be used to create the Examples component.""" examples_obj = MyExamples( examples=examples, inputs=inputs, outputs=outputs, fn=fn, cache_examples=cache_examples, examples_per_page=examples_per_page, _api_mode=_api_mode, label=label, elem_id=elem_id, run_on_click=run_on_click, preprocess=preprocess, postprocess=postprocess, batch=batch, _initiated_directly=False, ) utils.synchronize_async(examples_obj.create) return examples_obj class MyExamples(gradio.helpers.Examples): def __init__( self, examples: List[Any] | List[List[Any]] | str, inputs: IOComponent | List[IOComponent], outputs: IOComponent | List[IOComponent] | None = None, fn: Callable | None = None, cache_examples: bool = False, examples_per_page: int = 10, _api_mode: bool = False, label: str | None = "Examples", elem_id: str | None = None, run_on_click: bool = False, preprocess: bool = True, postprocess: bool = True, batch: bool = False, _initiated_directly: bool = True,): if _initiated_directly: warnings.warn( "Please use gr.Examples(...) instead of gr.examples.Examples(...) 
to create the Examples.", ) if cache_examples and (fn is None or outputs is None): raise ValueError("If caching examples, `fn` and `outputs` must be provided") if not isinstance(inputs, list): inputs = [inputs] if outputs and not isinstance(outputs, list): outputs = [outputs] working_directory = Path().absolute() if examples is None: raise ValueError("The parameter `examples` cannot be None") elif isinstance(examples, list) and ( len(examples) == 0 or isinstance(examples[0], list) ): pass elif ( isinstance(examples, list) and len(inputs) == 1 ): # If there is only one input component, examples can be provided as a regular list instead of a list of lists examples = [[e] for e in examples] elif isinstance(examples, str): if not Path(examples).exists(): raise FileNotFoundError( "Could not find examples directory: " + examples ) working_directory = examples if not (Path(examples) / LOG_FILE).exists(): if len(inputs) == 1: examples = [[e] for e in os.listdir(examples)] else: raise FileNotFoundError( "Could not find log file (required for multiple inputs): " + LOG_FILE ) else: with open(Path(examples) / LOG_FILE) as logs: examples = list(csv.reader(logs)) examples = [ examples[i][: len(inputs)] for i in range(1, len(examples)) ] # remove header and unnecessary columns else: raise ValueError( "The parameter `examples` must either be a string directory or a list" "(if there is only 1 input component) or (more generally), a nested " "list, where each sublist represents a set of inputs." ) input_has_examples = [False] * len(inputs) for example in examples: for idx, example_for_input in enumerate(example): # if not (example_for_input is None): if True: try: input_has_examples[idx] = True except IndexError: pass # If there are more example components than inputs, ignore. This can sometimes be intentional (e.g. loading from a log file where outputs and timestamps are also logged) inputs_with_examples = [ inp for (inp, keep) in zip(inputs, input_has_examples) if keep ] non_none_examples = [ [ex for (ex, keep) in zip(example, input_has_examples) if keep] for example in examples ] self.examples = examples self.non_none_examples = non_none_examples self.inputs = inputs self.inputs_with_examples = inputs_with_examples self.outputs = outputs self.fn = fn self.cache_examples = cache_examples self._api_mode = _api_mode self.preprocess = preprocess self.postprocess = postprocess self.batch = batch with utils.set_directory(working_directory): self.processed_examples = [ [ component.postprocess(sample) for component, sample in zip(inputs, example) ] for example in examples ] self.non_none_processed_examples = [ [ex for (ex, keep) in zip(example, input_has_examples) if keep] for example in self.processed_examples ] if cache_examples: for example in self.examples: if len([ex for ex in example if ex is not None]) != len(self.inputs): warnings.warn( "Examples are being cached but not all input components have " "example values. This may result in an exception being thrown by " "your function. If you do get an error while caching examples, make " "sure all of your inputs have example values for all of your examples " "or you provide default values for those particular parameters in your function." 
) break with utils.set_directory(working_directory): self.dataset = components.Dataset( components=inputs_with_examples, samples=non_none_examples, type="index", label=label, samples_per_page=examples_per_page, elem_id=elem_id, ) self.cached_folder = Path(CACHED_FOLDER) / str(self.dataset._id) self.cached_file = Path(self.cached_folder) / "log.csv" self.cache_examples = cache_examples self.run_on_click = run_on_click from gradio import utils, processing_utils from PIL import Image as _Image from pathlib import Path import numpy as np def customized_postprocess(self, y): if y is None: return None if isinstance(y, dict): if self.tool == "sketch" and self.source in ["upload", "webcam"]: y, mask = y["image"], y["mask"] if y is None: return None elif isinstance(y, np.ndarray): im = processing_utils.encode_array_to_base64(y) elif isinstance(y, _Image.Image): im = processing_utils.encode_pil_to_base64(y) elif isinstance(y, (str, Path)): im = processing_utils.encode_url_or_file_to_base64(y) else: raise ValueError("Cannot process this value as an Image") im = self._format_image(im) if mask is None: return im elif isinstance(y, np.ndarray): mask_im = processing_utils.encode_array_to_base64(mask) elif isinstance(y, _Image.Image): mask_im = processing_utils.encode_pil_to_base64(mask) elif isinstance(y, (str, Path)): mask_im = processing_utils.encode_url_or_file_to_base64(mask) else: raise ValueError("Cannot process this value as an Image") return {"image": im, "mask" : mask_im,} elif isinstance(y, np.ndarray): return processing_utils.encode_array_to_base64(y) elif isinstance(y, _Image.Image): return processing_utils.encode_pil_to_base64(y) elif isinstance(y, (str, Path)): return processing_utils.encode_url_or_file_to_base64(y) else: raise ValueError("Cannot process this value as an Image") # def customized_as_example(self, input_data=None): # if input_data is None: # return str('assets/demo/misc/noimage.jpg') # elif isinstance(input_data, dict): # im = np.array(PIL.Image.open(input_data["image"])).astype(float) # mask = np.array(PIL.Image.open(input_data["mask"])).astype(float)/255 # imm = (im * (1-mask)).astype(np.uint8) # import time # ctime = int(time.time()*100) # impath = 'assets/demo/temp/temp_{}.png'.format(ctime) # PIL.Image.fromarray(imm).save(impath) # return str(utils.abspath(impath)) # else: # return str(utils.abspath(input_data)) def customized_as_example(self, input_data=None): if input_data is None: return str('assets/demo/misc/noimage.jpg') else: return str(utils.abspath(input_data))
Versatile-Diffusion-master
cusomized_gradio_blocks.py
################################################################################ # Copyright (C) 2023 Xingqian Xu - All Rights Reserved # # # # Please visit Versatile Diffusion's arXiv paper for more details, link at # # arxiv.org/abs/2211.08332 # # # # Besides, this work is also inspired by many established techniques including:# # Denoising Diffusion Probablistic Model; Denoising Diffusion Implicit Model; # # Latent Diffusion Model; Stable Diffusion; Stable Diffusion - Img2Img; Stable # # Diffusion - Variation; ImageMixer; DreamBooth; Stable Diffusion - Lora; More # # Control for Free; Prompt-to-Prompt; # # # ################################################################################ import gradio as gr import os import PIL from PIL import Image from pathlib import Path import numpy as np import numpy.random as npr from contextlib import nullcontext import types import torch import torchvision.transforms as tvtrans from lib.cfg_helper import model_cfg_bank from lib.model_zoo import get_model from cusomized_gradio_blocks import create_myexamples, customized_as_example, customized_postprocess n_sample_image = 2 n_sample_text = 4 cache_examples = True from lib.model_zoo.ddim import DDIMSampler ########## # helper # ########## def highlight_print(info): print('') print(''.join(['#']*(len(info)+4))) print('# '+info+' #') print(''.join(['#']*(len(info)+4))) print('') def decompose(x, q=20, niter=100): x_mean = x.mean(-1, keepdim=True) x_input = x - x_mean u, s, v = torch.pca_lowrank(x_input, q=q, center=False, niter=niter) ss = torch.stack([torch.diag(si) for si in s]) x_lowrank = torch.bmm(torch.bmm(u, ss), torch.permute(v, [0, 2, 1])) x_remain = x_input - x_lowrank return u, s, v, x_mean, x_remain class adjust_rank(object): def __init__(self, max_drop_rank=[1, 5], q=20): self.max_semantic_drop_rank = max_drop_rank[0] self.max_style_drop_rank = max_drop_rank[1] self.q = q def t2y0_semf_wrapper(t0, y00, t1, y01): return lambda t: (np.exp((t-0.5)*2)-t0)/(t1-t0)*(y01-y00)+y00 t0, y00 = np.exp((0 -0.5)*2), -self.max_semantic_drop_rank t1, y01 = np.exp((0.5-0.5)*2), 1 self.t2y0_semf = t2y0_semf_wrapper(t0, y00, t1, y01) def x2y_semf_wrapper(x0, x1, y1): return lambda x, y0: (x-x0)/(x1-x0)*(y1-y0)+y0 x0 = 0 x1, y1 = self.max_semantic_drop_rank+1, 1 self.x2y_semf = x2y_semf_wrapper(x0, x1, y1) def t2y0_styf_wrapper(t0, y00, t1, y01): return lambda t: (np.exp((t-0.5)*2)-t0)/(t1-t0)*(y01-y00)+y00 t0, y00 = np.exp((1 -0.5)*2), -(q-self.max_style_drop_rank) t1, y01 = np.exp((0.5-0.5)*2), 1 self.t2y0_styf = t2y0_styf_wrapper(t0, y00, t1, y01) def x2y_styf_wrapper(x0, x1, y1): return lambda x, y0: (x-x0)/(x1-x0)*(y1-y0)+y0 x0 = q-1 x1, y1 = self.max_style_drop_rank-1, 1 self.x2y_styf = x2y_styf_wrapper(x0, x1, y1) def __call__(self, x, lvl): if lvl == 0.5: return x if x.dtype == torch.float16: fp16 = True x = x.float() else: fp16 = False std_save = x.std(axis=[-2, -1]) u, s, v, x_mean, x_remain = decompose(x, q=self.q) if lvl < 0.5: assert lvl>=0 for xi in range(0, self.max_semantic_drop_rank+1): y0 = self.t2y0_semf(lvl) yi = self.x2y_semf(xi, y0) yi = 0 if yi<0 else yi s[:, xi] *= yi elif lvl > 0.5: assert lvl <= 1 for xi in range(self.max_style_drop_rank, self.q): y0 = self.t2y0_styf(lvl) yi = self.x2y_styf(xi, y0) yi = 0 if yi<0 else yi s[:, xi] *= yi x_remain = 0 ss = torch.stack([torch.diag(si) for si in s]) x_lowrank = torch.bmm(torch.bmm(u, ss), torch.permute(v, [0, 2, 1])) x_new = x_lowrank + x_mean + x_remain std_new = x_new.std(axis=[-2, -1]) x_new = x_new / std_new * std_save if fp16: 
x_new = x_new.half() return x_new def remove_duplicate_word(tx): def combine_words(input, length): combined_inputs = [] if len(splitted_input)>1: for i in range(len(input)-1): combined_inputs.append(input[i]+" "+last_word_of(splitted_input[i+1],length)) #add the last word of the right-neighbour (overlapping) sequence (before it has expanded), which is the next word in the original sentence return combined_inputs, length+1 def remove_duplicates(input, length): bool_broke=False #this means we didn't find any duplicates here for i in range(len(input) - length): if input[i]==input[i + length]: #found a duplicate piece of sentence! for j in range(0, length): #remove the overlapping sequences in reverse order del input[i + length - j] bool_broke = True break #break the for loop as the loop length does not matches the length of splitted_input anymore as we removed elements if bool_broke: return remove_duplicates(input, length) #if we found a duplicate, look for another duplicate of the same length return input def last_word_of(input, length): splitted = input.split(" ") if len(splitted)==0: return input else: return splitted[length-1] def split_and_puncsplit(text): tx = text.split(" ") txnew = [] for txi in tx: txqueue=[] while True: if txi[0] in '([{': txqueue.extend([txi[:1], '<puncnext>']) txi = txi[1:] if len(txi) == 0: break else: break txnew += txqueue txstack=[] if len(txi) == 0: continue while True: if txi[-1] in '?!.,:;}])': txstack = ['<puncnext>', txi[-1:]] + txstack txi = txi[:-1] if len(txi) == 0: break else: break if len(txi) != 0: txnew += [txi] txnew += txstack return txnew if tx == '': return tx splitted_input = split_and_puncsplit(tx) word_length = 1 intermediate_output = False while len(splitted_input)>1: splitted_input = remove_duplicates(splitted_input, word_length) if len(splitted_input)>1: splitted_input, word_length = combine_words(splitted_input, word_length) if intermediate_output: print(splitted_input) print(word_length) output = splitted_input[0] output = output.replace(' <puncnext> ', '') return output def get_instruction(mode): t2i_instruction = ["Generate image from text prompt."] i2i_instruction = ["Generate image conditioned on reference image.",] i2t_instruction = ["Generate text from reference image. "] t2t_instruction = ["Generate text from reference text prompt. 
"] dcg_instruction = ["Generate image conditioned on both text and image."] tcg_instruction = ["Generate image conditioned on text and up to two images."] mcg_instruction = ["Generate image from multiple contexts."] if mode == "Text-to-Image": return '\n'.join(t2i_instruction) elif mode == "Image-Variation": return '\n'.join(i2i_instruction) elif mode == "Image-to-Text": return '\n'.join(i2t_instruction) elif mode == "Text-Variation": return '\n'.join(t2t_instruction) elif mode == "Dual-Context": return '\n'.join(dcg_instruction) elif mode == "Triple-Context": return '\n'.join(tcg_instruction) elif mode == "Multi-Context": return '\n'.join(mcg_instruction) else: assert False ######## # main # ######## class vd_dummy(object): def __init__(self, *args, **kwarg): self.which = 'Vdummy' def inference_t2i(self, *args, **kwarg): pass def inference_i2i(self, *args, **kwarg): pass def inference_i2t(self, *args, **kwarg): pass def inference_t2t(self, *args, **kwarg): pass def inference_dcg(self, *args, **kwarg): pass def inference_tcg(self, *args, **kwarg): pass def inference_mcg(self, *args, **kwarg): return None, None class vd_inference(object): def __init__(self, fp16=False, which='v2.0'): highlight_print(which) self.which = which if self.which == 'v1.0': cfgm = model_cfg_bank()('vd_four_flow_v1-0') else: assert False, 'Model type not supported' net = get_model()(cfgm) if fp16: highlight_print('Running in FP16') if self.which == 'v1.0': net.ctx['text'].fp16 = True net.ctx['image'].fp16 = True net = net.half() self.dtype = torch.float16 else: self.dtype = torch.float32 if self.which == 'v1.0': if fp16: sd = torch.load('pretrained/vd-four-flow-v1-0-fp16.pth', map_location='cpu') else: sd = torch.load('pretrained/vd-four-flow-v1-0.pth', map_location='cpu') # from huggingface_hub import hf_hub_download # if fp16: # temppath = hf_hub_download('shi-labs/versatile-diffusion-model', 'pretrained_pth/vd-four-flow-v1-0-fp16.pth') # else: # temppath = hf_hub_download('shi-labs/versatile-diffusion-model', 'pretrained_pth/vd-four-flow-v1-0.pth') # sd = torch.load(temppath, map_location='cpu') net.load_state_dict(sd, strict=False) self.use_cuda = torch.cuda.is_available() if self.use_cuda: net.to('cuda') self.net = net self.sampler = DDIMSampler(net) self.output_dim = [512, 512] self.n_sample_image = n_sample_image self.n_sample_text = n_sample_text self.ddim_steps = 50 self.ddim_eta = 0.0 self.scale_textto = 7.5 self.image_latent_dim = 4 self.text_latent_dim = 768 self.text_temperature = 1 if which == 'v1.0': self.adjust_rank_f = adjust_rank(max_drop_rank=[1, 5], q=20) self.scale_imgto = 7.5 self.disentanglement_noglobal = True def inference_t2i(self, text, seed): n_samples = self.n_sample_image scale = self.scale_textto sampler = self.sampler h, w = self.output_dim u = self.net.ctx_encode([""], which='text').repeat(n_samples, 1, 1) c = self.net.ctx_encode([text], which='text').repeat(n_samples, 1, 1) shape = [n_samples, self.image_latent_dim, h//8, w//8] np.random.seed(seed) torch.manual_seed(seed + 100) x, _ = sampler.sample( steps=self.ddim_steps, x_info={'type':'image'}, c_info={'type':'text', 'conditioning':c, 'unconditional_conditioning':u, 'unconditional_guidance_scale':scale}, shape=shape, verbose=False, eta=self.ddim_eta) im = self.net.vae_decode(x, which='image') im = [tvtrans.ToPILImage()(i) for i in im] return im def inference_i2i(self, im, fid_lvl, fcs_lvl, clr_adj, seed): n_samples = self.n_sample_image scale = self.scale_imgto sampler = self.sampler h, w = self.output_dim device = 
self.net.device BICUBIC = PIL.Image.Resampling.BICUBIC im = im.resize([w, h], resample=BICUBIC) if fid_lvl == 1: return [im]*n_samples cx = tvtrans.ToTensor()(im)[None].to(device).to(self.dtype) c = self.net.ctx_encode(cx, which='image') if self.disentanglement_noglobal: c_glb = c[:, 0:1] c_loc = c[:, 1: ] c_loc = self.adjust_rank_f(c_loc, fcs_lvl) c = torch.cat([c_glb, c_loc], dim=1).repeat(n_samples, 1, 1) else: c = self.adjust_rank_f(c, fcs_lvl).repeat(n_samples, 1, 1) u = torch.zeros_like(c) shape = [n_samples, self.image_latent_dim, h//8, w//8] np.random.seed(seed) torch.manual_seed(seed + 100) if fid_lvl!=0: x0 = self.net.vae_encode(cx, which='image').repeat(n_samples, 1, 1, 1) step = int(self.ddim_steps * (1-fid_lvl)) x, _ = sampler.sample( steps=self.ddim_steps, x_info={'type':'image', 'x0':x0, 'x0_forward_timesteps':step}, c_info={'type':'image', 'conditioning':c, 'unconditional_conditioning':u, 'unconditional_guidance_scale':scale}, shape=shape, verbose=False, eta=self.ddim_eta) else: x, _ = sampler.sample( steps=self.ddim_steps, x_info={'type':'image',}, c_info={'type':'image', 'conditioning':c, 'unconditional_conditioning':u, 'unconditional_guidance_scale':scale}, shape=shape, verbose=False, eta=self.ddim_eta) imout = self.net.vae_decode(x, which='image') if clr_adj == 'Simple': cx_mean = cx.view(3, -1).mean(-1)[:, None, None] cx_std = cx.view(3, -1).std(-1)[:, None, None] imout_mean = [imouti.view(3, -1).mean(-1)[:, None, None] for imouti in imout] imout_std = [imouti.view(3, -1).std(-1)[:, None, None] for imouti in imout] imout = [(ii-mi)/si*cx_std+cx_mean for ii, mi, si in zip(imout, imout_mean, imout_std)] imout = [torch.clamp(ii, 0, 1) for ii in imout] imout = [tvtrans.ToPILImage()(i) for i in imout] return imout def inference_i2t(self, im, seed): n_samples = self.n_sample_text scale = self.scale_imgto sampler = self.sampler h, w = self.output_dim device = self.net.device BICUBIC = PIL.Image.Resampling.BICUBIC im = im.resize([w, h], resample=BICUBIC) cx = tvtrans.ToTensor()(im)[None].to(device) c = self.net.ctx_encode(cx, which='image').repeat(n_samples, 1, 1) u = self.net.ctx_encode(torch.zeros_like(cx), which='image').repeat(n_samples, 1, 1) shape = [n_samples, self.text_latent_dim] np.random.seed(seed) torch.manual_seed(seed + 100) x, _ = sampler.sample( steps=self.ddim_steps, x_info={'type':'text',}, c_info={'type':'image', 'conditioning':c, 'unconditional_conditioning':u, 'unconditional_guidance_scale':scale}, shape=shape, verbose=False, eta=self.ddim_eta) tx = self.net.vae_decode(x, which='text', temperature=self.text_temperature) tx = [remove_duplicate_word(txi) for txi in tx] tx_combined = '\n'.join(tx) return tx_combined def inference_t2t(self, text, seed): n_samples = self.n_sample_text scale = self.scale_textto sampler = self.sampler u = self.net.ctx_encode([""], which='text').repeat(n_samples, 1, 1) c = self.net.ctx_encode([text], which='text').repeat(n_samples, 1, 1) shape = [n_samples, self.text_latent_dim] np.random.seed(seed) torch.manual_seed(seed + 100) x, _ = sampler.sample( steps=self.ddim_steps, x_info={'type':'text',}, c_info={'type':'text', 'conditioning':c, 'unconditional_conditioning':u, 'unconditional_guidance_scale':scale}, shape=shape, verbose=False, eta=self.ddim_eta) tx = self.net.vae_decode(x, which='text', temperature=self.text_temperature) tx = [remove_duplicate_word(txi) for txi in tx] tx_combined = '\n'.join(tx) return tx_combined def inference_dcg(self, imctx, fcs_lvl, textctx, textstrength, seed): n_samples = self.n_sample_image sampler 
= self.sampler h, w = self.output_dim device = self.net.device c_info_list = [] if (textctx is not None) and (textctx != "") and (textstrength != 0): ut = self.net.ctx_encode([""], which='text').repeat(n_samples, 1, 1) ct = self.net.ctx_encode([textctx], which='text').repeat(n_samples, 1, 1) scale = self.scale_imgto*(1-textstrength) + self.scale_textto*textstrength c_info_list.append({ 'type':'text', 'conditioning':ct, 'unconditional_conditioning':ut, 'unconditional_guidance_scale':scale, 'ratio': textstrength, }) else: scale = self.scale_imgto textstrength = 0 BICUBIC = PIL.Image.Resampling.BICUBIC cx = imctx.resize([w, h], resample=BICUBIC) cx = tvtrans.ToTensor()(cx)[None].to(device).to(self.dtype) ci = self.net.ctx_encode(cx, which='image') if self.disentanglement_noglobal: ci_glb = ci[:, 0:1] ci_loc = ci[:, 1: ] ci_loc = self.adjust_rank_f(ci_loc, fcs_lvl) ci = torch.cat([ci_glb, ci_loc], dim=1).repeat(n_samples, 1, 1) else: ci = self.adjust_rank_f(ci, fcs_lvl).repeat(n_samples, 1, 1) c_info_list.append({ 'type':'image', 'conditioning':ci, 'unconditional_conditioning':torch.zeros_like(ci), 'unconditional_guidance_scale':scale, 'ratio': (1-textstrength), }) shape = [n_samples, self.image_latent_dim, h//8, w//8] np.random.seed(seed) torch.manual_seed(seed + 100) x, _ = sampler.sample_multicontext( steps=self.ddim_steps, x_info={'type':'image',}, c_info_list=c_info_list, shape=shape, verbose=False, eta=self.ddim_eta) imout = self.net.vae_decode(x, which='image') imout = [tvtrans.ToPILImage()(i) for i in imout] return imout def inference_tcg(self, *args): args_imag = list(args[0:10]) + [None, None, None, None, None]*2 args_rest = args[10:] imin, imout = self.inference_mcg(*args_imag, *args_rest) return imin, imout def inference_mcg(self, *args): imctx = [args[0:5], args[5:10], args[10:15], args[15:20]] textctx, textstrength, seed = args[20:] n_samples = self.n_sample_image sampler = self.sampler h, w = self.output_dim device = self.net.device c_info_list = [] if (textctx is not None) and (textctx != "") and (textstrength != 0): ut = self.net.ctx_encode([""], which='text').repeat(n_samples, 1, 1) ct = self.net.ctx_encode([textctx], which='text').repeat(n_samples, 1, 1) scale = self.scale_imgto*(1-textstrength) + self.scale_textto*textstrength c_info_list.append({ 'type':'text', 'conditioning':ct, 'unconditional_conditioning':ut, 'unconditional_guidance_scale':scale, 'ratio': textstrength, }) else: scale = self.scale_imgto textstrength = 0 input_save = [] imc = [] for im, imm, strength, fcs_lvl, use_mask in imctx: if (im is None) and (imm is None): continue BILINEAR = PIL.Image.Resampling.BILINEAR BICUBIC = PIL.Image.Resampling.BICUBIC if use_mask: cx = imm['image'].resize([w, h], resample=BICUBIC) cx = tvtrans.ToTensor()(cx)[None].to(self.dtype).to(device) m = imm['mask'].resize([w, h], resample=BILINEAR) m = tvtrans.ToTensor()(m)[None, 0:1].to(self.dtype).to(device) m = (1-m) cx_show = cx*m ci = self.net.ctx_encode(cx, which='image', masks=m) else: cx = im.resize([w, h], resample=BICUBIC) cx = tvtrans.ToTensor()(cx)[None].to(self.dtype).to(device) ci = self.net.ctx_encode(cx, which='image') cx_show = cx input_save.append(tvtrans.ToPILImage()(cx_show[0])) if self.disentanglement_noglobal: ci_glb = ci[:, 0:1] ci_loc = ci[:, 1: ] ci_loc = self.adjust_rank_f(ci_loc, fcs_lvl) ci = torch.cat([ci_glb, ci_loc], dim=1).repeat(n_samples, 1, 1) else: ci = self.adjust_rank_f(ci, fcs_lvl).repeat(n_samples, 1, 1) imc.append(ci * strength) cis = torch.cat(imc, dim=1) c_info_list.append({ 'type':'image', 
'conditioning':cis, 'unconditional_conditioning':torch.zeros_like(cis), 'unconditional_guidance_scale':scale, 'ratio': (1-textstrength), }) shape = [n_samples, self.image_latent_dim, h//8, w//8] np.random.seed(seed) torch.manual_seed(seed + 100) x, _ = sampler.sample_multicontext( steps=self.ddim_steps, x_info={'type':'image',}, c_info_list=c_info_list, shape=shape, verbose=False, eta=self.ddim_eta) imout = self.net.vae_decode(x, which='image') imout = [tvtrans.ToPILImage()(i) for i in imout] return input_save, imout # vd_inference = vd_dummy() vd_inference = vd_inference(which='v1.0', fp16=True) ################# # sub interface # ################# def t2i_interface(with_example=False): gr.HTML('<p id=myinst>&nbsp Description: ' + get_instruction("Text-to-Image") + '</p>') with gr.Row(): with gr.Column(): text = gr.Textbox(lines=4, placeholder="Input prompt...", label='Text Input') seed = gr.Number(20, label="Seed", precision=0) button = gr.Button("Run") with gr.Column(): img_output = gr.Gallery(label="Image Result", elem_id='customized_imbox').style(grid=n_sample_image) button.click( vd_inference.inference_t2i, inputs=[text, seed], outputs=[img_output]) if with_example: gr.Examples( label='Examples', examples=get_example('Text-to-Image'), fn=vd_inference.inference_t2i, inputs=[text, seed], outputs=[img_output], cache_examples=cache_examples), def i2i_interface(with_example=False): gr.HTML('<p id=myinst>&nbsp Description: ' + get_instruction("Image-Variation") + '</p>') with gr.Row(): with gr.Column(): img_input = gr.Image(label='Image Input', type='pil', elem_id='customized_imbox') sim_flag = gr.Checkbox(label='Show Detail Controls') with gr.Row(): fid_lvl = gr.Slider(label="Fidelity (Dislike -- Same)", minimum=0, maximum=1, value=0, step=0.02, visible=False) fcs_lvl = gr.Slider(label="Focus (Semantic -- Style)", minimum=0, maximum=1, value=0.5, step=0.02, visible=False) clr_adj = gr.Radio(label="Color Adjustment", choices=["None", "Simple"], value='Simple', visible=False) explain = gr.HTML('<p id=myinst>&nbsp Fidelity: How likely the output image looks like the referece image (0-dislike (default), 1-same).</p>'+ '<p id=myinst>&nbsp Focus: What the output image should focused on (0-semantic, 0.5-balanced (default), 1-style).</p>', visible=False) seed = gr.Number(20, label="Seed", precision=0) button = gr.Button("Run") with gr.Column(): img_output = gr.Gallery(label="Image Result", elem_id='customized_imbox').style(grid=n_sample_image) sim_flag.change( fn=lambda x: { explain : gr.update(visible=x), fid_lvl : gr.update(visible=x), fcs_lvl : gr.update(visible=x), clr_adj : gr.update(visible=x), }, inputs=sim_flag, outputs=[explain, fid_lvl, fcs_lvl, clr_adj, seed],) button.click( vd_inference.inference_i2i, inputs=[img_input, fid_lvl, fcs_lvl, clr_adj, seed], outputs=[img_output]) if with_example: gr.Examples( label='Examples', examples=get_example('Image-Variation'), fn=vd_inference.inference_i2i, inputs=[img_input, fid_lvl, fcs_lvl, clr_adj, seed], outputs=[img_output], cache_examples=cache_examples), def i2t_interface(with_example=False): gr.HTML('<p id=myinst>&nbsp Description: ' + get_instruction("Image-to-Text") + '</p>') with gr.Row(): with gr.Column(): img_input = gr.Image(label='Image Input', type='pil', elem_id='customized_imbox') seed = gr.Number(20, label="Seed", precision=0) button = gr.Button("Run") with gr.Column(): txt_output = gr.Textbox(lines=4, label='Text Result') button.click( vd_inference.inference_i2t, inputs=[img_input, seed], outputs=[txt_output]) if with_example: 
gr.Examples( label='Examples', examples=get_example('Image-to-Text'), fn=vd_inference.inference_i2t, inputs=[img_input, seed], outputs=[txt_output], cache_examples=cache_examples), def t2t_interface(with_example=False): gr.HTML('<p id=myinst>&nbsp Description: ' + get_instruction("Text-Variation") + '</p>') with gr.Row(): with gr.Column(): text = gr.Textbox(lines=4, placeholder="Input prompt...", label='Text Input') seed = gr.Number(20, label="Seed", precision=0) button = gr.Button("Run") with gr.Column(): txt_output = gr.Textbox(lines=4, label='Text Result') button.click( vd_inference.inference_t2t, inputs=[text, seed], outputs=[txt_output]) if with_example: gr.Examples( label='Examples', examples=get_example('Text-Variation'), fn=vd_inference.inference_t2t, inputs=[text, seed], outputs=[txt_output], cache_examples=cache_examples, ) class image_mimage_swap(object): def __init__(self, block0, block1): self.block0 = block0 self.block1 = block1 self.which_update = 'both' def __call__(self, x0, x1, flag): if self.which_update == 'both': return self.update_both(x0, x1, flag) elif self.which_update == 'visible': return self.update_visible(x0, x1, flag) elif self.which_update == 'visible_oneoff': return self.update_visible_oneoff(x0, x1, flag) else: assert False def update_both(self, x0, x1, flag): if flag: ug0 = gr.update(visible=False) if x0 is None: ug1 = gr.update(value=None, visible=True) else: if (x1 is not None) and ('mask' in x1): value1 = {'image':x0, 'mask':x1['mask']} else: value1 = {'image':x0, 'mask':None} ug1 = gr.update(value=value1, visible=True) else: if (x1 is not None) and ('image' in x1): value0 = x1['image'] else: value0 = None ug0 = gr.update(value=value0, visible=True) ug1 = gr.update(visible=False) return { self.block0 : ug0, self.block1 : ug1,} def update_visible(self, x0, x1, flag): return { self.block0 : gr.update(visible=not flag), self.block1 : gr.update(visible=flag), } def update_visible_oneoff(self, x0, x1, flag): self.which_update = 'both' return { self.block0 : gr.update(visible=not flag), self.block1 : gr.update(visible=flag), } class example_visible_only_hack(object): def __init__(self, checkbox_list, functor_list): self.checkbox_list = checkbox_list self.functor_list = functor_list def __call__(self, *args): for bi, fi, vi in zip(self.checkbox_list, self.functor_list, args): if bi.value != vi: fi.which_update = 'visible_oneoff' def dcg_interface(with_example=False): gr.HTML('<p id=myinst>&nbsp Description: ' + get_instruction("Dual-Context") + '</p>') with gr.Row(): input_session = [] with gr.Column(): img = gr.Image(label='Image Input', type='pil', elem_id='customized_imbox') fcs = gr.Slider(label="Focus (Semantic -- Style)", minimum=0, maximum=1, value=0.5, step=0.02) gr.HTML('<p id=myinst>&nbsp Focus: Focus on what aspect of the image? 
(0-semantic, 0.5-balanced (default), 1-style).</p>') text = gr.Textbox(lines=2, placeholder="Input prompt...", label='Text Input') tstrength = gr.Slider(label="Text Domination (NoEffect -- TextOnly)", minimum=0, maximum=1, value=0, step=0.02) seed = gr.Number(20, label="Seed", precision=0) button = gr.Button("Run") with gr.Column(): output_gallary = gr.Gallery(label="Image Result", elem_id='customized_imbox').style(grid=n_sample_image) input_list = [] for i in input_session: input_list += i button.click( vd_inference.inference_dcg, inputs=[img, fcs, text, tstrength, seed], outputs=[output_gallary]) if with_example: gr.Examples( label='Examples', examples=get_example('Dual-Context'), fn=vd_inference.inference_dcg, inputs=[img, fcs, text, tstrength, seed], outputs=[output_gallary], cache_examples=cache_examples) def tcg_interface(with_example=False): gr.HTML('<p id=myinst>&nbsp Description: ' + get_instruction("Triple-Context") + '</p>') with gr.Row(): input_session = [] with gr.Column(min_width=940): with gr.Row(): with gr.Column(): img0 = gr.Image(label='Image Input', type='pil', elem_id='customized_imbox') img0.as_example = types.MethodType(customized_as_example, img0) imgm0 = gr.Image(label='Image Input with Mask', type='pil', elem_id='customized_imbox', tool='sketch', source="upload", visible=False) imgm0.postprocess = types.MethodType(customized_postprocess, imgm0) imgm0.as_example = types.MethodType(customized_as_example, imgm0) istrength0 = gr.Slider(label="Weight", minimum=0, maximum=1, value=1, step=0.02) fcs0 = gr.Slider(label="Focus (Semantic -- Style)", minimum=0, maximum=1, value=0.5, step=0.02) msk0 = gr.Checkbox(label='Use mask?') swapf0 = image_mimage_swap(img0, imgm0) msk0.change( fn=swapf0, inputs=[img0, imgm0, msk0], outputs=[img0, imgm0],) input_session.append([img0, imgm0, istrength0, fcs0, msk0]) with gr.Column(): img1 = gr.Image(label='Image Input', type='pil', elem_id='customized_imbox') img1.as_example = types.MethodType(customized_as_example, img1) imgm1 = gr.Image(label='Image Input with Mask', type='pil', elem_id='customized_imbox', tool='sketch', source="upload", visible=False) imgm1.postprocess = types.MethodType(customized_postprocess, imgm1) imgm1.as_example = types.MethodType(customized_as_example, imgm1) istrength1 = gr.Slider(label="Weight", minimum=0, maximum=1, value=1, step=0.02) fcs1 = gr.Slider(label="Focus (Semantic -- Style)", minimum=0, maximum=1, value=0.5, step=0.02) msk1 = gr.Checkbox(label='Use mask?') swapf1 = image_mimage_swap(img1, imgm1) msk1.change( fn=swapf1, inputs=[img1, imgm1, msk1], outputs=[img1, imgm1],) input_session.append([img1, imgm1, istrength1, fcs1, msk1]) gr.HTML('<p id=myinst>&nbsp Weight: The strength of the reference image. This weight is subject to <u>Text Domination</u>).</p>'+ '<p id=myinst>&nbsp Focus: Focus on what aspect of the image? 
(0-semantic, 0.5-balanced (default), 1-style).</p>'+ '<p id=myinst>&nbsp Mask: Remove regions on reference image so they will not influence the output.</p>',) text = gr.Textbox(lines=2, placeholder="Input prompt...", label='Text Input') tstrength = gr.Slider(label="Text Domination (NoEffect -- TextOnly)", minimum=0, maximum=1, value=0, step=0.02) seed = gr.Number(20, label="Seed", precision=0) button = gr.Button("Run") with gr.Column(min_width=470): input_gallary = gr.Gallery(label="Input Display", elem_id="customized_imbox").style(grid=2) output_gallary = gr.Gallery(label="Image Result", elem_id="customized_imbox").style(grid=n_sample_image) input_list = [] for i in input_session: input_list += i input_list += [text, tstrength, seed] button.click( vd_inference.inference_tcg, inputs=input_list, outputs=[input_gallary, output_gallary]) if with_example: create_myexamples( label='Examples', examples=get_example('Triple-Context'), fn=vd_inference.inference_tcg, inputs=input_list, outputs=[input_gallary, output_gallary, ], cache_examples=cache_examples, ) gr.HTML('<br><p id=myinst>&nbsp How to add mask: Please see the following instructions.</p><br>'+ '<div id="maskinst">'+ '<img src="file/assets/demo/misc/mask_inst1.gif">'+ '<img src="file/assets/demo/misc/mask_inst2.gif">'+ '<img src="file/assets/demo/misc/mask_inst3.gif">'+ '</div>') def mcg_interface(with_example=False): num_img_input = 4 gr.HTML('<p id=myinst>&nbsp Description: ' + get_instruction("Multi-Context") + '</p>') with gr.Row(): input_session = [] with gr.Column(): for idx in range(num_img_input): with gr.Tab('Image{}'.format(idx+1)): img = gr.Image(label='Image Input', type='pil', elem_id='customized_imbox') img.as_example = types.MethodType(customized_as_example, img) imgm = gr.Image(label='Image Input with Mask', type='pil', elem_id='customized_imbox', tool='sketch', source="upload", visible=False) imgm.postprocess = types.MethodType(customized_postprocess, imgm) imgm.as_example = types.MethodType(customized_as_example, imgm) with gr.Row(): istrength = gr.Slider(label="Weight", minimum=0, maximum=1, value=1, step=0.02) fcs = gr.Slider(label="Focus (Semantic -- Style)", minimum=0, maximum=1, value=0.5, step=0.02) msk = gr.Checkbox(label='Use mask?') gr.HTML('<p id=myinst>&nbsp Weight: The strength of the reference image. This weight is subject to <u>Text Domination</u>).</p>'+ '<p id=myinst>&nbsp Focus: Focus on what aspect of the image? 
(0-semantic, 0.5-balanced (default), 1-style).</p>'+ '<p id=myinst>&nbsp Mask: Remove regions on reference image so they will not influence the output.</p>',) msk.change( fn=image_mimage_swap(img, imgm), inputs=[img, imgm, msk], outputs=[img, imgm],) input_session.append([img, imgm, istrength, fcs, msk]) text = gr.Textbox(lines=2, placeholder="Input prompt...", label='Text Input') tstrength = gr.Slider(label="Text Domination (NoEffect -- TextOnly)", minimum=0, maximum=1, value=0, step=0.02) seed = gr.Number(20, label="Seed", precision=0) button = gr.Button("Run") with gr.Column(): input_gallary = gr.Gallery(label="Input Display", elem_id='customized_imbox').style(grid=4) output_gallary = gr.Gallery(label="Image Result", elem_id='customized_imbox').style(grid=n_sample_image) input_list = [] for i in input_session: input_list += i input_list += [text, tstrength, seed] button.click( vd_inference.inference_mcg, inputs=input_list, outputs=[input_gallary, output_gallary], ) if with_example: create_myexamples( label='Examples', examples=get_example('Multi-Context'), fn=vd_inference.inference_mcg, inputs=input_list, outputs=[input_gallary, output_gallary], cache_examples=cache_examples, ) gr.HTML('<br><p id=myinst>&nbsp How to add mask: Please see the following instructions.</p><br>'+ '<div id="maskinst">'+ '<img src="file/assets/demo/misc/mask_inst1.gif">'+ '<img src="file/assets/demo/misc/mask_inst2.gif">'+ '<img src="file/assets/demo/misc/mask_inst3.gif">'+ '</div>') ########### # Example # ########### def get_example(mode): if mode == 'Text-to-Image': case = [ ['a dream of a village in china, by Caspar David Friedrich, matte painting trending on artstation HQ', 23], ['a beautiful landscape with mountains and rivers', 20], ] elif mode == "Image-Variation": case = [ ['assets/demo/reg_example/ghibli.jpg', 0, 0.5, 'None', 20], ['assets/demo/reg_example/ghibli.jpg', 0.5, 0.5, 'None', 20], ['assets/demo/reg_example/matisse.jpg', 0, 0, 'None', 20], ['assets/demo/reg_example/matisse.jpg', 0, 1, 'Simple', 20], ['assets/demo/reg_example/vermeer.jpg', 0.2, 0.3, 'None', 30], ] elif mode == "Image-to-Text": case = [ ['assets/demo/reg_example/house_by_lake.jpg', 20], ] elif mode == "Text-Variation": case = [ ['heavy arms gundam penguin mech', 20], ] elif mode == "Dual-Context": case = [ ['assets/demo/reg_example/benz.jpg', 0.5, 'cyberpunk 2077', 0.7, 22], ['assets/demo/reg_example/ghibli.jpg', 1, 'Red maple on a hill in golden Autumn.', 0.5, 21], ] elif mode == "Triple-Context": case = [ [ 'assets/demo/reg_example/night_light.jpg', None, 1 , 0.5, False, 'assets/demo/reg_example/paris.jpg' , None, 0.94, 0.5, False, "snow on the street", 0.4, 28], [ 'assets/demo/tcg_example/e1i0.jpg', None, 1 , 0.5, False, 'assets/demo/tcg_example/e1i1.jpg', None, 0.94, 0.5, False, "a painting of an elegant woman in front of the moon", 0.2, 217], [ 'assets/demo/tcg_example/e2i0.jpg', None, 1, 0.5, False, 'assets/demo/reg_example/paris.jpg', None, 1, 0.5, False, "", 0, 29], [ 'assets/demo/tcg_example/e0i0.jpg', None, 1 , 0.5, False, 'assets/demo/tcg_example/e0i1.jpg', None, 0.9, 0.5, False, "rose blooms on the tree", 0.2, 20], [ 'assets/demo/reg_example/ghibli.jpg', None, 1 , 1 , False, 'assets/demo/reg_example/space.jpg' , None, 0.88, 0.5, False, "", 0, 20], [ 'assets/demo/reg_example/train.jpg' , None, 0.8, 0.5, False, 'assets/demo/reg_example/matisse.jpg', None, 1 , 1 , False, "", 0, 20], ] elif mode == "Multi-Context": case = [ [ 'assets/demo/mcg_example/e0i0.jpg', None, 1, 0.5, False, 'assets/demo/mcg_example/e0i1.jpg', 
None, 1, 0.5, False, 'assets/demo/mcg_example/e0i2.jpg', None, 0.86, 0.5, False, None, None, 1, 0.5, False, "", 0, 20], ] else: raise ValueError return case ############# # Interface # ############# css = """ #customized_imbox { min-height: 450px; } #customized_imbox>div[data-testid="image"] { min-height: 450px; } #customized_imbox>div[data-testid="image"]>div { min-height: 450px; } #customized_imbox>div[data-testid="image"]>iframe { min-height: 450px; } #customized_imbox>div.unpadded_box { min-height: 450px; } #myinst { font-size: 0.8rem; margin: 0rem; color: #6B7280; } #maskinst { text-align: justify; min-width: 1200px; } #maskinst>img { min-width:399px; max-width:450px; vertical-align: top; display: inline-block; } #maskinst:after { content: ""; width: 100%; display: inline-block; } """ if True: with gr.Blocks(css=css) as demo: gr.HTML( """ <div style="text-align: center; max-width: 1200px; margin: 20px auto;"> <h1 style="font-weight: 900; font-size: 3rem; margin: 0rem"> Versatile Diffusion </h1> <h2 style="font-weight: 450; font-size: 1rem; margin-top: 0.5rem; margin-bottom: 0.5rem"> We built <b>Versatile Diffusion (VD), the first unified multi-flow multimodal diffusion framework</b>, as a step towards <b>Universal Generative AI</b>. VD can natively support image-to-text, image-variation, text-to-image, and text-variation, and can be further extended to other applications such as semantic-style disentanglement, image-text dual-guided generation, latent image-to-text-to-image editing, and more. Future versions will support more modalities such as speech, music, video and 3D. </h2> <h3 style="font-weight: 450; font-size: 1rem; margin: 0rem"> Xingqian Xu, Atlas Wang, Eric Zhang, Kai Wang, and <a href="https://www.humphreyshi.com/home">Humphrey Shi</a> [<a href="https://arxiv.org/abs/2211.08332" style="color:blue;">arXiv</a>] [<a href="https://github.com/SHI-Labs/Versatile-Diffusion" style="color:blue;">GitHub</a>] </h3> </div> """) with gr.Tab('Text-to-Image'): t2i_interface(with_example=True) with gr.Tab('Image-Variation'): i2i_interface(with_example=True) with gr.Tab('Image-to-Text'): i2t_interface(with_example=True) with gr.Tab('Text-Variation'): t2t_interface(with_example=True) with gr.Tab('Dual-Context Image-Generation'): dcg_interface(with_example=True) with gr.Tab('Triple-Context Image-Blender'): tcg_interface(with_example=True) with gr.Tab('Multi-Context Image-Blender'): mcg_interface(with_example=True) gr.HTML( """ <div style="text-align: justify; max-width: 1200px; margin: 20px auto;"> <h3 style="font-weight: 450; font-size: 0.8rem; margin: 0rem"> <b>Version</b>: {} </h3> <h3 style="font-weight: 450; font-size: 0.8rem; margin: 0rem"> <b>Caution</b>: We would like the raise the awareness of users of this demo of its potential issues and concerns. Like previous large foundation models, Versatile Diffusion could be problematic in some cases, partially due to the imperfect training data and pretrained network (VAEs / context encoders) with limited scope. In its future research phase, VD may do better on tasks such as text-to-image, image-to-text, etc., with the help of more powerful VAEs, more sophisticated network designs, and more cleaned data. So far, we keep all features available for research testing both to show the great potential of the VD framework and to collect important feedback to improve the model in the future. We welcome researchers and users to report issues with the HuggingFace community discussion feature or email the authors. 
                </h3>
                <h3 style="font-weight: 450; font-size: 0.8rem; margin: 0rem">
                <b>Biases and content acknowledgement</b>:
                Beware that VD may output content that reinforces or exacerbates societal biases,
                as well as realistic faces, pornography, and violence.
                VD was trained on the LAION-2B dataset, which scraped non-curated online images and text,
                and may contain unintended exceptions despite our removal of illegal content.
                VD in this demo is meant only for research purposes.
                </h3>
            </div>
            """.format(' '+vd_inference.which))

    demo.launch(share=True)
    # demo.launch(debug=True)
Versatile-Diffusion-master
app.py
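# --- Hedged usage sketch (editor's addition, not part of the repository) ---
# The Gradio demo above wraps a `vd_inference` object; its inference_* methods can also be called
# directly. This assumes the vd_inference class is importable before the module-level instance and
# demo.launch() run (e.g. after factoring the class out of app.py); the prompt, seed, and output
# file names are illustrative.
def run_t2i_example(vd, prompt="a dream of a village in china, matte painting", seed=20):
    # `vd` is expected to be a constructed vd_inference(which='v1.0', fp16=True) instance.
    images = vd.inference_t2i(prompt, seed)          # returns a list of PIL.Image outputs
    for idx, im in enumerate(images):
        im.save("t2i_sample_{}.png".format(idx))
    caption = vd.inference_i2t(images[0], seed)      # image-to-text returns a newline-joined string
    return images, caption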
import torch.distributed as dist
import torch.multiprocessing as mp
import os
import os.path as osp
import sys
import numpy as np
import copy
from lib.cfg_holder import cfg_unique_holder as cfguh
from lib.cfg_helper import \
    get_command_line_args, \
    cfg_initiates
from lib.utils import get_obj_from_str

if __name__ == "__main__":
    cfg = get_command_line_args()
    cfg = cfg_initiates(cfg)
    if 'train' in cfg:
        # training entry point: build the trainer and its (optionally nested) eval stage
        trainer = get_obj_from_str(cfg.train.main)(cfg)
        tstage = get_obj_from_str(cfg.train.stage)()
        if 'eval' in cfg:
            tstage.nested_eval_stage = get_obj_from_str(cfg.eval.stage)()
        trainer.register_stage(tstage)
        if cfg.env.gpu_count == 1:
            trainer(0)
        else:
            mp.spawn(trainer, args=(), nprocs=cfg.env.gpu_count, join=True)
        trainer.destroy()
    else:
        # evaluation-only entry point
        evaler = get_obj_from_str(cfg.eval.main)(cfg)
        estage = get_obj_from_str(cfg.eval.stage)()
        evaler.register_stage(estage)
        if cfg.env.gpu_count == 1:
            evaler(0)
        else:
            mp.spawn(evaler, args=(), nprocs=cfg.env.gpu_count, join=True)
        evaler.destroy()
Versatile-Diffusion-master
main.py
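# --- Hedged launch sketch (editor's addition, not part of the repository) ---
# main.py above picks a trainer when the loaded config has a `train` section and an evaluator
# otherwise, spawning one process per GPU with torch.multiprocessing. The flags mirror
# lib/cfg_helper.get_command_line_args further below; the config names and experiment id used
# here are illustrative assumptions.
import subprocess

train_cmd = ["python", "main.py", "--config", "vd_train", "--gpu", "0", "1", "--seed", "20"]
eval_cmd = ["python", "main.py", "--config", "vd_eval", "--gpu", "0",
            "--eval", "99999999999", "--eval_subdir", "demo"]
subprocess.run(train_cmd, check=True)  # multi-node runs would add --nodes / --node_rank / --node_list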
from multiprocessing import shared_memory # import multiprocessing # if hasattr(multiprocessing, "shared_memory"): # from multiprocessing import shared_memory # else: # # workaround for single gpu inference on colab # shared_memory = None import random import pickle import time import copy import torch import torch.distributed as dist from lib.cfg_holder import cfg_unique_holder as cfguh def singleton(class_): instances = {} def getinstance(*args, **kwargs): if class_ not in instances: instances[class_] = class_(*args, **kwargs) return instances[class_] return getinstance def is_ddp(): return dist.is_available() and dist.is_initialized() def get_rank(type='local'): ddp = is_ddp() global_rank = dist.get_rank() if ddp else 0 local_world_size = torch.cuda.device_count() if type == 'global': return global_rank elif type == 'local': return global_rank % local_world_size elif type == 'node': return global_rank // local_world_size elif type == 'all': return global_rank, \ global_rank % local_world_size, \ global_rank // local_world_size else: assert False, 'Unknown type' def get_world_size(type='local'): ddp = is_ddp() global_rank = dist.get_rank() if ddp else 0 global_world_size = dist.get_world_size() if ddp else 1 local_world_size = torch.cuda.device_count() if type == 'global': return global_world_size elif type == 'local': return local_world_size elif type == 'node': return global_world_size // local_world_size elif type == 'all': return global_world_size, local_world_size, \ global_world_size // local_world_size else: assert False, 'Unknown type' class barrier_lock(object): def __init__(self, n): self.n = n id = int(random.random()*10000) + int(time.time())*10000 self.lock_shmname = 'barrier_lock_{}'.format(id) lock_shm = shared_memory.SharedMemory( name=self.lock_shmname, create=True, size=n) for i in range(n): lock_shm.buf[i] = 0 lock_shm.close() def destroy(self): try: lock_shm = shared_memory.SharedMemory( name=self.lock_shmname) lock_shm.close() lock_shm.unlink() except: return def wait(self, k): lock_shm = shared_memory.SharedMemory( name=self.lock_shmname) assert lock_shm.buf[k] == 0, 'Two waits on the same id is not allowed.' lock_shm.buf[k] = 1 if k == 0: while sum([lock_shm.buf[i]==0 for i in range(self.n)]) != 0: pass for i in range(self.n): lock_shm.buf[i] = 0 return else: while lock_shm.buf[k] != 0: pass class nodewise_sync_global(object): """ This is the global part of nodewise_sync that need to call at master process before spawn. """ def __init__(self): self.local_world_size = get_world_size('local') self.b_lock = barrier_lock(self.local_world_size) id = int(random.random()*10000) + int(time.time())*10000 self.id_shmname = 'nodewise_sync_id_shm_{}'.format(id) def destroy(self): self.b_lock.destroy() try: shm = shared_memory.SharedMemory(name=self.id_shmname) shm.close() shm.unlink() except: return @singleton class nodewise_sync(object): """ A class that centralize nodewise sync activities. The backend is multiprocess sharememory, not torch, as torch not support this. 
""" def __init__(self): pass def copy_global(self, reference): self.local_world_size = reference.local_world_size self.b_lock = reference.b_lock self.id_shmname = reference.id_shmname return self def local_init(self): self.ddp = is_ddp() self.global_rank, self.local_rank, self.node_rank = get_rank('all') self.global_world_size, self.local_world_size, self.nodes = get_world_size('all') if self.local_rank == 0: temp = int(random.random()*10000) + int(time.time())*10000 temp = pickle.dumps(temp) shm = shared_memory.SharedMemory( name=self.id_shmname, create=True, size=len(temp)) shm.close() return self def random_sync_id(self): assert self.local_rank is not None, 'Not initialized!' if self.local_rank == 0: sync_id = int(random.random()*10000) + int(time.time())*10000 data = pickle.dumps(sync_id) shm = shared_memory.SharedMemory(name=self.id_shmname) shm.buf[0:len(data)] = data[0:len(data)] self.barrier() shm.close() else: self.barrier() shm = shared_memory.SharedMemory(name=self.id_shmname) sync_id = pickle.loads(shm.buf) shm.close() return sync_id def barrier(self): self.b_lock.wait(self.local_rank) def broadcast_r0(self, data=None): assert self.local_rank is not None, 'Not initialized!' id = self.random_sync_id() shmname = 'broadcast_r0_{}'.format(id) if self.local_rank == 0: assert data!=None, 'Rank 0 needs to input data!' data = pickle.dumps(data) datan = len(data) load_info_shm = shared_memory.SharedMemory( name=shmname, create=True, size=datan) load_info_shm.buf[0:datan] = data[0:datan] self.barrier() self.barrier() load_info_shm.close() load_info_shm.unlink() return None else: assert data==None, 'Rank other than 1 should input None as data!' self.barrier() shm = shared_memory.SharedMemory(name=shmname) data = pickle.loads(shm.buf) shm.close() self.barrier() return data def destroy(self): self.barrier.destroy() try: shm = shared_memory.SharedMemory(name=self.id_shmname) shm.close() shm.unlink() except: return # import contextlib # @contextlib.contextmanager # def weight_sync(module, sync): # assert isinstance(module, torch.nn.Module) # if sync or not isinstance(module, torch.nn.parallel.DistributedDataParallel): # yield # else: # with module.no_sync(): # yield # def weight_sync(net): # for parameters in net.parameters(): # dist.all_reduce(parameters, dist.ReduceOp.AVG)
Versatile-Diffusion-master
lib/sync.py
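# --- Hedged usage sketch (editor's addition, not part of the repository) ---
# Intended flow for lib/sync.py, as used by the launcher/trainer code later in this repository:
# a nodewise_sync_global object is built once in the launcher, and every spawned worker copies it
# into the nodewise_sync singleton before using barrier()/broadcast_r0(). The worker body is
# illustrative and assumes one process per local GPU with torch.distributed already initialized.
from lib import sync

def sync_worker_example(local_rank, global_ref):
    helper = sync.nodewise_sync().copy_global(global_ref).local_init()
    if helper.local_rank == 0:
        helper.broadcast_r0({"note": "payload from local rank 0"})   # rank 0 supplies the data
        payload = {"note": "payload from local rank 0"}
    else:
        payload = helper.broadcast_r0(None)                          # other local ranks receive it
    helper.barrier()   # all local processes meet here before continuing
    return payload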
import os import os.path as osp import shutil import copy import time import pprint import numpy as np import torch import matplotlib import argparse import json import yaml from easydict import EasyDict as edict from .model_zoo import get_model ############ # cfg_bank # ############ def cfg_solvef(cmd, root): if not isinstance(cmd, str): return cmd if cmd.find('SAME')==0: zoom = root p = cmd[len('SAME'):].strip('()').split('.') p = [pi.strip() for pi in p] for pi in p: try: pi = int(pi) except: pass try: zoom = zoom[pi] except: return cmd return cfg_solvef(zoom, root) if cmd.find('SEARCH')==0: zoom = root p = cmd[len('SEARCH'):].strip('()').split('.') p = [pi.strip() for pi in p] find = True # Depth first search for pi in p: try: pi = int(pi) except: pass try: zoom = zoom[pi] except: find = False break if find: return cfg_solvef(zoom, root) else: if isinstance(root, dict): for ri in root: rv = cfg_solvef(cmd, root[ri]) if rv != cmd: return rv if isinstance(root, list): for ri in root: rv = cfg_solvef(cmd, ri) if rv != cmd: return rv return cmd if cmd.find('MODEL')==0: goto = cmd[len('MODEL'):].strip('()') return model_cfg_bank()(goto) if cmd.find('DATASET')==0: goto = cmd[len('DATASET'):].strip('()') return dataset_cfg_bank()(goto) return cmd def cfg_solve(cfg, cfg_root): # The function solve cfg element such that # all sorrogate input are settled. # (i.e. SAME(***) ) if isinstance(cfg, list): for i in range(len(cfg)): if isinstance(cfg[i], (list, dict)): cfg[i] = cfg_solve(cfg[i], cfg_root) else: cfg[i] = cfg_solvef(cfg[i], cfg_root) if isinstance(cfg, dict): for k in cfg: if isinstance(cfg[k], (list, dict)): cfg[k] = cfg_solve(cfg[k], cfg_root) else: cfg[k] = cfg_solvef(cfg[k], cfg_root) return cfg class model_cfg_bank(object): def __init__(self): self.cfg_dir = osp.join('configs', 'model') self.cfg_bank = edict() def __call__(self, name): if name not in self.cfg_bank: cfg_path = self.get_yaml_path(name) with open(cfg_path, 'r') as f: cfg_new = yaml.load( f, Loader=yaml.FullLoader) cfg_new = edict(cfg_new) self.cfg_bank.update(cfg_new) cfg = self.cfg_bank[name] cfg.name = name if 'super_cfg' not in cfg: cfg = cfg_solve(cfg, cfg) self.cfg_bank[name] = cfg return copy.deepcopy(cfg) super_cfg = self.__call__(cfg.super_cfg) # unlike other field, # args will not be replaced but update. 
if 'args' in cfg: if 'args' in super_cfg: super_cfg.args.update(cfg.args) else: super_cfg.args = cfg.args cfg.pop('args') super_cfg.update(cfg) super_cfg.pop('super_cfg') cfg = super_cfg try: delete_args = cfg.pop('delete_args') except: delete_args = [] for dargs in delete_args: cfg.args.pop(dargs) cfg = cfg_solve(cfg, cfg) self.cfg_bank[name] = cfg return copy.deepcopy(cfg) def get_yaml_path(self, name): if name.find('openai_unet')==0: return osp.join( self.cfg_dir, 'openai_unet.yaml') elif (name.find('clip')==0) or (name.find('openclip')==0): return osp.join( self.cfg_dir, 'clip.yaml') elif name.find('vd')==0: return osp.join( self.cfg_dir, 'vd.yaml') elif name.find('optimus')==0: return osp.join( self.cfg_dir, 'optimus.yaml') elif name.find('autokl')==0: return osp.join( self.cfg_dir, 'autokl.yaml') else: raise ValueError class dataset_cfg_bank(object): def __init__(self): self.cfg_dir = osp.join('configs', 'dataset') self.cfg_bank = edict() def __call__(self, name): if name not in self.cfg_bank: cfg_path = self.get_yaml_path(name) with open(cfg_path, 'r') as f: cfg_new = yaml.load( f, Loader=yaml.FullLoader) cfg_new = edict(cfg_new) self.cfg_bank.update(cfg_new) cfg = self.cfg_bank[name] cfg.name = name if cfg.get('super_cfg', None) is None: cfg = cfg_solve(cfg, cfg) self.cfg_bank[name] = cfg return copy.deepcopy(cfg) super_cfg = self.__call__(cfg.super_cfg) super_cfg.update(cfg) cfg = super_cfg cfg.super_cfg = None try: delete = cfg.pop('delete') except: delete = [] for dargs in delete: cfg.pop(dargs) cfg = cfg_solve(cfg, cfg) self.cfg_bank[name] = cfg return copy.deepcopy(cfg) def get_yaml_path(self, name): if name.find('laion2b')==0: return osp.join( self.cfg_dir, 'laion2b.yaml') else: raise ValueError class experiment_cfg_bank(object): def __init__(self): self.cfg_dir = osp.join('configs', 'experiment') self.cfg_bank = edict() def __call__(self, name): if name not in self.cfg_bank: cfg_path = self.get_yaml_path(name) with open(cfg_path, 'r') as f: cfg = yaml.load( f, Loader=yaml.FullLoader) cfg = edict(cfg) cfg = cfg_solve(cfg, cfg) cfg = cfg_solve(cfg, cfg) # twice for SEARCH self.cfg_bank[name] = cfg return copy.deepcopy(cfg) def get_yaml_path(self, name): return osp.join( self.cfg_dir, name+'.yaml') def load_cfg_yaml(path): if osp.isfile(path): cfg_path = path elif osp.isfile(osp.join('configs', 'experiment', path)): cfg_path = osp.join('configs', 'experiment', path) elif osp.isfile(osp.join('configs', 'experiment', path+'.yaml')): cfg_path = osp.join('configs', 'experiment', path+'.yaml') else: assert False, 'No such config!' with open(cfg_path, 'r') as f: cfg = yaml.load(f, Loader=yaml.FullLoader) cfg = edict(cfg) cfg = cfg_solve(cfg, cfg) cfg = cfg_solve(cfg, cfg) return cfg ############## # cfg_helper # ############## def get_experiment_id(ref=None): if ref is None: time.sleep(0.5) return int(time.time()*100) else: try: return int(ref) except: pass _, ref = osp.split(ref) ref = ref.split('_')[0] try: return int(ref) except: assert False, 'Invalid experiment ID!' 
def record_resume_cfg(path): cnt = 0 while True: if osp.exists(path+'.{:04d}'.format(cnt)): cnt += 1 continue shutil.copyfile(path, path+'.{:04d}'.format(cnt)) break def get_command_line_args(): parser = argparse.ArgumentParser() parser.add_argument('--debug', action='store_true', default=False) parser.add_argument('--config', type=str) parser.add_argument('--gpu', nargs='+', type=int) parser.add_argument('--node_rank', type=int) parser.add_argument('--node_list', nargs='+', type=str) parser.add_argument('--nodes', type=int) parser.add_argument('--addr', type=str, default='127.0.0.1') parser.add_argument('--port', type=int, default=11233) parser.add_argument('--signature', nargs='+', type=str) parser.add_argument('--seed', type=int) parser.add_argument('--eval', type=str) parser.add_argument('--eval_subdir', type=str) parser.add_argument('--pretrained', type=str) parser.add_argument('--resume_dir', type=str) parser.add_argument('--resume_step', type=int) parser.add_argument('--resume_weight', type=str) args = parser.parse_args() # Special handling the resume if args.resume_dir is not None: cfg = edict() cfg.env = edict() cfg.env.debug = args.debug cfg.env.resume = edict() cfg.env.resume.dir = args.resume_dir cfg.env.resume.step = args.resume_step cfg.env.resume.weight = args.resume_weight return cfg cfg = load_cfg_yaml(args.config) cfg.env.debug = args.debug cfg.env.gpu_device = [0] if args.gpu is None else list(args.gpu) cfg.env.master_addr = args.addr cfg.env.master_port = args.port cfg.env.dist_url = 'tcp://{}:{}'.format(args.addr, args.port) if args.node_list is None: cfg.env.node_rank = 0 if args.node_rank is None else args.node_rank cfg.env.nodes = 1 if args.nodes is None else args.nodes else: import socket hostname = socket.gethostname() assert cfg.env.master_addr == args.node_list[0] cfg.env.node_rank = args.node_list.index(hostname) cfg.env.nodes = len(args.node_list) cfg.env.node_list = args.node_list istrain = False if args.eval is not None else True isdebug = cfg.env.debug if istrain: if isdebug: cfg.env.experiment_id = 999999999999 cfg.train.signature = ['debug'] else: cfg.env.experiment_id = get_experiment_id() if args.signature is not None: cfg.train.signature = args.signature else: if 'train' in cfg: cfg.pop('train') cfg.env.experiment_id = get_experiment_id(args.eval) if args.signature is not None: cfg.eval.signature = args.signature if isdebug and (args.eval is None): cfg.env.experiment_id = 999999999999 cfg.eval.signature = ['debug'] if args.eval_subdir is not None: if isdebug: cfg.eval.eval_subdir = 'debug' else: cfg.eval.eval_subdir = args.eval_subdir if args.pretrained is not None: cfg.eval.pretrained = args.pretrained # The override pretrained over the setting in cfg.model if args.seed is not None: cfg.env.rnd_seed = args.seed return cfg def cfg_initiates(cfg): cfge = cfg.env isdebug = cfge.debug isresume = 'resume' in cfge istrain = 'train' in cfg haseval = 'eval' in cfg cfgt = cfg.train if istrain else None cfgv = cfg.eval if haseval else None ############################### # get some environment params # ############################### cfge.computer = os.uname() cfge.torch_version = str(torch.__version__) ########## # resume # ########## if isresume: resume_cfg_path = osp.join(cfge.resume.dir, 'config.yaml') record_resume_cfg(resume_cfg_path) with open(resume_cfg_path, 'r') as f: cfg_resume = yaml.load(f, Loader=yaml.FullLoader) cfg_resume = edict(cfg_resume) cfg_resume.env.update(cfge) cfg = cfg_resume cfge = cfg.env log_file = cfg.train.log_file print('') 
print('##########') print('# resume #') print('##########') print('') with open(log_file, 'a') as f: print('', file=f) print('##########', file=f) print('# resume #', file=f) print('##########', file=f) print('', file=f) pprint.pprint(cfg) with open(log_file, 'a') as f: pprint.pprint(cfg, f) #################### # node distributed # #################### if cfg.env.master_addr!='127.0.0.1': os.environ['MASTER_ADDR'] = cfge.master_addr os.environ['MASTER_PORT'] = '{}'.format(cfge.master_port) if cfg.env.dist_backend=='nccl': os.environ['NCCL_SOCKET_FAMILY'] = 'AF_INET' if cfg.env.dist_backend=='gloo': os.environ['GLOO_SOCKET_FAMILY'] = 'AF_INET' ####################### # cuda visible device # ####################### os.environ["CUDA_VISIBLE_DEVICES"] = ','.join( [str(gid) for gid in cfge.gpu_device]) ##################### # return resume cfg # ##################### if isresume: return cfg ############################################# # some misc setting that not need in resume # ############################################# cfgm = cfg.model cfge.gpu_count = len(cfge.gpu_device) ########################################## # align batch size and num worker config # ########################################## gpu_n = cfge.gpu_count * cfge.nodes def align_batch_size(bs, bs_per_gpu): assert (bs is not None) or (bs_per_gpu is not None) bs = bs_per_gpu * gpu_n if bs is None else bs bs_per_gpu = bs // gpu_n if bs_per_gpu is None else bs_per_gpu assert (bs == bs_per_gpu * gpu_n) return bs, bs_per_gpu if istrain: cfgt.batch_size, cfgt.batch_size_per_gpu = \ align_batch_size(cfgt.batch_size, cfgt.batch_size_per_gpu) cfgt.dataset_num_workers, cfgt.dataset_num_workers_per_gpu = \ align_batch_size(cfgt.dataset_num_workers, cfgt.dataset_num_workers_per_gpu) if haseval: cfgv.batch_size, cfgv.batch_size_per_gpu = \ align_batch_size(cfgv.batch_size, cfgv.batch_size_per_gpu) cfgv.dataset_num_workers, cfgv.dataset_num_workers_per_gpu = \ align_batch_size(cfgv.dataset_num_workers, cfgv.dataset_num_workers_per_gpu) ################## # create log dir # ################## if istrain: if not isdebug: sig = cfgt.get('signature', []) sig = sig + ['s{}'.format(cfge.rnd_seed)] else: sig = ['debug'] log_dir = [ cfge.log_root_dir, '{}_{}'.format(cfgm.symbol, cfgt.dataset.symbol), '_'.join([str(cfge.experiment_id)] + sig) ] log_dir = osp.join(*log_dir) log_file = osp.join(log_dir, 'train.log') if not osp.exists(log_file): os.makedirs(osp.dirname(log_file)) cfgt.log_dir = log_dir cfgt.log_file = log_file if haseval: cfgv.log_dir = log_dir cfgv.log_file = log_file else: model_symbol = cfgm.symbol if cfgv.get('dataset', None) is None: dataset_symbol = 'nodataset' else: dataset_symbol = cfgv.dataset.symbol log_dir = osp.join(cfge.log_root_dir, '{}_{}'.format(model_symbol, dataset_symbol)) exp_dir = search_experiment_folder(log_dir, cfge.experiment_id) if exp_dir is None: if not isdebug: sig = cfgv.get('signature', []) + ['evalonly'] else: sig = ['debug'] exp_dir = '_'.join([str(cfge.experiment_id)] + sig) eval_subdir = cfgv.get('eval_subdir', None) # override subdir in debug mode (if eval_subdir is set) eval_subdir = 'debug' if (eval_subdir is not None) and isdebug else eval_subdir if eval_subdir is not None: log_dir = osp.join(log_dir, exp_dir, eval_subdir) else: log_dir = osp.join(log_dir, exp_dir) disable_log_override = cfgv.get('disable_log_override', False) if osp.isdir(log_dir): if disable_log_override: assert False, 'Override an exsited log_dir is disabled at [{}]'.format(log_dir) else: os.makedirs(log_dir) log_file = 
osp.join(log_dir, 'eval.log') cfgv.log_dir = log_dir cfgv.log_file = log_file ###################### # print and save cfg # ###################### pprint.pprint(cfg) if cfge.node_rank==0: with open(log_file, 'w') as f: pprint.pprint(cfg, f) with open(osp.join(log_dir, 'config.yaml'), 'w') as f: yaml.dump(edict_2_dict(cfg), f) else: with open(osp.join(log_dir, 'config.yaml.{}'.format(cfge.node_rank)), 'w') as f: yaml.dump(edict_2_dict(cfg), f) ############# # save code # ############# save_code = False if istrain: save_code = cfgt.get('save_code', False) elif haseval: save_code = cfgv.get('save_code', False) save_code = save_code and (cfge.node_rank==0) if save_code: codedir = osp.join(log_dir, 'code') if osp.exists(codedir): shutil.rmtree(codedir) for d in ['configs', 'lib']: fromcodedir = d tocodedir = osp.join(codedir, d) shutil.copytree( fromcodedir, tocodedir, ignore=shutil.ignore_patterns( '*__pycache__*', '*build*')) for codei in os.listdir('.'): if osp.splitext(codei)[1] == 'py': shutil.copy(codei, codedir) ####################### # set matplotlib mode # ####################### if 'matplotlib_mode' in cfge: try: matplotlib.use(cfge.matplotlib_mode) except: print('Warning: matplotlib mode [{}] failed to be set!'.format(cfge.matplotlib_mode)) return cfg def edict_2_dict(x): if isinstance(x, dict): xnew = {} for k in x: xnew[k] = edict_2_dict(x[k]) return xnew elif isinstance(x, list): xnew = [] for i in range(len(x)): xnew.append( edict_2_dict(x[i]) ) return xnew else: return x def search_experiment_folder(root, exid): target = None for fi in os.listdir(root): if not osp.isdir(osp.join(root, fi)): continue if int(fi.split('_')[0]) == exid: if target is not None: return None # duplicated elif target is None: target = fi return target
Versatile-Diffusion-master
lib/cfg_helper.py
Versatile-Diffusion-master
lib/__init__.py
import copy

def singleton(class_):
    instances = {}
    def getinstance(*args, **kwargs):
        if class_ not in instances:
            instances[class_] = class_(*args, **kwargs)
        return instances[class_]
    return getinstance

##############
# cfg_holder #
##############

@singleton
class cfg_unique_holder(object):
    def __init__(self):
        self.cfg = None
        # this is used to track the main code files.
        self.code = set()

    def save_cfg(self, cfg):
        self.cfg = copy.deepcopy(cfg)

    def add_code(self, code):
        """
        A new main code is reached and its name is added.
        """
        self.code.add(code)
Versatile-Diffusion-master
lib/cfg_holder.py
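# --- Hedged usage sketch (editor's addition, not part of the repository) ---
# cfg_unique_holder is a process-wide singleton, so any module can read the config that the
# launcher stored with save_cfg(). The nested fields mirror the ones accessed elsewhere in this
# repository (e.g. lib/log_service.py); the literal values are illustrative.
from easydict import EasyDict as edict
from lib.cfg_holder import cfg_unique_holder as cfguh

cfguh().save_cfg(edict({"train": {"log_dir": "log/demo", "log_file": "log/demo/train.log"}}))
assert cfguh() is cfguh()             # @singleton: every call returns the same cached instance
print(cfguh().cfg.train.log_file)     # -> "log/demo/train.log"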
import timeit import numpy as np import os import os.path as osp import shutil import copy import torch import torch.nn as nn import torch.distributed as dist from .cfg_holder import cfg_unique_holder as cfguh from . import sync print_console_local_rank0_only = True def print_log(*console_info): local_rank = sync.get_rank('local') if print_console_local_rank0_only and (local_rank!=0): return console_info = [str(i) for i in console_info] console_info = ' '.join(console_info) print(console_info) if local_rank!=0: return log_file = None try: log_file = cfguh().cfg.train.log_file except: try: log_file = cfguh().cfg.eval.log_file except: return if log_file is not None: with open(log_file, 'a') as f: f.write(console_info + '\n') class distributed_log_manager(object): def __init__(self): self.sum = {} self.cnt = {} self.time_check = timeit.default_timer() cfgt = cfguh().cfg.train use_tensorboard = getattr(cfgt, 'log_tensorboard', False) self.ddp = sync.is_ddp() self.rank = sync.get_rank('local') self.world_size = sync.get_world_size('local') self.tb = None if use_tensorboard and (self.rank==0): import tensorboardX monitoring_dir = osp.join(cfguh().cfg.train.log_dir, 'tensorboard') self.tb = tensorboardX.SummaryWriter(osp.join(monitoring_dir)) def accumulate(self, n, **data): if n < 0: raise ValueError for itemn, di in data.items(): if itemn in self.sum: self.sum[itemn] += di * n self.cnt[itemn] += n else: self.sum[itemn] = di * n self.cnt[itemn] = n def get_mean_value_dict(self): value_gather = [ self.sum[itemn]/self.cnt[itemn] \ for itemn in sorted(self.sum.keys()) ] value_gather_tensor = torch.FloatTensor(value_gather).to(self.rank) if self.ddp: dist.all_reduce(value_gather_tensor, op=dist.ReduceOp.SUM) value_gather_tensor /= self.world_size mean = {} for idx, itemn in enumerate(sorted(self.sum.keys())): mean[itemn] = value_gather_tensor[idx].item() return mean def tensorboard_log(self, step, data, mode='train', **extra): if self.tb is None: return if mode == 'train': self.tb.add_scalar('other/epochn', extra['epochn'], step) if 'lr' in extra: self.tb.add_scalar('other/lr', extra['lr'], step) for itemn, di in data.items(): if itemn.find('loss') == 0: self.tb.add_scalar('loss/'+itemn, di, step) elif itemn == 'Loss': self.tb.add_scalar('Loss', di, step) else: self.tb.add_scalar('other/'+itemn, di, step) elif mode == 'eval': if isinstance(data, dict): for itemn, di in data.items(): self.tb.add_scalar('eval/'+itemn, di, step) else: self.tb.add_scalar('eval', data, step) return def train_summary(self, itern, epochn, samplen, lr, tbstep=None): console_info = [ 'Iter:{}'.format(itern), 'Epoch:{}'.format(epochn), 'Sample:{}'.format(samplen),] if lr is not None: console_info += ['LR:{:.4E}'.format(lr)] mean = self.get_mean_value_dict() tbstep = itern if tbstep is None else tbstep self.tensorboard_log( tbstep, mean, mode='train', itern=itern, epochn=epochn, lr=lr) loss = mean.pop('Loss') mean_info = ['Loss:{:.4f}'.format(loss)] + [ '{}:{:.4f}'.format(itemn, mean[itemn]) \ for itemn in sorted(mean.keys()) \ if itemn.find('loss') == 0 ] console_info += mean_info console_info.append('Time:{:.2f}s'.format( timeit.default_timer() - self.time_check)) return ' , '.join(console_info) def clear(self): self.sum = {} self.cnt = {} self.time_check = timeit.default_timer() def tensorboard_close(self): if self.tb is not None: self.tb.close() # ----- also include some small utils ----- def torch_to_numpy(*argv): if len(argv) > 1: data = list(argv) else: data = argv[0] if isinstance(data, torch.Tensor): return 
        data.to('cpu').detach().numpy()
    elif isinstance(data, (list, tuple)):
        out = []
        for di in data:
            out.append(torch_to_numpy(di))
        return out
    elif isinstance(data, dict):
        out = {}
        for ni, di in data.items():
            out[ni] = torch_to_numpy(di)
        return out
    else:
        return data
Versatile-Diffusion-master
lib/log_service.py
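# --- Hedged usage sketch (editor's addition, not part of the repository) ---
# The training stage below drives these helpers by accumulating per-batch losses and flushing a
# one-line summary (see logm.accumulate / logm.train_summary in train_stage). The batch size,
# loss values, and learning rate are illustrative; a config with a `train` section must already
# be stored in cfg_unique_holder, and a CUDA device is assumed by get_mean_value_dict.
from lib.log_service import print_log, distributed_log_manager

logm = distributed_log_manager()
logm.accumulate(8, Loss=0.42, loss_mse=0.40)    # n=8 samples in this batch; a 'Loss' key is required
print_log(logm.train_summary(itern=100, epochn=1, samplen=800, lr=1e-4))
logm.clear()                                    # reset sums/counters after each summary
logm.tensorboard_close()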
import torch import torch.nn as nn import torch.nn.functional as F import torch.backends.cudnn as cudnn # cudnn.enabled = True # cudnn.benchmark = True import torch.distributed as dist import torch.multiprocessing as mp import os import os.path as osp import sys import numpy as np import pprint import timeit import time import copy import matplotlib.pyplot as plt from .cfg_holder import cfg_unique_holder as cfguh from .data_factory import \ get_dataset, collate, \ get_loader, \ get_transform, \ get_estimator, \ get_formatter, \ get_sampler from .model_zoo import \ get_model, get_optimizer, get_scheduler from .log_service import print_log, distributed_log_manager from .evaluator import get_evaluator from . import sync class train_stage(object): """ This is a template for a train stage, (can be either train or test or anything) Usually, it takes RANK one dataloader, one model, one optimizer, one scheduler. But it is not limited to these parameters. """ def __init__(self): self.nested_eval_stage = None self.rv_keep = None def is_better(self, x): return (self.rv_keep is None) or (x>self.rv_keep) def set_model(self, net, mode): if mode == 'train': return net.train() elif mode == 'eval': return net.eval() else: raise ValueError def __call__(self, **paras): cfg = cfguh().cfg cfgt = cfg.train logm = distributed_log_manager() epochn, itern, samplen = 0, 0, 0 step_type = cfgt.get('step_type', 'iter') assert step_type in ['epoch', 'iter', 'sample'], \ 'Step type must be in [epoch, iter, sample]' step_num = cfgt.get('step_num' , None) gradacc_every = cfgt.get('gradacc_every', 1 ) log_every = cfgt.get('log_every' , None) ckpt_every = cfgt.get('ckpt_every' , None) eval_start = cfgt.get('eval_start' , 0 ) eval_every = cfgt.get('eval_every' , None) if paras.get('resume_step', None) is not None: resume_step = paras['resume_step'] assert step_type == resume_step['type'] epochn = resume_step['epochn'] itern = resume_step['itern'] samplen = resume_step['samplen'] del paras['resume_step'] trainloader = paras['trainloader'] optimizer = paras['optimizer'] scheduler = paras['scheduler'] net = paras['net'] GRANK, LRANK, NRANK = sync.get_rank('all') GWSIZE, LWSIZE, NODES = sync.get_world_size('all') weight_path = osp.join(cfgt.log_dir, 'weight') if (GRANK==0) and (not osp.isdir(weight_path)): os.makedirs(weight_path) if (GRANK==0) and (cfgt.save_init_model): self.save(net, is_init=True, step=0, optimizer=optimizer) epoch_time = timeit.default_timer() end_flag = False net.train() while True: if step_type == 'epoch': lr = scheduler[epochn] if scheduler is not None else None for batch in trainloader: # so first element of batch (usually image) can be [tensor] if not isinstance(batch[0], list): bs = batch[0].shape[0] else: bs = len(batch[0]) if cfgt.skip_partial_batch and (bs != cfgt.batch_size_per_gpu): continue itern_next = itern + 1 samplen_next = samplen + bs*GWSIZE if step_type == 'iter': lr = scheduler[itern//gradacc_every] if scheduler is not None else None grad_update = itern%gradacc_every==(gradacc_every-1) elif step_type == 'sample': lr = scheduler[samplen] if scheduler is not None else None # TODO: # grad_update = samplen%gradacc_every==(gradacc_every-1) # timeDebug = timeit.default_timer() paras_new = self.main( batch=batch, lr=lr, itern=itern, epochn=epochn, samplen=samplen, isinit=False, grad_update=grad_update, **paras) # print_log(timeit.default_timer() - timeDebug) paras.update(paras_new) logm.accumulate(bs, **paras['log_info']) ####### # log # ####### display_flag = False if log_every is not None: 
display_i = (itern//log_every) != (itern_next//log_every) display_s = (samplen//log_every) != (samplen_next//log_every) display_flag = (display_i and (step_type=='iter')) \ or (display_s and (step_type=='sample')) if display_flag: tbstep = itern_next if step_type=='iter' else samplen_next console_info = logm.train_summary( itern_next, epochn, samplen_next, lr, tbstep=tbstep) logm.clear() print_log(console_info) ######## # eval # ######## eval_flag = False if (self.nested_eval_stage is not None) and (eval_every is not None) and (NRANK == 0): if step_type=='iter': eval_flag = (itern//eval_every) != (itern_next//eval_every) eval_flag = eval_flag and (itern_next>=eval_start) eval_flag = eval_flag or itern==0 if step_type=='sample': eval_flag = (samplen//eval_every) != (samplen_next//eval_every) eval_flag = eval_flag and (samplen_next>=eval_start) eval_flag = eval_flag or samplen==0 if eval_flag: eval_cnt = itern_next if step_type=='iter' else samplen_next net = self.set_model(net, 'eval') rv = self.nested_eval_stage( eval_cnt=eval_cnt, **paras) rv = rv.get('eval_rv', None) if rv is not None: logm.tensorboard_log(eval_cnt, rv, mode='eval') if self.is_better(rv): self.rv_keep = rv if GRANK==0: step = {'epochn':epochn, 'itern':itern_next, 'samplen':samplen_next, 'type':step_type, } self.save(net, is_best=True, step=step, optimizer=optimizer) net = self.set_model(net, 'train') ######## # ckpt # ######## ckpt_flag = False if (GRANK==0) and (ckpt_every is not None): # not distributed ckpt_i = (itern//ckpt_every) != (itern_next//ckpt_every) ckpt_s = (samplen//ckpt_every) != (samplen_next//ckpt_every) ckpt_flag = (ckpt_i and (step_type=='iter')) \ or (ckpt_s and (step_type=='sample')) if ckpt_flag: if step_type == 'iter': print_log('Checkpoint... {}'.format(itern_next)) step = {'epochn':epochn, 'itern':itern_next, 'samplen':samplen_next, 'type':step_type, } self.save(net, itern=itern_next, step=step, optimizer=optimizer) else: print_log('Checkpoint... 
{}'.format(samplen_next)) step = {'epochn':epochn, 'itern':itern_next, 'samplen':samplen_next, 'type':step_type, } self.save(net, samplen=samplen_next, step=step, optimizer=optimizer) ####### # end # ####### itern = itern_next samplen = samplen_next if step_type is not None: end_flag = (itern>=step_num and (step_type=='iter')) \ or (samplen>=step_num and (step_type=='sample')) if end_flag: break # loop end epochn += 1 print_log('Epoch {} time:{:.2f}s.'.format( epochn, timeit.default_timer()-epoch_time)) epoch_time = timeit.default_timer() if end_flag: break elif step_type != 'epoch': # This is temporarily added to resolve the data issue trainloader = self.trick_update_trainloader(trainloader) continue ####### # log # ####### display_flag = False if (log_every is not None) and (step_type=='epoch'): display_flag = (epochn==1) or (epochn%log_every==0) if display_flag: console_info = logm.train_summary( itern, epochn, samplen, lr, tbstep=epochn) logm.clear() print_log(console_info) ######## # eval # ######## eval_flag = False if (self.nested_eval_stage is not None) and (eval_every is not None) \ and (step_type=='epoch') and (NRANK==0): eval_flag = (epochn%eval_every==0) and (itern_next>=eval_start) eval_flag = (epochn==1) or eval_flag if eval_flag: net = self.set_model(net, 'eval') rv = self.nested_eval_stage( eval_cnt=epochn, **paras)['eval_rv'] if rv is not None: logm.tensorboard_log(epochn, rv, mode='eval') if self.is_better(rv): self.rv_keep = rv if (GRANK==0): step = {'epochn':epochn, 'itern':itern, 'samplen':samplen, 'type':step_type, } self.save(net, is_best=True, step=step, optimizer=optimizer) net = self.set_model(net, 'train') ######## # ckpt # ######## ckpt_flag = False if (ckpt_every is not None) and (GRANK==0) and (step_type=='epoch'): # not distributed ckpt_flag = epochn%ckpt_every==0 if ckpt_flag: print_log('Checkpoint... 
{}'.format(itern_next)) step = {'epochn':epochn, 'itern':itern, 'samplen':samplen, 'type':step_type, } self.save(net, epochn=epochn, step=step, optimizer=optimizer) ####### # end # ####### if (step_type=='epoch') and (epochn>=step_num): break # loop end # This is temporarily added to resolve the data issue trainloader = self.trick_update_trainloader(trainloader) logm.tensorboard_close() return {} def main(self, **paras): raise NotImplementedError def trick_update_trainloader(self, trainloader): return trainloader def save_model(self, net, path_noext, **paras): cfgt = cfguh().cfg.train path = path_noext+'.pth' if isinstance(net, (torch.nn.DataParallel, torch.nn.parallel.DistributedDataParallel)): netm = net.module else: netm = net torch.save(netm.state_dict(), path) print_log('Saving model file {0}'.format(path)) def save(self, net, itern=None, epochn=None, samplen=None, is_init=False, is_best=False, is_last=False, **paras): exid = cfguh().cfg.env.experiment_id cfgt = cfguh().cfg.train cfgm = cfguh().cfg.model if isinstance(net, (torch.nn.DataParallel, torch.nn.parallel.DistributedDataParallel)): netm = net.module else: netm = net net_symbol = cfgm.symbol check = sum([ itern is not None, samplen is not None, epochn is not None, is_init, is_best, is_last]) assert check<2 if itern is not None: path_noexp = '{}_{}_iter_{}'.format(exid, net_symbol, itern) elif samplen is not None: path_noexp = '{}_{}_samplen_{}'.format(exid, net_symbol, samplen) elif epochn is not None: path_noexp = '{}_{}_epoch_{}'.format(exid, net_symbol, epochn) elif is_init: path_noexp = '{}_{}_init'.format(exid, net_symbol) elif is_best: path_noexp = '{}_{}_best'.format(exid, net_symbol) elif is_last: path_noexp = '{}_{}_last'.format(exid, net_symbol) else: path_noexp = '{}_{}_default'.format(exid, net_symbol) path_noexp = osp.join(cfgt.log_dir, 'weight', path_noexp) self.save_model(net, path_noexp, **paras) class eval_stage(object): def __init__(self): self.evaluator = None def create_dir(self, path): local_rank = sync.get_rank('local') if (not osp.isdir(path)) and (local_rank == 0): os.makedirs(path) sync.nodewise_sync().barrier() def __call__(self, evalloader, net, **paras): cfgt = cfguh().cfg.eval local_rank = sync.get_rank('local') if self.evaluator is None: evaluator = get_evaluator()(cfgt.evaluator) self.evaluator = evaluator else: evaluator = self.evaluator time_check = timeit.default_timer() for idx, batch in enumerate(evalloader): rv = self.main(batch, net) evaluator.add_batch(**rv) if cfgt.output_result: try: self.output_f(**rv, cnt=paras['eval_cnt']) except: self.output_f(**rv) if idx%cfgt.log_display == cfgt.log_display-1: print_log('processed.. {}, Time:{:.2f}s'.format( idx+1, timeit.default_timer() - time_check)) time_check = timeit.default_timer() # break evaluator.set_sample_n(len(evalloader.dataset)) eval_rv = evaluator.compute() if local_rank == 0: evaluator.one_line_summary() evaluator.save(cfgt.log_dir) evaluator.clear_data() return { 'eval_rv' : eval_rv } class exec_container(object): """ This is the base functor for all types of executions. One execution can have multiple stages, but are only allowed to use the same config, network, dataloader. Thus, in most of the cases, one exec_container is one training/evaluation/demo... If DPP is in use, this functor should be spawn. 
""" def __init__(self, cfg, **kwargs): self.cfg = cfg self.registered_stages = [] self.node_rank = None self.local_rank = None self.global_rank = None self.local_world_size = None self.global_world_size = None self.nodewise_sync_global_obj = sync.nodewise_sync_global() def register_stage(self, stage): self.registered_stages.append(stage) def __call__(self, local_rank, **kwargs): cfg = self.cfg cfguh().save_cfg(cfg) self.node_rank = cfg.env.node_rank self.local_rank = local_rank self.nodes = cfg.env.nodes self.local_world_size = cfg.env.gpu_count self.global_rank = self.local_rank + self.node_rank * self.nodes self.global_world_size = self.nodes * self.local_world_size dist.init_process_group( backend = cfg.env.dist_backend, init_method = cfg.env.dist_url, rank = self.global_rank, world_size = self.global_world_size,) torch.cuda.set_device(local_rank) sync.nodewise_sync().copy_global(self.nodewise_sync_global_obj).local_init() if isinstance(cfg.env.rnd_seed, int): np.random.seed(cfg.env.rnd_seed + self.global_rank) torch.manual_seed(cfg.env.rnd_seed + self.global_rank) time_start = timeit.default_timer() para = {'itern_total' : 0,} dl_para = self.prepare_dataloader() assert isinstance(dl_para, dict) para.update(dl_para) md_para = self.prepare_model() assert isinstance(md_para, dict) para.update(md_para) for stage in self.registered_stages: stage_para = stage(**para) if stage_para is not None: para.update(stage_para) if self.global_rank==0: self.save_last_model(**para) print_log( 'Total {:.2f} seconds'.format(timeit.default_timer() - time_start)) dist.destroy_process_group() def prepare_dataloader(self): """ Prepare the dataloader from config. """ return { 'trainloader' : None, 'evalloader' : None} def prepare_model(self): """ Prepare the model from config. 
""" return {'net' : None} def save_last_model(self, **para): return def destroy(self): self.nodewise_sync_global_obj.destroy() class train(exec_container): def prepare_dataloader(self): cfg = cfguh().cfg trainset = get_dataset()(cfg.train.dataset) sampler = get_sampler()( dataset=trainset, cfg=cfg.train.dataset.get('sampler', 'default_train')) trainloader = torch.utils.data.DataLoader( trainset, batch_size = cfg.train.batch_size_per_gpu, sampler = sampler, num_workers = cfg.train.dataset_num_workers_per_gpu, drop_last = False, pin_memory = cfg.train.dataset.get('pin_memory', False), collate_fn = collate(),) evalloader = None if 'eval' in cfg: evalset = get_dataset()(cfg.eval.dataset) if evalset is not None: sampler = get_sampler()( dataset=evalset, cfg=cfg.eval.dataset.get('sampler', 'default_eval')) evalloader = torch.utils.data.DataLoader( evalset, batch_size = cfg.eval.batch_size_per_gpu, sampler = sampler, num_workers = cfg.eval.dataset_num_workers_per_gpu, drop_last = False, pin_memory = cfg.eval.dataset.get('pin_memory', False), collate_fn = collate(),) return { 'trainloader' : trainloader, 'evalloader' : evalloader,} def prepare_model(self): cfg = cfguh().cfg net = get_model()(cfg.model) if cfg.env.cuda: net.to(self.local_rank) net = torch.nn.parallel.DistributedDataParallel( net, device_ids=[self.local_rank], find_unused_parameters=True) net.train() scheduler = get_scheduler()(cfg.train.scheduler) optimizer = get_optimizer()(net, cfg.train.optimizer) return { 'net' : net, 'optimizer' : optimizer, 'scheduler' : scheduler,} def save_last_model(self, **para): cfgt = cfguh().cfg.train net = para['net'] net_symbol = cfguh().cfg.model.symbol if isinstance(net, (torch.nn.DataParallel, torch.nn.parallel.DistributedDataParallel)): netm = net.module else: netm = net path = osp.join(cfgt.log_dir, '{}_{}_last.pth'.format( cfgt.experiment_id, net_symbol)) torch.save(netm.state_dict(), path) print_log('Saving model file {0}'.format(path)) class eval(exec_container): def prepare_dataloader(self): cfg = cfguh().cfg evalloader = None if cfg.eval.get('dataset', None) is not None: evalset = get_dataset()(cfg.eval.dataset) if evalset is None: return sampler = get_sampler()( dataset=evalset, cfg=getattr(cfg.eval.dataset, 'sampler', 'default_eval')) evalloader = torch.utils.data.DataLoader( evalset, batch_size = cfg.eval.batch_size_per_gpu, sampler = sampler, num_workers = cfg.eval.dataset_num_workers_per_gpu, drop_last = False, pin_memory = False, collate_fn = collate(), ) return { 'trainloader' : None, 'evalloader' : evalloader,} def prepare_model(self): cfg = cfguh().cfg net = get_model()(cfg.model) if cfg.env.cuda: net.to(self.local_rank) net = torch.nn.parallel.DistributedDataParallel( net, device_ids=[self.local_rank], find_unused_parameters=True) net.eval() return {'net' : net,} def save_last_model(self, **para): return ############### # some helper # ############### def torch_to_numpy(*argv): if len(argv) > 1: data = list(argv) else: data = argv[0] if isinstance(data, torch.Tensor): return data.to('cpu').detach().numpy() elif isinstance(data, (list, tuple)): out = [] for di in data: out.append(torch_to_numpy(di)) return out elif isinstance(data, dict): out = {} for ni, di in data.items(): out[ni] = torch_to_numpy(di) return out else: return data import importlib def get_obj_from_str(string, reload=False): module, cls = string.rsplit(".", 1) if reload: module_imp = importlib.import_module(module) importlib.reload(module_imp) return getattr(importlib.import_module(module, package=None), cls)
Versatile-Diffusion-master
lib/utils.py
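The train_stage functor above is abstract: a subclass supplies main(), which receives the batch plus the bookkeeping kwargs and must return a dict whose 'log_info' entry feeds the distributed log manager. The sketch below is not part of the repository; the batch layout, the MSE loss, and the way the scheduler's lr is pushed into the optimizer are illustrative assumptions about how the hook is meant to be filled in.

import torch
from lib.utils import train_stage

class toy_train_stage(train_stage):
    def main(self, batch, lr, itern, epochn, samplen, isinit, grad_update,
             net=None, optimizer=None, **paras):
        # assumed batch layout: (input tensor, target tensor)
        device = next(net.parameters()).device
        x, y = batch[0].to(device), batch[1].to(device)
        if lr is not None:
            # the framework only hands main() the scheduled lr; applying it
            # to the optimizer here is one plausible convention (assumption)
            for pg in optimizer.param_groups:
                pg['lr'] = lr
        loss = torch.nn.functional.mse_loss(net(x), y)
        loss.backward()            # gradients accumulate until grad_update is True
        if grad_update:
            optimizer.step()
            optimizer.zero_grad()
        # train_stage.__call__ reads paras['log_info'] and accumulates it
        return {'log_info': {'Loss': loss.item()}}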
"""SAMPLING ONLY.""" import torch import numpy as np from tqdm import tqdm from functools import partial from .diffusion_utils import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like class DDIMSampler(object): def __init__(self, model, schedule="linear", **kwargs): super().__init__() self.model = model self.ddpm_num_timesteps = model.num_timesteps self.schedule = schedule def register_buffer(self, name, attr): if type(attr) == torch.Tensor: if attr.device != torch.device("cuda"): attr = attr.to(torch.device("cuda")) setattr(self, name, attr) def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True): self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, num_ddpm_timesteps=self.ddpm_num_timesteps, verbose=verbose) alphas_cumprod = self.model.alphas_cumprod assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep' to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device) self.register_buffer('betas', to_torch(self.model.betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu()))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu()))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu()))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu()))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1))) # ddim sampling parameters ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters( alphacums=alphas_cumprod.cpu(), ddim_timesteps=self.ddim_timesteps, eta=ddim_eta,verbose=verbose) self.register_buffer('ddim_sigmas', ddim_sigmas) self.register_buffer('ddim_alphas', ddim_alphas) self.register_buffer('ddim_alphas_prev', ddim_alphas_prev) self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. 
- ddim_alphas)) sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * ( 1 - self.alphas_cumprod / self.alphas_cumprod_prev)) self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps) @torch.no_grad() def sample(self, steps, shape, x_info, c_info, eta=0., temperature=1., noise_dropout=0., verbose=True, log_every_t=100,): self.make_schedule(ddim_num_steps=steps, ddim_eta=eta, verbose=verbose) print(f'Data shape for DDIM sampling is {shape}, eta {eta}') samples, intermediates = self.ddim_sampling( shape, x_info=x_info, c_info=c_info, noise_dropout=noise_dropout, temperature=temperature, log_every_t=log_every_t,) return samples, intermediates @torch.no_grad() def ddim_sampling(self, shape, x_info, c_info, noise_dropout=0., temperature=1., log_every_t=100,): device = self.model.device dtype = c_info['conditioning'].dtype bs = shape[0] timesteps = self.ddim_timesteps if ('xt' in x_info) and (x_info['xt'] is not None): xt = x_info['xt'].astype(dtype).to(device) x_info['x'] = xt elif ('x0' in x_info) and (x_info['x0'] is not None): x0 = x_info['x0'].type(dtype).to(device) ts = timesteps[x_info['x0_forward_timesteps']].repeat(bs) ts = torch.Tensor(ts).long().to(device) timesteps = timesteps[:x_info['x0_forward_timesteps']] x0_nz = self.model.q_sample(x0, ts) x_info['x'] = x0_nz else: x_info['x'] = torch.randn(shape, device=device, dtype=dtype) intermediates = {'pred_xt': [], 'pred_x0': []} time_range = np.flip(timesteps) total_steps = timesteps.shape[0] iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps) for i, step in enumerate(iterator): index = total_steps - i - 1 ts = torch.full((bs,), step, device=device, dtype=torch.long) outs = self.p_sample_ddim( x_info, c_info, ts, index, noise_dropout=noise_dropout, temperature=temperature,) pred_xt, pred_x0 = outs x_info['x'] = pred_xt if index % log_every_t == 0 or index == total_steps - 1: intermediates['pred_xt'].append(pred_xt) intermediates['pred_x0'].append(pred_x0) return pred_xt, intermediates @torch.no_grad() def p_sample_ddim(self, x_info, c_info, t, index, repeat_noise=False, use_original_steps=False, noise_dropout=0., temperature=1.,): x = x_info['x'] unconditional_guidance_scale = c_info['unconditional_guidance_scale'] b, *_, device = *x.shape, x.device if unconditional_guidance_scale == 1.: c_info['c'] = c_info['conditioning'] e_t = self.model.apply_model(x_info, t, c_info) else: x_in = torch.cat([x] * 2) t_in = torch.cat([t] * 2) c_in = torch.cat([c_info['unconditional_conditioning'], c_info['conditioning']]) x_info['x'] = x_in c_info['c'] = c_in e_t_uncond, e_t = self.model.apply_model(x_info, t_in, c_info).chunk(2) e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond) alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas # select parameters corresponding to the currently considered timestep extended_shape = [b] + [1]*(len(e_t.shape)-1) a_t = torch.full(extended_shape, alphas[index], device=device, dtype=x.dtype) a_prev = torch.full(extended_shape, alphas_prev[index], device=device, dtype=x.dtype) sigma_t = torch.full(extended_shape, sigmas[index], device=device, 
dtype=x.dtype) sqrt_one_minus_at = torch.full(extended_shape, sqrt_one_minus_alphas[index], device=device, dtype=x.dtype) # current prediction for x_0 pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t noise = sigma_t * noise_like(x, repeat_noise) * temperature if noise_dropout > 0.: noise = torch.nn.functional.dropout(noise, p=noise_dropout) x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise return x_prev, pred_x0 @torch.no_grad() def sample_multicontext(self, steps, shape, x_info, c_info_list, eta=0., temperature=1., noise_dropout=0., verbose=True, log_every_t=100,): self.make_schedule(ddim_num_steps=steps, ddim_eta=eta, verbose=verbose) print(f'Data shape for DDIM sampling is {shape}, eta {eta}') samples, intermediates = self.ddim_sampling_multicontext( shape, x_info=x_info, c_info_list=c_info_list, noise_dropout=noise_dropout, temperature=temperature, log_every_t=log_every_t,) return samples, intermediates @torch.no_grad() def ddim_sampling_multicontext(self, shape, x_info, c_info_list, noise_dropout=0., temperature=1., log_every_t=100,): device = self.model.device dtype = c_info_list[0]['conditioning'].dtype bs = shape[0] timesteps = self.ddim_timesteps if ('xt' in x_info) and (x_info['xt'] is not None): xt = x_info['xt'].astype(dtype).to(device) x_info['x'] = xt elif ('x0' in x_info) and (x_info['x0'] is not None): x0 = x_info['x0'].type(dtype).to(device) ts = timesteps[x_info['x0_forward_timesteps']].repeat(bs) ts = torch.Tensor(ts).long().to(device) timesteps = timesteps[:x_info['x0_forward_timesteps']] x0_nz = self.model.q_sample(x0, ts) x_info['x'] = x0_nz else: x_info['x'] = torch.randn(shape, device=device, dtype=dtype) intermediates = {'pred_xt': [], 'pred_x0': []} time_range = np.flip(timesteps) total_steps = timesteps.shape[0] iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps) for i, step in enumerate(iterator): index = total_steps - i - 1 ts = torch.full((bs,), step, device=device, dtype=torch.long) outs = self.p_sample_ddim_multicontext( x_info, c_info_list, ts, index, noise_dropout=noise_dropout, temperature=temperature,) pred_xt, pred_x0 = outs x_info['x'] = pred_xt if index % log_every_t == 0 or index == total_steps - 1: intermediates['pred_xt'].append(pred_xt) intermediates['pred_x0'].append(pred_x0) return pred_xt, intermediates @torch.no_grad() def p_sample_ddim_multicontext( self, x_info, c_info_list, t, index, repeat_noise=False, use_original_steps=False, noise_dropout=0., temperature=1.,): x = x_info['x'] b, *_, device = *x.shape, x.device unconditional_guidance_scale = None for c_info in c_info_list: if unconditional_guidance_scale is None: unconditional_guidance_scale = c_info['unconditional_guidance_scale'] else: assert unconditional_guidance_scale==c_info['unconditional_guidance_scale'], \ "A different unconditional guidance scale between different context is not allowed!" 
if unconditional_guidance_scale == 1.: c_info['c'] = c_info['conditioning'] else: c_in = torch.cat([c_info['unconditional_conditioning'], c_info['conditioning']]) c_info['c'] = c_in if unconditional_guidance_scale == 1.: e_t = self.model.apply_model_multicontext(x_info, t, c_info_list) else: x_in = torch.cat([x] * 2) t_in = torch.cat([t] * 2) x_info['x'] = x_in e_t_uncond, e_t = self.model.apply_model_multicontext(x_info, t_in, c_info_list).chunk(2) e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond) alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas # select parameters corresponding to the currently considered timestep extended_shape = [b] + [1]*(len(e_t.shape)-1) a_t = torch.full(extended_shape, alphas[index], device=device, dtype=x.dtype) a_prev = torch.full(extended_shape, alphas_prev[index], device=device, dtype=x.dtype) sigma_t = torch.full(extended_shape, sigmas[index], device=device, dtype=x.dtype) sqrt_one_minus_at = torch.full(extended_shape, sqrt_one_minus_alphas[index], device=device, dtype=x.dtype) # current prediction for x_0 pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t noise = sigma_t * noise_like(x, repeat_noise) * temperature if noise_dropout > 0.: noise = torch.nn.functional.dropout(noise, p=noise_dropout) x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise return x_prev, pred_x0
Versatile-Diffusion-master
lib/model_zoo/ddim.py
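A minimal sketch of how DDIMSampler.sample() is driven, inferred from the x_info/c_info handling above. It assumes `net` is an already-loaded diffusion wrapper exposing alphas_cumprod, apply_model, q_sample and device, and that `c`/`uc` are conditioning embeddings produced by some context encoder; the 4x64x64 latent shape and the guidance scale are illustrative values, not repo configuration.

from lib.model_zoo.ddim import DDIMSampler

def ddim_sample_latents(net, c, uc, batch_size=4, steps=50, scale=7.5):
    # sample() is already decorated with @torch.no_grad()
    sampler = DDIMSampler(net)
    x_info = {}                      # neither 'xt' nor 'x0' given -> start from noise
    c_info = {
        'conditioning': c,                       # context embedding (assumed)
        'unconditional_conditioning': uc,        # matching null-context embedding
        'unconditional_guidance_scale': scale,   # classifier-free guidance weight
    }
    shape = [batch_size, 4, 64, 64]              # assumed latent layout
    z, intermediates = sampler.sample(steps=steps, shape=shape,
                                      x_info=x_info, c_info=c_info, eta=0.0)
    # z is the final latent; intermediates['pred_x0'] logs denoised estimates
    return z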
from inspect import isfunction import math import torch import torch.nn.functional as F from torch import nn, einsum from einops import rearrange, repeat from .diffusion_utils import checkpoint def exists(val): return val is not None def uniq(arr): return{el: True for el in arr}.keys() def default(val, d): if exists(val): return val return d() if isfunction(d) else d def max_neg_value(t): return -torch.finfo(t.dtype).max def init_(tensor): dim = tensor.shape[-1] std = 1 / math.sqrt(dim) tensor.uniform_(-std, std) return tensor # feedforward class GEGLU(nn.Module): def __init__(self, dim_in, dim_out): super().__init__() self.proj = nn.Linear(dim_in, dim_out * 2) def forward(self, x): x, gate = self.proj(x).chunk(2, dim=-1) return x * F.gelu(gate) class FeedForward(nn.Module): def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.): super().__init__() inner_dim = int(dim * mult) dim_out = default(dim_out, dim) project_in = nn.Sequential( nn.Linear(dim, inner_dim), nn.GELU() ) if not glu else GEGLU(dim, inner_dim) self.net = nn.Sequential( project_in, nn.Dropout(dropout), nn.Linear(inner_dim, dim_out) ) def forward(self, x): return self.net(x) def zero_module(module): """ Zero out the parameters of a module and return it. """ for p in module.parameters(): p.detach().zero_() return module def Normalize(in_channels): return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True) class LinearAttention(nn.Module): def __init__(self, dim, heads=4, dim_head=32): super().__init__() self.heads = heads hidden_dim = dim_head * heads self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False) self.to_out = nn.Conv2d(hidden_dim, dim, 1) def forward(self, x): b, c, h, w = x.shape qkv = self.to_qkv(x) q, k, v = rearrange(qkv, 'b (qkv heads c) h w -> qkv b heads c (h w)', heads = self.heads, qkv=3) k = k.softmax(dim=-1) context = torch.einsum('bhdn,bhen->bhde', k, v) out = torch.einsum('bhde,bhdn->bhen', context, q) out = rearrange(out, 'b heads c (h w) -> b (heads c) h w', heads=self.heads, h=h, w=w) return self.to_out(out) class SpatialSelfAttention(nn.Module): def __init__(self, in_channels): super().__init__() self.in_channels = in_channels self.norm = Normalize(in_channels) self.q = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) self.k = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) self.v = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) self.proj_out = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) def forward(self, x): h_ = x h_ = self.norm(h_) q = self.q(h_) k = self.k(h_) v = self.v(h_) # compute attention b,c,h,w = q.shape q = rearrange(q, 'b c h w -> b (h w) c') k = rearrange(k, 'b c h w -> b c (h w)') w_ = torch.einsum('bij,bjk->bik', q, k) w_ = w_ * (int(c)**(-0.5)) w_ = torch.nn.functional.softmax(w_, dim=2) # attend to values v = rearrange(v, 'b c h w -> b c (h w)') w_ = rearrange(w_, 'b i j -> b j i') h_ = torch.einsum('bij,bjk->bik', v, w_) h_ = rearrange(h_, 'b c (h w) -> b c h w', h=h) h_ = self.proj_out(h_) return x+h_ class CrossAttention(nn.Module): def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.): super().__init__() inner_dim = dim_head * heads context_dim = default(context_dim, query_dim) self.scale = dim_head ** -0.5 self.heads = heads self.to_q = nn.Linear(query_dim, inner_dim, bias=False) self.to_k = nn.Linear(context_dim, inner_dim, bias=False) self.to_v = nn.Linear(context_dim, 
inner_dim, bias=False) self.to_out = nn.Sequential( nn.Linear(inner_dim, query_dim), nn.Dropout(dropout) ) def forward(self, x, context=None, mask=None): h = self.heads q = self.to_q(x) context = default(context, x) k = self.to_k(context) v = self.to_v(context) q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)) sim = einsum('b i d, b j d -> b i j', q, k) * self.scale if exists(mask): mask = rearrange(mask, 'b ... -> b (...)') max_neg_value = -torch.finfo(sim.dtype).max mask = repeat(mask, 'b j -> (b h) () j', h=h) sim.masked_fill_(~mask, max_neg_value) # attention, what we cannot get enough of attn = sim.softmax(dim=-1) out = einsum('b i j, b j d -> b i d', attn, v) out = rearrange(out, '(b h) n d -> b n (h d)', h=h) return self.to_out(out) class BasicTransformerBlock(nn.Module): def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True, disable_self_attn=False): super().__init__() self.disable_self_attn = disable_self_attn self.attn1 = CrossAttention(query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout, context_dim=context_dim if self.disable_self_attn else None) # is a self-attention if not self.disable_self_attn self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff) self.attn2 = CrossAttention(query_dim=dim, context_dim=context_dim, heads=n_heads, dim_head=d_head, dropout=dropout) # is self-attn if context is none self.norm1 = nn.LayerNorm(dim) self.norm2 = nn.LayerNorm(dim) self.norm3 = nn.LayerNorm(dim) self.checkpoint = checkpoint def forward(self, x, context=None): return checkpoint(self._forward, (x, context), self.parameters(), self.checkpoint) def _forward(self, x, context=None): x = self.attn1(self.norm1(x), context=context if self.disable_self_attn else None) + x x = self.attn2(self.norm2(x), context=context) + x x = self.ff(self.norm3(x)) + x return x class SpatialTransformer(nn.Module): """ Transformer block for image-like data. First, project the input (aka embedding) and reshape to b, t, d. Then apply standard transformer action. 
Finally, reshape to image """ def __init__(self, in_channels, n_heads, d_head, depth=1, dropout=0., context_dim=None, disable_self_attn=False): super().__init__() self.in_channels = in_channels inner_dim = n_heads * d_head self.norm = Normalize(in_channels) self.proj_in = nn.Conv2d(in_channels, inner_dim, kernel_size=1, stride=1, padding=0) self.transformer_blocks = nn.ModuleList( [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim, disable_self_attn=disable_self_attn) for d in range(depth)] ) self.proj_out = zero_module(nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0)) def forward(self, x, context=None): # note: if no context is given, cross-attention defaults to self-attention b, c, h, w = x.shape x_in = x x = self.norm(x) x = self.proj_in(x) x = rearrange(x, 'b c h w -> b (h w) c').contiguous() for block in self.transformer_blocks: x = block(x, context=context) x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous() x = self.proj_out(x) return x + x_in ########################## # transformer no context # ########################## class BasicTransformerBlockNoContext(nn.Module): def __init__(self, dim, n_heads, d_head, dropout=0., gated_ff=True, checkpoint=True): super().__init__() self.attn1 = CrossAttention(query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout, context_dim=None) self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff) self.attn2 = CrossAttention(query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout, context_dim=None) self.norm1 = nn.LayerNorm(dim) self.norm2 = nn.LayerNorm(dim) self.norm3 = nn.LayerNorm(dim) self.checkpoint = checkpoint def forward(self, x): return checkpoint(self._forward, (x,), self.parameters(), self.checkpoint) def _forward(self, x): x = self.attn1(self.norm1(x)) + x x = self.attn2(self.norm2(x)) + x x = self.ff(self.norm3(x)) + x return x class SpatialTransformerNoContext(nn.Module): """ Transformer block for image-like data. First, project the input (aka embedding) and reshape to b, t, d. Then apply standard transformer action. 
Finally, reshape to image """ def __init__(self, in_channels, n_heads, d_head, depth=1, dropout=0.,): super().__init__() self.in_channels = in_channels inner_dim = n_heads * d_head self.norm = Normalize(in_channels) self.proj_in = nn.Conv2d(in_channels, inner_dim, kernel_size=1, stride=1, padding=0) self.transformer_blocks = nn.ModuleList( [BasicTransformerBlockNoContext(inner_dim, n_heads, d_head, dropout=dropout) for d in range(depth)] ) self.proj_out = zero_module(nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0)) def forward(self, x): # note: if no context is given, cross-attention defaults to self-attention b, c, h, w = x.shape x_in = x x = self.norm(x) x = self.proj_in(x) x = rearrange(x, 'b c h w -> b (h w) c').contiguous() for block in self.transformer_blocks: x = block(x) x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous() x = self.proj_out(x) return x + x_in ####################################### # Spatial Transformer with Two Branch # ####################################### class DualSpatialTransformer(nn.Module): def __init__(self, in_channels, n_heads, d_head, depth=1, dropout=0., context_dim=None, disable_self_attn=False): super().__init__() self.in_channels = in_channels inner_dim = n_heads * d_head # First crossattn self.norm_0 = Normalize(in_channels) self.proj_in_0 = nn.Conv2d( in_channels, inner_dim, kernel_size=1, stride=1, padding=0) self.transformer_blocks_0 = nn.ModuleList( [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim, disable_self_attn=disable_self_attn) for d in range(depth)] ) self.proj_out_0 = zero_module(nn.Conv2d( inner_dim, in_channels, kernel_size=1, stride=1, padding=0)) # Second crossattn self.norm_1 = Normalize(in_channels) self.proj_in_1 = nn.Conv2d( in_channels, inner_dim, kernel_size=1, stride=1, padding=0) self.transformer_blocks_1 = nn.ModuleList( [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim, disable_self_attn=disable_self_attn) for d in range(depth)] ) self.proj_out_1 = zero_module(nn.Conv2d( inner_dim, in_channels, kernel_size=1, stride=1, padding=0)) def forward(self, x, context=None, which=None): # note: if no context is given, cross-attention defaults to self-attention b, c, h, w = x.shape x_in = x if which==0: norm, proj_in, blocks, proj_out = \ self.norm_0, self.proj_in_0, self.transformer_blocks_0, self.proj_out_0 elif which==1: norm, proj_in, blocks, proj_out = \ self.norm_1, self.proj_in_1, self.transformer_blocks_1, self.proj_out_1 else: # assert False, 'DualSpatialTransformer forward with a invalid which branch!' 
# import numpy.random as npr # rwhich = 0 if npr.rand() < which else 1 # context = context[rwhich] # if rwhich==0: # norm, proj_in, blocks, proj_out = \ # self.norm_0, self.proj_in_0, self.transformer_blocks_0, self.proj_out_0 # elif rwhich==1: # norm, proj_in, blocks, proj_out = \ # self.norm_1, self.proj_in_1, self.transformer_blocks_1, self.proj_out_1 # import numpy.random as npr # rwhich = 0 if npr.rand() < 0.33 else 1 # if rwhich==0: # context = context[rwhich] # norm, proj_in, blocks, proj_out = \ # self.norm_0, self.proj_in_0, self.transformer_blocks_0, self.proj_out_0 # else: norm, proj_in, blocks, proj_out = \ self.norm_0, self.proj_in_0, self.transformer_blocks_0, self.proj_out_0 x0 = norm(x) x0 = proj_in(x0) x0 = rearrange(x0, 'b c h w -> b (h w) c').contiguous() for block in blocks: x0 = block(x0, context=context[0]) x0 = rearrange(x0, 'b (h w) c -> b c h w', h=h, w=w).contiguous() x0 = proj_out(x0) norm, proj_in, blocks, proj_out = \ self.norm_1, self.proj_in_1, self.transformer_blocks_1, self.proj_out_1 x1 = norm(x) x1 = proj_in(x1) x1 = rearrange(x1, 'b c h w -> b (h w) c').contiguous() for block in blocks: x1 = block(x1, context=context[1]) x1 = rearrange(x1, 'b (h w) c -> b c h w', h=h, w=w).contiguous() x1 = proj_out(x1) return x0*which + x1*(1-which) + x_in x = norm(x) x = proj_in(x) x = rearrange(x, 'b c h w -> b (h w) c').contiguous() for block in blocks: x = block(x, context=context) x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous() x = proj_out(x) return x + x_in
Versatile-Diffusion-master
lib/model_zoo/attention.py
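The attention modules above only depend on torch and einops, so they can be exercised standalone. The dimensions below (320-channel features, a 77-token context of width 768, a 32x32 feature map) are illustrative choices, not values taken from a model config.

import torch
from lib.model_zoo.attention import CrossAttention, SpatialTransformer

# Cross-attention from flattened image tokens onto a text-like context.
attn = CrossAttention(query_dim=320, context_dim=768, heads=8, dim_head=40)
x = torch.randn(2, 1024, 320)        # (batch, query tokens, query_dim)
ctx = torch.randn(2, 77, 768)        # (batch, context tokens, context_dim)
out = attn(x, context=ctx)           # -> (2, 1024, 320)

# SpatialTransformer wraps the same block for image-like feature maps:
# norm -> 1x1 proj -> flatten -> transformer blocks -> unflatten -> residual.
st = SpatialTransformer(in_channels=320, n_heads=8, d_head=40,
                        depth=1, context_dim=768)
fmap = torch.randn(2, 320, 32, 32)
out_map = st(fmap, context=ctx)      # -> (2, 320, 32, 32)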
# pytorch_diffusion + derived encoder decoder import math import torch import torch.nn as nn import numpy as np from einops import rearrange # from .diffusion_utils import instantiate_from_config from .attention import LinearAttention def get_timestep_embedding(timesteps, embedding_dim): """ This matches the implementation in Denoising Diffusion Probabilistic Models: From Fairseq. Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of "Attention Is All You Need". """ assert len(timesteps.shape) == 1 half_dim = embedding_dim // 2 emb = math.log(10000) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb) emb = emb.to(device=timesteps.device) emb = timesteps.float()[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: # zero pad emb = torch.nn.functional.pad(emb, (0,1,0,0)) return emb def nonlinearity(x): # swish return x*torch.sigmoid(x) def Normalize(in_channels, num_groups=32): return torch.nn.GroupNorm(num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True) class Upsample(nn.Module): def __init__(self, in_channels, with_conv): super().__init__() self.with_conv = with_conv if self.with_conv: self.conv = torch.nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1) def forward(self, x): x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest") if self.with_conv: x = self.conv(x) return x class Downsample(nn.Module): def __init__(self, in_channels, with_conv): super().__init__() self.with_conv = with_conv if self.with_conv: # no asymmetric padding in torch conv, must do it ourselves self.conv = torch.nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=2, padding=0) def forward(self, x): if self.with_conv: pad = (0,1,0,1) x = torch.nn.functional.pad(x, pad, mode="constant", value=0) x = self.conv(x) else: x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2) return x class ResnetBlock(nn.Module): def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False, dropout, temb_channels=512): super().__init__() self.in_channels = in_channels out_channels = in_channels if out_channels is None else out_channels self.out_channels = out_channels self.use_conv_shortcut = conv_shortcut self.norm1 = Normalize(in_channels) self.conv1 = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) if temb_channels > 0: self.temb_proj = torch.nn.Linear(temb_channels, out_channels) self.norm2 = Normalize(out_channels) self.dropout = torch.nn.Dropout(dropout) self.conv2 = torch.nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1) if self.in_channels != self.out_channels: if self.use_conv_shortcut: self.conv_shortcut = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) else: self.nin_shortcut = torch.nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0) def forward(self, x, temb): h = x h = self.norm1(h) h = nonlinearity(h) h = self.conv1(h) if temb is not None: h = h + self.temb_proj(nonlinearity(temb))[:,:,None,None] h = self.norm2(h) h = nonlinearity(h) h = self.dropout(h) h = self.conv2(h) if self.in_channels != self.out_channels: if self.use_conv_shortcut: x = self.conv_shortcut(x) else: x = self.nin_shortcut(x) return x+h class LinAttnBlock(LinearAttention): """to match AttnBlock usage""" def __init__(self, in_channels): super().__init__(dim=in_channels, heads=1, 
dim_head=in_channels) class AttnBlock(nn.Module): def __init__(self, in_channels): super().__init__() self.in_channels = in_channels self.norm = Normalize(in_channels) self.q = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) self.k = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) self.v = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) self.proj_out = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) def forward(self, x): h_ = x h_ = self.norm(h_) q = self.q(h_) k = self.k(h_) v = self.v(h_) # compute attention b,c,h,w = q.shape q = q.reshape(b,c,h*w) q = q.permute(0,2,1) # b,hw,c k = k.reshape(b,c,h*w) # b,c,hw w_ = torch.bmm(q,k) # b,hw,hw w[b,i,j]=sum_c q[b,i,c]k[b,c,j] w_ = w_ * (int(c)**(-0.5)) w_ = torch.nn.functional.softmax(w_, dim=2) # attend to values v = v.reshape(b,c,h*w) w_ = w_.permute(0,2,1) # b,hw,hw (first hw of k, second of q) h_ = torch.bmm(v,w_) # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j] h_ = h_.reshape(b,c,h,w) h_ = self.proj_out(h_) return x+h_ def make_attn(in_channels, attn_type="vanilla"): assert attn_type in ["vanilla", "linear", "none"], f'attn_type {attn_type} unknown' print(f"making attention of type '{attn_type}' with {in_channels} in_channels") if attn_type == "vanilla": return AttnBlock(in_channels) elif attn_type == "none": return nn.Identity(in_channels) else: return LinAttnBlock(in_channels) class Model(nn.Module): def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, resolution, use_timestep=True, use_linear_attn=False, attn_type="vanilla"): super().__init__() if use_linear_attn: attn_type = "linear" self.ch = ch self.temb_ch = self.ch*4 self.num_resolutions = len(ch_mult) self.num_res_blocks = num_res_blocks self.resolution = resolution self.in_channels = in_channels self.use_timestep = use_timestep if self.use_timestep: # timestep embedding self.temb = nn.Module() self.temb.dense = nn.ModuleList([ torch.nn.Linear(self.ch, self.temb_ch), torch.nn.Linear(self.temb_ch, self.temb_ch), ]) # downsampling self.conv_in = torch.nn.Conv2d(in_channels, self.ch, kernel_size=3, stride=1, padding=1) curr_res = resolution in_ch_mult = (1,)+tuple(ch_mult) self.down = nn.ModuleList() for i_level in range(self.num_resolutions): block = nn.ModuleList() attn = nn.ModuleList() block_in = ch*in_ch_mult[i_level] block_out = ch*ch_mult[i_level] for i_block in range(self.num_res_blocks): block.append(ResnetBlock(in_channels=block_in, out_channels=block_out, temb_channels=self.temb_ch, dropout=dropout)) block_in = block_out if curr_res in attn_resolutions: attn.append(make_attn(block_in, attn_type=attn_type)) down = nn.Module() down.block = block down.attn = attn if i_level != self.num_resolutions-1: down.downsample = Downsample(block_in, resamp_with_conv) curr_res = curr_res // 2 self.down.append(down) # middle self.mid = nn.Module() self.mid.block_1 = ResnetBlock(in_channels=block_in, out_channels=block_in, temb_channels=self.temb_ch, dropout=dropout) self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) self.mid.block_2 = ResnetBlock(in_channels=block_in, out_channels=block_in, temb_channels=self.temb_ch, dropout=dropout) # upsampling self.up = nn.ModuleList() for i_level in reversed(range(self.num_resolutions)): block = nn.ModuleList() attn = nn.ModuleList() block_out = ch*ch_mult[i_level] skip_in = ch*ch_mult[i_level] for i_block in 
range(self.num_res_blocks+1): if i_block == self.num_res_blocks: skip_in = ch*in_ch_mult[i_level] block.append(ResnetBlock(in_channels=block_in+skip_in, out_channels=block_out, temb_channels=self.temb_ch, dropout=dropout)) block_in = block_out if curr_res in attn_resolutions: attn.append(make_attn(block_in, attn_type=attn_type)) up = nn.Module() up.block = block up.attn = attn if i_level != 0: up.upsample = Upsample(block_in, resamp_with_conv) curr_res = curr_res * 2 self.up.insert(0, up) # prepend to get consistent order # end self.norm_out = Normalize(block_in) self.conv_out = torch.nn.Conv2d(block_in, out_ch, kernel_size=3, stride=1, padding=1) def forward(self, x, t=None, context=None): #assert x.shape[2] == x.shape[3] == self.resolution if context is not None: # assume aligned context, cat along channel axis x = torch.cat((x, context), dim=1) if self.use_timestep: # timestep embedding assert t is not None temb = get_timestep_embedding(t, self.ch) temb = self.temb.dense[0](temb) temb = nonlinearity(temb) temb = self.temb.dense[1](temb) else: temb = None # downsampling hs = [self.conv_in(x)] for i_level in range(self.num_resolutions): for i_block in range(self.num_res_blocks): h = self.down[i_level].block[i_block](hs[-1], temb) if len(self.down[i_level].attn) > 0: h = self.down[i_level].attn[i_block](h) hs.append(h) if i_level != self.num_resolutions-1: hs.append(self.down[i_level].downsample(hs[-1])) # middle h = hs[-1] h = self.mid.block_1(h, temb) h = self.mid.attn_1(h) h = self.mid.block_2(h, temb) # upsampling for i_level in reversed(range(self.num_resolutions)): for i_block in range(self.num_res_blocks+1): h = self.up[i_level].block[i_block]( torch.cat([h, hs.pop()], dim=1), temb) if len(self.up[i_level].attn) > 0: h = self.up[i_level].attn[i_block](h) if i_level != 0: h = self.up[i_level].upsample(h) # end h = self.norm_out(h) h = nonlinearity(h) h = self.conv_out(h) return h def get_last_layer(self): return self.conv_out.weight class Encoder(nn.Module): def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, resolution, z_channels, double_z=True, use_linear_attn=False, attn_type="vanilla", **ignore_kwargs): super().__init__() if use_linear_attn: attn_type = "linear" self.ch = ch self.temb_ch = 0 self.num_resolutions = len(ch_mult) self.num_res_blocks = num_res_blocks self.resolution = resolution self.in_channels = in_channels # downsampling self.conv_in = torch.nn.Conv2d(in_channels, self.ch, kernel_size=3, stride=1, padding=1) curr_res = resolution in_ch_mult = (1,)+tuple(ch_mult) self.in_ch_mult = in_ch_mult self.down = nn.ModuleList() for i_level in range(self.num_resolutions): block = nn.ModuleList() attn = nn.ModuleList() block_in = ch*in_ch_mult[i_level] block_out = ch*ch_mult[i_level] for i_block in range(self.num_res_blocks): block.append(ResnetBlock(in_channels=block_in, out_channels=block_out, temb_channels=self.temb_ch, dropout=dropout)) block_in = block_out if curr_res in attn_resolutions: attn.append(make_attn(block_in, attn_type=attn_type)) down = nn.Module() down.block = block down.attn = attn if i_level != self.num_resolutions-1: down.downsample = Downsample(block_in, resamp_with_conv) curr_res = curr_res // 2 self.down.append(down) # middle self.mid = nn.Module() self.mid.block_1 = ResnetBlock(in_channels=block_in, out_channels=block_in, temb_channels=self.temb_ch, dropout=dropout) self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) self.mid.block_2 = 
ResnetBlock(in_channels=block_in, out_channels=block_in, temb_channels=self.temb_ch, dropout=dropout) # end self.norm_out = Normalize(block_in) self.conv_out = torch.nn.Conv2d(block_in, 2*z_channels if double_z else z_channels, kernel_size=3, stride=1, padding=1) def forward(self, x): # timestep embedding temb = None # downsampling hs = [self.conv_in(x)] for i_level in range(self.num_resolutions): for i_block in range(self.num_res_blocks): h = self.down[i_level].block[i_block](hs[-1], temb) if len(self.down[i_level].attn) > 0: h = self.down[i_level].attn[i_block](h) hs.append(h) if i_level != self.num_resolutions-1: hs.append(self.down[i_level].downsample(hs[-1])) # middle h = hs[-1] h = self.mid.block_1(h, temb) h = self.mid.attn_1(h) h = self.mid.block_2(h, temb) # end h = self.norm_out(h) h = nonlinearity(h) h = self.conv_out(h) return h class Decoder(nn.Module): def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False, attn_type="vanilla", **ignorekwargs): super().__init__() if use_linear_attn: attn_type = "linear" self.ch = ch self.temb_ch = 0 self.num_resolutions = len(ch_mult) self.num_res_blocks = num_res_blocks self.resolution = resolution self.in_channels = in_channels self.give_pre_end = give_pre_end self.tanh_out = tanh_out # compute in_ch_mult, block_in and curr_res at lowest res in_ch_mult = (1,)+tuple(ch_mult) block_in = ch*ch_mult[self.num_resolutions-1] curr_res = resolution // 2**(self.num_resolutions-1) self.z_shape = (1,z_channels,curr_res,curr_res) print("Working with z of shape {} = {} dimensions.".format( self.z_shape, np.prod(self.z_shape))) # z to block_in self.conv_in = torch.nn.Conv2d(z_channels, block_in, kernel_size=3, stride=1, padding=1) # middle self.mid = nn.Module() self.mid.block_1 = ResnetBlock(in_channels=block_in, out_channels=block_in, temb_channels=self.temb_ch, dropout=dropout) self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) self.mid.block_2 = ResnetBlock(in_channels=block_in, out_channels=block_in, temb_channels=self.temb_ch, dropout=dropout) # upsampling self.up = nn.ModuleList() for i_level in reversed(range(self.num_resolutions)): block = nn.ModuleList() attn = nn.ModuleList() block_out = ch*ch_mult[i_level] for i_block in range(self.num_res_blocks+1): block.append(ResnetBlock(in_channels=block_in, out_channels=block_out, temb_channels=self.temb_ch, dropout=dropout)) block_in = block_out if curr_res in attn_resolutions: attn.append(make_attn(block_in, attn_type=attn_type)) up = nn.Module() up.block = block up.attn = attn if i_level != 0: up.upsample = Upsample(block_in, resamp_with_conv) curr_res = curr_res * 2 self.up.insert(0, up) # prepend to get consistent order # end self.norm_out = Normalize(block_in) self.conv_out = torch.nn.Conv2d(block_in, out_ch, kernel_size=3, stride=1, padding=1) def forward(self, z): #assert z.shape[1:] == self.z_shape[1:] self.last_z_shape = z.shape # timestep embedding temb = None # z to block_in h = self.conv_in(z) # middle h = self.mid.block_1(h, temb) h = self.mid.attn_1(h) h = self.mid.block_2(h, temb) # upsampling for i_level in reversed(range(self.num_resolutions)): for i_block in range(self.num_res_blocks+1): h = self.up[i_level].block[i_block](h, temb) if len(self.up[i_level].attn) > 0: h = self.up[i_level].attn[i_block](h) if i_level != 0: h = self.up[i_level].upsample(h) # end if self.give_pre_end: return h h = self.norm_out(h) h = 
nonlinearity(h) h = self.conv_out(h) if self.tanh_out: h = torch.tanh(h) return h class SimpleDecoder(nn.Module): def __init__(self, in_channels, out_channels, *args, **kwargs): super().__init__() self.model = nn.ModuleList([nn.Conv2d(in_channels, in_channels, 1), ResnetBlock(in_channels=in_channels, out_channels=2 * in_channels, temb_channels=0, dropout=0.0), ResnetBlock(in_channels=2 * in_channels, out_channels=4 * in_channels, temb_channels=0, dropout=0.0), ResnetBlock(in_channels=4 * in_channels, out_channels=2 * in_channels, temb_channels=0, dropout=0.0), nn.Conv2d(2*in_channels, in_channels, 1), Upsample(in_channels, with_conv=True)]) # end self.norm_out = Normalize(in_channels) self.conv_out = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) def forward(self, x): for i, layer in enumerate(self.model): if i in [1,2,3]: x = layer(x, None) else: x = layer(x) h = self.norm_out(x) h = nonlinearity(h) x = self.conv_out(h) return x class UpsampleDecoder(nn.Module): def __init__(self, in_channels, out_channels, ch, num_res_blocks, resolution, ch_mult=(2,2), dropout=0.0): super().__init__() # upsampling self.temb_ch = 0 self.num_resolutions = len(ch_mult) self.num_res_blocks = num_res_blocks block_in = in_channels curr_res = resolution // 2 ** (self.num_resolutions - 1) self.res_blocks = nn.ModuleList() self.upsample_blocks = nn.ModuleList() for i_level in range(self.num_resolutions): res_block = [] block_out = ch * ch_mult[i_level] for i_block in range(self.num_res_blocks + 1): res_block.append(ResnetBlock(in_channels=block_in, out_channels=block_out, temb_channels=self.temb_ch, dropout=dropout)) block_in = block_out self.res_blocks.append(nn.ModuleList(res_block)) if i_level != self.num_resolutions - 1: self.upsample_blocks.append(Upsample(block_in, True)) curr_res = curr_res * 2 # end self.norm_out = Normalize(block_in) self.conv_out = torch.nn.Conv2d(block_in, out_channels, kernel_size=3, stride=1, padding=1) def forward(self, x): # upsampling h = x for k, i_level in enumerate(range(self.num_resolutions)): for i_block in range(self.num_res_blocks + 1): h = self.res_blocks[i_level][i_block](h, None) if i_level != self.num_resolutions - 1: h = self.upsample_blocks[k](h) h = self.norm_out(h) h = nonlinearity(h) h = self.conv_out(h) return h class LatentRescaler(nn.Module): def __init__(self, factor, in_channels, mid_channels, out_channels, depth=2): super().__init__() # residual block, interpolate, residual block self.factor = factor self.conv_in = nn.Conv2d(in_channels, mid_channels, kernel_size=3, stride=1, padding=1) self.res_block1 = nn.ModuleList([ResnetBlock(in_channels=mid_channels, out_channels=mid_channels, temb_channels=0, dropout=0.0) for _ in range(depth)]) self.attn = AttnBlock(mid_channels) self.res_block2 = nn.ModuleList([ResnetBlock(in_channels=mid_channels, out_channels=mid_channels, temb_channels=0, dropout=0.0) for _ in range(depth)]) self.conv_out = nn.Conv2d(mid_channels, out_channels, kernel_size=1, ) def forward(self, x): x = self.conv_in(x) for block in self.res_block1: x = block(x, None) x = torch.nn.functional.interpolate(x, size=(int(round(x.shape[2]*self.factor)), int(round(x.shape[3]*self.factor)))) x = self.attn(x) for block in self.res_block2: x = block(x, None) x = self.conv_out(x) return x class MergedRescaleEncoder(nn.Module): def __init__(self, in_channels, ch, resolution, out_ch, num_res_blocks, attn_resolutions, dropout=0.0, resamp_with_conv=True, ch_mult=(1,2,4,8), rescale_factor=1.0, rescale_module_depth=1): 
super().__init__() intermediate_chn = ch * ch_mult[-1] self.encoder = Encoder(in_channels=in_channels, num_res_blocks=num_res_blocks, ch=ch, ch_mult=ch_mult, z_channels=intermediate_chn, double_z=False, resolution=resolution, attn_resolutions=attn_resolutions, dropout=dropout, resamp_with_conv=resamp_with_conv, out_ch=None) self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=intermediate_chn, mid_channels=intermediate_chn, out_channels=out_ch, depth=rescale_module_depth) def forward(self, x): x = self.encoder(x) x = self.rescaler(x) return x class MergedRescaleDecoder(nn.Module): def __init__(self, z_channels, out_ch, resolution, num_res_blocks, attn_resolutions, ch, ch_mult=(1,2,4,8), dropout=0.0, resamp_with_conv=True, rescale_factor=1.0, rescale_module_depth=1): super().__init__() tmp_chn = z_channels*ch_mult[-1] self.decoder = Decoder(out_ch=out_ch, z_channels=tmp_chn, attn_resolutions=attn_resolutions, dropout=dropout, resamp_with_conv=resamp_with_conv, in_channels=None, num_res_blocks=num_res_blocks, ch_mult=ch_mult, resolution=resolution, ch=ch) self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=z_channels, mid_channels=tmp_chn, out_channels=tmp_chn, depth=rescale_module_depth) def forward(self, x): x = self.rescaler(x) x = self.decoder(x) return x class Upsampler(nn.Module): def __init__(self, in_size, out_size, in_channels, out_channels, ch_mult=2): super().__init__() assert out_size >= in_size num_blocks = int(np.log2(out_size//in_size))+1 factor_up = 1.+ (out_size % in_size) print(f"Building {self.__class__.__name__} with in_size: {in_size} --> out_size {out_size} and factor {factor_up}") self.rescaler = LatentRescaler(factor=factor_up, in_channels=in_channels, mid_channels=2*in_channels, out_channels=in_channels) self.decoder = Decoder(out_ch=out_channels, resolution=out_size, z_channels=in_channels, num_res_blocks=2, attn_resolutions=[], in_channels=None, ch=in_channels, ch_mult=[ch_mult for _ in range(num_blocks)]) def forward(self, x): x = self.rescaler(x) x = self.decoder(x) return x class Resize(nn.Module): def __init__(self, in_channels=None, learned=False, mode="bilinear"): super().__init__() self.with_conv = learned self.mode = mode if self.with_conv: print(f"Note: {self.__class__.__name} uses learned downsampling and will ignore the fixed {mode} mode") raise NotImplementedError() assert in_channels is not None # no asymmetric padding in torch conv, must do it ourselves self.conv = torch.nn.Conv2d(in_channels, in_channels, kernel_size=4, stride=2, padding=1) def forward(self, x, scale_factor=1.0): if scale_factor==1.0: return x else: x = torch.nn.functional.interpolate(x, mode=self.mode, align_corners=False, scale_factor=scale_factor) return x class FirstStagePostProcessor(nn.Module): def __init__(self, ch_mult:list, in_channels, pretrained_model:nn.Module=None, reshape=False, n_channels=None, dropout=0., pretrained_config=None): super().__init__() if pretrained_config is None: assert pretrained_model is not None, 'Either "pretrained_model" or "pretrained_config" must not be None' self.pretrained_model = pretrained_model else: assert pretrained_config is not None, 'Either "pretrained_model" or "pretrained_config" must not be None' self.instantiate_pretrained(pretrained_config) self.do_reshape = reshape if n_channels is None: n_channels = self.pretrained_model.encoder.ch self.proj_norm = Normalize(in_channels,num_groups=in_channels//2) self.proj = nn.Conv2d(in_channels,n_channels,kernel_size=3, stride=1,padding=1) blocks = [] downs = [] ch_in 
= n_channels for m in ch_mult: blocks.append(ResnetBlock(in_channels=ch_in,out_channels=m*n_channels,dropout=dropout)) ch_in = m * n_channels downs.append(Downsample(ch_in, with_conv=False)) self.model = nn.ModuleList(blocks) self.downsampler = nn.ModuleList(downs) def instantiate_pretrained(self, config): model = instantiate_from_config(config) self.pretrained_model = model.eval() # self.pretrained_model.train = False for param in self.pretrained_model.parameters(): param.requires_grad = False @torch.no_grad() def encode_with_pretrained(self,x): c = self.pretrained_model.encode(x) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() return c def forward(self,x): z_fs = self.encode_with_pretrained(x) z = self.proj_norm(z_fs) z = self.proj(z) z = nonlinearity(z) for submodel, downmodel in zip(self.model,self.downsampler): z = submodel(z,temb=None) z = downmodel(z) if self.do_reshape: z = rearrange(z,'b c h w -> b (h w) c') return z
Versatile-Diffusion-master
lib/model_zoo/autokl_modules.py
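A round-trip sketch through the Encoder/Decoder pair above. With double_z=True the encoder emits stacked mean and log-variance, from which a latent is drawn by reparameterization before decoding; the channel multipliers, resolutions and latent width below mirror a typical KL-autoencoder setup but are assumptions, not values read from a repo config.

import torch
from lib.model_zoo.autokl_modules import Encoder, Decoder

enc = Encoder(ch=64, out_ch=3, ch_mult=(1, 2, 4), num_res_blocks=2,
              attn_resolutions=[], dropout=0.0, in_channels=3,
              resolution=64, z_channels=4, double_z=True)
dec = Decoder(ch=64, out_ch=3, ch_mult=(1, 2, 4), num_res_blocks=2,
              attn_resolutions=[], dropout=0.0, in_channels=3,
              resolution=64, z_channels=4)

x = torch.randn(1, 3, 64, 64)
moments = enc(x)                              # (1, 8, 16, 16): mean and logvar stacked
mean, logvar = moments.chunk(2, dim=1)
z = mean + torch.randn_like(mean) * (0.5 * logvar).exp()   # reparameterized latent
rec = dec(z)                                  # (1, 3, 64, 64) reconstruction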
import torch import torch.nn as nn import torch.nn.functional as F import numpy as np import numpy.random as npr import copy from lib.model_zoo.common.get_model import get_model, register from lib.model_zoo.common import utils from .optimus_models.tokenization_gpt2 import GPT2Tokenizer symbol = 'optimus' @register('optimus_vae') class optimus_vae(nn.Module): """VAE with normal prior""" def __init__(self, encoder, decoder, tokenizer_encoder, tokenizer_decoder, args): # super().__init__() self.encoder = encoder if isinstance(encoder, nn.Module) else get_model()(encoder) self.decoder = decoder if isinstance(decoder, nn.Module) else get_model()(decoder) self.tokenizer_encoder = tokenizer_encoder \ if isinstance(tokenizer_encoder, nn.Module) \ else get_model()(tokenizer_encoder, verbose=False) self.tokenizer_decoder = tokenizer_decoder \ if isinstance(tokenizer_decoder, nn.Module) \ else get_model()(tokenizer_decoder, verbose=False) gpt2_special_tokens_dict = {'pad_token': '<PAD>', 'bos_token': '<BOS>', 'eos_token': '<EOS>'} if isinstance(self.tokenizer_encoder, GPT2Tokenizer): self.tokenizer_encoder.add_special_tokens(gpt2_special_tokens_dict) if isinstance(self.tokenizer_decoder, GPT2Tokenizer): self.tokenizer_decoder.add_special_tokens(gpt2_special_tokens_dict) self.args = args self.nz = args.latent_size self.eos_token_id = self.tokenizer_decoder.convert_tokens_to_ids( [self.tokenizer_decoder.eos_token])[0] self.pad_token_id = self.tokenizer_decoder.convert_tokens_to_ids( [self.tokenizer_decoder.pad_token])[0] # connector: from Bert hidden units to the latent space # self.linear = nn.Linear(args.nz, 2 * args.nz, bias=False) # Standard Normal prior loc = torch.zeros(self.nz) scale = torch.ones(self.nz) self.prior = torch.distributions.normal.Normal(loc, scale) def connect(self, bert_fea, nsamples=1): """ Returns: Tensor1, Tensor2 Tensor1: the tensor latent z with shape [batch, nsamples, nz] Tensor2: the tenor of KL for each x with shape [batch] """ # (batch_size, nz) mean, logvar = self.encoder.linear(bert_fea).chunk(2, -1) # pdb.set_trace() # mean, logvar = mean.squeeze(0), logvar.squeeze(0) # (batch, nsamples, nz) z = self.reparameterize(mean, logvar, nsamples) KL = 0.5 * (mean.pow(2) + logvar.exp() - logvar - 1).sum(dim=1) return z, KL def connect_deterministic(self, bert_fea, nsamples=1): """ Returns: Tensor1, Tensor2 Tensor1: the tensor latent z with shape [batch, nsamples, nz] Tensor2: the tenor of KL for each x with shape [batch] """ # (batch_size, nz) mean, logvar = self.encoder.linear(bert_fea).chunk(2, -1) # pdb.set_trace() # mean, logvar = mean.squeeze(0), logvar.squeeze(0) logvar.fill_(.0) # (batch, nsamples, nz) z = self.reparameterize(mean, logvar, nsamples) KL = 0.5 * (mean.pow(2) + logvar.exp() - logvar - 1).sum(dim=1) return z, KL def reparameterize(self, mu, logvar, nsamples=1): """sample from posterior Gaussian family Args: mu: Tensor Mean of gaussian distribution with shape (batch, nz) logvar: Tensor logvar of gaussian distibution with shape (batch, nz) Returns: Tensor Sampled z with shape (batch, nsamples, nz) """ batch_size, nz = mu.size() std = logvar.mul(0.5).exp() mu_expd = mu.unsqueeze(1).expand(batch_size, nsamples, nz) std_expd = std.unsqueeze(1).expand(batch_size, nsamples, nz) eps = torch.zeros_like(std_expd).normal_() return mu_expd + torch.mul(eps, std_expd) def forward(self, inputs, labels): # pdb.set_trace() attention_mask=(inputs > 0).float() # logger.info(inputs) # logger.info(attention_mask) # logger.info(labels) reconstrution_mask=(labels != 
50257).float() # 50257 is the padding token for GPT2 sent_length = torch.sum(reconstrution_mask, dim=1) outputs = self.encoder(inputs, attention_mask) pooled_hidden_fea = outputs[1] # model outputs are always tuple in pytorch-transformers (see doc) if self.args.fb_mode==0: # Connect hidden feature to the latent space latent_z, loss_kl = self.connect(pooled_hidden_fea) latent_z = latent_z.squeeze(1) # Decoding outputs = self.decoder(input_ids=labels, past=latent_z, labels=labels, label_ignore=self.pad_token_id) loss_rec = outputs[0] # model outputs are always tuple in pytorch-transformers (see doc) elif self.args.fb_mode==1: # Connect hidden feature to the latent space mu, logvar = self.encoder.linear(pooled_hidden_fea).chunk(2, -1) latent_z = self.reparameterize(mu, logvar, nsamples=1) latent_z = latent_z.squeeze(1) loss_kl = 0.5 * (mu.pow(2) + logvar.exp() - logvar - 1) kl_mask = (loss_kl > self.args.dim_target_kl).float() loss_kl = (kl_mask * loss_kl).sum(dim=1) # pdb.set_trace() # past = self.decoder.linear(latent_z) # Decoding outputs = self.decoder(input_ids=labels, past=latent_z, labels=labels, label_ignore=self.pad_token_id) loss_rec = outputs[0] # model outputs are always tuple in pytorch-transformers (see doc) elif self.args.fb_mode==2: # Connect hidden feature to the latent space latent_z, loss_kl = self.connect_deterministic(pooled_hidden_fea) latent_z = latent_z.squeeze(1) # past = self.decoder.linear(latent_z) # Decoding outputs = self.decoder(input_ids=labels, past=latent_z, labels=labels, label_ignore=self.pad_token_id) loss_rec = outputs[0] # model outputs are always tuple in pytorch-transformers (see doc) # pdb.set_trace() if self.args.length_weighted_loss: loss = loss_rec / sent_length + self.args.beta * loss_kl else: loss = loss_rec + self.args.beta * loss_kl return loss_rec, loss_kl, loss def encoder_sample(self, bert_fea, nsamples): """sampling from the encoder Returns: Tensor1 Tensor1: the tensor latent z with shape [batch, nsamples, nz] """ # (batch_size, nz) mu, logvar = self.encoder.linear(bert_fea).chunk(2, -1) mu, logvar = mu.squeeze(0), logvar.squeeze(0) # (batch, nsamples, nz) z = self.reparameterize(mu, logvar, nsamples) return z, (mu, logvar) def encode_stats(self, x): """ Returns: Tensor1, Tensor2 Tensor1: the mean of latent z with shape [batch, nz] Tensor2: the logvar of latent z with shape [batch, nz] """ return self.encoder.encode_stats(x) def decode(self, z, strategy, K=10): """generate samples from z given strategy Args: z: [batch, nsamples, nz] strategy: "beam" or "greedy" or "sample" K: the beam width parameter Returns: List1 List1: a list of decoded word sequence """ if strategy == "beam": return self.decoder.beam_search_decode(z, K) elif strategy == "greedy": return self.decoder.greedy_decode(z) elif strategy == "sample": return self.decoder.sample_decode(z) else: raise ValueError("the decoding strategy is not supported") def reconstruct(self, x, decoding_strategy="greedy", K=5): """reconstruct from input x Args: x: (batch, *) decoding_strategy: "beam" or "greedy" or "sample" K: the beam width parameter Returns: List1 List1: a list of decoded word sequence """ z = self.sample_from_inference(x).squeeze(1) return self.decode(z, decoding_strategy, K) def log_probability(self, x, z): """Cross Entropy in the language case Args: x: (batch_size, seq_len) z: (batch_size, n_sample, nz) Returns: log_p: (batch_size, n_sample). 
log_p(x|z) across different x and z """ outputs = self.decoder(input_ids=x, past=z, labels=x, label_ignore=self.pad_token_id) loss_rec = outputs[0] return -loss_rec def loss_iw(self, x0, x1, nsamples=50, ns=1): """ Args: x: if the data is constant-length, x is the data tensor with shape (batch, *). Otherwise x is a tuple that contains the data tensor and length list Returns: Tensor1, Tensor2, Tensor3 Tensor1: total loss [batch] Tensor2: reconstruction loss shape [batch] Tensor3: KL loss shape [batch] """ # encoding into bert features bert_fea = self.encoder(x0)[1] # (batch_size, nz) mu, logvar = self.encoder.linear(bert_fea).chunk(2, -1) ################## # compute KL ################## # pdb.set_trace() KL = 0.5 * (mu.pow(2) + logvar.exp() - logvar - 1).sum(dim=1) # mu, logvar = mu.squeeze(0), logvar.squeeze(0) ll_tmp, rc_tmp = [], [] for _ in range(int(nsamples / ns)): # (batch, nsamples, nz) z = self.reparameterize(mu, logvar, ns) # past = self.decoder.linear(z) past = z # [batch, nsamples] log_prior = self.eval_prior_dist(z) log_gen = self.eval_cond_ll(x1, past) log_infer = self.eval_inference_dist(z, (mu, logvar)) # pdb.set_trace() log_gen = log_gen.unsqueeze(0).contiguous().view(z.shape[0],-1) # pdb.set_trace() rc_tmp.append(log_gen) ll_tmp.append(log_gen + log_prior - log_infer) log_prob_iw = log_sum_exp(torch.cat(ll_tmp, dim=-1), dim=-1) - math.log(nsamples) log_gen_iw = torch.mean(torch.cat(rc_tmp, dim=-1), dim=-1) return log_prob_iw, log_gen_iw , KL def nll_iw(self, x0, x1, nsamples, ns=1): """compute the importance weighting estimate of the log-likelihood Args: x0, x1: two different tokenization results of x, where x is the data tensor with shape (batch, *). nsamples: Int the number of samples required to estimate marginal data likelihood Returns: Tensor1 Tensor1: the estimate of log p(x), shape [batch] """ # compute iw every ns samples to address the memory issue # nsamples = 500, ns = 100 # nsamples = 500, ns = 10 # TODO: note that x is forwarded twice in self.encoder.sample(x, ns) and self.eval_inference_dist(x, z, param) #. 
this problem is to be solved in order to speed up tmp = [] for _ in range(int(nsamples / ns)): # [batch, ns, nz] # Chunyuan: # encoding into bert features pooled_hidden_fea = self.encoder(x0)[1] # param is the parameters required to evaluate q(z|x) z, param = self.encoder_sample(pooled_hidden_fea, ns) # [batch, ns] log_comp_ll = self.eval_complete_ll(x1, z) log_infer_ll = self.eval_inference_dist(z, param) tmp.append(log_comp_ll - log_infer_ll) ll_iw = log_sum_exp(torch.cat(tmp, dim=-1), dim=-1) - math.log(nsamples) return ll_iw def KL(self, x): _, KL = self.encode(x, 1) return KL def eval_prior_dist(self, zrange): """perform grid search to calculate the true posterior Args: zrange: tensor different z points that will be evaluated, with shape (k^2, nz), where k=(zmax - zmin)/space """ # (k^2) return self.prior.log_prob(zrange).sum(dim=-1) def eval_complete_ll(self, x, z): """compute log p(z,x) Args: x: Tensor input with shape [batch, seq_len] z: Tensor evaluation points with shape [batch, nsamples, nz] Returns: Tensor1 Tensor1: log p(z,x) Tensor with shape [batch, nsamples] """ # [batch, nsamples] log_prior = self.eval_prior_dist(z) log_gen = self.eval_cond_ll(x, z) return log_prior + log_gen def eval_cond_ll(self, x, z): """compute log p(x|z) """ x_shape = list(x.size()) z_shape = list(z.size()) if len(z_shape) == 3: x = x.unsqueeze(1).repeat(1, z_shape[1], 1).contiguous().view(x_shape[0]*z_shape[1], x_shape[-1]) z = z.contiguous().view(x_shape[0]*z_shape[1], z_shape[-1]) return self.log_probability(x, z) def eval_log_model_posterior(self, x, grid_z): """perform grid search to calculate the true posterior this function computes p(z|x) Args: grid_z: tensor different z points that will be evaluated, with shape (k^2, nz), where k=(zmax - zmin)/pace Returns: Tensor Tensor: the log posterior distribution log p(z|x) with shape [batch_size, K^2] """ try: batch_size = x.size(0) except: batch_size = x[0].size(0) # (batch_size, k^2, nz) grid_z = grid_z.unsqueeze(0).expand(batch_size, *grid_z.size()).contiguous() # (batch_size, k^2) log_comp = self.eval_complete_ll(x, grid_z) # normalize to posterior log_posterior = log_comp - log_sum_exp(log_comp, dim=1, keepdim=True) return log_posterior def sample_from_inference(self, x, nsamples=1): """perform sampling from inference net Returns: Tensor Tensor: samples from infernece nets with shape (batch_size, nsamples, nz) """ z, _ = self.encoder.sample(x, nsamples) return z def sample_from_posterior(self, x, nsamples): """perform MH sampling from model posterior Returns: Tensor Tensor: samples from model posterior with shape (batch_size, nsamples, nz) """ # use the samples from inference net as initial points # for MCMC sampling. 
[batch_size, nsamples, nz] cur = self.encoder.sample_from_inference(x, 1) cur_ll = self.eval_complete_ll(x, cur) total_iter = self.args.mh_burn_in + nsamples * self.args.mh_thin samples = [] for iter_ in range(total_iter): next = torch.normal(mean=cur, std=cur.new_full(size=cur.size(), fill_value=self.args.mh_std)) # [batch_size, 1] next_ll = self.eval_complete_ll(x, next) ratio = next_ll - cur_ll accept_prob = torch.min(ratio.exp(), ratio.new_ones(ratio.size())) uniform_t = accept_prob.new_empty(accept_prob.size()).uniform_() # [batch_size, 1] mask = (uniform_t < accept_prob).float() mask_ = mask.unsqueeze(2) cur = mask_ * next + (1 - mask_) * cur cur_ll = mask * next_ll + (1 - mask) * cur_ll if iter_ >= self.args.mh_burn_in and (iter_ - self.args.mh_burn_in) % self.args.mh_thin == 0: samples.append(cur.unsqueeze(1)) return torch.cat(samples, dim=1) def calc_model_posterior_mean(self, x, grid_z): """compute the mean value of model posterior, i.e. E_{z ~ p(z|x)}[z] Args: grid_z: different z points that will be evaluated, with shape (k^2, nz), where k=(zmax - zmin)/pace x: [batch, *] Returns: Tensor1 Tensor1: the mean value tensor with shape [batch, nz] """ # [batch, K^2] log_posterior = self.eval_log_model_posterior(x, grid_z) posterior = log_posterior.exp() # [batch, nz] return torch.mul(posterior.unsqueeze(2), grid_z.unsqueeze(0)).sum(1) def calc_infer_mean(self, x): """ Returns: Tensor1 Tensor1: the mean of inference distribution, with shape [batch, nz] """ mean, logvar = self.encoder.forward(x) return mean def eval_inference_dist(self, z, param): """this function computes log q(z | x) Args: z: tensor different z points that will be evaluated, with shape [batch, nsamples, nz] Returns: Tensor1 Tensor1: log q(z|x) with shape [batch, nsamples] """ nz = z.size(2) mu, logvar = param # (batch_size, 1, nz) mu, logvar = mu.unsqueeze(1), logvar.unsqueeze(1) var = logvar.exp() # (batch_size, nsamples, nz) dev = z - mu # (batch_size, nsamples) log_density = -0.5 * ((dev ** 2) / var).sum(dim=-1) - \ 0.5 * (nz * math.log(2 * math.pi) + logvar.sum(-1)) return log_density def calc_mi(self, test_data_batch, args): # calc_mi_v3 import math from modules.utils import log_sum_exp mi = 0 num_examples = 0 mu_batch_list, logvar_batch_list = [], [] neg_entropy = 0. for batch_data in test_data_batch: x0, _, _ = batch_data x0 = x0.to(args.device) # encoding into bert features bert_fea = self.encoder(x0)[1] (batch_size, nz) mu, logvar = self.encoder.linear(bert_fea).chunk(2, -1) x_batch, nz = mu.size() #print(x_batch, end=' ') num_examples += x_batch # E_{q(z|x)}log(q(z|x)) = -0.5*nz*log(2*\pi) - 0.5*(1+logvar).sum(-1) neg_entropy += (-0.5 * nz * math.log(2 * math.pi)- 0.5 * (1 + logvar).sum(-1)).sum().item() mu_batch_list += [mu.cpu()] logvar_batch_list += [logvar.cpu()] pdb.set_trace() neg_entropy = neg_entropy / num_examples ##print() num_examples = 0 log_qz = 0. 
for i in range(len(mu_batch_list)): ############### # get z_samples ############### mu, logvar = mu_batch_list[i].cuda(), logvar_batch_list[i].cuda() # [z_batch, 1, nz] z_samples = self.reparameterize(mu, logvar, 1) z_samples = z_samples.view(-1, 1, nz) num_examples += z_samples.size(0) ############### # compute density ############### # [1, x_batch, nz] #mu, logvar = mu_batch_list[i].cuda(), logvar_batch_list[i].cuda() #indices = list(np.random.choice(np.arange(len(mu_batch_list)), 10)) + [i] indices = np.arange(len(mu_batch_list)) mu = torch.cat([mu_batch_list[_] for _ in indices], dim=0).cuda() logvar = torch.cat([logvar_batch_list[_] for _ in indices], dim=0).cuda() x_batch, nz = mu.size() mu, logvar = mu.unsqueeze(0), logvar.unsqueeze(0) var = logvar.exp() # (z_batch, x_batch, nz) dev = z_samples - mu # (z_batch, x_batch) log_density = -0.5 * ((dev ** 2) / var).sum(dim=-1) - \ 0.5 * (nz * math.log(2 * math.pi) + logvar.sum(-1)) # log q(z): aggregate posterior # [z_batch] log_qz += (log_sum_exp(log_density, dim=1) - math.log(x_batch)).sum(-1) log_qz /= num_examples mi = neg_entropy - log_qz return mi def calc_au(self, eval_dataloader, args, delta=0.01): """compute the number of active units """ cnt = 0 for batch_data in eval_dataloader: x0, _, _ = batch_data x0 = x0.to(args.device) # encoding into bert features bert_fea = self.encoder(x0)[1] # (batch_size, nz) mean, logvar = self.encoder.linear(bert_fea).chunk(2, -1) if cnt == 0: means_sum = mean.sum(dim=0, keepdim=True) else: means_sum = means_sum + mean.sum(dim=0, keepdim=True) cnt += mean.size(0) # (1, nz) mean_mean = means_sum / cnt cnt = 0 for batch_data in eval_dataloader: x0, _, _ = batch_data x0 = x0.to(args.device) # encoding into bert features bert_fea = self.encoder(x0)[1] # (batch_size, nz) mean, _ = self.encoder.linear(bert_fea).chunk(2, -1) if cnt == 0: var_sum = ((mean - mean_mean) ** 2).sum(dim=0) else: var_sum = var_sum + ((mean - mean_mean) ** 2).sum(dim=0) cnt += mean.size(0) # (nz) au_var = var_sum / (cnt - 1) return (au_var >= delta).sum().item(), au_var from .optimus_models.optimus_bert import BertForLatentConnector_XX @register('optimus_bert_connector') class optimus_bert_connector(BertForLatentConnector_XX): pass from .optimus_models.tokenization_bert import BertTokenizer @register('optimus_bert_tokenizer') class optimus_bert_tokenizer(BertTokenizer): pass from .optimus_models.optimus_gpt2 import GPT2ForLatentConnector_XX @register('optimus_gpt2_connector') class optimus_gpt2_connector(GPT2ForLatentConnector_XX): pass from .optimus_models.tokenization_gpt2 import GPT2Tokenizer @register('optimus_gpt2_tokenizer') class optimus_gpt2_tokenizer(GPT2Tokenizer): pass ############################## # some helpers for inference # ############################## def sample_single_sequence_conditional( model, context, past=None, temperature=1, top_k=0, top_p=0.0, eos_token=50829, max_length=30, ): past = past.unsqueeze(0) generated = context.unsqueeze(0) with torch.no_grad(): while True: # for _ in trange(length): inputs = {'input_ids': generated, 'past': past} outputs = model(**inputs) next_token_logits = outputs[0][0, -1, :] / temperature filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p) next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1) generated = torch.cat((generated, next_token.unsqueeze(0)), dim=1) if next_token[0].item() == eos_token: break if generated.shape[1] >= max_length: generated[0, -1] = eos_token break return generated.squeeze(0) def 
top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')): """ Filter a distribution of logits using top-k and/or nucleus (top-p) filtering Args: logits: logits distribution shape (vocabulary size) top_k > 0: keep only top k tokens with highest probability (top-k filtering). top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering). Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751) From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317 """ assert logits.dim() == 1 # batch size 1 for now - could be updated for more but the code would be less clear top_k = min(top_k, logits.size(-1)) # Safety check if top_k > 0: # Remove all tokens with a probability less than the last token of the top-k indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None] logits[indices_to_remove] = filter_value if top_p > 0.0: sorted_logits, sorted_indices = torch.sort(logits, descending=True) cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1) # Remove tokens with cumulative probability above the threshold sorted_indices_to_remove = cumulative_probs > top_p # Shift the indices to the right to keep also the first token above the threshold sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone() sorted_indices_to_remove[..., 0] = 0 indices_to_remove = sorted_indices[sorted_indices_to_remove] logits[indices_to_remove] = filter_value return logits ######################## # compatible to vd 2.0 # ######################## @register('optimus_vae_next') class optimus_vae_next(optimus_vae): def get_device(self): return self.encoder.linear.weight.device def encode(self, text, max_length=77): tokenizer = self.tokenizer_encoder token = [tokenizer.tokenize(sentence.lower()) for sentence in text] token = [ti[0:max_length] for ti in token] token_id = [] for tokeni in token: token_sentence = [tokenizer._convert_token_to_id(i) for i in tokeni] token_sentence = tokenizer.add_special_tokens_single_sentence(token_sentence) token_id.append(torch.LongTensor(token_sentence)) token_id = torch._C._nn.pad_sequence(token_id, batch_first=True, padding_value=0.0) token_id = token_id.to(self.get_device()) z = self.encoder(token_id, attention_mask=(token_id > 0).float())[1] z_mu, z_logvar = self.encoder.linear(z).chunk(2, -1) # z_sampled = self.reparameterize(z_mu, z_logvar, 1) return z_mu.squeeze(1) @torch.no_grad() def decode(self, z, temperature=1.0): bos_token = self.tokenizer_decoder.encode('<BOS>') eos_token = self.tokenizer_decoder.encode('<EOS>') context_tokens = torch.LongTensor(bos_token).to(z.device) sentenses = [] for zi in z: out = sample_single_sequence_conditional( model=self.decoder, context=context_tokens, past=zi, temperature=temperature, top_k=0, top_p=1.0, max_length=30, eos_token = eos_token[0],) text = self.tokenizer_decoder.decode(out.tolist(), clean_up_tokenization_spaces=True) text = text.split()[1:-1] text = ' '.join(text) sentenses.append(text) return sentenses
Versatile-Diffusion-master
lib/model_zoo/optimus.py
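A minimal standalone sketch of the latent connector used in optimus_vae.connect above: the pooled encoder feature is projected to a mean and log-variance, a latent is drawn with the reparameterization trick, and the KL to a standard-normal prior is computed in closed form. The feature width (768) and latent size nz below are illustrative assumptions, not values from the repository's configs.

import torch
import torch.nn as nn

nz = 32                                  # assumed latent size (args.latent_size)
connector = nn.Linear(768, 2 * nz)       # stand-in for encoder.linear
bert_fea = torch.randn(4, 768)           # stand-in for the pooled BERT feature

mean, logvar = connector(bert_fea).chunk(2, -1)       # each (batch, nz)
std = (0.5 * logvar).exp()
eps = torch.randn(4, 1, nz)                           # nsamples = 1
z = mean.unsqueeze(1) + eps * std.unsqueeze(1)        # (batch, nsamples, nz)
kl = 0.5 * (mean.pow(2) + logvar.exp() - logvar - 1).sum(dim=1)  # (batch,)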
import torch
from torch import nn


class LitEma(nn.Module):
    def __init__(self, model, decay=0.9999, use_num_updates=True):
        super().__init__()
        if decay < 0.0 or decay > 1.0:
            raise ValueError('Decay must be between 0 and 1')

        self.m_name2s_name = {}
        self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))
        self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int)
                             if use_num_updates
                             else torch.tensor(-1, dtype=torch.int))

        for name, p in model.named_parameters():
            if p.requires_grad:
                # remove as '.'-character is not allowed in buffers
                s_name = name.replace('.', '')
                self.m_name2s_name.update({name: s_name})
                self.register_buffer(s_name, p.clone().detach().data)

        self.collected_params = []

    def forward(self, model):
        decay = self.decay

        if self.num_updates >= 0:
            self.num_updates += 1
            decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))

        one_minus_decay = 1.0 - decay

        with torch.no_grad():
            m_param = dict(model.named_parameters())
            shadow_params = dict(self.named_buffers())

            for key in m_param:
                if m_param[key].requires_grad:
                    sname = self.m_name2s_name[key]
                    shadow_params[sname] = shadow_params[sname].type_as(m_param[key])
                    shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))
                else:
                    assert not key in self.m_name2s_name

    def copy_to(self, model):
        m_param = dict(model.named_parameters())
        shadow_params = dict(self.named_buffers())
        for key in m_param:
            if m_param[key].requires_grad:
                m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)
            else:
                assert not key in self.m_name2s_name

    def store(self, parameters):
        """
        Save the current parameters for restoring later.
        Args:
            parameters: Iterable of `torch.nn.Parameter`; the parameters to be
                temporarily stored.
        """
        self.collected_params = [param.clone() for param in parameters]

    def restore(self, parameters):
        """
        Restore the parameters stored with the `store` method.
        Useful to validate the model with EMA parameters without affecting the
        original optimization process. Store the parameters before the
        `copy_to` method. After validation (or model saving), use this to
        restore the former parameters.
        Args:
            parameters: Iterable of `torch.nn.Parameter`; the parameters to be
                updated with the stored parameters.
        """
        for c_param, param in zip(self.collected_params, parameters):
            param.data.copy_(c_param.data)
Versatile-Diffusion-master
lib/model_zoo/ema.py
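A short usage sketch for LitEma, assuming the repository is importable under the path shown above: the module keeps shadow copies of all trainable parameters, forward(model) updates them after each optimizer step, and store/copy_to/restore let you evaluate with EMA weights and then switch back.

import torch.nn as nn
from lib.model_zoo.ema import LitEma   # path taken from the entry above

model = nn.Linear(8, 8)
ema = LitEma(model, decay=0.9999)

# training loop: call after every optimizer.step()
ema(model)                             # update the shadow (EMA) parameters

# evaluation with EMA weights, then restore the raw weights
ema.store(model.parameters())
ema.copy_to(model)
# ... run validation here ...
ema.restore(model.parameters())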
import os import math import torch import torch.nn as nn import numpy as np from einops import repeat def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if schedule == "linear": betas = ( torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2 ) elif schedule == "cosine": timesteps = ( torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s ) alphas = timesteps / (1 + cosine_s) * np.pi / 2 alphas = torch.cos(alphas).pow(2) alphas = alphas / alphas[0] betas = 1 - alphas[1:] / alphas[:-1] betas = np.clip(betas, a_min=0, a_max=0.999) elif schedule == "sqrt_linear": betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) elif schedule == "sqrt": betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5 else: raise ValueError(f"schedule '{schedule}' unknown.") return betas.numpy() def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True): if ddim_discr_method == 'uniform': c = num_ddpm_timesteps // num_ddim_timesteps ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c))) elif ddim_discr_method == 'quad': ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int) else: raise NotImplementedError(f'There is no ddim discretization method called "{ddim_discr_method}"') # assert ddim_timesteps.shape[0] == num_ddim_timesteps # add one to get the final alpha values right (the ones from first scale to data during sampling) steps_out = ddim_timesteps + 1 if verbose: print(f'Selected timesteps for ddim sampler: {steps_out}') return steps_out def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True): # select alphas for computing the variance schedule alphas = alphacums[ddim_timesteps] alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist()) # according the the formula provided in https://arxiv.org/abs/2010.02502 sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev)) if verbose: print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}') print(f'For the chosen value of eta, which is {eta}, ' f'this results in the following sigma_t schedule for ddim sampler {sigmas}') return sigmas, alphas, alphas_prev def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999): """ Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of (1-beta) over time from t = [0,1]. :param num_diffusion_timesteps: the number of betas to produce. :param alpha_bar: a lambda that takes an argument t from 0 to 1 and produces the cumulative product of (1-beta) up to that part of the diffusion process. :param max_beta: the maximum beta to use; use values lower than 1 to prevent singularities. """ betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)) return np.array(betas) def extract_into_tensor(a, t, x_shape): b, *_ = t.shape out = a.gather(-1, t) return out.reshape(b, *((1,) * (len(x_shape) - 1))) def checkpoint(func, inputs, params, flag): """ Evaluate a function without caching intermediate activations, allowing for reduced memory at the expense of extra compute in the backward pass. :param func: the function to evaluate. :param inputs: the argument sequence to pass to `func`. 
:param params: a sequence of parameters `func` depends on but does not explicitly take as arguments. :param flag: if False, disable gradient checkpointing. """ if flag: args = tuple(inputs) + tuple(params) return CheckpointFunction.apply(func, len(inputs), *args) else: return func(*inputs) class CheckpointFunction(torch.autograd.Function): @staticmethod def forward(ctx, run_function, length, *args): ctx.run_function = run_function ctx.input_tensors = list(args[:length]) ctx.input_params = list(args[length:]) with torch.no_grad(): output_tensors = ctx.run_function(*ctx.input_tensors) return output_tensors @staticmethod def backward(ctx, *output_grads): ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors] with torch.enable_grad(): # Fixes a bug where the first op in run_function modifies the # Tensor storage in place, which is not allowed for detach()'d # Tensors. shallow_copies = [x.view_as(x) for x in ctx.input_tensors] output_tensors = ctx.run_function(*shallow_copies) input_grads = torch.autograd.grad( output_tensors, ctx.input_tensors + ctx.input_params, output_grads, allow_unused=True, ) del ctx.input_tensors del ctx.input_params del output_tensors return (None, None) + input_grads def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False): """ Create sinusoidal timestep embeddings. :param timesteps: a 1-D Tensor of N indices, one per batch element. These may be fractional. :param dim: the dimension of the output. :param max_period: controls the minimum frequency of the embeddings. :return: an [N x dim] Tensor of positional embeddings. """ if not repeat_only: half = dim // 2 freqs = torch.exp( -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half ).to(device=timesteps.device) args = timesteps[:, None].float() * freqs[None] embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) if dim % 2: embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1) else: embedding = repeat(timesteps, 'b -> b d', d=dim) return embedding def zero_module(module): """ Zero out the parameters of a module and return it. """ for p in module.parameters(): p.detach().zero_() return module def scale_module(module, scale): """ Scale the parameters of a module and return it. """ for p in module.parameters(): p.detach().mul_(scale) return module def mean_flat(tensor): """ Take the mean over all non-batch dimensions. """ return tensor.mean(dim=list(range(1, len(tensor.shape)))) def normalization(channels): """ Make a standard normalization layer. :param channels: number of input channels. :return: an nn.Module for normalization. """ return GroupNorm32(32, channels) # PyTorch 1.7 has SiLU, but we support PyTorch 1.5. class SiLU(nn.Module): def forward(self, x): return x * torch.sigmoid(x) class GroupNorm32(nn.GroupNorm): def forward(self, x): # return super().forward(x.float()).type(x.dtype) return super().forward(x) def conv_nd(dims, *args, **kwargs): """ Create a 1D, 2D, or 3D convolution module. """ if dims == 1: return nn.Conv1d(*args, **kwargs) elif dims == 2: return nn.Conv2d(*args, **kwargs) elif dims == 3: return nn.Conv3d(*args, **kwargs) raise ValueError(f"unsupported dimensions: {dims}") def linear(*args, **kwargs): """ Create a linear module. """ return nn.Linear(*args, **kwargs) def avg_pool_nd(dims, *args, **kwargs): """ Create a 1D, 2D, or 3D average pooling module. 
""" if dims == 1: return nn.AvgPool1d(*args, **kwargs) elif dims == 2: return nn.AvgPool2d(*args, **kwargs) elif dims == 3: return nn.AvgPool3d(*args, **kwargs) raise ValueError(f"unsupported dimensions: {dims}") class HybridConditioner(nn.Module): def __init__(self, c_concat_config, c_crossattn_config): super().__init__() self.concat_conditioner = instantiate_from_config(c_concat_config) self.crossattn_conditioner = instantiate_from_config(c_crossattn_config) def forward(self, c_concat, c_crossattn): c_concat = self.concat_conditioner(c_concat) c_crossattn = self.crossattn_conditioner(c_crossattn) return {'c_concat': [c_concat], 'c_crossattn': [c_crossattn]} def noise_like(x, repeat=False): noise = torch.randn_like(x) if repeat: bs = x.shape[0] noise = noise[0:1].repeat(bs, *((1,) * (len(x.shape) - 1))) return noise ########################## # inherit from ldm.utils # ########################## def count_params(model, verbose=False): total_params = sum(p.numel() for p in model.parameters()) if verbose: print(f"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.") return total_params
Versatile-Diffusion-master
lib/model_zoo/diffusion_utils.py
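A brief usage sketch for two of the helpers above (import path taken from the entry; the embedding width 320 is an illustrative assumption): build a linear beta schedule with its cumulative alphas, and compute sinusoidal embeddings for a batch of integer timesteps.

import numpy as np
import torch
from lib.model_zoo.diffusion_utils import make_beta_schedule, timestep_embedding

betas = make_beta_schedule("linear", n_timestep=1000)   # numpy array, shape (1000,)
alphas_cumprod = np.cumprod(1.0 - betas)                # cumulative alpha_bar_t used by DDPM/DDIM

t = torch.randint(0, 1000, (4,))                        # a batch of timesteps
emb = timestep_embedding(t, dim=320)                    # (4, 320) sinusoidal features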
from .common.get_model import get_model
from .common.get_optimizer import get_optimizer
from .common.get_scheduler import get_scheduler
from .common.utils import get_unit
Versatile-Diffusion-master
lib/model_zoo/__init__.py
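A hedged sketch of the registry pattern these exports support, inferred only from how it is used in optimus.py above (a class is registered with @register('name') and later built from a config via get_model()(cfg)); the toy class name below is hypothetical and the exact config format, which lives in lib/model_zoo/common, is not reproduced here.

import torch.nn as nn
from lib.model_zoo.common.get_model import get_model, register

@register('toy_block')               # hypothetical registry name, for illustration only
class ToyBlock(nn.Module):
    def __init__(self, dim=16):
        super().__init__()
        self.proj = nn.Linear(dim, dim)

    def forward(self, x):
        return self.proj(x)

# Elsewhere, registered models are resolved from a config object via
# get_model()(cfg), as optimus_vae does for its encoder and decoder.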
import torch
import numpy as np


class AbstractDistribution:
    def sample(self):
        raise NotImplementedError()

    def mode(self):
        raise NotImplementedError()


class DiracDistribution(AbstractDistribution):
    def __init__(self, value):
        self.value = value

    def sample(self):
        return self.value

    def mode(self):
        return self.value


class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)

    def sample(self):
        x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2)
                                       + self.var - 1.0 - self.logvar,
                                       dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var - 1.0 - self.logvar + other.logvar,
                    dim=[1, 2, 3])

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(
            logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,
            dim=dims)

    def mode(self):
        return self.mean


def normal_kl(mean1, logvar1, mean2, logvar2):
    """
    source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12
    Compute the KL divergence between two gaussians.
    Shapes are automatically broadcasted, so batches can be compared to
    scalars, among other use cases.
    """
    tensor = None
    for obj in (mean1, logvar1, mean2, logvar2):
        if isinstance(obj, torch.Tensor):
            tensor = obj
            break
    assert tensor is not None, "at least one argument must be a Tensor"

    # Force variances to be Tensors. Broadcasting helps convert scalars to
    # Tensors, but it does not work for torch.exp().
    logvar1, logvar2 = [
        x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)
        for x in (logvar1, logvar2)
    ]

    return 0.5 * (
        -1.0
        + logvar2
        - logvar1
        + torch.exp(logvar1 - logvar2)
        + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)
    )
Versatile-Diffusion-master
lib/model_zoo/distributions.py
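A small sketch of DiagonalGaussianDistribution used as an autoencoder posterior (import path from the entry above; tensor sizes are illustrative): the constructor expects mean and log-variance packed along the channel axis.

import torch
from lib.model_zoo.distributions import DiagonalGaussianDistribution

moments = torch.randn(2, 8, 32, 32)          # 2 * z_channels, here z_channels = 4
posterior = DiagonalGaussianDistribution(moments)

z = posterior.sample()                       # (2, 4, 32, 32): mean + std * noise
z_det = posterior.mode()                     # the mean, for deterministic encoding
kl = posterior.kl()                          # (2,): closed-form KL to N(0, I)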
# pytorch_diffusion + derived encoder decoder import math import torch import torch.nn as nn import numpy as np from einops import rearrange # from .diffusion_utils import instantiate_from_config from .attention import LinearAttention def get_timestep_embedding(timesteps, embedding_dim): """ This matches the implementation in Denoising Diffusion Probabilistic Models: From Fairseq. Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of "Attention Is All You Need". """ assert len(timesteps.shape) == 1 half_dim = embedding_dim // 2 emb = math.log(10000) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb) emb = emb.to(device=timesteps.device) emb = timesteps.float()[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: # zero pad emb = torch.nn.functional.pad(emb, (0,1,0,0)) return emb def nonlinearity(x): # swish return x*torch.sigmoid(x) def Normalize(in_channels, num_groups=32): return torch.nn.GroupNorm(num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True) class Upsample(nn.Module): def __init__(self, in_channels, with_conv): super().__init__() self.with_conv = with_conv if self.with_conv: self.conv = torch.nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1) def forward(self, x): x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest") if self.with_conv: x = self.conv(x) return x class Downsample(nn.Module): def __init__(self, in_channels, with_conv): super().__init__() self.with_conv = with_conv if self.with_conv: # no asymmetric padding in torch conv, must do it ourselves self.conv = torch.nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=2, padding=0) def forward(self, x): if self.with_conv: pad = (0,1,0,1) x = torch.nn.functional.pad(x, pad, mode="constant", value=0) x = self.conv(x) else: x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2) return x class ResnetBlock(nn.Module): def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False, dropout, temb_channels=512): super().__init__() self.in_channels = in_channels out_channels = in_channels if out_channels is None else out_channels self.out_channels = out_channels self.use_conv_shortcut = conv_shortcut self.norm1 = Normalize(in_channels) self.conv1 = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) if temb_channels > 0: self.temb_proj = torch.nn.Linear(temb_channels, out_channels) self.norm2 = Normalize(out_channels) self.dropout = torch.nn.Dropout(dropout) self.conv2 = torch.nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1) if self.in_channels != self.out_channels: if self.use_conv_shortcut: self.conv_shortcut = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) else: self.nin_shortcut = torch.nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0) def forward(self, x, temb): h = x h = self.norm1(h) h = nonlinearity(h) h = self.conv1(h) if temb is not None: h = h + self.temb_proj(nonlinearity(temb))[:,:,None,None] h = self.norm2(h) h = nonlinearity(h) h = self.dropout(h) h = self.conv2(h) if self.in_channels != self.out_channels: if self.use_conv_shortcut: x = self.conv_shortcut(x) else: x = self.nin_shortcut(x) return x+h class LinAttnBlock(LinearAttention): """to match AttnBlock usage""" def __init__(self, in_channels): super().__init__(dim=in_channels, heads=1, 
dim_head=in_channels) class AttnBlock(nn.Module): def __init__(self, in_channels): super().__init__() self.in_channels = in_channels self.norm = Normalize(in_channels) self.q = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) self.k = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) self.v = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) self.proj_out = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) def forward(self, x): h_ = x h_ = self.norm(h_) q = self.q(h_) k = self.k(h_) v = self.v(h_) # compute attention b,c,h,w = q.shape q = q.reshape(b,c,h*w) q = q.permute(0,2,1) # b,hw,c k = k.reshape(b,c,h*w) # b,c,hw w_ = torch.bmm(q,k) # b,hw,hw w[b,i,j]=sum_c q[b,i,c]k[b,c,j] w_ = w_ * (int(c)**(-0.5)) w_ = torch.nn.functional.softmax(w_, dim=2) # attend to values v = v.reshape(b,c,h*w) w_ = w_.permute(0,2,1) # b,hw,hw (first hw of k, second of q) h_ = torch.bmm(v,w_) # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j] h_ = h_.reshape(b,c,h,w) h_ = self.proj_out(h_) return x+h_ def make_attn(in_channels, attn_type="vanilla"): assert attn_type in ["vanilla", "linear", "none"], f'attn_type {attn_type} unknown' print(f"making attention of type '{attn_type}' with {in_channels} in_channels") if attn_type == "vanilla": return AttnBlock(in_channels) elif attn_type == "none": return nn.Identity(in_channels) else: return LinAttnBlock(in_channels) class Model(nn.Module): def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, resolution, use_timestep=True, use_linear_attn=False, attn_type="vanilla"): super().__init__() if use_linear_attn: attn_type = "linear" self.ch = ch self.temb_ch = self.ch*4 self.num_resolutions = len(ch_mult) self.num_res_blocks = num_res_blocks self.resolution = resolution self.in_channels = in_channels self.use_timestep = use_timestep if self.use_timestep: # timestep embedding self.temb = nn.Module() self.temb.dense = nn.ModuleList([ torch.nn.Linear(self.ch, self.temb_ch), torch.nn.Linear(self.temb_ch, self.temb_ch), ]) # downsampling self.conv_in = torch.nn.Conv2d(in_channels, self.ch, kernel_size=3, stride=1, padding=1) curr_res = resolution in_ch_mult = (1,)+tuple(ch_mult) self.down = nn.ModuleList() for i_level in range(self.num_resolutions): block = nn.ModuleList() attn = nn.ModuleList() block_in = ch*in_ch_mult[i_level] block_out = ch*ch_mult[i_level] for i_block in range(self.num_res_blocks): block.append(ResnetBlock(in_channels=block_in, out_channels=block_out, temb_channels=self.temb_ch, dropout=dropout)) block_in = block_out if curr_res in attn_resolutions: attn.append(make_attn(block_in, attn_type=attn_type)) down = nn.Module() down.block = block down.attn = attn if i_level != self.num_resolutions-1: down.downsample = Downsample(block_in, resamp_with_conv) curr_res = curr_res // 2 self.down.append(down) # middle self.mid = nn.Module() self.mid.block_1 = ResnetBlock(in_channels=block_in, out_channels=block_in, temb_channels=self.temb_ch, dropout=dropout) self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) self.mid.block_2 = ResnetBlock(in_channels=block_in, out_channels=block_in, temb_channels=self.temb_ch, dropout=dropout) # upsampling self.up = nn.ModuleList() for i_level in reversed(range(self.num_resolutions)): block = nn.ModuleList() attn = nn.ModuleList() block_out = ch*ch_mult[i_level] skip_in = ch*ch_mult[i_level] for i_block in 
range(self.num_res_blocks+1): if i_block == self.num_res_blocks: skip_in = ch*in_ch_mult[i_level] block.append(ResnetBlock(in_channels=block_in+skip_in, out_channels=block_out, temb_channels=self.temb_ch, dropout=dropout)) block_in = block_out if curr_res in attn_resolutions: attn.append(make_attn(block_in, attn_type=attn_type)) up = nn.Module() up.block = block up.attn = attn if i_level != 0: up.upsample = Upsample(block_in, resamp_with_conv) curr_res = curr_res * 2 self.up.insert(0, up) # prepend to get consistent order # end self.norm_out = Normalize(block_in) self.conv_out = torch.nn.Conv2d(block_in, out_ch, kernel_size=3, stride=1, padding=1) def forward(self, x, t=None, context=None): #assert x.shape[2] == x.shape[3] == self.resolution if context is not None: # assume aligned context, cat along channel axis x = torch.cat((x, context), dim=1) if self.use_timestep: # timestep embedding assert t is not None temb = get_timestep_embedding(t, self.ch) temb = self.temb.dense[0](temb) temb = nonlinearity(temb) temb = self.temb.dense[1](temb) else: temb = None # downsampling hs = [self.conv_in(x)] for i_level in range(self.num_resolutions): for i_block in range(self.num_res_blocks): h = self.down[i_level].block[i_block](hs[-1], temb) if len(self.down[i_level].attn) > 0: h = self.down[i_level].attn[i_block](h) hs.append(h) if i_level != self.num_resolutions-1: hs.append(self.down[i_level].downsample(hs[-1])) # middle h = hs[-1] h = self.mid.block_1(h, temb) h = self.mid.attn_1(h) h = self.mid.block_2(h, temb) # upsampling for i_level in reversed(range(self.num_resolutions)): for i_block in range(self.num_res_blocks+1): h = self.up[i_level].block[i_block]( torch.cat([h, hs.pop()], dim=1), temb) if len(self.up[i_level].attn) > 0: h = self.up[i_level].attn[i_block](h) if i_level != 0: h = self.up[i_level].upsample(h) # end h = self.norm_out(h) h = nonlinearity(h) h = self.conv_out(h) return h def get_last_layer(self): return self.conv_out.weight class Encoder(nn.Module): def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, resolution, z_channels, double_z=True, use_linear_attn=False, attn_type="vanilla", **ignore_kwargs): super().__init__() if use_linear_attn: attn_type = "linear" self.ch = ch self.temb_ch = 0 self.num_resolutions = len(ch_mult) self.num_res_blocks = num_res_blocks self.resolution = resolution self.in_channels = in_channels # downsampling self.conv_in = torch.nn.Conv2d(in_channels, self.ch, kernel_size=3, stride=1, padding=1) curr_res = resolution in_ch_mult = (1,)+tuple(ch_mult) self.in_ch_mult = in_ch_mult self.down = nn.ModuleList() for i_level in range(self.num_resolutions): block = nn.ModuleList() attn = nn.ModuleList() block_in = ch*in_ch_mult[i_level] block_out = ch*ch_mult[i_level] for i_block in range(self.num_res_blocks): block.append(ResnetBlock(in_channels=block_in, out_channels=block_out, temb_channels=self.temb_ch, dropout=dropout)) block_in = block_out if curr_res in attn_resolutions: attn.append(make_attn(block_in, attn_type=attn_type)) down = nn.Module() down.block = block down.attn = attn if i_level != self.num_resolutions-1: down.downsample = Downsample(block_in, resamp_with_conv) curr_res = curr_res // 2 self.down.append(down) # middle self.mid = nn.Module() self.mid.block_1 = ResnetBlock(in_channels=block_in, out_channels=block_in, temb_channels=self.temb_ch, dropout=dropout) self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) self.mid.block_2 = 
ResnetBlock(in_channels=block_in, out_channels=block_in, temb_channels=self.temb_ch, dropout=dropout) # end self.norm_out = Normalize(block_in) self.conv_out = torch.nn.Conv2d(block_in, 2*z_channels if double_z else z_channels, kernel_size=3, stride=1, padding=1) def forward(self, x): # timestep embedding temb = None # downsampling hs = [self.conv_in(x)] for i_level in range(self.num_resolutions): for i_block in range(self.num_res_blocks): h = self.down[i_level].block[i_block](hs[-1], temb) if len(self.down[i_level].attn) > 0: h = self.down[i_level].attn[i_block](h) hs.append(h) if i_level != self.num_resolutions-1: hs.append(self.down[i_level].downsample(hs[-1])) # middle h = hs[-1] h = self.mid.block_1(h, temb) h = self.mid.attn_1(h) h = self.mid.block_2(h, temb) # end h = self.norm_out(h) h = nonlinearity(h) h = self.conv_out(h) return h class Decoder(nn.Module): def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False, attn_type="vanilla", **ignorekwargs): super().__init__() if use_linear_attn: attn_type = "linear" self.ch = ch self.temb_ch = 0 self.num_resolutions = len(ch_mult) self.num_res_blocks = num_res_blocks self.resolution = resolution self.in_channels = in_channels self.give_pre_end = give_pre_end self.tanh_out = tanh_out # compute in_ch_mult, block_in and curr_res at lowest res in_ch_mult = (1,)+tuple(ch_mult) block_in = ch*ch_mult[self.num_resolutions-1] curr_res = resolution // 2**(self.num_resolutions-1) self.z_shape = (1,z_channels,curr_res,curr_res) print("Working with z of shape {} = {} dimensions.".format( self.z_shape, np.prod(self.z_shape))) # z to block_in self.conv_in = torch.nn.Conv2d(z_channels, block_in, kernel_size=3, stride=1, padding=1) # middle self.mid = nn.Module() self.mid.block_1 = ResnetBlock(in_channels=block_in, out_channels=block_in, temb_channels=self.temb_ch, dropout=dropout) self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) self.mid.block_2 = ResnetBlock(in_channels=block_in, out_channels=block_in, temb_channels=self.temb_ch, dropout=dropout) # upsampling self.up = nn.ModuleList() for i_level in reversed(range(self.num_resolutions)): block = nn.ModuleList() attn = nn.ModuleList() block_out = ch*ch_mult[i_level] for i_block in range(self.num_res_blocks+1): block.append(ResnetBlock(in_channels=block_in, out_channels=block_out, temb_channels=self.temb_ch, dropout=dropout)) block_in = block_out if curr_res in attn_resolutions: attn.append(make_attn(block_in, attn_type=attn_type)) up = nn.Module() up.block = block up.attn = attn if i_level != 0: up.upsample = Upsample(block_in, resamp_with_conv) curr_res = curr_res * 2 self.up.insert(0, up) # prepend to get consistent order # end self.norm_out = Normalize(block_in) self.conv_out = torch.nn.Conv2d(block_in, out_ch, kernel_size=3, stride=1, padding=1) def forward(self, z): #assert z.shape[1:] == self.z_shape[1:] self.last_z_shape = z.shape # timestep embedding temb = None # z to block_in h = self.conv_in(z) # middle h = self.mid.block_1(h, temb) h = self.mid.attn_1(h) h = self.mid.block_2(h, temb) # upsampling for i_level in reversed(range(self.num_resolutions)): for i_block in range(self.num_res_blocks+1): h = self.up[i_level].block[i_block](h, temb) if len(self.up[i_level].attn) > 0: h = self.up[i_level].attn[i_block](h) if i_level != 0: h = self.up[i_level].upsample(h) # end if self.give_pre_end: return h h = self.norm_out(h) h = 
nonlinearity(h) h = self.conv_out(h) if self.tanh_out: h = torch.tanh(h) return h class SimpleDecoder(nn.Module): def __init__(self, in_channels, out_channels, *args, **kwargs): super().__init__() self.model = nn.ModuleList([nn.Conv2d(in_channels, in_channels, 1), ResnetBlock(in_channels=in_channels, out_channels=2 * in_channels, temb_channels=0, dropout=0.0), ResnetBlock(in_channels=2 * in_channels, out_channels=4 * in_channels, temb_channels=0, dropout=0.0), ResnetBlock(in_channels=4 * in_channels, out_channels=2 * in_channels, temb_channels=0, dropout=0.0), nn.Conv2d(2*in_channels, in_channels, 1), Upsample(in_channels, with_conv=True)]) # end self.norm_out = Normalize(in_channels) self.conv_out = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) def forward(self, x): for i, layer in enumerate(self.model): if i in [1,2,3]: x = layer(x, None) else: x = layer(x) h = self.norm_out(x) h = nonlinearity(h) x = self.conv_out(h) return x class UpsampleDecoder(nn.Module): def __init__(self, in_channels, out_channels, ch, num_res_blocks, resolution, ch_mult=(2,2), dropout=0.0): super().__init__() # upsampling self.temb_ch = 0 self.num_resolutions = len(ch_mult) self.num_res_blocks = num_res_blocks block_in = in_channels curr_res = resolution // 2 ** (self.num_resolutions - 1) self.res_blocks = nn.ModuleList() self.upsample_blocks = nn.ModuleList() for i_level in range(self.num_resolutions): res_block = [] block_out = ch * ch_mult[i_level] for i_block in range(self.num_res_blocks + 1): res_block.append(ResnetBlock(in_channels=block_in, out_channels=block_out, temb_channels=self.temb_ch, dropout=dropout)) block_in = block_out self.res_blocks.append(nn.ModuleList(res_block)) if i_level != self.num_resolutions - 1: self.upsample_blocks.append(Upsample(block_in, True)) curr_res = curr_res * 2 # end self.norm_out = Normalize(block_in) self.conv_out = torch.nn.Conv2d(block_in, out_channels, kernel_size=3, stride=1, padding=1) def forward(self, x): # upsampling h = x for k, i_level in enumerate(range(self.num_resolutions)): for i_block in range(self.num_res_blocks + 1): h = self.res_blocks[i_level][i_block](h, None) if i_level != self.num_resolutions - 1: h = self.upsample_blocks[k](h) h = self.norm_out(h) h = nonlinearity(h) h = self.conv_out(h) return h class LatentRescaler(nn.Module): def __init__(self, factor, in_channels, mid_channels, out_channels, depth=2): super().__init__() # residual block, interpolate, residual block self.factor = factor self.conv_in = nn.Conv2d(in_channels, mid_channels, kernel_size=3, stride=1, padding=1) self.res_block1 = nn.ModuleList([ResnetBlock(in_channels=mid_channels, out_channels=mid_channels, temb_channels=0, dropout=0.0) for _ in range(depth)]) self.attn = AttnBlock(mid_channels) self.res_block2 = nn.ModuleList([ResnetBlock(in_channels=mid_channels, out_channels=mid_channels, temb_channels=0, dropout=0.0) for _ in range(depth)]) self.conv_out = nn.Conv2d(mid_channels, out_channels, kernel_size=1, ) def forward(self, x): x = self.conv_in(x) for block in self.res_block1: x = block(x, None) x = torch.nn.functional.interpolate(x, size=(int(round(x.shape[2]*self.factor)), int(round(x.shape[3]*self.factor)))) x = self.attn(x) for block in self.res_block2: x = block(x, None) x = self.conv_out(x) return x class MergedRescaleEncoder(nn.Module): def __init__(self, in_channels, ch, resolution, out_ch, num_res_blocks, attn_resolutions, dropout=0.0, resamp_with_conv=True, ch_mult=(1,2,4,8), rescale_factor=1.0, rescale_module_depth=1): 
super().__init__() intermediate_chn = ch * ch_mult[-1] self.encoder = Encoder(in_channels=in_channels, num_res_blocks=num_res_blocks, ch=ch, ch_mult=ch_mult, z_channels=intermediate_chn, double_z=False, resolution=resolution, attn_resolutions=attn_resolutions, dropout=dropout, resamp_with_conv=resamp_with_conv, out_ch=None) self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=intermediate_chn, mid_channels=intermediate_chn, out_channels=out_ch, depth=rescale_module_depth) def forward(self, x): x = self.encoder(x) x = self.rescaler(x) return x class MergedRescaleDecoder(nn.Module): def __init__(self, z_channels, out_ch, resolution, num_res_blocks, attn_resolutions, ch, ch_mult=(1,2,4,8), dropout=0.0, resamp_with_conv=True, rescale_factor=1.0, rescale_module_depth=1): super().__init__() tmp_chn = z_channels*ch_mult[-1] self.decoder = Decoder(out_ch=out_ch, z_channels=tmp_chn, attn_resolutions=attn_resolutions, dropout=dropout, resamp_with_conv=resamp_with_conv, in_channels=None, num_res_blocks=num_res_blocks, ch_mult=ch_mult, resolution=resolution, ch=ch) self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=z_channels, mid_channels=tmp_chn, out_channels=tmp_chn, depth=rescale_module_depth) def forward(self, x): x = self.rescaler(x) x = self.decoder(x) return x class Upsampler(nn.Module): def __init__(self, in_size, out_size, in_channels, out_channels, ch_mult=2): super().__init__() assert out_size >= in_size num_blocks = int(np.log2(out_size//in_size))+1 factor_up = 1.+ (out_size % in_size) print(f"Building {self.__class__.__name__} with in_size: {in_size} --> out_size {out_size} and factor {factor_up}") self.rescaler = LatentRescaler(factor=factor_up, in_channels=in_channels, mid_channels=2*in_channels, out_channels=in_channels) self.decoder = Decoder(out_ch=out_channels, resolution=out_size, z_channels=in_channels, num_res_blocks=2, attn_resolutions=[], in_channels=None, ch=in_channels, ch_mult=[ch_mult for _ in range(num_blocks)]) def forward(self, x): x = self.rescaler(x) x = self.decoder(x) return x class Resize(nn.Module): def __init__(self, in_channels=None, learned=False, mode="bilinear"): super().__init__() self.with_conv = learned self.mode = mode if self.with_conv: print(f"Note: {self.__class__.__name} uses learned downsampling and will ignore the fixed {mode} mode") raise NotImplementedError() assert in_channels is not None # no asymmetric padding in torch conv, must do it ourselves self.conv = torch.nn.Conv2d(in_channels, in_channels, kernel_size=4, stride=2, padding=1) def forward(self, x, scale_factor=1.0): if scale_factor==1.0: return x else: x = torch.nn.functional.interpolate(x, mode=self.mode, align_corners=False, scale_factor=scale_factor) return x class FirstStagePostProcessor(nn.Module): def __init__(self, ch_mult:list, in_channels, pretrained_model:nn.Module=None, reshape=False, n_channels=None, dropout=0., pretrained_config=None): super().__init__() if pretrained_config is None: assert pretrained_model is not None, 'Either "pretrained_model" or "pretrained_config" must not be None' self.pretrained_model = pretrained_model else: assert pretrained_config is not None, 'Either "pretrained_model" or "pretrained_config" must not be None' self.instantiate_pretrained(pretrained_config) self.do_reshape = reshape if n_channels is None: n_channels = self.pretrained_model.encoder.ch self.proj_norm = Normalize(in_channels,num_groups=in_channels//2) self.proj = nn.Conv2d(in_channels,n_channels,kernel_size=3, stride=1,padding=1) blocks = [] downs = [] ch_in 
= n_channels for m in ch_mult: blocks.append(ResnetBlock(in_channels=ch_in,out_channels=m*n_channels,dropout=dropout)) ch_in = m * n_channels downs.append(Downsample(ch_in, with_conv=False)) self.model = nn.ModuleList(blocks) self.downsampler = nn.ModuleList(downs) def instantiate_pretrained(self, config): model = instantiate_from_config(config) self.pretrained_model = model.eval() # self.pretrained_model.train = False for param in self.pretrained_model.parameters(): param.requires_grad = False @torch.no_grad() def encode_with_pretrained(self,x): c = self.pretrained_model.encode(x) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() return c def forward(self,x): z_fs = self.encode_with_pretrained(x) z = self.proj_norm(z_fs) z = self.proj(z) z = nonlinearity(z) for submodel, downmodel in zip(self.model,self.downsampler): z = submodel(z,temb=None) z = downmodel(z) if self.do_reshape: z = rearrange(z,'b c h w -> b (h w) c') return z
Versatile-Diffusion-master
lib/model_zoo/diffusion_modules.py
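A rough instantiation sketch for the Encoder/Decoder pair above (import path from the entry; the hyperparameters are illustrative stand-ins, not the repository's actual autoencoder config): encode an image to latent moments, then decode the mean half back to image space.

import torch
from lib.model_zoo.diffusion_modules import Encoder, Decoder

enc = Encoder(ch=64, out_ch=3, ch_mult=(1, 2, 4), num_res_blocks=2,
              attn_resolutions=[16], in_channels=3, resolution=64,
              z_channels=4, double_z=True)
dec = Decoder(ch=64, out_ch=3, ch_mult=(1, 2, 4), num_res_blocks=2,
              attn_resolutions=[16], in_channels=3, resolution=64,
              z_channels=4)

x = torch.randn(1, 3, 64, 64)
moments = enc(x)                 # (1, 2 * z_channels, 16, 16): two 2x downsamples
z = moments[:, :4]               # take the mean half of (mean, logvar)
x_rec = dec(z)                   # (1, 3, 64, 64)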
from abc import abstractmethod from functools import partial import math from typing import Iterable import numpy as np import torch as th import torch.nn as nn import torch.nn.functional as F from .diffusion_utils import \ checkpoint, conv_nd, linear, avg_pool_nd, \ zero_module, normalization, timestep_embedding from .attention import SpatialTransformer from lib.model_zoo.common.get_model import get_model, register symbol = 'openai' # dummy replace def convert_module_to_f16(x): pass def convert_module_to_f32(x): pass ## go class AttentionPool2d(nn.Module): """ Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py """ def __init__( self, spacial_dim: int, embed_dim: int, num_heads_channels: int, output_dim: int = None, ): super().__init__() self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5) self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1) self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1) self.num_heads = embed_dim // num_heads_channels self.attention = QKVAttention(self.num_heads) def forward(self, x): b, c, *_spatial = x.shape x = x.reshape(b, c, -1) # NC(HW) x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1) x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1) x = self.qkv_proj(x) x = self.attention(x) x = self.c_proj(x) return x[:, :, 0] class TimestepBlock(nn.Module): """ Any module where forward() takes timestep embeddings as a second argument. """ @abstractmethod def forward(self, x, emb): """ Apply the module to `x` given `emb` timestep embeddings. """ class TimestepEmbedSequential(nn.Sequential, TimestepBlock): """ A sequential module that passes timestep embeddings to the children that support it as an extra input. """ def forward(self, x, emb, context=None): for layer in self: if isinstance(layer, TimestepBlock): x = layer(x, emb) elif isinstance(layer, SpatialTransformer): x = layer(x, context) else: x = layer(x) return x class Upsample(nn.Module): """ An upsampling layer with an optional convolution. :param channels: channels in the inputs and outputs. :param use_conv: a bool determining if a convolution is applied. :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then upsampling occurs in the inner-two dimensions. """ def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1): super().__init__() self.channels = channels self.out_channels = out_channels or channels self.use_conv = use_conv self.dims = dims if use_conv: self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding) def forward(self, x): assert x.shape[1] == self.channels if self.dims == 3: x = F.interpolate( x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest" ) else: x = F.interpolate(x, scale_factor=2, mode="nearest") if self.use_conv: x = self.conv(x) return x class TransposedUpsample(nn.Module): 'Learned 2x upsampling without padding' def __init__(self, channels, out_channels=None, ks=5): super().__init__() self.channels = channels self.out_channels = out_channels or channels self.up = nn.ConvTranspose2d(self.channels,self.out_channels,kernel_size=ks,stride=2) def forward(self,x): return self.up(x) class Downsample(nn.Module): """ A downsampling layer with an optional convolution. :param channels: channels in the inputs and outputs. :param use_conv: a bool determining if a convolution is applied. :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then downsampling occurs in the inner-two dimensions. 
""" def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1): super().__init__() self.channels = channels self.out_channels = out_channels or channels self.use_conv = use_conv self.dims = dims stride = 2 if dims != 3 else (1, 2, 2) if use_conv: self.op = conv_nd( dims, self.channels, self.out_channels, 3, stride=stride, padding=padding ) else: assert self.channels == self.out_channels self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride) def forward(self, x): assert x.shape[1] == self.channels return self.op(x) class ResBlock(TimestepBlock): """ A residual block that can optionally change the number of channels. :param channels: the number of input channels. :param emb_channels: the number of timestep embedding channels. :param dropout: the rate of dropout. :param out_channels: if specified, the number of out channels. :param use_conv: if True and out_channels is specified, use a spatial convolution instead of a smaller 1x1 convolution to change the channels in the skip connection. :param dims: determines if the signal is 1D, 2D, or 3D. :param use_checkpoint: if True, use gradient checkpointing on this module. :param up: if True, use this block for upsampling. :param down: if True, use this block for downsampling. """ def __init__( self, channels, emb_channels, dropout, out_channels=None, use_conv=False, use_scale_shift_norm=False, dims=2, use_checkpoint=False, up=False, down=False, ): super().__init__() self.channels = channels self.emb_channels = emb_channels self.dropout = dropout self.out_channels = out_channels or channels self.use_conv = use_conv self.use_checkpoint = use_checkpoint self.use_scale_shift_norm = use_scale_shift_norm self.in_layers = nn.Sequential( normalization(channels), nn.SiLU(), conv_nd(dims, channels, self.out_channels, 3, padding=1), ) self.updown = up or down if up: self.h_upd = Upsample(channels, False, dims) self.x_upd = Upsample(channels, False, dims) elif down: self.h_upd = Downsample(channels, False, dims) self.x_upd = Downsample(channels, False, dims) else: self.h_upd = self.x_upd = nn.Identity() self.emb_layers = nn.Sequential( nn.SiLU(), linear( emb_channels, 2 * self.out_channels if use_scale_shift_norm else self.out_channels, ), ) self.out_layers = nn.Sequential( normalization(self.out_channels), nn.SiLU(), nn.Dropout(p=dropout), zero_module( conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1) ), ) if self.out_channels == channels: self.skip_connection = nn.Identity() elif use_conv: self.skip_connection = conv_nd( dims, channels, self.out_channels, 3, padding=1 ) else: self.skip_connection = conv_nd(dims, channels, self.out_channels, 1) def forward(self, x, emb): """ Apply the block to a Tensor, conditioned on a timestep embedding. :param x: an [N x C x ...] Tensor of features. :param emb: an [N x emb_channels] Tensor of timestep embeddings. :return: an [N x C x ...] Tensor of outputs. 
""" return checkpoint( self._forward, (x, emb), self.parameters(), self.use_checkpoint ) def _forward(self, x, emb): if self.updown: in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1] h = in_rest(x) h = self.h_upd(h) x = self.x_upd(x) h = in_conv(h) else: h = self.in_layers(x) emb_out = self.emb_layers(emb).type(h.dtype) while len(emb_out.shape) < len(h.shape): emb_out = emb_out[..., None] if self.use_scale_shift_norm: out_norm, out_rest = self.out_layers[0], self.out_layers[1:] scale, shift = th.chunk(emb_out, 2, dim=1) h = out_norm(h) * (1 + scale) + shift h = out_rest(h) else: h = h + emb_out h = self.out_layers(h) return self.skip_connection(x) + h class AttentionBlock(nn.Module): """ An attention block that allows spatial positions to attend to each other. Originally ported from here, but adapted to the N-d case. https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66. """ def __init__( self, channels, num_heads=1, num_head_channels=-1, use_checkpoint=False, use_new_attention_order=False, ): super().__init__() self.channels = channels if num_head_channels == -1: self.num_heads = num_heads else: assert ( channels % num_head_channels == 0 ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}" self.num_heads = channels // num_head_channels self.use_checkpoint = use_checkpoint self.norm = normalization(channels) self.qkv = conv_nd(1, channels, channels * 3, 1) if use_new_attention_order: # split qkv before split heads self.attention = QKVAttention(self.num_heads) else: # split heads before split qkv self.attention = QKVAttentionLegacy(self.num_heads) self.proj_out = zero_module(conv_nd(1, channels, channels, 1)) def forward(self, x): return checkpoint(self._forward, (x,), self.parameters(), True) # TODO: check checkpoint usage, is True # TODO: fix the .half call!!! #return pt_checkpoint(self._forward, x) # pytorch def _forward(self, x): b, c, *spatial = x.shape x = x.reshape(b, c, -1) qkv = self.qkv(self.norm(x)) h = self.attention(qkv) h = self.proj_out(h) return (x + h).reshape(b, c, *spatial) def count_flops_attn(model, _x, y): """ A counter for the `thop` package to count the operations in an attention operation. Meant to be used like: macs, params = thop.profile( model, inputs=(inputs, timestamps), custom_ops={QKVAttention: QKVAttention.count_flops}, ) """ b, c, *spatial = y[0].shape num_spatial = int(np.prod(spatial)) # We perform two matmuls with the same number of ops. # The first computes the weight matrix, the second computes # the combination of the value vectors. matmul_ops = 2 * b * (num_spatial ** 2) * c model.total_ops += th.DoubleTensor([matmul_ops]) class QKVAttentionLegacy(nn.Module): """ A module which performs QKV attention. Matches legacy QKVAttention + input/ouput heads shaping """ def __init__(self, n_heads): super().__init__() self.n_heads = n_heads def forward(self, qkv): """ Apply QKV attention. :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs. :return: an [N x (H * C) x T] tensor after attention. 
""" bs, width, length = qkv.shape assert width % (3 * self.n_heads) == 0 ch = width // (3 * self.n_heads) q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1) scale = 1 / math.sqrt(math.sqrt(ch)) weight = th.einsum( "bct,bcs->bts", q * scale, k * scale ) # More stable with f16 than dividing afterwards weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) a = th.einsum("bts,bcs->bct", weight, v) return a.reshape(bs, -1, length) @staticmethod def count_flops(model, _x, y): return count_flops_attn(model, _x, y) class QKVAttention(nn.Module): """ A module which performs QKV attention and splits in a different order. """ def __init__(self, n_heads): super().__init__() self.n_heads = n_heads def forward(self, qkv): """ Apply QKV attention. :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs. :return: an [N x (H * C) x T] tensor after attention. """ bs, width, length = qkv.shape assert width % (3 * self.n_heads) == 0 ch = width // (3 * self.n_heads) q, k, v = qkv.chunk(3, dim=1) scale = 1 / math.sqrt(math.sqrt(ch)) weight = th.einsum( "bct,bcs->bts", (q * scale).view(bs * self.n_heads, ch, length), (k * scale).view(bs * self.n_heads, ch, length), ) # More stable with f16 than dividing afterwards weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length)) return a.reshape(bs, -1, length) @staticmethod def count_flops(model, _x, y): return count_flops_attn(model, _x, y) @register('openai_unet') class UNetModel(nn.Module): """ The full UNet model with attention and timestep embedding. :param in_channels: channels in the input Tensor. :param model_channels: base channel count for the model. :param out_channels: channels in the output Tensor. :param num_res_blocks: number of residual blocks per downsample. :param attention_resolutions: a collection of downsample rates at which attention will take place. May be a set, list, or tuple. For example, if this contains 4, then at 4x downsampling, attention will be used. :param dropout: the dropout probability. :param channel_mult: channel multiplier for each level of the UNet. :param conv_resample: if True, use learned convolutions for upsampling and downsampling. :param dims: determines if the signal is 1D, 2D, or 3D. :param num_classes: if specified (as an int), then this model will be class-conditional with `num_classes` classes. :param use_checkpoint: use gradient checkpointing to reduce memory usage. :param num_heads: the number of attention heads in each attention layer. :param num_heads_channels: if specified, ignore num_heads and instead use a fixed channel width per attention head. :param num_heads_upsample: works with num_heads to set a different number of heads for upsampling. Deprecated. :param use_scale_shift_norm: use a FiLM-like conditioning mechanism. :param resblock_updown: use residual blocks for up/downsampling. :param use_new_attention_order: use a different attention pattern for potentially increased efficiency. 
""" def __init__( self, image_size, in_channels, model_channels, out_channels, num_res_blocks, attention_resolutions, dropout=0, channel_mult=(1, 2, 4, 8), conv_resample=True, dims=2, num_classes=None, use_checkpoint=False, use_fp16=False, num_heads=-1, num_head_channels=-1, num_heads_upsample=-1, use_scale_shift_norm=False, resblock_updown=False, use_new_attention_order=False, use_spatial_transformer=False, # custom transformer support transformer_depth=1, # custom transformer support context_dim=None, # custom transformer support n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model legacy=True, disable_self_attentions=None, num_attention_blocks=None ): super().__init__() if use_spatial_transformer: assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...' if context_dim is not None: assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...' from omegaconf.listconfig import ListConfig if type(context_dim) == ListConfig: context_dim = list(context_dim) if num_heads_upsample == -1: num_heads_upsample = num_heads if num_heads == -1: assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set' if num_head_channels == -1: assert num_heads != -1, 'Either num_heads or num_head_channels has to be set' self.image_size = image_size self.in_channels = in_channels self.model_channels = model_channels self.out_channels = out_channels if isinstance(num_res_blocks, int): self.num_res_blocks = len(channel_mult) * [num_res_blocks] else: if len(num_res_blocks) != len(channel_mult): raise ValueError("provide num_res_blocks either as an int (globally constant) or " "as a list/tuple (per-level) with the same length as channel_mult") self.num_res_blocks = num_res_blocks #self.num_res_blocks = num_res_blocks if disable_self_attentions is not None: # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not assert len(disable_self_attentions) == len(channel_mult) if num_attention_blocks is not None: assert len(num_attention_blocks) == len(self.num_res_blocks) assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks)))) print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. 
" f"This option has LESS priority than attention_resolutions {attention_resolutions}, " f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, " f"attention will still not be set.") # todo: convert to warning self.attention_resolutions = attention_resolutions self.dropout = dropout self.channel_mult = channel_mult self.conv_resample = conv_resample self.num_classes = num_classes self.use_checkpoint = use_checkpoint self.dtype = th.float16 if use_fp16 else th.float32 self.num_heads = num_heads self.num_head_channels = num_head_channels self.num_heads_upsample = num_heads_upsample self.predict_codebook_ids = n_embed is not None time_embed_dim = model_channels * 4 self.time_embed = nn.Sequential( linear(model_channels, time_embed_dim), nn.SiLU(), linear(time_embed_dim, time_embed_dim), ) if self.num_classes is not None: self.label_emb = nn.Embedding(num_classes, time_embed_dim) self.input_blocks = nn.ModuleList( [ TimestepEmbedSequential( conv_nd(dims, in_channels, model_channels, 3, padding=1) ) ] ) self._feature_size = model_channels input_block_chans = [model_channels] ch = model_channels ds = 1 for level, mult in enumerate(channel_mult): for nr in range(self.num_res_blocks[level]): layers = [ ResBlock( ch, time_embed_dim, dropout, out_channels=mult * model_channels, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ) ] ch = mult * model_channels if ds in attention_resolutions: if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels if disable_self_attentions is not None: disabled_sa = disable_self_attentions[level] else: disabled_sa = False if num_attention_blocks is None or nr < num_attention_blocks[level]: layers.append( AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads, num_head_channels=dim_head, use_new_attention_order=use_new_attention_order, ) if not use_spatial_transformer else SpatialTransformer( ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, disable_self_attn=disabled_sa ) ) self.input_blocks.append(TimestepEmbedSequential(*layers)) self._feature_size += ch input_block_chans.append(ch) if level != len(channel_mult) - 1: out_ch = ch self.input_blocks.append( TimestepEmbedSequential( ResBlock( ch, time_embed_dim, dropout, out_channels=out_ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, down=True, ) if resblock_updown else Downsample( ch, conv_resample, dims=dims, out_channels=out_ch ) ) ) ch = out_ch input_block_chans.append(ch) ds *= 2 self._feature_size += ch if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels self.middle_block = TimestepEmbedSequential( ResBlock( ch, time_embed_dim, dropout, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ), AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads, num_head_channels=dim_head, use_new_attention_order=use_new_attention_order, ) if not use_spatial_transformer else SpatialTransformer( # always uses a self-attn ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim ), ResBlock( ch, time_embed_dim, dropout, dims=dims, use_checkpoint=use_checkpoint, 
use_scale_shift_norm=use_scale_shift_norm, ), ) self._feature_size += ch self.output_blocks = nn.ModuleList([]) for level, mult in list(enumerate(channel_mult))[::-1]: for i in range(self.num_res_blocks[level] + 1): ich = input_block_chans.pop() layers = [ ResBlock( ch + ich, time_embed_dim, dropout, out_channels=model_channels * mult, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ) ] ch = model_channels * mult if ds in attention_resolutions: if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels if disable_self_attentions is not None: disabled_sa = disable_self_attentions[level] else: disabled_sa = False if num_attention_blocks is None or i < num_attention_blocks[level]: layers.append( AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads_upsample, num_head_channels=dim_head, use_new_attention_order=use_new_attention_order, ) if not use_spatial_transformer else SpatialTransformer( ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, disable_self_attn=disabled_sa ) ) if level and i == self.num_res_blocks[level]: out_ch = ch layers.append( ResBlock( ch, time_embed_dim, dropout, out_channels=out_ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, up=True, ) if resblock_updown else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch) ) ds //= 2 self.output_blocks.append(TimestepEmbedSequential(*layers)) self._feature_size += ch self.out = nn.Sequential( normalization(ch), nn.SiLU(), zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)), ) if self.predict_codebook_ids: self.id_predictor = nn.Sequential( normalization(ch), conv_nd(dims, model_channels, n_embed, 1), #nn.LogSoftmax(dim=1) # change to cross_entropy and produce non-normalized logits ) def convert_to_fp16(self): """ Convert the torso of the model to float16. """ self.input_blocks.apply(convert_module_to_f16) self.middle_block.apply(convert_module_to_f16) self.output_blocks.apply(convert_module_to_f16) def convert_to_fp32(self): """ Convert the torso of the model to float32. """ self.input_blocks.apply(convert_module_to_f32) self.middle_block.apply(convert_module_to_f32) self.output_blocks.apply(convert_module_to_f32) def forward(self, x, timesteps=None, context=None, y=None,**kwargs): """ Apply the model to an input batch. :param x: an [N x C x ...] Tensor of inputs. :param timesteps: a 1-D batch of timesteps. :param context: conditioning plugged in via crossattn :param y: an [N] Tensor of labels, if class-conditional. :return: an [N x C x ...] Tensor of outputs. """ assert (y is not None) == ( self.num_classes is not None ), "must specify y if and only if the model is class-conditional" hs = [] t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) emb = self.time_embed(t_emb) if self.num_classes is not None: assert y.shape == (x.shape[0],) emb = emb + self.label_emb(y) h = x.type(self.dtype) for module in self.input_blocks: h = module(h, emb, context) hs.append(h) h = self.middle_block(h, emb, context) for module in self.output_blocks: h = th.cat([h, hs.pop()], dim=1) h = module(h, emb, context) h = h.type(x.dtype) if self.predict_codebook_ids: return self.id_predictor(h) else: return self.out(h) class EncoderUNetModel(nn.Module): """ The half UNet model with attention and timestep embedding. 
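    The `pool` argument selects how the final feature map is reduced: 'adaptive'
    applies global average pooling followed by a zero-initialized 1x1 convolution,
    'attention' uses AttentionPool2d, and 'spatial' / 'spatial_v2' concatenate
    per-resolution spatial means and pass them through a small linear head (see the
    constructor below).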
For usage, see UNet. """ def __init__( self, image_size, in_channels, model_channels, out_channels, num_res_blocks, attention_resolutions, dropout=0, channel_mult=(1, 2, 4, 8), conv_resample=True, dims=2, use_checkpoint=False, use_fp16=False, num_heads=1, num_head_channels=-1, num_heads_upsample=-1, use_scale_shift_norm=False, resblock_updown=False, use_new_attention_order=False, pool="adaptive", *args, **kwargs ): super().__init__() if num_heads_upsample == -1: num_heads_upsample = num_heads self.in_channels = in_channels self.model_channels = model_channels self.out_channels = out_channels self.num_res_blocks = num_res_blocks self.attention_resolutions = attention_resolutions self.dropout = dropout self.channel_mult = channel_mult self.conv_resample = conv_resample self.use_checkpoint = use_checkpoint self.dtype = th.float16 if use_fp16 else th.float32 self.num_heads = num_heads self.num_head_channels = num_head_channels self.num_heads_upsample = num_heads_upsample time_embed_dim = model_channels * 4 self.time_embed = nn.Sequential( linear(model_channels, time_embed_dim), nn.SiLU(), linear(time_embed_dim, time_embed_dim), ) self.input_blocks = nn.ModuleList( [ TimestepEmbedSequential( conv_nd(dims, in_channels, model_channels, 3, padding=1) ) ] ) self._feature_size = model_channels input_block_chans = [model_channels] ch = model_channels ds = 1 for level, mult in enumerate(channel_mult): for _ in range(num_res_blocks): layers = [ ResBlock( ch, time_embed_dim, dropout, out_channels=mult * model_channels, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ) ] ch = mult * model_channels if ds in attention_resolutions: layers.append( AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads, num_head_channels=num_head_channels, use_new_attention_order=use_new_attention_order, ) ) self.input_blocks.append(TimestepEmbedSequential(*layers)) self._feature_size += ch input_block_chans.append(ch) if level != len(channel_mult) - 1: out_ch = ch self.input_blocks.append( TimestepEmbedSequential( ResBlock( ch, time_embed_dim, dropout, out_channels=out_ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, down=True, ) if resblock_updown else Downsample( ch, conv_resample, dims=dims, out_channels=out_ch ) ) ) ch = out_ch input_block_chans.append(ch) ds *= 2 self._feature_size += ch self.middle_block = TimestepEmbedSequential( ResBlock( ch, time_embed_dim, dropout, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ), AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads, num_head_channels=num_head_channels, use_new_attention_order=use_new_attention_order, ), ResBlock( ch, time_embed_dim, dropout, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ), ) self._feature_size += ch self.pool = pool if pool == "adaptive": self.out = nn.Sequential( normalization(ch), nn.SiLU(), nn.AdaptiveAvgPool2d((1, 1)), zero_module(conv_nd(dims, ch, out_channels, 1)), nn.Flatten(), ) elif pool == "attention": assert num_head_channels != -1 self.out = nn.Sequential( normalization(ch), nn.SiLU(), AttentionPool2d( (image_size // ds), ch, num_head_channels, out_channels ), ) elif pool == "spatial": self.out = nn.Sequential( nn.Linear(self._feature_size, 2048), nn.ReLU(), nn.Linear(2048, self.out_channels), ) elif pool == "spatial_v2": self.out = nn.Sequential( nn.Linear(self._feature_size, 2048), normalization(2048), nn.SiLU(), nn.Linear(2048, self.out_channels), 
) else: raise NotImplementedError(f"Unexpected {pool} pooling") def convert_to_fp16(self): """ Convert the torso of the model to float16. """ self.input_blocks.apply(convert_module_to_f16) self.middle_block.apply(convert_module_to_f16) def convert_to_fp32(self): """ Convert the torso of the model to float32. """ self.input_blocks.apply(convert_module_to_f32) self.middle_block.apply(convert_module_to_f32) def forward(self, x, timesteps): """ Apply the model to an input batch. :param x: an [N x C x ...] Tensor of inputs. :param timesteps: a 1-D batch of timesteps. :return: an [N x K] Tensor of outputs. """ emb = self.time_embed(timestep_embedding(timesteps, self.model_channels)) results = [] h = x.type(self.dtype) for module in self.input_blocks: h = module(h, emb) if self.pool.startswith("spatial"): results.append(h.type(x.dtype).mean(dim=(2, 3))) h = self.middle_block(h, emb) if self.pool.startswith("spatial"): results.append(h.type(x.dtype).mean(dim=(2, 3))) h = th.cat(results, axis=-1) return self.out(h) else: h = h.type(x.dtype) return self.out(h) ####################### # Unet with self-attn # ####################### from .attention import SpatialTransformerNoContext @register('openai_unet_nocontext') class UNetModelNoContext(nn.Module): def __init__( self, image_size, in_channels, model_channels, out_channels, num_res_blocks, attention_resolutions, dropout=0, channel_mult=(1, 2, 4, 8), conv_resample=True, dims=2, num_classes=None, use_checkpoint=False, use_fp16=False, num_heads=-1, num_head_channels=-1, num_heads_upsample=-1, use_scale_shift_norm=False, resblock_updown=False, use_new_attention_order=False, use_spatial_transformer=False, # custom transformer support transformer_depth=1, # custom transformer support n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model legacy=True, num_attention_blocks=None, ): super().__init__() if num_heads_upsample == -1: num_heads_upsample = num_heads if num_heads == -1: assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set' if num_head_channels == -1: assert num_heads != -1, 'Either num_heads or num_head_channels has to be set' self.image_size = image_size self.in_channels = in_channels self.model_channels = model_channels self.out_channels = out_channels if isinstance(num_res_blocks, int): self.num_res_blocks = len(channel_mult) * [num_res_blocks] else: if len(num_res_blocks) != len(channel_mult): raise ValueError("provide num_res_blocks either as an int (globally constant) or " "as a list/tuple (per-level) with the same length as channel_mult") self.num_res_blocks = num_res_blocks #self.num_res_blocks = num_res_blocks if num_attention_blocks is not None: assert len(num_attention_blocks) == len(self.num_res_blocks) assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks)))) print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. 
" f"This option has LESS priority than attention_resolutions {attention_resolutions}, " f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, " f"attention will still not be set.") # todo: convert to warning self.attention_resolutions = attention_resolutions self.dropout = dropout self.channel_mult = channel_mult self.conv_resample = conv_resample self.num_classes = num_classes self.use_checkpoint = use_checkpoint self.dtype = th.float16 if use_fp16 else th.float32 self.num_heads = num_heads self.num_head_channels = num_head_channels self.num_heads_upsample = num_heads_upsample self.predict_codebook_ids = n_embed is not None time_embed_dim = model_channels * 4 self.time_embed = nn.Sequential( linear(model_channels, time_embed_dim), nn.SiLU(), linear(time_embed_dim, time_embed_dim), ) if self.num_classes is not None: self.label_emb = nn.Embedding(num_classes, time_embed_dim) self.input_blocks = nn.ModuleList( [ TimestepEmbedSequential( conv_nd(dims, in_channels, model_channels, 3, padding=1) ) ] ) self._feature_size = model_channels input_block_chans = [model_channels] ch = model_channels ds = 1 for level, mult in enumerate(channel_mult): for nr in range(self.num_res_blocks[level]): layers = [ ResBlock( ch, time_embed_dim, dropout, out_channels=mult * model_channels, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ) ] ch = mult * model_channels if ds in attention_resolutions: if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels if not exists(num_attention_blocks) or nr < num_attention_blocks[level]: layers.append( AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads, num_head_channels=dim_head, use_new_attention_order=use_new_attention_order, ) if not use_spatial_transformer else SpatialTransformerNoContext( ch, num_heads, dim_head, depth=transformer_depth ) ) self.input_blocks.append(TimestepEmbedSequential(*layers)) self._feature_size += ch input_block_chans.append(ch) if level != len(channel_mult) - 1: out_ch = ch self.input_blocks.append( TimestepEmbedSequential( ResBlock( ch, time_embed_dim, dropout, out_channels=out_ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, down=True, ) if resblock_updown else Downsample( ch, conv_resample, dims=dims, out_channels=out_ch ) ) ) ch = out_ch input_block_chans.append(ch) ds *= 2 self._feature_size += ch if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels self.middle_block = TimestepEmbedSequential( ResBlock( ch, time_embed_dim, dropout, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ), AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads, num_head_channels=dim_head, use_new_attention_order=use_new_attention_order, ) if not use_spatial_transformer else SpatialTransformerNoContext( # always uses a self-attn ch, num_heads, dim_head, depth=transformer_depth ), ResBlock( ch, time_embed_dim, dropout, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ), ) self._feature_size += ch self.output_blocks = nn.ModuleList([]) for level, mult in list(enumerate(channel_mult))[::-1]: for i in 
range(self.num_res_blocks[level] + 1): ich = input_block_chans.pop() layers = [ ResBlock( ch + ich, time_embed_dim, dropout, out_channels=model_channels * mult, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ) ] ch = model_channels * mult if ds in attention_resolutions: if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels if not exists(num_attention_blocks) or i < num_attention_blocks[level]: layers.append( AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads_upsample, num_head_channels=dim_head, use_new_attention_order=use_new_attention_order, ) if not use_spatial_transformer else SpatialTransformerNoContext( ch, num_heads, dim_head, depth=transformer_depth, ) ) if level and i == self.num_res_blocks[level]: out_ch = ch layers.append( ResBlock( ch, time_embed_dim, dropout, out_channels=out_ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, up=True, ) if resblock_updown else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch) ) ds //= 2 self.output_blocks.append(TimestepEmbedSequential(*layers)) self._feature_size += ch self.out = nn.Sequential( normalization(ch), nn.SiLU(), zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)), ) if self.predict_codebook_ids: self.id_predictor = nn.Sequential( normalization(ch), conv_nd(dims, model_channels, n_embed, 1), #nn.LogSoftmax(dim=1) # change to cross_entropy and produce non-normalized logits ) def forward(self, x, timesteps): assert self.num_classes is None, \ "not supported" hs = [] t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) emb = self.time_embed(t_emb) h = x.type(self.dtype) for module in self.input_blocks: h = module(h, emb) hs.append(h) h = self.middle_block(h, emb) for module in self.output_blocks: h = th.cat([h, hs.pop()], dim=1) h = module(h, emb) h = h.type(x.dtype) if self.predict_codebook_ids: return self.id_predictor(h) else: return self.out(h) @register('openai_unet_nocontext_noatt') class UNetModelNoContextNoAtt(nn.Module): def __init__( self, in_channels, model_channels, out_channels, num_res_blocks, dropout=0, channel_mult=(1, 2, 4, 8), conv_resample=True, dims=2, num_classes=None, use_checkpoint=False, use_fp16=False, use_scale_shift_norm=False, resblock_updown=False, n_embed=None,): super().__init__() self.in_channels = in_channels self.model_channels = model_channels self.out_channels = out_channels if isinstance(num_res_blocks, int): self.num_res_blocks = len(channel_mult) * [num_res_blocks] else: if len(num_res_blocks) != len(channel_mult): raise ValueError("provide num_res_blocks either as an int (globally constant) or " "as a list/tuple (per-level) with the same length as channel_mult") self.num_res_blocks = num_res_blocks #self.num_res_blocks = num_res_blocks self.dropout = dropout self.channel_mult = channel_mult self.conv_resample = conv_resample self.num_classes = num_classes self.use_checkpoint = use_checkpoint self.dtype = th.float16 if use_fp16 else th.float32 self.predict_codebook_ids = n_embed is not None time_embed_dim = model_channels * 4 self.time_embed = nn.Sequential( linear(model_channels, time_embed_dim), nn.SiLU(), linear(time_embed_dim, time_embed_dim), ) if self.num_classes is not None: self.label_emb = nn.Embedding(num_classes, time_embed_dim) self.input_blocks = nn.ModuleList( [ 
TimestepEmbedSequential( conv_nd(dims, in_channels, model_channels, 3, padding=1) ) ] ) self._feature_size = model_channels input_block_chans = [model_channels] ch = model_channels ds = 1 for level, mult in enumerate(channel_mult): for nr in range(self.num_res_blocks[level]): layers = [ ResBlock( ch, time_embed_dim, dropout, out_channels=mult * model_channels, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ) ] ch = mult * model_channels self.input_blocks.append(TimestepEmbedSequential(*layers)) self._feature_size += ch input_block_chans.append(ch) if level != len(channel_mult) - 1: out_ch = ch self.input_blocks.append( TimestepEmbedSequential( ResBlock( ch, time_embed_dim, dropout, out_channels=out_ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, down=True, ) if resblock_updown else Downsample( ch, conv_resample, dims=dims, out_channels=out_ch ) ) ) ch = out_ch input_block_chans.append(ch) ds *= 2 self._feature_size += ch self.middle_block = TimestepEmbedSequential( ResBlock( ch, time_embed_dim, dropout, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ), ResBlock( ch, time_embed_dim, dropout, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ), ) self._feature_size += ch self.output_blocks = nn.ModuleList([]) for level, mult in list(enumerate(channel_mult))[::-1]: for i in range(self.num_res_blocks[level] + 1): ich = input_block_chans.pop() layers = [ ResBlock( ch + ich, time_embed_dim, dropout, out_channels=model_channels * mult, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ) ] ch = model_channels * mult if level and i == self.num_res_blocks[level]: out_ch = ch layers.append( ResBlock( ch, time_embed_dim, dropout, out_channels=out_ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, up=True, ) if resblock_updown else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch) ) ds //= 2 self.output_blocks.append(TimestepEmbedSequential(*layers)) self._feature_size += ch self.out = nn.Sequential( normalization(ch), nn.SiLU(), zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)), ) if self.predict_codebook_ids: self.id_predictor = nn.Sequential( normalization(ch), conv_nd(dims, model_channels, n_embed, 1), #nn.LogSoftmax(dim=1) # change to cross_entropy and produce non-normalized logits ) def forward(self, x, timesteps): assert self.num_classes is None, \ "not supported" hs = [] t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) emb = self.time_embed(t_emb) h = x.type(self.dtype) for module in self.input_blocks: h = module(h, emb) hs.append(h) h = self.middle_block(h, emb) for module in self.output_blocks: h = th.cat([h, hs.pop()], dim=1) h = module(h, emb) h = h.type(x.dtype) if self.predict_codebook_ids: return self.id_predictor(h) else: return self.out(h) @register('openai_unet_nocontext_noatt_decoderonly') class UNetModelNoContextNoAttDecoderOnly(nn.Module): def __init__( self, in_channels, out_channels, model_channels, num_res_blocks, dropout=0, channel_mult=(4, 2, 1), conv_resample=True, dims=2, num_classes=None, use_checkpoint=False, use_fp16=False, use_scale_shift_norm=False, resblock_updown=False, n_embed=None,): super().__init__() self.in_channels = in_channels self.model_channels = model_channels self.out_channels = out_channels if isinstance(num_res_blocks, int): self.num_res_blocks = len(channel_mult) * 
[num_res_blocks] else: if len(num_res_blocks) != len(channel_mult): raise ValueError("provide num_res_blocks either as an int (globally constant) or " "as a list/tuple (per-level) with the same length as channel_mult") self.num_res_blocks = num_res_blocks #self.num_res_blocks = num_res_blocks self.dropout = dropout self.channel_mult = channel_mult self.conv_resample = conv_resample self.num_classes = num_classes self.use_checkpoint = use_checkpoint self.dtype = th.float16 if use_fp16 else th.float32 self.predict_codebook_ids = n_embed is not None time_embed_dim = model_channels * 4 self.time_embed = nn.Sequential( linear(model_channels, time_embed_dim), nn.SiLU(), linear(time_embed_dim, time_embed_dim), ) if self.num_classes is not None: self.label_emb = nn.Embedding(num_classes, time_embed_dim) self._feature_size = model_channels ch = model_channels * self.channel_mult[0] self.output_blocks = nn.ModuleList( [ TimestepEmbedSequential( conv_nd(dims, in_channels, ch, 3, padding=1) ) ] ) for level, mult in enumerate(channel_mult): for i in range(self.num_res_blocks[level]): layers = [ ResBlock( ch, time_embed_dim, dropout, out_channels=model_channels * mult, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ) ] ch = model_channels * mult if level != len(channel_mult)-1 and (i == self.num_res_blocks[level]-1): out_ch = ch layers.append( ResBlock( ch, time_embed_dim, dropout, out_channels=out_ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, up=True, ) if resblock_updown else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch) ) self.output_blocks.append(TimestepEmbedSequential(*layers)) self.out = nn.Sequential( normalization(ch), nn.SiLU(), zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)), ) if self.predict_codebook_ids: self.id_predictor = nn.Sequential( normalization(ch), conv_nd(dims, model_channels, n_embed, 1), #nn.LogSoftmax(dim=1) # change to cross_entropy and produce non-normalized logits ) def forward(self, x, timesteps): assert self.num_classes is None, \ "not supported" hs = [] t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) emb = self.time_embed(t_emb) h = x.type(self.dtype) for module in self.output_blocks: h = module(h, emb) h = h.type(x.dtype) if self.predict_codebook_ids: return self.id_predictor(h) else: return self.out(h) ######################### # Double Attention Unet # ######################### from .attention import DualSpatialTransformer class TimestepEmbedSequentialExtended(nn.Sequential, TimestepBlock): def forward(self, x, emb, context=None, which_attn=None): for layer in self: if isinstance(layer, TimestepBlock): x = layer(x, emb) elif isinstance(layer, SpatialTransformer): x = layer(x, context) elif isinstance(layer, DualSpatialTransformer): x = layer(x, context, which=which_attn) else: x = layer(x) return x @register('openai_unet_dual_context') class UNetModelDualContext(nn.Module): def __init__( self, image_size, in_channels, model_channels, out_channels, num_res_blocks, attention_resolutions, dropout=0, channel_mult=(1, 2, 4, 8), conv_resample=True, dims=2, num_classes=None, use_checkpoint=False, use_fp16=False, num_heads=-1, num_head_channels=-1, num_heads_upsample=-1, use_scale_shift_norm=False, resblock_updown=False, use_new_attention_order=False, use_spatial_transformer=False, # custom transformer support transformer_depth=1, # custom transformer support context_dim=None, # custom transformer support n_embed=None, # custom 
support for prediction of discrete ids into codebook of first stage vq model legacy=True, disable_self_attentions=None, num_attention_blocks=None, ): super().__init__() if use_spatial_transformer: assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...' if context_dim is not None: assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...' from omegaconf.listconfig import ListConfig if type(context_dim) == ListConfig: context_dim = list(context_dim) if num_heads_upsample == -1: num_heads_upsample = num_heads if num_heads == -1: assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set' if num_head_channels == -1: assert num_heads != -1, 'Either num_heads or num_head_channels has to be set' self.image_size = image_size self.in_channels = in_channels self.model_channels = model_channels self.out_channels = out_channels if isinstance(num_res_blocks, int): self.num_res_blocks = len(channel_mult) * [num_res_blocks] else: if len(num_res_blocks) != len(channel_mult): raise ValueError("provide num_res_blocks either as an int (globally constant) or " "as a list/tuple (per-level) with the same length as channel_mult") self.num_res_blocks = num_res_blocks #self.num_res_blocks = num_res_blocks if disable_self_attentions is not None: # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not assert len(disable_self_attentions) == len(channel_mult) if num_attention_blocks is not None: assert len(num_attention_blocks) == len(self.num_res_blocks) assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks)))) print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. 
" f"This option has LESS priority than attention_resolutions {attention_resolutions}, " f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, " f"attention will still not be set.") # todo: convert to warning self.attention_resolutions = attention_resolutions self.dropout = dropout self.channel_mult = channel_mult self.conv_resample = conv_resample self.num_classes = num_classes self.use_checkpoint = use_checkpoint self.dtype = th.float16 if use_fp16 else th.float32 self.num_heads = num_heads self.num_head_channels = num_head_channels self.num_heads_upsample = num_heads_upsample self.predict_codebook_ids = n_embed is not None time_embed_dim = model_channels * 4 self.time_embed = nn.Sequential( linear(model_channels, time_embed_dim), nn.SiLU(), linear(time_embed_dim, time_embed_dim), ) if self.num_classes is not None: self.label_emb = nn.Embedding(num_classes, time_embed_dim) self.input_blocks = nn.ModuleList( [ TimestepEmbedSequentialExtended( conv_nd(dims, in_channels, model_channels, 3, padding=1) ) ] ) self._feature_size = model_channels input_block_chans = [model_channels] ch = model_channels ds = 1 for level, mult in enumerate(channel_mult): for nr in range(self.num_res_blocks[level]): layers = [ ResBlock( ch, time_embed_dim, dropout, out_channels=mult * model_channels, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ) ] ch = mult * model_channels if ds in attention_resolutions: if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels if disable_self_attentions is not None: disabled_sa = disable_self_attentions[level] else: disabled_sa = False if num_attention_blocks is None or nr < num_attention_blocks[level]: layers.append( AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads, num_head_channels=dim_head, use_new_attention_order=use_new_attention_order, ) if not use_spatial_transformer else DualSpatialTransformer( ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, disable_self_attn=disabled_sa ) ) self.input_blocks.append(TimestepEmbedSequentialExtended(*layers)) self._feature_size += ch input_block_chans.append(ch) if level != len(channel_mult) - 1: out_ch = ch self.input_blocks.append( TimestepEmbedSequentialExtended( ResBlock( ch, time_embed_dim, dropout, out_channels=out_ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, down=True, ) if resblock_updown else Downsample( ch, conv_resample, dims=dims, out_channels=out_ch ) ) ) ch = out_ch input_block_chans.append(ch) ds *= 2 self._feature_size += ch if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels self.middle_block = TimestepEmbedSequentialExtended( ResBlock( ch, time_embed_dim, dropout, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ), AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads, num_head_channels=dim_head, use_new_attention_order=use_new_attention_order, ) if not use_spatial_transformer else DualSpatialTransformer( # always uses a self-attn ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim ), ResBlock( ch, time_embed_dim, dropout, dims=dims, 
use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ), ) self._feature_size += ch self.output_blocks = nn.ModuleList([]) for level, mult in list(enumerate(channel_mult))[::-1]: for i in range(self.num_res_blocks[level] + 1): ich = input_block_chans.pop() layers = [ ResBlock( ch + ich, time_embed_dim, dropout, out_channels=model_channels * mult, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ) ] ch = model_channels * mult if ds in attention_resolutions: if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels if disable_self_attentions is not None: disabled_sa = disable_self_attentions[level] else: disabled_sa = False if num_attention_blocks is None or i < num_attention_blocks[level]: layers.append( AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads_upsample, num_head_channels=dim_head, use_new_attention_order=use_new_attention_order, ) if not use_spatial_transformer else DualSpatialTransformer( ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, disable_self_attn=disabled_sa ) ) if level and i == self.num_res_blocks[level]: out_ch = ch layers.append( ResBlock( ch, time_embed_dim, dropout, out_channels=out_ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, up=True, ) if resblock_updown else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch) ) ds //= 2 self.output_blocks.append(TimestepEmbedSequentialExtended(*layers)) self._feature_size += ch self.out = nn.Sequential( normalization(ch), nn.SiLU(), zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)), ) if self.predict_codebook_ids: self.id_predictor = nn.Sequential( normalization(ch), conv_nd(dims, model_channels, n_embed, 1), #nn.LogSoftmax(dim=1) # change to cross_entropy and produce non-normalized logits ) def forward(self, x, timesteps=None, context=None, y=None, which_attn=None, **kwargs): """ Apply the model to an input batch. :param x: an [N x C x ...] Tensor of inputs. :param timesteps: a 1-D batch of timesteps. :param context: conditioning plugged in via crossattn :param y: an [N] Tensor of labels, if class-conditional. :return: an [N x C x ...] Tensor of outputs. 
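    :param which_attn: forwarded as the `which` argument to each DualSpatialTransformer
        layer via TimestepEmbedSequentialExtended; the accepted values are defined by
        DualSpatialTransformer in .attention.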
""" assert (y is not None) == ( self.num_classes is not None ), "must specify y if and only if the model is class-conditional" hs = [] t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) t_emb = t_emb.to(context.dtype) emb = self.time_embed(t_emb) if self.num_classes is not None: assert y.shape == (x.shape[0],) emb = emb + self.label_emb(y) h = x.type(self.dtype) for module in self.input_blocks: h = module(h, emb, context, which_attn=which_attn) hs.append(h) h = self.middle_block(h, emb, context, which_attn=which_attn) for module in self.output_blocks: h = th.cat([h, hs.pop()], dim=1) h = module(h, emb, context, which_attn=which_attn) h = h.type(x.dtype) if self.predict_codebook_ids: return self.id_predictor(h) else: return self.out(h) ########### # VD Unet # ########### from functools import partial @register('openai_unet_2d') class UNetModel2D(nn.Module): def __init__(self, input_channels, model_channels, output_channels, context_dim=768, num_noattn_blocks=(2, 2, 2, 2), channel_mult=(1, 2, 4, 8), with_attn=[True, True, True, False], num_heads=8, use_checkpoint=True, ): super().__init__() ResBlockPreset = partial( ResBlock, dropout=0, dims=2, use_checkpoint=use_checkpoint, use_scale_shift_norm=False) self.input_channels = input_channels self.model_channels = model_channels self.num_noattn_blocks = num_noattn_blocks self.channel_mult = channel_mult self.num_heads = num_heads ################## # Time embedding # ################## time_embed_dim = model_channels * 4 self.time_embed = nn.Sequential( linear(model_channels, time_embed_dim), nn.SiLU(), linear(time_embed_dim, time_embed_dim),) ################ # input_blocks # ################ current_channel = model_channels input_blocks = [ TimestepEmbedSequential( nn.Conv2d(input_channels, model_channels, 3, padding=1, bias=True))] input_block_channels = [current_channel] for level_idx, mult in enumerate(channel_mult): for _ in range(self.num_noattn_blocks[level_idx]): layers = [ ResBlockPreset( current_channel, time_embed_dim, out_channels = mult * model_channels,)] current_channel = mult * model_channels dim_head = current_channel // num_heads if with_attn[level_idx]: layers += [ SpatialTransformer( current_channel, num_heads, dim_head, depth=1, context_dim=context_dim, )] input_blocks += [TimestepEmbedSequential(*layers)] input_block_channels.append(current_channel) if level_idx != len(channel_mult) - 1: input_blocks += [ TimestepEmbedSequential( Downsample( current_channel, use_conv=True, dims=2, out_channels=current_channel,))] input_block_channels.append(current_channel) self.input_blocks = nn.ModuleList(input_blocks) ################# # middle_blocks # ################# middle_block = [ ResBlockPreset( current_channel, time_embed_dim,), SpatialTransformer( current_channel, num_heads, dim_head, depth=1, context_dim=context_dim, ), ResBlockPreset( current_channel, time_embed_dim,),] self.middle_block = TimestepEmbedSequential(*middle_block) ################# # output_blocks # ################# output_blocks = [] for level_idx, mult in list(enumerate(channel_mult))[::-1]: for block_idx in range(self.num_noattn_blocks[level_idx] + 1): extra_channel = input_block_channels.pop() layers = [ ResBlockPreset( current_channel + extra_channel, time_embed_dim, out_channels = model_channels * mult,) ] current_channel = model_channels * mult dim_head = current_channel // num_heads if with_attn[level_idx]: layers += [ SpatialTransformer( current_channel, num_heads, dim_head, depth=1, context_dim=context_dim,)] if 
level_idx!=0 and block_idx==self.num_noattn_blocks[level_idx]: layers += [ Upsample( current_channel, use_conv=True, dims=2, out_channels=current_channel)] output_blocks += [TimestepEmbedSequential(*layers)] self.output_blocks = nn.ModuleList(output_blocks) self.out = nn.Sequential( normalization(current_channel), nn.SiLU(), zero_module(nn.Conv2d(model_channels, output_channels, 3, padding=1)),) def forward(self, x, timesteps=None, context=None): hs = [] t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) emb = self.time_embed(t_emb) h = x for module in self.input_blocks: h = module(h, emb, context) hs.append(h) h = self.middle_block(h, emb, context) for module in self.output_blocks: h = th.cat([h, hs.pop()], dim=1) h = module(h, emb, context) return self.out(h) class FCBlock(TimestepBlock): def __init__( self, channels, emb_channels, dropout, out_channels=None, use_checkpoint=False, ): super().__init__() self.channels = channels self.emb_channels = emb_channels self.dropout = dropout self.out_channels = out_channels or channels self.use_checkpoint = use_checkpoint self.in_layers = nn.Sequential( normalization(channels), nn.SiLU(), nn.Conv2d(channels, self.out_channels, 1, padding=0),) self.emb_layers = nn.Sequential( nn.SiLU(), linear(emb_channels, self.out_channels,),) self.out_layers = nn.Sequential( normalization(self.out_channels), nn.SiLU(), nn.Dropout(p=dropout), zero_module(nn.Conv2d(self.out_channels, self.out_channels, 1, padding=0)), ) if self.out_channels == channels: self.skip_connection = nn.Identity() else: self.skip_connection = nn.Conv2d(channels, self.out_channels, 1, padding=0) def forward(self, x, emb): if len(x.shape) == 2: x = x[:, :, None, None] elif len(x.shape) == 4: pass else: raise ValueError y = checkpoint( self._forward, (x, emb), self.parameters(), self.use_checkpoint) if len(x.shape) == 2: return y[:, :, 0, 0] elif len(x.shape) == 4: return y def _forward(self, x, emb): h = self.in_layers(x) emb_out = self.emb_layers(emb).type(h.dtype) while len(emb_out.shape) < len(h.shape): emb_out = emb_out[..., None] h = h + emb_out h = self.out_layers(h) return self.skip_connection(x) + h @register('openai_unet_0d') class UNetModel0D(nn.Module): def __init__(self, input_channels, model_channels, output_channels, context_dim=768, num_noattn_blocks=(2, 2, 2, 2), channel_mult=(1, 2, 4, 8), with_attn=[True, True, True, False], num_heads=8, use_checkpoint=True, ): super().__init__() FCBlockPreset = partial(FCBlock, dropout=0, use_checkpoint=use_checkpoint) self.input_channels = input_channels self.model_channels = model_channels self.num_noattn_blocks = num_noattn_blocks self.channel_mult = channel_mult self.num_heads = num_heads ################## # Time embedding # ################## time_embed_dim = model_channels * 4 self.time_embed = nn.Sequential( linear(model_channels, time_embed_dim), nn.SiLU(), linear(time_embed_dim, time_embed_dim),) ################ # input_blocks # ################ current_channel = model_channels input_blocks = [ TimestepEmbedSequential( nn.Conv2d(input_channels, model_channels, 1, padding=0, bias=True))] input_block_channels = [current_channel] for level_idx, mult in enumerate(channel_mult): for _ in range(self.num_noattn_blocks[level_idx]): layers = [ FCBlockPreset( current_channel, time_embed_dim, out_channels = mult * model_channels,)] current_channel = mult * model_channels dim_head = current_channel // num_heads if with_attn[level_idx]: layers += [ SpatialTransformer( current_channel, num_heads, dim_head, depth=1, 
context_dim=context_dim, )] input_blocks += [TimestepEmbedSequential(*layers)] input_block_channels.append(current_channel) if level_idx != len(channel_mult) - 1: input_blocks += [ TimestepEmbedSequential( Downsample( current_channel, use_conv=True, dims=2, out_channels=current_channel,))] input_block_channels.append(current_channel) self.input_blocks = nn.ModuleList(input_blocks) ################# # middle_blocks # ################# middle_block = [ FCBlockPreset( current_channel, time_embed_dim,), SpatialTransformer( current_channel, num_heads, dim_head, depth=1, context_dim=context_dim, ), FCBlockPreset( current_channel, time_embed_dim,),] self.middle_block = TimestepEmbedSequential(*middle_block) ################# # output_blocks # ################# output_blocks = [] for level_idx, mult in list(enumerate(channel_mult))[::-1]: for block_idx in range(self.num_noattn_blocks[level_idx] + 1): extra_channel = input_block_channels.pop() layers = [ FCBlockPreset( current_channel + extra_channel, time_embed_dim, out_channels = model_channels * mult,) ] current_channel = model_channels * mult dim_head = current_channel // num_heads if with_attn[level_idx]: layers += [ SpatialTransformer( current_channel, num_heads, dim_head, depth=1, context_dim=context_dim,)] if level_idx!=0 and block_idx==self.num_noattn_blocks[level_idx]: layers += [ nn.Conv2d(current_channel, current_channel, 1, padding=0)] output_blocks += [TimestepEmbedSequential(*layers)] self.output_blocks = nn.ModuleList(output_blocks) self.out = nn.Sequential( normalization(current_channel), nn.SiLU(), zero_module(nn.Conv2d(model_channels, output_channels, 1, padding=0)),) def forward(self, x, timesteps=None, context=None): hs = [] t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) emb = self.time_embed(t_emb) h = x for module in self.input_blocks: h = module(h, emb, context) hs.append(h) h = self.middle_block(h, emb, context) for module in self.output_blocks: h = th.cat([h, hs.pop()], dim=1) h = module(h, emb, context) return self.out(h) class Linear_MultiDim(nn.Linear): def __init__(self, in_features, out_features, *args, **kwargs): in_features = [in_features] if isinstance(in_features, int) else list(in_features) out_features = [out_features] if isinstance(out_features, int) else list(out_features) self.in_features_multidim = in_features self.out_features_multidim = out_features super().__init__( np.array(in_features).prod(), np.array(out_features).prod(), *args, **kwargs) def forward(self, x): shape = x.shape n = len(shape) - len(self.in_features_multidim) x = x.view(*shape[:n], self.in_features) y = super().forward(x) y = y.view(*shape[:n], *self.out_features_multidim) return y class FCBlock_MultiDim(FCBlock): def __init__( self, channels, emb_channels, dropout, out_channels=None, use_checkpoint=False,): channels = [channels] if isinstance(channels, int) else list(channels) channels_all = np.array(channels).prod() self.channels_multidim = channels if out_channels is not None: out_channels = [out_channels] if isinstance(out_channels, int) else list(out_channels) out_channels_all = np.array(out_channels).prod() self.out_channels_multidim = out_channels else: out_channels_all = channels_all self.out_channels_multidim = self.channels_multidim self.channels = channels super().__init__( channels = channels_all, emb_channels = emb_channels, dropout = dropout, out_channels = out_channels_all, use_checkpoint = use_checkpoint,) def forward(self, x, emb): shape = x.shape n = len(self.channels_multidim) x = 
x.view(*shape[0:-n], self.channels, 1, 1) x = x.view(-1, self.channels, 1, 1) y = checkpoint( self._forward, (x, emb), self.parameters(), self.use_checkpoint) y = y.view(*shape[0:-n], -1) y = y.view(*shape[0:-n], *self.out_channels_multidim) return y @register('openai_unet_0dmd') class UNetModel0D_MultiDim(nn.Module): def __init__(self, input_channels, model_channels, output_channels, context_dim=768, num_noattn_blocks=(2, 2, 2, 2), channel_mult=(1, 2, 4, 8), second_dim=(4, 4, 4, 4), with_attn=[True, True, True, False], num_heads=8, use_checkpoint=True, ): super().__init__() FCBlockPreset = partial(FCBlock_MultiDim, dropout=0, use_checkpoint=use_checkpoint) self.input_channels = input_channels self.model_channels = model_channels self.num_noattn_blocks = num_noattn_blocks self.channel_mult = channel_mult self.second_dim = second_dim self.num_heads = num_heads ################## # Time embedding # ################## time_embed_dim = model_channels * 4 self.time_embed = nn.Sequential( linear(model_channels, time_embed_dim), nn.SiLU(), linear(time_embed_dim, time_embed_dim),) ################ # input_blocks # ################ sdim = second_dim[0] current_channel = [model_channels, sdim, 1] input_blocks = [ TimestepEmbedSequential( Linear_MultiDim([input_channels, 1, 1], current_channel, bias=True))] input_block_channels = [current_channel] for level_idx, (mult, sdim) in enumerate(zip(channel_mult, second_dim)): for _ in range(self.num_noattn_blocks[level_idx]): layers = [ FCBlockPreset( current_channel, time_embed_dim, out_channels = [mult*model_channels, sdim, 1],)] current_channel = [mult*model_channels, sdim, 1] dim_head = current_channel[0] // num_heads if with_attn[level_idx]: layers += [ SpatialTransformer( current_channel[0], num_heads, dim_head, depth=1, context_dim=context_dim, )] input_blocks += [TimestepEmbedSequential(*layers)] input_block_channels.append(current_channel) if level_idx != len(channel_mult) - 1: input_blocks += [ TimestepEmbedSequential( Linear_MultiDim(current_channel, current_channel, bias=True, ))] input_block_channels.append(current_channel) self.input_blocks = nn.ModuleList(input_blocks) ################# # middle_blocks # ################# middle_block = [ FCBlockPreset( current_channel, time_embed_dim, ), SpatialTransformer( current_channel[0], num_heads, dim_head, depth=1, context_dim=context_dim, ), FCBlockPreset( current_channel, time_embed_dim, ),] self.middle_block = TimestepEmbedSequential(*middle_block) ################# # output_blocks # ################# output_blocks = [] for level_idx, (mult, sdim) in list(enumerate(zip(channel_mult, second_dim)))[::-1]: for block_idx in range(self.num_noattn_blocks[level_idx] + 1): extra_channel = input_block_channels.pop() layers = [ FCBlockPreset( [current_channel[0] + extra_channel[0]] + current_channel[1:], time_embed_dim, out_channels = [mult*model_channels, sdim, 1], )] current_channel = [mult*model_channels, sdim, 1] dim_head = current_channel[0] // num_heads if with_attn[level_idx]: layers += [ SpatialTransformer( current_channel[0], num_heads, dim_head, depth=1, context_dim=context_dim,)] if level_idx!=0 and block_idx==self.num_noattn_blocks[level_idx]: layers += [ Linear_MultiDim(current_channel, current_channel, bias=True, )] output_blocks += [TimestepEmbedSequential(*layers)] self.output_blocks = nn.ModuleList(output_blocks) self.out = nn.Sequential( normalization(current_channel[0]), nn.SiLU(), zero_module(Linear_MultiDim(current_channel, [output_channels, 1, 1], bias=True, )),) def forward(self, x, 
timesteps=None, context=None): hs = [] t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) emb = self.time_embed(t_emb) h = x for module in self.input_blocks: h = module(h, emb, context) hs.append(h) h = self.middle_block(h, emb, context) for module in self.output_blocks: h = th.cat([h, hs.pop()], dim=1) h = module(h, emb, context) return self.out(h) @register('openai_unet_vd') class UNetModelVD(nn.Module): def __init__(self, unet_image_cfg, unet_text_cfg, ): super().__init__() self.unet_image = get_model()(unet_image_cfg) self.unet_text = get_model()(unet_text_cfg) self.time_embed = self.unet_image.time_embed del self.unet_image.time_embed del self.unet_text.time_embed self.model_channels = self.unet_image.model_channels def forward(self, x, timesteps, context, xtype='image', ctype='prompt'): hs = [] t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) emb = self.time_embed(t_emb.to(x.dtype)) if xtype == 'text': x = x[:, :, None, None] h = x for i_module, t_module in zip(self.unet_image.input_blocks, self.unet_text.input_blocks): h = self.mixed_run(i_module, t_module, h, emb, context, xtype, ctype) hs.append(h) h = self.mixed_run( self.unet_image.middle_block, self.unet_text.middle_block, h, emb, context, xtype, ctype) for i_module, t_module in zip(self.unet_image.output_blocks, self.unet_text.output_blocks): h = th.cat([h, hs.pop()], dim=1) h = self.mixed_run(i_module, t_module, h, emb, context, xtype, ctype) if xtype == 'image': return self.unet_image.out(h) elif xtype == 'text': return self.unet_text.out(h).squeeze(-1).squeeze(-1) def mixed_run(self, inet, tnet, x, emb, context, xtype, ctype): h = x for ilayer, tlayer in zip(inet, tnet): if isinstance(ilayer, TimestepBlock) and xtype=='image': h = ilayer(h, emb) elif isinstance(tlayer, TimestepBlock) and xtype=='text': h = tlayer(h, emb) elif isinstance(ilayer, SpatialTransformer) and ctype=='vision': h = ilayer(h, context) elif isinstance(ilayer, SpatialTransformer) and ctype=='prompt': h = tlayer(h, context) elif xtype=='image': h = ilayer(h) elif xtype == 'text': h = tlayer(h) else: raise ValueError return h def forward_dc(self, x, timesteps, c0, c1, xtype, c0_type, c1_type, mixed_ratio): hs = [] t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) emb = self.time_embed(t_emb.to(x.dtype)) if xtype == 'text': x = x[:, :, None, None] h = x for i_module, t_module in zip(self.unet_image.input_blocks, self.unet_text.input_blocks): h = self.mixed_run_dc(i_module, t_module, h, emb, c0, c1, xtype, c0_type, c1_type, mixed_ratio) hs.append(h) h = self.mixed_run_dc( self.unet_image.middle_block, self.unet_text.middle_block, h, emb, c0, c1, xtype, c0_type, c1_type, mixed_ratio) for i_module, t_module in zip(self.unet_image.output_blocks, self.unet_text.output_blocks): h = th.cat([h, hs.pop()], dim=1) h = self.mixed_run_dc(i_module, t_module, h, emb, c0, c1, xtype, c0_type, c1_type, mixed_ratio) if xtype == 'image': return self.unet_image.out(h) elif xtype == 'text': return self.unet_text.out(h).squeeze(-1).squeeze(-1) def mixed_run_dc(self, inet, tnet, x, emb, c0, c1, xtype, c0_type, c1_type, mixed_ratio): h = x for ilayer, tlayer in zip(inet, tnet): if isinstance(ilayer, TimestepBlock) and xtype=='image': h = ilayer(h, emb) elif isinstance(tlayer, TimestepBlock) and xtype=='text': h = tlayer(h, emb) elif isinstance(ilayer, SpatialTransformer): h0 = ilayer(h, c0)-h if c0_type=='vision' else tlayer(h, c0)-h h1 = ilayer(h, c1)-h if c1_type=='vision' else tlayer(h, c1)-h h 
= h0*mixed_ratio + h1*(1-mixed_ratio) + h # h = ilayer(h, c0) elif xtype=='image': h = ilayer(h) elif xtype == 'text': h = tlayer(h) else: raise ValueError return h ################ # VD Next Unet # ################ from functools import partial import copy @register('openai_unet_2d_next') class UNetModel2D_Next(nn.Module): def __init__( self, in_channels, model_channels, out_channels, num_res_blocks, attention_resolutions, context_dim, dropout=0, channel_mult=(1, 2, 4, 8), conv_resample=True, use_checkpoint=False, num_heads=8, num_head_channels=None, parts = ['global', 'data', 'context']): super().__init__() self.in_channels = in_channels self.model_channels = model_channels self.out_channels = out_channels if isinstance(num_res_blocks, int): self.num_res_blocks = len(channel_mult) * [num_res_blocks] else: if len(num_res_blocks) != len(channel_mult): raise ValueError("provide num_res_blocks either as an int (globally constant) or " "as a list/tuple (per-level) with the same length as channel_mult") self.num_res_blocks = num_res_blocks self.attention_resolutions = attention_resolutions self.context_dim = context_dim self.dropout = dropout self.channel_mult = channel_mult self.conv_resample = conv_resample self.use_checkpoint = use_checkpoint self.num_heads = num_heads self.num_head_channels = num_head_channels assert (num_heads is None) + (num_head_channels is None) == 1, \ "One of num_heads or num_head_channels need to be set" self.parts = parts if isinstance(parts, list) else [parts] self.glayer_included = 'global' in self.parts self.dlayer_included = 'data' in self.parts self.clayer_included = 'context' in self.parts self.layer_sequence_ordering = [] ################# # global layers # ################# time_embed_dim = model_channels * 4 if self.glayer_included: self.time_embed = nn.Sequential( linear(model_channels, time_embed_dim), nn.SiLU(), linear(time_embed_dim, time_embed_dim), ) ################ # input layers # ################ if self.dlayer_included: self.data_blocks = nn.ModuleList([]) ResBlockDefault = partial( ResBlock, emb_channels=time_embed_dim, dropout=dropout, dims=2, use_checkpoint=use_checkpoint, use_scale_shift_norm=False, ) else: def dummy(*args, **kwargs): return None ResBlockDefault = dummy if self.clayer_included: self.context_blocks = nn.ModuleList([]) CrossAttnDefault = partial( SpatialTransformer, context_dim=context_dim, disable_self_attn=False, ) else: def dummy(*args, **kwargs): return None CrossAttnDefault = dummy self.add_data_layer(conv_nd(2, in_channels, model_channels, 3, padding=1)) self.layer_sequence_ordering.append('save_hidden_feature') input_block_chans = [model_channels] ch = model_channels ds = 1 for level, mult in enumerate(channel_mult): for _ in range(self.num_res_blocks[level]): layer = ResBlockDefault( channels=ch, out_channels=mult*model_channels,) self.add_data_layer(layer) ch = mult * model_channels if (ds in attention_resolutions): d_head, n_heads = self.get_d_head_n_heads(ch) layer = CrossAttnDefault( in_channels=ch, d_head=d_head, n_heads=n_heads,) self.add_context_layer(layer) input_block_chans.append(ch) self.layer_sequence_ordering.append('save_hidden_feature') if level != len(channel_mult) - 1: layer = Downsample( ch, use_conv=True, dims=2, out_channels=ch) self.add_data_layer(layer) input_block_chans.append(ch) self.layer_sequence_ordering.append('save_hidden_feature') ds *= 2 self.i_order = copy.deepcopy(self.layer_sequence_ordering) self.layer_sequence_ordering = [] ################# # middle layers # ################# 
self.add_data_layer(ResBlockDefault(channels=ch)) d_head, n_heads = self.get_d_head_n_heads(ch) self.add_context_layer(CrossAttnDefault(in_channels=ch, d_head=d_head, n_heads=n_heads)) self.add_data_layer(ResBlockDefault(channels=ch)) self.m_order = copy.deepcopy(self.layer_sequence_ordering) self.layer_sequence_ordering = [] ################# # output layers # ################# for level, mult in list(enumerate(channel_mult))[::-1]: for _ in range(self.num_res_blocks[level] + 1): self.layer_sequence_ordering.append('load_hidden_feature') ich = input_block_chans.pop() layer = ResBlockDefault( channels=ch+ich, out_channels=model_channels*mult,) ch = model_channels * mult self.add_data_layer(layer) if ds in attention_resolutions: d_head, n_heads = self.get_d_head_n_heads(ch) layer = CrossAttnDefault( in_channels=ch, d_head=d_head, n_heads=n_heads) self.add_context_layer(layer) if level != 0: layer = Upsample(ch, conv_resample, dims=2, out_channels=ch) self.add_data_layer(layer) ds //= 2 layer = nn.Sequential( normalization(ch), nn.SiLU(), zero_module(conv_nd(2, model_channels, out_channels, 3, padding=1)), ) self.add_data_layer(layer) self.o_order = copy.deepcopy(self.layer_sequence_ordering) self.layer_order = copy.deepcopy(self.i_order + self.m_order + self.o_order) del self.layer_sequence_ordering self.parameter_group = {} if self.glayer_included: self.parameter_group['global'] = self.time_embed if self.dlayer_included: self.parameter_group['data'] = self.data_blocks if self.clayer_included: self.parameter_group['context'] = self.context_blocks def get_d_head_n_heads(self, ch): if self.num_head_channels is None: d_head = ch // self.num_heads n_heads = self.num_heads else: d_head = self.num_head_channels n_heads = ch // self.num_head_channels return d_head, n_heads def add_data_layer(self, layer): if self.dlayer_included: if not isinstance(layer, (list, tuple)): layer = [layer] self.data_blocks.append(TimestepEmbedSequential(*layer)) self.layer_sequence_ordering.append('d') def add_context_layer(self, layer): if self.clayer_included: if not isinstance(layer, (list, tuple)): layer = [layer] self.context_blocks.append(TimestepEmbedSequential(*layer)) self.layer_sequence_ordering.append('c') def forward(self, x, timesteps, context): hs = [] t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) emb = self.time_embed(t_emb) d_iter = iter(self.data_blocks) c_iter = iter(self.context_blocks) h = x for ltype in self.i_order: if ltype == 'd': module = next(d_iter) h = module(h, emb, context) elif ltype == 'c': module = next(c_iter) h = module(h, emb, context) elif ltype == 'save_hidden_feature': hs.append(h) for ltype in self.m_order: if ltype == 'd': module = next(d_iter) h = module(h, emb, context) elif ltype == 'c': module = next(c_iter) h = module(h, emb, context) for ltype in self.i_order: if ltype == 'load_hidden_feature': h = th.cat([h, hs.pop()], dim=1) elif ltype == 'd': module = next(d_iter) h = module(h, emb, context) elif ltype == 'c': module = next(c_iter) h = module(h, emb, context) o = h return o @register('openai_unet_0d_next') class UNetModel0D_Next(UNetModel2D_Next): def __init__( self, input_channels, model_channels, output_channels, context_dim = 788, num_noattn_blocks=(2, 2, 2, 2), channel_mult=(1, 2, 4, 8), second_dim=(4, 4, 4, 4), with_attn=[True, True, True, False], num_heads=8, num_head_channels=None, use_checkpoint=False, parts = ['global', 'data', 'context']): super(UNetModel2D_Next, self).__init__() self.input_channels = input_channels 
self.model_channels = model_channels self.output_channels = output_channels self.num_noattn_blocks = num_noattn_blocks self.channel_mult = channel_mult self.second_dim = second_dim self.with_attn = with_attn self.num_heads = num_heads self.num_head_channels = num_head_channels self.parts = parts if isinstance(parts, list) else [parts] self.glayer_included = 'global' in self.parts self.dlayer_included = 'data' in self.parts self.clayer_included = 'context' in self.parts self.layer_sequence_ordering = [] ################# # global layers # ################# time_embed_dim = model_channels * 4 if self.glayer_included: self.time_embed = nn.Sequential( linear(model_channels, time_embed_dim), nn.SiLU(), linear(time_embed_dim, time_embed_dim), ) ################ # input layers # ################ if self.dlayer_included: self.data_blocks = nn.ModuleList([]) FCBlockDefault = partial( FCBlock_MultiDim, dropout=0, use_checkpoint=use_checkpoint) else: def dummy(*args, **kwargs): return None FCBlockDefault = dummy if self.clayer_included: self.context_blocks = nn.ModuleList([]) CrossAttnDefault = partial( SpatialTransformer, context_dim=context_dim, disable_self_attn=False, ) else: def dummy(*args, **kwargs): return None CrossAttnDefault = dummy sdim = second_dim[0] current_channel = [model_channels, sdim, 1] one_layer = Linear_MultiDim([input_channels], current_channel, bias=True) self.add_data_layer(one_layer) self.layer_sequence_ordering.append('save_hidden_feature') input_block_channels = [current_channel] for level_idx, (mult, sdim) in enumerate(zip(channel_mult, second_dim)): for _ in range(self.num_noattn_blocks[level_idx]): layer = FCBlockDefault( current_channel, time_embed_dim, out_channels = [mult*model_channels, sdim, 1],) self.add_data_layer(layer) current_channel = [mult*model_channels, sdim, 1] if with_attn[level_idx]: d_head, n_heads = self.get_d_head_n_heads(current_channel[0]) layer = CrossAttnDefault( in_channels=current_channel[0], d_head=d_head, n_heads=n_heads,) self.add_context_layer(layer) input_block_channels.append(current_channel) self.layer_sequence_ordering.append('save_hidden_feature') if level_idx != len(channel_mult) - 1: layer = Linear_MultiDim(current_channel, current_channel, bias=True,) self.add_data_layer(layer) input_block_channels.append(current_channel) self.layer_sequence_ordering.append('save_hidden_feature') self.i_order = copy.deepcopy(self.layer_sequence_ordering) self.layer_sequence_ordering = [] ################# # middle layers # ################# self.add_data_layer(FCBlockDefault(current_channel, time_embed_dim, )) d_head, n_heads = self.get_d_head_n_heads(current_channel[0]) self.add_context_layer(CrossAttnDefault(in_channels=current_channel[0], d_head=d_head, n_heads=n_heads)) self.add_data_layer(FCBlockDefault(current_channel, time_embed_dim, )) self.m_order = copy.deepcopy(self.layer_sequence_ordering) self.layer_sequence_ordering = [] ################# # output layers # ################# for level_idx, (mult, sdim) in list(enumerate(zip(channel_mult, second_dim)))[::-1]: for _ in range(self.num_noattn_blocks[level_idx] + 1): self.layer_sequence_ordering.append('load_hidden_feature') extra_channel = input_block_channels.pop() layer = FCBlockDefault( [current_channel[0] + extra_channel[0]] + current_channel[1:], time_embed_dim, out_channels = [mult*model_channels, sdim, 1], ) self.add_data_layer(layer) current_channel = [mult*model_channels, sdim, 1] if with_attn[level_idx]: d_head, n_heads = self.get_d_head_n_heads(current_channel[0]) layer = 
CrossAttnDefault( in_channels=current_channel[0], d_head=d_head, n_heads=n_heads) self.add_context_layer(layer) if level_idx != 0: layer = Linear_MultiDim(current_channel, current_channel, bias=True, ) self.add_data_layer(layer) layer = nn.Sequential( normalization(current_channel[0]), nn.SiLU(), zero_module(Linear_MultiDim(current_channel, [output_channels], bias=True, )), ) self.add_data_layer(layer) self.o_order = copy.deepcopy(self.layer_sequence_ordering) self.layer_order = copy.deepcopy(self.i_order + self.m_order + self.o_order) del self.layer_sequence_ordering self.parameter_group = {} if self.glayer_included: self.parameter_group['global'] = self.time_embed if self.dlayer_included: self.parameter_group['data'] = self.data_blocks if self.clayer_included: self.parameter_group['context'] = self.context_blocks
Versatile-Diffusion-master
lib/model_zoo/openaimodel.py
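Every UNet variant in the openaimodel.py code above first maps the integer timesteps through timestep_embedding(...) and then through the learned time_embed MLP. The snippet below is a minimal, self-contained sketch of the standard DDPM-style sinusoidal embedding that such a helper conventionally computes; it is an illustration under that assumption, not the repository's own timestep_embedding function.

import math
import torch

def sinusoidal_timestep_embedding(timesteps, dim, max_period=10000):
    # timesteps: integer tensor of shape [N]; returns a float tensor of shape [N, dim]
    half = dim // 2
    freqs = torch.exp(
        -math.log(max_period) * torch.arange(half, dtype=torch.float32, device=timesteps.device) / half)
    args = timesteps[:, None].float() * freqs[None]
    emb = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
    if dim % 2 == 1:
        emb = torch.cat([emb, torch.zeros_like(emb[:, :1])], dim=-1)  # pad odd dims
    return emb

# Example: a batch of 4 timesteps embedded into model_channels=320 features,
# ready to feed the nn.Sequential time_embed defined in the UNet classes.
emb = sinusoidal_timestep_embedding(torch.tensor([0, 10, 250, 999]), dim=320)
print(emb.shape)  # torch.Size([4, 320])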
import torch
import torch.nn as nn
from functools import partial
# from ldm.modules.x_transformer import Encoder, TransformerWrapper  # TODO: can we directly rely on lucidrains code and simply add this as a requirement? --> test
# NOTE: TransformerEmbedder and BERTEmbedder below depend on this commented-out import.


class AbstractEncoder(nn.Module):
    def __init__(self):
        super().__init__()

    def encode(self, *args, **kwargs):
        raise NotImplementedError


class ClassEmbedder(nn.Module):
    def __init__(self, embed_dim, n_classes=1000, key='class'):
        super().__init__()
        self.key = key
        self.embedding = nn.Embedding(n_classes, embed_dim)

    def forward(self, batch, key=None):
        if key is None:
            key = self.key
        # this is for use in crossattn
        c = batch[key][:, None]
        c = self.embedding(c)
        return c


class TransformerEmbedder(AbstractEncoder):
    """Some transformer encoder layers"""
    def __init__(self, n_embed, n_layer, vocab_size, max_seq_len=77):
        super().__init__()
        self.transformer = TransformerWrapper(num_tokens=vocab_size, max_seq_len=max_seq_len,
                                              attn_layers=Encoder(dim=n_embed, depth=n_layer))

    def forward(self, tokens):
        z = self.transformer(tokens, return_embeddings=True)
        return z

    def encode(self, x):
        return self(x)


class BERTTokenizer(AbstractEncoder):
    """Uses a pretrained BERT tokenizer from huggingface. Vocab size: 30522"""
    def __init__(self, device="cuda", vq_interface=True, max_length=77):
        super().__init__()
        from transformers import BertTokenizerFast  # TODO: add to requirements
        self.tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
        self.vq_interface = vq_interface
        self.max_length = max_length

    def forward(self, text):
        batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
                                        return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
        tokens = batch_encoding["input_ids"]
        return tokens

    @torch.no_grad()
    def encode(self, text):
        tokens = self(text)
        if not self.vq_interface:
            return tokens
        return None, None, [None, None, tokens]

    def decode(self, text):
        return text


class BERTEmbedder(AbstractEncoder):
    """Uses the BERT tokenizer and adds some transformer encoder layers"""
    def __init__(self, n_embed, n_layer, vocab_size=30522, max_seq_len=77,
                 ckpt_path=None, ignore_keys=[], device="cuda", use_tokenizer=True, embedding_dropout=0.0):
        super().__init__()
        self.use_tknz_fn = use_tokenizer
        if self.use_tknz_fn:
            self.tknz_fn = BERTTokenizer(vq_interface=False, max_length=max_seq_len)
        self.transformer = TransformerWrapper(num_tokens=vocab_size, max_seq_len=max_seq_len,
                                              attn_layers=Encoder(dim=n_embed, depth=n_layer),
                                              emb_dropout=embedding_dropout)
        if ckpt_path is not None:
            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)

    def init_from_ckpt(self, path, ignore_keys=list()):
        sd = torch.load(path, map_location="cpu")
        keys = list(sd.keys())
        for k in keys:
            for ik in ignore_keys:
                if k.startswith(ik):
                    print("Deleting key {} from state_dict.".format(k))
                    del sd[k]
        missing, unexpected = self.load_state_dict(sd, strict=False)
        print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")

    def forward(self, text):
        if self.use_tknz_fn:
            tokens = self.tknz_fn(text)
        else:
            tokens = text
        device = self.transformer.token_emb.weight.device  # a trick to get the module's device
        tokens = tokens.to(device)
        z = self.transformer(tokens, return_embeddings=True)
        return z

    def encode(self, text):
        # output of length 77
        return self(text)


class SpatialRescaler(nn.Module):
    def __init__(self, n_stages=1, method='bilinear', multiplier=0.5, in_channels=3, out_channels=None, bias=False):
        super().__init__()
        self.n_stages = n_stages
        assert self.n_stages >= 0
        assert method in ['nearest', 'linear', 'bilinear', 'trilinear', 'bicubic', 'area']
        self.multiplier = multiplier
        self.interpolator = partial(torch.nn.functional.interpolate, mode=method)
        self.remap_output = out_channels is not None
        if self.remap_output:
            print(f'Spatial Rescaler mapping from {in_channels} to {out_channels} channels after resizing.')
            self.channel_mapper = nn.Conv2d(in_channels, out_channels, 1, bias=bias)

    def forward(self, x):
        for stage in range(self.n_stages):
            x = self.interpolator(x, scale_factor=self.multiplier)
        if self.remap_output:
            x = self.channel_mapper(x)
        return x

    def encode(self, x):
        return self(x)
Versatile-Diffusion-master
lib/model_zoo/bert.py
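A hypothetical usage sketch for the SpatialRescaler defined at the end of the file above: two bilinear half-resolution stages followed by an optional 1x1 channel remap. The class and argument names come from the file; the concrete sizes are made up for illustration, and the import path is assumed.

import torch
# assumes SpatialRescaler from lib/model_zoo/bert.py is importable in the current scope

rescaler = SpatialRescaler(n_stages=2, method='bilinear', multiplier=0.5,
                           in_channels=3, out_channels=8)
x = torch.randn(1, 3, 256, 256)
y = rescaler(x)          # 256 -> 128 -> 64 spatially, then 3 -> 8 channels via the 1x1 conv
print(y.shape)           # torch.Size([1, 8, 64, 64])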
import torch import torch.nn as nn import numpy as np from functools import partial from lib.model_zoo.common.get_model import register import torch.nn.functional as F symbol = 'clip' class AbstractEncoder(nn.Module): def __init__(self): super().__init__() def encode(self, *args, **kwargs): raise NotImplementedError from transformers import CLIPTokenizer, CLIPTextModel def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self ############### # for vd next # ############### from transformers import CLIPModel @register('clip_text_context_encoder') class CLIPTextContextEncoder(AbstractEncoder): def __init__(self, version="openai/clip-vit-large-patch14", max_length=77, fp16=False, ): super().__init__() self.tokenizer = CLIPTokenizer.from_pretrained(version) self.model = CLIPModel.from_pretrained(version) self.max_length = max_length self.fp16 = fp16 self.freeze() def get_device(self): # A trick to get device return self.model.text_projection.weight.device def freeze(self): self.model = self.model.eval() self.train = disabled_train for param in self.parameters(): param.requires_grad = False def encode(self, text): batch_encoding = self.tokenizer( text, truncation=True, max_length=self.max_length, return_length=True, return_overflowing_tokens=False, padding="max_length", return_tensors="pt") tokens = batch_encoding["input_ids"].to(self.get_device()) outputs = self.model.text_model(input_ids=tokens) z = self.model.text_projection(outputs.last_hidden_state) z_pooled = self.model.text_projection(outputs.pooler_output) z = z / torch.norm(z_pooled.unsqueeze(1), dim=-1, keepdim=True) return z from transformers import CLIPProcessor @register('clip_image_context_encoder') class CLIPImageContextEncoder(AbstractEncoder): def __init__(self, version="openai/clip-vit-large-patch14", fp16=False, ): super().__init__() self.tokenizer = CLIPTokenizer.from_pretrained(version) self.processor = CLIPProcessor.from_pretrained(version) self.model = CLIPModel.from_pretrained(version) self.fp16 = fp16 self.freeze() def get_device(self): # A trick to get device return self.model.text_projection.weight.device def freeze(self): self.model = self.model.eval() self.train = disabled_train for param in self.parameters(): param.requires_grad = False def _encode(self, images): if isinstance(images, torch.Tensor): import torchvision.transforms as tvtrans images = [tvtrans.ToPILImage()(i) for i in images] inputs = self.processor(images=images, return_tensors="pt") pixels = inputs['pixel_values'].half() if self.fp16 else inputs['pixel_values'] pixels = pixels.to(self.get_device()) outputs = self.model.vision_model(pixel_values=pixels) z = outputs.last_hidden_state z = self.model.vision_model.post_layernorm(z) z = self.model.visual_projection(z) z_pooled = z[:, 0:1] z = z / torch.norm(z_pooled, dim=-1, keepdim=True) return z @torch.no_grad() def _encode_wmask(self, images, masks): assert isinstance(masks, torch.Tensor) assert (len(masks.shape)==4) and (masks.shape[1]==1) masks = torch.clamp(masks, 0, 1) masks = masks.float() masks = F.interpolate(masks, [224, 224], mode='bilinear') if masks.sum() == masks.numel(): return self._encode(images) device = images.device dtype = images.dtype gscale = masks.mean(axis=[1, 2, 3], keepdim=True).flatten(2) vtoken_kernel_size = self.model.vision_model.embeddings.patch_embedding.kernel_size vtoken_stride = self.model.vision_model.embeddings.patch_embedding.stride mask_kernal = torch.ones([1, 1, 
*vtoken_kernel_size], device=device, requires_grad=False).float() vtoken_mask = torch.nn.functional.conv2d(masks, mask_kernal, stride=vtoken_stride).flatten(2).transpose(1, 2) vtoken_mask = vtoken_mask/np.prod(vtoken_kernel_size) vtoken_mask = torch.concat([gscale, vtoken_mask], axis=1) import types def customized_embedding_forward(self, pixel_values): batch_size = pixel_values.shape[0] patch_embeds = self.patch_embedding(pixel_values) # shape = [*, width, grid, grid] patch_embeds = patch_embeds.flatten(2).transpose(1, 2) class_embeds = self.class_embedding.expand(batch_size, 1, -1) embeddings = torch.cat([class_embeds, patch_embeds], dim=1) embeddings = embeddings + self.position_embedding(self.position_ids) embeddings = embeddings*vtoken_mask.to(embeddings.dtype) return embeddings old_forward = self.model.vision_model.embeddings.forward self.model.vision_model.embeddings.forward = types.MethodType( customized_embedding_forward, self.model.vision_model.embeddings) z = self._encode(images) self.model.vision_model.embeddings.forward = old_forward z = z * vtoken_mask.to(dtype) return z def encode(self, images, masks=None): if masks is None: return self._encode(images) else: return self._encode_wmask(images, masks)
Versatile-Diffusion-master
lib/model_zoo/clip.py
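A hypothetical usage sketch for the two context encoders above. The import path and the token counts are assumptions based on the default openai/clip-vit-large-patch14 checkpoint (77 text tokens; 1 class token plus 256 image patches; 768-dim projection); the weights are downloaded from HuggingFace on first use.

import torch
# assumes CLIPTextContextEncoder / CLIPImageContextEncoder from lib/model_zoo/clip.py are importable

text_enc = CLIPTextContextEncoder()                  # frozen openai/clip-vit-large-patch14
ctx_text = text_enc.encode(["a painting of a fox in the snow"])
print(ctx_text.shape)                                # expected: torch.Size([1, 77, 768])

image_enc = CLIPImageContextEncoder()
images = torch.rand(1, 3, 224, 224)                  # tensors are converted to PIL and re-processed internally
ctx_image = image_enc.encode(images)
print(ctx_image.shape)                               # expected: torch.Size([1, 257, 768])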
import torch import torch.nn as nn import torch.nn.functional as F import numpy as np import numpy.random as npr import copy from functools import partial from contextlib import contextmanager from lib.model_zoo.common.get_model import get_model, register from lib.log_service import print_log symbol = 'vd' from .diffusion_utils import \ count_params, extract_into_tensor, make_beta_schedule from .distributions import normal_kl, DiagonalGaussianDistribution from .autokl import AutoencoderKL from .ema import LitEma def highlight_print(info): print_log('') print_log(''.join(['#']*(len(info)+4))) print_log('# '+info+' #') print_log(''.join(['#']*(len(info)+4))) print_log('') class String_Reg_Buffer(nn.Module): def __init__(self, output_string): super().__init__() torch_string = torch.ByteTensor(list(bytes(output_string, 'utf8'))) self.register_buffer('output_string', torch_string) @torch.no_grad() def forward(self, *args, **kwargs): list_str = self.output_string.tolist() output_string = bytes(list_str) output_string = output_string.decode() return output_string @register('vd_v2_0') class VD_v2_0(nn.Module): def __init__(self, vae_cfg_list, ctx_cfg_list, diffuser_cfg_list, global_layer_ptr=None, parameterization="eps", timesteps=1000, use_ema=False, beta_schedule="linear", beta_linear_start=1e-4, beta_linear_end=2e-2, given_betas=None, cosine_s=8e-3, loss_type="l2", l_simple_weight=1., l_elbo_weight=0., v_posterior=0., learn_logvar=False, logvar_init=0, latent_scale_factor=None,): super().__init__() assert parameterization in ["eps", "x0"], \ 'currently only supporting "eps" and "x0"' self.parameterization = parameterization highlight_print("Running in {} mode".format(self.parameterization)) self.vae = self.get_model_list(vae_cfg_list) self.ctx = self.get_model_list(ctx_cfg_list) self.diffuser = self.get_model_list(diffuser_cfg_list) self.global_layer_ptr = global_layer_ptr assert self.check_diffuser(), 'diffuser layers are not aligned!' self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print_log(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.loss_type = loss_type self.l_simple_weight = l_simple_weight self.l_elbo_weight = l_elbo_weight self.v_posterior = v_posterior self.device = 'cpu' self.register_schedule( given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=beta_linear_start, linear_end=beta_linear_end, cosine_s=cosine_s) self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.latent_scale_factor = {} if latent_scale_factor is None else latent_scale_factor self.parameter_group = {} for namei, diffuseri in self.diffuser.items(): self.parameter_group.update({ 'diffuser_{}_{}'.format(namei, pgni):pgi for pgni, pgi in diffuseri.parameter_group.items() }) def to(self, device): self.device = device super().to(device) def get_model_list(self, cfg_list): net = nn.ModuleDict() for name, cfg in cfg_list: if not isinstance(cfg, str): net[name] = get_model()(cfg) else: net[name] = String_Reg_Buffer(cfg) return net def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if given_betas is not None: betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. 
- betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, \ 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod)) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print_log(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print_log(f"{context}: Restored training weights") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
""" mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): value1 = extract_into_tensor( self.sqrt_recip_alphas_cumprod, t, x_t.shape) value2 = extract_into_tensor( self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) return value1*x_t -value2*noise def q_sample(self, x_start, t, noise=None): noise = torch.randn_like(x_start) if noise is None else noise return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def forward(self, x_info, c_info): x = x_info['x'] t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x_info, t, c_info) def p_losses(self, x_info, t, c_info, noise=None): x = x_info['x'] noise = torch.randn_like(x) if noise is None else noise x_noisy = self.q_sample(x_start=x, t=t, noise=noise) x_info['x'] = x_noisy model_output = self.apply_model(x_info, t, c_info) loss_dict = {} if self.parameterization == "x0": target = x elif self.parameterization == "eps": target = noise else: raise NotImplementedError() bs = model_output.shape[0] loss_simple = self.get_loss(model_output, target, mean=False).view(bs, -1).mean(-1) loss_dict['loss_simple'] = loss_simple.mean() logvar_t = self.logvar[t].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t if self.learn_logvar: loss_dict['loss_gamma'] = loss.mean() loss_dict['logvar' ] = self.logvar.data.mean() loss = self.l_simple_weight * loss.mean() loss_vlb = self.get_loss(model_output, target, mean=False).view(bs, -1).mean(-1) loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() loss_dict['loss_vlb'] = loss_vlb loss_dict.update({'Loss': loss}) return loss, loss_dict @torch.no_grad() def vae_encode(self, x, which, **kwargs): z = self.vae[which].encode(x, **kwargs) if self.latent_scale_factor is not None: if self.latent_scale_factor.get(which, None) is not None: scale = self.latent_scale_factor[which] return scale * z return z @torch.no_grad() def vae_decode(self, z, which, **kwargs): if self.latent_scale_factor is not None: if self.latent_scale_factor.get(which, None) is not None: scale = self.latent_scale_factor[which] z = 1./scale * z x = self.vae[which].decode(z, **kwargs) return x @torch.no_grad() def ctx_encode(self, x, which, **kwargs): if which.find('vae_') == 0: return self.vae[which[4:]].encode(x, **kwargs) else: return self.ctx[which].encode(x, **kwargs) def ctx_encode_trainable(self, x, which, **kwargs): if which.find('vae_') == 0: return self.vae[which[4:]].encode(x, **kwargs) else: return self.ctx[which].encode(x, **kwargs) def check_diffuser(self): for idx, (_, diffuseri) in enumerate(self.diffuser.items()): if idx==0: order = diffuseri.layer_order else: if not order == diffuseri.layer_order: return False return True @torch.no_grad() def on_train_batch_start(self, x): pass def on_train_batch_end(self, *args, **kwargs): if 
self.use_ema: self.model_ema(self.model) def apply_model(self, x_info, timesteps, c_info): x_type, x = x_info['type'], x_info['x'] c_type, c = c_info['type'], c_info['c'] dtype = x.dtype hs = [] from .openaimodel import timestep_embedding glayer_ptr = x_type if self.global_layer_ptr is None else self.global_layer_ptr model_channels = self.diffuser[glayer_ptr].model_channels t_emb = timestep_embedding(timesteps, model_channels, repeat_only=False).to(dtype) emb = self.diffuser[glayer_ptr].time_embed(t_emb) d_iter = iter(self.diffuser[x_type].data_blocks) c_iter = iter(self.diffuser[c_type].context_blocks) i_order = self.diffuser[x_type].i_order m_order = self.diffuser[x_type].m_order o_order = self.diffuser[x_type].o_order h = x for ltype in i_order: if ltype == 'd': module = next(d_iter) h = module(h, emb, None) elif ltype == 'c': module = next(c_iter) h = module(h, emb, c) elif ltype == 'save_hidden_feature': hs.append(h) for ltype in m_order: if ltype == 'd': module = next(d_iter) h = module(h, emb, None) elif ltype == 'c': module = next(c_iter) h = module(h, emb, c) for ltype in o_order: if ltype == 'load_hidden_feature': h = torch.cat([h, hs.pop()], dim=1) elif ltype == 'd': module = next(d_iter) h = module(h, emb, None) elif ltype == 'c': module = next(c_iter) h = module(h, emb, c) o = h return o def context_mixing(self, x, emb, context_module_list, context_info_list, mixing_type): nm = len(context_module_list) nc = len(context_info_list) assert nm == nc context = [c_info['c'] for c_info in context_info_list] cratio = np.array([c_info['ratio'] for c_info in context_info_list]) cratio = cratio / cratio.sum() if mixing_type == 'attention': h = None for module, c, r in zip(context_module_list, context, cratio): hi = module(x, emb, c) * r h = h+hi if h is not None else hi return h elif mixing_type == 'layer': ni = npr.choice(nm, p=cratio) module = context_module_list[ni] c = context[ni] h = module(x, emb, c) return h def apply_model_multicontext(self, x_info, timesteps, c_info_list, mixing_type='attention'): ''' context_info_list: [[context_type, context, ratio]] for 'attention' ''' x_type, x = x_info['type'], x_info['x'] dtype = x.dtype hs = [] from .openaimodel import timestep_embedding model_channels = self.diffuser[x_type].model_channels t_emb = timestep_embedding(timesteps, model_channels, repeat_only=False).to(dtype) emb = self.diffuser[x_type].time_embed(t_emb) d_iter = iter(self.diffuser[x_type].data_blocks) c_iter_list = [iter(self.diffuser[c_info['type']].context_blocks) for c_info in c_info_list] i_order = self.diffuser[x_type].i_order m_order = self.diffuser[x_type].m_order o_order = self.diffuser[x_type].o_order h = x for ltype in i_order: if ltype == 'd': module = next(d_iter) h = module(h, emb, None) elif ltype == 'c': module_list = [next(c_iteri) for c_iteri in c_iter_list] h = self.context_mixing(h, emb, module_list, c_info_list, mixing_type) elif ltype == 'save_hidden_feature': hs.append(h) for ltype in m_order: if ltype == 'd': module = next(d_iter) h = module(h, emb, None) elif ltype == 'c': module_list = [next(c_iteri) for c_iteri in c_iter_list] h = self.context_mixing(h, emb, module_list, c_info_list, mixing_type) for ltype in o_order: if ltype == 'load_hidden_feature': h = torch.cat([h, hs.pop()], dim=1) elif ltype == 'd': module = next(d_iter) h = module(h, emb, None) elif ltype == 'c': module_list = [next(c_iteri) for c_iteri in c_iter_list] h = self.context_mixing(h, emb, module_list, c_info_list, mixing_type) o = h return o
Versatile-Diffusion-master
lib/model_zoo/vd.py
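The register_schedule method above precomputes the usual DDPM quantities so that q_sample can form x_t = sqrt(alphas_cumprod[t]) * x_0 + sqrt(1 - alphas_cumprod[t]) * noise in a single step. Below is a standalone NumPy sketch of that bookkeeping; the squared-linspace "linear" beta schedule is an assumption that matches the common LDM convention for make_beta_schedule.

import numpy as np

timesteps = 1000
# "linear" schedule (assumed: linspace in sqrt-space, then squared)
betas = np.linspace(1e-4 ** 0.5, 2e-2 ** 0.5, timesteps) ** 2
alphas = 1.0 - betas
alphas_cumprod = np.cumprod(alphas)

t = 500
x0 = np.random.randn(2, 4, 8, 8)        # pretend latent batch
noise = np.random.randn(*x0.shape)
x_t = np.sqrt(alphas_cumprod[t]) * x0 + np.sqrt(1.0 - alphas_cumprod[t]) * noise
print(round(float(alphas_cumprod[t]), 4), x_t.shape)   # remaining signal fraction at t=500, noised shape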
import torch import torch.nn as nn import functools class ActNorm(nn.Module): def __init__(self, num_features, logdet=False, affine=True, allow_reverse_init=False): assert affine super().__init__() self.logdet = logdet self.loc = nn.Parameter(torch.zeros(1, num_features, 1, 1)) self.scale = nn.Parameter(torch.ones(1, num_features, 1, 1)) self.allow_reverse_init = allow_reverse_init self.register_buffer('initialized', torch.tensor(0, dtype=torch.uint8)) def initialize(self, input): with torch.no_grad(): flatten = input.permute(1, 0, 2, 3).contiguous().view(input.shape[1], -1) mean = ( flatten.mean(1) .unsqueeze(1) .unsqueeze(2) .unsqueeze(3) .permute(1, 0, 2, 3) ) std = ( flatten.std(1) .unsqueeze(1) .unsqueeze(2) .unsqueeze(3) .permute(1, 0, 2, 3) ) self.loc.data.copy_(-mean) self.scale.data.copy_(1 / (std + 1e-6)) def forward(self, input, reverse=False): if reverse: return self.reverse(input) if len(input.shape) == 2: input = input[:,:,None,None] squeeze = True else: squeeze = False _, _, height, width = input.shape if self.training and self.initialized.item() == 0: self.initialize(input) self.initialized.fill_(1) h = self.scale * (input + self.loc) if squeeze: h = h.squeeze(-1).squeeze(-1) if self.logdet: log_abs = torch.log(torch.abs(self.scale)) logdet = height*width*torch.sum(log_abs) logdet = logdet * torch.ones(input.shape[0]).to(input) return h, logdet return h def reverse(self, output): if self.training and self.initialized.item() == 0: if not self.allow_reverse_init: raise RuntimeError( "Initializing ActNorm in reverse direction is " "disabled by default. Use allow_reverse_init=True to enable." ) else: self.initialize(output) self.initialized.fill_(1) if len(output.shape) == 2: output = output[:,:,None,None] squeeze = True else: squeeze = False h = output / self.scale - self.loc if squeeze: h = h.squeeze(-1).squeeze(-1) return h ################# # Discriminator # ################# def weights_init(m): classname = m.__class__.__name__ if classname.find('Conv') != -1: nn.init.normal_(m.weight.data, 0.0, 0.02) elif classname.find('BatchNorm') != -1: nn.init.normal_(m.weight.data, 1.0, 0.02) nn.init.constant_(m.bias.data, 0) class NLayerDiscriminator(nn.Module): """Defines a PatchGAN discriminator as in Pix2Pix --> see https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/networks.py """ def __init__(self, input_nc=3, ndf=64, n_layers=3, use_actnorm=False): """Construct a PatchGAN discriminator Parameters: input_nc (int) -- the number of channels in input images ndf (int) -- the number of filters in the last conv layer n_layers (int) -- the number of conv layers in the discriminator norm_layer -- normalization layer """ super(NLayerDiscriminator, self).__init__() if not use_actnorm: norm_layer = nn.BatchNorm2d else: norm_layer = ActNorm if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters use_bias = norm_layer.func != nn.BatchNorm2d else: use_bias = norm_layer != nn.BatchNorm2d kw = 4 padw = 1 sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)] nf_mult = 1 nf_mult_prev = 1 for n in range(1, n_layers): # gradually increase the number of filters nf_mult_prev = nf_mult nf_mult = min(2 ** n, 8) sequence += [ nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias), norm_layer(ndf * nf_mult), nn.LeakyReLU(0.2, True) ] nf_mult_prev = nf_mult nf_mult = min(2 ** n_layers, 8) sequence += [ nn.Conv2d(ndf * nf_mult_prev, ndf * 
nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias), norm_layer(ndf * nf_mult), nn.LeakyReLU(0.2, True) ] sequence += [ nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map self.main = nn.Sequential(*sequence) def forward(self, input): """Standard forward.""" return self.main(input) ######### # LPIPS # ######### class ScalingLayer(nn.Module): def __init__(self): super(ScalingLayer, self).__init__() self.register_buffer('shift', torch.Tensor([-.030, -.088, -.188])[None, :, None, None]) self.register_buffer('scale', torch.Tensor([.458, .448, .450])[None, :, None, None]) def forward(self, inp): return (inp - self.shift) / self.scale class NetLinLayer(nn.Module): """ A single linear layer which does a 1x1 conv """ def __init__(self, chn_in, chn_out=1, use_dropout=False): super(NetLinLayer, self).__init__() layers = [nn.Dropout(), ] if (use_dropout) else [] layers += [nn.Conv2d(chn_in, chn_out, 1, stride=1, padding=0, bias=False), ] self.model = nn.Sequential(*layers) from collections import namedtuple from torchvision import models from torchvision.models import VGG16_Weights class vgg16(torch.nn.Module): def __init__(self, requires_grad=False, pretrained=True): super(vgg16, self).__init__() if pretrained: vgg_pretrained_features = models.vgg16(weights=VGG16_Weights.IMAGENET1K_V1).features self.slice1 = torch.nn.Sequential() self.slice2 = torch.nn.Sequential() self.slice3 = torch.nn.Sequential() self.slice4 = torch.nn.Sequential() self.slice5 = torch.nn.Sequential() self.N_slices = 5 for x in range(4): self.slice1.add_module(str(x), vgg_pretrained_features[x]) for x in range(4, 9): self.slice2.add_module(str(x), vgg_pretrained_features[x]) for x in range(9, 16): self.slice3.add_module(str(x), vgg_pretrained_features[x]) for x in range(16, 23): self.slice4.add_module(str(x), vgg_pretrained_features[x]) for x in range(23, 30): self.slice5.add_module(str(x), vgg_pretrained_features[x]) if not requires_grad: for param in self.parameters(): param.requires_grad = False def forward(self, X): h = self.slice1(X) h_relu1_2 = h h = self.slice2(h) h_relu2_2 = h h = self.slice3(h) h_relu3_3 = h h = self.slice4(h) h_relu4_3 = h h = self.slice5(h) h_relu5_3 = h vgg_outputs = namedtuple("VggOutputs", ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3', 'relu5_3']) out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3, h_relu5_3) return out def normalize_tensor(x,eps=1e-10): norm_factor = torch.sqrt(torch.sum(x**2,dim=1,keepdim=True)) return x/(norm_factor+eps) def spatial_average(x, keepdim=True): return x.mean([2,3],keepdim=keepdim) def get_ckpt_path(*args, **kwargs): return 'pretrained/lpips.pth' class LPIPS(nn.Module): # Learned perceptual metric def __init__(self, use_dropout=True): super().__init__() self.scaling_layer = ScalingLayer() self.chns = [64, 128, 256, 512, 512] # vg16 features self.net = vgg16(pretrained=True, requires_grad=False) self.lin0 = NetLinLayer(self.chns[0], use_dropout=use_dropout) self.lin1 = NetLinLayer(self.chns[1], use_dropout=use_dropout) self.lin2 = NetLinLayer(self.chns[2], use_dropout=use_dropout) self.lin3 = NetLinLayer(self.chns[3], use_dropout=use_dropout) self.lin4 = NetLinLayer(self.chns[4], use_dropout=use_dropout) self.load_from_pretrained() for param in self.parameters(): param.requires_grad = False def load_from_pretrained(self, name="vgg_lpips"): ckpt = get_ckpt_path(name, "taming/modules/autoencoder/lpips") self.load_state_dict(torch.load(ckpt, map_location=torch.device("cpu")), strict=False) 
print("loaded pretrained LPIPS loss from {}".format(ckpt)) @classmethod def from_pretrained(cls, name="vgg_lpips"): if name != "vgg_lpips": raise NotImplementedError model = cls() ckpt = get_ckpt_path(name) model.load_state_dict(torch.load(ckpt, map_location=torch.device("cpu")), strict=False) return model def forward(self, input, target): in0_input, in1_input = (self.scaling_layer(input), self.scaling_layer(target)) outs0, outs1 = self.net(in0_input), self.net(in1_input) feats0, feats1, diffs = {}, {}, {} lins = [self.lin0, self.lin1, self.lin2, self.lin3, self.lin4] for kk in range(len(self.chns)): feats0[kk], feats1[kk] = normalize_tensor(outs0[kk]), normalize_tensor(outs1[kk]) diffs[kk] = (feats0[kk] - feats1[kk]) ** 2 res = [spatial_average(lins[kk].model(diffs[kk]), keepdim=True) for kk in range(len(self.chns))] val = res[0] for l in range(1, len(self.chns)): val += res[l] return val ############ # The loss # ############ def adopt_weight(weight, global_step, threshold=0, value=0.): if global_step < threshold: weight = value return weight def hinge_d_loss(logits_real, logits_fake): loss_real = torch.mean(F.relu(1. - logits_real)) loss_fake = torch.mean(F.relu(1. + logits_fake)) d_loss = 0.5 * (loss_real + loss_fake) return d_loss def vanilla_d_loss(logits_real, logits_fake): d_loss = 0.5 * ( torch.mean(torch.nn.functional.softplus(-logits_real)) + torch.mean(torch.nn.functional.softplus(logits_fake))) return d_loss class LPIPSWithDiscriminator(nn.Module): def __init__(self, disc_start, logvar_init=0.0, kl_weight=1.0, pixelloss_weight=1.0, disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=1.0, perceptual_weight=1.0, use_actnorm=False, disc_conditional=False, disc_loss="hinge"): super().__init__() assert disc_loss in ["hinge", "vanilla"] self.kl_weight = kl_weight self.pixel_weight = pixelloss_weight self.perceptual_loss = LPIPS().eval() self.perceptual_weight = perceptual_weight # output log variance self.logvar = nn.Parameter(torch.ones(size=()) * logvar_init) self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels, n_layers=disc_num_layers, use_actnorm=use_actnorm ).apply(weights_init) self.discriminator_iter_start = disc_start self.disc_loss = hinge_d_loss if disc_loss == "hinge" else vanilla_d_loss self.disc_factor = disc_factor self.discriminator_weight = disc_weight self.disc_conditional = disc_conditional def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None): if last_layer is not None: nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0] g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0] else: nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0] g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0] d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4) d_weight = torch.clamp(d_weight, 0.0, 1e4).detach() d_weight = d_weight * self.discriminator_weight return d_weight def forward(self, inputs, reconstructions, posteriors, optimizer_idx, global_step, last_layer=None, cond=None, split="train", weights=None): rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous()) if self.perceptual_weight > 0: p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous()) rec_loss = rec_loss + self.perceptual_weight * p_loss nll_loss = rec_loss / torch.exp(self.logvar) + self.logvar weighted_nll_loss = nll_loss if weights is not None: weighted_nll_loss = weights*nll_loss weighted_nll_loss = 
torch.sum(weighted_nll_loss) / weighted_nll_loss.shape[0] nll_loss = torch.sum(nll_loss) / nll_loss.shape[0] kl_loss = posteriors.kl() kl_loss = torch.sum(kl_loss) / kl_loss.shape[0] # now the GAN part if optimizer_idx == 0: # generator update if cond is None: assert not self.disc_conditional logits_fake = self.discriminator(reconstructions.contiguous()) else: assert self.disc_conditional logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1)) g_loss = -torch.mean(logits_fake) if self.disc_factor > 0.0: try: d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer) except RuntimeError: assert not self.training d_weight = torch.tensor(0.0) else: d_weight = torch.tensor(0.0) disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start) loss = weighted_nll_loss + self.kl_weight * kl_loss + d_weight * disc_factor * g_loss log = {"Loss": loss.clone().detach().mean(), "logvar": self.logvar.detach(), "loss_kl": kl_loss.detach().mean(), "loss_nll": nll_loss.detach().mean(), "loss_rec": rec_loss.detach().mean(), "d_weight": d_weight.detach(), "disc_factor": torch.tensor(disc_factor), "loss_g": g_loss.detach().mean(), } return loss, log if optimizer_idx == 1: # second pass for discriminator update if cond is None: logits_real = self.discriminator(inputs.contiguous().detach()) logits_fake = self.discriminator(reconstructions.contiguous().detach()) else: logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1)) logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1)) disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start) d_loss = disc_factor * self.disc_loss(logits_real, logits_fake) log = {"Loss": d_loss.clone().detach().mean(), "loss_disc": d_loss.clone().detach().mean(), "logits_real": logits_real.detach().mean(), "logits_fake": logits_fake.detach().mean() } return d_loss, log
Versatile-Diffusion-master
lib/model_zoo/autokl_utils.py
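A tiny standalone check of the hinge discriminator loss used by LPIPSWithDiscriminator above. Note that, as printed here, the file calls F.relu without an "import torch.nn.functional as F" line, so the sketch below carries its own import.

import torch
import torch.nn.functional as F

def hinge_d_loss(logits_real, logits_fake):
    # 0.5 * ( mean(relu(1 - real)) + mean(relu(1 + fake)) )
    loss_real = torch.mean(F.relu(1. - logits_real))
    loss_fake = torch.mean(F.relu(1. + logits_fake))
    return 0.5 * (loss_real + loss_fake)

logits_real = torch.tensor([2.0, 0.5])    # one confident real, one weak
logits_fake = torch.tensor([-2.0, 0.5])   # one confident fake, one that fooled the critic
print(hinge_d_loss(logits_real, logits_fake))  # tensor(0.5000)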
import torch import torch.nn as nn import torch.nn.functional as F from contextlib import contextmanager from lib.model_zoo.common.get_model import get_model, register # from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer from .autokl_modules import Encoder, Decoder from .distributions import DiagonalGaussianDistribution from .autokl_utils import LPIPSWithDiscriminator @register('autoencoderkl') class AutoencoderKL(nn.Module): def __init__(self, ddconfig, lossconfig, embed_dim,): super().__init__() self.encoder = Encoder(**ddconfig) self.decoder = Decoder(**ddconfig) if lossconfig is not None: self.loss = LPIPSWithDiscriminator(**lossconfig) assert ddconfig["double_z"] self.quant_conv = torch.nn.Conv2d(2*ddconfig["z_channels"], 2*embed_dim, 1) self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1) self.embed_dim = embed_dim @torch.no_grad() def encode(self, x, out_posterior=False): return self.encode_trainable(x, out_posterior) def encode_trainable(self, x, out_posterior=False): x = x*2-1 h = self.encoder(x) moments = self.quant_conv(h) posterior = DiagonalGaussianDistribution(moments) if out_posterior: return posterior else: return posterior.sample() @torch.no_grad() def decode(self, z): z = self.post_quant_conv(z) dec = self.decoder(z) dec = torch.clamp((dec+1)/2, 0, 1) return dec def decode_trainable(self, z): z = self.post_quant_conv(z) dec = self.decoder(z) dec = (dec+1)/2 return dec def apply_model(self, input, sample_posterior=True): posterior = self.encode_trainable(input, out_posterior=True) if sample_posterior: z = posterior.sample() else: z = posterior.mode() dec = self.decode_trainable(z) return dec, posterior def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float() return x def forward(self, x, optimizer_idx, global_step): reconstructions, posterior = self.apply_model(x) if optimizer_idx == 0: # train encoder+decoder+logvar aeloss, log_dict_ae = self.loss(x, reconstructions, posterior, optimizer_idx, global_step=global_step, last_layer=self.get_last_layer(), split="train") return aeloss, log_dict_ae if optimizer_idx == 1: # train the discriminator discloss, log_dict_disc = self.loss(x, reconstructions, posterior, optimizer_idx, global_step=global_step, last_layer=self.get_last_layer(), split="train") return discloss, log_dict_disc def validation_step(self, batch, batch_idx): inputs = self.get_input(batch, self.image_key) reconstructions, posterior = self(inputs) aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step, last_layer=self.get_last_layer(), split="val") discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step, last_layer=self.get_last_layer(), split="val") self.log("val/rec_loss", log_dict_ae["val/rec_loss"]) self.log_dict(log_dict_ae) self.log_dict(log_dict_disc) return self.log_dict def configure_optimizers(self): lr = self.learning_rate opt_ae = torch.optim.Adam(list(self.encoder.parameters())+ list(self.decoder.parameters())+ list(self.quant_conv.parameters())+ list(self.post_quant_conv.parameters()), lr=lr, betas=(0.5, 0.9)) opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(), lr=lr, betas=(0.5, 0.9)) return [opt_ae, opt_disc], [] def get_last_layer(self): return self.decoder.conv_out.weight @torch.no_grad() def log_images(self, batch, only_inputs=False, **kwargs): log = dict() x = self.get_input(batch, self.image_key) x = 
x.to(self.device) if not only_inputs: xrec, posterior = self(x) if x.shape[1] > 3: # colorize with random projection assert xrec.shape[1] > 3 x = self.to_rgb(x) xrec = self.to_rgb(xrec) log["samples"] = self.decode(torch.randn_like(posterior.sample())) log["reconstructions"] = xrec log["inputs"] = x return log def to_rgb(self, x): assert self.image_key == "segmentation" if not hasattr(self, "colorize"): self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x)) x = F.conv2d(x, weight=self.colorize) x = 2.*(x-x.min())/(x.max()-x.min()) - 1. return x
Versatile-Diffusion-master
lib/model_zoo/autokl.py
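A hypothetical encode/decode round trip with the AutoencoderKL above. The ddconfig keys mirror the usual f=8 Stable-Diffusion-style autoencoder and are assumptions about what Encoder/Decoder in autokl_modules accept, not values taken from a repository config file.

import torch
# assumes AutoencoderKL from lib/model_zoo/autokl.py is importable

ddconfig = dict(double_z=True, z_channels=4, resolution=256, in_channels=3, out_ch=3,
                ch=128, ch_mult=(1, 2, 4, 4), num_res_blocks=2, attn_resolutions=[], dropout=0.0)
vae = AutoencoderKL(ddconfig=ddconfig, lossconfig=None, embed_dim=4).eval()

x = torch.rand(1, 3, 256, 256)   # encode_trainable rescales [0, 1] inputs to [-1, 1]
z = vae.encode(x)                # sampled latent, expected shape [1, 4, 32, 32]
rec = vae.decode(z)              # decoded back to [0, 1], expected shape [1, 3, 256, 256]
print(z.shape, rec.shape)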
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Configuration base class and utilities.""" from __future__ import (absolute_import, division, print_function, unicode_literals) import copy import json import logging import os from io import open from .file_utils import cached_path, CONFIG_NAME logger = logging.getLogger(__name__) class PretrainedConfig(object): r""" Base class for all configuration classes. Handles a few parameters common to all models' configurations as well as methods for loading/downloading/saving configurations. Note: A configuration file can be loaded and saved to disk. Loading the configuration file and using this file to initialize a model does **not** load the model weights. It only affects the model's configuration. Class attributes (overridden by derived classes): - ``pretrained_config_archive_map``: a python ``dict`` of with `short-cut-names` (string) as keys and `url` (string) of associated pretrained model configurations as values. Parameters: ``finetuning_task``: string, default `None`. Name of the task used to fine-tune the model. This can be used when converting from an original (TensorFlow or PyTorch) checkpoint. ``num_labels``: integer, default `2`. Number of classes to use when the model is a classification model (sequences/tokens) ``output_attentions``: boolean, default `False`. Should the model returns attentions weights. ``output_hidden_states``: string, default `False`. Should the model returns all hidden-states. ``torchscript``: string, default `False`. Is the model used with Torchscript. """ pretrained_config_archive_map = {} def __init__(self, **kwargs): self.finetuning_task = kwargs.pop('finetuning_task', None) self.num_labels = kwargs.pop('num_labels', 2) self.output_attentions = kwargs.pop('output_attentions', False) self.output_hidden_states = kwargs.pop('output_hidden_states', False) self.torchscript = kwargs.pop('torchscript', False) self.pruned_heads = kwargs.pop('pruned_heads', {}) def save_pretrained(self, save_directory): """ Save a configuration object to the directory `save_directory`, so that it can be re-loaded using the :func:`~pytorch_transformers.PretrainedConfig.from_pretrained` class method. """ assert os.path.isdir(save_directory), "Saving path should be a directory where the model and configuration can be saved" # If we save using the predefined names, we can load using `from_pretrained` output_config_file = os.path.join(save_directory, CONFIG_NAME) self.to_json_file(output_config_file) @classmethod def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): r""" Instantiate a :class:`~pytorch_transformers.PretrainedConfig` (or a derived class) from a pre-trained model configuration. Parameters: pretrained_model_name_or_path: either: - a string with the `shortcut name` of a pre-trained model configuration to load from cache or download, e.g.: ``bert-base-uncased``. 
- a path to a `directory` containing a configuration file saved using the :func:`~pytorch_transformers.PretrainedConfig.save_pretrained` method, e.g.: ``./my_model_directory/``. - a path or url to a saved configuration JSON `file`, e.g.: ``./my_model_directory/configuration.json``. cache_dir: (`optional`) string: Path to a directory in which a downloaded pre-trained model configuration should be cached if the standard cache should not be used. kwargs: (`optional`) dict: key/value pairs with which to update the configuration object after loading. - The values in kwargs of any keys which are configuration attributes will be used to override the loaded values. - Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled by the `return_unused_kwargs` keyword parameter. force_download: (`optional`) boolean, default False: Force to (re-)download the model weights and configuration files and override the cached versions if they exists. proxies: (`optional`) dict, default None: A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}. The proxies are used on each request. return_unused_kwargs: (`optional`) bool: - If False, then this function returns just the final configuration object. - If True, then this functions returns a tuple `(config, unused_kwargs)` where `unused_kwargs` is a dictionary consisting of the key/value pairs whose keys are not configuration attributes: ie the part of kwargs which has not been used to update `config` and is otherwise ignored. Examples:: # We can't instantiate directly the base class `PretrainedConfig` so let's show the examples on a # derived class: BertConfig config = BertConfig.from_pretrained('bert-base-uncased') # Download configuration from S3 and cache. config = BertConfig.from_pretrained('./test/saved_model/') # E.g. config (or model) was saved using `save_pretrained('./test/saved_model/')` config = BertConfig.from_pretrained('./test/saved_model/my_configuration.json') config = BertConfig.from_pretrained('bert-base-uncased', output_attention=True, foo=False) assert config.output_attention == True config, unused_kwargs = BertConfig.from_pretrained('bert-base-uncased', output_attention=True, foo=False, return_unused_kwargs=True) assert config.output_attention == True assert unused_kwargs == {'foo': False} """ cache_dir = kwargs.pop('cache_dir', None) force_download = kwargs.pop('force_download', False) proxies = kwargs.pop('proxies', None) return_unused_kwargs = kwargs.pop('return_unused_kwargs', False) if pretrained_model_name_or_path in cls.pretrained_config_archive_map: config_file = cls.pretrained_config_archive_map[pretrained_model_name_or_path] elif os.path.isdir(pretrained_model_name_or_path): config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME) else: config_file = pretrained_model_name_or_path # redirect to the cache, if necessary try: resolved_config_file = cached_path(config_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies) except EnvironmentError as e: if pretrained_model_name_or_path in cls.pretrained_config_archive_map: logger.error( "Couldn't reach server at '{}' to download pretrained model configuration file.".format( config_file)) else: logger.error( "Model name '{}' was not found in model name list ({}). 
" "We assumed '{}' was a path or url but couldn't find any file " "associated to this path or url.".format( pretrained_model_name_or_path, ', '.join(cls.pretrained_config_archive_map.keys()), config_file)) raise e if resolved_config_file == config_file: logger.info("loading configuration file {}".format(config_file)) else: logger.info("loading configuration file {} from cache at {}".format( config_file, resolved_config_file)) # Load config config = cls.from_json_file(resolved_config_file) if hasattr(config, 'pruned_heads'): config.pruned_heads = dict((int(key), set(value)) for key, value in config.pruned_heads.items()) # Update config with kwargs if needed to_remove = [] for key, value in kwargs.items(): if hasattr(config, key): setattr(config, key, value) to_remove.append(key) for key in to_remove: kwargs.pop(key, None) logger.info("Model config %s", config) if return_unused_kwargs: return config, kwargs else: return config @classmethod def from_dict(cls, json_object): """Constructs a `Config` from a Python dictionary of parameters.""" config = cls(vocab_size_or_config_json_file=-1) for key, value in json_object.items(): config.__dict__[key] = value return config @classmethod def from_json_file(cls, json_file): """Constructs a `BertConfig` from a json file of parameters.""" with open(json_file, "r", encoding='utf-8') as reader: text = reader.read() return cls.from_dict(json.loads(text)) def __eq__(self, other): return self.__dict__ == other.__dict__ def __repr__(self): return str(self.to_json_string()) def to_dict(self): """Serializes this instance to a Python dictionary.""" output = copy.deepcopy(self.__dict__) return output def to_json_string(self): """Serializes this instance to a JSON string.""" return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n" def to_json_file(self, json_file_path): """ Save this instance to a json file.""" with open(json_file_path, "w", encoding='utf-8') as writer: writer.write(self.to_json_string())
Versatile-Diffusion-master
lib/model_zoo/optimus_models/configuration_utils.py
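A minimal usage sketch for the PretrainedConfig base class above, assuming the repository root is on the Python path. ToyConfig and its hidden_size / num_layers fields are hypothetical illustration names, not part of the vendored module.

import tempfile

from lib.model_zoo.optimus_models.configuration_utils import PretrainedConfig


class ToyConfig(PretrainedConfig):
    """Hypothetical derived config used only to exercise the base class."""

    def __init__(self, vocab_size_or_config_json_file=-1,
                 hidden_size=256, num_layers=4, **kwargs):
        super(ToyConfig, self).__init__(**kwargs)
        # from_dict() instantiates the class with vocab_size_or_config_json_file=-1,
        # so a derived config must accept that keyword (as BertConfig does below).
        self.vocab_size_or_config_json_file = vocab_size_or_config_json_file
        self.hidden_size = hidden_size
        self.num_layers = num_layers


save_dir = tempfile.mkdtemp()
config = ToyConfig(hidden_size=512, num_labels=3)
config.save_pretrained(save_dir)  # writes CONFIG_NAME (config.json) into save_dir

# Reload; kwargs that match config attributes override the saved values,
# everything else is handed back when return_unused_kwargs=True.
reloaded, unused = ToyConfig.from_pretrained(
    save_dir, num_layers=8, foo=False, return_unused_kwargs=True)
assert reloaded.hidden_size == 512 and reloaded.num_layers == 8
assert unused == {'foo': False}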
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" BERT model configuration """

from __future__ import absolute_import, division, print_function, unicode_literals

import json
import logging
import sys
from io import open

from .configuration_utils import PretrainedConfig

logger = logging.getLogger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-config.json",
    'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-config.json",
    'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-config.json",
    'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-config.json",
    'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-config.json",
    'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-config.json",
    'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-config.json",
    'bert-base-german-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-config.json",
    'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-config.json",
    'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-config.json",
    'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-config.json",
    'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-config.json",
    'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-config.json",
}


class BertConfig(PretrainedConfig):
    r"""
        :class:`~pytorch_transformers.BertConfig` is the configuration class to store the configuration of a
        `BertModel`.

        Arguments:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads for each attention layer in
                the Transformer encoder.
            intermediate_size: The size of the "intermediate" (i.e., feed-forward)
                layer in the Transformer encoder.
            hidden_act: The non-linear activation function (function or string) in the
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this model might
                ever be used with. Typically set this to something large just in case
                (e.g., 512 or 1024 or 2048).
            type_vocab_size: The vocabulary size of the `token_type_ids` passed into
                `BertModel`.
            initializer_range: The stdev of the truncated_normal_initializer for
                initializing all weight matrices.
            layer_norm_eps: The epsilon used by LayerNorm.
    """
    pretrained_config_archive_map = BERT_PRETRAINED_CONFIG_ARCHIVE_MAP

    def __init__(self,
                 vocab_size_or_config_json_file=30522,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=2,
                 initializer_range=0.02,
                 layer_norm_eps=1e-12,
                 **kwargs):
        super(BertConfig, self).__init__(**kwargs)
        if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2 and
                                                               isinstance(vocab_size_or_config_json_file, unicode)):
            # A string argument is treated as a path to a JSON config file.
            with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
            self.layer_norm_eps = layer_norm_eps
        else:
            raise ValueError("First argument must be either a vocabulary size (int)"
                             " or the path to a pretrained model config file (str)")
Versatile-Diffusion-master
lib/model_zoo/optimus_models/configuration_bert.py
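A short sketch of the two ways the BertConfig above can be constructed: from keyword arguments (integer vocabulary size) or from a JSON file. The bert_config.json path is a placeholder.

from lib.model_zoo.optimus_models.configuration_bert import BertConfig

# Integer first argument: treated as the vocabulary size; the remaining
# hyper-parameters come from keyword arguments / defaults.
config = BertConfig(vocab_size_or_config_json_file=30522,
                    hidden_size=768,
                    num_hidden_layers=12,
                    num_attention_heads=12,
                    intermediate_size=3072)
print(config.vocab_size, config.layer_norm_eps)  # 30522 1e-12

# String first argument: every key of the JSON file is copied onto the config.
# config = BertConfig("path/to/bert_config.json")

# Shortcut names resolve through BERT_PRETRAINED_CONFIG_ARCHIVE_MAP via the
# inherited from_pretrained(); a local directory with a config.json also works.
# config = BertConfig.from_pretrained('bert-base-uncased')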
# coding=utf-8 # Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for OpenAI GPT.""" from __future__ import (absolute_import, division, print_function, unicode_literals) import logging import os import json import six import copy from io import open from .file_utils import cached_path logger = logging.getLogger(__name__) SPECIAL_TOKENS_MAP_FILE = 'special_tokens_map.json' ADDED_TOKENS_FILE = 'added_tokens.json' TOKENIZER_CONFIG_FILE = 'tokenizer_config.json' class PreTrainedTokenizer(object): """ Base class for all tokenizers. Handle all the shared methods for tokenization and special tokens as well as methods dowloading/caching/loading pretrained tokenizers as well as adding tokens to the vocabulary. This class also contain the added tokens in a unified way on top of all tokenizers so we don't have to handle the specific vocabulary augmentation methods of the various underlying dictionary structures (BPE, sentencepiece...). Class attributes (overridden by derived classes): - ``vocab_files_names``: a python ``dict`` with, as keys, the ``__init__`` keyword name of each vocabulary file required by the model, and as associated values, the filename for saving the associated file (string). - ``pretrained_vocab_files_map``: a python ``dict of dict`` the high-level keys being the ``__init__`` keyword name of each vocabulary file required by the model, the low-level being the `short-cut-names` (string) of the pretrained models with, as associated values, the `url` (string) to the associated pretrained vocabulary file. - ``max_model_input_sizes``: a python ``dict`` with, as keys, the `short-cut-names` (string) of the pretrained models, and as associated values, the maximum length of the sequence inputs of this model, or None if the model has no maximum input size. - ``pretrained_init_configuration``: a python ``dict`` with, as keys, the `short-cut-names` (string) of the pretrained models, and as associated values, a dictionnary of specific arguments to pass to the ``__init__``method of the tokenizer class for this pretrained model when loading the tokenizer with the ``from_pretrained()`` method. Parameters: - ``bos_token``: (`Optional`) string: a beginning of sentence token. Will be associated to ``self.bos_token`` and ``self.bos_token_id`` - ``eos_token``: (`Optional`) string: an end of sentence token. Will be associated to ``self.eos_token`` and ``self.eos_token_id`` - ``unk_token``: (`Optional`) string: an unknown token. Will be associated to ``self.unk_token`` and ``self.unk_token_id`` - ``sep_token``: (`Optional`) string: a separation token (e.g. to separate context and query in an input sequence). Will be associated to ``self.sep_token`` and ``self.sep_token_id`` - ``pad_token``: (`Optional`) string: a padding token. Will be associated to ``self.pad_token`` and ``self.pad_token_id`` - ``cls_token``: (`Optional`) string: a classification token (e.g. 
to extract a summary of an input sequence leveraging self-attention along the full depth of the model). Will be associated to ``self.cls_token`` and ``self.cls_token_id`` - ``mask_token``: (`Optional`) string: a masking token (e.g. when training a model with masked-language modeling). Will be associated to ``self.mask_token`` and ``self.mask_token_id`` - ``additional_special_tokens``: (`Optional`) list: a list of additional special tokens. Adding all special tokens here ensure they won't be split by the tokenization process. Will be associated to ``self.additional_special_tokens`` and ``self.additional_special_tokens_ids`` """ vocab_files_names = {} pretrained_vocab_files_map = {} pretrained_init_configuration = {} max_model_input_sizes = {} SPECIAL_TOKENS_ATTRIBUTES = ["bos_token", "eos_token", "unk_token", "sep_token", "pad_token", "cls_token", "mask_token", "additional_special_tokens"] @property def bos_token(self): """ Beginning of sentence token (string). Log an error if used while not having been set. """ if self._bos_token is None: logger.error("Using bos_token, but it is not set yet.") return self._bos_token @property def eos_token(self): """ End of sentence token (string). Log an error if used while not having been set. """ if self._eos_token is None: logger.error("Using eos_token, but it is not set yet.") return self._eos_token @property def unk_token(self): """ Unknown token (string). Log an error if used while not having been set. """ if self._unk_token is None: logger.error("Using unk_token, but it is not set yet.") return self._unk_token @property def sep_token(self): """ Separation token (string). E.g. separate context and query in an input sequence. Log an error if used while not having been set. """ if self._sep_token is None: logger.error("Using sep_token, but it is not set yet.") return self._sep_token @property def pad_token(self): """ Padding token (string). Log an error if used while not having been set. """ if self._pad_token is None: logger.error("Using pad_token, but it is not set yet.") return self._pad_token @property def cls_token(self): """ Classification token (string). E.g. to extract a summary of an input sequence leveraging self-attention along the full depth of the model. Log an error if used while not having been set. """ if self._cls_token is None: logger.error("Using cls_token, but it is not set yet.") return self._cls_token @property def mask_token(self): """ Mask token (string). E.g. when training a model with masked-language modeling. Log an error if used while not having been set. """ if self._mask_token is None: logger.error("Using mask_token, but it is not set yet.") return self._mask_token @property def additional_special_tokens(self): """ All the additional special tokens you may want to use (list of strings). Log an error if used while not having been set. 
""" if self._additional_special_tokens is None: logger.error("Using additional_special_tokens, but it is not set yet.") return self._additional_special_tokens @bos_token.setter def bos_token(self, value): self._bos_token = value @eos_token.setter def eos_token(self, value): self._eos_token = value @unk_token.setter def unk_token(self, value): self._unk_token = value @sep_token.setter def sep_token(self, value): self._sep_token = value @pad_token.setter def pad_token(self, value): self._pad_token = value @cls_token.setter def cls_token(self, value): self._cls_token = value @mask_token.setter def mask_token(self, value): self._mask_token = value @additional_special_tokens.setter def additional_special_tokens(self, value): self._additional_special_tokens = value @property def bos_token_id(self): """ Id of the beginning of sentence token in the vocabulary. Log an error if used while not having been set. """ return self.convert_tokens_to_ids(self.bos_token) @property def eos_token_id(self): """ Id of the end of sentence token in the vocabulary. Log an error if used while not having been set. """ return self.convert_tokens_to_ids(self.eos_token) @property def unk_token_id(self): """ Id of the unknown token in the vocabulary. Log an error if used while not having been set. """ return self.convert_tokens_to_ids(self.unk_token) @property def sep_token_id(self): """ Id of the separation token in the vocabulary. E.g. separate context and query in an input sequence. Log an error if used while not having been set. """ return self.convert_tokens_to_ids(self.sep_token) @property def pad_token_id(self): """ Id of the padding token in the vocabulary. Log an error if used while not having been set. """ return self.convert_tokens_to_ids(self.pad_token) @property def cls_token_id(self): """ Id of the classification token in the vocabulary. E.g. to extract a summary of an input sequence leveraging self-attention along the full depth of the model. Log an error if used while not having been set. """ return self.convert_tokens_to_ids(self.cls_token) @property def mask_token_id(self): """ Id of the mask token in the vocabulary. E.g. when training a model with masked-language modeling. Log an error if used while not having been set. """ return self.convert_tokens_to_ids(self.mask_token) @property def additional_special_tokens_ids(self): """ Ids of all the additional special tokens in the vocabulary (list of integers). Log an error if used while not having been set. 
""" return self.convert_tokens_to_ids(self.additional_special_tokens) def __init__(self, max_len=None, **kwargs): self._bos_token = None self._eos_token = None self._unk_token = None self._sep_token = None self._pad_token = None self._cls_token = None self._mask_token = None self._additional_special_tokens = [] self.max_len = max_len if max_len is not None else int(1e12) # Added tokens self.added_tokens_encoder = {} self.added_tokens_decoder = {} # inputs and kwargs for saving and re-loading (see ``from_pretrained`` and ``save_pretrained``) self.init_inputs = () self.init_kwargs = {} for key, value in kwargs.items(): if key in self.SPECIAL_TOKENS_ATTRIBUTES: if key == 'additional_special_tokens': assert isinstance(value, (list, tuple)) and all(isinstance(t, str) or (six.PY2 and isinstance(t, unicode)) for t in value) else: assert isinstance(value, str) or (six.PY2 and isinstance(value, unicode)) setattr(self, key, value) @classmethod def from_pretrained(cls, *inputs, **kwargs): r""" Instantiate a :class:`~pytorch_transformers.PreTrainedTokenizer` (or a derived class) from a predefined tokenizer. Args: pretrained_model_name_or_path: either: - a string with the `shortcut name` of a predefined tokenizer to load from cache or download, e.g.: ``bert-base-uncased``. - a path to a `directory` containing vocabulary files required by the tokenizer, for instance saved using the :func:`~pytorch_transformers.PreTrainedTokenizer.save_pretrained` method, e.g.: ``./my_model_directory/``. - (not applicable to all derived classes) a path or url to a single saved vocabulary file if and only if the tokenizer only requires a single vocabulary file (e.g. Bert, XLNet), e.g.: ``./my_model_directory/vocab.txt``. cache_dir: (`optional`) string: Path to a directory in which a downloaded predefined tokenizer vocabulary files should be cached if the standard cache should not be used. force_download: (`optional`) boolean, default False: Force to (re-)download the vocabulary files and override the cached versions if they exists. proxies: (`optional`) dict, default None: A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}. The proxies are used on each request. inputs: (`optional`) positional arguments: will be passed to the Tokenizer ``__init__`` method. kwargs: (`optional`) keyword arguments: will be passed to the Tokenizer ``__init__`` method. Can be used to set special tokens like ``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``, ``mask_token``, ``additional_special_tokens``. See parameters in the doc string of :class:`~pytorch_transformers.PreTrainedTokenizer` for details. Examples:: # We can't instantiate directly the base class `PreTrainedTokenizer` so let's show our examples on a derived class: BertTokenizer # Download vocabulary from S3 and cache. tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') # If vocabulary files are in a directory (e.g. tokenizer was saved using `save_pretrained('./test/saved_model/')`) tokenizer = BertTokenizer.from_pretrained('./test/saved_model/') # If the tokenizer uses a single vocabulary file, you can point directly to this file tokenizer = BertTokenizer.from_pretrained('./test/saved_model/my_vocab.txt') # You can link tokens to special vocabulary when instantiating tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', unk_token='<unk>') # You should be sure '<unk>' is in the vocabulary when doing that. 
# Otherwise use tokenizer.add_special_tokens({'unk_token': '<unk>'}) instead) assert tokenizer.unk_token == '<unk>' """ return cls._from_pretrained(*inputs, **kwargs) @classmethod def _from_pretrained(cls, pretrained_model_name_or_path, *init_inputs, **kwargs): cache_dir = kwargs.pop('cache_dir', None) force_download = kwargs.pop('force_download', False) proxies = kwargs.pop('proxies', None) s3_models = list(cls.max_model_input_sizes.keys()) vocab_files = {} init_configuration = {} if pretrained_model_name_or_path in s3_models: # Get the vocabulary from AWS S3 bucket for file_id, map_list in cls.pretrained_vocab_files_map.items(): vocab_files[file_id] = map_list[pretrained_model_name_or_path] if cls.pretrained_init_configuration and pretrained_model_name_or_path in cls.pretrained_init_configuration: init_configuration = cls.pretrained_init_configuration[pretrained_model_name_or_path] else: # Get the vocabulary from local files logger.info( "Model name '{}' not found in model shortcut name list ({}). " "Assuming '{}' is a path or url to a directory containing tokenizer files.".format( pretrained_model_name_or_path, ', '.join(s3_models), pretrained_model_name_or_path)) # Look for the tokenizer main vocabulary files for file_id, file_name in cls.vocab_files_names.items(): if os.path.isdir(pretrained_model_name_or_path): # If a directory is provided we look for the standard filenames full_file_name = os.path.join(pretrained_model_name_or_path, file_name) else: # If a path to a file is provided we use it (will only work for non-BPE tokenizer using a single vocabulary file) full_file_name = pretrained_model_name_or_path if not os.path.exists(full_file_name): logger.info("Didn't find file {}. We won't load it.".format(full_file_name)) full_file_name = None vocab_files[file_id] = full_file_name # Look for the additional tokens files additional_files_names = {'added_tokens_file': ADDED_TOKENS_FILE, 'special_tokens_map_file': SPECIAL_TOKENS_MAP_FILE, 'tokenizer_config_file': TOKENIZER_CONFIG_FILE, } # If a path to a file was provided, get the parent directory saved_directory = pretrained_model_name_or_path if os.path.exists(saved_directory) and not os.path.isdir(saved_directory): saved_directory = os.path.dirname(saved_directory) for file_id, file_name in additional_files_names.items(): full_file_name = os.path.join(saved_directory, file_name) if not os.path.exists(full_file_name): logger.info("Didn't find file {}. We won't load it.".format(full_file_name)) full_file_name = None vocab_files[file_id] = full_file_name if all(full_file_name is None for full_file_name in vocab_files.values()): logger.error( "Model name '{}' was not found in model name list ({}). " "We assumed '{}' was a path or url but couldn't find tokenizer files" "at this path or url.".format( pretrained_model_name_or_path, ', '.join(s3_models), pretrained_model_name_or_path, )) return None # Get files from url, cache, or disk depending on the case try: resolved_vocab_files = {} for file_id, file_path in vocab_files.items(): if file_path is None: resolved_vocab_files[file_id] = None else: resolved_vocab_files[file_id] = cached_path(file_path, cache_dir=cache_dir, force_download=force_download, proxies=proxies) except EnvironmentError as e: if pretrained_model_name_or_path in s3_models: logger.error("Couldn't reach server to download vocabulary.") else: logger.error( "Model name '{}' was not found in model name list ({}). 
" "We assumed '{}' was a path or url but couldn't find files {} " "at this path or url.".format( pretrained_model_name_or_path, ', '.join(s3_models), pretrained_model_name_or_path, str(vocab_files.keys()))) raise e for file_id, file_path in vocab_files.items(): if file_path == resolved_vocab_files[file_id]: logger.info("loading file {}".format(file_path)) else: logger.info("loading file {} from cache at {}".format( file_path, resolved_vocab_files[file_id])) # Prepare tokenizer initialization kwargs # Did we saved some inputs and kwargs to reload ? tokenizer_config_file = resolved_vocab_files.pop('tokenizer_config_file', None) if tokenizer_config_file is not None: init_kwargs = json.load(open(tokenizer_config_file, encoding="utf-8")) saved_init_inputs = init_kwargs.pop('init_inputs', ()) if not init_inputs: init_inputs = saved_init_inputs else: init_kwargs = init_configuration # Update with newly provided kwargs init_kwargs.update(kwargs) # Set max length if needed if pretrained_model_name_or_path in cls.max_model_input_sizes: # if we're using a pretrained model, ensure the tokenizer # wont index sequences longer than the number of positional embeddings max_len = cls.max_model_input_sizes[pretrained_model_name_or_path] if max_len is not None and isinstance(max_len, (int, float)): init_kwargs['max_len'] = min(init_kwargs.get('max_len', int(1e12)), max_len) # Merge resolved_vocab_files arguments in init_kwargs. added_tokens_file = resolved_vocab_files.pop('added_tokens_file', None) special_tokens_map_file = resolved_vocab_files.pop('special_tokens_map_file', None) for args_name, file_path in resolved_vocab_files.items(): if args_name not in init_kwargs: init_kwargs[args_name] = file_path if special_tokens_map_file is not None: special_tokens_map = json.load(open(special_tokens_map_file, encoding="utf-8")) for key, value in special_tokens_map.items(): if key not in init_kwargs: init_kwargs[key] = value # Instantiate tokenizer. tokenizer = cls(*init_inputs, **init_kwargs) # Save inputs and kwargs for saving and re-loading with ``save_pretrained`` tokenizer.init_inputs = init_inputs tokenizer.init_kwargs = init_kwargs # Add supplementary tokens. if added_tokens_file is not None: added_tok_encoder = json.load(open(added_tokens_file, encoding="utf-8")) added_tok_decoder = {v:k for k, v in added_tok_encoder.items()} tokenizer.added_tokens_encoder.update(added_tok_encoder) tokenizer.added_tokens_decoder.update(added_tok_decoder) return tokenizer def save_pretrained(self, save_directory): """ Save the tokenizer vocabulary files together with: - added tokens, - special-tokens-to-class-attributes-mapping, - tokenizer instantiation positional and keywords inputs (e.g. do_lower_case for Bert). This won't save modifications other than (added tokens and special token mapping) you may have applied to the tokenizer after the instantion (e.g. modifying tokenizer.do_lower_case after creation). This method make sure the full tokenizer can then be re-loaded using the :func:`~pytorch_transformers.PreTrainedTokenizer.from_pretrained` class method. 
""" if not os.path.isdir(save_directory): logger.error("Saving directory ({}) should be a directory".format(save_directory)) return special_tokens_map_file = os.path.join(save_directory, SPECIAL_TOKENS_MAP_FILE) added_tokens_file = os.path.join(save_directory, ADDED_TOKENS_FILE) tokenizer_config_file = os.path.join(save_directory, TOKENIZER_CONFIG_FILE) tokenizer_config = copy.deepcopy(self.init_kwargs) tokenizer_config['init_inputs'] = copy.deepcopy(self.init_inputs) for file_id in self.vocab_files_names.keys(): tokenizer_config.pop(file_id, None) with open(tokenizer_config_file, 'w', encoding='utf-8') as f: f.write(json.dumps(tokenizer_config, ensure_ascii=False)) with open(special_tokens_map_file, 'w', encoding='utf-8') as f: f.write(json.dumps(self.special_tokens_map, ensure_ascii=False)) with open(added_tokens_file, 'w', encoding='utf-8') as f: if self.added_tokens_encoder: out_str = json.dumps(self.added_tokens_encoder, ensure_ascii=False) else: out_str = u"{}" f.write(out_str) vocab_files = self.save_vocabulary(save_directory) return vocab_files + (special_tokens_map_file, added_tokens_file) def save_vocabulary(self, save_directory): """ Save the tokenizer vocabulary to a directory. This method does *NOT* save added tokens and special token mappings. Please use :func:`~pytorch_transformers.PreTrainedTokenizer.save_pretrained` `()` to save the full Tokenizer state if you want to reload it using the :func:`~pytorch_transformers.PreTrainedTokenizer.from_pretrained` class method. """ raise NotImplementedError def vocab_size(self): """ Size of the base vocabulary (without the added tokens) """ raise NotImplementedError def __len__(self): """ Size of the full vocabulary with the added tokens """ return self.vocab_size + len(self.added_tokens_encoder) def add_tokens(self, new_tokens): """ Add a list of new tokens to the tokenizer class. If the new tokens are not in the vocabulary, they are added to it with indices starting from length of the current vocabulary. Args: new_tokens: list of string. Each string is a token to add. Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer assign the index of the ``unk_token`` to them). Returns: Number of tokens added to the vocabulary. Examples:: # Let's see how to increase the vocabulary of Bert model and tokenizer tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = BertModel.from_pretrained('bert-base-uncased') num_added_toks = tokenizer.add_tokens(['new_tok1', 'my_new-tok2']) print('We have added', num_added_toks, 'tokens') model.resize_token_embeddings(len(tokenizer)) # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e. the length of the tokenizer. """ if not new_tokens: return 0 to_add_tokens = [] for token in new_tokens: assert isinstance(token, str) or (six.PY2 and isinstance(token, unicode)) if token != self.unk_token and \ self.convert_tokens_to_ids(token) == self.convert_tokens_to_ids(self.unk_token): to_add_tokens.append(token) logger.info("Adding %s to the vocabulary", token) added_tok_encoder = dict((tok, len(self) + i) for i, tok in enumerate(to_add_tokens)) added_tok_decoder = {v:k for k, v in added_tok_encoder.items()} self.added_tokens_encoder.update(added_tok_encoder) self.added_tokens_decoder.update(added_tok_decoder) return len(to_add_tokens) def add_special_tokens(self, special_tokens_dict): """ Add a dictionary of special tokens (eos, pad, cls...) to the encoder and link them to class attributes. 
If special tokens are NOT in the vocabulary, they are added to it (indexed starting from the last index of the current vocabulary). Using `add_special_tokens` will ensure your special tokens can be used in several ways: - special tokens are carefully handled by the tokenizer (they are never split) - you can easily refer to special tokens using tokenizer class attributes like `tokenizer.cls_token`. This makes it easy to develop model-agnostic training and fine-tuning scripts. When possible, special tokens are already registered for provided pretrained models (ex: BertTokenizer cls_token is already registered to be '[CLS]' and XLM's one is also registered to be '</s>') Args: special_tokens_dict: dict of string. Keys should be in the list of predefined special attributes: [``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``, ``mask_token``, ``additional_special_tokens``]. Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer assign the index of the ``unk_token`` to them). Returns: Number of tokens added to the vocabulary. Examples:: # Let's see how to add a new classification token to GPT-2 tokenizer = GPT2Tokenizer.from_pretrained('gpt2') model = GPT2Model.from_pretrained('gpt2') special_tokens_dict = {'cls_token': '<CLS>'} num_added_toks = tokenizer.add_special_tokens(special_tokens_dict) print('We have added', num_added_toks, 'tokens') model.resize_token_embeddings(len(tokenizer)) # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e. the length of the tokenizer. assert tokenizer.cls_token == '<CLS>' """ if not special_tokens_dict: return 0 added_tokens = 0 for key, value in special_tokens_dict.items(): assert key in self.SPECIAL_TOKENS_ATTRIBUTES if key == 'additional_special_tokens': assert isinstance(value, (list, tuple)) and all(isinstance(t, str) or (six.PY2 and isinstance(t, unicode)) for t in value) added_tokens += self.add_tokens(value) else: assert isinstance(value, str) or (six.PY2 and isinstance(value, unicode)) added_tokens += self.add_tokens([value]) logger.info("Assigning %s to the %s key of the tokenizer", value, key) setattr(self, key, value) return added_tokens def tokenize(self, text, **kwargs): """ Converts a string in a sequence of tokens (string), using the tokenizer. Split in words for word-based vocabulary or sub-words for sub-word-based vocabularies (BPE/SentencePieces/WordPieces). Take care of added tokens. 
""" def split_on_token(tok, text): result = [] split_text = text.split(tok) for i, sub_text in enumerate(split_text): sub_text = sub_text.strip() if i == 0 and not sub_text: result += [tok] elif i == len(split_text) - 1: if sub_text: result += [sub_text] else: pass else: if sub_text: result += [sub_text] result += [tok] return result def split_on_tokens(tok_list, text): if not text: return [] if not tok_list: return self._tokenize(text, **kwargs) tokenized_text = [] text_list = [text] for tok in tok_list: tokenized_text = [] for sub_text in text_list: if sub_text not in self.added_tokens_encoder \ and sub_text not in self.all_special_tokens: tokenized_text += split_on_token(tok, sub_text) else: tokenized_text += [sub_text] text_list = tokenized_text return sum((self._tokenize(token, **kwargs) if token not \ in self.added_tokens_encoder and token not in self.all_special_tokens \ else [token] for token in tokenized_text), []) added_tokens = list(self.added_tokens_encoder.keys()) + self.all_special_tokens tokenized_text = split_on_tokens(added_tokens, text) return tokenized_text def _tokenize(self, text, **kwargs): """ Converts a string in a sequence of tokens (string), using the tokenizer. Split in words for word-based vocabulary or sub-words for sub-word-based vocabularies (BPE/SentencePieces/WordPieces). Do NOT take care of added tokens. """ raise NotImplementedError def convert_tokens_to_ids(self, tokens): """ Converts a single token, or a sequence of tokens, (str/unicode) in a single integer id (resp. a sequence of ids), using the vocabulary. """ if tokens is None: return None if isinstance(tokens, str) or (six.PY2 and isinstance(tokens, unicode)): return self._convert_token_to_id_with_added_voc(tokens) ids = [] for token in tokens: ids.append(self._convert_token_to_id_with_added_voc(token)) if len(ids) > self.max_len: logger.warning("Token indices sequence length is longer than the specified maximum sequence length " "for this model ({} > {}). Running this sequence through the model will result in " "indexing errors".format(len(ids), self.max_len)) return ids def _convert_token_to_id_with_added_voc(self, token): if token is None: return None if token in self.added_tokens_encoder: return self.added_tokens_encoder[token] return self._convert_token_to_id(token) def _convert_token_to_id(self, token): raise NotImplementedError def encode(self, text, text_pair=None, add_special_tokens=False, **kwargs): """ Converts a string in a sequence of ids (integer), using the tokenizer and vocabulary. Same as doing ``self.convert_tokens_to_ids(self.tokenize(text))``. Args: text: The first sequence to be encoded. text_pair: Optional second sequence to be encoded. add_special_tokens: if set to ``True``, the sequences will be encoded with the special tokens relative to their model. 
**kwargs: passed to the `self.tokenize()` method """ if text_pair is None: if add_special_tokens: return self.add_special_tokens_single_sentence(self.convert_tokens_to_ids(self.tokenize(text, **kwargs))) else: return self.convert_tokens_to_ids(self.tokenize(text, **kwargs)) first_sentence_tokens = [self._convert_token_to_id(token) for token in self.tokenize(text, **kwargs)] second_sentence_tokens = [self._convert_token_to_id(token) for token in self.tokenize(text_pair, **kwargs)] if add_special_tokens: return self.add_special_tokens_sentences_pair(first_sentence_tokens, second_sentence_tokens) else: return first_sentence_tokens, second_sentence_tokens def add_special_tokens_single_sentence(self, token_ids): logger.warning("This tokenizer does not make use of special tokens. The sequence has been returned with no modification.") return token_ids def add_special_tokens_sentences_pair(self, token_ids_0, token_ids_1): logger.warning("This tokenizer does not make use of special tokens. The two sequences have been concatenated.") return token_ids_0 + token_ids_1 def convert_ids_to_tokens(self, ids, skip_special_tokens=False): """ Converts a single index or a sequence of indices (integers) in a token " (resp.) a sequence of tokens (str/unicode), using the vocabulary and added tokens. Args: skip_special_tokens: Don't decode special tokens (self.all_special_tokens). Default: False """ if isinstance(ids, int): if ids in self.added_tokens_decoder: return self.added_tokens_decoder[ids] else: return self._convert_id_to_token(ids) tokens = [] for index in ids: if skip_special_tokens and index in self.all_special_ids: continue if index in self.added_tokens_decoder: tokens.append(self.added_tokens_decoder[index]) else: tokens.append(self._convert_id_to_token(index)) return tokens def _convert_id_to_token(self, index): raise NotImplementedError def convert_tokens_to_string(self, tokens): """ Converts a sequence of tokens (string) in a single string. The most simple way to do it is ' '.join(self.convert_ids_to_tokens(token_ids)) but we often want to remove sub-word tokenization artifacts at the same time. """ return ' '.join(self.convert_ids_to_tokens(tokens)) def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True): """ Converts a sequence of ids (integer) in a string, using the tokenizer and vocabulary with options to remove special tokens and clean up tokenization spaces. Similar to doing ``self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))``. """ filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separatly for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/pytorch-transformers/issues/1133 sub_texts = [] current_sub_text = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(current_sub_text)) current_sub_text = [] sub_texts.append(" " + token) else: current_sub_text.append(token) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(current_sub_text)) text = ''.join(sub_texts) if self._sep_token is not None and self._sep_token in text: text = text.replace(self._cls_token, self._sep_token) split_text = list(filter(lambda sentence: len(sentence) > 0, text.split(self._sep_token))) if clean_up_tokenization_spaces: clean_text = [self.clean_up_tokenization(text) for text in split_text] return clean_text else: return split_text else: if clean_up_tokenization_spaces: clean_text = self.clean_up_tokenization(text) return clean_text else: return text @property def special_tokens_map(self): """ A dictionary mapping special token class attribute (cls_token, unk_token...) to their values ('<unk>', '<cls>'...) """ set_attr = {} for attr in self.SPECIAL_TOKENS_ATTRIBUTES: attr_value = getattr(self, "_" + attr) if attr_value: set_attr[attr] = attr_value return set_attr @property def all_special_tokens(self): """ List all the special tokens ('<unk>', '<cls>'...) mapped to class attributes (cls_token, unk_token...). """ all_toks = [] set_attr = self.special_tokens_map for attr_value in set_attr.values(): all_toks = all_toks + (list(attr_value) if isinstance(attr_value, (list, tuple)) else [attr_value]) all_toks = list(set(all_toks)) return all_toks @property def all_special_ids(self): """ List the vocabulary indices of the special tokens ('<unk>', '<cls>'...) mapped to class attributes (cls_token, unk_token...). """ all_toks = self.all_special_tokens all_ids = list(self._convert_token_to_id(t) for t in all_toks) return all_ids @staticmethod def clean_up_tokenization(out_string): """ Clean up a list of simple English tokenization artifacts like spaces before punctuations and abreviated forms. """ out_string = out_string.replace(' .', '.').replace(' ?', '?').replace(' !', '!').replace(' ,', ',' ).replace(" ' ", "'").replace(" n't", "n't").replace(" 'm", "'m").replace(" do not", " don't" ).replace(" 's", "'s").replace(" 've", "'ve").replace(" 're", "'re") return out_string
Versatile-Diffusion-master
lib/model_zoo/optimus_models/tokenization_utils.py
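PreTrainedTokenizer above is an abstract base class (its _tokenize / _convert_token_to_id hooks raise NotImplementedError), so the sketch below wires up a hypothetical whitespace tokenizer over a three-token vocabulary purely to illustrate the special-token and added-token machinery.

from lib.model_zoo.optimus_models.tokenization_utils import PreTrainedTokenizer


class WhitespaceTokenizer(PreTrainedTokenizer):
    """Hypothetical toy subclass: splits on whitespace over a fixed vocab."""

    def __init__(self, vocab, **kwargs):
        super(WhitespaceTokenizer, self).__init__(**kwargs)
        self.vocab = {tok: i for i, tok in enumerate(vocab)}
        self.ids_to_tokens = {i: tok for tok, i in self.vocab.items()}

    @property
    def vocab_size(self):
        # __len__ adds len(self.added_tokens_encoder) on top of this.
        return len(self.vocab)

    def _tokenize(self, text):
        return text.split()

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab[self.unk_token])

    def _convert_id_to_token(self, index):
        return self.ids_to_tokens.get(index, self.unk_token)


tok = WhitespaceTokenizer(["[UNK]", "hello", "world"], unk_token="[UNK]")
print(tok.encode("hello world"))             # [1, 2]

# New special tokens are appended after the base vocabulary and are never
# split by tokenize(), which is what add_special_tokens() guarantees.
tok.add_special_tokens({"cls_token": "[CLS]"})
print(tok.cls_token_id, len(tok))            # 3 4
print(tok.tokenize("[CLS] hello world"))     # ['[CLS]', 'hello', 'world']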
# coding=utf-8 # Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for OpenAI GPT.""" from __future__ import (absolute_import, division, print_function, unicode_literals) import sys import json import logging import os import regex as re from io import open try: from functools import lru_cache except ImportError: # Just a dummy decorator to get the checks to run on python2 # because honestly I don't want to support a byte-level unicode BPE tokenizer on python 2 right now. def lru_cache(): return lambda func: func from .tokenization_utils import PreTrainedTokenizer logger = logging.getLogger(__name__) VOCAB_FILES_NAMES = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', } PRETRAINED_VOCAB_FILES_MAP = { 'vocab_file': { 'gpt2': "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-vocab.json", 'gpt2-medium': "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-vocab.json", 'gpt2-large': "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-large-vocab.json", }, 'merges_file': { 'gpt2': "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt", 'gpt2-medium': "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-merges.txt", 'gpt2-large': "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-large-merges.txt", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { 'gpt2': 1024, 'gpt2-medium': 1024, 'gpt2-large': 1024, } @lru_cache() def bytes_to_unicode(): """ Returns list of utf-8 byte and a mapping to unicode strings. We specifically avoids mapping to whitespace/control characters the bpe code barfs on. The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. This is a signficant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings. """ _chr = unichr if sys.version_info[0] == 2 else chr bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1)) cs = bs[:] n = 0 for b in range(2**8): if b not in bs: bs.append(b) cs.append(2**8+n) n += 1 cs = [_chr(n) for n in cs] return dict(zip(bs, cs)) def get_pairs(word): """Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length strings). """ pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char return pairs class GPT2Tokenizer(PreTrainedTokenizer): """ GPT-2 BPE tokenizer. Peculiarities: - Byte-level Byte-Pair-Encoding - Requires a space to start the input string => will add a space is there isn't. 
As a consequence, this tokenizer `encode` and `decode` method will not conserve the absence of a space at the beginning of a string: `tokenizer.decode(tokenizer.encode("Hello")) = " Hello" """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__(self, vocab_file, merges_file, errors='replace', unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", **kwargs): super(GPT2Tokenizer, self).__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs) self.max_len_single_sentence = self.max_len # no default special tokens - you can update this value if you add special tokens self.max_len_sentences_pair = self.max_len # no default special tokens - you can update this value if you add special tokens self.encoder = json.load(open(vocab_file, encoding="utf-8")) self.decoder = {v: k for k, v in self.encoder.items()} self.errors = errors # how to handle errors in decoding self.byte_encoder = bytes_to_unicode() self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} bpe_data = open(merges_file, encoding='utf-8').read().split('\n')[1:-1] bpe_merges = [tuple(merge.split()) for merge in bpe_data] self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges)))) self.cache = {} # Should haved added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""") @property def vocab_size(self): return len(self.encoder) def bpe(self, token): if token in self.cache: return self.cache[token] word = tuple(token) pairs = get_pairs(word) if not pairs: return token while True: bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf'))) if bigram not in self.bpe_ranks: break first, second = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) new_word.extend(word[i:j]) i = j except: new_word.extend(word[i:]) break if word[i] == first and i < len(word)-1 and word[i+1] == second: new_word.append(first+second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = ' '.join(word) self.cache[token] = word return word def _tokenize(self, text): """ Tokenize a string. """ text = ' ' + text # GPT-2 (and RoBERTa) tokenizers need at least one space to begin the sentence with. bpe_tokens = [] for token in re.findall(self.pat, text): if sys.version_info[0] == 2: token = ''.join(self.byte_encoder[ord(b)] for b in token) # Maps all our bytes to unicode strings, avoiding controle tokens of the BPE (spaces in our case) else: token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8')) # Maps all our bytes to unicode strings, avoiding controle tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' ')) return bpe_tokens def _convert_token_to_id(self, token): """ Converts a token (str/unicode) in an id using the vocab. """ return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (string/unicode) using the vocab.""" return self.decoder.get(index) def convert_tokens_to_string(self, tokens): """ Converts a sequence of tokens (string) in a single string. 
""" text = ''.join(tokens) text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors) return text def save_vocabulary(self, save_directory): """Save the tokenizer vocabulary and merge files to a directory.""" if not os.path.isdir(save_directory): logger.error("Vocabulary path ({}) should be a directory".format(save_directory)) return vocab_file = os.path.join(save_directory, VOCAB_FILES_NAMES['vocab_file']) merge_file = os.path.join(save_directory, VOCAB_FILES_NAMES['merges_file']) with open(vocab_file, 'w', encoding='utf-8') as f: f.write(json.dumps(self.encoder, ensure_ascii=False)) index = 0 with open(merge_file, "w", encoding="utf-8") as writer: writer.write(u'#version: 0.2\n') for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning("Saving vocabulary to {}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!".format(merge_file)) index = token_index writer.write(' '.join(bpe_tokens) + u'\n') index += 1 return vocab_file, merge_file # XX added def add_special_tokens_single_sentence(self, token_ids): return [self.added_tokens_encoder['<BOS>']] + token_ids + [self.added_tokens_encoder['<EOS>']]
Versatile-Diffusion-master
lib/model_zoo/optimus_models/tokenization_gpt2.py
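A hedged sketch of driving the byte-level BPE tokenizer above from local vocabulary files. The gpt2/vocab.json and gpt2/merges.txt paths are placeholders for files obtained separately, and the <BOS>/<EOS> registration mirrors what the Optimus-specific add_special_tokens_single_sentence override expects.

from lib.model_zoo.optimus_models.tokenization_gpt2 import GPT2Tokenizer

# Placeholder paths: a real vocab.json / merges.txt pair is required.
tokenizer = GPT2Tokenizer(vocab_file="gpt2/vocab.json",
                          merges_file="gpt2/merges.txt")

# Byte-level BPE prepends a space before tokenizing, so the round trip
# yields " Hello world" rather than "Hello world".
ids = tokenizer.encode("Hello world")
print(repr(tokenizer.decode(ids)))

# The overridden add_special_tokens_single_sentence looks the markers up in
# added_tokens_encoder, so <BOS>/<EOS> must be registered as added tokens first.
tokenizer.add_tokens(["<BOS>", "<EOS>"])
wrapped = tokenizer.add_special_tokens_single_sentence(ids)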
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch BERT model. """ from __future__ import absolute_import, division, print_function, unicode_literals import json import logging import math import os import sys from io import open import pdb import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from .modeling_utils import PreTrainedModel, prune_linear_layer from .configuration_bert import BertConfig from .file_utils import add_start_docstrings logger = logging.getLogger(__name__) BERT_PRETRAINED_MODEL_ARCHIVE_MAP = { 'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-pytorch_model.bin", 'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-pytorch_model.bin", 'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-pytorch_model.bin", 'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-pytorch_model.bin", 'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-pytorch_model.bin", 'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-pytorch_model.bin", 'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-pytorch_model.bin", 'bert-base-german-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-pytorch_model.bin", 'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-pytorch_model.bin", 'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-pytorch_model.bin", 'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-pytorch_model.bin", 'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-pytorch_model.bin", 'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-pytorch_model.bin", } def load_tf_weights_in_bert(model, config, tf_checkpoint_path): """ Load tf checkpoints in a pytorch model. """ try: import re import numpy as np import tensorflow as tf except ImportError: logger.error("Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
Please see " "https://www.tensorflow.org/install/ for installation instructions.") raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info("Converting TensorFlow checkpoint from {}".format(tf_path)) # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: logger.info("Loading TF weight {} with shape {}".format(name, shape)) array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) for name, array in zip(names, arrays): name = name.split('/') # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # which are not required for using pretrained model if any(n in ["adam_v", "adam_m", "global_step"] for n in name): logger.info("Skipping {}".format("/".join(name))) continue pointer = model for m_name in name: if re.fullmatch(r'[A-Za-z]+_\d+', m_name): l = re.split(r'_(\d+)', m_name) else: l = [m_name] if l[0] == 'kernel' or l[0] == 'gamma': pointer = getattr(pointer, 'weight') elif l[0] == 'output_bias' or l[0] == 'beta': pointer = getattr(pointer, 'bias') elif l[0] == 'output_weights': pointer = getattr(pointer, 'weight') elif l[0] == 'squad': pointer = getattr(pointer, 'classifier') else: try: pointer = getattr(pointer, l[0]) except AttributeError: logger.info("Skipping {}".format("/".join(name))) continue if len(l) >= 2: num = int(l[1]) pointer = pointer[num] if m_name[-11:] == '_embeddings': pointer = getattr(pointer, 'weight') elif m_name == 'kernel': array = np.transpose(array) try: assert pointer.shape == array.shape except AssertionError as e: e.args += (pointer.shape, array.shape) raise logger.info("Initialize PyTorch weight {}".format(name)) pointer.data = torch.from_numpy(array) return model def gelu(x): """Implementation of the gelu activation function. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) Also see https://arxiv.org/abs/1606.08415 """ return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) def swish(x): return x * torch.sigmoid(x) ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish} try: from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm except (ImportError, AttributeError) as e: logger.info("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .") BertLayerNorm = torch.nn.LayerNorm class BertEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings. 
""" def __init__(self, config): super(BertEmbeddings, self).__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, input_ids, token_type_ids=None, position_ids=None): seq_length = input_ids.size(1) if position_ids is None: position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device) position_ids = position_ids.unsqueeze(0).expand_as(input_ids) if token_type_ids is None: token_type_ids = torch.zeros_like(input_ids) words_embeddings = self.word_embeddings(input_ids) position_embeddings = self.position_embeddings(position_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = words_embeddings + position_embeddings + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class BertSelfAttention(nn.Module): def __init__(self, config): super(BertSelfAttention, self).__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( "The hidden size (%d) is not a multiple of the number of attention " "heads (%d)" % (config.hidden_size, config.num_attention_heads)) self.output_attentions = config.output_attentions self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, attention_mask, head_mask=None): mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) # Apply the attention mask is (precomputed for all layers in BertModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.Softmax(dim=-1)(attention_scores) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,) return outputs class BertSelfOutput(nn.Module): def __init__(self, config): super(BertSelfOutput, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertAttention(nn.Module): def __init__(self, config): super(BertAttention, self).__init__() self.self = BertSelfAttention(config) self.output = BertSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return mask = torch.ones(self.self.num_attention_heads, self.self.attention_head_size) heads = set(heads) - self.pruned_heads # Convert to set and emove already pruned heads for head in heads: # Compute how many pruned heads are before the head and move the index accordingly head = head - sum(1 if h < head else 0 for h in self.pruned_heads) mask[head] = 0 mask = mask.view(-1).contiguous().eq(1) index = torch.arange(len(mask))[mask].long() # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, input_tensor, attention_mask, head_mask=None): self_outputs = self.self(input_tensor, attention_mask, head_mask) attention_output = self.output(self_outputs[0], input_tensor) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs class BertIntermediate(nn.Module): def __init__(self, config): super(BertIntermediate, self).__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class BertOutput(nn.Module): def __init__(self, config): super(BertOutput, self).__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + 
input_tensor) return hidden_states class BertLayer(nn.Module): def __init__(self, config): super(BertLayer, self).__init__() self.attention = BertAttention(config) self.intermediate = BertIntermediate(config) self.output = BertOutput(config) def forward(self, hidden_states, attention_mask, head_mask=None): attention_outputs = self.attention(hidden_states, attention_mask, head_mask) attention_output = attention_outputs[0] intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them return outputs class BertEncoder(nn.Module): def __init__(self, config): super(BertEncoder, self).__init__() self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)]) def forward(self, hidden_states, attention_mask, head_mask=None): all_hidden_states = () all_attentions = () for i, layer_module in enumerate(self.layer): if self.output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module(hidden_states, attention_mask, head_mask[i]) hidden_states = layer_outputs[0] if self.output_attentions: all_attentions = all_attentions + (layer_outputs[1],) # Add last layer if self.output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) outputs = (hidden_states,) if self.output_hidden_states: outputs = outputs + (all_hidden_states,) if self.output_attentions: outputs = outputs + (all_attentions,) return outputs # last-layer hidden state, (all hidden states), (all attentions) class BertPooler(nn.Module): def __init__(self, config): super(BertPooler, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class BertPredictionHeadTransform(nn.Module): def __init__(self, config): super(BertPredictionHeadTransform, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class BertLMPredictionHead(nn.Module): def __init__(self, config): super(BertLMPredictionHead, self).__init__() self.transform = BertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. 
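        # The decoder weight itself is shared with the input word embedding matrix via
        # tie_weights() in BertForPreTraining / BertForMaskedLM further below, so only
        # self.bias contributes new parameters in this head.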
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) + self.bias return hidden_states class BertOnlyMLMHead(nn.Module): def __init__(self, config): super(BertOnlyMLMHead, self).__init__() self.predictions = BertLMPredictionHead(config) def forward(self, sequence_output): prediction_scores = self.predictions(sequence_output) return prediction_scores class BertOnlyNSPHead(nn.Module): def __init__(self, config): super(BertOnlyNSPHead, self).__init__() self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, pooled_output): seq_relationship_score = self.seq_relationship(pooled_output) return seq_relationship_score class BertPreTrainingHeads(nn.Module): def __init__(self, config): super(BertPreTrainingHeads, self).__init__() self.predictions = BertLMPredictionHead(config) self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, sequence_output, pooled_output): prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) return prediction_scores, seq_relationship_score class BertPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for dowloading and loading pretrained models. """ config_class = BertConfig pretrained_model_archive_map = BERT_PRETRAINED_MODEL_ARCHIVE_MAP load_tf_weights = load_tf_weights_in_bert base_model_prefix = "bert" def _init_weights(self, module): """ Initialize the weights """ if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, BertLayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() BERT_START_DOCSTRING = r""" The BERT model was proposed in `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova. It's a bidirectional transformer pre-trained using a combination of masked language modeling objective and next sentence prediction on a large corpus comprising the Toronto Book Corpus and Wikipedia. This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. .. _`BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`: https://arxiv.org/abs/1810.04805 .. _`torch.nn.Module`: https://pytorch.org/docs/stable/nn.html#module Parameters: config (:class:`~pytorch_transformers.BertConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~pytorch_transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ BERT_INPUTS_DOCSTRING = r""" Inputs: **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Indices of input sequence tokens in the vocabulary. 
To match pre-training, BERT input sequence should be formatted with [CLS] and [SEP] tokens as follows: (a) For sequence pairs: ``tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]`` ``token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1`` (b) For single sequences: ``tokens: [CLS] the dog is hairy . [SEP]`` ``token_type_ids: 0 0 0 0 0 0 0`` Bert is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than the left. Indices can be obtained using :class:`pytorch_transformers.BertTokenizer`. See :func:`pytorch_transformers.PreTrainedTokenizer.encode` and :func:`pytorch_transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details. **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``: Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1`` corresponds to a `sentence B` token (see `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ for more details). **position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, config.max_position_embeddings - 1]``. **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``: Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. """ @add_start_docstrings("The bare Bert Model transformer outputting raw hidden-states without any specific head on top.", BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING) class BertModel(BertPreTrainedModel): r""" Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)`` Sequence of hidden-states at the output of the last layer of the model. **pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)`` Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during Bert pretraining. This output is usually *not* a good summary of the semantic content of the input, you're often better with averaging or pooling the sequence of hidden-states for the whole input sequence. **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
**attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Examples:: tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = BertModel.from_pretrained('bert-base-uncased') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 outputs = model(input_ids) last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple """ def __init__(self, config): super(BertModel, self).__init__(config) self.embeddings = BertEmbeddings(config) self.encoder = BertEncoder(config) self.pooler = BertPooler(config) self.init_weights() def _resize_token_embeddings(self, new_num_tokens): old_embeddings = self.embeddings.word_embeddings new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens) self.embeddings.word_embeddings = new_embeddings return self.embeddings.word_embeddings def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None): if attention_mask is None: attention_mask = torch.ones_like(input_ids) if token_type_ids is None: token_type_ids = torch.zeros_like(input_ids) # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. 
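        # Worked example: an attention_mask row of [1, 1, 0] becomes [0.0, 0.0, -10000.0]
        # after the (1.0 - mask) * -10000.0 step below, so the padded position ends up with
        # ~zero weight once the scores pass through the softmax inside BertSelfAttention.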
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] if head_mask is not None: if head_mask.dim() == 1: head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1) head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1) elif head_mask.dim() == 2: head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer head_mask = head_mask.to(dtype=next(self.parameters()).dtype) # switch to fload if need + fp16 compatibility else: head_mask = [None] * self.config.num_hidden_layers embedding_output = self.embeddings(input_ids, position_ids=position_ids, token_type_ids=token_type_ids) encoder_outputs = self.encoder(embedding_output, extended_attention_mask, head_mask=head_mask) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) outputs = (sequence_output, pooled_output,) + encoder_outputs[1:] # add hidden_states and attentions if they are here return outputs # sequence_output, pooled_output, (hidden_states), (attentions) @add_start_docstrings("The bare Bert Model transformer outputting raw hidden-states without any specific head on top.", BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING) class BertForLatentConnector(BertPreTrainedModel): r""" Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)`` Sequence of hidden-states at the output of the last layer of the model. **pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)`` Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during Bert pretraining. This output is usually *not* a good summary of the semantic content of the input, you're often better with averaging or pooling the sequence of hidden-states for the whole input sequence. **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Examples:: tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = BertModel.from_pretrained('bert-base-uncased') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 outputs = model(input_ids) last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple """ def __init__(self, config, latent_size): super(BertForLatentConnector, self).__init__(config) self.embeddings = BertEmbeddings(config) self.encoder = BertEncoder(config) self.pooler = BertPooler(config) self.linear = nn.Linear(config.hidden_size, 2 * latent_size, bias=False) self.init_weights() def _resize_token_embeddings(self, new_num_tokens): old_embeddings = self.embeddings.word_embeddings new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens) self.embeddings.word_embeddings = new_embeddings return self.embeddings.word_embeddings def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None): if attention_mask is None: attention_mask = torch.ones_like(input_ids) if token_type_ids is None: token_type_ids = torch.zeros_like(input_ids) # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. 
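        # Note: this forward mirrors BertModel.forward and never calls self.linear; the
        # hidden_size -> 2 * latent_size projection defined in __init__ is left to the
        # caller, which presumably splits it into a mean/logvar pair on the pooled output
        # (compare the commented-out `self.linear(pooled_output).chunk(2, -1)` usage in
        # BertForSequenceClassificationLatentConnector below).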
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] if head_mask is not None: if head_mask.dim() == 1: head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1) head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1) elif head_mask.dim() == 2: head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer head_mask = head_mask.to(dtype=next(self.parameters()).dtype) # switch to fload if need + fp16 compatibility else: head_mask = [None] * self.config.num_hidden_layers embedding_output = self.embeddings(input_ids, position_ids=position_ids, token_type_ids=token_type_ids) encoder_outputs = self.encoder(embedding_output, extended_attention_mask, head_mask=head_mask) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) outputs = (sequence_output, pooled_output,) + encoder_outputs[1:] # add hidden_states and attentions if they are here return outputs # sequence_output, pooled_output, (hidden_states), (attentions) @add_start_docstrings("""Bert Model with two heads on top as done during the pre-training: a `masked language modeling` head and a `next sentence prediction (classification)` head. """, BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING) class BertForPreTraining(BertPreTrainedModel): r""" **masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Labels for computing the masked language modeling loss. Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` **next_sentence_label**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see ``input_ids`` docstring) Indices should be in ``[0, 1]``. ``0`` indicates sequence B is a continuation of sequence A, ``1`` indicates sequence B is a random sequence. Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **loss**: (`optional`, returned when both ``masked_lm_labels`` and ``next_sentence_label`` are provided) ``torch.FloatTensor`` of shape ``(1,)``: Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss. **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)`` Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). **seq_relationship_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, 2)`` Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax). 
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Examples:: tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = BertForPreTraining.from_pretrained('bert-base-uncased') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 outputs = model(input_ids) prediction_scores, seq_relationship_scores = outputs[:2] """ def __init__(self, config): super(BertForPreTraining, self).__init__(config) self.bert = BertModel(config) self.cls = BertPreTrainingHeads(config) self.init_weights() self.tie_weights() def tie_weights(self): """ Make sure we are sharing the input and output embeddings. Export to TorchScript can't handle parameter sharing so we are cloning them instead. """ self._tie_or_clone_weights(self.cls.predictions.decoder, self.bert.embeddings.word_embeddings) def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, masked_lm_labels=None, next_sentence_label=None): outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask) sequence_output, pooled_output = outputs[:2] prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output) outputs = (prediction_scores, seq_relationship_score,) + outputs[2:] # add hidden states and attention if they are here if masked_lm_labels is not None and next_sentence_label is not None: loss_fct = CrossEntropyLoss(ignore_index=-1) masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1)) next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) total_loss = masked_lm_loss + next_sentence_loss outputs = (total_loss,) + outputs return outputs # (loss), prediction_scores, seq_relationship_score, (hidden_states), (attentions) @add_start_docstrings("""Bert Model with a `language modeling` head on top. """, BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING) class BertForMaskedLM(BertPreTrainedModel): r""" **masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Labels for computing the masked language modeling loss. Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **loss**: (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Masked language modeling loss. **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)`` Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). 
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Examples:: tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = BertForMaskedLM.from_pretrained('bert-base-uncased') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 outputs = model(input_ids, masked_lm_labels=input_ids) loss, prediction_scores = outputs[:2] """ def __init__(self, config): super(BertForMaskedLM, self).__init__(config) self.bert = BertModel(config) self.cls = BertOnlyMLMHead(config) self.init_weights() self.tie_weights() def tie_weights(self): """ Make sure we are sharing the input and output embeddings. Export to TorchScript can't handle parameter sharing so we are cloning them instead. """ self._tie_or_clone_weights(self.cls.predictions.decoder, self.bert.embeddings.word_embeddings) def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, masked_lm_labels=None): outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) outputs = (prediction_scores,) + outputs[2:] # Add hidden states and attention if they are here if masked_lm_labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-1) masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1)) outputs = (masked_lm_loss,) + outputs return outputs # (masked_lm_loss), prediction_scores, (hidden_states), (attentions) @add_start_docstrings("""Bert Model with a `next sentence prediction (classification)` head on top. """, BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING) class BertForNextSentencePrediction(BertPreTrainedModel): r""" **next_sentence_label**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see ``input_ids`` docstring) Indices should be in ``[0, 1]``. ``0`` indicates sequence B is a continuation of sequence A, ``1`` indicates sequence B is a random sequence. Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **loss**: (`optional`, returned when ``next_sentence_label`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Next sequence prediction (classification) loss. **seq_relationship_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, 2)`` Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax). **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
**attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Examples:: tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = BertForNextSentencePrediction.from_pretrained('bert-base-uncased') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 outputs = model(input_ids) seq_relationship_scores = outputs[0] """ def __init__(self, config): super(BertForNextSentencePrediction, self).__init__(config) self.bert = BertModel(config) self.cls = BertOnlyNSPHead(config) self.init_weights() def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, next_sentence_label=None): outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask) pooled_output = outputs[1] seq_relationship_score = self.cls(pooled_output) outputs = (seq_relationship_score,) + outputs[2:] # add hidden states and attention if they are here if next_sentence_label is not None: loss_fct = CrossEntropyLoss(ignore_index=-1) next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) outputs = (next_sentence_loss,) + outputs return outputs # (next_sentence_loss), seq_relationship_score, (hidden_states), (attentions) @add_start_docstrings("""Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING) class BertForSequenceClassification(BertPreTrainedModel): r""" **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: Labels for computing the sequence classification/regression loss. Indices should be in ``[0, ..., config.num_labels - 1]``. If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss), If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy). Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Classification (or regression if config.num_labels==1) loss. **logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)`` Classification (or regression if config.num_labels==1) scores (before SoftMax). **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Examples:: tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = BertForSequenceClassification.from_pretrained('bert-base-uncased') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(input_ids, labels=labels) loss, logits = outputs[:2] """ def __init__(self, config): super(BertForSequenceClassification, self).__init__(config) self.num_labels = config.num_labels self.bert = BertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, self.config.num_labels) self.use_freeze = False self.init_weights() def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, labels=None): outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask) pooled_output = outputs[1] if self.use_freeze: pooled_output = pooled_output.detach() pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here if labels is not None: if self.num_labels == 1: # We are doing regression loss_fct = MSELoss() loss = loss_fct(logits.view(-1), labels.view(-1)) else: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) outputs = (loss,) + outputs # pdb.set_trace() return outputs, pooled_output # (loss), logits, (hidden_states), (attentions) @add_start_docstrings("""Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING) class BertForSequenceClassificationLatentConnector(BertPreTrainedModel): r""" **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: Labels for computing the sequence classification/regression loss. Indices should be in ``[0, ..., config.num_labels - 1]``. If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss), If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy). Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Classification (or regression if config.num_labels==1) loss. **logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)`` Classification (or regression if config.num_labels==1) scores (before SoftMax). **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Examples:: tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = BertForSequenceClassificationLatentConnector.from_pretrained('bert-base-uncased') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(input_ids, labels=labels) loss, logits = outputs[:2] """ def __init__(self, config, latent_size): super(BertForSequenceClassificationLatentConnector, self).__init__(config) self.num_labels = config.num_labels self.bert = BertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, self.config.num_labels) self.linear = nn.Linear(config.hidden_size, 2 * latent_size, bias=False) self.use_freeze = False self.init_weights() def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, labels=None): outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask) pooled_output = outputs[1] # mean, logvar = self.linear(pooled_output).chunk(2, -1) if self.use_freeze: pooled_output = pooled_output.detach() pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here if labels is not None: if self.num_labels == 1: # We are doing regression loss_fct = MSELoss() loss = loss_fct(logits.view(-1), labels.view(-1)) else: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) outputs = (loss,) + outputs return outputs, pooled_output # (loss), logits, (hidden_states), (attentions) @add_start_docstrings("""Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """, BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING) class BertForMultipleChoice(BertPreTrainedModel): r""" **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: Labels for computing the multiple choice classification loss. Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension of the input tensors. (see `input_ids` above) Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Classification loss. **classification_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices)`` where `num_choices` is the size of the second dimension of the input tensors. (see `input_ids` above). Classification scores (before SoftMax). **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Examples:: tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = BertForMultipleChoice.from_pretrained('bert-base-uncased') choices = ["Hello, my dog is cute", "Hello, my cat is amazing"] input_ids = torch.tensor([tokenizer.encode(s) for s in choices]).unsqueeze(0) # Batch size 1, 2 choices labels = torch.tensor(1).unsqueeze(0) # Batch size 1 outputs = model(input_ids, labels=labels) loss, classification_scores = outputs[:2] """ def __init__(self, config): super(BertForMultipleChoice, self).__init__(config) self.bert = BertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) self.init_weights() def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, labels=None): num_choices = input_ids.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) outputs = (reshaped_logits,) + outputs[2:] # add hidden states and attention if they are here if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) outputs = (loss,) + outputs return outputs # (loss), reshaped_logits, (hidden_states), (attentions) @add_start_docstrings("""Bert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING) class BertForTokenClassification(BertPreTrainedModel): r""" **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels - 1]``. Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Classification loss. **scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.num_labels)`` Classification scores (before SoftMax). **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Examples:: tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = BertForTokenClassification.from_pretrained('bert-base-uncased') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 labels = torch.tensor([1] * input_ids.size(1)).unsqueeze(0) # Batch size 1 outputs = model(input_ids, labels=labels) loss, scores = outputs[:2] """ def __init__(self, config): super(BertForTokenClassification, self).__init__(config) self.num_labels = config.num_labels self.bert = BertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, labels=None): outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here if labels is not None: loss_fct = CrossEntropyLoss() # Only keep active parts of the loss if attention_mask is not None: active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.num_labels)[active_loss] active_labels = labels.view(-1)[active_loss] loss = loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) outputs = (loss,) + outputs return outputs # (loss), scores, (hidden_states), (attentions) @add_start_docstrings("""Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """, BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING) class BertForQuestionAnswering(BertPreTrainedModel): r""" **start_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. **end_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. **start_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)`` Span-start scores (before SoftMax). **end_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)`` Span-end scores (before SoftMax). **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
**attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Examples:: tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = BertForQuestionAnswering.from_pretrained('bert-base-uncased') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 start_positions = torch.tensor([1]) end_positions = torch.tensor([3]) outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions) loss, start_scores, end_scores = outputs[:2] """ def __init__(self, config): super(BertForQuestionAnswering, self).__init__(config) self.num_labels = config.num_labels self.bert = BertModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, start_positions=None, end_positions=None): outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) outputs = (start_logits, end_logits,) + outputs[2:] if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 outputs = (total_loss,) + outputs return outputs # (loss), start_logits, end_logits, (hidden_states), (attentions) ############ # XX Added # ############ class BertForLatentConnector_XX(nn.Module): def __init__(self, config, latent_size): super().__init__() self.config = config self.embeddings = BertEmbeddings(config) self.encoder = BertEncoder(config) self.pooler = BertPooler(config) self.linear = nn.Linear(config.hidden_size, 2 * latent_size, bias=False) self.init_weights() def init_weights(self): """ Initialize and prunes weights if needed. 
""" # Initialize weights self.apply(self._init_weights) # Prune heads if needed if self.config.pruned_heads: self.prune_heads(self.config.pruned_heads) def _init_weights(self, module): """ Initialize the weights """ if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, BertLayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() def _resize_token_embeddings(self, new_num_tokens): old_embeddings = self.embeddings.word_embeddings new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens) self.embeddings.word_embeddings = new_embeddings return self.embeddings.word_embeddings def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None): if attention_mask is None: attention_mask = torch.ones_like(input_ids) if token_type_ids is None: token_type_ids = torch.zeros_like(input_ids) # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. 
        extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicates we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)  # We can specify head_mask for each layer
            head_mask = head_mask.to(dtype=next(self.parameters()).dtype)  # switch to float if needed + fp16 compatibility
        else:
            head_mask = [None] * self.config.num_hidden_layers

        embedding_output = self.embeddings(input_ids, position_ids=position_ids, token_type_ids=token_type_ids)
        encoder_outputs = self.encoder(embedding_output, extended_attention_mask, head_mask=head_mask)
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (sequence_output, pooled_output,) + encoder_outputs[1:]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions)
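
# Illustrative usage sketch (an assumption about how this module is typically driven,
# not something exercised anywhere in this file): BertForLatentConnector returns the
# usual (sequence_output, pooled_output, ...) tuple, and its extra self.linear
# projection is applied by the caller to obtain VAE posterior parameters.
# `latent_size=32` and the 'bert-base-uncased' config below are arbitrary example choices.
def _example_latent_connector_usage(input_ids, attention_mask=None, latent_size=32):
    config = BertConfig.from_pretrained('bert-base-uncased')
    encoder = BertForLatentConnector(config, latent_size=latent_size)  # randomly initialized weights
    outputs = encoder(input_ids, attention_mask=attention_mask)
    pooled_output = outputs[1]  # [CLS] representation after the Tanh pooler
    mean, logvar = encoder.linear(pooled_output).chunk(2, -1)  # split projection into posterior params
    return mean, logvar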
Versatile-Diffusion-master
lib/model_zoo/optimus_models/optimus_bert.py
# coding=utf-8 # Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch OpenAI GPT-2 model.""" from __future__ import absolute_import, division, print_function, unicode_literals import pdb import collections import json import logging import math import os import sys from io import open import torch import torch.nn as nn from torch.nn import CrossEntropyLoss from torch.nn.parameter import Parameter from .modeling_utils import PreTrainedModel, Conv1D, prune_conv1d_layer, SequenceSummary from .configuration_gpt2 import GPT2Config from .file_utils import add_start_docstrings logger = logging.getLogger(__name__) GPT2_PRETRAINED_MODEL_ARCHIVE_MAP = {"gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-pytorch_model.bin", "gpt2-medium": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-pytorch_model.bin", "gpt2-large": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-large-pytorch_model.bin"} def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path): """ Load tf checkpoints in a pytorch model """ try: import re import numpy as np import tensorflow as tf except ImportError: logger.error("Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
Please see " "https://www.tensorflow.org/install/ for installation instructions.") raise tf_path = os.path.abspath(gpt2_checkpoint_path) logger.info("Converting TensorFlow checkpoint from {}".format(tf_path)) # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: logger.info("Loading TF weight {} with shape {}".format(name, shape)) array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array.squeeze()) for name, array in zip(names, arrays): name = name[6:] # skip "model/" name = name.split('/') pointer = model for m_name in name: if re.fullmatch(r'[A-Za-z]+\d+', m_name): l = re.split(r'(\d+)', m_name) else: l = [m_name] if l[0] == 'w' or l[0] == 'g': pointer = getattr(pointer, 'weight') elif l[0] == 'b': pointer = getattr(pointer, 'bias') elif l[0] == 'wpe' or l[0] == 'wte': pointer = getattr(pointer, l[0]) pointer = getattr(pointer, 'weight') else: pointer = getattr(pointer, l[0]) if len(l) >= 2: num = int(l[1]) pointer = pointer[num] try: assert pointer.shape == array.shape except AssertionError as e: e.args += (pointer.shape, array.shape) raise logger.info("Initialize PyTorch weight {}".format(name)) pointer.data = torch.from_numpy(array) return model def gelu(x): return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) class Attention(nn.Module): def __init__(self, nx, n_ctx, config, scale=False): super(Attention, self).__init__() self.output_attentions = config.output_attentions n_state = nx # in Attention: n_state=768 (nx=n_embd) # [switch nx => n_state from Block to Attention to keep identical to TF implem] assert n_state % config.n_head == 0 self.register_buffer("bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx)) self.n_head = config.n_head self.split_size = n_state self.scale = scale self.c_attn = Conv1D(n_state * 3, nx) self.c_proj = Conv1D(n_state, nx) self.attn_dropout = nn.Dropout(config.attn_pdrop) self.resid_dropout = nn.Dropout(config.resid_pdrop) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return mask = torch.ones(self.n_head, self.split_size // self.n_head) heads = set(heads) - self.pruned_heads # Convert to set and emove already pruned heads for head in heads: # Compute how many pruned heads are before the head and move the index accordingly head = head - sum(1 if h < head else 0 for h in self.pruned_heads) mask[head] = 0 mask = mask.view(-1).contiguous().eq(1) index = torch.arange(len(mask))[mask].long() index_attn = torch.cat([index, index + self.split_size, index + (2*self.split_size)]) # Prune conv1d layers self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1) self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0) # Update hyper params self.split_size = (self.split_size // self.n_head) * (self.n_head - len(heads)) self.n_head = self.n_head - len(heads) self.pruned_heads = self.pruned_heads.union(heads) def _attn(self, q, k, v, attention_mask=None, head_mask=None): w = torch.matmul(q, k) if self.scale: w = w / math.sqrt(v.size(-1)) nd, ns = w.size(-2), w.size(-1) b = self.bias[:, :, ns-nd:ns, :ns] w = w * b - 1e4 * (1 - b) if attention_mask is not None: # Apply the attention mask w = w + attention_mask w = nn.Softmax(dim=-1)(w) w = self.attn_dropout(w) # Mask heads if we want to if head_mask is not None: w = w * head_mask outputs = [torch.matmul(w, v)] if self.output_attentions: outputs.append(w) return outputs def merge_heads(self, x): x = x.permute(0, 2, 1, 
                      3).contiguous()
        new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
        return x.view(*new_x_shape)  # in Tensorflow implem: fct merge_states

    def split_heads(self, x, k=False):
        new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
        x = x.view(*new_x_shape)  # in Tensorflow implem: fct split_states
        if k:
            return x.permute(0, 2, 3, 1)  # (batch, head, head_features, seq_length)
        else:
            return x.permute(0, 2, 1, 3)  # (batch, head, seq_length, head_features)

    def forward(self, x, layer_past=None, attention_mask=None, head_mask=None):
        x = self.c_attn(x)
        query, key, value = x.split(self.split_size, dim=2)
        query = self.split_heads(query)
        key = self.split_heads(key, k=True)
        value = self.split_heads(value)
        if layer_past is not None:
            past_key, past_value = layer_past[0], layer_past[1]  # transpose back cf below
            past_key = self.split_heads(past_key, k=True)
            past_value = self.split_heads(past_value)
            # pdb.set_trace()
            key = torch.cat((past_key, key), dim=-1)
            value = torch.cat((past_value, value), dim=-2)
        present = torch.stack((key.transpose(-2, -1), value))  # transpose to have same shapes for stacking

        attn_outputs = self._attn(query, key, value, attention_mask, head_mask)
        a = attn_outputs[0]

        a = self.merge_heads(a)
        a = self.c_proj(a)
        a = self.resid_dropout(a)

        outputs = [a, present] + attn_outputs[1:]
        return outputs  # a, present, (attentions)


class MLP(nn.Module):
    def __init__(self, n_state, config):  # in MLP: n_state=3072 (4 * n_embd)
        super(MLP, self).__init__()
        nx = config.n_embd
        self.c_fc = Conv1D(n_state, nx)
        self.c_proj = Conv1D(nx, n_state)
        self.act = gelu
        self.dropout = nn.Dropout(config.resid_pdrop)

    def forward(self, x):
        h = self.act(self.c_fc(x))
        h2 = self.c_proj(h)
        return self.dropout(h2)


class Block(nn.Module):
    def __init__(self, n_ctx, config, scale=False):
        super(Block, self).__init__()
        nx = config.n_embd
        self.ln_1 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon)
        self.attn = Attention(nx, n_ctx, config, scale)
        self.ln_2 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon)
        self.mlp = MLP(4 * nx, config)

    def forward(self, x, layer_past=None, attention_mask=None, head_mask=None):
        output_attn = self.attn(self.ln_1(x),
                                layer_past=layer_past,
                                attention_mask=attention_mask,
                                head_mask=head_mask)
        a = output_attn[0]  # output_attn: a, present, (attentions)

        x = x + a
        m = self.mlp(self.ln_2(x))
        x = x + m

        outputs = [x] + output_attn[1:]
        return outputs  # x, present, (attentions)


class GPT2PreTrainedModel(PreTrainedModel):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """
    config_class = GPT2Config
    pretrained_model_archive_map = GPT2_PRETRAINED_MODEL_ARCHIVE_MAP
    load_tf_weights = load_tf_weights_in_gpt2
    base_model_prefix = "transformer"

    def __init__(self, *inputs, **kwargs):
        super(GPT2PreTrainedModel, self).__init__(*inputs, **kwargs)

    def _init_weights(self, module):
        """ Initialize the weights.
""" if isinstance(module, (nn.Linear, nn.Embedding, Conv1D)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if isinstance(module, (nn.Linear, Conv1D)) and module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) GPT2_START_DOCSTRING = r""" OpenAI GPT-2 model was proposed in `Language Models are Unsupervised Multitask Learners`_ by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**. It's a causal (unidirectional) transformer pre-trained using language modeling on a very large corpus of ~40 GB of text data. This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. .. _`Language Models are Unsupervised Multitask Learners`: https://openai.com/blog/better-language-models/ .. _`torch.nn.Module`: https://pytorch.org/docs/stable/nn.html#module Parameters: config (:class:`~pytorch_transformers.GPT2Config`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~pytorch_transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ GPT2_INPUTS_DOCSTRING = r""" Inputs: **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Indices of input sequence tokens in the vocabulary. GPT-2 is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than the left. Indices can be obtained using :class:`pytorch_transformers.GPT2Tokenizer`. See :func:`pytorch_transformers.PreTrainedTokenizer.encode` and :func:`pytorch_transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details. **past**: list of ``torch.FloatTensor`` (one for each layer): that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see `past` output below). Can be used to speed up sequential decoding. **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``: Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: A parallel sequence of tokens (can be used to indicate various portions of the inputs). The embeddings from these tokens will be summed with the respective token embeddings. Indices are selected in the vocabulary (unlike BERT which has a specific vocabulary for segment indices). **position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, config.max_position_embeddings - 1]``. **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``: Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. 
""" @add_start_docstrings("The bare GPT2 Model transformer outputting raw hidden-states without any specific head on top.", GPT2_START_DOCSTRING, GPT2_INPUTS_DOCSTRING) class GPT2Model(GPT2PreTrainedModel): r""" Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)`` Sequence of hidden-states at the last layer of the model. **past**: list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: that contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `past` input) to speed up sequential decoding. **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Examples:: tokenizer = GPT2Tokenizer.from_pretrained('gpt2') model = GPT2Model.from_pretrained('gpt2') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 outputs = model(input_ids) last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple """ def __init__(self, config): super(GPT2Model, self).__init__(config) self.output_hidden_states = config.output_hidden_states self.output_attentions = config.output_attentions self.wte = nn.Embedding(config.vocab_size, config.n_embd) self.wpe = nn.Embedding(config.n_positions, config.n_embd) self.drop = nn.Dropout(config.embd_pdrop) self.h = nn.ModuleList([Block(config.n_ctx, config, scale=True) for _ in range(config.n_layer)]) self.ln_f = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon) try: self.latent_size = config.latent_size except: self.latent_size = 32 # default size is 32 self.linear = nn.Linear(self.latent_size, config.hidden_size * config.n_layer, bias=False) # different latent vector for each layer self.linear_emb = nn.Linear(self.latent_size, config.hidden_size, bias=False) # share the same latent vector as the embeddings self.config = config self.init_weights() def _resize_token_embeddings(self, new_num_tokens): self.wte = self._get_resized_embeddings(self.wte, new_num_tokens) return self.wte def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} """ for layer, heads in heads_to_prune.items(): self.h[layer].attn.prune_heads(heads) def forward(self, input_ids, past=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, latent_as_gpt_emb=False, latent_as_gpt_memory=True): if past is None: past_length = 0 past = [None] * len(self.h) else: if latent_as_gpt_emb: past_emb = self.linear_emb(past) # used as embeddings to add on other three embeddings if latent_as_gpt_memory: past = self.linear(past) share_latent = False if share_latent: # the same latent vector shared by all layers past = [past.unsqueeze(-2), past.unsqueeze(-2)] # query, key past = [past] * len(self.h) past_length = past[0][0].size(-2) else: # different latent vectors for each layer past_split = torch.split(past.unsqueeze(1), self.config.hidden_size, dim=2) past = list(zip(past_split,past_split)) # past = past.view(batch_size,len(self.h),-1) # past = [[past[:,i,:].unsqueeze(-2), past[:,i,:].unsqueeze(-2) ] for i in range(len(self.h))] past_length = 1 # past[0][0].size(-2) else: past_length = 0 past = [None] * len(self.h) if position_ids is None: position_ids = torch.arange(past_length, input_ids.size(-1) + past_length, dtype=torch.long, device=input_ids.device) position_ids = position_ids.unsqueeze(0).expand_as(input_ids) # Attention mask. if attention_mask is not None: # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. 
attention_mask = attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility attention_mask = (1.0 - attention_mask) * -10000.0 # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # head_mask has shape n_layer x batch x n_heads x N x N if head_mask is not None: if head_mask.dim() == 1: head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1) head_mask = head_mask.expand(self.config.n_layer, -1, -1, -1, -1) elif head_mask.dim() == 2: head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer head_mask = head_mask.to(dtype=next(self.parameters()).dtype) # switch to fload if need + fp16 compatibility else: head_mask = [None] * self.config.n_layer input_shape = input_ids.size() input_ids = input_ids.view(-1, input_ids.size(-1)) position_ids = position_ids.view(-1, position_ids.size(-1)) inputs_embeds = self.wte(input_ids) position_embeds = self.wpe(position_ids) if token_type_ids is not None: token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) token_type_embeds = self.wte(token_type_ids) else: token_type_embeds = 0 hidden_states = inputs_embeds + position_embeds + token_type_embeds if latent_as_gpt_emb: # pdb.set_trace() hidden_states = hidden_states + past_emb.unsqueeze(1) hidden_states = self.drop(hidden_states) output_shape = input_shape + (hidden_states.size(-1),) presents = () all_attentions = [] all_hidden_states = () for i, (block, layer_past) in enumerate(zip(self.h, past)): if self.output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states.view(*output_shape),) outputs = block(hidden_states, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask[i]) hidden_states, present = outputs[:2] presents = presents + (present,) if self.output_attentions: all_attentions.append(outputs[2]) hidden_states = self.ln_f(hidden_states) hidden_states = hidden_states.view(*output_shape) # Add last hidden state if self.output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) outputs = (hidden_states, presents) if self.output_hidden_states: outputs = outputs + (all_hidden_states,) if self.output_attentions: # let the number of heads free (-1) so we can extract attention even after head pruning attention_output_shape = input_shape[:-1] + (-1,) + all_attentions[0].shape[-2:] all_attentions = tuple(t.view(*attention_output_shape) for t in all_attentions) outputs = outputs + (all_attentions,) return outputs # last hidden state, presents, (all hidden_states), (attentions) @add_start_docstrings("""The GPT2 Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings). """, GPT2_START_DOCSTRING, GPT2_INPUTS_DOCSTRING) class GPT2LMHeadModel(GPT2PreTrainedModel): r""" **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set ``lm_labels = input_ids`` Indices are selected in ``[-1, 0, ..., config.vocab_size]`` All labels set to ``-1`` are ignored (masked), the loss is only computed for labels in ``[0, ..., config.vocab_size]`` Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Language modeling loss. 
**prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)`` Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). **past**: list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: that contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `past` input) to speed up sequential decoding. **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Examples:: import torch from pytorch_transformers import GPT2Tokenizer, GPT2LMHeadModel tokenizer = GPT2Tokenizer.from_pretrained('gpt2') model = GPT2LMHeadModel.from_pretrained('gpt2') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 outputs = model(input_ids, labels=input_ids) loss, logits = outputs[:2] """ def __init__(self, config): super(GPT2LMHeadModel, self).__init__(config) self.transformer = GPT2Model(config) self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) self.init_weights() self.tie_weights() def tie_weights(self): """ Make sure we are sharing the input and output embeddings. Export to TorchScript can't handle parameter sharing so we are cloning them instead. """ self._tie_or_clone_weights(self.lm_head, self.transformer.wte) def forward(self, input_ids, past=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, labels=None, label_ignore=None): transformer_outputs = self.transformer(input_ids, past=past, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask) hidden_states = transformer_outputs[0] lm_logits = self.lm_head(hidden_states) outputs = (lm_logits,) + transformer_outputs[1:] if labels is not None: # Shift so that tokens < n predict n shift_logits = lm_logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss(ignore_index=label_ignore, reduce=False) # 50258 is the padding id, otherwise -1 is used for masked LM. loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) loss = torch.sum(loss.view(-1, shift_labels.shape[-1]), -1) outputs = (loss,) + outputs return outputs # (loss), lm_logits, presents, (all hidden_states), (attentions) @add_start_docstrings("""The GPT2 Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings). """, GPT2_START_DOCSTRING, GPT2_INPUTS_DOCSTRING) class GPT2ForLatentConnector(GPT2PreTrainedModel): r""" **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. 
you can set ``lm_labels = input_ids`` Indices are selected in ``[-1, 0, ..., config.vocab_size]`` All labels set to ``-1`` are ignored (masked), the loss is only computed for labels in ``[0, ..., config.vocab_size]`` Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Language modeling loss. **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)`` Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). **past**: list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: that contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `past` input) to speed up sequential decoding. **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Examples:: import torch from pytorch_transformers import GPT2Tokenizer, GPT2LMHeadModel tokenizer = GPT2Tokenizer.from_pretrained('gpt2') model = GPT2LMHeadModel.from_pretrained('gpt2') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 outputs = model(input_ids, labels=input_ids) loss, logits = outputs[:2] """ def __init__(self, config, latent_size=32, latent_as_gpt_emb=True, latent_as_gpt_memory=True): super(GPT2ForLatentConnector, self).__init__(config) self.transformer = GPT2Model(config) self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) self.init_weights() self.tie_weights() self.latent_as_gpt_emb = latent_as_gpt_emb self.latent_as_gpt_memory = latent_as_gpt_memory def tie_weights(self): """ Make sure we are sharing the input and output embeddings. Export to TorchScript can't handle parameter sharing so we are cloning them instead. """ self._tie_or_clone_weights(self.lm_head, self.transformer.wte) def forward(self, input_ids, past=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, labels=None, label_ignore=None): transformer_outputs = self.transformer(input_ids, past=past, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, latent_as_gpt_emb=self.latent_as_gpt_emb, latent_as_gpt_memory=self.latent_as_gpt_memory) hidden_states = transformer_outputs[0] lm_logits = self.lm_head(hidden_states) outputs = (lm_logits,) + transformer_outputs[1:] if labels is not None: # Shift so that tokens < n predict n shift_logits = lm_logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss(ignore_index=label_ignore, reduce=False) # 50258 is the padding id, otherwise -1 is used for masked LM. 
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) loss = torch.sum(loss.view(-1, shift_labels.shape[-1]), -1) outputs = (loss,) + outputs return outputs # (loss), lm_logits, presents, (all hidden_states), (attentions) @add_start_docstrings("""The GPT2 Model transformer with a language modeling and a multiple-choice classification head on top e.g. for RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the input embeddings, the classification head takes as input the input of a specified classification token index in the input sequence). """, GPT2_START_DOCSTRING, GPT2_INPUTS_DOCSTRING) class GPT2DoubleHeadsModel(GPT2PreTrainedModel): r""" **mc_token_ids**: (`optional`, default to index of the last token of the input) ``torch.LongTensor`` of shape ``(batch_size, num_choices)``: Index of the classification token in each input sequence. Selected in the range ``[0, input_ids.size(-1) - 1[``. **lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set ``lm_labels = input_ids`` Indices are selected in ``[-1, 0, ..., config.vocab_size]`` All labels set to ``-1`` are ignored (masked), the loss is only computed for labels in ``[0, ..., config.vocab_size]`` **mc_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size)``: Labels for computing the multiple choice classification loss. Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension of the input tensors. (see `input_ids` above) Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **lm_loss**: (`optional`, returned when ``lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Language modeling loss. **mc_loss**: (`optional`, returned when ``multiple_choice_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Multiple choice classification loss. **lm_prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices, sequence_length, config.vocab_size)`` Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). **mc_prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices)`` Prediction scores of the multiplechoice classification head (scores for each choice before SoftMax). **past**: list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: that contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `past` input) to speed up sequential decoding. **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Examples:: import torch from pytorch_transformers import GPT2Tokenizer, GPT2DoubleHeadsModel tokenizer = GPT2Tokenizer.from_pretrained('gpt2') model = GPT2DoubleHeadsModel.from_pretrained('gpt2') # Add a [CLS] to the vocabulary (we should train it also!) tokenizer.add_special_tokens({'cls_token': '[CLS]'}) model.resize_token_embeddings(len(tokenizer)) # Update the model embeddings with the new vocabulary size print(tokenizer.cls_token_id, len(tokenizer)) # The newly token the last token of the vocabulary choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"] encoded_choices = [tokenizer.encode(s) for s in choices] cls_token_location = [tokens.index(tokenizer.cls_token_id) for tokens in encoded_choices] input_ids = torch.tensor(encoded_choices).unsqueeze(0) # Batch size: 1, number of choices: 2 mc_token_ids = torch.tensor([cls_token_location]) # Batch size: 1 outputs = model(input_ids, mc_token_ids=mc_token_ids) lm_prediction_scores, mc_prediction_scores = outputs[:2] """ def __init__(self, config): super(GPT2DoubleHeadsModel, self).__init__(config) self.transformer = GPT2Model(config) self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) self.multiple_choice_head = SequenceSummary(config) self.init_weights() self.tie_weights() def tie_weights(self): """ Make sure we are sharing the input and output embeddings. Export to TorchScript can't handle parameter sharing so we are cloning them instead. """ self._tie_or_clone_weights(self.lm_head, self.transformer.wte) def forward(self, input_ids, past=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, mc_token_ids=None, lm_labels=None, mc_labels=None): transformer_outputs = self.transformer(input_ids, past=past, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask) hidden_states = transformer_outputs[0] lm_logits = self.lm_head(hidden_states) mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1) outputs = (lm_logits, mc_logits) + transformer_outputs[1:] if mc_labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1)) outputs = (loss,) + outputs if lm_labels is not None: shift_logits = lm_logits[..., :-1, :].contiguous() shift_labels = lm_labels[..., 1:].contiguous() loss_fct = CrossEntropyLoss(ignore_index=-1) loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) outputs = (loss,) + outputs return outputs # (lm loss), (mc loss), lm logits, mc logits, presents, (all hidden_states), (attentions) ############ # XX Added # ############ class GPT2Model_XX(nn.Module): def __init__(self, config): super().__init__() self.config = config self.output_hidden_states = config.output_hidden_states self.output_attentions = config.output_attentions self.wte = nn.Embedding(config.vocab_size, config.n_embd) self.wpe = nn.Embedding(config.n_positions, config.n_embd) self.drop = nn.Dropout(config.embd_pdrop) self.h = nn.ModuleList([Block(config.n_ctx, config, scale=True) for _ in range(config.n_layer)]) self.ln_f = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon) try: self.latent_size = config.latent_size except: self.latent_size = 32 # default size is 32 self.linear = nn.Linear(self.latent_size, config.hidden_size * config.n_layer, bias=False) # different latent vector for each layer self.linear_emb = nn.Linear(self.latent_size, config.hidden_size, bias=False) # share the same latent vector as the embeddings 
self.config = config self.init_weights() def init_weights(self): """ Initialize and prunes weights if needed. """ # Initialize weights self.apply(self._init_weights) # Prune heads if needed if self.config.pruned_heads: self.prune_heads(self.config.pruned_heads) def _init_weights(self, module): """ Initialize the weights. """ if isinstance(module, (nn.Linear, nn.Embedding, Conv1D)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if isinstance(module, (nn.Linear, Conv1D)) and module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) def _resize_token_embeddings(self, new_num_tokens): self.wte = self._get_resized_embeddings(self.wte, new_num_tokens) return self.wte def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} """ for layer, heads in heads_to_prune.items(): self.h[layer].attn.prune_heads(heads) def forward(self, input_ids, past=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, latent_as_gpt_emb=False, latent_as_gpt_memory=True): if past is None: past_length = 0 past = [None] * len(self.h) else: if latent_as_gpt_emb: past_emb = self.linear_emb(past) # used as embeddings to add on other three embeddings if latent_as_gpt_memory: past = self.linear(past) share_latent = False if share_latent: # the same latent vector shared by all layers past = [past.unsqueeze(-2), past.unsqueeze(-2)] # query, key past = [past] * len(self.h) past_length = past[0][0].size(-2) else: # different latent vectors for each layer past_split = torch.split(past.unsqueeze(1), self.config.hidden_size, dim=2) past = list(zip(past_split,past_split)) # past = past.view(batch_size,len(self.h),-1) # past = [[past[:,i,:].unsqueeze(-2), past[:,i,:].unsqueeze(-2) ] for i in range(len(self.h))] past_length = 1 # past[0][0].size(-2) else: past_length = 0 past = [None] * len(self.h) if position_ids is None: position_ids = torch.arange(past_length, input_ids.size(-1) + past_length, dtype=torch.long, device=input_ids.device) position_ids = position_ids.unsqueeze(0).expand_as(input_ids) # Attention mask. if attention_mask is not None: # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. 
attention_mask = attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility attention_mask = (1.0 - attention_mask) * -10000.0 # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # head_mask has shape n_layer x batch x n_heads x N x N if head_mask is not None: if head_mask.dim() == 1: head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1) head_mask = head_mask.expand(self.config.n_layer, -1, -1, -1, -1) elif head_mask.dim() == 2: head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer head_mask = head_mask.to(dtype=next(self.parameters()).dtype) # switch to fload if need + fp16 compatibility else: head_mask = [None] * self.config.n_layer input_shape = input_ids.size() input_ids = input_ids.view(-1, input_ids.size(-1)) position_ids = position_ids.view(-1, position_ids.size(-1)) inputs_embeds = self.wte(input_ids) position_embeds = self.wpe(position_ids) if token_type_ids is not None: token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) token_type_embeds = self.wte(token_type_ids) else: token_type_embeds = 0 hidden_states = inputs_embeds + position_embeds + token_type_embeds if latent_as_gpt_emb: # pdb.set_trace() hidden_states = hidden_states + past_emb.unsqueeze(1) hidden_states = self.drop(hidden_states) output_shape = input_shape + (hidden_states.size(-1),) presents = () all_attentions = [] all_hidden_states = () for i, (block, layer_past) in enumerate(zip(self.h, past)): if self.output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states.view(*output_shape),) outputs = block(hidden_states, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask[i]) hidden_states, present = outputs[:2] presents = presents + (present,) if self.output_attentions: all_attentions.append(outputs[2]) hidden_states = self.ln_f(hidden_states) hidden_states = hidden_states.view(*output_shape) # Add last hidden state if self.output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) outputs = (hidden_states, presents) if self.output_hidden_states: outputs = outputs + (all_hidden_states,) if self.output_attentions: # let the number of heads free (-1) so we can extract attention even after head pruning attention_output_shape = input_shape[:-1] + (-1,) + all_attentions[0].shape[-2:] all_attentions = tuple(t.view(*attention_output_shape) for t in all_attentions) outputs = outputs + (all_attentions,) return outputs # last hidden state, presents, (all hidden_states), (attentions) def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None): """ Build a resized Embedding Module from a provided token Embedding Module. Increasing the size will add newly initialized vectors at the end Reducing the size will remove vectors from the end Args: new_num_tokens: (`optional`) int New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end Reducing the size will remove vectors from the end If not provided or None: return the provided token Embedding Module. 
Return: ``torch.nn.Embeddings`` Pointer to the resized Embedding Module or the old Embedding Module if new_num_tokens is None """ if new_num_tokens is None: return old_embeddings old_num_tokens, old_embedding_dim = old_embeddings.weight.size() if old_num_tokens == new_num_tokens: return old_embeddings # Build new embeddings new_embeddings = nn.Embedding(new_num_tokens, old_embedding_dim) new_embeddings.to(old_embeddings.weight.device) # initialize all new embeddings (in particular added tokens) self._init_weights(new_embeddings) # Copy word embeddings from the previous weights num_tokens_to_copy = min(old_num_tokens, new_num_tokens) new_embeddings.weight.data[:num_tokens_to_copy, :] = old_embeddings.weight.data[:num_tokens_to_copy, :] return new_embeddings class GPT2ForLatentConnector_XX(nn.Module): def __init__(self, config, latent_size=32, latent_as_gpt_emb=True, latent_as_gpt_memory=True): super().__init__() self.config = config self.transformer = GPT2Model_XX(config) self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) self.init_weights() self.tie_weights() self.latent_as_gpt_emb = latent_as_gpt_emb self.latent_as_gpt_memory = latent_as_gpt_memory def init_weights(self): """ Initialize and prunes weights if needed. """ # Initialize weights self.apply(self._init_weights) # Prune heads if needed if self.config.pruned_heads: self.prune_heads(self.config.pruned_heads) def _init_weights(self, module): """ Initialize the weights. """ if isinstance(module, (nn.Linear, nn.Embedding, Conv1D)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if isinstance(module, (nn.Linear, Conv1D)) and module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) def _tie_or_clone_weights(self, first_module, second_module): """ Tie or clone module weights depending of weither we are using TorchScript or not """ if self.config.torchscript: first_module.weight = nn.Parameter(second_module.weight.clone()) else: first_module.weight = second_module.weight if hasattr(first_module, 'bias') and first_module.bias is not None: first_module.bias.data = torch.nn.functional.pad( first_module.bias.data, (0, first_module.weight.shape[0] - first_module.bias.shape[0]), 'constant', 0,) def tie_weights(self): """ Make sure we are sharing the input and output embeddings. Export to TorchScript can't handle parameter sharing so we are cloning them instead. """ self._tie_or_clone_weights(self.lm_head, self.transformer.wte) def forward(self, input_ids, past=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, labels=None, label_ignore=None): transformer_outputs = self.transformer(input_ids, past=past, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, latent_as_gpt_emb=self.latent_as_gpt_emb, latent_as_gpt_memory=self.latent_as_gpt_memory) hidden_states = transformer_outputs[0] lm_logits = self.lm_head(hidden_states) outputs = (lm_logits,) + transformer_outputs[1:] if labels is not None: # Shift so that tokens < n predict n shift_logits = lm_logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss(ignore_index=label_ignore, reduce=False) # 50258 is the padding id, otherwise -1 is used for masked LM. 
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) loss = torch.sum(loss.view(-1, shift_labels.shape[-1]), -1) outputs = (loss,) + outputs return outputs # (loss), lm_logits, presents, (all hidden_states), (attentions) def resize_token_embeddings(self, new_num_tokens=None): model_embeds = self.transformer._resize_token_embeddings(new_num_tokens) if new_num_tokens is None: return model_embeds self.config.vocab_size = new_num_tokens self.transformer.vocab_size = new_num_tokens if hasattr(self, 'tie_weights'): self.tie_weights() return model_embeds
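
# ---------------------------------------------------------------------------
# Illustrative sketch (an added standalone shape walkthrough, not part of the
# original Optimus gpt2 file): it mirrors how GPT2Model_XX.forward consumes
# the Optimus latent vector. As "memory" (latent_as_gpt_memory) the latent is
# projected to hidden_size * n_layer and split into one length-1 key/value
# pair per transformer block; as "embedding" (latent_as_gpt_emb) it is
# projected to hidden_size and added onto every position's input embedding.
# All dimensions and names below are arbitrary demo values.
# ---------------------------------------------------------------------------
def _demo_latent_injection():
    import torch
    import torch.nn as nn

    batch_size, latent_size, hidden_size, n_layer = 2, 32, 768, 12
    latent = torch.randn(batch_size, latent_size)                # z from the Optimus encoder

    # latent_as_gpt_memory: one (key, value) "past" entry of length 1 per layer
    linear = nn.Linear(latent_size, hidden_size * n_layer, bias=False)
    past = linear(latent)                                        # [batch, hidden_size * n_layer]
    past_split = torch.split(past.unsqueeze(1), hidden_size, dim=2)
    per_layer_past = list(zip(past_split, past_split))           # key and value share the latent slice
    assert len(per_layer_past) == n_layer
    assert per_layer_past[0][0].shape == (batch_size, 1, hidden_size)

    # latent_as_gpt_emb: the same latent added to every token embedding
    linear_emb = nn.Linear(latent_size, hidden_size, bias=False)
    token_embeddings = torch.randn(batch_size, 10, hidden_size)  # dummy wte + wpe output for 10 tokens
    hidden_states = token_embeddings + linear_emb(latent).unsqueeze(1)
    assert hidden_states.shape == (batch_size, 10, hidden_size)


if __name__ == "__main__":
    _demo_latent_injection()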
Versatile-Diffusion-master
lib/model_zoo/optimus_models/optimus_gpt2.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch BERT model.""" from __future__ import (absolute_import, division, print_function, unicode_literals) import pdb import copy import json import logging import os from io import open import six import torch from torch import nn from torch.nn import CrossEntropyLoss from torch.nn import functional as F from .configuration_utils import PretrainedConfig from .file_utils import cached_path, WEIGHTS_NAME, TF_WEIGHTS_NAME logger = logging.getLogger(__name__) try: from torch.nn import Identity except ImportError: # Older PyTorch compatibility class Identity(nn.Module): r"""A placeholder identity operator that is argument-insensitive. """ def __init__(self, *args, **kwargs): super(Identity, self).__init__() def forward(self, input): return input class PreTrainedModel(nn.Module): r""" Base class for all models. :class:`~pytorch_transformers.PreTrainedModel` takes care of storing the configuration of the models and handles methods for loading/downloading/saving models as well as a few methods commons to all models to (i) resize the input embeddings and (ii) prune heads in the self-attention heads. Class attributes (overridden by derived classes): - ``config_class``: a class derived from :class:`~pytorch_transformers.PretrainedConfig` to use as configuration class for this model architecture. - ``pretrained_model_archive_map``: a python ``dict`` of with `short-cut-names` (string) as keys and `url` (string) of associated pretrained weights as values. - ``load_tf_weights``: a python ``method`` for loading a TensorFlow checkpoint in a PyTorch model, taking as arguments: - ``model``: an instance of the relevant subclass of :class:`~pytorch_transformers.PreTrainedModel`, - ``config``: an instance of the relevant subclass of :class:`~pytorch_transformers.PretrainedConfig`, - ``path``: a path (string) to the TensorFlow checkpoint. - ``base_model_prefix``: a string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model. """ config_class = None pretrained_model_archive_map = {} load_tf_weights = lambda model, config, path: None base_model_prefix = "" def __init__(self, config, *inputs, **kwargs): super(PreTrainedModel, self).__init__() if not isinstance(config, PretrainedConfig): raise ValueError( "Parameter config in `{}(config)` should be an instance of class `PretrainedConfig`. " "To create a model from a pretrained model use " "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format( self.__class__.__name__, self.__class__.__name__ )) # Save config in model self.config = config def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None): """ Build a resized Embedding Module from a provided token Embedding Module. 
Increasing the size will add newly initialized vectors at the end Reducing the size will remove vectors from the end Args: new_num_tokens: (`optional`) int New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end Reducing the size will remove vectors from the end If not provided or None: return the provided token Embedding Module. Return: ``torch.nn.Embeddings`` Pointer to the resized Embedding Module or the old Embedding Module if new_num_tokens is None """ if new_num_tokens is None: return old_embeddings old_num_tokens, old_embedding_dim = old_embeddings.weight.size() if old_num_tokens == new_num_tokens: return old_embeddings # Build new embeddings new_embeddings = nn.Embedding(new_num_tokens, old_embedding_dim) new_embeddings.to(old_embeddings.weight.device) # initialize all new embeddings (in particular added tokens) self._init_weights(new_embeddings) # Copy word embeddings from the previous weights num_tokens_to_copy = min(old_num_tokens, new_num_tokens) new_embeddings.weight.data[:num_tokens_to_copy, :] = old_embeddings.weight.data[:num_tokens_to_copy, :] return new_embeddings def _tie_or_clone_weights(self, first_module, second_module): """ Tie or clone module weights depending of weither we are using TorchScript or not """ if self.config.torchscript: first_module.weight = nn.Parameter(second_module.weight.clone()) else: first_module.weight = second_module.weight if hasattr(first_module, 'bias') and first_module.bias is not None: first_module.bias.data = torch.nn.functional.pad( first_module.bias.data, (0, first_module.weight.shape[0] - first_module.bias.shape[0]), 'constant', 0 ) def resize_token_embeddings(self, new_num_tokens=None): """ Resize input token embeddings matrix of the model if new_num_tokens != config.vocab_size. Take care of tying weights embeddings afterwards if the model class has a `tie_weights()` method. Arguments: new_num_tokens: (`optional`) int: New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or None: does nothing and just returns a pointer to the input tokens ``torch.nn.Embeddings`` Module of the model. Return: ``torch.nn.Embeddings`` Pointer to the input tokens Embeddings Module of the model """ base_model = getattr(self, self.base_model_prefix, self) # get the base model if needed model_embeds = base_model._resize_token_embeddings(new_num_tokens) if new_num_tokens is None: return model_embeds # Update base model and current model config self.config.vocab_size = new_num_tokens base_model.vocab_size = new_num_tokens # Tie weights again if needed if hasattr(self, 'tie_weights'): self.tie_weights() return model_embeds def init_weights(self): """ Initialize and prunes weights if needed. """ # Initialize weights self.apply(self._init_weights) # Prune heads if needed if self.config.pruned_heads: self.prune_heads(self.config.pruned_heads) def prune_heads(self, heads_to_prune): """ Prunes heads of the base model. Arguments: heads_to_prune: dict with keys being selected layer indices (`int`) and associated values being the list of heads to prune in said layer (list of `int`). E.g. {1: [0, 2], 2: [2, 3]} will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2. 
""" base_model = getattr(self, self.base_model_prefix, self) # get the base model if needed # save new sets of pruned heads as union of previously stored pruned heads and newly pruned heads for layer, heads in heads_to_prune.items(): union_heads = set(self.config.pruned_heads.get(layer, [])) | set(heads) self.config.pruned_heads[layer] = list(union_heads) # Unfortunately we have to store it as list for JSON base_model._prune_heads(heads_to_prune) def save_pretrained(self, save_directory): """ Save a model and its configuration file to a directory, so that it can be re-loaded using the `:func:`~pytorch_transformers.PreTrainedModel.from_pretrained`` class method. """ assert os.path.isdir(save_directory), "Saving path should be a directory where the model and configuration can be saved" # Only save the model it-self if we are using distributed training model_to_save = self.module if hasattr(self, 'module') else self # Save configuration file model_to_save.config.save_pretrained(save_directory) # If we save using the predefined names, we can load using `from_pretrained` output_model_file = os.path.join(save_directory, WEIGHTS_NAME) torch.save(model_to_save.state_dict(), output_model_file) @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): r"""Instantiate a pretrained pytorch model from a pre-trained model configuration. The model is set in evaluation mode by default using ``model.eval()`` (Dropout modules are deactivated) To train the model, you should first set it back in training mode with ``model.train()`` The warning ``Weights from XXX not initialized from pretrained model`` means that the weights of XXX do not come pre-trained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning task. The warning ``Weights from XXX not used in YYY`` means that the layer XXX is not used by YYY, therefore those weights are discarded. Parameters: pretrained_model_name_or_path: either: - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``. - a path to a `directory` containing model weights saved using :func:`~pytorch_transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``. - a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards. model_args: (`optional`) Sequence of positional arguments: All remaning positional arguments will be passed to the underlying model's ``__init__`` method config: (`optional`) instance of a class derived from :class:`~pytorch_transformers.PretrainedConfig`: Configuration for the model to use instead of an automatically loaded configuation. Configuration can be automatically loaded when: - the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or - the model was saved using :func:`~pytorch_transformers.PreTrainedModel.save_pretrained` and is reloaded by suppling the save directory. - the model is loaded by suppling a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory. 
state_dict: (`optional`) dict: an optional state dictionnary for the model to use instead of a state dictionary loaded from saved weights file. This option can be used if you want to create a model from a pretrained configuration but load your own weights. In this case though, you should check if using :func:`~pytorch_transformers.PreTrainedModel.save_pretrained` and :func:`~pytorch_transformers.PreTrainedModel.from_pretrained` is not a simpler option. cache_dir: (`optional`) string: Path to a directory in which a downloaded pre-trained model configuration should be cached if the standard cache should not be used. force_download: (`optional`) boolean, default False: Force to (re-)download the model weights and configuration files and override the cached versions if they exists. proxies: (`optional`) dict, default None: A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}. The proxies are used on each request. output_loading_info: (`optional`) boolean: Set to ``True`` to also return a dictionnary containing missing keys, unexpected keys and error messages. kwargs: (`optional`) Remaining dictionary of keyword arguments: Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or automatically loaded: - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done) - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~pytorch_transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function. Examples:: model = BertModel.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache. model = BertModel.from_pretrained('./test/saved_model/') # E.g. 
model was saved using `save_pretrained('./test/saved_model/')` model = BertModel.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading assert model.config.output_attention == True # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = BertConfig.from_json_file('./tf_model/my_tf_model_config.json') model = BertModel.from_pretrained('./tf_model/my_tf_checkpoint.ckpt.index', from_tf=True, config=config) """ config = kwargs.pop('config', None) state_dict = kwargs.pop('state_dict', None) cache_dir = kwargs.pop('cache_dir', None) from_tf = kwargs.pop('from_tf', False) force_download = kwargs.pop('force_download', False) proxies = kwargs.pop('proxies', None) output_loading_info = kwargs.pop('output_loading_info', False) # Load config if config is None: config, model_kwargs = cls.config_class.from_pretrained( pretrained_model_name_or_path, *model_args, cache_dir=cache_dir, return_unused_kwargs=True, force_download=force_download, **kwargs ) else: model_kwargs = kwargs # Load model if pretrained_model_name_or_path in cls.pretrained_model_archive_map: archive_file = cls.pretrained_model_archive_map[pretrained_model_name_or_path] elif os.path.isdir(pretrained_model_name_or_path): if from_tf: # Directly load from a TensorFlow checkpoint archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index") else: archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME) else: if from_tf: # Directly load from a TensorFlow checkpoint archive_file = pretrained_model_name_or_path + ".index" else: archive_file = pretrained_model_name_or_path # redirect to the cache, if necessary try: resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies) except EnvironmentError as e: if pretrained_model_name_or_path in cls.pretrained_model_archive_map: logger.error( "Couldn't reach server at '{}' to download pretrained weights.".format( archive_file)) else: logger.error( "Model name '{}' was not found in model name list ({}). " "We assumed '{}' was a path or url but couldn't find any file " "associated to this path or url.".format( pretrained_model_name_or_path, ', '.join(cls.pretrained_model_archive_map.keys()), archive_file)) raise e if resolved_archive_file == archive_file: logger.info("loading weights file {}".format(archive_file)) else: logger.info("loading weights file {} from cache at {}".format( archive_file, resolved_archive_file)) # Instantiate model. 
model = cls(config, *model_args, **model_kwargs) if state_dict is None and not from_tf: state_dict = torch.load(resolved_archive_file, map_location='cpu') if from_tf: # Directly load from a TensorFlow checkpoint return cls.load_tf_weights(model, config, resolved_archive_file[:-6]) # Remove the '.index' # Convert old format to new format if needed from a PyTorch state_dict old_keys = [] new_keys = [] for key in state_dict.keys(): new_key = None if 'gamma' in key: new_key = key.replace('gamma', 'weight') if 'beta' in key: new_key = key.replace('beta', 'bias') if new_key: old_keys.append(key) new_keys.append(new_key) for old_key, new_key in zip(old_keys, new_keys): state_dict[new_key] = state_dict.pop(old_key) # Load from a PyTorch state_dict missing_keys = [] unexpected_keys = [] error_msgs = [] # copy state_dict so _load_from_state_dict can modify it metadata = getattr(state_dict, '_metadata', None) state_dict = state_dict.copy() if metadata is not None: state_dict._metadata = metadata def load(module, prefix=''): local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) module._load_from_state_dict( state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs) for name, child in module._modules.items(): if child is not None: load(child, prefix + name + '.') # Make sure we are able to load base models as well as derived models (with heads) start_prefix = '' model_to_load = model if not hasattr(model, cls.base_model_prefix) and any(s.startswith(cls.base_model_prefix) for s in state_dict.keys()): start_prefix = cls.base_model_prefix + '.' if hasattr(model, cls.base_model_prefix) and not any(s.startswith(cls.base_model_prefix) for s in state_dict.keys()): model_to_load = getattr(model, cls.base_model_prefix) load(model_to_load, prefix=start_prefix) if len(missing_keys) > 0: logger.info("Weights of {} not initialized from pretrained model: {}".format( model.__class__.__name__, missing_keys)) if len(unexpected_keys) > 0: logger.info("Weights from pretrained model not used in {}: {}".format( model.__class__.__name__, unexpected_keys)) if len(error_msgs) > 0: raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format( model.__class__.__name__, "\n\t".join(error_msgs))) if hasattr(model, 'tie_weights'): model.tie_weights() # make sure word embedding weights are still tied # Set model in evaluation mode to desactivate DropOut modules by default model.eval() if output_loading_info: loading_info = {"missing_keys": missing_keys, "unexpected_keys": unexpected_keys, "error_msgs": error_msgs} return model, loading_info return model class Conv1D(nn.Module): def __init__(self, nf, nx): """ Conv1D layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2) Basically works like a Linear layer but the weights are transposed """ super(Conv1D, self).__init__() self.nf = nf w = torch.empty(nx, nf) nn.init.normal_(w, std=0.02) self.weight = nn.Parameter(w) self.bias = nn.Parameter(torch.zeros(nf)) def forward(self, x): size_out = x.size()[:-1] + (self.nf,) x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight) x = x.view(*size_out) return x class PoolerStartLogits(nn.Module): """ Compute SQuAD start_logits from sequence hidden states. 
""" def __init__(self, config): super(PoolerStartLogits, self).__init__() self.dense = nn.Linear(config.hidden_size, 1) def forward(self, hidden_states, p_mask=None): """ Args: **p_mask**: (`optional`) ``torch.FloatTensor`` of shape `(batch_size, seq_len)` invalid position mask such as query and special symbols (PAD, SEP, CLS) 1.0 means token should be masked. """ x = self.dense(hidden_states).squeeze(-1) if p_mask is not None: if next(self.parameters()).dtype == torch.float16: x = x * (1 - p_mask) - 65500 * p_mask else: x = x * (1 - p_mask) - 1e30 * p_mask return x class PoolerEndLogits(nn.Module): """ Compute SQuAD end_logits from sequence hidden states and start token hidden state. """ def __init__(self, config): super(PoolerEndLogits, self).__init__() self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size) self.activation = nn.Tanh() self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dense_1 = nn.Linear(config.hidden_size, 1) def forward(self, hidden_states, start_states=None, start_positions=None, p_mask=None): """ Args: One of ``start_states``, ``start_positions`` should be not None. If both are set, ``start_positions`` overrides ``start_states``. **start_states**: ``torch.LongTensor`` of shape identical to hidden_states hidden states of the first tokens for the labeled span. **start_positions**: ``torch.LongTensor`` of shape ``(batch_size,)`` position of the first token for the labeled span: **p_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, seq_len)`` Mask of invalid position such as query and special symbols (PAD, SEP, CLS) 1.0 means token should be masked. """ assert start_states is not None or start_positions is not None, "One of start_states, start_positions should be not None" if start_positions is not None: slen, hsz = hidden_states.shape[-2:] start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz) start_states = hidden_states.gather(-2, start_positions) # shape (bsz, 1, hsz) start_states = start_states.expand(-1, slen, -1) # shape (bsz, slen, hsz) x = self.dense_0(torch.cat([hidden_states, start_states], dim=-1)) x = self.activation(x) x = self.LayerNorm(x) x = self.dense_1(x).squeeze(-1) if p_mask is not None: x = x * (1 - p_mask) - 1e30 * p_mask return x class PoolerAnswerClass(nn.Module): """ Compute SQuAD 2.0 answer class from classification and start tokens hidden states. """ def __init__(self, config): super(PoolerAnswerClass, self).__init__() self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size) self.activation = nn.Tanh() self.dense_1 = nn.Linear(config.hidden_size, 1, bias=False) def forward(self, hidden_states, start_states=None, start_positions=None, cls_index=None): """ Args: One of ``start_states``, ``start_positions`` should be not None. If both are set, ``start_positions`` overrides ``start_states``. **start_states**: ``torch.LongTensor`` of shape identical to ``hidden_states``. hidden states of the first tokens for the labeled span. **start_positions**: ``torch.LongTensor`` of shape ``(batch_size,)`` position of the first token for the labeled span. **cls_index**: torch.LongTensor of shape ``(batch_size,)`` position of the CLS token. If None, take the last token. 
note(Original repo): no dependency on end_feature so that we can obtain one single `cls_logits` for each sample """ hsz = hidden_states.shape[-1] assert start_states is not None or start_positions is not None, "One of start_states, start_positions should be not None" if start_positions is not None: start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz) start_states = hidden_states.gather(-2, start_positions).squeeze(-2) # shape (bsz, hsz) if cls_index is not None: cls_index = cls_index[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz) cls_token_state = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, hsz) else: cls_token_state = hidden_states[:, -1, :] # shape (bsz, hsz) x = self.dense_0(torch.cat([start_states, cls_token_state], dim=-1)) x = self.activation(x) x = self.dense_1(x).squeeze(-1) return x class SQuADHead(nn.Module): r""" A SQuAD head inspired by XLNet. Parameters: config (:class:`~pytorch_transformers.XLNetConfig`): Model configuration class with all the parameters of the model. Inputs: **hidden_states**: ``torch.FloatTensor`` of shape ``(batch_size, seq_len, hidden_size)`` hidden states of sequence tokens **start_positions**: ``torch.LongTensor`` of shape ``(batch_size,)`` position of the first token for the labeled span. **end_positions**: ``torch.LongTensor`` of shape ``(batch_size,)`` position of the last token for the labeled span. **cls_index**: torch.LongTensor of shape ``(batch_size,)`` position of the CLS token. If None, take the last token. **is_impossible**: ``torch.LongTensor`` of shape ``(batch_size,)`` Whether the question has a possible answer in the paragraph or not. **p_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, seq_len)`` Mask of invalid position such as query and special symbols (PAD, SEP, CLS) 1.0 means token should be masked. Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **loss**: (`optional`, returned if both ``start_positions`` and ``end_positions`` are provided) ``torch.FloatTensor`` of shape ``(1,)``: Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses. **start_top_log_probs**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided) ``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top)`` Log probabilities for the top config.start_n_top start token possibilities (beam-search). **start_top_index**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided) ``torch.LongTensor`` of shape ``(batch_size, config.start_n_top)`` Indices for the top config.start_n_top start token possibilities (beam-search). **end_top_log_probs**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided) ``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)`` Log probabilities for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search). **end_top_index**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided) ``torch.LongTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)`` Indices for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search). **cls_logits**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided) ``torch.FloatTensor`` of shape ``(batch_size,)`` Log probabilities for the ``is_impossible`` label of the answers. 
""" def __init__(self, config): super(SQuADHead, self).__init__() self.start_n_top = config.start_n_top self.end_n_top = config.end_n_top self.start_logits = PoolerStartLogits(config) self.end_logits = PoolerEndLogits(config) self.answer_class = PoolerAnswerClass(config) def forward(self, hidden_states, start_positions=None, end_positions=None, cls_index=None, is_impossible=None, p_mask=None): outputs = () start_logits = self.start_logits(hidden_states, p_mask=p_mask) if start_positions is not None and end_positions is not None: # If we are on multi-GPU, let's remove the dimension added by batch splitting for x in (start_positions, end_positions, cls_index, is_impossible): if x is not None and x.dim() > 1: x.squeeze_(-1) # during training, compute the end logits based on the ground truth of the start position end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask) loss_fct = CrossEntropyLoss() start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if cls_index is not None and is_impossible is not None: # Predict answerability from the representation of CLS and START cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index) loss_fct_cls = nn.BCEWithLogitsLoss() cls_loss = loss_fct_cls(cls_logits, is_impossible) # note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss total_loss += cls_loss * 0.5 outputs = (total_loss,) + outputs else: # during inference, compute the end logits based on beam search bsz, slen, hsz = hidden_states.size() start_log_probs = F.softmax(start_logits, dim=-1) # shape (bsz, slen) start_top_log_probs, start_top_index = torch.topk(start_log_probs, self.start_n_top, dim=-1) # shape (bsz, start_n_top) start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz) # shape (bsz, start_n_top, hsz) start_states = torch.gather(hidden_states, -2, start_top_index_exp) # shape (bsz, start_n_top, hsz) start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1) # shape (bsz, slen, start_n_top, hsz) hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(start_states) # shape (bsz, slen, start_n_top, hsz) p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask) end_log_probs = F.softmax(end_logits, dim=1) # shape (bsz, slen, start_n_top) end_top_log_probs, end_top_index = torch.topk(end_log_probs, self.end_n_top, dim=1) # shape (bsz, end_n_top, start_n_top) end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top) end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top) start_states = torch.einsum("blh,bl->bh", hidden_states, start_log_probs) cls_logits = self.answer_class(hidden_states, start_states=start_states, cls_index=cls_index) outputs = (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits) + outputs # return start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits # or (if labels are provided) (total_loss,) return outputs class SequenceSummary(nn.Module): r""" Compute a single vector summary of a sequence hidden states according to various possibilities: Args of the config class: summary_type: - 'last' => [default] take the last token hidden state (like XLNet) - 'first' => take the first token hidden state (like Bert) - 'mean' => take the mean 
of all tokens hidden states - 'cls_index' => supply a Tensor of classification token position (GPT/GPT-2) - 'attn' => Not implemented now, use multi-head attention summary_use_proj: Add a projection after the vector extraction summary_proj_to_labels: If True, the projection outputs to config.num_labels classes (otherwise to hidden_size). Default: False. summary_activation: 'tanh' => add a tanh activation to the output, Other => no activation. Default summary_first_dropout: Add a dropout before the projection and activation summary_last_dropout: Add a dropout after the projection and activation """ def __init__(self, config): super(SequenceSummary, self).__init__() self.summary_type = config.summary_type if hasattr(config, 'summary_use_proj') else 'last' if self.summary_type == 'attn': # We should use a standard multi-head attention module with absolute positional embedding for that. # Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276 # We can probably just use the multi-head attention module of PyTorch >=1.1.0 raise NotImplementedError self.summary = Identity() if hasattr(config, 'summary_use_proj') and config.summary_use_proj: if hasattr(config, 'summary_proj_to_labels') and config.summary_proj_to_labels and config.num_labels > 0: num_classes = config.num_labels else: num_classes = config.hidden_size self.summary = nn.Linear(config.hidden_size, num_classes) self.activation = Identity() if hasattr(config, 'summary_activation') and config.summary_activation == 'tanh': self.activation = nn.Tanh() self.first_dropout = Identity() if hasattr(config, 'summary_first_dropout') and config.summary_first_dropout > 0: self.first_dropout = nn.Dropout(config.summary_first_dropout) self.last_dropout = Identity() if hasattr(config, 'summary_last_dropout') and config.summary_last_dropout > 0: self.last_dropout = nn.Dropout(config.summary_last_dropout) def forward(self, hidden_states, cls_index=None): """ hidden_states: float Tensor in shape [bsz, seq_len, hidden_size], the hidden-states of the last layer. cls_index: [optional] position of the classification token if summary_type == 'cls_index', shape (bsz,) or more generally (bsz, ...) where ... are optional leading dimensions of hidden_states. if summary_type == 'cls_index' and cls_index is None: we take the last token of the sequence as classification token """ if self.summary_type == 'last': output = hidden_states[:, -1] elif self.summary_type == 'first': output = hidden_states[:, 0] elif self.summary_type == 'mean': output = hidden_states.mean(dim=1) elif self.summary_type == 'cls_index': if cls_index is None: cls_index = torch.full_like(hidden_states[..., :1, :], hidden_states.shape[-2]-1, dtype=torch.long) else: cls_index = cls_index.unsqueeze(-1).unsqueeze(-1) cls_index = cls_index.expand((-1,) * (cls_index.dim()-1) + (hidden_states.size(-1),)) # shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states output = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, XX, hidden_size) elif self.summary_type == 'attn': raise NotImplementedError output = self.first_dropout(output) output = self.summary(output) output = self.activation(output) output = self.last_dropout(output) return output def prune_linear_layer(layer, index, dim=0): """ Prune a linear layer (a model parameters) to keep only entries in index. Return the pruned layer as a new layer with requires_grad=True. Used to remove heads. 
""" index = index.to(layer.weight.device) W = layer.weight.index_select(dim, index).clone().detach() if layer.bias is not None: if dim == 1: b = layer.bias.clone().detach() else: b = layer.bias[index].clone().detach() new_size = list(layer.weight.size()) new_size[dim] = len(index) new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device) new_layer.weight.requires_grad = False new_layer.weight.copy_(W.contiguous()) new_layer.weight.requires_grad = True if layer.bias is not None: new_layer.bias.requires_grad = False new_layer.bias.copy_(b.contiguous()) new_layer.bias.requires_grad = True return new_layer def prune_conv1d_layer(layer, index, dim=1): """ Prune a Conv1D layer (a model parameters) to keep only entries in index. A Conv1D work as a Linear layer (see e.g. BERT) but the weights are transposed. Return the pruned layer as a new layer with requires_grad=True. Used to remove heads. """ index = index.to(layer.weight.device) W = layer.weight.index_select(dim, index).clone().detach() if dim == 0: b = layer.bias.clone().detach() else: b = layer.bias[index].clone().detach() new_size = list(layer.weight.size()) new_size[dim] = len(index) new_layer = Conv1D(new_size[1], new_size[0]).to(layer.weight.device) new_layer.weight.requires_grad = False new_layer.weight.copy_(W.contiguous()) new_layer.weight.requires_grad = True new_layer.bias.requires_grad = False new_layer.bias.copy_(b.contiguous()) new_layer.bias.requires_grad = True return new_layer def prune_layer(layer, index, dim=None): """ Prune a Conv1D or nn.Linear layer (a model parameters) to keep only entries in index. Return the pruned layer as a new layer with requires_grad=True. Used to remove heads. """ if isinstance(layer, nn.Linear): return prune_linear_layer(layer, index, dim=0 if dim is None else dim) elif isinstance(layer, Conv1D): return prune_conv1d_layer(layer, index, dim=1 if dim is None else dim) else: raise ValueError("Can't prune layer of class {}".format(layer.__class__))
Versatile-Diffusion-master
lib/model_zoo/optimus_models/modeling_utils.py
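The modeling_utils.py file above vendors the pytorch_transformers PreTrainedModel helpers, including prune_linear_layer. Below is a minimal, self-contained sketch of the same row-pruning idea in plain PyTorch; the helper name prune_linear_rows, the toy layer sizes, and the fixed dim=0 behaviour are assumptions for illustration, not the vendored function itself.

import torch
import torch.nn as nn

def prune_linear_rows(layer: nn.Linear, index: torch.Tensor) -> nn.Linear:
    # Keep only the output rows selected by `index` (prune along dim=0).
    W = layer.weight.index_select(0, index).clone().detach()
    b = layer.bias[index].clone().detach() if layer.bias is not None else None
    new_layer = nn.Linear(layer.in_features, len(index), bias=b is not None)
    with torch.no_grad():
        new_layer.weight.copy_(W)
        if b is not None:
            new_layer.bias.copy_(b)
    return new_layer

layer = nn.Linear(4, 6)
pruned = prune_linear_rows(layer, torch.tensor([0, 2, 5]))
x = torch.randn(1, 4)
# The pruned layer reproduces the selected rows of the original output.
assert torch.allclose(pruned(x), layer(x)[:, [0, 2, 5]], atol=1e-6)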
""" Utilities for working with the local dataset cache. This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp Copyright by the AllenNLP authors. """ from __future__ import (absolute_import, division, print_function, unicode_literals) import sys import json import logging import os import six import shutil import tempfile import fnmatch from functools import wraps from hashlib import sha256 from io import open # import boto3 # from botocore.config import Config # from botocore.exceptions import ClientError import requests from tqdm import tqdm try: from torch.hub import _get_torch_home torch_cache_home = _get_torch_home() except ImportError: torch_cache_home = os.path.expanduser( os.getenv('TORCH_HOME', os.path.join( os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch'))) default_cache_path = os.path.join(torch_cache_home, 'pytorch_transformers') try: from urllib.parse import urlparse except ImportError: from urlparse import urlparse try: from pathlib import Path PYTORCH_PRETRAINED_BERT_CACHE = Path( os.getenv('PYTORCH_TRANSFORMERS_CACHE', os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path))) except (AttributeError, ImportError): PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('PYTORCH_TRANSFORMERS_CACHE', os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)) PYTORCH_TRANSFORMERS_CACHE = PYTORCH_PRETRAINED_BERT_CACHE # Kept for backward compatibility WEIGHTS_NAME = "pytorch_model.bin" TF_WEIGHTS_NAME = 'model.ckpt' CONFIG_NAME = "config.json" logger = logging.getLogger(__name__) # pylint: disable=invalid-name if not six.PY2: def add_start_docstrings(*docstr): def docstring_decorator(fn): fn.__doc__ = ''.join(docstr) + fn.__doc__ return fn return docstring_decorator def add_end_docstrings(*docstr): def docstring_decorator(fn): fn.__doc__ = fn.__doc__ + ''.join(docstr) return fn return docstring_decorator else: # Not possible to update class docstrings on python2 def add_start_docstrings(*docstr): def docstring_decorator(fn): return fn return docstring_decorator def add_end_docstrings(*docstr): def docstring_decorator(fn): return fn return docstring_decorator def url_to_filename(url, etag=None): """ Convert `url` into a hashed filename in a repeatable way. If `etag` is specified, append its hash to the url's, delimited by a period. """ url_bytes = url.encode('utf-8') url_hash = sha256(url_bytes) filename = url_hash.hexdigest() if etag: etag_bytes = etag.encode('utf-8') etag_hash = sha256(etag_bytes) filename += '.' + etag_hash.hexdigest() return filename def filename_to_url(filename, cache_dir=None): """ Return the url and etag (which may be ``None``) stored for `filename`. Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist. """ if cache_dir is None: cache_dir = PYTORCH_TRANSFORMERS_CACHE if sys.version_info[0] == 3 and isinstance(cache_dir, Path): cache_dir = str(cache_dir) cache_path = os.path.join(cache_dir, filename) if not os.path.exists(cache_path): raise EnvironmentError("file {} not found".format(cache_path)) meta_path = cache_path + '.json' if not os.path.exists(meta_path): raise EnvironmentError("file {} not found".format(meta_path)) with open(meta_path, encoding="utf-8") as meta_file: metadata = json.load(meta_file) url = metadata['url'] etag = metadata['etag'] return url, etag def cached_path(url_or_filename, cache_dir=None, force_download=False, proxies=None): """ Given something that might be a URL (or might be a local path), determine which. 
If it's a URL, download the file and cache it, and return the path to the cached file. If it's already a local path, make sure the file exists and then return the path. Args: cache_dir: specify a cache directory to save the file to (overwrite the default cache dir). force_download: if True, re-dowload the file even if it's already cached in the cache dir. """ if cache_dir is None: cache_dir = PYTORCH_TRANSFORMERS_CACHE if sys.version_info[0] == 3 and isinstance(url_or_filename, Path): url_or_filename = str(url_or_filename) if sys.version_info[0] == 3 and isinstance(cache_dir, Path): cache_dir = str(cache_dir) parsed = urlparse(url_or_filename) if parsed.scheme in ('http', 'https', 's3'): # URL, so get it from the cache (downloading if necessary) return get_from_cache(url_or_filename, cache_dir=cache_dir, force_download=force_download, proxies=proxies) elif os.path.exists(url_or_filename): # File, and it exists. return url_or_filename elif parsed.scheme == '': # File, but it doesn't exist. raise EnvironmentError("file {} not found".format(url_or_filename)) else: # Something unknown raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename)) def split_s3_path(url): """Split a full s3 path into the bucket name and path.""" parsed = urlparse(url) if not parsed.netloc or not parsed.path: raise ValueError("bad s3 path {}".format(url)) bucket_name = parsed.netloc s3_path = parsed.path # Remove '/' at beginning of path. if s3_path.startswith("/"): s3_path = s3_path[1:] return bucket_name, s3_path def s3_request(func): """ Wrapper function for s3 requests in order to create more helpful error messages. """ @wraps(func) def wrapper(url, *args, **kwargs): try: return func(url, *args, **kwargs) except ClientError as exc: if int(exc.response["Error"]["Code"]) == 404: raise EnvironmentError("file {} not found".format(url)) else: raise return wrapper @s3_request def s3_etag(url, proxies=None): """Check ETag on S3 object.""" s3_resource = boto3.resource("s3", config=Config(proxies=proxies)) bucket_name, s3_path = split_s3_path(url) s3_object = s3_resource.Object(bucket_name, s3_path) return s3_object.e_tag @s3_request def s3_get(url, temp_file, proxies=None): """Pull a file directly from S3.""" s3_resource = boto3.resource("s3", config=Config(proxies=proxies)) bucket_name, s3_path = split_s3_path(url) s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file) def http_get(url, temp_file, proxies=None): req = requests.get(url, stream=True, proxies=proxies) content_length = req.headers.get('Content-Length') total = int(content_length) if content_length is not None else None progress = tqdm(unit="B", total=total) for chunk in req.iter_content(chunk_size=1024): if chunk: # filter out keep-alive new chunks progress.update(len(chunk)) temp_file.write(chunk) progress.close() def get_from_cache(url, cache_dir=None, force_download=False, proxies=None): """ Given a URL, look for the corresponding dataset in the local cache. If it's not there, download it. Then return the path to the cached file. """ if cache_dir is None: cache_dir = PYTORCH_TRANSFORMERS_CACHE if sys.version_info[0] == 3 and isinstance(cache_dir, Path): cache_dir = str(cache_dir) if sys.version_info[0] == 2 and not isinstance(cache_dir, str): cache_dir = str(cache_dir) if not os.path.exists(cache_dir): os.makedirs(cache_dir) # Get eTag to add to filename, if it exists. 
if url.startswith("s3://"): etag = s3_etag(url, proxies=proxies) else: try: response = requests.head(url, allow_redirects=True, proxies=proxies) if response.status_code != 200: etag = None else: etag = response.headers.get("ETag") except EnvironmentError: etag = None if sys.version_info[0] == 2 and etag is not None: etag = etag.decode('utf-8') filename = url_to_filename(url, etag) # get cache path to put the file cache_path = os.path.join(cache_dir, filename) # If we don't have a connection (etag is None) and can't identify the file # try to get the last downloaded one if not os.path.exists(cache_path) and etag is None: matching_files = fnmatch.filter(os.listdir(cache_dir), filename + '.*') matching_files = list(filter(lambda s: not s.endswith('.json'), matching_files)) if matching_files: cache_path = os.path.join(cache_dir, matching_files[-1]) if not os.path.exists(cache_path) or force_download: # Download to temporary file, then copy to cache dir once finished. # Otherwise you get corrupt cache entries if the download gets interrupted. with tempfile.NamedTemporaryFile() as temp_file: logger.info("%s not found in cache or force_download set to True, downloading to %s", url, temp_file.name) # GET file object if url.startswith("s3://"): s3_get(url, temp_file, proxies=proxies) else: http_get(url, temp_file, proxies=proxies) # we are copying the file before closing it, so flush to avoid truncation temp_file.flush() # shutil.copyfileobj() starts at the current position, so go to the start temp_file.seek(0) logger.info("copying %s to cache at %s", temp_file.name, cache_path) with open(cache_path, 'wb') as cache_file: shutil.copyfileobj(temp_file, cache_file) logger.info("creating metadata file for %s", cache_path) meta = {'url': url, 'etag': etag} meta_path = cache_path + '.json' with open(meta_path, 'w') as meta_file: output_string = json.dumps(meta) if sys.version_info[0] == 2 and isinstance(output_string, str): output_string = unicode(output_string, 'utf-8') # The beauty of python 2 meta_file.write(output_string) logger.info("removing temp file %s", temp_file.name) return cache_path
Versatile-Diffusion-master
lib/model_zoo/optimus_models/file_utils.py
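file_utils.py above names cache entries by hashing the URL and, when available, the ETag. The short sketch below reimplements that naming scheme standalone for illustration rather than importing the module; the example URL and ETag value are made up.

from hashlib import sha256

def cache_filename(url: str, etag: str = None) -> str:
    # Filename is sha256(url), optionally extended with '.' + sha256(etag),
    # mirroring url_to_filename in the file above.
    name = sha256(url.encode("utf-8")).hexdigest()
    if etag is not None:
        name += "." + sha256(etag.encode("utf-8")).hexdigest()
    return name

print(cache_filename("https://example.com/pytorch_model.bin", etag='"abc123"'))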
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes.""" from __future__ import absolute_import, division, print_function, unicode_literals import collections import logging import os import unicodedata from io import open from .tokenization_utils import PreTrainedTokenizer logger = logging.getLogger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'} PRETRAINED_VOCAB_FILES_MAP = { 'vocab_file': { 'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt", 'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt", 'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt", 'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt", 'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt", 'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt", 'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt", 'bert-base-german-cased': "https://int-deepset-models-bert.s3.eu-central-1.amazonaws.com/pytorch/bert-base-german-cased-vocab.txt", 'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-vocab.txt", 'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-vocab.txt", 'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-vocab.txt", 'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-vocab.txt", 'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-vocab.txt", } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { 'bert-base-uncased': 512, 'bert-large-uncased': 512, 'bert-base-cased': 512, 'bert-large-cased': 512, 'bert-base-multilingual-uncased': 512, 'bert-base-multilingual-cased': 512, 'bert-base-chinese': 512, 'bert-base-german-cased': 512, 'bert-large-uncased-whole-word-masking': 512, 'bert-large-cased-whole-word-masking': 512, 'bert-large-uncased-whole-word-masking-finetuned-squad': 512, 'bert-large-cased-whole-word-masking-finetuned-squad': 512, 'bert-base-cased-finetuned-mrpc': 512, } PRETRAINED_INIT_CONFIGURATION = { 'bert-base-uncased': {'do_lower_case': True}, 'bert-large-uncased': {'do_lower_case': True}, 'bert-base-cased': {'do_lower_case': False}, 'bert-large-cased': {'do_lower_case': False}, 'bert-base-multilingual-uncased': {'do_lower_case': True}, 'bert-base-multilingual-cased': {'do_lower_case': False}, 
'bert-base-chinese': {'do_lower_case': False}, 'bert-base-german-cased': {'do_lower_case': False}, 'bert-large-uncased-whole-word-masking': {'do_lower_case': True}, 'bert-large-cased-whole-word-masking': {'do_lower_case': False}, 'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True}, 'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False}, 'bert-base-cased-finetuned-mrpc': {'do_lower_case': False}, } def load_vocab(vocab_file): """Loads a vocabulary file into a dictionary.""" vocab = collections.OrderedDict() with open(vocab_file, "r", encoding="utf-8") as reader: tokens = reader.readlines() for index, token in enumerate(tokens): token = token.rstrip('\n') vocab[token] = index return vocab def whitespace_tokenize(text): """Runs basic whitespace cleaning and splitting on a piece of text.""" text = text.strip() if not text: return [] tokens = text.split() return tokens class BertTokenizer(PreTrainedTokenizer): r""" Constructs a BertTokenizer. :class:`~pytorch_transformers.BertTokenizer` runs end-to-end tokenization: punctuation splitting + wordpiece Args: vocab_file: Path to a one-wordpiece-per-line vocabulary file do_lower_case: Whether to lower case the input. Only has an effect when do_wordpiece_only=False do_basic_tokenize: Whether to do basic tokenization before wordpiece. max_len: An artificial maximum length to truncate tokenized sequences to; Effective maximum length is always the minimum of this value (if specified) and the underlying BERT model's sequence length. never_split: List of tokens which will never be split during tokenization. Only has an effect when do_wordpiece_only=False """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, **kwargs): """Constructs a BertTokenizer. Args: **vocab_file**: Path to a one-wordpiece-per-line vocabulary file **do_lower_case**: (`optional`) boolean (default True) Whether to lower case the input Only has an effect when do_basic_tokenize=True **do_basic_tokenize**: (`optional`) boolean (default True) Whether to do basic tokenization before wordpiece. **never_split**: (`optional`) list of string List of tokens which will never be split during tokenization. Only has an effect when do_basic_tokenize=True **tokenize_chinese_chars**: (`optional`) boolean (default True) Whether to tokenize Chinese characters. This should likely be deactivated for Japanese: see: https://github.com/huggingface/pytorch-pretrained-BERT/issues/328 """ super(BertTokenizer, self).__init__(unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs) self.max_len_single_sentence = self.max_len - 2 # take into account special tokens self.max_len_sentences_pair = self.max_len - 3 # take into account special tokens if not os.path.isfile(vocab_file): raise ValueError( "Can't find a vocabulary file at path '{}'. 
To load the vocabulary from a Google pretrained " "model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file)) self.vocab = load_vocab(vocab_file) self.ids_to_tokens = collections.OrderedDict( [(ids, tok) for tok, ids in self.vocab.items()]) self.do_basic_tokenize = do_basic_tokenize if do_basic_tokenize: self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars) self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token) @property def vocab_size(self): return len(self.vocab) def _tokenize(self, text): split_tokens = [] if self.do_basic_tokenize: for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens): for sub_token in self.wordpiece_tokenizer.tokenize(token): split_tokens.append(sub_token) else: split_tokens = self.wordpiece_tokenizer.tokenize(text) return split_tokens def _convert_token_to_id(self, token): """ Converts a token (str/unicode) in an id using the vocab. """ return self.vocab.get(token, self.vocab.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (string/unicode) using the vocab.""" return self.ids_to_tokens.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): """ Converts a sequence of tokens (string) in a single string. """ out_string = ' '.join(tokens).replace(' ##', '').strip() return out_string def add_special_tokens_single_sentence(self, token_ids): """ Adds special tokens to the a sequence for sequence classification tasks. A BERT sequence has the following format: [CLS] X [SEP] """ return [self.cls_token_id] + token_ids + [self.sep_token_id] def add_special_tokens_sentences_pair(self, token_ids_0, token_ids_1): """ Adds special tokens to a sequence pair for sequence classification tasks. A BERT sequence pair has the following format: [CLS] A [SEP] B [SEP] """ sep = [self.sep_token_id] cls = [self.cls_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep def save_vocabulary(self, vocab_path): """Save the tokenizer vocabulary to a directory or file.""" index = 0 if os.path.isdir(vocab_path): vocab_file = os.path.join(vocab_path, VOCAB_FILES_NAMES['vocab_file']) else: vocab_file = vocab_path with open(vocab_file, "w", encoding="utf-8") as writer: for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning("Saving vocabulary to {}: vocabulary indices are not consecutive." " Please check that the vocabulary is not corrupted!".format(vocab_file)) index = token_index writer.write(token + u'\n') index += 1 return (vocab_file,) class BasicTokenizer(object): """Runs basic tokenization (punctuation splitting, lower casing, etc.).""" def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True): """ Constructs a BasicTokenizer. Args: **do_lower_case**: Whether to lower case the input. **never_split**: (`optional`) list of str Kept for backward compatibility purposes. Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`) List of token not to split. **tokenize_chinese_chars**: (`optional`) boolean (default True) Whether to tokenize Chinese characters. 
This should likely be deactivated for Japanese: see: https://github.com/huggingface/pytorch-pretrained-BERT/issues/328 """ if never_split is None: never_split = [] self.do_lower_case = do_lower_case self.never_split = never_split self.tokenize_chinese_chars = tokenize_chinese_chars def tokenize(self, text, never_split=None): """ Basic Tokenization of a piece of text. Split on "white spaces" only, for sub-word tokenization, see WordPieceTokenizer. Args: **never_split**: (`optional`) list of str Kept for backward compatibility purposes. Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`) List of token not to split. """ never_split = self.never_split + (never_split if never_split is not None else []) text = self._clean_text(text) # This was added on November 1st, 2018 for the multilingual and Chinese # models. This is also applied to the English models now, but it doesn't # matter since the English models were not trained on any Chinese data # and generally don't have any Chinese data in them (there are Chinese # characters in the vocabulary because Wikipedia does have some Chinese # words in the English Wikipedia.). if self.tokenize_chinese_chars: text = self._tokenize_chinese_chars(text) orig_tokens = whitespace_tokenize(text) split_tokens = [] for token in orig_tokens: if self.do_lower_case and token not in never_split: token = token.lower() token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token)) output_tokens = whitespace_tokenize(" ".join(split_tokens)) return output_tokens def _run_strip_accents(self, text): """Strips accents from a piece of text.""" text = unicodedata.normalize("NFD", text) output = [] for char in text: cat = unicodedata.category(char) if cat == "Mn": continue output.append(char) return "".join(output) def _run_split_on_punc(self, text, never_split=None): """Splits punctuation on a piece of text.""" if never_split is not None and text in never_split: return [text] chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return ["".join(x) for x in output] def _tokenize_chinese_chars(self, text): """Adds whitespace around any CJK character.""" output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(" ") output.append(char) output.append(" ") else: output.append(char) return "".join(output) def _is_chinese_char(self, cp): """Checks whether CP is the codepoint of a CJK character.""" # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. 
if ((cp >= 0x4E00 and cp <= 0x9FFF) or # (cp >= 0x3400 and cp <= 0x4DBF) or # (cp >= 0x20000 and cp <= 0x2A6DF) or # (cp >= 0x2A700 and cp <= 0x2B73F) or # (cp >= 0x2B740 and cp <= 0x2B81F) or # (cp >= 0x2B820 and cp <= 0x2CEAF) or (cp >= 0xF900 and cp <= 0xFAFF) or # (cp >= 0x2F800 and cp <= 0x2FA1F)): # return True return False def _clean_text(self, text): """Performs invalid character removal and whitespace cleanup on text.""" output = [] for char in text: cp = ord(char) if cp == 0 or cp == 0xfffd or _is_control(char): continue if _is_whitespace(char): output.append(" ") else: output.append(char) return "".join(output) class WordpieceTokenizer(object): """Runs WordPiece tokenization.""" def __init__(self, vocab, unk_token, max_input_chars_per_word=100): self.vocab = vocab self.unk_token = unk_token self.max_input_chars_per_word = max_input_chars_per_word def tokenize(self, text): """Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform tokenization using the given vocabulary. For example: input = "unaffable" output = ["un", "##aff", "##able"] Args: text: A single token or whitespace separated tokens. This should have already been passed through `BasicTokenizer`. Returns: A list of wordpiece tokens. """ output_tokens = [] for token in whitespace_tokenize(text): chars = list(token) if len(chars) > self.max_input_chars_per_word: output_tokens.append(self.unk_token) continue is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = "".join(chars[start:end]) if start > 0: substr = "##" + substr if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end if is_bad: output_tokens.append(self.unk_token) else: output_tokens.extend(sub_tokens) return output_tokens def _is_whitespace(char): """Checks whether `chars` is a whitespace character.""" # \t, \n, and \r are technically contorl characters but we treat them # as whitespace since they are generally considered as such. if char == " " or char == "\t" or char == "\n" or char == "\r": return True cat = unicodedata.category(char) if cat == "Zs": return True return False def _is_control(char): """Checks whether `chars` is a control character.""" # These are technically control characters but we count them as whitespace # characters. if char == "\t" or char == "\n" or char == "\r": return False cat = unicodedata.category(char) if cat.startswith("C"): return True return False def _is_punctuation(char): """Checks whether `chars` is a punctuation character.""" cp = ord(char) # We treat all non-letter/number ASCII as punctuation. # Characters such as "^", "$", and "`" are not in the Unicode # Punctuation class but we treat them as punctuation anyways, for # consistency. if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)): return True cat = unicodedata.category(char) if cat.startswith("P"): return True return False
Versatile-Diffusion-master
lib/model_zoo/optimus_models/tokenization_bert.py
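WordpieceTokenizer above uses a greedy longest-match-first loop over each whitespace token. The sketch below condenses that loop into a standalone function with a hand-made three-entry vocabulary (an assumption for illustration, not a real BERT vocab file), reproducing the docstring's "unaffable" example.

def wordpiece(token, vocab, unk="[UNK]"):
    chars, start, pieces = list(token), 0, []
    while start < len(chars):
        end, cur = len(chars), None
        # Try the longest remaining substring first, shrinking from the right.
        while start < end:
            sub = "".join(chars[start:end])
            if start > 0:
                sub = "##" + sub  # continuation pieces are prefixed with '##'
            if sub in vocab:
                cur = sub
                break
            end -= 1
        if cur is None:
            return [unk]
        pieces.append(cur)
        start = end
    return pieces

vocab = {"un", "##aff", "##able"}
print(wordpiece("unaffable", vocab))  # ['un', '##aff', '##able']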
# coding=utf-8 # Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ OpenAI GPT-2 configuration """ from __future__ import absolute_import, division, print_function, unicode_literals import json import logging import sys from io import open from .configuration_utils import PretrainedConfig logger = logging.getLogger(__name__) GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP = {"gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-config.json", "gpt2-medium": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-config.json", "gpt2-large": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-large-config.json"} class GPT2Config(PretrainedConfig): """Configuration class to store the configuration of a `GPT2Model`. Args: vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `GPT2Model` or a configuration json file. n_positions: Number of positional embeddings. n_ctx: Size of the causal mask (usually same as n_positions). n_embd: Dimensionality of the embeddings and hidden states. n_layer: Number of hidden layers in the Transformer encoder. n_head: Number of attention heads for each attention layer in the Transformer encoder. layer_norm_epsilon: epsilon to use in the layer norm layers resid_pdrop: The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler. attn_pdrop: The dropout ratio for the attention probabilities. embd_pdrop: The dropout ratio for the embeddings. initializer_range: The sttdev of the truncated_normal_initializer for initializing all weight matrices. """ pretrained_config_archive_map = GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP def __init__( self, vocab_size_or_config_json_file=50257, n_positions=1024, n_ctx=1024, n_embd=768, n_layer=12, n_head=12, resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, num_labels=1, summary_type='cls_index', summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, **kwargs ): """Constructs GPT2Config. Args: vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `GPT2Model` or a configuration json file. n_positions: Number of positional embeddings. n_ctx: Size of the causal mask (usually same as n_positions). n_embd: Dimensionality of the embeddings and hidden states. n_layer: Number of hidden layers in the Transformer encoder. n_head: Number of attention heads for each attention layer in the Transformer encoder. layer_norm_epsilon: epsilon to use in the layer norm layers resid_pdrop: The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler. attn_pdrop: The dropout ratio for the attention probabilities. embd_pdrop: The dropout ratio for the embeddings. initializer_range: The sttdev of the truncated_normal_initializer for initializing all weight matrices. 
""" super(GPT2Config, self).__init__(**kwargs) if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2 and isinstance(vocab_size_or_config_json_file, unicode)): with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader: json_config = json.loads(reader.read()) for key, value in json_config.items(): self.__dict__[key] = value elif isinstance(vocab_size_or_config_json_file, int): self.vocab_size = vocab_size_or_config_json_file self.n_ctx = n_ctx self.n_positions = n_positions self.n_embd = n_embd self.n_layer = n_layer self.n_head = n_head self.resid_pdrop = resid_pdrop self.embd_pdrop = embd_pdrop self.attn_pdrop = attn_pdrop self.layer_norm_epsilon = layer_norm_epsilon self.initializer_range = initializer_range self.num_labels = num_labels self.summary_type = summary_type self.summary_use_proj = summary_use_proj self.summary_activation = summary_activation self.summary_first_dropout = summary_first_dropout self.summary_proj_to_labels = summary_proj_to_labels else: raise ValueError( "First argument must be either a vocabulary size (int)" "or the path to a pretrained model config file (str)" ) @property def max_position_embeddings(self): return self.n_positions @property def hidden_size(self): return self.n_embd @property def num_attention_heads(self): return self.n_head @property def num_hidden_layers(self): return self.n_layer
Versatile-Diffusion-master
lib/model_zoo/optimus_models/configuration_gpt2.py
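GPT2Config above accepts either an int vocabulary size or a path to a JSON file whose keys become attributes, and exposes hidden_size and related properties as aliases onto the GPT-2 field names. The hypothetical TinyGPT2Config below is a simplified sketch of that dual-constructor pattern, not the library class itself.

import json, os, tempfile

class TinyGPT2Config:
    def __init__(self, vocab_size_or_json=50257, n_embd=768, n_layer=12, n_head=12):
        if isinstance(vocab_size_or_json, str):
            # A path to a JSON file: its keys become attributes.
            with open(vocab_size_or_json, "r", encoding="utf-8") as f:
                for k, v in json.load(f).items():
                    setattr(self, k, v)
        else:
            # A plain int: treated as the vocabulary size.
            self.vocab_size = vocab_size_or_json
            self.n_embd, self.n_layer, self.n_head = n_embd, n_layer, n_head

    @property
    def hidden_size(self):  # model-agnostic alias, as in the class above
        return self.n_embd

cfg = TinyGPT2Config(50257, n_embd=1024)
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
    json.dump(cfg.__dict__, f)
cfg2 = TinyGPT2Config(f.name)  # round-trip through a config file
print(cfg.hidden_size, cfg2.hidden_size)  # 1024 1024
os.remove(f.name)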
import torch import torch.optim as optim import numpy as np import copy from ... import sync from ...cfg_holder import cfg_unique_holder as cfguh def singleton(class_): instances = {} def getinstance(*args, **kwargs): if class_ not in instances: instances[class_] = class_(*args, **kwargs) return instances[class_] return getinstance @singleton class get_scheduler(object): def __init__(self): self.lr_scheduler = {} def register(self, lrsf, name): self.lr_scheduler[name] = lrsf def __call__(self, cfg): if cfg is None: return None if isinstance(cfg, list): schedulers = [] for ci in cfg: t = ci.type schedulers.append( self.lr_scheduler[t](**ci.args)) if len(schedulers) == 0: raise ValueError else: return compose_scheduler(schedulers) t = cfg.type return self.lr_scheduler[t](**cfg.args) def register(name): def wrapper(class_): get_scheduler().register(class_, name) return class_ return wrapper class template_scheduler(object): def __init__(self, step): self.step = step def __getitem__(self, idx): raise ValueError def set_lr(self, optim, new_lr, pg_lrscale=None): """ Set Each parameter_groups in optim with new_lr New_lr can be find according to the idx. pg_lrscale tells how to scale each pg. """ # new_lr = self.__getitem__(idx) pg_lrscale = copy.deepcopy(pg_lrscale) for pg in optim.param_groups: if pg_lrscale is None: pg['lr'] = new_lr else: pg['lr'] = new_lr * pg_lrscale.pop(pg['name']) assert (pg_lrscale is None) or (len(pg_lrscale)==0), \ "pg_lrscale doesn't match pg" @register('constant') class constant_scheduler(template_scheduler): def __init__(self, lr, step): super().__init__(step) self.lr = lr def __getitem__(self, idx): if idx >= self.step: raise ValueError return self.lr @register('poly') class poly_scheduler(template_scheduler): def __init__(self, start_lr, end_lr, power, step): super().__init__(step) self.start_lr = start_lr self.end_lr = end_lr self.power = power def __getitem__(self, idx): if idx >= self.step: raise ValueError a, b = self.start_lr, self.end_lr p, n = self.power, self.step return b + (a-b)*((1-idx/n)**p) @register('linear') class linear_scheduler(template_scheduler): def __init__(self, start_lr, end_lr, step): super().__init__(step) self.start_lr = start_lr self.end_lr = end_lr def __getitem__(self, idx): if idx >= self.step: raise ValueError a, b, n = self.start_lr, self.end_lr, self.step return b + (a-b)*(1-idx/n) @register('multistage') class constant_scheduler(template_scheduler): def __init__(self, start_lr, milestones, gamma, step): super().__init__(step) self.start_lr = start_lr m = [0] + milestones + [step] lr_iter = start_lr self.lr = [] for ms, me in zip(m[0:-1], m[1:]): for _ in range(ms, me): self.lr.append(lr_iter) lr_iter *= gamma def __getitem__(self, idx): if idx >= self.step: raise ValueError return self.lr[idx] class compose_scheduler(template_scheduler): def __init__(self, schedulers): self.schedulers = schedulers self.step = [si.step for si in schedulers] self.step_milestone = [] acc = 0 for i in self.step: acc += i self.step_milestone.append(acc) self.step = sum(self.step) def __getitem__(self, idx): if idx >= self.step: raise ValueError ms = self.step_milestone for idx, (mi, mj) in enumerate(zip(ms[:-1], ms[1:])): if mi <= idx < mj: return self.schedulers[idx-mi] raise ValueError #################### # lambda schedular # #################### class LambdaWarmUpCosineScheduler(template_scheduler): """ note: use with a base_lr of 1.0 """ def __init__(self, base_lr, warm_up_steps, lr_min, lr_max, lr_start, max_decay_steps, verbosity_interval=0): 
cfgt = cfguh().cfg.train bs = cfgt.batch_size if 'gradacc_every' not in cfgt: print('Warning, gradacc_every is not found in xml, use 1 as default.') acc = cfgt.get('gradacc_every', 1) self.lr_multi = base_lr * bs * acc self.lr_warm_up_steps = warm_up_steps self.lr_start = lr_start self.lr_min = lr_min self.lr_max = lr_max self.lr_max_decay_steps = max_decay_steps self.last_lr = 0. self.verbosity_interval = verbosity_interval def schedule(self, n): if self.verbosity_interval > 0: if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_lr}") if n < self.lr_warm_up_steps: lr = (self.lr_max - self.lr_start) / self.lr_warm_up_steps * n + self.lr_start self.last_lr = lr return lr else: t = (n - self.lr_warm_up_steps) / (self.lr_max_decay_steps - self.lr_warm_up_steps) t = min(t, 1.0) lr = self.lr_min + 0.5 * (self.lr_max - self.lr_min) * ( 1 + np.cos(t * np.pi)) self.last_lr = lr return lr def __getitem__(self, idx): return self.schedule(idx) * self.lr_multi class LambdaWarmUpCosineScheduler2(template_scheduler): """ supports repeated iterations, configurable via lists note: use with a base_lr of 1.0. """ def __init__(self, base_lr, warm_up_steps, f_min, f_max, f_start, cycle_lengths, verbosity_interval=0): cfgt = cfguh().cfg.train # bs = cfgt.batch_size # if 'gradacc_every' not in cfgt: # print('Warning, gradacc_every is not found in xml, use 1 as default.') # acc = cfgt.get('gradacc_every', 1) # self.lr_multi = base_lr * bs * acc self.lr_multi = base_lr assert len(warm_up_steps) == len(f_min) == len(f_max) == len(f_start) == len(cycle_lengths) self.lr_warm_up_steps = warm_up_steps self.f_start = f_start self.f_min = f_min self.f_max = f_max self.cycle_lengths = cycle_lengths self.cum_cycles = np.cumsum([0] + list(self.cycle_lengths)) self.last_f = 0. self.verbosity_interval = verbosity_interval def find_in_interval(self, n): interval = 0 for cl in self.cum_cycles[1:]: if n <= cl: return interval interval += 1 def schedule(self, n): cycle = self.find_in_interval(n) n = n - self.cum_cycles[cycle] if self.verbosity_interval > 0: if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_f}, " f"current cycle {cycle}") if n < self.lr_warm_up_steps[cycle]: f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle] self.last_f = f return f else: t = (n - self.lr_warm_up_steps[cycle]) / (self.cycle_lengths[cycle] - self.lr_warm_up_steps[cycle]) t = min(t, 1.0) f = self.f_min[cycle] + 0.5 * (self.f_max[cycle] - self.f_min[cycle]) * ( 1 + np.cos(t * np.pi)) self.last_f = f return f def __getitem__(self, idx): return self.schedule(idx) * self.lr_multi @register('stable_diffusion_linear') class LambdaLinearScheduler(LambdaWarmUpCosineScheduler2): def schedule(self, n): cycle = self.find_in_interval(n) n = n - self.cum_cycles[cycle] if self.verbosity_interval > 0: if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_f}, " f"current cycle {cycle}") if n < self.lr_warm_up_steps[cycle]: f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle] self.last_f = f return f else: f = self.f_min[cycle] + (self.f_max[cycle] - self.f_min[cycle]) * (self.cycle_lengths[cycle] - n) / (self.cycle_lengths[cycle]) self.last_f = f return f
Versatile-Diffusion-master
lib/model_zoo/common/get_scheduler.py
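A minimal usage sketch for the scheduler registry dumped above, assuming the package layout given by the file path; the SimpleNamespace config and the numeric values are illustrative stand-ins for the project's real cfg nodes.

from types import SimpleNamespace
from lib.model_zoo.common.get_scheduler import get_scheduler

# illustrative cfg; the real project builds an equivalent node from its config files
cfg = SimpleNamespace(type='poly', args={'start_lr': 1e-4, 'end_lr': 1e-6, 'power': 0.9, 'step': 1000})
scheduler = get_scheduler()(cfg)   # singleton registry dispatches on cfg.type
lr_now = scheduler[100]            # learning rate at iteration 100
# scheduler.set_lr(optimizer, lr_now) would then write it into every param group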
import torch import torch.optim as optim import numpy as np import itertools def singleton(class_): instances = {} def getinstance(*args, **kwargs): if class_ not in instances: instances[class_] = class_(*args, **kwargs) return instances[class_] return getinstance class get_optimizer(object): def __init__(self): self.optimizer = {} self.register(optim.SGD, 'sgd') self.register(optim.Adam, 'adam') self.register(optim.AdamW, 'adamw') def register(self, optim, name): self.optimizer[name] = optim def __call__(self, net, cfg): if cfg is None: return None t = cfg.type if isinstance(net, (torch.nn.DataParallel, torch.nn.parallel.DistributedDataParallel)): netm = net.module else: netm = net pg = getattr(netm, 'parameter_group', None) if pg is not None: params = [] for group_name, module_or_para in pg.items(): if not isinstance(module_or_para, list): module_or_para = [module_or_para] grouped_params = [mi.parameters() if isinstance(mi, torch.nn.Module) else [mi] for mi in module_or_para] grouped_params = itertools.chain(*grouped_params) pg_dict = {'params':grouped_params, 'name':group_name} params.append(pg_dict) else: params = net.parameters() return self.optimizer[t](params, lr=0, **cfg.args)
Versatile-Diffusion-master
lib/model_zoo/common/get_optimizer.py
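A hedged sketch of the optimizer factory above; the tiny model and the weight-decay value are invented for illustration. The factory hard-codes lr=0 because the schedulers in this package overwrite the learning rate each step.

import torch.nn as nn
from types import SimpleNamespace
from lib.model_zoo.common.get_optimizer import get_optimizer

net = nn.Linear(8, 8)                                   # placeholder network
cfg = SimpleNamespace(type='adamw', args={'weight_decay': 0.01})
optimizer = get_optimizer()(net, cfg)                   # AdamW over net.parameters(), lr starts at 0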
import torch import torch.nn as nn import torch.nn.functional as F import numpy as np import copy import functools import itertools import matplotlib.pyplot as plt ######## # unit # ######## def singleton(class_): instances = {} def getinstance(*args, **kwargs): if class_ not in instances: instances[class_] = class_(*args, **kwargs) return instances[class_] return getinstance def str2value(v): v = v.strip() try: return int(v) except: pass try: return float(v) except: pass if v in ('True', 'true'): return True elif v in ('False', 'false'): return False else: return v @singleton class get_unit(object): def __init__(self): self.unit = {} self.register('none', None) # general convolution self.register('conv' , nn.Conv2d) self.register('bn' , nn.BatchNorm2d) self.register('relu' , nn.ReLU) self.register('relu6' , nn.ReLU6) self.register('lrelu' , nn.LeakyReLU) self.register('dropout' , nn.Dropout) self.register('dropout2d', nn.Dropout2d) self.register('sine', Sine) self.register('relusine', ReLUSine) def register(self, name, unitf,): self.unit[name] = unitf def __call__(self, name): if name is None: return None i = name.find('(') i = len(name) if i==-1 else i t = name[:i] f = self.unit[t] args = name[i:].strip('()') if len(args) == 0: args = {} return f else: args = args.split('=') args = [[','.join(i.split(',')[:-1]), i.split(',')[-1]] for i in args] args = list(itertools.chain.from_iterable(args)) args = [i.strip() for i in args if len(i)>0] kwargs = {} for k, v in zip(args[::2], args[1::2]): if v[0]=='(' and v[-1]==')': kwargs[k] = tuple([str2value(i) for i in v.strip('()').split(',')]) elif v[0]=='[' and v[-1]==']': kwargs[k] = [str2value(i) for i in v.strip('[]').split(',')] else: kwargs[k] = str2value(v) return functools.partial(f, **kwargs) def register(name): def wrapper(class_): get_unit().register(name, class_) return class_ return wrapper class Sine(object): def __init__(self, freq, gain=1): self.freq = freq self.gain = gain self.repr = 'sine(freq={}, gain={})'.format(freq, gain) def __call__(self, x, gain=1): act_gain = self.gain * gain return torch.sin(self.freq * x) * act_gain def __repr__(self,): return self.repr class ReLUSine(nn.Module): def __init(self): super().__init__() def forward(self, input): a = torch.sin(30 * input) b = nn.ReLU(inplace=False)(input) return a+b @register('lrelu_agc') # class lrelu_agc(nn.Module): class lrelu_agc(object): """ The lrelu layer with alpha, gain and clamp """ def __init__(self, alpha=0.1, gain=1, clamp=None): # super().__init__() self.alpha = alpha if gain == 'sqrt_2': self.gain = np.sqrt(2) else: self.gain = gain self.clamp = clamp self.repr = 'lrelu_agc(alpha={}, gain={}, clamp={})'.format( alpha, gain, clamp) # def forward(self, x, gain=1): def __call__(self, x, gain=1): x = F.leaky_relu(x, negative_slope=self.alpha, inplace=True) act_gain = self.gain * gain act_clamp = self.clamp * gain if self.clamp is not None else None if act_gain != 1: x = x * act_gain if act_clamp is not None: x = x.clamp(-act_clamp, act_clamp) return x def __repr__(self,): return self.repr #################### # spatial encoding # #################### @register('se') class SpatialEncoding(nn.Module): def __init__(self, in_dim, out_dim, sigma = 6, cat_input=True, require_grad=False,): super().__init__() assert out_dim % (2*in_dim) == 0, "dimension must be dividable" n = out_dim // 2 // in_dim m = 2**np.linspace(0, sigma, n) m = np.stack([m] + [np.zeros_like(m)]*(in_dim-1), axis=-1) m = np.concatenate([np.roll(m, i, axis=-1) for i in range(in_dim)], axis=0) self.emb 
= torch.FloatTensor(m) if require_grad: self.emb = nn.Parameter(self.emb, requires_grad=True) self.in_dim = in_dim self.out_dim = out_dim self.sigma = sigma self.cat_input = cat_input self.require_grad = require_grad def forward(self, x, format='[n x c]'): """ Args: x: [n x m1], m1 usually is 2 Outputs: y: [n x m2] m2 dimention number """ if format == '[bs x c x 2D]': xshape = x.shape x = x.permute(0, 2, 3, 1).contiguous() x = x.view(-1, x.size(-1)) elif format == '[n x c]': pass else: raise ValueError if not self.require_grad: self.emb = self.emb.to(x.device) y = torch.mm(x, self.emb.T) if self.cat_input: z = torch.cat([x, torch.sin(y), torch.cos(y)], dim=-1) else: z = torch.cat([torch.sin(y), torch.cos(y)], dim=-1) if format == '[bs x c x 2D]': z = z.view(xshape[0], xshape[2], xshape[3], -1) z = z.permute(0, 3, 1, 2).contiguous() return z def extra_repr(self): outstr = 'SpatialEncoding (in={}, out={}, sigma={}, cat_input={}, require_grad={})'.format( self.in_dim, self.out_dim, self.sigma, self.cat_input, self.require_grad) return outstr @register('rffe') class RFFEncoding(SpatialEncoding): """ Random Fourier Features """ def __init__(self, in_dim, out_dim, sigma = 6, cat_input=True, require_grad=False,): super().__init__(in_dim, out_dim, sigma, cat_input, require_grad) n = out_dim // 2 m = np.random.normal(0, sigma, size=(n, in_dim)) self.emb = torch.FloatTensor(m) if require_grad: self.emb = nn.Parameter(self.emb, requires_grad=True) def extra_repr(self): outstr = 'RFFEncoding (in={}, out={}, sigma={}, cat_input={}, require_grad={})'.format( self.in_dim, self.out_dim, self.sigma, self.cat_input, self.require_grad) return outstr ########## # helper # ########## def freeze(net): for m in net.modules(): if isinstance(m, ( nn.BatchNorm2d, nn.SyncBatchNorm,)): # inplace_abn not supported m.eval() for pi in net.parameters(): pi.requires_grad = False return net def common_init(m): if isinstance(m, ( nn.Conv2d, nn.ConvTranspose2d,)): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, ( nn.BatchNorm2d, nn.SyncBatchNorm,)): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) else: pass def init_module(module): """ Args: module: [nn.module] list or nn.module a list of module to be initialized. """ if isinstance(module, (list, tuple)): module = list(module) else: module = [module] for mi in module: for mii in mi.modules(): common_init(mii) def get_total_param(net): if getattr(net, 'parameters', None) is None: return 0 return sum(p.numel() for p in net.parameters()) def get_total_param_sum(net): if getattr(net, 'parameters', None) is None: return 0 with torch.no_grad(): s = sum(p.cpu().detach().numpy().sum().item() for p in net.parameters()) return s
Versatile-Diffusion-master
lib/model_zoo/common/utils.py
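Illustrative calls showing how the get_unit spec-string parser and the SpatialEncoding registered above are meant to be used; the spec strings and sizes are arbitrary examples.

import torch
from lib.model_zoo.common.utils import get_unit

act = get_unit()('lrelu(negative_slope=0.2)')()        # functools.partial over nn.LeakyReLU
pos_enc = get_unit()('se(in_dim=2, out_dim=64)')()     # SpatialEncoding, registered via @register('se')
coords = torch.rand(16, 2)                             # e.g. 2D coordinates in [0, 1]
feats = pos_enc(coords)                                # [16 x (2 + 64)] since cat_input defaults to True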
from email.policy import strict import torch import torchvision.models import os.path as osp import copy from ...log_service import print_log from .utils import \ get_total_param, get_total_param_sum, \ get_unit def singleton(class_): instances = {} def getinstance(*args, **kwargs): if class_ not in instances: instances[class_] = class_(*args, **kwargs) return instances[class_] return getinstance def preprocess_model_args(args): # If args has layer_units, get the corresponding # units. # If args get backbone, get the backbone model. args = copy.deepcopy(args) if 'layer_units' in args: layer_units = [ get_unit()(i) for i in args.layer_units ] args.layer_units = layer_units if 'backbone' in args: args.backbone = get_model()(args.backbone) return args @singleton class get_model(object): def __init__(self): self.model = {} def register(self, model, name): self.model[name] = model def __call__(self, cfg, verbose=True): """ Construct model based on the config. """ t = cfg.type # the register is in each file if t.find('ldm')==0: from .. import ldm elif t=='autoencoderkl': from .. import autokl elif (t.find('clip')==0) or (t.find('openclip')==0): from .. import clip elif t.find('vd')==0: from .. import vd elif t.find('openai_unet')==0: from .. import openaimodel elif t.find('optimus')==0: from .. import optimus args = preprocess_model_args(cfg.args) net = self.model[t](**args) map_location = cfg.get('map_location', 'cpu') strict_sd = cfg.get('strict_sd', True) if 'ckpt' in cfg: checkpoint = torch.load(cfg.ckpt, map_location=map_location) net.load_state_dict(checkpoint['state_dict'], strict=strict_sd) if verbose: print_log('Load ckpt from {}'.format(cfg.ckpt)) elif 'pth' in cfg: sd = torch.load(cfg.pth, map_location=map_location) net.load_state_dict(sd, strict=strict_sd) if verbose: print_log('Load pth from {}'.format(cfg.pth)) elif 'hfm' in cfg: from huggingface_hub import hf_hub_download temppath = hf_hub_download(cfg.hfm[0], cfg.hfm[1]) sd = torch.load(temppath, map_location='cpu') strict_sd = cfg.get('strict_sd', True) net.load_state_dict(sd, strict=strict_sd) if verbose: print_log('Load hfm from {}/{}'.format(*cfg.hfm)) # display param_num & param_sum if verbose: print_log( 'Load {} with total {} parameters,' '{:.3f} parameter sum.'.format( t, get_total_param(net), get_total_param_sum(net) )) return net def register(name): def wrapper(class_): get_model().register(class_, name) return class_ return wrapper
Versatile-Diffusion-master
lib/model_zoo/common/get_model.py
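Registration-only sketch for the model factory above; ToyMLP and the type name are hypothetical, and the build call is left as a comment because it needs one of the project's real cfg nodes (attribute access plus dict-style .get).

import torch.nn as nn
from lib.model_zoo.common.get_model import get_model, register

@register('toy_mlp')                      # hypothetical model type name
class ToyMLP(nn.Module):
    def __init__(self, width=64):
        super().__init__()
        self.net = nn.Linear(width, width)

# net = get_model()(cfg)                  # with cfg.type == 'toy_mlp' and cfg.args == {'width': 64}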
from setuptools import setup, find_packages setup( name = 'retro-pytorch', packages = find_packages(exclude=[]), version = '0.3.8', license='MIT', description = 'RETRO - Retrieval Enhanced Transformer - Pytorch', long_description_content_type = 'text/markdown', author = 'Phil Wang', author_email = '[email protected]', url = 'https://github.com/lucidrains/RETRO-pytorch', keywords = [ 'artificial intelligence', 'deep learning', 'transformers', 'attention-mechanism', 'retrieval', ], install_requires=[ 'autofaiss', 'einops>=0.3', 'numpy', 'sentencepiece', 'torch>=1.6', 'tqdm' ], classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'Topic :: Scientific/Engineering :: Artificial Intelligence', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 3.6', ], )
RETRO-pytorch-main
setup.py
from functools import partial import torch import torch.nn.functional as F from torch import nn, einsum from retro_pytorch.retrieval import BERT_VOCAB_SIZE from einops import rearrange, repeat # constants MIN_DIM_HEAD = 32 # helper functions def exists(val): return val is not None def default(val, d): return val if exists(val) else d def divisible_by(val, divisor): return (val / divisor).is_integer() def cast_tuple(val, num = 1): return val if isinstance(val, tuple) else ((val,) * num) # deepnet init def deepnorm_init(transformer, beta, module_name_match_list = ['.ff.', '.to_v', '.to_out']): for name, module in transformer.named_modules(): if type(module) != nn.Linear: continue needs_beta_gain = any(map(lambda substr: substr in name, module_name_match_list)) gain = beta if needs_beta_gain else 1 nn.init.xavier_normal_(module.weight.data, gain = gain) if exists(module.bias): nn.init.constant_(module.bias.data, 0) # normalization class RMSNorm(nn.Module): def __init__( self, dim, *, eps = 1e-8, gated = False ): super().__init__() self.eps = eps self.scale = dim ** -0.5 self.gamma = nn.Parameter(torch.ones(dim)) self.weight = nn.Parameter(torch.ones(dim)) if gated else None def forward(self, x): norm = x.norm(keepdim = True, dim = -1) * self.scale out = (x / norm.clamp(min = self.eps)) * self.gamma if not exists(self.weight): return out return out * (x * self.weight).sigmoid() # pre and post norm residual wrapper modules class PreNorm(nn.Module): def __init__(self, dim, fn, norm_klass = RMSNorm): super().__init__() self.fn = fn self.norm = norm_klass(dim) def forward(self, x, *args, **kwargs): return self.fn(self.norm(x), *args, **kwargs) + x class PostNorm(nn.Module): def __init__(self, dim, fn, scale_residual = 1, norm_klass = RMSNorm): super().__init__() self.fn = fn self.scale_residual = scale_residual self.norm = norm_klass(dim) def forward(self, x, *args, **kwargs): residual = x * self.scale_residual out = self.fn(x, *args, **kwargs) + residual return self.norm(out) # positional embedding class RotaryEmbedding(nn.Module): def __init__(self, dim): super().__init__() inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim)) self.register_buffer('inv_freq', inv_freq) def forward(self, max_seq_len, *, device, offset = 0): seq = torch.arange(max_seq_len, device = device) + offset freqs = einsum('i , j -> i j', seq.type_as(self.inv_freq), self.inv_freq) emb = torch.cat((freqs, freqs), dim = -1) return rearrange(emb, 'n d -> 1 1 n d') def rotate_half(x): x = rearrange(x, '... (j d) -> ... 
j d', j = 2) x1, x2 = x.unbind(dim = -2) return torch.cat((-x2, x1), dim = -1) def apply_rotary_pos_emb(t, freqs): seq_len, rot_dim = t.shape[-2], freqs.shape[-1] t, t_pass = t[..., :rot_dim], t[..., rot_dim:] t = (t * freqs.cos()) + (rotate_half(t) * freqs.sin()) return torch.cat((t, t_pass), dim = -1) # feedforward class FeedForward(nn.Module): def __init__(self, dim, mult = 4, dropout = 0.): super().__init__() inner_dim = int(mult * dim) self.ff = nn.Sequential( nn.Linear(dim, inner_dim), nn.GELU(), nn.Dropout(dropout), nn.Linear(inner_dim, dim) ) def forward(self, x): return self.ff(x) # attention class Attention(nn.Module): def __init__( self, dim, *, context_dim = None, dim_head = 64, heads = 8, causal = False, dropout = 0., null_kv = False ): super().__init__() context_dim = default(context_dim, dim) self.heads = heads self.scale = dim_head ** -0.5 self.causal = causal inner_dim = dim_head * heads self.dropout = nn.Dropout(dropout) self.to_q = nn.Linear(dim, inner_dim, bias = False) self.to_k = nn.Linear(context_dim, inner_dim, bias = False) self.to_v = nn.Linear(context_dim, inner_dim, bias = False) self.to_out = nn.Linear(inner_dim, dim) # allowing for attending to nothing (null function) # and to save attention from breaking if all retrieved chunks are padded out self.null_k = nn.Parameter(torch.randn(inner_dim)) if null_kv else None self.null_v = nn.Parameter(torch.randn(inner_dim)) if null_kv else None def forward(self, x, mask = None, context = None, pos_emb = None): b, device, h, scale = x.shape[0], x.device, self.heads, self.scale kv_input = default(context, x) q, k, v = self.to_q(x), self.to_k(kv_input), self.to_v(kv_input) # split heads q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v)) # scale q = q * scale # apply relative positional encoding (rotary embeddings) if exists(pos_emb): q_pos_emb, k_pos_emb = cast_tuple(pos_emb, num = 2) q = apply_rotary_pos_emb(q, q_pos_emb) k = apply_rotary_pos_emb(k, k_pos_emb) # add null key / values if exists(self.null_k): nk, nv = self.null_k, self.null_v nk, nv = map(lambda t: repeat(t, '(h d) -> b h 1 d', b = b, h = h), (nk, nv)) k = torch.cat((nk, k), dim = -2) v = torch.cat((nv, v), dim = -2) # derive query key similarities sim = einsum('b h i d, b h j d -> b h i j', q, k) # masking mask_value = -torch.finfo(sim.dtype).max if exists(mask): if exists(self.null_k): mask = F.pad(mask, (1, 0), value = True) mask = rearrange(mask, 'b j -> b 1 1 j') sim = sim.masked_fill(~mask, mask_value) if self.causal: i, j = sim.shape[-2:] causal_mask = torch.ones(i, j, device = device, dtype = torch.bool).triu(j - i + 1) sim = sim.masked_fill(causal_mask, mask_value) # attention attn = sim.softmax(dim = -1) attn = self.dropout(attn) # aggregate out = einsum('b h i j, b h j d -> b h i d', attn, v) # merge heads out = rearrange(out, 'b h n d -> b n (h d)') # combine heads linear out return self.to_out(out) class ChunkedCrossAttention(nn.Module): def __init__( self, chunk_size, **kwargs ): super().__init__() self.chunk_size = chunk_size self.cross_attn = Attention(null_kv = True, **kwargs) def forward(self, x, *, context_mask = None, context, pos_emb = None): # derive variables chunk_size = self.chunk_size b, n, num_chunks, num_retrieved = x.shape[0], x.shape[-2], *context.shape[-4:-2] # if sequence length less than chunk size, do an early return if n < self.chunk_size: return torch.zeros_like(x) # causal padding causal_padding = chunk_size - 1 x = F.pad(x, (0, 0, -causal_padding, causal_padding), value = 0.) 
# remove sequence which is ahead of the neighbors retrieved (during inference) seq_index = (n // chunk_size) * chunk_size x, x_remainder = x[:, :seq_index], x[:, seq_index:] seq_remain_len = x_remainder.shape[-2] # take care of rotary positional embedding # make sure queries positions are properly shifted to the future q_pos_emb, k_pos_emb = pos_emb q_pos_emb = F.pad(q_pos_emb, (0, 0, -causal_padding, causal_padding), value = 0.) k_pos_emb = repeat(k_pos_emb, 'b h n d -> b h (r n) d', r = num_retrieved) pos_emb = (q_pos_emb, k_pos_emb) # reshape so we have chunk to chunk attention, without breaking causality x = rearrange(x, 'b (k n) d -> (b k) n d', k = num_chunks) context = rearrange(context, 'b k r n d -> (b k) (r n) d') if exists(context_mask): context_mask = rearrange(context_mask, 'b k r n -> (b k) (r n)') # cross attention out = self.cross_attn(x, context = context, mask = context_mask, pos_emb = pos_emb) # reshape back to original sequence out = rearrange(out, '(b k) n d -> b (k n) d', b = b) # pad back to original, with 0s at the beginning (which will be added to the residual and be fine) out = F.pad(out, (0, 0, causal_padding, -causal_padding + seq_remain_len), value = 0.) return out # encoder and decoder classes class Encoder(nn.Module): def __init__( self, dim, *, depth, context_dim = None, causal = False, heads = 8, dim_head = 64, attn_dropout = 0., ff_mult = 4, ff_dropout = 0., final_norm = True, cross_attn_layers = None, post_norm = False, output_dim = None, norm_klass = RMSNorm, scale_residual = 1. ): super().__init__() self.layers = nn.ModuleList([]) # partial rotary embeddings, which is better than full rotary # Wang and Komatsuzaki et al https://github.com/kingoflolz/mesh-transformer-jax/ rotary_emb_dim = min(dim_head, MIN_DIM_HEAD) self.rotary_pos_emb = RotaryEmbedding(rotary_emb_dim) wrapper = partial(PreNorm, dim, norm_klass = norm_klass) if not post_norm else partial(PostNorm, dim, scale_residual = scale_residual, norm_klass = norm_klass) for layer_num in range(1, depth + 1): has_cross_attn = not exists(cross_attn_layers) or layer_num in cross_attn_layers self.layers.append(nn.ModuleList([ wrapper(Attention(dim = dim, dim_head = dim_head, heads = heads, dropout = attn_dropout, causal = causal)), wrapper(Attention(dim = dim, context_dim = context_dim, dim_head = dim_head, heads = heads, dropout = attn_dropout)) if has_cross_attn else None, wrapper(FeedForward(dim = dim, mult = ff_mult, dropout = ff_dropout)), ])) self.norm_out = norm_klass(dim) if final_norm and not post_norm else nn.Identity() self.project_out = nn.Linear(dim, output_dim) if exists(output_dim) else nn.Identity() def forward(self, x, *, mask = None, chunked_seq): device, chunk_size, seq_len = x.device, x.shape[-2], chunked_seq.shape[-2] q_pos_emb = self.rotary_pos_emb(chunk_size, device = device) k_pos_emb = self.rotary_pos_emb(seq_len, device = device) for attn, cross_attn, ff in self.layers: x = attn(x, mask = mask, pos_emb = q_pos_emb) if exists(cross_attn): x = cross_attn(x, context = chunked_seq, pos_emb = (q_pos_emb, k_pos_emb)) x = ff(x) x = self.norm_out(x) return self.project_out(x) class Decoder(nn.Module): def __init__( self, dim, *, depth, heads = 8, dim_head = 64, attn_dropout = 0., ff_mult = 4, ff_dropout = 0., final_norm = True, cross_attn_layers = None, chunk_size = 64, post_norm = False, norm_klass = RMSNorm, scale_residual = 1. 
): super().__init__() self.layers = nn.ModuleList([]) # partial rotary embeddings, which is better than full rotary # Wang and Komatsuzaki et al https://github.com/kingoflolz/mesh-transformer-jax/ rotary_emb_dim = min(dim_head, MIN_DIM_HEAD) self.rotary_pos_emb = RotaryEmbedding(rotary_emb_dim) wrapper = partial(PreNorm, dim, norm_klass = norm_klass) if not post_norm else partial(PostNorm, dim, scale_residual = scale_residual, norm_klass = norm_klass) self.chunk_size = chunk_size for layer_num in range(1, depth + 1): has_cross_attn = not exists(cross_attn_layers) or layer_num in cross_attn_layers self.layers.append(nn.ModuleList([ wrapper(Attention(dim = dim, dim_head = dim_head, heads = heads, dropout = attn_dropout, causal = True)), wrapper(ChunkedCrossAttention(chunk_size = chunk_size, dim = dim, dim_head = dim_head, heads = heads, dropout = attn_dropout)) if has_cross_attn else None, wrapper(FeedForward(dim = dim, mult = ff_mult, dropout = ff_dropout)), ])) self.norm_out = norm_klass(dim) if final_norm and not post_norm else nn.Identity() def forward(self, x, *, encoder = None, encoder_retrieved_mask = None, context_mask = None, retrieved = None): device, seq_len = x.device, x.shape[-2] self_attn_pos_emb = self.rotary_pos_emb(seq_len, device = device) # calculate seq index num_seq_chunks = seq_len // self.chunk_size seq_index = num_seq_chunks * self.chunk_size # rotary positions on the retrieved chunks if exists(retrieved): num_chunks, num_neighbors, chunk_size = retrieved.shape[-4:-1] cross_attn_q_pos_emb = self.rotary_pos_emb(self.chunk_size, device = device, offset = self.chunk_size - 1) # need to add extra chunk size, since it will be shifted cross_attn_k_pos_emb = self.rotary_pos_emb(chunk_size, device = device) cross_attn_pos_emb = (cross_attn_q_pos_emb, cross_attn_k_pos_emb) # keep track of whether retrieved tokens are encoded yet retrieved_encoded = False # go through the decoder layers for attn, cross_attn, ff in self.layers: x = attn(x, pos_emb = self_attn_pos_emb) if exists(cross_attn) and exists(retrieved): if not retrieved_encoded: retrieved = rearrange(retrieved, 'b k r n d -> (b k r) n d') seq_as_context = repeat(x[:, :seq_index], 'b (k n) d -> (b k r) n d', n = self.chunk_size, r = num_neighbors) retrieved = encoder(retrieved, mask = encoder_retrieved_mask, chunked_seq = seq_as_context) retrieved = rearrange(retrieved, '(b k r) n d -> b k r n d', k = num_chunks, r = num_neighbors) retrieved_encoded = True x = cross_attn( x, context = retrieved, context_mask = context_mask, pos_emb = cross_attn_pos_emb ) x = ff(x) return self.norm_out(x) # main class class RETRO(nn.Module): def __init__( self, *, num_tokens = BERT_VOCAB_SIZE, max_seq_len = 2048, enc_dim = 896, enc_depth = 2, enc_cross_attn_layers = None, dec_depth = 12, dec_cross_attn_layers = (1, 3, 6, 9), heads = 8, dec_dim = 768, dim_head = 64, enc_attn_dropout = 0., enc_ff_dropout = 0., dec_attn_dropout = 0., dec_ff_dropout = 0., chunk_size = 64, pad_id = 0, enc_scale_residual = None, dec_scale_residual = None, norm_klass = None, gated_rmsnorm = False, use_deepnet = False ): super().__init__() assert dim_head >= MIN_DIM_HEAD, f'dimension per head must be greater than {MIN_DIM_HEAD}' self.seq_len = max_seq_len self.pad_id = pad_id self.token_emb = nn.Embedding(num_tokens, enc_dim) self.pos_emb = nn.Embedding(max_seq_len, enc_dim) self.chunk_size = chunk_size self.to_decoder_model_dim = nn.Linear(enc_dim, dec_dim) if enc_dim != dec_dim else nn.Identity() # for deepnet, residual scales # follow equation in Figure 2. 
in https://arxiv.org/abs/2203.00555 norm_klass = default(norm_klass, RMSNorm) if use_deepnet: enc_scale_residual = default(enc_scale_residual, 0.81 * ((enc_depth ** 4) * dec_depth) ** .0625) dec_scale_residual = default(dec_scale_residual, (3 * dec_depth) ** 0.25) norm_klass = nn.LayerNorm # allow for gated rmsnorm if gated_rmsnorm: norm_klass = partial(RMSNorm, gated = True) # define encoder and decoders self.encoder = Encoder( dim = enc_dim, context_dim = dec_dim, depth = enc_depth, attn_dropout = enc_attn_dropout, ff_dropout = enc_ff_dropout, cross_attn_layers = enc_cross_attn_layers, post_norm = use_deepnet, norm_klass = norm_klass, scale_residual = enc_scale_residual, output_dim = dec_dim ) self.decoder = Decoder( dim = dec_dim, depth = dec_depth, attn_dropout = dec_attn_dropout, ff_dropout = dec_ff_dropout, cross_attn_layers = dec_cross_attn_layers, chunk_size = chunk_size, post_norm = use_deepnet, norm_klass = norm_klass, scale_residual = dec_scale_residual ) self.to_logits = nn.Linear(dec_dim, num_tokens) # deepnet has special init of weight matrices if use_deepnet: deepnorm_init(self.encoder, 0.87 * ((enc_depth ** 4) * dec_depth) ** -0.0625) deepnorm_init(self.decoder, (12 * dec_depth) ** -0.25) def forward_without_retrieval( self, seq ): # embed sequence embed = self.token_emb(seq) embed = embed[:, :self.seq_len] # get absolute positional embedding pos_emb = self.pos_emb(torch.arange(embed.shape[1], device = embed.device)) pos_emb = rearrange(pos_emb, 'n d -> 1 n d') embed = embed + pos_emb embed = self.to_decoder_model_dim(embed) embed = self.decoder(embed) # project to logits return self.to_logits(embed) def forward( self, seq, retrieved = None, return_loss = False ): """ b - batch n - sequence length / chunk length k - number of chunks d - feature dimension r - num retrieved neighbors """ if not exists(retrieved): return self.forward_without_retrieval(seq) assert not (return_loss and not self.training), 'must be training if returning loss' # assume padding token id (usually 0.) 
is to be masked out mask = retrieved != self.pad_id # handle some user inputs if retrieved.ndim == 3: retrieved = rearrange(retrieved, 'b k n -> b k 1 n') # 1 neighbor retrieved # if training, derive labels if return_loss: seq, labels = seq[:, :-1], seq[:, 1:] # variables n, num_chunks, num_neighbors, chunk_size, retrieved_shape, device = seq.shape[-1], *retrieved.shape[-3:], retrieved.shape, seq.device assert chunk_size >= self.chunk_size, 'chunk size of retrieval input must be greater or equal to the designated chunk_size on RETRO initialization' num_seq_chunks = n // self.chunk_size assert num_chunks == num_seq_chunks, f'sequence requires {num_seq_chunks} retrieved chunks, but only {num_chunks} passed in' # sequence index at which k-nearest neighbors have not been fetched yet after seq_index = num_seq_chunks * self.chunk_size # embed both sequence and retrieved chunks embed = self.token_emb(seq) retrieved = self.token_emb(retrieved) # get absolute positional embedding pos_emb = self.pos_emb(torch.arange(n, device = device)) pos_emb = rearrange(pos_emb, 'n d -> 1 n d') embed = embed + pos_emb # handle masks for encoder and decoder, if needed encoder_retrieved_mask = decoder_retrieved_mask = None if exists(mask): assert mask.shape == retrieved_shape, 'retrieval mask must be of the same shape as the retrieval tokens' encoder_retrieved_mask = rearrange(mask, 'b k r n -> (b k r) n') decoder_retrieved_mask = mask # project both sequence embedding and retrieved embedding to decoder dimension if necessary embed = self.to_decoder_model_dim(embed) # decode embed = self.decoder( embed, encoder = self.encoder, context_mask = decoder_retrieved_mask, encoder_retrieved_mask = encoder_retrieved_mask, retrieved = retrieved ) # project to logits logits = self.to_logits(embed) if not return_loss: return logits # cross entropy loss loss = F.cross_entropy(rearrange(logits, 'b n c -> b c n'), labels, ignore_index = self.pad_id) return loss
RETRO-pytorch-main
retro_pytorch/retro_pytorch.py
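A smoke-test style sketch of the RETRO model defined above, with made-up hyperparameters and random token ids; the retrieved tensor follows the layout documented in forward (batch, num chunks, neighbors, chunk with continuation).

import torch
from retro_pytorch import RETRO

retro = RETRO(
    chunk_size = 64,
    max_seq_len = 2048,
    enc_dim = 896,
    enc_depth = 2,
    dec_dim = 768,
    dec_depth = 12,
    dec_cross_attn_layers = (3, 6, 9, 12),
    heads = 8,
    dim_head = 64
)

seq = torch.randint(0, 20000, (2, 2048 + 1))           # +1 so it can be split into input / labels
retrieved = torch.randint(0, 20000, (2, 32, 2, 128))   # (batch, num chunks, neighbors, chunk + continuation)

loss = retro(seq, retrieved, return_loss = True)
loss.backward()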
from retro_pytorch.retro_pytorch import RETRO from retro_pytorch.data import RETRODataset from retro_pytorch.training import TrainingWrapper
RETRO-pytorch-main
retro_pytorch/__init__.py
import os import numpy as np from pathlib import Path from shutil import rmtree from contextlib import contextmanager def is_true_env_flag(env_flag): return os.getenv(env_flag, 'false').lower() in ('true', '1', 't') def reset_folder_(p): path = Path(p) rmtree(path, ignore_errors = True) path.mkdir(exist_ok = True, parents = True) @contextmanager def memmap(*args, **kwargs): pointer = np.memmap(*args, **kwargs) yield pointer del pointer
RETRO-pytorch-main
retro_pytorch/utils.py
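Tiny illustration of the memmap context manager above; the file name and shape are arbitrary placeholders.

import numpy as np
from retro_pytorch.utils import memmap

with memmap('./example.dat', shape = (10, 4), dtype = np.int32, mode = 'w+') as data:
    data[0] = np.arange(4)     # written to the backing file, handle released on exit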
from pathlib import Path from math import ceil import torch import torch.nn.functional as F import logging import numpy as np from einops import rearrange import faiss from autofaiss import build_index from retro_pytorch.utils import memmap, reset_folder_ # constants SOS_ID = 101 EOS_ID = 102 BERT_MODEL_DIM = 768 BERT_VOCAB_SIZE = 28996 TMP_PATH = Path('./.tmp') INDEX_FOLDER_PATH = TMP_PATH / '.index' EMBEDDING_TMP_SUBFOLDER = 'embeddings' # helper functions def exists(val): return val is not None def range_chunked(max_value, *, batch_size): counter = 0 while counter < max_value: curr = counter + batch_size curr = min(curr, max_value) yield slice(counter, curr) counter = curr # indexing helper functions def faiss_read_index(path): return faiss.read_index(str(path), faiss.IO_FLAG_MMAP | faiss.IO_FLAG_READ_ONLY) # singleton globals MODEL = None TOKENIZER = None def get_tokenizer(): global TOKENIZER if not exists(TOKENIZER): TOKENIZER = torch.hub.load('huggingface/pytorch-transformers', 'tokenizer', 'bert-base-cased') return TOKENIZER def get_bert(): global MODEL if not exists(MODEL): MODEL = torch.hub.load('huggingface/pytorch-transformers', 'model', 'bert-base-cased') if torch.cuda.is_available(): MODEL = MODEL.cuda() return MODEL # tokenize def tokenize(texts, add_special_tokens = True): if not isinstance(texts, (list, tuple)): texts = [texts] tokenizer = get_tokenizer() encoding = tokenizer.batch_encode_plus( texts, add_special_tokens = add_special_tokens, padding = True, return_tensors = 'pt' ) token_ids = encoding.input_ids return token_ids # text to chunks def doc_text_to_chunks_and_seq_indices( *, doc_text, chunk_size = 64, seq_len = 2048, pad_id = 0 ): assert (seq_len % chunk_size) == 0, 'sequence length must be divisible by chunk size' ids = tokenize(doc_text) ids = rearrange(ids, '1 ... 
-> ...') text_len = ids.shape[-1] # pad to multiple of chunk size with an extra token padding = chunk_size - ((text_len - 1) % chunk_size) ids = F.pad(ids, (0, padding)) # split out very last token ids, last_token = ids[:-1], ids[-1:] ids = rearrange(ids, '(n c) -> n c', c = chunk_size) # first tokens of chunk [2:] and on will become the last token of chunk [1:] last_token_per_chunk = ids[1:, 0] all_last_tokens = torch.cat((last_token_per_chunk, last_token), dim = 0) all_last_tokens = rearrange(all_last_tokens, 'n -> n 1') # append all last tokens to ids for (num_chunks, chunk_size + 1) chunks_with_extra_token = torch.cat((ids, all_last_tokens), dim = -1) # calculate chunk indices starting at 0, spaced number of chunks of seq len apart total_chunks = ids.shape[0] num_chunks_per_seq = seq_len // chunk_size seq = torch.arange(0, total_chunks, num_chunks_per_seq) return chunks_with_extra_token, seq def text_folder_to_chunks_( *, folder, chunks_memmap_path, seqs_memmap_path, doc_ids_memmap_path, chunk_size = 64, seq_len = 2048, glob = '**/*.txt', max_chunks = 1_000_000, max_seqs = 100_000 ): paths = sorted([*Path(folder).glob(glob)]) total_chunks = 0 total_docs = 0 total_seqs = 0 chunks_shape = (max_chunks, chunk_size + 1) seqs_shape = (max_seqs,) doc_ids_shape = (max_chunks,) with memmap(chunks_memmap_path, shape = chunks_shape, dtype = np.int32, mode = 'w+') as chunks_memmap\ , memmap(seqs_memmap_path, shape = seqs_shape, dtype = np.int32, mode = 'w+') as seqs_memmap\ , memmap(doc_ids_memmap_path, shape = doc_ids_shape, dtype = np.int32, mode = 'w+') as doc_ids_memmap: for path in paths: print(f'processing {path}') chunks, seq = doc_text_to_chunks_and_seq_indices( doc_text = path.read_text(), chunk_size = chunk_size, seq_len = seq_len ) doc_chunk_len = chunks.shape[0] doc_seq_len = seq.shape[0] chunks_memmap[total_chunks:(total_chunks + doc_chunk_len)] = chunks.numpy() seqs_memmap[total_seqs:(total_seqs + doc_seq_len)] = seq.numpy() + total_chunks doc_ids_memmap[total_chunks:(total_chunks + doc_chunk_len)] = np.full((doc_chunk_len,), total_docs) total_chunks += doc_chunk_len total_seqs += doc_seq_len total_docs += 1 return dict( chunks = total_chunks, docs = total_docs, seqs = total_seqs ) # embedding function @torch.no_grad() def bert_embed( token_ids, return_cls_repr = False, eps = 1e-8, pad_id = 0. ): model = get_bert() mask = token_ids != pad_id if torch.cuda.is_available(): token_ids = token_ids.cuda() mask = mask.cuda() outputs = model( input_ids = token_ids, attention_mask = mask, output_hidden_states = True ) hidden_state = outputs.hidden_states[-1] if return_cls_repr: return hidden_state[:, 0] # return [cls] as representation if not exists(mask): return hidden_state.mean(dim = 1) mask = mask[:, 1:] # mean all tokens excluding [cls], accounting for length mask = rearrange(mask, 'b n -> b n 1') numer = (hidden_state[:, 1:] * mask).sum(dim = 1) denom = mask.sum(dim = 1) masked_mean = numer / (denom + eps) return masked_mean # chunks to knn def chunks_to_embeddings_( *, num_chunks, chunks_memmap_path, embeddings_memmap_path, chunk_size = 64, embed_dim = BERT_MODEL_DIM, batch_size = 16, use_cls_repr = False, pad_id = 0. 
): chunks_shape = (num_chunks, chunk_size + 1) embed_shape = (num_chunks, embed_dim) with memmap(chunks_memmap_path, shape = chunks_shape, dtype = np.int32) as chunks\ , memmap(embeddings_memmap_path, shape = embed_shape, dtype = np.float32, mode = 'w+') as embeddings: for dim_slice in range_chunked(num_chunks, batch_size = batch_size): batch_chunk_npy = chunks[dim_slice] batch_chunk = torch.from_numpy(batch_chunk_npy) cls_tokens = torch.full((batch_chunk.shape[0], 1), SOS_ID) batch_chunk = torch.cat((cls_tokens, batch_chunk), dim = 1) batch_chunk = batch_chunk[:, :-1] # omit last token, the first token of the next chunk, used for autoregressive training batch_embed = bert_embed( batch_chunk, return_cls_repr = use_cls_repr ) embeddings[dim_slice] = batch_embed.detach().cpu().numpy() print(f'embedded {dim_slice.stop} / {num_chunks}') def memmap_file_to_chunks_( memmap_path, *, folder, shape, dtype, max_rows_per_file = 500 ): rows, _ = shape with memmap(memmap_path, shape = shape, dtype = dtype, mode = 'r') as f: root_path = TMP_PATH / folder reset_folder_(root_path) for ind, dim_slice in enumerate(range_chunked(rows, batch_size = max_rows_per_file)): filename = root_path / f'{ind:05d}.npy' data_slice = f[dim_slice] np.save(str(filename), f[dim_slice]) print(f'saved {str(filename)}') def index_embeddings( embeddings_folder, *, index_file = 'knn.index', index_infos_file = 'index_infos.json', max_index_memory_usage = '100m', current_memory_available = '1G' ): embeddings_path = TMP_PATH / embeddings_folder index_path = INDEX_FOLDER_PATH / index_file reset_folder_(INDEX_FOLDER_PATH) build_index( embeddings = str(embeddings_path), index_path = str(index_path), index_infos_path = str(INDEX_FOLDER_PATH / index_infos_file), metric_type = "l2", max_index_memory_usage = max_index_memory_usage, current_memory_available = current_memory_available, make_direct_map = True, should_be_memory_mappable = False, use_gpu = torch.cuda.is_available(), ) index = faiss_read_index(index_path) return index def chunks_to_index_and_embed( *, num_chunks, chunk_size, chunk_memmap_path, use_cls_repr = False, max_rows_per_file = 500, chunks_to_embeddings_batch_size = 16, embed_dim = BERT_MODEL_DIM, index_file = 'knn.index', **index_kwargs ): embedding_path = f'{chunk_memmap_path}.embedded' embed_shape = (num_chunks, embed_dim) chunks_to_embeddings_( num_chunks = num_chunks, chunk_size = chunk_size, chunks_memmap_path = chunk_memmap_path, embeddings_memmap_path = embedding_path, use_cls_repr = use_cls_repr, batch_size = chunks_to_embeddings_batch_size, embed_dim = embed_dim ) memmap_file_to_chunks_( embedding_path, shape = embed_shape, dtype = np.float32, folder = EMBEDDING_TMP_SUBFOLDER, max_rows_per_file = max_rows_per_file ) index = index_embeddings( embeddings_folder = EMBEDDING_TMP_SUBFOLDER, index_file = index_file, **index_kwargs ) embeddings = np.memmap(embedding_path, shape = embed_shape, dtype = np.float32, mode = 'r') return index, embeddings def chunks_to_precalculated_knn_( *, num_nearest_neighbors, num_chunks, chunk_size, chunk_memmap_path, doc_ids_memmap_path, use_cls_repr = False, max_rows_per_file = 500, chunks_to_embeddings_batch_size = 16, embed_dim = BERT_MODEL_DIM, num_extra_neighbors = 10, force_reprocess = False, index_file = 'knn.index', **index_kwargs ): chunk_path = Path(chunk_memmap_path) knn_path = chunk_path.parents[0] / f'{chunk_path.stem}.knn{chunk_path.suffix}' index_path = INDEX_FOLDER_PATH / index_file # early return knn path and faiss index # unless if force_reprocess is True if 
index_path.exists() and knn_path.exists() and not force_reprocess: print(f'preprocessed knn found at {str(knn_path)}, faiss index reconstituted from {str(index_path)}') index = faiss_read_index(index_path) return knn_path, index # fetch the faiss index and calculated embeddings for the chunks index, embeddings = chunks_to_index_and_embed( num_chunks = num_chunks, chunk_size = chunk_size, chunk_memmap_path = chunk_memmap_path, index_file = index_file, **index_kwargs ) total_neighbors_to_fetch = num_extra_neighbors + num_nearest_neighbors + 1 with memmap(knn_path, shape = (num_chunks, num_nearest_neighbors), dtype = np.int32, mode = 'w+') as knns\ , memmap(doc_ids_memmap_path, shape = (num_chunks,), dtype = np.int32, mode = 'r') as doc_ids: for dim_slice in range_chunked(num_chunks, batch_size = max_rows_per_file): query_vector = embeddings[dim_slice] distances, indices = index.search(query_vector, k = total_neighbors_to_fetch) # remove self from distances and indices distances = distances[:, 1:] indices = indices[:, 1:] # mask out any neighbors that belong to the same document to -1 query_doc_ids = doc_ids[dim_slice] neighbor_doc_ids = doc_ids[indices] neighbor_from_same_doc = query_doc_ids[..., None] == neighbor_doc_ids indices = np.where(neighbor_from_same_doc, -1, indices) distances = np.where(neighbor_from_same_doc, 1e3, distances) # re-sort indices by updated distances indices = np.take_along_axis(indices, np.argsort(distances, axis = 1), axis = 1) # store nearest neighbors to knn memmap knns[dim_slice] = indices[:, :num_nearest_neighbors] print(f'knns calculated for {dim_slice.stop} / {num_chunks}') print(f'knn saved to {knn_path}') return knn_path, index
RETRO-pytorch-main
retro_pytorch/retrieval.py
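Hedged end-to-end sketch of the two preprocessing entry points above; every path, size and count is a placeholder rather than a value the repo prescribes, and faiss/autofaiss must be installed for the second call.

from retro_pytorch.retrieval import text_folder_to_chunks_, chunks_to_precalculated_knn_

stats = text_folder_to_chunks_(
    folder = './text_folder',
    chunks_memmap_path = './train.chunks.dat',
    seqs_memmap_path = './train.seq.dat',
    doc_ids_memmap_path = './train.doc_ids.dat',
    chunk_size = 64,
    seq_len = 2048
)

knn_path, faiss_index = chunks_to_precalculated_knn_(
    num_nearest_neighbors = 2,
    num_chunks = stats['chunks'],
    chunk_size = 64,
    chunk_memmap_path = './train.chunks.dat',
    doc_ids_memmap_path = './train.doc_ids.dat'
)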
from torch.optim import AdamW def separate_weight_decayable_params(params): no_wd_params = set([param for param in params if param.ndim < 2]) wd_params = set(params) - no_wd_params return wd_params, no_wd_params def get_optimizer(params, lr = 3e-4, wd = 1e-1, filter_by_requires_grad = False): if filter_by_requires_grad: params = list(filter(lambda t: t.requires_grad, params)) params = set(params) wd_params, no_wd_params = separate_weight_decayable_params(params) param_groups = [ {'params': list(wd_params)}, {'params': list(no_wd_params), 'weight_decay': 0}, ] return AdamW(param_groups, lr = lr, weight_decay = wd)
RETRO-pytorch-main
retro_pytorch/optimizer.py
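Small illustration of the optimizer helper above: parameters with ndim < 2 (biases, norm scales) land in the no-weight-decay group. The model and hyperparameters are arbitrary.

import torch.nn as nn
from retro_pytorch.optimizer import get_optimizer

model = nn.Sequential(nn.Linear(16, 16), nn.LayerNorm(16))
opt = get_optimizer(model.parameters(), lr = 3e-4, wd = 1e-1)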
import numpy as np from functools import partial import json from pathlib import Path import torch from torch import nn import torch.nn.functional as F from torch.utils.data import DataLoader from retro_pytorch import RETRO, RETRODataset from retro_pytorch.data import knn_to_retrieved_chunks from retro_pytorch.optimizer import get_optimizer from retro_pytorch.retrieval import text_folder_to_chunks_, chunks_to_precalculated_knn_, bert_embed, SOS_ID, EOS_ID from retro_pytorch.utils import memmap, is_true_env_flag from einops import rearrange # helpers def exists(val): return val is not None def eval_decorator(fn): def inner(model, *args, **kwargs): was_training = model.training model.eval() out = fn(model, *args, **kwargs) model.train(was_training) return out return inner def safe_cat(accum, t, dim = -1): if not exists(accum): return t return torch.cat((accum, t), dim = dim) # sampling helpers def log(t, eps = 1e-20): return torch.log(t.clamp(min = eps)) def gumbel_noise(t): noise = torch.zeros_like(t).uniform_(0, 1) return -log(-log(noise)) def gumbel_sample(t, temperature = 1., dim = -1): return ((t / temperature) + gumbel_noise(t)).argmax(dim = dim) def top_k(logits, thres = 0.9): num_logits = logits.shape[-1] k = max(int((1 - thres) * num_logits), 1) val, ind = torch.topk(logits, k) probs = torch.full_like(logits, float('-inf')) probs.scatter_(1, ind, val) return probs def top_p(logits, thres = 0.9): sorted_logits, sorted_indices = torch.sort(logits, descending=True) cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1) sorted_indices_to_remove = cum_probs > (1 - thres) sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone() sorted_indices_to_remove[:, 0] = 0 sorted_logits[sorted_indices_to_remove] = float('-inf') return sorted_logits.scatter(1, sorted_indices, sorted_logits) # function that returns knn chunks from seq chunks # # 1. adds sos and eos to seq chunks # 2. embeds the seq chunks with special tokens with frozen BERT # 3. fetches the knn indices with faiss # 4. 
gets the knn chunks as well as the continuation from a reference to the chunks data (memmap) # def knn_chunks_from_seq_chunks( seq_chunks, *, knn, faiss_index, num_chunks, chunk_size, chunks_memmap_path, ): b, device = seq_chunks.shape[0], seq_chunks.device # prepare last chunk with sos and eos tokens for BERT embed ones = torch.ones((b, 1), dtype = torch.bool, device = device) sos = ones * SOS_ID eos = ones * EOS_ID seq_chunks = torch.cat((sos, seq_chunks, eos), dim = 1) # embed with frozen BERT embeds = bert_embed(seq_chunks.cpu()) # fetch embeds on CPU for now # retrieval of knn with faiss _, knn_indices = faiss_index.search(embeds.cpu().numpy(), k = knn) # numpy to torch with memmap(chunks_memmap_path, dtype = np.int32, shape = (num_chunks + 1, chunk_size + 1)) as chunk_memmap: knn_chunks = knn_to_retrieved_chunks( knn_indices, chunk_memmap, add_continuations = True, num_chunks = num_chunks ) knn_chunks_torch = torch.from_numpy(knn_chunks).to(device) return knn_chunks_torch # training wrapper class class TrainingWrapper(nn.Module): def __init__( self, *, retro, chunk_size, documents_path, knn, glob = '**/*.txt', chunks_memmap_path = './train.chunks.dat', seqs_memmap_path = './train.seq.dat', doc_ids_memmap_path = './train.doc_ids.dat', max_chunks = 1_000_000, max_seqs = 100_000, knn_extra_neighbors = 100, processed_stats_json_path = './processed-stats.json', faiss_index_filename = 'knn.index', **index_kwargs ): super().__init__() assert isinstance(retro, RETRO), 'retro must be instance of RETRO' self.retro = retro force_reprocess = is_true_env_flag('REPROCESS') # store the processed training data statistics # number of chunks, number of sequences stats_path = Path(processed_stats_json_path) # if the statistics file does not exist, process folders of text # force reprocess by setting REPROCESS=1 when running training script if not stats_path.exists() or force_reprocess: self.stats = text_folder_to_chunks_( folder = documents_path, glob = glob, chunks_memmap_path = chunks_memmap_path, seqs_memmap_path = seqs_memmap_path, doc_ids_memmap_path = doc_ids_memmap_path, chunk_size = chunk_size, seq_len = retro.seq_len, max_chunks = max_chunks, max_seqs = max_seqs ) with open(processed_stats_json_path, 'w') as f: json.dump(self.stats, f) else: print(f'found to be previously processed at {str(stats_path)}') self.stats = json.loads(stats_path.read_text()) # get number of chunks and number of sequences num_chunks = self.stats['chunks'] num_seqs = self.stats['seqs'] # calculate knn memmap path and get the faiss index # todo - make sure if faiss_index_filename is found, do not reprocess unless flag is given knn_memmap_path, faiss_index = chunks_to_precalculated_knn_( num_chunks = num_chunks, chunk_size = chunk_size, chunk_memmap_path = chunks_memmap_path, doc_ids_memmap_path = doc_ids_memmap_path, num_nearest_neighbors = knn, num_extra_neighbors = knn_extra_neighbors, index_file = faiss_index_filename, force_reprocess = force_reprocess, **index_kwargs ) # retro dataset self.ds = RETRODataset( num_sequences = num_seqs, num_chunks = num_chunks, num_neighbors = knn, chunk_size = chunk_size, seq_len = retro.seq_len, chunk_memmap_path = chunks_memmap_path, chunk_nn_memmap_path = knn_memmap_path, seq_memmap_path = seqs_memmap_path ) # params needed for generation self.chunk_size = chunk_size self.max_seq_len = self.retro.seq_len self.fetch_knn_chunks_fn = partial( knn_chunks_from_seq_chunks, knn = knn, chunk_size = chunk_size, num_chunks = num_chunks, chunks_memmap_path = chunks_memmap_path, faiss_index = 
faiss_index ) @torch.no_grad() @eval_decorator def generate( self, start = None, retrieved = None, filter_fn = top_k, filter_thres = 0.9, temperature = 1.0, ): assert filter_fn in {top_k, top_p}, 'filter function must be either top-k or nucleus' device = next(self.retro.parameters()).device # if not prime tokens given, assume sampling from SOS token with batch size of 1 if not exists(start): start = torch.full((1, 1), SOS_ID, device = device).long() b, start_seq_len = start.shape # move onto same device as RETRO start = start.to(device) # prepare retrieval related variables if start_seq_len >= self.chunk_size: seq_index = (start_seq_len // self.chunk_size) * self.chunk_size past_seq_chunks = rearrange(start[:, :seq_index], 'b (n c) -> (b n) c', c = self.chunk_size) retrieved = self.fetch_knn_chunks_fn(past_seq_chunks) retrieved = rearrange(retrieved, '(b n) k c -> b n k c', b = b) # get starting sequence index out = start # sampling loop for i in range(start_seq_len - 1, self.max_seq_len): logits = self.retro(out, retrieved = retrieved) logits = logits[:, i] logits = filter_fn(logits, thres = filter_thres) sampled = gumbel_sample(logits, temperature = temperature, dim = -1) sampled = rearrange(sampled, 'b -> b 1') out = torch.cat((out, sampled), dim = 1) # early terminate if all EOS is_eos_tokens = (out == EOS_ID) if is_eos_tokens.any(dim = -1).all(): # mask out everything after the eos tokens shifted_is_eos_tokens = F.pad(is_eos_tokens, (1, -1)) mask = shifted_is_eos_tokens.float().cumsum(dim = -1) >= 1 out = out.masked_fill(mask, self.retro.pad_id) break # when the sequence length is a multiple of the chunk size # retrieve the next set of knns curr_seq_len = out.shape[-1] if (curr_seq_len % self.chunk_size) == 0: last_chunk = rearrange(out, 'b (c n) -> b c n', n = self.chunk_size)[:, -1] knn_chunks = self.fetch_knn_chunks_fn(last_chunk) # concat retrieved knn chunks to all retrieved # to be sent to Retro for chunked cross attention at the next iteration knn_chunks = rearrange(knn_chunks, 'b k r -> b 1 k r') retrieved = safe_cat(retrieved, knn_chunks, dim = 1) print(f'retrieved at {curr_seq_len} / {self.max_seq_len}') return out def get_dataloader(self, **kwargs): return DataLoader(self.ds, **kwargs) def get_optimizer(self, **kwargs): return get_optimizer(self.retro.parameters(), **kwargs) def forward(self): raise NotImplemented
RETRO-pytorch-main
retro_pytorch/training.py
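A sketch of wiring the TrainingWrapper above around a RETRO instance; the paths and sizes are placeholders, `retro` is assumed to be constructed as in the earlier sketch, and the extra index kwargs are forwarded through to the autofaiss index build.

from retro_pytorch import RETRO, TrainingWrapper

wrapper = TrainingWrapper(
    retro = retro,                          # a RETRO instance, e.g. from the sketch above
    knn = 2,
    chunk_size = 64,
    documents_path = './text_folder',
    glob = '**/*.txt',
    knn_extra_neighbors = 100,
    max_index_memory_usage = '100m',
    current_memory_available = '1G'
)

train_dl = iter(wrapper.get_dataloader(batch_size = 2, shuffle = True))
optim = wrapper.get_optimizer(lr = 3e-4, wd = 1e-1)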
from functools import partial import numpy as np import torch from torch.utils.data import Dataset from retro_pytorch.retrieval import EOS_ID from retro_pytorch.utils import memmap # knn to retrieved chunks def knn_to_retrieved_chunks( knns, chunks_memmap, *, add_continuations, num_chunks, pad_id = 0, eos_id = EOS_ID, ): # derive mask for no neighbors found (-1) no_neighbor_mask = knns == -1 knns = np.maximum(knns, 0) # get neighbor and continuation chunks knn_chunks = chunks_memmap[knns] is_last_document_chunk = np.any(knn_chunks == eos_id, axis = -1, keepdims = True) # use presence of [EOS] in chunk as way to detect document boundaries # [EOS] in BERT tokenizer is 102 retrieved = knn_chunks[..., :-1] if add_continuations: continuation_indices = np.clip(knns + 1, 0, num_chunks - 1) # chunks are stored contiguously continuation_chunks = chunks_memmap[continuation_indices][..., :-1] continuation_chunks *= ~is_last_document_chunk # combine neighbors with continuations retrieved = np.concatenate((retrieved, continuation_chunks), axis = -1) # mask out any nearest neighbor chunks that was -1 (not found at index time) to padding id retrieved = np.where(~no_neighbor_mask[..., None], retrieved, pad_id) return retrieved # dataset class RETRODataset(Dataset): def __init__( self, *, num_chunks, chunk_size, seq_len, num_sequences, num_neighbors, chunk_memmap_path, chunk_nn_memmap_path, seq_memmap_path, eos_id = EOS_ID, pad_id = 0., add_continuations = True ): super().__init__() self.num_chunks = num_chunks self.num_sequences = num_sequences self.seq_num_chunks = seq_len // chunk_size self.eos_id = eos_id self.pad_id = pad_id num_chunks_with_padding = num_chunks + self.seq_num_chunks chunks_shape = (num_chunks_with_padding, chunk_size + 1) knn_shape = (num_chunks_with_padding, num_neighbors) self.add_continuations = add_continuations self.get_chunks = partial(memmap, chunk_memmap_path, dtype = np.int32, shape = chunks_shape) self.get_knns = partial(memmap, chunk_nn_memmap_path, dtype = np.int32, shape = knn_shape) self.get_seqs = partial(memmap, seq_memmap_path, dtype = np.int32, shape = (num_sequences,)) def __len__(self): return self.num_sequences def __getitem__(self, ind): with self.get_chunks() as chunks_memmap, self.get_knns() as knns_memmap, self.get_seqs() as seqs_memmap: begin_chunk_index = seqs_memmap[ind] chunk_range = slice(begin_chunk_index, (begin_chunk_index + self.seq_num_chunks)) chunks = chunks_memmap[chunk_range] # excise the last token, except for last token of last chunk seq_tokens = np.concatenate((chunks[:, :-1].flatten(), chunks[-1, -1:])) # mask out (with padding tokens) any token following an <eos> | disallow having more than 1 document in a sequence, as it would break RETRO's CCA seq_mask = np.cumsum(seq_tokens == self.eos_id, axis = 0) seq_mask = np.pad(seq_mask, (1, 0))[:-1] == 0. seq_tokens = np.where(seq_mask, seq_tokens, 0.) # derive retrieved tokens knns = knns_memmap[chunk_range] retrieved = knn_to_retrieved_chunks( knns, chunks_memmap, add_continuations = self.add_continuations, eos_id = self.eos_id, num_chunks = self.num_chunks ) seq_tokens_torch = torch.from_numpy(seq_tokens).long() retrieved_torch = torch.from_numpy(retrieved).long() return seq_tokens_torch, retrieved_torch
RETRO-pytorch-main
retro_pytorch/data.py
# Copyright (c) 2022 Microsoft # Licensed under The MIT License [see LICENSE for details] from io import open from setuptools import find_packages, setup setup( name="torchscale", version="0.2.0", author="TorchScale Team", author_email="[email protected]", description="Transformers at any scale", long_description=open("README.md", "r", encoding="utf-8").read(), long_description_content_type="text/markdown", keywords="Transformers at any scale", license="MIT", url="https://github.com/microsoft/torchscale", packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]), install_requires=["torch>=1.8", "fairscale==0.4.0", "timm==0.4.12"], python_requires=">=3.8.0", classifiers=[ "Programming Language :: Python :: 3", ], )
torchscale-flash-master
setup.py
# Copyright (c) 2022 Microsoft # Licensed under The MIT License [see LICENSE for details]
torchscale-flash-master
torchscale/__init__.py
# Copyright (c) 2022 Microsoft # Licensed under The MIT License [see LICENSE for details] import numpy as np import torch import torch.nn as nn def fixed_pos_embedding(x): seq_len, dim = x.shape inv_freq = 1.0 / (10000 ** (torch.arange(0, dim) / dim)) sinusoid_inp = ( torch.einsum("i , j -> i j", torch.arange(0, seq_len, dtype=torch.float), inv_freq).to(x) ) return torch.sin(sinusoid_inp), torch.cos(sinusoid_inp) def rotate_every_two(x): x1 = x[:, :, ::2] x2 = x[:, :, 1::2] x = torch.stack((-x2, x1), dim=-1) return x.flatten(-2) # in einsum notation: rearrange(x, '... d j -> ... (d j)')\ def duplicate_interleave(m): """ A simple version of `torch.repeat_interleave` for duplicating a matrix while interleaving the copy. """ dim0 = m.shape[0] m = m.view(-1, 1) # flatten the matrix m = m.repeat(1, 2) # repeat all elements into the 2nd dimension m = m.view(dim0, -1) # reshape into a matrix, interleaving the copy return m def apply_rotary_pos_emb(x, sin, cos, scale=1): sin, cos = map(lambda t: duplicate_interleave(t * scale), (sin, cos)) # einsum notation for lambda t: repeat(t[offset:x.shape[1]+offset,:], "n d -> () n () (d j)", j=2) return (x * cos) + (rotate_every_two(x) * sin) class XPOS(nn.Module): def __init__( self, head_dim, scale_base=512 ): super().__init__() self.head_dim = head_dim self.scale_base = scale_base self.register_buffer( "scale", (torch.arange(0, head_dim, 2) + 0.4 * head_dim) / (1.4 * head_dim) ) def forward(self, x, offset=0, downscale=False): length = x.shape[1] min_pos = -(length + offset) // 2 max_pos = length + offset + min_pos scale = self.scale ** torch.arange(min_pos, max_pos, 1).to(self.scale).div(self.scale_base)[:, None] sin, cos = fixed_pos_embedding(scale) if scale.shape[0] > length: scale = scale[-length:] sin = sin[-length:] cos = cos[-length:] if downscale: scale = 1 / scale x = apply_rotary_pos_emb(x, sin, cos, scale) return x
torchscale-flash-master
torchscale/component/xpos_relative_position.py
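Quick shape-level sketch of the XPOS module above; the (batch * heads, seq_len, head_dim) layout mirrors how the attention code in this repo calls it, but the sizes here are arbitrary.

import torch
from torchscale.component.xpos_relative_position import XPOS

xpos = XPOS(head_dim = 64)
q = torch.randn(2, 128, 64)                 # (batch * heads, seq_len, head_dim)
k = torch.randn(2, 128, 64)
q = xpos(q, offset = 0, downscale = False)
k = xpos(k, offset = 0, downscale = True)   # keys use the reciprocal scale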
# Copyright (c) 2022 Microsoft # Licensed under The MIT License [see LICENSE for details] import copy import torch import torch.nn as nn def MultiwayWrapper(args, module, dim=1): if args.multiway: return MultiwayNetwork(module, dim=dim) return module def set_split_position(position): def apply_fn(module): if hasattr(module, "split_position"): module.split_position = position return apply_fn class MultiwayNetwork(nn.Module): def __init__(self, module, dim=1): super().__init__() self.dim = dim self.A = module self.B = copy.deepcopy(module) self.B.reset_parameters() self.split_position = -1 def forward(self, x, **kwargs): if self.split_position == -1: return self.A(x, **kwargs) if self.split_position == 0: return self.B(x, **kwargs) x1, x2 = torch.split( x, [self.split_position, x.size(self.dim) - self.split_position], dim=self.dim, ) # x1, x2 = x[:self.split_position], x[self.split_position:] y1, y2 = self.A(x1, **kwargs), self.B(x2, **kwargs) return torch.cat([y1, y2], dim=self.dim) class MutliwayEmbedding(MultiwayNetwork): def __init__(self, modules, dim=1): super(MultiwayNetwork, self).__init__() self.dim = dim assert len(modules) == 2 self.A = modules[0] self.B = modules[1] self.split_position = -1
torchscale-flash-master
torchscale/component/multiway_network.py
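Minimal illustration of MultiwayWrapper and set_split_position above; `args` only needs the `multiway` flag here, and the tensor shapes are arbitrary.

import torch
import torch.nn as nn
from types import SimpleNamespace
from torchscale.component.multiway_network import MultiwayWrapper, set_split_position

args = SimpleNamespace(multiway = True)
layer = MultiwayWrapper(args, nn.Linear(16, 16))
layer.apply(set_split_position(4))       # positions [:4] route through branch A, the rest through B
out = layer(torch.randn(2, 10, 16))      # outputs concatenated back along dim=1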
""" # Copyright (c) 2022 Microsoft # Licensed under The MIT License [see LICENSE for details] import math import torch import torch.nn.functional as F from torch import nn try: from apex.normalization import FusedLayerNorm as LayerNorm except ModuleNotFoundError: from torch.nn import LayerNorm from .multiway_network import MultiwayWrapper from .xpos_relative_position import XPOS from einops import rearrange class MultiheadAttention(nn.Module): def __init__( self, args, embed_dim, num_heads, dropout=0.0, self_attention=False, encoder_decoder_attention=False, subln=False, casual=False ): super().__init__() self.args = args self.embed_dim = embed_dim self.num_heads = num_heads self.head_dim = embed_dim // num_heads self.scaling = self.head_dim**-0.5 self.self_attention = self_attention self.encoder_decoder_attention = encoder_decoder_attention assert self.self_attention ^ self.encoder_decoder_attention self.k_proj = MultiwayWrapper(args, nn.Linear(embed_dim, embed_dim, bias=True)) self.v_proj = MultiwayWrapper(args, nn.Linear(embed_dim, embed_dim, bias=True)) self.q_proj = MultiwayWrapper(args, nn.Linear(embed_dim, embed_dim, bias=True)) self.out_proj = MultiwayWrapper( args, nn.Linear(embed_dim, embed_dim, bias=True) ) self.inner_attn_ln = ( MultiwayWrapper(args, LayerNorm(self.embed_dim, eps=args.layernorm_eps)) if subln and self.self_attention else None ) self.dropout_module = torch.nn.Dropout(dropout) self.xpos = ( XPOS(self.head_dim, args.xpos_scale_base) if args.xpos_rel_pos and self.self_attention else None ) self.casual = casual self.flash_config = args.flash_config def flash_scaled_dot_product_attention(self, q, k, v, attn_mask=None): _, heads, q_len, _, k_len, is_cuda = *q.shape, k.shape[-2], q.is_cuda() #check if masks exists and expand to compatible shape if attn_mask is not None and attn_mask.ndim != 4: mask = rearrange(attn_mask, 'b j -> b 1 1 j') mask = mask.expand(-1, heads, q_len, -1) #check if threis a comptaible device for flash attention config = self.flash_config if is_cuda else self.cpu_config with torch.backends.cuda.sdp_kernel(**config._asdict()): attn_weights = torch.bmm(q, k.tranpose(1, 2)) attn_weights = F.softmax(attn_weights, dim=-1, dtype=torch.float32).type_as(attn_weights) if attn_mask is not None: attn_weights += attn_mask attn_probs = self.dropout_module(attn_weights) attn = torch.bmm(attn_probs, v) return attn, attn_weights def reset_parameters(self): nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2)) nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2)) nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2)) nn.init.xavier_uniform_(self.out_proj.weight) nn.init.constant_(self.out_proj.bias, 0.0) def forward( self, query, key, value, incremental_state=None, key_padding_mask=None, attn_mask=None, rel_pos=None, ): bsz, tgt_len, embed_dim = query.size() src_len = tgt_len assert embed_dim == self.embed_dim, f"query dim {embed_dim} != {self.embed_dim}" key_bsz, src_len, _ = key.size() assert key_bsz == bsz, f"{query.size(), key.size()}" assert value is not None assert bsz, src_len == value.shape[:2] q = self.q_proj(query) k = self.k_proj(key) v = self.v_proj(value) q *= self.scaling q = q.view(bsz, tgt_len, self.num_heads, self.head_dim).transpose(1, 2) k = k.view(bsz, src_len, self.num_heads, self.head_dim).transpose(1, 2) v = v.view(bsz, src_len, self.num_heads, self.head_dim).transpose(1, 2) q = q.reshape(bsz * self.num_heads, tgt_len, self.head_dim) k = k.reshape(bsz * self.num_heads, src_len, self.head_dim) v = 
v.reshape(bsz * self.num_heads, src_len, self.head_dim) if incremental_state is not None: if "prev_key" in incremental_state: prev_key = incremental_state["prev_key"].view( bsz * self.num_heads, -1, self.head_dim ) prev_value = incremental_state["prev_value"].view( bsz * self.num_heads, -1, self.head_dim ) k = torch.cat([prev_key, k], dim=1) v = torch.cat([prev_value, v], dim=1) incremental_state["prev_key"] = k.view( bsz, self.num_heads, -1, self.head_dim ) incremental_state["prev_value"] = v.view( bsz, self.num_heads, -1, self.head_dim ) src_len = k.size(1) if self.xpos is not None: if incremental_state is not None: offset = src_len - 1 else: offset = 0 k = self.xpos(k, offset=0, downscale=True) q = self.xpos(q, offset=offset, downscale=False) attn_weights = torch.bmm(q, k.transpose(1, 2)) # if attn_mask is not None: # attn_weights = torch.nan_to_num(attn_weights) # attn_mask = attn_mask.unsqueeze(0) # attn_weights += attn_mask # if key_padding_mask is not None: # attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) # attn_weights = attn_weights.masked_fill( # key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), # float("-inf"), # ) # attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn, attn_weights = self.flash_scaled_dot_product_attention(q, k, v, attn_mask) if rel_pos is not None: rel_pos = rel_pos.view(attn_weights.size()) attn_weights = attn_weights + rel_pos attn_weights = F.softmax(attn_weights, dim=-1, dtype=torch.float32).type_as( attn_weights ) attn_probs = self.dropout_module(attn_weights) attn = torch.bmm(attn_probs, v) # attn_weights = F.softmax(attn_weights, dim=-1, dtype=torch.float32).type_as( # attn_weights # ) # attn_probs = self.dropout_module(attn_weights) # attn = torch.bmm(attn_probs, v) attn = attn.transpose(0, 1).reshape(tgt_len, bsz, embed_dim).transpose(0, 1) if self.inner_attn_ln is not None: attn = self.inner_attn_ln(attn) attn = self.out_proj(attn) attn_weights = attn_weights.view( bsz, self.num_heads, tgt_len, src_len ).transpose(1, 0) return attn, attn_weights """ #V2 ===================================> import math import torch import torch.nn.functional as F from torch import nn try: from apex.normalization import FusedLayerNorm as LayerNorm except ModuleNotFoundError: from torch.nn import LayerNorm from multiway_network import MultiwayWrapper from xpos_relative_position import XPOS from einops import rearrange # from flash_attention import FlashAttention, FlashMHA from flash_attn.flash_attention import FlashMHA #sparsificaiton, pruning, fp16, layer norm, keys and values are precomputed for the encoder decoder mechanism -- class MultiheadAttention(nn.Module): def __init__( self, args, embed_dim, num_heads, dropout=0.0, self_attention=False, encoder_decoder_attention=False, subln=False, casual=False, flash_attention=False ): super().__init__() self.args = args self.embed_dim = embed_dim self.num_heads = num_heads self.head_dim = embed_dim // num_heads self.scaling = self.head_dim**-0.5 self.self_attention = self_attention self.encoder_decoder_attention = encoder_decoder_attention assert self.self_attention ^ self.encoder_decoder_attention self.k_proj = MultiwayWrapper(args, nn.Linear(embed_dim, embed_dim, bias=True )) self.v_proj = MultiwayWrapper(args, nn.Linear(embed_dim, embed_dim, bias=True)) self.q_proj = MultiwayWrapper(args, nn.Linear(embed_dim, embed_dim, bias=True)) self.out_proj = MultiwayWrapper( args, nn.Linear(embed_dim, embed_dim, bias=True) ) self.inner_attn_ln = ( MultiwayWrapper(args, 
LayerNorm(self.embed_dim, eps=args.layernorm_eps)) if subln and self.self_attention else None ) self.dropout_module = torch.nn.Dropout(dropout) self.xpos = ( XPOS(self.head_dim, args.xpos_scale_base) if args.xpos_rel_pos and self.self_attention else None ) self.casual = casual self.flash_attention = flash_attention if flash_attention: self.flash_mha = FlashMHA(embed_dim, num_heads, attention_dropout=dropout, causal=casual) self.flash_config = args.flash_config def apply_pruning(self, tensor, top_k=0.5): k = max(int(tensor.shape[-1] * top_k), 1) _, indices = torch.topk(tensor, k, dim=-1, sorted=False) mask = torch.zeros_like(tensor).scatter_(-1, indices, 1.0) pruned_tensor = tensor * mask return pruned_tensor def forward( self, query, key, value, incremental_state=None, key_padding_mask=None, attn_mask=None, rel_pos=None, precomputed_kv=False ): bsz, tgt_len, embed_dim = query.size() src_len = tgt_len assert embed_dim == self.embed_dim, f"query dim {embed_dim} != {self.embed_dim}" key_bsz, src_len, _ = key.size() assert key_bsz == bsz, f"{query.size(), key.size()}" assert value is not None assert bsz, src_len == value.shape[:2] if not precomputed_kv: k = self.k_proj(key) v = self.v_proj(value) q = self.q_proj(query) k = self.k_proj(key) v = self.v_proj(value) q = q * self.scaling #pruning/sparsification q = self.apply_pruning(q) k = self.apply_pruning(k) v = self.apply_pruning(v) # flash attention if self.flash_attention: # Use FlashAttention instead of the default scaled dot product attention q = q.view(bsz, tgt_len, self.num_heads, self.head_dim).transpose(1, 2) k = k.view(bsz, src_len, self.num_heads, self.head_dim).transpose(1, 2) v = v.view(bsz, src_len, self.num_heads, self.head_dim).transpose(1, 2) qkv = torch.stack([q, k, v], dim=2) attn_output, attn_output_weights = self.flash_mha(qkv, key_padding_mask=key_padding_mask) else: q = q.view(bsz, tgt_len, self.num_heads, self.head_dim).transpose(1, 2) k = k.view(bsz, src_len, self.num_heads, self.head_dim).transpose(1, 2) v = v.view(bsz, src_len, self.num_heads, self.head_dim).transpose(1, 2) q = q.reshape(bsz * self.num_heads, tgt_len, self.head_dim) k = k.reshape(bsz * self.num_heads, tgt_len, self.head_dim) v = v.reshape(bsz * self.num_heads, tgt_len, self.head_dim) if key_padding_mask is not None: attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.masked_fill( key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), float("-inf"), ) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if incremental_state is not None: if "prev_key" in incremental_state: prev_key = incremental_state["prev_key"].view( bsz * self.num_heads, -1, self.head_dim ) prev_value = incremental_state["prev_value"].view( bsz * self.num_heads, -1, self.head_dim ) k = torch.cat([prev_key, k], dim=1) v = torch.cat([prev_value, v], dim=1) incremental_state["prev_key"] = k.view( bsz, self.num_heads, -1, self.head_dim ) incremental_state["prev_value"] = v.view( bsz, self.num_heads, -1, self.head_dim ) src_len = k.size(1) if self.xpos is not None: if incremental_state is not None: offset = src_len - 1 else: offset = 0 k = self.xpos(k, offset=0, downscale=True) q = self.xpos(q, offset=offset, downscale=False) attn_weights = torch.bmm(q, k.transpose(1, 2)) if attn_mask is not None: attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights += attn_mask.unsqueeze(1).expand(-1, self.num_heads, -1, -1) if key_padding_mask is not None: attn_weights = attn_weights.masked_fill( 
key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), float("-inf"), ) attn_weights = F.softmax(attn_weights, dim=-1, dtype=torch.float32).type_as( attn_weights ) if rel_pos is not None: rel_pos = rel_pos.view(attn_weights.size()) attn_weights = attn_weights + rel_pos #convert attention weights to mixed precision fp16 attn_weights = attn_weights.to(torch.float16) attn_probs = self.dropout_module(attn_weights) #convert attention weights back to original dtype attn_probs = attn_probs.to(torch.float32) attn = torch.bmm(attn_probs, v) attn = attn.transpose(0, 1).reshape(tgt_len, bsz, embed_dim).transpose(0, 1) if self.inner_attn_ln is not None: attn = self.inner_attn_ln(attn) attn = self.out_proj(attn) attn_weights = attn_weights.view( bsz, self.num_heads, tgt_len, src_len ).transpose(1, 0) return attn, attn_weights """ Yes, the integrated Flash module takes into account dropout, key padding masking, and causal masking. Here's a breakdown of how each of these components is handled in the provided code: Dropout: The FlashAttention class has a dropout_p attribute, which is passed to the forward function of flash_attn_unpadded_qkvpacked_func. The dropout rate is applied to the attention weights during the forward pass. The dropout rate is used only during training (self.training is True), and no dropout is applied during evaluation. python Copy code output_unpad = flash_attn_unpadded_qkvpacked_func( x_unpad, cu_seqlens, max_s, self.dropout_p if self.training else 0.0, softmax_scale=self.softmax_scale, causal=causal ) Key padding masking: When you pass the key_padding_mask to the forward function of the FlashAttention class, it handles the key padding mask by appropriately unpadding the input and re-padding the output: python Copy code x_unpad, indices, cu_seqlens, max_s = unpad_input(x, key_padding_mask) # ... output = rearrange(pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'), indices, batch_size, seqlen), 'b s (h d) -> b s h d', h=nheads) Causal masking: The FlashAttention class has a causal attribute that is passed to the forward function of flash_attn_unpadded_qkvpacked_func. This attribute is used to apply causal masking during the attention calculation: python Copy code output_unpad = flash_attn_unpadded_qkvpacked_func( x_unpad, cu_seqlens, max_s, self.dropout_p if self.training else 0.0, softmax_scale=self.softmax_scale, causal=causal ) As you can see, the Flash module handles dropout, masking, and causal masking during its forward pass. 
""" """ forward v2 def forward( self, query, key, value, incremental_state=None, key_padding_mask=None, attn_mask=None, rel_pos=None, precomputed_kv=False ): bsz, tgt_len, embed_dim = query.size() src_len = tgt_len assert embed_dim == self.embed_dim, f"query dim {embed_dim} != {self.embed_dim}" key_bsz, src_len, _ = key.size() assert key_bsz == bsz, f"{query.size(), key.size()}" assert value is not None assert bsz, src_len == value.shape[:2] if not precomputed_kv: k = self.k_proj(key) v = self.v_proj(value) q = self.q_proj(query) k = self.k_proj(key) v = self.v_proj(value) q = q * self.scaling #pruning/sparsification q = self.apply_pruning(q) k = self.apply_pruning(k) v = self.apply_pruning(v) # flash attention if self.flash_attention: # Use FlashAttention instead of the default scaled dot product attention q = q.view(bsz, tgt_len, self.num_heads, self.head_dim).transpose(1, 2) k = k.view(bsz, src_len, self.num_heads, self.head_dim).transpose(1, 2) v = v.view(bsz, src_len, self.num_heads, self.head_dim).transpose(1, 2) if self.xpos is not None: q = self.xpos(q) k = self.xpos(k) qkv = torch.stack([q, k, v], dim=2) attn_output, attn_output_weights = self.flash_mha(qkv, key_padding_mask=key_padding_mask) else: q = q.view(bsz, tgt_len, self.num_heads, self.head_dim).transpose(1, 2) k = k.view(bsz, src_len, self.num_heads, self.head_dim).transpose(1, 2) v = v.view(bsz, src_len, self.num_heads, self.head_dim).transpose(1, 2) q = q.reshape(bsz * self.num_heads, tgt_len, self.head_dim) k = k.reshape(bsz * self.num_heads, tgt_len, self.head_dim) v = v.reshape(bsz * self.num_heads, tgt_len, self.head_dim) if key_padding_mask is not None: attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.masked_fill( key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), float("-inf"), ) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if incremental_state is not None: if "prev_key" in incremental_state: prev_key = incremental_state["prev_key"].view( bsz * self.num_heads, -1, self.head_dim ) prev_value = incremental_state["prev_value"].view( bsz * self.num_heads, -1, self.head_dim ) k = torch.cat([prev_key, k], dim=1) v = torch.cat([prev_value, v], dim=1) incremental_state["prev_key"] = k.view( bsz, self.num_heads, -1, self.head_dim ) incremental_state["prev_value"] = v.view( bsz, self.num_heads, -1, self.head_dim ) src_len = k.size(1) if self.xpos is not None: if incremental_state is not None: offset = src_len - 1 else: offset = 0 k = self.xpos(k, offset=0, downscale=True) q = self.xpos(q, offset=offset, downscale=False) attn_weights = torch.bmm(q, k.transpose(1, 2)) if attn_mask is not None: attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights += attn_mask.unsqueeze(1).expand(-1, self.num_heads, -1, -1) if key_padding_mask is not None: attn_weights = attn_weights.masked_fill( key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), float("-inf"), ) attn_weights = F.softmax(attn_weights, dim=-1, dtype=torch.float32).type_as( attn_weights ) if rel_pos is not None: rel_pos = rel_pos.view(attn_weights.size()) attn_weights = attn_weights + rel_pos #convert attention weights to mixed precision fp16 attn_weights = attn_weights.to(torch.float16) attn_probs = self.dropout_module(attn_weights) #convert attention weights back to original dtype attn_probs = attn_probs.to(torch.float32) attn = torch.bmm(attn_probs, v) attn = attn.transpose(0, 1).reshape(tgt_len, bsz, embed_dim).transpose(0, 1) if self.inner_attn_ln is 
not None: attn = self.inner_attn_ln(attn) attn = self.out_proj(attn) attn_weights = attn_weights.view( bsz, self.num_heads, tgt_len, src_len ).transpose(1, 0) return attn, attn_weights """
torchscale-flash-master
torchscale/component/multihead_attention.py
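The non-flash paths in the attention classes above fold the head dimension into the batch dimension before the batched matmul. A minimal, self-contained sketch of that reshape/bmm/softmax pipeline (plain PyTorch, hypothetical toy sizes; no MultiwayWrapper, xPos, or flash path):

import torch
import torch.nn.functional as F

# Hypothetical toy sizes: batch 2, 8 heads, 16 tokens, head_dim 32 (embed_dim 256).
bsz, num_heads, tgt_len, head_dim = 2, 8, 16, 32
q = torch.randn(bsz * num_heads, tgt_len, head_dim) * head_dim ** -0.5  # pre-scaled queries
k = torch.randn(bsz * num_heads, tgt_len, head_dim)
v = torch.randn(bsz * num_heads, tgt_len, head_dim)

attn_weights = torch.bmm(q, k.transpose(1, 2))                          # (bsz*heads, tgt, src)
attn_weights = F.softmax(attn_weights, dim=-1, dtype=torch.float32).type_as(attn_weights)
attn = torch.bmm(attn_weights, v)                                        # (bsz*heads, tgt, head_dim)
# un-fold the heads back into the embedding dimension
attn = attn.view(bsz, num_heads, tgt_len, head_dim).transpose(1, 2).reshape(bsz, tgt_len, num_heads * head_dim)
print(attn.shape)  # torch.Size([2, 16, 256])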
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]

import math

import torch
import torch.nn as nn


class RelativePositionBias(nn.Module):
    def __init__(
        self, bidirectional=True, num_buckets=32, max_distance=128, n_heads=12
    ):
        super().__init__()
        self.bidirectional = bidirectional
        self.num_buckets = num_buckets
        self.max_distance = max_distance
        self.n_heads = n_heads
        self.relative_attention_bias = nn.Embedding(self.num_buckets, self.n_heads)

    @staticmethod
    def _relative_position_bucket(
        relative_position, bidirectional=True, num_buckets=32, max_distance=128
    ):
        ret = 0
        n = -relative_position
        if bidirectional:
            num_buckets //= 2
            ret += (n < 0).to(torch.long) * num_buckets
            n = torch.abs(n)
        else:
            n = torch.max(n, torch.zeros_like(n))

        max_exact = num_buckets // 2
        is_small = n < max_exact

        val_if_large = max_exact + (
            torch.log(n.float() / max_exact)
            / math.log(max_distance / max_exact)
            * (num_buckets - max_exact)
        ).to(torch.long)
        val_if_large = torch.min(
            val_if_large, torch.full_like(val_if_large, num_buckets - 1)
        )

        ret += torch.where(is_small, n, val_if_large)
        return ret

    def compute_bias(self, qlen, klen, step=None):
        step = 0 if step is None else step
        context_position = torch.arange(
            step,
            step + qlen,
            dtype=torch.long,
            device=self.relative_attention_bias.weight.device,
        )[:, None]
        memory_position = torch.arange(
            klen, dtype=torch.long, device=self.relative_attention_bias.weight.device
        )[None, :]
        relative_position = memory_position - context_position  # shape (qlen, klen)

        rp_bucket = self._relative_position_bucket(
            relative_position,  # shape (qlen, klen)
            bidirectional=self.bidirectional,
            num_buckets=self.num_buckets,
            max_distance=self.max_distance,
        )
        rp_bucket = rp_bucket.to(self.relative_attention_bias.weight.device)
        values = self.relative_attention_bias(
            rp_bucket
        )  # shape (qlen, klen, num_heads)
        values = values.permute([2, 0, 1]).unsqueeze(
            0
        )  # shape (1, num_heads, qlen, klen)
        return values

    def forward(self, batch_size, qlen, klen, step=None):
        # shape (batch * num_heads, qlen, klen)
        return (
            self.compute_bias(qlen, klen, step)
            .repeat(batch_size, 1, 1, 1)
            .view(-1, qlen, klen)
        )
torchscale-flash-master
torchscale/component/relative_position_bias.py
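A short usage sketch of the RelativePositionBias module above (hypothetical sizes; assumes the class defined above is in scope), showing the shape the decoder later consumes as rel_pos:

import torch

rel_pos_bias = RelativePositionBias(bidirectional=False, num_buckets=32, max_distance=128, n_heads=8)
bias = rel_pos_bias(batch_size=2, qlen=10, klen=10)
print(bias.shape)  # torch.Size([16, 10, 10]) -> (batch_size * n_heads, qlen, klen)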
# Copyright (c) 2022 Microsoft # Licensed under The MIT License [see LICENSE for details] import torch import torch.nn as nn import torch.nn.functional as F class VisionLanguageEmbedding(nn.Module): def __init__(self, text_embed, vision_embed): super().__init__() self.text_embed = text_embed self.vision_embed = vision_embed def forward(self, textual_tokens, visual_tokens, **kwargs): if textual_tokens is None: return self.vision_embed(visual_tokens) if visual_tokens is None: return self.text_embed(textual_tokens) x1 = self.vision_embed(visual_tokens) x2 = self.text_embed(textual_tokens) return torch.cat([x1, x2], dim=1) class VisionEmbedding(nn.Module): """Image to Patch Embedding""" def __init__( self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, contain_mask_token=False, prepend_cls_token=False, ): super().__init__() img_size = (img_size, img_size) patch_size = (patch_size, patch_size) num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0]) self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) self.img_size = img_size self.patch_size = patch_size self.num_patches = num_patches self.proj = nn.Conv2d( in_chans, embed_dim, kernel_size=patch_size, stride=patch_size ) if contain_mask_token: self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) else: self.mask_token = None if prepend_cls_token: self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) else: self.cls_token = None def num_position_embeddings(self): if self.cls_token is None: return self.num_patches else: return self.num_patches + 1 def forward(self, x, masked_position=None, **kwargs): B, C, H, W = x.shape assert ( H == self.img_size[0] and W == self.img_size[1] ), f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." x = self.proj(x).flatten(2).transpose(1, 2) batch_size, seq_len, _ = x.size() if masked_position is not None: assert self.mask_token is not None mask_token = self.mask_token.expand(batch_size, seq_len, -1) w = masked_position.unsqueeze(-1).type_as(mask_token) x = x * (1 - w) + mask_token * w if self.cls_token is not None: cls_tokens = self.cls_token.expand( batch_size, -1, -1 ) # stole cls_tokens impl from Phil Wang, thanks x = torch.cat((cls_tokens, x), dim=1) return x class TextEmbedding(nn.Embedding): def reset_parameters(self): nn.init.normal_(self.weight, mean=0, std=self.embedding_dim**-0.5) self._fill_padding_idx_with_zero() class PositionalEmbedding(nn.Embedding): def forward( self, x, positions=None, **kwargs, ): if positions is None: # being consistent with Fairseq, which starts from 2. positions = ( torch.arange(2, x.size(1) + 2, device=x.device).long().unsqueeze(0) ) return F.embedding( positions, self.weight, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse, )
torchscale-flash-master
torchscale/component/embedding.py
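A brief sketch of how the embedding modules above are typically driven (hypothetical sizes; assumes the classes above are in scope). Note that PositionalEmbedding starts positions at 2 for Fairseq compatibility, so its table needs max_positions + 2 rows:

import torch

vision_embed = VisionEmbedding(img_size=224, patch_size=16, in_chans=3, embed_dim=768, prepend_cls_token=True)
images = torch.randn(2, 3, 224, 224)
patches = vision_embed(images)                  # (2, 197, 768): 14*14 patches + CLS token

text_embed = TextEmbedding(num_embeddings=32000, embedding_dim=768)
pos_embed = PositionalEmbedding(1024 + 2, 768)  # +2 because positions start at 2
tokens = torch.randint(0, 32000, (2, 16))
x = text_embed(tokens)
x = x + pos_embed(x)                            # positional embedding broadcasts over the batch

vl_embed = VisionLanguageEmbedding(text_embed, vision_embed)
fused = vl_embed(tokens, images)                # (2, 197 + 16, 768): vision tokens first, then text
print(patches.shape, x.shape, fused.shape)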
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]

import torch.nn as nn
from timm.models.layers import drop_path


class DropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob=None):
        super(DropPath, self).__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        return drop_path(x, self.drop_prob, self.training)

    def extra_repr(self):
        return "p={}".format(self.drop_prob)
torchscale-flash-master
torchscale/component/droppath.py
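DropPath above is applied to the residual branch (as the decoder layers later in this repo do); a minimal sketch with made-up sizes:

import torch
import torch.nn as nn

drop_path = DropPath(drop_prob=0.1)     # stochastic depth; identity when drop_prob=0 or in eval mode
block = nn.Linear(64, 64)               # stand-in for an attention/FFN sub-block
x = torch.randn(2, 16, 64)
y = x + drop_path(block(x))             # randomly drops the whole branch per sample during training
drop_path.eval()
y_eval = x + drop_path(block(x))        # deterministic at inference time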
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
torchscale-flash-master
torchscale/component/__init__.py
# Copyright (c) 2022 Microsoft # Licensed under The MIT License [see LICENSE for details] import torch import torch.nn as nn import torch.nn.functional as F try: from apex.normalization import FusedLayerNorm as LayerNorm except ModuleNotFoundError: from torch.nn import LayerNorm class set_torch_seed(object): def __init__(self, seed): assert isinstance(seed, int) self.rng_state = self.get_rng_state() torch.manual_seed(seed) if torch.cuda.is_available(): torch.cuda.manual_seed(seed) def get_rng_state(self): state = {"torch_rng_state": torch.get_rng_state()} if torch.cuda.is_available(): state["cuda_rng_state"] = torch.cuda.get_rng_state() return state def set_rng_state(self, state): torch.set_rng_state(state["torch_rng_state"]) if torch.cuda.is_available(): torch.cuda.set_rng_state(state["cuda_rng_state"]) def __enter__(self): return self def __exit__(self, *exc): self.set_rng_state(self.rng_state) def make_experts(args, embed_dim, expert_ffn_dim): world_size = ( 1 if not torch.distributed.is_initialized() else torch.distributed.get_world_size() ) expert_list = [] ddp_rank = args.ddp_rank start_seed = torch.randint(1000000, (1,)).item() # at least as many experts than gpus if args.moe_expert_count >= world_size: assert ( args.moe_expert_count % world_size == 0 ), f"{args.moe_expert_count}, {world_size}" local_moe_expert_count = args.moe_expert_count // world_size for i in range(local_moe_expert_count): with set_torch_seed(start_seed + ddp_rank * local_moe_expert_count + i): expert_list.append( FeedForwardNetwork( embed_dim, expert_ffn_dim, args.activation_fn, args.dropout, args.activation_dropout, args.layernorm_eps, args.subln, ) ) else: assert ( world_size % args.moe_expert_count == 0 ), f"{world_size}, {args.moe_expert_count}" with set_torch_seed(start_seed + ddp_rank % args.moe_expert_count): expert_list.append( FeedForwardNetwork( embed_dim, expert_ffn_dim, args.activation_fn, args.dropout, args.activation_dropout, args.layernorm_eps, args.subln, ) ) experts = nn.ModuleList(expert_list) return experts def get_activation_fn(activation): if activation == "relu": return F.relu elif activation == "gelu": return F.gelu else: raise NotImplementedError class FeedForwardNetwork(nn.Module): def __init__( self, embed_dim, ffn_dim, activation_fn, dropout, activation_dropout, layernorm_eps, subln=False, ): super().__init__() self.embed_dim = embed_dim self.activation_fn = get_activation_fn(activation=str(activation_fn)) self.activation_dropout_module = torch.nn.Dropout(activation_dropout) self.dropout_module = torch.nn.Dropout(dropout) self.fc1 = nn.Linear(self.embed_dim, ffn_dim) self.fc2 = nn.Linear(ffn_dim, self.embed_dim) self.ffn_layernorm = LayerNorm(ffn_dim, eps=layernorm_eps) if subln else None def reset_parameters(self): self.fc1.reset_parameters() self.fc2.reset_parameters() if self.ffn_layernorm is not None: self.ffn_layernorm.reset_parameters() def forward(self, x): x_shape = x.shape x = x.reshape(-1, x.size(-1)) x = self.fc1(x) x = self.activation_fn(x.float()).type_as(x) x = self.activation_dropout_module(x) if self.ffn_layernorm is not None: x = self.ffn_layernorm(x) x = self.fc2(x) x = x.view(x_shape) x = self.dropout_module(x) return x
torchscale-flash-master
torchscale/component/feedforward_network.py
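A small usage sketch of the FeedForwardNetwork above (toy sizes; subln=True enables the inner LayerNorm between fc1 and fc2):

import torch

ffn = FeedForwardNetwork(
    embed_dim=768,
    ffn_dim=3072,
    activation_fn="gelu",
    dropout=0.1,
    activation_dropout=0.0,
    layernorm_eps=1e-5,
    subln=True,
)
x = torch.randn(2, 16, 768)
y = ffn(x)
print(y.shape)  # torch.Size([2, 16, 768]) -- the FFN preserves the input shape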
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
torchscale-flash-master
torchscale/component/xmoe/__init__.py
# Copyright (c) 2022 Microsoft # Licensed under The MIT License [see LICENSE for details] # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. # # This source code is licensed under the BSD license found in the # LICENSE file in the root directory of this source tree. # NOTE: This is a mirror of the code in # https://github.com/facebookresearch/fairscale/tree/master/fairscale/nn/moe import logging import time from typing import Any, Tuple, cast import torch import torch.distributed as dist from torch import Tensor from torch.nn import Module, ModuleList try: from fairseq.modules.moe import MOELayer has_fairseq = True Base = MOELayer except ModuleNotFoundError: Base = Module has_fairseq = False try: # To enable Tutel MoE optimizations: # python3 -m pip install --user --upgrade git+https://github.com/microsoft/[email protected] from tutel import moe as tutel_moe has_tutel, fused_cumsum_sub_one = True, tutel_moe.fast_cumsum_sub_one except ModuleNotFoundError: has_tutel, fused_cumsum_sub_one = False, lambda mask: torch.cumsum(mask, dim=0) - 1 logger = logging.getLogger(__name__) # einsum dimensions: (g)roup, (s)equence, (e)xpert, (m)odel, (c)apacity # See https://arxiv.org/pdf/2006.16668.pdf for details. # Based on https://github.com/pytorch/pytorch/pull/40762 class _AllToAll(torch.autograd.Function): @staticmethod def forward(ctx: Any, group: dist.ProcessGroup, input: Tensor) -> Tensor: # type: ignore ctx.group = group input = input.contiguous() output = torch.empty_like(input) if torch.distributed.is_initialized(): dist.all_to_all_single(output, input, group=group) else: assert group is None output = input return output @staticmethod def backward(ctx: Any, *grad_output: Tensor) -> Tuple[None, Tensor]: return (None, _AllToAll.apply(ctx.group, *grad_output)) def _find_my_group_index(grouped_ranks): my_rank = dist.get_rank() for i, group in enumerate(grouped_ranks): if my_rank in group: return i raise RuntimeError def get_moe_group(moe_expert_count): if dist.is_initialized(): if not hasattr(get_moe_group, "_moe_groups"): world_size = dist.get_world_size() if world_size <= moe_expert_count: assert moe_expert_count % world_size == 0 moe_groups = [[i] for i in range(world_size)] else: assert world_size % moe_expert_count == 0 ranks_per_group = world_size // moe_expert_count moe_groups = [ [i + j * moe_expert_count for j in range(ranks_per_group)] for i in range(moe_expert_count) ] get_moe_group._moe_group_idx = moe_groups get_moe_group._moe_groups = [dist.new_group(g) for g in moe_groups] my_group_idx = _find_my_group_index(get_moe_group._moe_group_idx) return get_moe_group._moe_groups[my_group_idx] def get_all2all_group(moe_expert_count): if dist.is_initialized(): if not hasattr(get_all2all_group, "_all2all_groups"): world_size = dist.get_world_size() # more experts than world size if world_size <= moe_expert_count: assert moe_expert_count % world_size == 0 all2all_groups = [[i for i in range(world_size)]] # larger world than num experts else: assert world_size % moe_expert_count == 0 ranks_per_group = world_size // moe_expert_count all2all_groups = [ [i * moe_expert_count + j for j in range(moe_expert_count)] for i in range(ranks_per_group) ] get_all2all_group._all2all_group_idx = all2all_groups get_all2all_group._all2all_groups = [ dist.new_group(g) for g in all2all_groups ] my_group_idx = _find_my_group_index(get_all2all_group._all2all_group_idx) return get_all2all_group._all2all_groups[my_group_idx] class MOELayer(Base): """MOELayer module which implements MixtureOfExperts 
as described in Gshard_. :: gate = Top2Gate(model_dim, num_experts) moe = MOELayer(gate, expert) output = moe(input) l_aux = moe.l_aux .. Gshard_: https://arxiv.org/pdf/2006.16668.pdf Args: gate (torch.nn.Module): gate network expert (torch.nn.Module): expert network """ def __init__(self, gate, experts, args): if has_fairseq: super(Base, self).__init__() else: super().__init__() self.gate = gate if type(experts) == ModuleList: self.experts = cast(ModuleList, experts) else: self.experts = ModuleList([experts]) self.expert_group = get_moe_group(args.moe_expert_count) self.all2all_group = get_all2all_group(args.moe_expert_count) self.world_size = dist.get_world_size(group=self.expert_group) self.all2all_size = dist.get_world_size(group=self.all2all_group) for p in experts.parameters(): p.expert = True # type: ignore self.num_local_experts = len(self.experts) self.args = args self.in_generation = False self.a2a_cuda_event_intervals = [] self.a2a_cpu_time_ms = 0.0 def forward(self, *input: Tensor, input_padding_mask=None, **kwargs: Any) -> Tensor: assert len(input) == 1, "only single input Tensor supported" input = input[0] assert ( len(input.shape) == 3 ), "input Tensor must have dimensions: (s)equence, (t)oken, (m)odel" if input_padding_mask is not None: assert ( len(input_padding_mask.shape) == 2 ), "input Tensor must have dimensions: (s)equence, (t)oken" assert input_padding_mask.shape[0] == input.shape[0] assert input_padding_mask.shape[1] == input.shape[1] # assert input.shape[0] % len(self.experts) == 0, "num tokens must be order of number of local experts" # Implement Algorithm 2 from GShard paper. d_model = input.shape[2] # Pad to expected batch size input_shape = list(input.shape) expected_bsz = ( getattr(self.args, "batch_size", 0) if self.training else getattr(self.args, "batch_size_valid", 0) ) # This indicates that --batch-size or --max-sentences is not specified if expected_bsz is None: expected_bsz = 0 # Note: Padding is not necessary at generation time at present # because all DDP workers process the same batch. Also, batch size at generation time # can be different from that present in the checkpoint state if ( not self.in_generation and expected_bsz != 0 and input_shape[0] != expected_bsz ): logger.warning( f"padding batch with unexpected size {input_shape[0]} (expected: {expected_bsz})" ) assert input_shape[0] < expected_bsz, f"{input_shape[0]} < {expected_bsz}" padded_input = torch.zeros( (expected_bsz, input_shape[1], input_shape[2]), dtype=input.dtype, layout=input.layout, device=input.device, ) padded_input[: input_shape[0], :, :] = input input = padded_input padded_input_padding_mask = torch.ones( ( expected_bsz, input_shape[1], ), dtype=torch.bool, device=input.device, ) if input_padding_mask is not None: padded_input_padding_mask[: input_shape[0], :] = input_padding_mask else: padded_input_padding_mask[: input_shape[0], :] = False input_padding_mask = padded_input_padding_mask # Reshape into S tokens by dropping sequence dimension. 
reshaped_input = input.reshape(-1, d_model) reshaped_input_shape = reshaped_input.shape reshaped_input_padding_mask = ( input_padding_mask.reshape(-1) if input_padding_mask is not None else None ) # Doing padding here when --max-tokens is specified and not --batch-size or --max-sentences # Pro of --max-tokens: more flexible for MT variable sequence lengths # Con of --max-tokens: extra all-reduce needed to figure out optimal padding without running OOM if expected_bsz == 0: expected_dim = reshaped_input_shape[0] * torch.ones( (1,), dtype=torch.long, device=input.device ) dist.all_reduce(expected_dim, group=dist.group.WORLD, op=dist.ReduceOp.MAX) expected_dim = int(expected_dim.item()) padded_input = torch.zeros( (expected_dim, reshaped_input_shape[1]), dtype=input.dtype, layout=input.layout, device=input.device, ) padded_input[: reshaped_input_shape[0], :] = reshaped_input reshaped_input = padded_input padded_input_padding_mask = torch.ones( (expected_dim,), dtype=torch.bool, device=padded_input.device ) if reshaped_input_padding_mask is not None: padded_input_padding_mask[ : reshaped_input_shape[0] ] = reshaped_input_padding_mask else: padded_input_padding_mask[: reshaped_input_shape[0]] = False reshaped_input_padding_mask = padded_input_padding_mask if has_tutel: l_aux, self.metadata, C, E, indices_, locations_, gates_ = self.gate( reshaped_input, reshaped_input_padding_mask ) S, M = reshaped_input.size(0), reshaped_input.size(1) if not hasattr(self, "_tutel_dispatcher"): self._tutel_dispatcher = tutel_moe.fast_dispatcher( E, C, M, dispatch_dtype=reshaped_input.dtype ) self._tutel_dispatcher.update(indices_, locations_, gates_, capacity=C) dispatched_input = self._tutel_dispatcher.encode(reshaped_input) else: l_aux, combine_weights, dispatch_mask, self.metadata = self.gate( reshaped_input, reshaped_input_padding_mask ) dispatch_mask = dispatch_mask.to(input.dtype).permute( 1, 2, 0 ) # S,E,C -> E,C,S E, C, S = dispatch_mask.size() M = reshaped_input.size(1) assert reshaped_input.size() == (S, M) # einsum("sec,sm->ecm") dispatched_input = torch.mm( dispatch_mask.view(E * C, S), reshaped_input ) # -> (E*C),M if self.all2all_size > 1: dispatched_input = self.all_to_all_wrapper(dispatched_input) # Re-shape after all-to-all: ecm -> gecm dispatched_input = dispatched_input.reshape( self.all2all_size, self.num_local_experts, -1, d_model ) chunks = dispatched_input.chunk(self.num_local_experts, dim=1) expert_outputs = [] for chunk, expert in zip(chunks, self.experts): expert_outputs += [expert(chunk)] expert_output = torch.cat(expert_outputs, dim=1) if self.all2all_size > 1: expert_output = self.all_to_all_wrapper(expert_output) # Re-shape back: gecm -> ecm expert_output = expert_output.reshape( self.all2all_size * self.num_local_experts, -1, d_model ) if has_tutel: combined_output = self._tutel_dispatcher.decode( expert_output.view(E * C, M) ) else: # einsum("sec,ecm->sm") combined_output = combine_weights.view(S, E * C).mm( expert_output.view(E * C, M) ) # Remove padding here when --max-tokens is specified and not --batch-size or --max-sentences combined_output = combined_output[: reshaped_input_shape[0], :] combined_output = combined_output.reshape(input.shape) combined_output = combined_output[: input_shape[0], :, :] self.record_all_to_all_stats() return combined_output, l_aux def prepare_for_inference_(self): self.in_generation = True def all_to_all_wrapper(self, input: Tensor): dummy_a2a = getattr(self.args, "dummy_a2a", False) if dummy_a2a: input = input.contiguous() output = 
input.detach().clone() return input # always record times, since it is not a lot of overhead # if we do not log it we simply clear it off in record_all_to_all_stats cuda_start = torch.cuda.Event(enable_timing=True) cuda_end = torch.cuda.Event(enable_timing=True) cpu_start = time.time() * 1000 cuda_start.record() output = _AllToAll.apply(self.all2all_group, input) cuda_end.record() cpu_end = time.time() * 1000 self.a2a_cpu_time_ms += cpu_end - cpu_start self.a2a_cuda_event_intervals.append((cuda_start, cuda_end)) return output def record_all_to_all_stats(self): # controlled via an argument as we want to minimize any impact from torch.cuda.synchronize() record_a2a_perf_stats = getattr(self.args, "record_a2a_perf_stats", False) if record_a2a_perf_stats: torch.cuda.synchronize() self.metadata["all_to_all_cpu_time_ms"] = self.a2a_cpu_time_ms a2a_cuda_time_ms = 0.0 for ev_start, ev_end in self.a2a_cuda_event_intervals: a2a_cuda_time_ms += ev_start.elapsed_time(ev_end) self.metadata["all_to_all_cuda_time_ms"] = a2a_cuda_time_ms # reset stats self.a2a_cpu_time_ms = 0.0 self.a2a_cuda_event_intervals = []
torchscale-flash-master
torchscale/component/xmoe/moe_layer.py
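The core of the MOELayer above is GShard-style dispatch/combine: each token is scattered into an (expert, capacity-slot) buffer, the experts run on their slice, and the outputs are gathered back with the gate weights. A self-contained toy sketch of that algebra (hypothetical shapes, hard round-robin routing instead of a learned gate, no distributed all-to-all):

import torch

# Toy shapes: S tokens, E experts, C capacity slots per expert, M model dim (all hypothetical).
S, E, C, M = 8, 2, 4, 16
x = torch.randn(S, M)

# Stand-in for the gate's dispatch_mask (S x E x C): one (expert, slot) per token.
dispatch_mask = torch.zeros(S, E, C)
for s in range(S):
    dispatch_mask[s, s % E, s // E] = 1.0   # round-robin routing, just for illustration

# einsum("sec,sm->ecm") written as a single matmul, as in the layer above
dispatched = torch.mm(dispatch_mask.permute(1, 2, 0).reshape(E * C, S), x).reshape(E, C, M)

expert_out = dispatched * 2.0               # pretend each expert is the map h -> 2h

# einsum("sec,ecm->sm"): combine the expert outputs back into token order
combine_weights = dispatch_mask             # hard routing -> weight 1.0 on the chosen slot
combined = combine_weights.reshape(S, E * C).mm(expert_out.reshape(E * C, M))
print(torch.allclose(combined, 2.0 * x))    # True: every token went to exactly one slot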
# Copyright (c) 2022 Microsoft # Licensed under The MIT License [see LICENSE for details] # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. # # This source code is licensed under the BSD license found in the # LICENSE file in the root directory of this source tree. # Implementation of Top2Gating described in https://arxiv.org/pdf/2006.16668.pdf # Code is inspired by Top2GatingOnLogits from lingvo: # https://github.com/tensorflow/lingvo/blob/21b8106c5f1d30a196c98eedc441d4fd70833b11/lingvo/core/moe_layers.py#L477 # NOTE: This is a mirror of the code in # https://github.com/facebookresearch/fairscale/tree/master/fairscale/nn/moe import math from typing import Callable, Dict, Optional, Tuple import torch import torch.nn.functional as F from torch import Tensor from .moe_layer import fused_cumsum_sub_one, has_tutel # use a fixed temperature to compute balance loss TEMPERATURE_FOR_L_UAX = 0.07 # maximum capacity of 1 expert as a fraction of number of tokens in the batch # Note: setting this to 1.0 causes inference to significantly slow down EVAL_CAPACITY_TOKEN_FRACTION = 0.25 # logging SAMPLE_FRACTION = 0.2 def top1gating( logits: torch.Tensor, input_mask: Optional[torch.Tensor] = None, use_fp32=False, capacity_factor=1.0, eval_mode=False, moe_eval_capacity_token_fraction=EVAL_CAPACITY_TOKEN_FRACTION, use_xmoe=False, gate_obj=None, ) -> Tuple[Tensor, Tensor, Tensor, Dict]: """Implements Top2Gating on logits.""" metadata = {} if use_fp32: orig_dtype = logits.dtype logits = logits.float() gates = F.softmax(logits, dim=1) metadata["entropy_gating"] = entropy(probs=gates).mean().detach() # gates has shape of SE num_tokens = gates.shape[0] num_experts = gates.shape[1] if moe_eval_capacity_token_fraction > 0.0 and eval_mode: capacity = math.ceil(moe_eval_capacity_token_fraction * num_tokens) else: # capacity = capacity_factor * S/E capacity = int(capacity_factor * math.ceil(num_tokens / num_experts)) # Create a mask for 1st's expert per token indices1_s = torch.argmax(gates, dim=1) mask1 = one_hot(indices1_s, num_classes=num_experts, unsqueeze_indices=True) if input_mask is not None and input_mask.any(): nonpadding = ~input_mask mask1 = mask1 * nonpadding.unsqueeze(-1).to(mask1.dtype) # for logging (percent of tokens routed to each expert) expert1_hist = ( 100 * torch.histc( (indices1_s.squeeze() + 1), bins=num_experts, min=1, max=num_experts ) / num_tokens ) metadata["unused_expert1_count"] = (expert1_hist == 0).sum() expert1_hist = ( torch.sort(expert1_hist, dim=0, descending=True).values + torch.finfo(torch.float32).tiny ) sample_count = max(math.ceil(num_experts * SAMPLE_FRACTION), 1) metadata["expert1_balance_top"] = expert1_hist[:sample_count].sum() metadata["expert1_balance_bottom"] = expert1_hist[-sample_count:].sum() gates1_s = (gates * mask1).sum(dim=1) # Compute locations in capacity buffer locations1 = fused_cumsum_sub_one(mask1) # Compute l_aux me = torch.mean(gates, dim=0) ce = torch.mean(mask1.to(gates.dtype), dim=0) l_aux = torch.mean(me * ce) l_aux = l_aux * num_experts * num_experts if has_tutel: locations1_s = torch.sum(locations1 * mask1, dim=1) return ( l_aux, metadata, capacity, num_experts, [ indices1_s, ], [ locations1_s, ], [ gates1_s, ], ) # Remove locations outside capacity from mask mask1 = mask1 * torch.lt(locations1, capacity) # Store the capacity location for each token locations1_s = torch.sum(locations1 * mask1, dim=1) # Calculate combine_weights and dispatch_mask gates1 = gates1_s.unsqueeze(-1) * mask1.to(gates1_s.dtype) # einsum("s,se->se") # 
locations1_sc = num_tokens * capacity locations1_sc = one_hot(locations1_s, num_classes=capacity, unsqueeze_indices=True) combine1_sec = torch.bmm( # einsum("se,sc->sec") gates1.unsqueeze(-1), locations1_sc.to(gates1.dtype).unsqueeze(1), ) dispatch_mask = combine1_sec.bool() if use_fp32: return l_aux, combine1_sec.to(orig_dtype), dispatch_mask, metadata else: return l_aux, combine1_sec, dispatch_mask, metadata class Top1Gate(torch.nn.Module): """Gate module which implements Top2Gating as described in Gshard_. :: gate = Top2Gate(model_dim, num_experts) l_aux, combine_weights, dispatch_mask = gate(input) .. Gshard_: https://arxiv.org/pdf/2006.16668.pdf Args: model_dim (int): size of model embedding dimension num_experts (ints): number of experts in model """ wg: torch.nn.Linear def __init__( self, model_dim: int, num_experts: int, use_fp32=False, input_noise_type=None, capacity_factor=1.0, moe_eval_capacity_token_fraction=EVAL_CAPACITY_TOKEN_FRACTION, use_xmoe=False, ) -> None: # TODO: merge this to top2gate.py # super().__init__() if not use_xmoe: self.wg = torch.nn.Linear(model_dim, num_experts, bias=False) else: self.wg_reduction = torch.nn.Linear(model_dim, 16, bias=False) wg = torch.empty(num_experts, 16) torch.nn.init.orthogonal_(wg, gain=0.32) self.register_parameter("wg", torch.nn.Parameter(wg)) self.use_xmoe = use_xmoe self.use_fp32 = use_fp32 self.input_noise_type = input_noise_type self.capacity_factor = capacity_factor self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction def forward(self, input, mask=None): # type: ignore if self.use_xmoe: input = self.wg_reduction(input) with torch.no_grad(): wg_norm = self.wg.norm(p=2.0, dim=1, keepdim=True) self.wg.mul_(1.5 / wg_norm) logits = self._cosine(input, self.wg) logits = self._make_finite(logits) else: logits = self.wg(input) return top1gating( logits, mask, use_fp32=self.use_fp32, capacity_factor=self.capacity_factor, eval_mode=not self.training, moe_eval_capacity_token_fraction=self.moe_eval_capacity_token_fraction, use_xmoe=self.use_xmoe, gate_obj=self, ) def _make_finite(self, scores): ok = scores.isfinite() if not ok.all(): # NaNs here can break the assignment algorithm scores[~ok] = scores[ok].min() return scores def _get_gating_temperature(self, eps=1e-4): if self.gating_t.data.item() < eps: return eps return self.gating_t def _cosine(self, mat1, mat2, eps=1e-4): assert mat1.dim() == 2 assert mat2.dim() == 2 # mat1 = F.normalize(mat1, p=2.0, dim=1, eps=eps) mat2 = F.normalize(mat2.float(), p=2.0, dim=1, eps=eps) return mat1.float().matmul(mat2.transpose(0, 1)).type_as(mat1) gumbel_map: Dict[torch.device, Callable] = {} def gumbel_rsample(shape: Tuple, device: torch.device) -> Tensor: gumbel = gumbel_map.get(device) if gumbel is None: one = torch.tensor(1.0, device=device) zero = torch.tensor(0.0, device=device) gumbel = torch.distributions.gumbel.Gumbel(zero, one).rsample # type: ignore gumbel_map[device] = gumbel return gumbel(shape) def one_hot(indices: torch.Tensor, num_classes: int, unsqueeze_indices=False) -> Tensor: if unsqueeze_indices: indices = indices.unsqueeze(-1) assert indices.shape[-1] == 1, "last dimension of indices must be have size 1" output = torch.zeros( indices.shape[:-1] + (num_classes,), device=indices.device, dtype=indices.dtype ) output.scatter_(len(output.shape) - 1, indices, 1) return output def entropy(probs): logits = torch.distributions.utils.probs_to_logits(probs) p_log_p = probs * logits return -p_log_p.sum(-1) def top2gating( logits: torch.Tensor, input_mask: 
Optional[torch.Tensor] = None, use_fp32=False, second_expert_policy="sampling", normalize_gate_prob_before_dropping=False, eval_mode=False, moe_eval_capacity_token_fraction=0.25, batch_prioritized_routing=False, ) -> Tuple[Tensor, Tensor, Tensor]: """Implements Top2Gating on logits.""" metadata = {} if use_fp32: orig_dtype = logits.dtype logits = logits.float() gates = F.softmax(logits, dim=1) metadata["entropy_gating"] = entropy(probs=gates).mean().detach() # gates has shape of SE num_tokens = gates.shape[0] num_experts = gates.shape[1] if moe_eval_capacity_token_fraction > 0.0 and eval_mode: capacity = math.ceil(moe_eval_capacity_token_fraction * num_tokens) else: # capacity = 2S/E capacity = 2 * math.ceil(num_tokens / num_experts) # Create a mask for 1st's expert per token indices1_s = torch.argmax(gates, dim=1, keepdim=True) mask1 = one_hot(indices1_s, num_experts) if second_expert_policy == "sampling": # Create a mask for 2nd's expert per token using Gumbel-max trick # https://timvieira.github.io/blog/post/2014/07/31/gumbel-max-trick/ logits_w_noise = logits + gumbel_rsample(logits.shape, device=logits.device) else: logits_w_noise = logits # Replace top-expert with min value logits_except1 = logits_w_noise.masked_fill(mask1.bool(), float("-inf")) indices2_s = torch.argmax(logits_except1, dim=1, keepdim=True) mask2 = one_hot(indices2_s, num_experts) gates1_s = (gates * mask1).sum(dim=1) gates2_s = (gates * mask2).sum(dim=1) if normalize_gate_prob_before_dropping: # Normalize gate probabilities denom_s = gates1_s + gates2_s # Avoid divide-by-zero denom_s = torch.clamp(denom_s, min=torch.finfo(denom_s.dtype).eps) gates1_s = gates1_s / denom_s gates2_s = gates2_s / denom_s if second_expert_policy == "random": sampled = (2 * gates2_s) > torch.rand_like(gates2_s) mask2 = mask2 * sampled.repeat(num_experts, 1).transpose(1, 0) # Compute locations in capacity buffer if input_mask is not None and input_mask.any(): nonpadding = ~input_mask mask1 = mask1 * nonpadding.unsqueeze(-1).to(mask1.dtype) mask2 = mask2 * nonpadding.unsqueeze(-1).to(mask1.dtype) if batch_prioritized_routing: # if batch_prioritized_routing: importance_scores = -1 * gates.max(dim=1)[0] sorted_mask1 = mask1[importance_scores.argsort(dim=0)] sorted_cumsum1 = fused_cumsum_sub_one(sorted_mask1) * sorted_mask1 importance_sorted_locations1 = sorted_cumsum1[ importance_scores.argsort(dim=0).argsort(dim=0) ] sorted_mask2 = mask2[importance_scores.argsort(dim=0)] sorted_cumsum2 = fused_cumsum_sub_one(sorted_mask2) * sorted_mask2 importance_sorted_locations2 = sorted_cumsum2[ importance_scores.argsort(dim=0).argsort(dim=0) ] importance_sorted_locations2 += torch.sum(mask1, dim=0, keepdim=True) locations1, locations2 = ( importance_sorted_locations1, importance_sorted_locations2, ) else: locations1 = fused_cumsum_sub_one(mask1) locations2 = fused_cumsum_sub_one(mask2) # Update 2nd's location by accounting for locations of 1st locations2 += torch.sum(mask1, dim=0, keepdim=True) # Compute l_aux me = torch.mean(gates, dim=0) ce = torch.mean(mask1.to(gates.dtype), dim=0) l_aux = torch.mean(me * ce) l_aux = l_aux * num_experts * num_experts # for logging purposes metadata["overflow_expert1"] = ( 100 * torch.sum(mask1 * torch.ge(locations1, capacity)) / torch.sum(mask1) ) metadata["overflow_expert2"] = ( 100 * torch.sum(mask2 * torch.ge(locations2, capacity)) / torch.sum(mask2) ) # Remove locations outside capacity from mask mask1_, mask2_ = mask1, mask2 mask1 = mask1 * torch.lt(locations1, capacity) mask2 = mask2 * torch.lt(locations2, 
capacity) # for logging (percent of tokens routed to each expert) expert1_hist = ( 100 * torch.histc( (indices1_s.squeeze() + 1), bins=num_experts, min=1, max=num_experts ) / num_tokens ) metadata["unused_expert1_count"] = (expert1_hist == 0).sum() expert1_hist = ( torch.sort(expert1_hist, dim=0, descending=True).values + torch.finfo(torch.float32).tiny ) expert2_hist = ( 100 * torch.histc( (indices2_s.squeeze() + 1), bins=num_experts, min=1, max=num_experts ) / num_tokens ) metadata["unused_expert2_count"] = (expert2_hist == 0).sum() expert2_hist = ( torch.sort(expert2_hist, dim=0, descending=True).values + torch.finfo(torch.float32).tiny ) sample_count = max(math.ceil(num_experts * SAMPLE_FRACTION), 1) metadata["expert1_balance_top"] = expert1_hist[:sample_count].sum() metadata["expert1_balance_bottom"] = expert1_hist[-sample_count:].sum() metadata["expert2_balance_top"] = expert2_hist[:sample_count].sum() metadata["expert2_balance_bottom"] = expert2_hist[-sample_count:].sum() if not normalize_gate_prob_before_dropping: # Normalize gate probabilities gates1_s = (gates * mask1).sum(dim=1) gates2_s = (gates * mask2).sum(dim=1) denom_s = gates1_s + gates2_s # Avoid divide-by-zero denom_s = torch.clamp(denom_s, min=torch.finfo(denom_s.dtype).eps) gates1_s /= denom_s gates2_s /= denom_s if has_tutel: locations1_s = torch.sum(locations1 * mask1_, dim=1) locations2_s = torch.sum(locations2 * mask2_, dim=1) return ( l_aux, metadata, capacity, num_experts, [indices1_s, indices2_s], [locations1_s, locations2_s], [gates1_s, gates2_s], ) # Store the capacity location for each token locations1_s = torch.sum(locations1 * mask1, dim=1) locations2_s = torch.sum(locations2 * mask2, dim=1) # Calculate combine_weights and dispatch_mask gates1 = gates1_s.unsqueeze(-1) * mask1.to(gates1_s.dtype) # einsum("s,se->se") gates2 = gates2_s.unsqueeze(-1) * mask2.to(gates2_s.dtype) # einsum("s,se->se") locations1_sc = one_hot(locations1_s, num_classes=capacity, unsqueeze_indices=True) locations2_sc = one_hot(locations2_s, num_classes=capacity, unsqueeze_indices=True) combine1_sec = torch.bmm( # einsum("se,sc->sec") gates1.unsqueeze(-1), locations1_sc.to(gates1.dtype).unsqueeze(1), ) combine2_sec = torch.bmm( # einsum("se,sc->sec") gates2.unsqueeze(-1), locations2_sc.to(gates2.dtype).unsqueeze(1), ) combine_weights = combine1_sec + combine2_sec dispatch_mask = combine_weights.bool() if use_fp32: return l_aux, combine_weights.to(orig_dtype), dispatch_mask, metadata else: return l_aux, combine_weights, dispatch_mask, metadata class Top2Gate(torch.nn.Module): """Gate module which implements Top2Gating as described in Gshard_. :: gate = Top2Gate(model_dim, num_experts) l_aux, combine_weights, dispatch_mask = gate(input) .. 
Gshard_: https://arxiv.org/pdf/2006.16668.pdf Args: model_dim (int): size of model embedding dimension num_experts (ints): number of experts in model """ wg: torch.nn.Linear def __init__( self, model_dim: int, num_experts: int, use_fp32=False, second_expert_policy="sampling", normalize_gate_prob_before_dropping=False, moe_eval_capacity_token_fraction=0.25, batch_prioritized_routing=False, use_xmoe=False, ) -> None: super().__init__() if not use_xmoe: self.wg = torch.nn.Linear(model_dim, num_experts, bias=False) else: self.wg_reduction = torch.nn.Linear(model_dim, 16, bias=False) wg = torch.empty(num_experts, 16) torch.nn.init.orthogonal_(wg, gain=0.32) self.register_parameter("wg", torch.nn.Parameter(wg)) self.use_fp32 = use_fp32 self.second_expert_policy = second_expert_policy self.normalize_gate_prob_before_dropping = normalize_gate_prob_before_dropping self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction self.batch_prioritized_routing = batch_prioritized_routing self.use_xmoe = use_xmoe def forward(self, input, mask=None): # type: ignore if self.use_xmoe: input = self.wg_reduction(input) with torch.no_grad(): wg_norm = self.wg.norm(p=2.0, dim=1, keepdim=True) self.wg.mul_(1.5 / wg_norm) logits = self._cosine(input, self.wg) logits = self._make_finite(logits) else: logits = self.wg(input) return top2gating( logits, mask, use_fp32=self.use_fp32, second_expert_policy=self.second_expert_policy, normalize_gate_prob_before_dropping=self.normalize_gate_prob_before_dropping, eval_mode=not self.training, moe_eval_capacity_token_fraction=self.moe_eval_capacity_token_fraction, batch_prioritized_routing=self.batch_prioritized_routing, ) def _cosine(self, mat1, mat2, eps=1e-4): assert mat1.dim() == 2 assert mat2.dim() == 2 # mat1 = F.normalize(mat1, p=2.0, dim=1, eps=eps) mat2 = F.normalize(mat2.float(), p=2.0, dim=1, eps=eps) return mat1.float().matmul(mat2.transpose(0, 1)).type_as(mat1) def _make_finite(self, scores): ok = scores.isfinite() if not ok.all(): # NaNs here can break the assignment algorithm scores[~ok] = scores[ok].min() return scores
torchscale-flash-master
torchscale/component/xmoe/routing.py
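Both gates above operate on plain tensors (no distributed setup required), so they can be exercised in isolation. A quick sketch with hypothetical sizes using Top2Gate (assuming the class above is in scope); in training mode the capacity is 2 * ceil(num_tokens / num_experts):

import torch

gate = Top2Gate(model_dim=64, num_experts=4)
tokens = torch.randn(16, 64)              # 16 tokens, already flattened (the MOELayer reshapes B x T x M to S x M)
l_aux, combine_weights, dispatch_mask, metadata = gate(tokens)
print(combine_weights.shape)  # torch.Size([16, 4, 8]) -> (tokens, experts, capacity)
print(dispatch_mask.shape)    # same shape, boolean
print(float(l_aux))           # load-balancing loss added to the training objective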
# Copyright (c) 2022 Microsoft # Licensed under The MIT License [see LICENSE for details] import math import numpy as np import torch import torch.nn as nn from fairscale.nn import checkpoint_wrapper, wrap from torchscale.architecture.utils import init_bert_params from torchscale.component.droppath import DropPath from torchscale.component.feedforward_network import FeedForwardNetwork, make_experts from torchscale.component.multihead_attention import MultiheadAttention from torchscale.component.relative_position_bias import RelativePositionBias from torchscale.component.xmoe.moe_layer import MOELayer from torchscale.component.xmoe.routing import Top1Gate, Top2Gate try: from apex.normalization import FusedLayerNorm as LayerNorm except ModuleNotFoundError: from torch.nn import LayerNorm class DecoderLayer(nn.Module): def __init__( self, args, depth, is_moe_layer=False, is_encoder_decoder=False, ): super().__init__() self.args = args self.embed_dim = args.decoder_embed_dim self.dropout_module = torch.nn.Dropout(args.dropout) if args.drop_path_rate > 0: drop_path_prob = np.linspace(0, args.drop_path_rate, args.decoder_layers)[ depth ] self.drop_path = DropPath(drop_path_prob) else: self.drop_path = None self.self_attn = self.build_self_attention(self.embed_dim, args) self.normalize_before = args.decoder_normalize_before self.self_attn_layer_norm = LayerNorm(self.embed_dim, eps=args.layernorm_eps) if not is_encoder_decoder: self.encoder_attn = None self.encoder_attn_layer_norm = None else: self.encoder_attn = self.build_encoder_attention(self.embed_dim, args) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, eps=args.layernorm_eps) self.is_moe_layer = is_moe_layer self.ffn_dim = args.decoder_ffn_embed_dim if not self.is_moe_layer: self.ffn = self.build_ffn( self.embed_dim, self.args, ) else: if args.moe_top1_expert: gate = Top1Gate( self.embed_dim, args.moe_expert_count, use_fp32=args.moe_gating_use_fp32, moe_eval_capacity_token_fraction=args.moe_eval_capacity_token_fraction, use_xmoe=args.use_xmoe, ) else: gate = Top2Gate( self.embed_dim, args.moe_expert_count, args.moe_gating_use_fp32, args.moe_second_expert_policy, args.moe_normalize_gate_prob_before_dropping, args.moe_eval_capacity_token_fraction, use_xmoe=args.use_xmoe, ) experts = make_experts(args, self.embed_dim, self.ffn_dim) self.moe_layer = MOELayer(gate, experts, args) self.final_layer_norm = LayerNorm(self.embed_dim, eps=args.layernorm_eps) if args.deepnorm: if is_encoder_decoder: self.alpha = math.pow(3.0 * args.decoder_layers, 0.25) else: self.alpha = math.pow(2.0 * args.decoder_layers, 0.25) else: self.alpha = 1.0 def build_ffn(self, embed_dim, args): return FeedForwardNetwork( embed_dim, self.ffn_dim, args.activation_fn, args.dropout, args.activation_dropout, args.layernorm_eps, args.subln, ) def build_self_attention(self, embed_dim, args): return MultiheadAttention( args, embed_dim, args.decoder_attention_heads, dropout=args.attention_dropout, self_attention=True, encoder_decoder_attention=False, subln=args.subln, ) def build_encoder_attention(self, embed_dim, args): return MultiheadAttention( args, embed_dim, args.decoder_attention_heads, dropout=args.attention_dropout, self_attention=False, encoder_decoder_attention=True, subln=args.subln, ) def residual_connection(self, x, residual): return residual * self.alpha + x def forward( self, x, encoder_out=None, encoder_padding_mask=None, incremental_state=None, self_attn_mask=None, self_attn_padding_mask=None, self_attn_rel_pos=None, cross_attn_rel_pos=None, ): residual = 
x if self.normalize_before: x = self.self_attn_layer_norm(x) x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, attn_mask=self_attn_mask, rel_pos=self_attn_rel_pos, ) x = self.dropout_module(x) if self.drop_path is not None: x = self.drop_path(x) x = self.residual_connection(x, residual) if not self.normalize_before: x = self.self_attn_layer_norm(x) if self.encoder_attn is not None and encoder_out is not None: residual = x if self.normalize_before: x = self.encoder_attn_layer_norm(x) x, attn = self.encoder_attn( query=x, key=encoder_out, value=encoder_out, key_padding_mask=encoder_padding_mask, incremental_state=None, rel_pos=cross_attn_rel_pos, ) x = self.dropout_module(x) if self.drop_path is not None: x = self.drop_path(x) x = self.residual_connection(x, residual) if not self.normalize_before: x = self.encoder_attn_layer_norm(x) residual = x if self.normalize_before: x = self.final_layer_norm(x) if not self.is_moe_layer: x = self.ffn(x) l_aux = None else: x, l_aux = self.moe_layer(x) if self.drop_path is not None: x = self.drop_path(x) x = self.residual_connection(x, residual) if not self.normalize_before: x = self.final_layer_norm(x) return x, attn, None, l_aux class Decoder(nn.Module): def __init__( self, args, embed_tokens=None, embed_positions=None, output_projection=None, is_encoder_decoder=False, **kwargs ): super().__init__(**kwargs) self.args = args self.dropout_module = torch.nn.Dropout(args.dropout) embed_dim = args.decoder_embed_dim self.embed_dim = embed_dim self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim) self.embed_tokens = embed_tokens self.embed_positions = embed_positions if ( output_projection is None and not args.no_output_layer and args.vocab_size > 0 ): self.output_projection = self.build_output_projection(args) else: self.output_projection = output_projection if args.layernorm_embedding: self.layernorm_embedding = LayerNorm(embed_dim, eps=args.layernorm_eps) else: self.layernorm_embedding = None self.layers = nn.ModuleList([]) moe_freq = args.moe_freq for i in range(args.decoder_layers): is_moe_layer = moe_freq != 0 and (i + 1) % moe_freq == 0 self.layers.append( self.build_decoder_layer( args, depth=i, is_moe_layer=is_moe_layer, is_encoder_decoder=is_encoder_decoder, ) ) self.num_layers = len(self.layers) if args.decoder_normalize_before: self.layer_norm = LayerNorm(embed_dim, eps=args.layernorm_eps) else: self.layer_norm = None self.self_attn_relative_position = None self.cross_attn_relative_position = None if args.rel_pos_buckets > 0 and args.max_rel_pos > 0: self.self_attn_relative_position = RelativePositionBias( num_buckets=args.rel_pos_buckets, max_distance=args.max_rel_pos, n_heads=args.decoder_attention_heads, ) if is_encoder_decoder: self.cross_attn_relative_position = RelativePositionBias( num_buckets=args.rel_pos_buckets, max_distance=args.max_rel_pos, n_heads=args.decoder_attention_heads, ) if args.bert_init: self.apply(init_bert_params) if args.deepnorm: if is_encoder_decoder: init_scale = math.pow(12.0 * args.decoder_layers, 0.25) else: init_scale = math.pow(8.0 * args.decoder_layers, 0.25) for name, p in self.named_parameters(): if ( "fc1" in name or "fc2" in name or "out_proj" in name or "v_proj" in name ): p.data.div_(init_scale) if args.subln: if is_encoder_decoder: init_scale = math.sqrt(math.log(args.decoder_layers * 3)) else: init_scale = math.sqrt(math.log(args.decoder_layers * 2)) for name, p in self.named_parameters(): if "encoder_attn" 
in name:
                    continue
                if (
                    "fc1" in name
                    or "fc2" in name
                    or "out_proj" in name
                    or "v_proj" in name
                ):
                    p.data.mul_(init_scale)

    def build_output_projection(
        self,
        args,
    ):
        if args.share_decoder_input_output_embed:
            output_projection = torch.nn.Linear(
                self.embed_tokens.weight.shape[1],
                self.embed_tokens.weight.shape[0],
                bias=False,
            )
            output_projection.weight = self.embed_tokens.weight
        else:
            output_projection = torch.nn.Linear(
                args.decoder_embed_dim, args.vocab_size, bias=False
            )
            torch.nn.init.normal_(
                output_projection.weight, mean=0, std=args.decoder_embed_dim**-0.5
            )
        return output_projection

    def build_decoder_layer(
        self, args, depth, is_moe_layer=False, is_encoder_decoder=False
    ):
        layer = DecoderLayer(
            args,
            depth,
            is_moe_layer=is_moe_layer,
            is_encoder_decoder=is_encoder_decoder,
        )
        if args.checkpoint_activations:
            layer = checkpoint_wrapper(layer)
        if args.fsdp:
            layer = wrap(layer)
        return layer

    def forward_embedding(
        self,
        tokens,
        token_embedding=None,
        incremental_state=None,
    ):
        positions = None
        if self.embed_positions is not None:
            positions = self.embed_positions(
                tokens, incremental_state=incremental_state
            )

        if incremental_state is not None:
            tokens = tokens[:, -1:]
            if positions is not None:
                positions = positions[:, -1:]

        if token_embedding is None:
            token_embedding = self.embed_tokens(tokens)

        x = embed = self.embed_scale * token_embedding

        if positions is not None:
            x += positions

        if self.layernorm_embedding is not None:
            x = self.layernorm_embedding(x)

        x = self.dropout_module(x)

        return x, embed

    def forward(
        self,
        prev_output_tokens,
        self_attn_padding_mask=None,
        encoder_out=None,
        incremental_state=None,
        features_only=False,
        return_all_hiddens=False,
        token_embeddings=None,
        **kwargs
    ):
        # embed tokens and positions
        x, _ = self.forward_embedding(
            prev_output_tokens, token_embeddings, incremental_state
        )

        # relative position
        self_attn_rel_pos_bias = None
        slen = prev_output_tokens.size(1)
        if self.self_attn_relative_position is not None:
            self_attn_rel_pos_bias = self.self_attn_relative_position(
                batch_size=x.size(0), qlen=slen, klen=slen
            )
            if incremental_state is not None:
                self_attn_rel_pos_bias = self_attn_rel_pos_bias[-1:, :, :]
        cross_attn_rel_pos_bias = None
        if self.cross_attn_relative_position is not None:
            cross_attn_rel_pos_bias = self.cross_attn_relative_position(
                batch_size=x.size(0),
                qlen=slen,
                klen=encoder_out["encoder_out"].size(1),
            )
            if incremental_state is not None:
                cross_attn_rel_pos_bias = cross_attn_rel_pos_bias[-1:, :, :]

        # decoder layers
        inner_states = [x]

        if encoder_out is None:
            l_aux = []
        else:
            l_aux = encoder_out["l_aux"] if "l_aux" in encoder_out else []

        for idx, layer in enumerate(self.layers):
            if incremental_state is None:
                self_attn_mask = torch.triu(
                    torch.zeros([x.size(1), x.size(1)])
                    .float()
                    .fill_(float("-inf"))
                    .type_as(x),
                    1,
                )
            else:
                self_attn_mask = None
                if idx not in incremental_state:
                    incremental_state[idx] = {}

            x, layer_attn, _, l_aux_i = layer(
                x,
                encoder_out["encoder_out"] if encoder_out is not None else None,
                encoder_out["encoder_padding_mask"]
                if encoder_out is not None
                else None,
                incremental_state[idx] if incremental_state is not None else None,
                self_attn_mask=self_attn_mask,
                self_attn_padding_mask=self_attn_padding_mask,
                self_attn_rel_pos=self_attn_rel_pos_bias,
                cross_attn_rel_pos=cross_attn_rel_pos_bias,
            )
            l_aux.append(l_aux_i)
            inner_states.append(x)

        if self.layer_norm is not None:
            x = self.layer_norm(x)

        if not features_only:
            x = self.output_layer(x)

        return x, {
            "inner_states": inner_states,
            "l_aux": l_aux,
            "attn": None,
        }

    def output_layer(self, features):
        return self.output_projection(features)
torchscale-flash-master
torchscale/architecture/decoder.py
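A minimal sketch, not part of the repository, showing the causal mask that `Decoder.forward` builds when `incremental_state` is None, and how a persistent `incremental_state` dict can be threaded through step-by-step generation. The vocabulary size is arbitrary, and it assumes the `Decoder` constructor accepts `embed_tokens`/`embed_positions` the way the `EncoderDecoder` wrapper below uses it.

import torch

from torchscale.architecture.config import DecoderConfig
from torchscale.architecture.decoder import Decoder
from torchscale.component.embedding import PositionalEmbedding, TextEmbedding

# The self-attention mask used inside Decoder.forward: -inf strictly above the
# diagonal, so position i can only attend to positions j <= i.
seq_len = 4
self_attn_mask = torch.triu(
    torch.zeros([seq_len, seq_len]).float().fill_(float("-inf")), 1
)
print(self_attn_mask)

# Step-by-step decoding with a persistent incremental_state (assumed usage):
# the full prefix is re-passed each step, but forward_embedding only embeds the
# last token and each layer caches its state under incremental_state[idx].
config = DecoderConfig(vocab_size=64000)  # hypothetical vocabulary size
decoder = Decoder(
    config,
    embed_tokens=TextEmbedding(64000, config.decoder_embed_dim),
    embed_positions=PositionalEmbedding(
        config.max_target_positions, config.decoder_embed_dim
    ),
)
decoder.eval()

prefix = torch.ones(1, 1).long()  # arbitrary start token
incremental_state = {}
with torch.no_grad():
    for _ in range(3):
        logits, _ = decoder(prefix, incremental_state=incremental_state)
        next_token = logits[:, -1].argmax(dim=-1, keepdim=True)
        prefix = torch.cat([prefix, next_token], dim=1)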
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]


class EncoderConfig(object):
    def __init__(self, **kwargs):
        self.encoder_embed_dim = kwargs.pop("encoder_embed_dim", 768)
        self.encoder_attention_heads = kwargs.pop("encoder_attention_heads", 12)
        self.encoder_ffn_embed_dim = kwargs.pop("encoder_ffn_embed_dim", 3072)
        self.encoder_layers = kwargs.pop("encoder_layers", 12)
        self.encoder_normalize_before = kwargs.pop("encoder_normalize_before", True)
        self.normalize_output = kwargs.pop("normalize_output", True)
        self.activation_fn = kwargs.pop("activation_fn", "gelu")
        self.dropout = kwargs.pop("dropout", 0.0)
        self.drop_path_rate = kwargs.pop("drop_path_rate", 0.0)
        self.attention_dropout = kwargs.pop("attention_dropout", 0.0)
        self.activation_dropout = kwargs.pop("activation_dropout", 0.0)
        self.no_scale_embedding = kwargs.pop("no_scale_embedding", True)
        self.layernorm_embedding = kwargs.pop("layernorm_embedding", False)
        self.moe_freq = kwargs.pop("moe_freq", 0)
        self.moe_top1_expert = kwargs.pop("moe_top1_expert", False)
        self.moe_expert_count = kwargs.pop("moe_expert_count", 0)
        self.moe_gating_use_fp32 = kwargs.pop("moe_gating_use_fp32", True)
        self.moe_eval_capacity_token_fraction = kwargs.pop(
            "moe_eval_capacity_token_fraction", 0.25
        )
        self.moe_second_expert_policy = kwargs.pop("moe_second_expert_policy", "random")
        self.moe_normalize_gate_prob_before_dropping = kwargs.pop(
            "moe_normalize_gate_prob_before_dropping", False
        )
        self.use_xmoe = kwargs.pop("use_xmoe", False)
        self.rel_pos_buckets = kwargs.pop("rel_pos_buckets", 0)
        self.max_rel_pos = kwargs.pop("max_rel_pos", 0)
        self.deepnorm = kwargs.pop("deepnorm", False)
        self.subln = kwargs.pop("subln", True)
        self.bert_init = kwargs.pop("bert_init", False)
        self.multiway = kwargs.pop("multiway", False)
        self.share_encoder_input_output_embed = kwargs.pop(
            "share_encoder_input_output_embed", False
        )
        self.max_source_positions = kwargs.pop("max_source_positions", 1024)
        self.no_output_layer = kwargs.pop("no_output_layer", False)
        self.layernorm_eps = kwargs.pop("layernorm_eps", 1e-5)
        # Text
        self.vocab_size = kwargs.pop("vocab_size", -1)
        # Vision
        self.img_size = kwargs.pop("img_size", 224)
        self.patch_size = kwargs.pop("patch_size", 16)
        self.in_chans = kwargs.pop("in_chans", 3)
        # Fairscale
        self.checkpoint_activations = kwargs.pop("checkpoint_activations", False)
        self.fsdp = kwargs.pop("fsdp", False)
        self.ddp_rank = kwargs.pop("ddp_rank", 0)
        self.xpos_rel_pos = kwargs.pop("xpos_rel_pos", False)
        self.xpos_scale_base = kwargs.pop("xpos_scale_base", 512)

        if self.deepnorm:
            self.encoder_normalize_before = False
            self.subln = False
        if self.subln:
            self.encoder_normalize_before = True
            self.deepnorm = False
        if self.use_xmoe:
            self.moe_normalize_gate_prob_before_dropping = True
            self.moe_second_expert_policy = "random"
            assert self.moe_freq > 0 and self.moe_expert_count > 0

    def override(self, args):
        for hp in self.__dict__.keys():
            if getattr(args, hp, None) is not None:
                self.__dict__[hp] = getattr(args, hp, None)


class DecoderConfig(object):
    def __init__(self, **kwargs):
        self.decoder_embed_dim = kwargs.pop("decoder_embed_dim", 768)
        self.decoder_attention_heads = kwargs.pop("decoder_attention_heads", 12)
        self.decoder_ffn_embed_dim = kwargs.pop("decoder_ffn_embed_dim", 3072)
        self.decoder_layers = kwargs.pop("decoder_layers", 12)
        self.decoder_normalize_before = kwargs.pop("decoder_normalize_before", True)
        self.activation_fn = kwargs.pop("activation_fn", "gelu")
        self.dropout = kwargs.pop("dropout", 0.0)
        self.drop_path_rate = kwargs.pop("drop_path_rate", 0.0)
        self.attention_dropout = kwargs.pop("attention_dropout", 0.0)
        self.activation_dropout = kwargs.pop("activation_dropout", 0.0)
        self.no_scale_embedding = kwargs.pop("no_scale_embedding", True)
        self.layernorm_embedding = kwargs.pop("layernorm_embedding", False)
        self.moe_freq = kwargs.pop("moe_freq", 0)
        self.moe_top1_expert = kwargs.pop("moe_top1_expert", False)
        self.moe_expert_count = kwargs.pop("moe_expert_count", 0)
        self.moe_gating_use_fp32 = kwargs.pop("moe_gating_use_fp32", True)
        self.moe_eval_capacity_token_fraction = kwargs.pop(
            "moe_eval_capacity_token_fraction", 0.25
        )
        self.moe_second_expert_policy = kwargs.pop("moe_second_expert_policy", "random")
        self.moe_normalize_gate_prob_before_dropping = kwargs.pop(
            "moe_normalize_gate_prob_before_dropping", False
        )
        self.use_xmoe = kwargs.pop("use_xmoe", False)
        self.rel_pos_buckets = kwargs.pop("rel_pos_buckets", 0)
        self.max_rel_pos = kwargs.pop("max_rel_pos", 0)
        self.deepnorm = kwargs.pop("deepnorm", False)
        self.subln = kwargs.pop("subln", True)
        self.bert_init = kwargs.pop("bert_init", False)
        self.multiway = kwargs.pop("multiway", False)
        self.share_decoder_input_output_embed = kwargs.pop(
            "share_decoder_input_output_embed", False
        )
        self.max_target_positions = kwargs.pop("max_target_positions", 1024)
        self.no_output_layer = kwargs.pop("no_output_layer", False)
        self.layernorm_eps = kwargs.pop("layernorm_eps", 1e-5)
        # Text
        self.vocab_size = kwargs.pop("vocab_size", -1)
        # Fairscale
        self.checkpoint_activations = kwargs.pop("checkpoint_activations", False)
        self.fsdp = kwargs.pop("fsdp", False)
        self.ddp_rank = kwargs.pop("ddp_rank", 0)
        self.xpos_rel_pos = kwargs.pop("xpos_rel_pos", False)
        self.xpos_scale_base = kwargs.pop("xpos_scale_base", 512)

        if self.deepnorm:
            self.decoder_normalize_before = False
            self.subln = False
        if self.subln:
            self.decoder_normalize_before = True
            self.deepnorm = False
        if self.use_xmoe:
            self.moe_normalize_gate_prob_before_dropping = True
            self.moe_second_expert_policy = "random"
            assert self.moe_freq > 0 and self.moe_expert_count > 0

    def override(self, args):
        for hp in self.__dict__.keys():
            if getattr(args, hp, None) is not None:
                self.__dict__[hp] = getattr(args, hp, None)


class EncoderDecoderConfig(object):
    def __init__(self, **kwargs):
        self.encoder_embed_dim = kwargs.pop("encoder_embed_dim", 768)
        self.encoder_attention_heads = kwargs.pop("encoder_attention_heads", 12)
        self.encoder_ffn_embed_dim = kwargs.pop("encoder_ffn_embed_dim", 3072)
        self.encoder_layers = kwargs.pop("encoder_layers", 12)
        self.encoder_normalize_before = kwargs.pop("encoder_normalize_before", True)
        self.decoder_embed_dim = kwargs.pop("decoder_embed_dim", 768)
        self.decoder_attention_heads = kwargs.pop("decoder_attention_heads", 12)
        self.decoder_ffn_embed_dim = kwargs.pop("decoder_ffn_embed_dim", 3072)
        self.decoder_layers = kwargs.pop("decoder_layers", 12)
        self.decoder_normalize_before = kwargs.pop("decoder_normalize_before", True)
        self.activation_fn = kwargs.pop("activation_fn", "gelu")
        self.dropout = kwargs.pop("dropout", 0.0)
        self.drop_path_rate = kwargs.pop("drop_path_rate", 0.0)
        self.attention_dropout = kwargs.pop("attention_dropout", 0.0)
        self.activation_dropout = kwargs.pop("activation_dropout", 0.0)
        self.no_scale_embedding = kwargs.pop("no_scale_embedding", True)
        self.layernorm_embedding = kwargs.pop("layernorm_embedding", False)
        self.moe_freq = kwargs.pop("moe_freq", 0)
        self.moe_top1_expert = kwargs.pop("moe_top1_expert", False)
        self.moe_expert_count = kwargs.pop("moe_expert_count", 0)
        self.moe_gating_use_fp32 = kwargs.pop("moe_gating_use_fp32", True)
        self.moe_eval_capacity_token_fraction = kwargs.pop(
            "moe_eval_capacity_token_fraction", 0.25
        )
        self.moe_second_expert_policy = kwargs.pop("moe_second_expert_policy", "random")
        self.moe_normalize_gate_prob_before_dropping = kwargs.pop(
            "moe_normalize_gate_prob_before_dropping", False
        )
        self.use_xmoe = kwargs.pop("use_xmoe", False)
        self.rel_pos_buckets = kwargs.pop("rel_pos_buckets", 0)
        self.max_rel_pos = kwargs.pop("max_rel_pos", 0)
        self.deepnorm = kwargs.pop("deepnorm", False)
        self.subln = kwargs.pop("subln", True)
        self.bert_init = kwargs.pop("bert_init", False)
        self.multiway = kwargs.pop("multiway", False)
        self.share_all_embeddings = kwargs.pop("share_all_embeddings", False)
        self.share_decoder_input_output_embed = kwargs.pop(
            "share_decoder_input_output_embed", False
        )
        self.max_source_positions = kwargs.pop("max_source_positions", 1024)
        self.max_target_positions = kwargs.pop("max_target_positions", 1024)
        self.no_output_layer = kwargs.pop("no_output_layer", False)
        self.layernorm_eps = kwargs.pop("layernorm_eps", 1e-5)
        # Text
        self.vocab_size = kwargs.pop("vocab_size", -1)
        # Fairscale
        self.checkpoint_activations = kwargs.pop("checkpoint_activations", False)
        self.fsdp = kwargs.pop("fsdp", False)
        self.ddp_rank = kwargs.pop("ddp_rank", 0)
        self.xpos_rel_pos = kwargs.pop("xpos_rel_pos", False)
        self.xpos_scale_base = kwargs.pop("xpos_scale_base", 512)

        if self.deepnorm:
            self.encoder_normalize_before = False
            self.decoder_normalize_before = False
            self.subln = False
        if self.subln:
            self.encoder_normalize_before = True
            self.decoder_normalize_before = True
            self.deepnorm = False
        if self.use_xmoe:
            self.moe_normalize_gate_prob_before_dropping = True
            self.moe_second_expert_policy = "random"
            assert self.moe_freq > 0 and self.moe_expert_count > 0

    def override(self, args):
        for hp in self.__dict__.keys():
            if getattr(args, hp, None) is not None:
                self.__dict__[hp] = getattr(args, hp, None)
torchscale-flash-master
torchscale/architecture/config.py
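A small illustrative sketch, not from the repository, of how the mutually exclusive `deepnorm`/`subln` switches above resolve, and of how `override` copies matching attributes from an argparse-style namespace; the namespace values are made up.

from argparse import Namespace

from torchscale.architecture.config import DecoderConfig

# subln is on by default, which forces pre-LayerNorm and keeps deepnorm off.
default_cfg = DecoderConfig()
assert default_cfg.subln and default_cfg.decoder_normalize_before and not default_cfg.deepnorm

# Asking for deepnorm switches to post-LayerNorm residuals and disables subln.
deepnorm_cfg = DecoderConfig(deepnorm=True)
assert deepnorm_cfg.deepnorm and not deepnorm_cfg.subln and not deepnorm_cfg.decoder_normalize_before

# override() copies any attribute that also exists (and is not None) on args.
cli_args = Namespace(decoder_layers=24, dropout=0.1)  # hypothetical CLI values
cfg = DecoderConfig()
cfg.override(cli_args)
assert cfg.decoder_layers == 24 and cfg.dropout == 0.1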
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]

import torch.nn as nn

from torchscale.architecture.decoder import Decoder
from torchscale.architecture.encoder import Encoder


class EncoderDecoder(nn.Module):
    def __init__(
        self,
        args,
        encoder_embed_tokens=None,
        encoder_embed_positions=None,
        decoder_embed_tokens=None,
        decoder_embed_positions=None,
        output_projection=None,
        **kwargs
    ):
        super().__init__()
        self.args = args
        if args.share_all_embeddings:
            args.share_decoder_input_output_embed = True

        self.encoder = Encoder(
            args,
            encoder_embed_tokens,
            encoder_embed_positions,
            is_encoder_decoder=True,
            **kwargs
        )

        if args.share_all_embeddings and decoder_embed_tokens is None:
            decoder_embed_tokens = self.encoder.embed_tokens

        self.decoder = Decoder(
            args,
            decoder_embed_tokens,
            decoder_embed_positions,
            output_projection,
            is_encoder_decoder=True,
            **kwargs
        )

    def forward(
        self,
        src_tokens,
        prev_output_tokens,
        return_all_hiddens=False,
        features_only=False,
        **kwargs
    ):
        encoder_out = self.encoder(src_tokens, return_all_hiddens=return_all_hiddens)
        decoder_out = self.decoder(
            prev_output_tokens,
            encoder_out=encoder_out,
            features_only=features_only,
            return_all_hiddens=return_all_hiddens,
        )
        return decoder_out
torchscale-flash-master
torchscale/architecture/encoder_decoder.py
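A brief usage sketch, with an arbitrary vocabulary size and tensor shapes, showing that `share_all_embeddings` makes the decoder reuse the encoder's token embedding, as in the constructor above.

import torch

from torchscale.architecture.config import EncoderDecoderConfig
from torchscale.architecture.encoder_decoder import EncoderDecoder
from torchscale.component.embedding import PositionalEmbedding, TextEmbedding

config = EncoderDecoderConfig(vocab_size=64000, share_all_embeddings=True)
model = EncoderDecoder(
    config,
    encoder_embed_tokens=TextEmbedding(64000, config.encoder_embed_dim),
    encoder_embed_positions=PositionalEmbedding(
        config.max_source_positions, config.encoder_embed_dim
    ),
    decoder_embed_positions=PositionalEmbedding(
        config.max_target_positions, config.decoder_embed_dim
    ),
)

# decoder_embed_tokens was left as None, so the decoder picks up the encoder's
# embedding table, and share_decoder_input_output_embed is flipped on as well.
assert model.decoder.embed_tokens is model.encoder.embed_tokens
assert config.share_decoder_input_output_embed

logits, _ = model(
    src_tokens=torch.ones(2, 20).long(),
    prev_output_tokens=torch.ones(2, 10).long(),
)
print(logits.shape)  # (2, 10, 64000), assuming the decoder builds a tied output projection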
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
torchscale-flash-master
torchscale/architecture/__init__.py
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]

import math

import numpy as np
import torch
import torch.nn as nn
from fairscale.nn import checkpoint_wrapper, wrap

try:
    from apex.normalization import FusedLayerNorm as LayerNorm
except ModuleNotFoundError:
    from torch.nn import LayerNorm

from torchscale.architecture.utils import init_bert_params
from torchscale.component.droppath import DropPath
from torchscale.component.feedforward_network import FeedForwardNetwork, make_experts
from torchscale.component.multihead_attention import MultiheadAttention
from torchscale.component.multiway_network import MultiwayWrapper, set_split_position
from torchscale.component.relative_position_bias import RelativePositionBias
from torchscale.component.xmoe.moe_layer import MOELayer
from torchscale.component.xmoe.routing import Top1Gate, Top2Gate


class EncoderLayer(nn.Module):
    def __init__(self, args, depth, is_moe_layer=False, is_encoder_decoder=False):
        super().__init__()
        self.args = args
        self.embed_dim = args.encoder_embed_dim
        self.self_attn = self.build_self_attention(self.embed_dim, args)
        self.self_attn_layer_norm = MultiwayWrapper(args, LayerNorm(self.embed_dim, eps=args.layernorm_eps))
        self.dropout_module = torch.nn.Dropout(args.dropout)

        if args.drop_path_rate > 0:
            drop_path_prob = np.linspace(0, args.drop_path_rate, args.encoder_layers)[
                depth
            ]
            self.drop_path = DropPath(drop_path_prob)
        else:
            self.drop_path = None

        self.normalize_before = args.encoder_normalize_before
        self.is_moe_layer = is_moe_layer
        self.ffn_dim = args.encoder_ffn_embed_dim

        if not self.is_moe_layer:
            self.ffn = MultiwayWrapper(
                args,
                self.build_ffn(
                    self.embed_dim,
                    self.args,
                ),
            )
        else:
            assert not self.args.multiway
            if args.moe_top1_expert:
                gate = Top1Gate(
                    self.embed_dim,
                    args.moe_expert_count,
                    use_fp32=args.moe_gating_use_fp32,
                    moe_eval_capacity_token_fraction=args.moe_eval_capacity_token_fraction,
                    use_xmoe=args.use_xmoe,
                )
            else:
                gate = Top2Gate(
                    self.embed_dim,
                    args.moe_expert_count,
                    args.moe_gating_use_fp32,
                    args.moe_second_expert_policy,
                    args.moe_normalize_gate_prob_before_dropping,
                    args.moe_eval_capacity_token_fraction,
                    use_xmoe=args.use_xmoe,
                )
            experts = make_experts(args, self.embed_dim, self.ffn_dim)
            self.moe_layer = MOELayer(gate, experts, args)
        self.final_layer_norm = MultiwayWrapper(args, LayerNorm(self.embed_dim, eps=args.layernorm_eps))

        if args.deepnorm:
            if is_encoder_decoder:
                self.alpha = (
                    math.pow(
                        math.pow(args.encoder_layers, 4) * args.decoder_layers, 0.0625
                    )
                    * 0.81
                )
            else:
                self.alpha = math.pow(2.0 * args.encoder_layers, 0.25)
        else:
            self.alpha = 1.0

    def build_ffn(self, embed_dim, args):
        return FeedForwardNetwork(
            embed_dim,
            self.ffn_dim,
            args.activation_fn,
            args.dropout,
            args.activation_dropout,
            args.layernorm_eps,
            args.subln,
        )

    def build_self_attention(self, embed_dim, args):
        return MultiheadAttention(
            args,
            embed_dim,
            args.encoder_attention_heads,
            dropout=args.attention_dropout,
            self_attention=True,
            encoder_decoder_attention=False,
            subln=args.subln,
        )

    def residual_connection(self, x, residual):
        return residual * self.alpha + x

    def forward(self, x, encoder_padding_mask, attn_mask=None, rel_pos=None, multiway_split_position=None, incremental_state=None):
        if multiway_split_position is not None:
            assert self.args.multiway
            self.apply(set_split_position(multiway_split_position))

        if attn_mask is not None:
            attn_mask = attn_mask.masked_fill(attn_mask.to(torch.bool), -1e8)

        residual = x
        if self.normalize_before:
            x = self.self_attn_layer_norm(x)
        x, _ = self.self_attn(
            query=x,
            key=x,
            value=x,
            key_padding_mask=encoder_padding_mask,
            attn_mask=attn_mask,
            rel_pos=rel_pos,
            incremental_state=incremental_state,
        )
        x = self.dropout_module(x)

        if self.drop_path is not None:
            x = self.drop_path(x)

        x = self.residual_connection(x, residual)
        if not self.normalize_before:
            x = self.self_attn_layer_norm(x)

        residual = x
        if self.normalize_before:
            x = self.final_layer_norm(x)
        if not self.is_moe_layer:
            x = self.ffn(x)
            l_aux = None
        else:
            x = x.transpose(0, 1)
            x, l_aux = self.moe_layer(x)
            x = x.transpose(0, 1)

        if self.drop_path is not None:
            x = self.drop_path(x)

        x = self.residual_connection(x, residual)
        if not self.normalize_before:
            x = self.final_layer_norm(x)
        return x, l_aux


class Encoder(nn.Module):
    def __init__(
        self,
        args,
        embed_tokens=None,
        embed_positions=None,
        output_projection=None,
        is_encoder_decoder=False,
        **kwargs
    ):
        self.args = args
        super().__init__(**kwargs)

        self.dropout_module = torch.nn.Dropout(args.dropout)

        embed_dim = args.encoder_embed_dim
        self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim)

        self.embed_tokens = embed_tokens
        self.embed_positions = embed_positions

        if (
            output_projection is None
            and not is_encoder_decoder
            and not args.no_output_layer
            and args.vocab_size > 0
        ):
            self.output_projection = self.build_output_projection(args)
        else:
            self.output_projection = output_projection

        if args.layernorm_embedding:
            self.layernorm_embedding = MultiwayWrapper(
                args, LayerNorm(embed_dim, eps=args.layernorm_eps), dim=1
            )
        else:
            self.layernorm_embedding = None

        self.layers = nn.ModuleList([])

        moe_freq = args.moe_freq
        for i in range(args.encoder_layers):
            is_moe_layer = moe_freq != 0 and (i + 1) % moe_freq == 0
            self.layers.append(
                self.build_encoder_layer(
                    args,
                    depth=i,
                    is_moe_layer=is_moe_layer,
                    is_encoder_decoder=is_encoder_decoder,
                )
            )
        self.num_layers = len(self.layers)

        if args.encoder_normalize_before and args.normalize_output:
            self.layer_norm = MultiwayWrapper(args, LayerNorm(embed_dim, eps=args.layernorm_eps))
        else:
            self.layer_norm = None

        if args.rel_pos_buckets > 0 and args.max_rel_pos > 0:
            self.relative_position = RelativePositionBias(
                num_buckets=args.rel_pos_buckets,
                max_distance=args.max_rel_pos,
                n_heads=args.encoder_attention_heads,
            )
        else:
            self.relative_position = None

        if args.bert_init:
            self.apply(init_bert_params)

        if args.deepnorm:
            if is_encoder_decoder:
                init_scale = (
                    math.pow(
                        math.pow(args.encoder_layers, 4) * args.decoder_layers, 0.0625
                    )
                    / 1.15
                )
            else:
                init_scale = math.pow(8.0 * args.encoder_layers, 0.25)
            for name, p in self.named_parameters():
                if (
                    "fc1" in name
                    or "fc2" in name
                    or "out_proj" in name
                    or "v_proj" in name
                ):
                    p.data.div_(init_scale)

        if args.subln:
            if is_encoder_decoder:
                init_scale = math.sqrt(
                    math.log(3 * args.decoder_layers)
                    * math.log(2 * args.encoder_layers)
                    / 3
                )
            else:
                init_scale = math.sqrt(math.log(args.encoder_layers * 2))
            for name, p in self.named_parameters():
                if (
                    "fc1" in name
                    or "fc2" in name
                    or "out_proj" in name
                    or "v_proj" in name
                ):
                    p.data.mul_(init_scale)

    def build_output_projection(
        self,
        args,
    ):
        if args.share_encoder_input_output_embed:
            assert args.encoder_embedding_type == "language"
            output_projection = torch.nn.Linear(
                self.embed_tokens.weight.shape[1],
                self.embed_tokens.weight.shape[0],
                bias=False,
            )
            output_projection.weight = self.embed_tokens.weight
        else:
            output_projection = torch.nn.Linear(
                args.encoder_embed_dim, args.vocab_size, bias=False
            )
            torch.nn.init.normal_(
                output_projection.weight, mean=0, std=args.encoder_embed_dim**-0.5
            )
        return output_projection

    def build_encoder_layer(
        self, args, depth, is_moe_layer=False, is_encoder_decoder=False
    ):
        layer = EncoderLayer(
            args,
            depth,
            is_moe_layer=is_moe_layer,
            is_encoder_decoder=is_encoder_decoder,
        )
        if args.checkpoint_activations:
            layer = checkpoint_wrapper(layer)
        if args.fsdp:
            layer = wrap(layer)
        return layer

    def forward_embedding(
        self,
        src_tokens,
        token_embedding=None,
        positions=None,
    ):
        if token_embedding is None:
            token_embedding = self.embed_tokens(src_tokens)
        x = embed = self.embed_scale * token_embedding
        if self.embed_positions is not None:
            if src_tokens is not None:
                x = embed + self.embed_positions(src_tokens, positions=positions)
            else:
                x = embed + self.embed_positions(x, positions=positions)
        if self.layernorm_embedding is not None:
            x = self.layernorm_embedding(x)
        x = self.dropout_module(x)
        return x, embed

    def forward(
        self,
        src_tokens,
        encoder_padding_mask=None,
        attn_mask=None,
        return_all_hiddens=False,
        token_embeddings=None,
        multiway_split_position=None,
        features_only=False,
        incremental_state=None,
        positions=None,
        **kwargs
    ):
        assert src_tokens is not None or token_embeddings is not None

        if encoder_padding_mask is None:
            if src_tokens is not None:
                encoder_padding_mask = torch.zeros_like(
                    src_tokens, device=src_tokens.device
                ).bool()
            else:
                encoder_padding_mask = torch.zeros(
                    [token_embeddings.size(0), token_embeddings.size(1)],
                    device=token_embeddings.device,
                ).bool()

        if multiway_split_position is not None:
            assert self.args.multiway
            self.apply(set_split_position(multiway_split_position))

        x, encoder_embedding = self.forward_embedding(src_tokens, token_embeddings, positions)
        x = x * (1 - encoder_padding_mask.unsqueeze(-1).type_as(x))

        encoder_states = []

        if return_all_hiddens:
            encoder_states.append(x)

        rel_pos_bias = None
        if self.relative_position is not None:
            rel_pos_bias = self.relative_position(
                batch_size=x.size(0), qlen=x.size(1), klen=x.size(1)
            )

        # incremental_state is not None during inference if we use the bidirectional encoder as a generator as in s2s-ft (https://arxiv.org/abs/2110.13640)
        l_aux = []
        for idx, layer in enumerate(self.layers):
            x, l_aux_i = layer(
                x,
                encoder_padding_mask=encoder_padding_mask
                if incremental_state is None
                else None,
                attn_mask=attn_mask,
                rel_pos=rel_pos_bias,
                multiway_split_position=multiway_split_position,
                incremental_state=incremental_state[idx]
                if incremental_state is not None
                else None,
            )
            if return_all_hiddens:
                assert encoder_states is not None
                encoder_states.append(x)
            l_aux.append(l_aux_i)

        if self.layer_norm is not None:
            x = self.layer_norm(x)

        if not features_only and self.output_projection is not None:
            x = self.output_projection(x)

        return {
            "encoder_out": x,
            "encoder_embedding": encoder_embedding,
            "encoder_padding_mask": encoder_padding_mask,
            "encoder_states": encoder_states,
            "l_aux": l_aux,
        }
torchscale-flash-master
torchscale/architecture/encoder.py
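A short numeric sketch, for illustration only, of the DeepNorm constants computed in the file above for an encoder-only stack: the residual scaling `alpha` from `EncoderLayer.__init__` and the weight down-scaling `init_scale` from the deepnorm branch of `Encoder.__init__`. The depth is an arbitrary example value.

import math

encoder_layers = 12  # hypothetical depth

# Encoder-only DeepNorm: alpha = (2 * N) ** 0.25 and init_scale = (8 * N) ** 0.25.
alpha = math.pow(2.0 * encoder_layers, 0.25)
init_scale = math.pow(8.0 * encoder_layers, 0.25)
print(f"alpha={alpha:.3f}, init_scale={init_scale:.3f}")  # ~2.213 and ~3.130

# Residual branches are combined as residual * alpha + x (residual_connection),
# while fc1/fc2/out_proj/v_proj weights are divided by init_scale after init.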
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]

import torch.nn as nn

from torchscale.component.multihead_attention import MultiheadAttention
from torchscale.component.multiway_network import MultiwayNetwork


def init_bert_params(module):
    def normal_(data):
        data.copy_(data.cpu().normal_(mean=0.0, std=0.02).to(data.device))

    if isinstance(module, nn.Linear):
        normal_(module.weight.data)
        if module.bias is not None:
            module.bias.data.zero_()
    if isinstance(module, nn.Embedding):
        normal_(module.weight.data)
        if module.padding_idx is not None:
            module.weight.data[module.padding_idx].zero_()
    if isinstance(module, MultiheadAttention):
        if isinstance(module.q_proj, MultiwayNetwork):
            normal_(module.q_proj.A.weight.data)
            normal_(module.q_proj.B.weight.data)
            normal_(module.k_proj.A.weight.data)
            normal_(module.k_proj.B.weight.data)
            normal_(module.v_proj.A.weight.data)
            normal_(module.v_proj.B.weight.data)
        else:
            normal_(module.q_proj.weight.data)
            normal_(module.k_proj.weight.data)
            normal_(module.v_proj.weight.data)
torchscale-flash-master
torchscale/architecture/utils.py
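A tiny sketch, illustrative only, of applying `init_bert_params` through `Module.apply`, which is what the encoder and decoder do when `bert_init` is set; the module shapes here are arbitrary.

import torch.nn as nn

from torchscale.architecture.utils import init_bert_params

block = nn.Sequential(nn.Linear(8, 8), nn.Embedding(10, 8, padding_idx=0))
block.apply(init_bert_params)  # re-initializes weights with std=0.02, zeroes biases

assert block[0].bias.abs().sum() == 0        # Linear bias zeroed
assert block[1].weight[0].abs().sum() == 0   # padding row zeroed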
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]

import torch
import torch.nn as nn

from torchscale.architecture.encoder import Encoder
from torchscale.component.embedding import (
    PositionalEmbedding,
    TextEmbedding,
    VisionEmbedding,
)
from torchscale.component.multiway_network import MutliwayEmbedding


class BEiT3(nn.Module):
    def __init__(self, args, **kwargs):
        super().__init__()
        self.args = args
        assert args.multiway
        assert args.vocab_size > 0
        assert not args.share_encoder_input_output_embed
        self.text_embed = TextEmbedding(args.vocab_size, args.encoder_embed_dim)
        self.vision_embed = VisionEmbedding(
            args.img_size,
            args.patch_size,
            args.in_chans,
            args.encoder_embed_dim,
            contain_mask_token=True,
            prepend_cls_token=True,
        )
        # being consistent with Fairseq, which starts from 2 for position embedding
        embed_positions = MutliwayEmbedding(
            modules=[
                PositionalEmbedding(self.vision_embed.num_position_embeddings() + 2, args.encoder_embed_dim),
                PositionalEmbedding(args.max_source_positions, args.encoder_embed_dim),
            ],
            dim=1,
        )
        self.encoder = Encoder(
            args,
            embed_tokens=None,
            embed_positions=embed_positions,
            output_projection=None,
            is_encoder_decoder=False,
        )

    def forward(
        self,
        textual_tokens=None,
        visual_tokens=None,
        text_padding_position=None,
        attn_mask=None,
        vision_masked_position=None,
        incremental_state=None,
        positions=None,
    ):
        assert textual_tokens is not None or visual_tokens is not None

        if textual_tokens is None:
            x = self.vision_embed(visual_tokens, vision_masked_position)
            encoder_padding_mask = None
            multiway_split_position = -1
        elif visual_tokens is None:
            x = self.text_embed(textual_tokens)
            encoder_padding_mask = text_padding_position
            multiway_split_position = 0
        else:
            x1 = self.vision_embed(visual_tokens, vision_masked_position)
            multiway_split_position = x1.size(1)
            x2 = self.text_embed(textual_tokens)
            x = torch.cat([x1, x2], dim=1)

            if text_padding_position is not None:
                encoder_padding_mask = torch.cat(
                    [
                        torch.zeros(x1.shape[:-1]).to(x1.device).bool(),
                        text_padding_position,
                    ],
                    dim=1,
                )
            else:
                encoder_padding_mask = None

        encoder_out = self.encoder(
            src_tokens=None,
            encoder_padding_mask=encoder_padding_mask,
            attn_mask=attn_mask,
            token_embeddings=x,
            multiway_split_position=multiway_split_position,
            incremental_state=incremental_state,
            positions=positions,
        )
        encoder_out["multiway_split_position"] = multiway_split_position

        return encoder_out
torchscale-flash-master
torchscale/model/BEiT3.py
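A minimal forward-pass sketch for the vision-language case handled in `BEiT3.forward`, where image and text embeddings are concatenated and `multiway_split_position` marks the boundary. The vocabulary size, batch shapes, and the `no_output_layer=True` choice are assumptions for the example, not taken from the repository.

import torch

from torchscale.architecture.config import EncoderConfig
from torchscale.model.BEiT3 import BEiT3

# BEiT3 asserts multiway=True and vocab_size > 0; img_size=224 and patch_size=16
# are the defaults, so the vision branch yields 14*14 patches plus a CLS token.
# no_output_layer=True keeps the encoder output in embedding space.
config = EncoderConfig(multiway=True, vocab_size=64000, no_output_layer=True)
model = BEiT3(config)
model.eval()

images = torch.randn(2, 3, 224, 224)      # (B, C, H, W)
text = torch.randint(0, 64000, (2, 16))   # (B, T) token ids
padding = torch.zeros(2, 16).bool()       # no padded text positions

with torch.no_grad():
    out = model(textual_tokens=text, visual_tokens=images, text_padding_position=padding)

print(out["multiway_split_position"])  # 197 = 196 patches + 1 CLS token
print(out["encoder_out"].shape)        # (2, 197 + 16, 768)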
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
torchscale-flash-master
torchscale/model/__init__.py
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]

import pytest
import torch

from torchscale.architecture.config import DecoderConfig
from torchscale.architecture.decoder import Decoder

testcases = [
    {},
    {"vocab_size": 64000},
    {"activation_fn": "relu"},
    {"drop_path_rate": 0.1},
    {"decoder_normalize_before": False},
    {"no_scale_embedding": False},
    {"layernorm_embedding": True},
    {"rel_pos_buckets": 32, "max_rel_pos": 256},
    {"deepnorm": True, "subln": False, "decoder_normalize_before": False},
    {"bert_init": True},
    {"multiway": True},
    {"share_decoder_input_output_embed": True},
    {"checkpoint_activations": True},
    {"fsdp": True},
]


@pytest.mark.parametrize("args", testcases)
def test_decoder(args):
    config = DecoderConfig(**args)
    model = Decoder(config)
    prev_output_tokens = torch.ones(2, 10)
    token_embeddings = torch.rand(2, 10, config.decoder_embed_dim)
    model(
        prev_output_tokens=prev_output_tokens,
        token_embeddings=token_embeddings,
        features_only=True,
    )
torchscale-flash-master
tests/test_decoder.py
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]

import pytest
import torch

from torchscale.architecture.config import EncoderConfig
from torchscale.architecture.encoder import Encoder

testcases = [
    {},
    {"vocab_size": 64000},
    {"activation_fn": "relu"},
    {"drop_path_rate": 0.1},
    {"encoder_normalize_before": False},
    {"no_scale_embedding": False},
    {"layernorm_embedding": True},
    {"rel_pos_buckets": 32, "max_rel_pos": 256},
    {"deepnorm": True, "subln": False, "encoder_normalize_before": False},
    {"bert_init": True},
    {"multiway": True},
    {"share_encoder_input_output_embed": True},
    {"checkpoint_activations": True},
    {"fsdp": True},
]


@pytest.mark.parametrize("args", testcases)
def test_encoder(args):
    config = EncoderConfig(**args)
    model = Encoder(config)
    token_embeddings = torch.rand(2, 10, config.encoder_embed_dim)
    model(src_tokens=None, token_embeddings=token_embeddings)
torchscale-flash-master
tests/test_encoder.py
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
torchscale-flash-master
tests/__init__.py
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]

import pytest
import torch

from torchscale.architecture.config import EncoderDecoderConfig
from torchscale.architecture.encoder_decoder import EncoderDecoder
from torchscale.component.embedding import PositionalEmbedding, TextEmbedding

testcases = [
    {},
    {"vocab_size": 64000},
    {"activation_fn": "relu"},
    {"drop_path_rate": 0.1},
    {"encoder_normalize_before": False, "decoder_normalize_before": False},
    {"no_scale_embedding": False},
    {"layernorm_embedding": True},
    {"rel_pos_buckets": 32, "max_rel_pos": 256},
    {
        "deepnorm": True,
        "subln": False,
        "encoder_normalize_before": False,
        "decoder_normalize_before": False,
    },
    {"bert_init": True},
    {"multiway": True},
    {"share_decoder_input_output_embed": True},
    {"share_all_embeddings": True},
    {"checkpoint_activations": True},
    {"fsdp": True},
]


@pytest.mark.parametrize("args", testcases)
def test_decoder(args):
    config = EncoderDecoderConfig(**args)
    model = EncoderDecoder(
        config,
        encoder_embed_tokens=TextEmbedding(64000, config.encoder_embed_dim),
        decoder_embed_tokens=TextEmbedding(64000, config.decoder_embed_dim),
        encoder_embed_positions=PositionalEmbedding(
            config.max_source_positions, config.encoder_embed_dim
        ),
        decoder_embed_positions=PositionalEmbedding(
            config.max_target_positions, config.decoder_embed_dim
        ),
    )
    src_tokens = torch.ones(2, 20).long()
    prev_output_tokens = torch.ones(2, 10).long()
    model(
        src_tokens=src_tokens,
        prev_output_tokens=prev_output_tokens,
        features_only=True,
    )
torchscale-flash-master
tests/test_encoder_decoder.py
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
torchscale-flash-master
examples/__init__.py
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]

# flake8: noqa
import models
import tasks
import criterions
from fairseq_cli.generate import cli_main

if __name__ == "__main__":
    cli_main()
torchscale-flash-master
examples/fairseq/generate.py
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
torchscale-flash-master
examples/fairseq/__init__.py
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]

# flake8: noqa
import models
import tasks
import criterions
from fairseq_cli.interactive import cli_main

if __name__ == "__main__":
    cli_main()
torchscale-flash-master
examples/fairseq/interactive.py