"""Run validation loop for GILL.""" import collections from PIL import Image import time import tqdm import torch import torch.distributed as dist from torch.utils.tensorboard import SummaryWriter from torch.utils.data import Subset from torchmetrics import BLEUScore import torchvision from gill import losses as losses_utils from gill import utils from gill import data def validate(val_loader, model, tokenizer, criterion, epoch, args): ngpus_per_node = torch.cuda.device_count() writer = SummaryWriter(args.log_dir) bleu_scorers = [BLEUScore(n_gram=i) for i in [1, 2, 3, 4]] actual_step = (epoch + 1) * args.steps_per_epoch model_modes = ['captioning', 'retrieval', 'generation'] num_words = 32 # Number of words to generate. feature_extractor = utils.get_feature_extractor_for_model(args.visual_model, image_size=args.image_size, train=False) def get_pixel_values_from_path(path: str): img = Image.open(path) img = img.resize((args.image_size, args.image_size)) pixel_values = utils.get_pixel_values_for_model(feature_extractor, img)[None, ...] if args.precision == 'fp16': pixel_values = pixel_values.half() elif args.precision == 'bf16': pixel_values = pixel_values.bfloat16() if torch.cuda.is_available(): pixel_values = pixel_values.cuda() return pixel_values def run_validate(loader, base_progress=0): with torch.no_grad(): end = time.time() all_generated_captions = [] all_gt_captions = [] all_generated_image_paths = [] all_image_features = [] all_text_features = [] for i, (image_paths, images, caption_images, ret_tokens, ret_caption_len, gen_tokens, gen_caption_len, clip_emb) in tqdm.tqdm(enumerate(loader), position=0, total=len(loader)): i = base_progress + i if torch.cuda.is_available(): ret_tokens = ret_tokens.cuda(args.gpu, non_blocking=True) ret_caption_len = ret_caption_len.cuda(args.gpu, non_blocking=True) gen_tokens = gen_tokens.cuda(args.gpu, non_blocking=True) gen_caption_len = gen_caption_len.cuda(args.gpu, non_blocking=True) images = images.cuda() clip_emb = clip_emb.cuda() if args.precision == 'fp16': images = images.half() elif args.precision == 'bf16': images = images.bfloat16() for model_mode in model_modes: # compute output if model_mode == 'retrieval': tgt_tokens, token_len = ret_tokens, ret_caption_len elif model_mode == 'generation': tgt_tokens, token_len = gen_tokens, gen_caption_len else: tgt_tokens, token_len = ret_tokens, ret_caption_len # For captioning, it doesn't matter. 
(model_output, full_labels, last_embedding, _, visual_embs, visual_embs_norm, input_embs_norm, _) = model(images, tgt_tokens, token_len, mode=model_mode, input_prefix=args.input_prompt) # (N, T, C) if model_mode == 'captioning': loss = args.cap_loss_scale * model_output.loss elif model_mode == 'retrieval': loss = args.ret_loss_scale * model_output.loss elif model_mode == 'generation': loss = args.ret_loss_scale * model_output.loss else: raise NotImplementedError output = model_output.logits if model_mode == 'captioning': acc1, acc5 = utils.accuracy(output[:, :-1, :], full_labels[:, 1:], -100, topk=(1, 5)) top1.update(acc1[0], images.size(0)) top5.update(acc5[0], images.size(0)) ce_losses.update(loss.item(), images.size(0)) elif model_mode == 'retrieval': if args.distributed: original_last_embedding = torch.clone(last_embedding) all_visual_embs = [torch.zeros_like(visual_embs) for _ in range(dist.get_world_size())] all_last_embedding = [torch.zeros_like(last_embedding) for _ in range(dist.get_world_size())] dist.all_gather(all_visual_embs, visual_embs) dist.all_gather(all_last_embedding, last_embedding) # Overwrite with embeddings produced on this replica, which track the gradients. all_visual_embs[dist.get_rank()] = visual_embs all_last_embedding[dist.get_rank()] = last_embedding visual_embs = torch.cat(all_visual_embs) last_embedding = torch.cat(all_last_embedding) start_idx = args.rank * images.shape[0] end_idx = start_idx + images.shape[0] assert torch.all(last_embedding[start_idx:end_idx] == original_last_embedding), args.rank all_text_features.append(last_embedding.cpu()) all_image_features.append(visual_embs.cpu()) elif model_mode == 'generation': if args.num_clip_tokens != args.num_tokens: seq_len = clip_emb.shape[1] last_embedding = last_embedding.reshape((last_embedding.shape[0], seq_len, -1)) assert last_embedding.shape == clip_emb.shape, (last_embedding.shape == clip_emb.shape) image_loss = losses_utils.l2_loss(clip_emb, last_embedding) # (N,) gen_loss = args.gen_loss_scale * image_loss.mean() gen_losses.update(gen_loss.item(), image_loss.size(0)) # Run auto-regressive generation sample if model_mode == 'captioning': min_word_tokens = num_words input_embs = model.module.model.get_visual_embs(images, mode='captioning') # (2, n_visual_tokens, D) if args.input_prompt is not None: print(f'Adding prefix "{args.input_prompt}" to captioning generate=True.') prompt_ids = tokenizer(args.input_prompt, add_special_tokens=True, return_tensors="pt").input_ids prompt_ids = prompt_ids.to(visual_embs.device) prompt_embs = model.module.model.input_embeddings(prompt_ids) prompt_embs = prompt_embs.repeat(input_embs.shape[0], 1, 1) input_embs = torch.cat([input_embs, prompt_embs], dim=1) generated_ids, _, _ = model(input_embs, tgt_tokens, token_len, generate=True, num_words=num_words, temperature=0.0, top_p=1.0, min_word_tokens=min_word_tokens) if args.distributed and ngpus_per_node > 1: all_generated_ids = [torch.zeros_like(generated_ids) for _ in range(dist.get_world_size())] dist.all_gather(all_generated_ids, generated_ids) all_generated_ids[dist.get_rank()] = generated_ids generated_ids = torch.cat(all_generated_ids) all_tgt_tokens = [torch.zeros_like(tgt_tokens) for _ in range(dist.get_world_size())] dist.all_gather(all_tgt_tokens, tgt_tokens) all_tgt_tokens[dist.get_rank()] = tgt_tokens all_tgt_tokens = torch.cat(all_tgt_tokens) all_image_paths = [[None for _ in image_paths] for _ in range(dist.get_world_size())] dist.all_gather_object(all_image_paths, image_paths) 
all_image_paths[dist.get_rank()] = image_paths image_paths = [] for p in all_image_paths: image_paths.extend(p) else: all_tgt_tokens = tgt_tokens all_tgt_tokens[all_tgt_tokens == -100] = tokenizer.pad_token_id generated_captions = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) gt_captions = tokenizer.batch_decode(all_tgt_tokens, skip_special_tokens=True) for cap_i in range(len(generated_captions)): image_path = image_paths[cap_i] all_generated_image_paths.append(image_path) stop_idx = generated_captions[cap_i].find('.') if stop_idx > 5: all_generated_captions.append(generated_captions[cap_i][:stop_idx]) else: all_generated_captions.append(generated_captions[cap_i]) all_gt_captions.append([gt_captions[cap_i]]) elif model_mode in ['retrieval', 'generation']: if i == 0: # Generate without conditions just to test. input_ids = tgt_tokens[:, :3] # Use first 3 tokens as initial prompt for generation. input_embs = model.module.model.input_embeddings(input_ids) # (N, T, D) generated_ids, _, _ = model(input_embs, tgt_tokens, token_len, generate=True, num_words=num_words, temperature=0.0, top_p=1.0) generated_ids = torch.cat([input_ids, generated_ids], dim=1) generated_captions = tokenizer.batch_decode(generated_ids, skip_special_tokens=False) gt_captions = tokenizer.batch_decode(tgt_tokens, skip_special_tokens=False) else: raise NotImplementedError if i == 0: max_to_display = 5 print('=' * 30) print('Generated samples:') for cap_i, cap in enumerate(generated_captions[:max_to_display]): print(f'{cap_i}) {cap}') print('=' * 30) print('Real samples:') for cap_i, cap in enumerate(gt_captions[:max_to_display]): print(f'{cap_i}) {cap}') print('=' * 30) # Write images. if not args.distributed or (args.rank % ngpus_per_node == 0): max_images_to_show = 16 normalized_images = images - images.min() normalized_images /= normalized_images.max() # (N, 3, H, W) # Create generated caption text. generated_cap_images = torch.stack([ utils.create_image_of_text( generated_captions[j].encode('ascii', 'ignore'), width=normalized_images.shape[3], color=(255, 255, 0)) for j in range(normalized_images.shape[0])], axis=0) # Append gt/generated caption images. display_images = torch.cat([normalized_images.float().cpu(), caption_images, generated_cap_images], axis=2)[:max_images_to_show] grid = torchvision.utils.make_grid(display_images, nrow=int(max_images_to_show ** 0.5), padding=4) writer.add_image(f'val/images_{model_mode}', grid, actual_step) vis_emb_norm.update(visual_embs_norm.item(), images.size(0)) inp_emb_norm.update(input_embs_norm.item(), images.size(0)) # measure elapsed time batch_time.update(time.time() - end) end = time.time() if i % args.print_freq == 0: progress.display(i + 1) if i == args.val_steps_per_epoch - 1: break # Measure captioning metrics. 
path2captions = collections.defaultdict(list) for image_path, caption in zip(all_generated_image_paths, all_gt_captions): assert len(caption) == 1, caption trunc_cap = caption[0] for i in range(args.num_tokens): trunc_cap = trunc_cap.replace(f'[IMG{i}]', '') path2captions[image_path].append(trunc_cap.strip()) full_gt_captions = [path2captions[path] for path in all_generated_image_paths] print(f'Computing BLEU with {len(all_generated_captions)} generated captions:' f'{all_generated_captions[:5]} and {len(full_gt_captions)} groundtruth captions:', f'{full_gt_captions[:5]}.') bleu1_score = bleu_scorers[0](all_generated_captions, full_gt_captions) bleu1.update(bleu1_score, 1) bleu2_score = bleu_scorers[1](all_generated_captions, full_gt_captions) bleu2.update(bleu2_score, 1) bleu3_score = bleu_scorers[2](all_generated_captions, full_gt_captions) bleu3.update(bleu3_score, 2) bleu4_score = bleu_scorers[3](all_generated_captions, full_gt_captions) bleu4.update(bleu4_score, 3) # Measure retrieval metrics over the entire validation set. all_image_features = torch.cat(all_image_features, axis=0) # (coco_val_len, 2048) all_text_features = torch.cat(all_text_features, axis=0) # (coco_val_len, 2048) print(f"Computing similarity between {all_image_features.shape} and {all_text_features.shape}.") logits_per_image = all_image_features @ all_text_features.t() logits_per_text = logits_per_image.t() all_image_acc1, all_image_acc5 = losses_utils.contrastive_acc(logits_per_image, topk=(1, 5)) all_caption_acc1, all_caption_acc5 = losses_utils.contrastive_acc(logits_per_text, topk=(1, 5)) image_loss = losses_utils.contrastive_loss(logits_per_image) caption_loss = losses_utils.contrastive_loss(logits_per_text) loss = args.ret_loss_scale * (image_loss + caption_loss) / 2.0 cont_losses.update(loss.item(), logits_per_image.size(0)) top1_caption.update(all_caption_acc1.item(), logits_per_image.size(0)) top5_caption.update(all_caption_acc5.item(), logits_per_image.size(0)) top1_image.update(all_image_acc1.item(), logits_per_image.size(0)) top5_image.update(all_image_acc5.item(), logits_per_image.size(0)) batch_time = utils.AverageMeter('Time', ':6.3f', utils.Summary.AVERAGE) cont_losses = utils.AverageMeter('ContLoss', ':.4e', utils.Summary.AVERAGE) ce_losses = utils.AverageMeter('CeLoss', ':.4e', utils.Summary.AVERAGE) gen_losses = utils.AverageMeter('GenLoss', ':.4e', utils.Summary.AVERAGE) top1 = utils.AverageMeter('Acc@1', ':6.2f', utils.Summary.AVERAGE) top5 = utils.AverageMeter('Acc@5', ':6.2f', utils.Summary.AVERAGE) bleu1 = utils.AverageMeter('BLEU@1', ':6.2f', utils.Summary.AVERAGE) bleu2 = utils.AverageMeter('BLEU@2', ':6.2f', utils.Summary.AVERAGE) bleu3 = utils.AverageMeter('BLEU@3', ':6.2f', utils.Summary.AVERAGE) bleu4 = utils.AverageMeter('BLEU@4', ':6.2f', utils.Summary.AVERAGE) vis_emb_norm = utils.AverageMeter('VisualEmbNorm', ':.4e', utils.Summary.AVERAGE) inp_emb_norm = utils.AverageMeter('TextEmbNorm', ':.4e', utils.Summary.AVERAGE) top1_caption = utils.AverageMeter('CaptionAcc@1', ':6.2f', utils.Summary.AVERAGE) top5_caption = utils.AverageMeter('CaptionAcc@5', ':6.2f', utils.Summary.AVERAGE) top1_image = utils.AverageMeter('ImageAcc@1', ':6.2f', utils.Summary.AVERAGE) top5_image = utils.AverageMeter('ImageAcc@5', ':6.2f', utils.Summary.AVERAGE) progress = utils.ProgressMeter( len(val_loader) + (args.distributed and (len(val_loader.sampler) * args.world_size < len(val_loader.dataset))), [batch_time, cont_losses, ce_losses, gen_losses, top1, top5, bleu4], prefix='Test: ') # switch to evaluate mode 
model.eval() run_validate(val_loader) if args.distributed: batch_time.all_reduce() cont_losses.all_reduce() gen_losses.all_reduce() vis_emb_norm.all_reduce() inp_emb_norm.all_reduce() bleu1.all_reduce() bleu2.all_reduce() bleu3.all_reduce() bleu4.all_reduce() top1.all_reduce() top5.all_reduce() top1_caption.all_reduce() top5_caption.all_reduce() top1_image.all_reduce() top5_image.all_reduce() if args.distributed and (len(val_loader.sampler) * args.world_size < len(val_loader.dataset)): aux_val_dataset = Subset(val_loader.dataset, range(len(val_loader.sampler) * args.world_size, len(val_loader.dataset))) aux_val_loader = torch.utils.data.DataLoader( aux_val_dataset, batch_size=(args.val_batch_size or args.batch_size), shuffle=False, num_workers=args.workers, pin_memory=True, collate_fn=data.collate_fn) run_validate(aux_val_loader, len(val_loader)) progress.display_summary() writer.add_scalar('val/vis_emb_norm', vis_emb_norm.avg, actual_step) writer.add_scalar('val/text_emb_norm', inp_emb_norm.avg, actual_step) writer.add_scalar('val/total_secs_per_batch', batch_time.avg, actual_step) writer.add_scalar('val/seq_top1_acc', top1.avg, actual_step) writer.add_scalar('val/seq_top5_acc', top5.avg, actual_step) writer.add_scalar('val/ce_loss', ce_losses.avg, actual_step) writer.add_scalar('val/bleu1', bleu1.avg, actual_step) writer.add_scalar('val/bleu2', bleu2.avg, actual_step) writer.add_scalar('val/bleu3', bleu3.avg, actual_step) writer.add_scalar('val/bleu4', bleu4.avg, actual_step) writer.add_scalar('val/contrastive_loss', cont_losses.avg, actual_step) writer.add_scalar('val/gen_l2_loss', gen_losses.avg, actual_step) writer.add_scalar('val/t2i_top1_acc', top1_caption.avg, actual_step) writer.add_scalar('val/t2i_top5_acc', top5_caption.avg, actual_step) writer.add_scalar('val/i2t_top1_acc', top1_image.avg, actual_step) writer.add_scalar('val/i2t_top5_acc', top5_image.avg, actual_step) writer.add_scalar('val/top1_acc', (top1_caption.avg + top1_image.avg) / 2.0, actual_step) writer.add_scalar('val/top5_acc', (top5_caption.avg + top5_image.avg) / 2.0, actual_step) writer.close() # Use top1 accuracy as the metric for keeping the best checkpoint. return top1_caption.avg
gill-main
gill/validate.py
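The validation loop above scores captions with `torchmetrics.BLEUScore`, which takes the generated captions as a flat list of strings and the ground-truth captions as a list of reference lists (which is why each entry appended to `all_gt_captions` is itself a one-element list). A minimal standalone sketch of that convention, with made-up captions:

from torchmetrics import BLEUScore

generated = ['a dog runs along the beach', 'a plate of food on a table']
references = [['a dog running along the beach'], ['a plate of food sitting on a wooden table']]

bleu_scorers = [BLEUScore(n_gram=n) for n in [1, 2, 3, 4]]
for n, scorer in zip([1, 2, 3, 4], bleu_scorers):
  score = scorer(generated, references)  # returns a 0-dim tensor
  print(f'BLEU-{n}: {score.item():.3f}')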
from enum import Enum import subprocess import sys import shutil import torch import torch.distributed as dist from torchvision.transforms import functional as F from torchvision import transforms as T from transformers import AutoFeatureExtractor from PIL import Image, ImageDraw, ImageFont, ImageOps import random import requests from io import BytesIO def dump_git_status(out_file=sys.stdout, exclude_file_patterns=['*.ipynb', '*.th', '*.sh', '*.txt', '*.json']): """Logs git status to stdout.""" subprocess.call('git rev-parse HEAD', shell=True, stdout=out_file) subprocess.call('echo', shell=True, stdout=out_file) exclude_string = '' subprocess.call('git --no-pager diff -- . {}'.format(exclude_string), shell=True, stdout=out_file) def get_image_from_url(url: str): response = requests.get(url) img = Image.open(BytesIO(response.content)) img = img.resize((224, 224)) img = img.convert('RGB') return img def truncate_caption(caption: str) -> str: """Truncate captions at periods and newlines.""" caption = caption.strip('\n') trunc_index = caption.find('\n') + 1 if trunc_index <= 0: trunc_index = caption.find('.') + 1 if trunc_index > 0: caption = caption[:trunc_index] return caption def pad_to_size(x, size=256): delta_w = size - x.size[0] delta_h = size - x.size[1] padding = ( delta_w // 2, delta_h // 2, delta_w - (delta_w // 2), delta_h - (delta_h // 2), ) new_im = ImageOps.expand(x, padding) return new_im class RandCropResize(object): """ Randomly crops, then randomly resizes, then randomly crops again, an image. Mirroring the augmentations from https://arxiv.org/abs/2102.12092 """ def __init__(self, target_size): self.target_size = target_size def __call__(self, img): img = pad_to_size(img, self.target_size) d_min = min(img.size) img = T.RandomCrop(size=d_min)(img) t_min = min(d_min, round(9 / 8 * self.target_size)) t_max = min(d_min, round(12 / 8 * self.target_size)) t = random.randint(t_min, t_max + 1) img = T.Resize(t)(img) if min(img.size) < 256: img = T.Resize(256)(img) return T.RandomCrop(size=self.target_size)(img) class SquarePad(object): """Pads image to square. From https://discuss.pytorch.org/t/how-to-resize-and-pad-in-a-torchvision-transforms-compose/71850/9 """ def __call__(self, image): max_wh = max(image.size) p_left, p_top = [(max_wh - s) // 2 for s in image.size] p_right, p_bottom = [max_wh - (s+pad) for s, pad in zip(image.size, [p_left, p_top])] padding = (p_left, p_top, p_right, p_bottom) return F.pad(image, padding, 0, 'constant') def create_image_of_text(text: str, width: int = 224, nrows: int = 2, color=(255, 255, 255), font=None) -> torch.Tensor: """Creates a (3, nrows * 14, width) image of text. Returns: cap_img: (3, 14 * nrows, width) image of wrapped text. """ height = 12 padding = 5 effective_width = width - 2 * padding # Create a black image to draw text on. cap_img = Image.new('RGB', (effective_width * nrows, height), color = (0, 0, 0)) draw = ImageDraw.Draw(cap_img) draw.text((0, 0), text, color, font=font or ImageFont.load_default()) cap_img = F.convert_image_dtype(F.pil_to_tensor(cap_img), torch.float32) # (3, height, W * nrows) cap_img = torch.split(cap_img, effective_width, dim=-1) # List of nrow elements of shape (3, height, W) cap_img = torch.cat(cap_img, dim=1) # (3, height * nrows, W) # Add zero padding. 
cap_img = torch.nn.functional.pad(cap_img, [padding, padding, 0, padding]) return cap_img def get_feature_extractor_for_model(model_name: str, image_size: int = 224, train: bool = True): print(f'Using HuggingFace AutoFeatureExtractor for {model_name}.') feature_extractor = AutoFeatureExtractor.from_pretrained(model_name) return feature_extractor def get_pixel_values_for_model(feature_extractor, img: Image.Image): pixel_values = feature_extractor(img.convert('RGB'), return_tensors="pt").pixel_values[0, ...] # (3, H, W) return pixel_values def save_checkpoint(state, is_best, filename='checkpoint'): torch.save(state, filename + '.pth.tar') if is_best: shutil.copyfile(filename + '.pth.tar', filename + '_best.pth.tar') def accuracy(output, target, padding, topk=(1,)): """Computes the accuracy over the k top predictions for the specified values of k""" with torch.no_grad(): maxk = max(topk) if output.shape[-1] < maxk: print(f"[WARNING] Less than {maxk} predictions available. Using {output.shape[-1]} for topk.") maxk = min(maxk, output.shape[-1]) batch_size = target.size(0) # Take topk along the last dimension. _, pred = output.topk(maxk, -1, True, True) # (N, T, topk) mask = (target != padding).type(target.dtype) target_expand = target[..., None].expand_as(pred) correct = pred.eq(target_expand) correct = correct * mask[..., None].expand_as(correct) res = [] for k in topk: correct_k = correct[..., :k].reshape(-1).float().sum(0, keepdim=True) res.append(correct_k.mul_(100.0 / mask.sum())) return res def get_params_count(model, max_name_len: int = 60): params = [(name[:max_name_len], p.numel(), str(tuple(p.shape)), p.requires_grad) for name, p in model.named_parameters()] total_trainable_params = sum([x[1] for x in params if x[-1]]) total_nontrainable_params = sum([x[1] for x in params if not x[-1]]) return params, total_trainable_params, total_nontrainable_params def get_params_count_str(model, max_name_len: int = 60): padding = 70 # Hardcoded depending on desired amount of padding and separators. 
params, total_trainable_params, total_nontrainable_params = get_params_count(model, max_name_len) param_counts_text = '' param_counts_text += '=' * (max_name_len + padding) + '\n' param_counts_text += f'| {"Module":<{max_name_len}} | {"Trainable":<10} | {"Shape":>15} | {"Param Count":>12} |\n' param_counts_text += '-' * (max_name_len + padding) + '\n' for name, param_count, shape, trainable in params: param_counts_text += f'| {name:<{max_name_len}} | {"True" if trainable else "False":<10} | {shape:>15} | {param_count:>12,} |\n' param_counts_text += '-' * (max_name_len + padding) + '\n' param_counts_text += f'| {"Total trainable params":<{max_name_len}} | {"":<10} | {"":<15} | {total_trainable_params:>12,} |\n' param_counts_text += f'| {"Total non-trainable params":<{max_name_len}} | {"":<10} | {"":<15} | {total_nontrainable_params:>12,} |\n' param_counts_text += '=' * (max_name_len + padding) + '\n' return param_counts_text class Summary(Enum): NONE = 0 AVERAGE = 1 SUM = 2 COUNT = 3 class ProgressMeter(object): def __init__(self, num_batches, meters, prefix=""): self.batch_fmtstr = self._get_batch_fmtstr(num_batches) self.meters = meters self.prefix = prefix def display(self, batch): entries = [self.prefix + self.batch_fmtstr.format(batch)] entries += [str(meter) for meter in self.meters] print('\t'.join(entries)) def display_summary(self): entries = [" *"] entries += [meter.summary() for meter in self.meters] print(' '.join(entries)) def _get_batch_fmtstr(self, num_batches): num_digits = len(str(num_batches // 1)) fmt = '{:' + str(num_digits) + 'd}' return '[' + fmt + '/' + fmt.format(num_batches) + ']' class AverageMeter(object): """Computes and stores the average and current value""" def __init__(self, name, fmt=':f', summary_type=Summary.AVERAGE): self.name = name self.fmt = fmt self.summary_type = summary_type self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def all_reduce(self): device = "cuda" if torch.cuda.is_available() else "cpu" total = torch.tensor([self.sum, self.count], dtype=torch.float32, device=device) dist.all_reduce(total, dist.ReduceOp.SUM, async_op=False) self.sum, self.count = total.tolist() self.avg = self.sum / self.count def __str__(self): fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})' return fmtstr.format(**self.__dict__) def summary(self): fmtstr = '' if self.summary_type is Summary.NONE: fmtstr = '' elif self.summary_type is Summary.AVERAGE: fmtstr = '{name} {avg:.3f}' elif self.summary_type is Summary.SUM: fmtstr = '{name} {sum:.3f}' elif self.summary_type is Summary.COUNT: fmtstr = '{name} {count:.3f}' else: raise ValueError('invalid summary type %r' % self.summary_type) return fmtstr.format(**self.__dict__)
gill-main
gill/utils.py
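The `AverageMeter` and `ProgressMeter` classes defined above drive all of the metric logging in `validate.py`: meters accumulate running sums, and the progress meter formats them per step and again at the end of validation. A small illustrative sketch (the numbers are made up):

from gill import utils

batch_time = utils.AverageMeter('Time', ':6.3f', utils.Summary.AVERAGE)
top1 = utils.AverageMeter('Acc@1', ':6.2f', utils.Summary.AVERAGE)
progress = utils.ProgressMeter(num_batches=10, meters=[batch_time, top1], prefix='Test: ')

for step in range(10):
  batch_time.update(0.5)    # pretend each batch took 0.5s
  top1.update(75.0, n=32)   # pretend 75% top-1 accuracy over a batch of 32
  if step % 5 == 0:
    progress.display(step + 1)

progress.display_summary()  # prints " * Time 0.500 Acc@1 75.000"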
"""A slightly modified version of the HuggingFace StableDiffusion pipeline, to allow us to extract text embeddings.""" # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Any, Callable, Dict, List, Optional, Union import torch from packaging import version from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL, UNet2DConditionModel from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import deprecate, is_accelerate_available, logging, randn_tensor, replace_example_docstring from diffusers.pipeline_utils import DiffusionPipeline from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import StableDiffusionPipeline >>> pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16) >>> pipe = pipe.to("cuda") >>> prompt = "a photo of an astronaut riding a horse on mars" >>> image = pipe(prompt).images[0] ``` """ class StableDiffusionPipeline(DiffusionPipeline): r""" Pipeline for text-to-image generation using Stable Diffusion. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder. Stable Diffusion uses the text portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. feature_extractor ([`CLIPFeatureExtractor`]): Model that extracts features from generated images to be used as inputs for the `safety_checker`. 
""" _optional_components = ["safety_checker", "feature_extractor"] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPFeatureExtractor, requires_safety_checker: bool = True, truncate_side: str = 'right', ): super().__init__() if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." " `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) if safety_checker is None and requires_safety_checker: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) if safety_checker is not None and feature_extractor is None: raise ValueError( "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." ) is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( version.parse(unet.config._diffusers_version).base_version ) < version.parse("0.9.0.dev0") is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. 
If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.register_to_config(requires_safety_checker=requires_safety_checker) self.truncate_side = truncate_side def enable_vae_slicing(self): r""" Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ self.vae.enable_slicing() def disable_vae_slicing(self): r""" Disable sliced VAE decoding. If `enable_vae_slicing` was previously invoked, this method will go back to computing decoding in one step. """ self.vae.disable_slicing() def enable_sequential_cpu_offload(self, gpu_id=0): r""" Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet, text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called. """ if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`") device = torch.device(f"cuda:{gpu_id}") for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]: cpu_offload(cpu_offloaded_model, device) if self.safety_checker is not None: cpu_offload(self.safety_checker, execution_device=device, offload_buffers=True) @property def _execution_device(self): r""" Returns the device on which the pipeline's models will be executed. After calling `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module hooks. """ if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"): return self.device for module in self.unet.modules(): if ( hasattr(module, "_hf_hook") and hasattr(module._hf_hook, "execution_device") and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return self.device def _encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, ): r""" Encodes the prompt into text encoder hidden states. 
Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_ prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. """ if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if self.truncate_side == 'left': # Truncate from the left. if untruncated_ids.shape[-1] > self.tokenizer.model_max_length: print('Original prompt:', prompt) prompt = self.tokenizer.batch_decode( untruncated_ids[:, -1-self.tokenizer.model_max_length: -1] ) print('Trunc prompt:', prompt) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids if self.truncate_side == 'right': untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, ) prompt_embeds = prompt_embeds[0] prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." 
) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) return prompt_embeds def run_safety_checker(self, image, device, dtype): if self.safety_checker is not None: safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) else: has_nsfw_concept = None return image, has_nsfw_concept def decode_latents(self, latents): # latents = 1 / self.vae.config.scaling_factor * latents latents = 1 / 0.18215 * latents image = self.vae.decode(latents).sample image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, prompt, height, width, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: Optional[int] = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, return_prompts_only: bool = False, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. 
Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. The function will be called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function will be called. If not specified, the callback will be called at every step. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttnProcessor` as defined under `self.processor` in [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py). Examples: Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) content, according to the `safety_checker`. """ # 0. Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds ) # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 if return_prompts_only: # Don't concat two prompts do_classifier_free_guidance = False # 3. Encode input prompt prompt_embeds = self._encode_prompt( prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, ) if return_prompts_only: return prompt_embeds # 4. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 5. Prepare latent variables num_channels_latents = self.unet.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, ) # 6. Prepare extra step kwargs. 
TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 7. Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, ).sample # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): if callback is not None and i % callback_steps == 0: callback(i, t, latents) # 8. Post-processing image = self.decode_latents(latents) # 9. Run safety checker image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) # 10. Convert to PIL if output_type == "pil": image = self.numpy_to_pil(image) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
gill-main
gill/custom_sd.py
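Relative to the upstream diffusers pipeline, the main additions above are the `truncate_side` option and the `return_prompts_only` flag, which makes `__call__` stop right after prompt encoding so the CLIP text-encoder embeddings can be extracted directly (these are the same embeddings that serve as generation targets in `data.py`). A hedged usage sketch; the checkpoint name comes from the docstring example, and the fp16/CUDA settings are assumptions:

import torch
from gill.custom_sd import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    'runwayml/stable-diffusion-v1-5', torch_dtype=torch.float16)
pipe = pipe.to('cuda')

# Standard text-to-image generation, unchanged from the upstream pipeline.
image = pipe('a photo of an astronaut riding a horse on mars').images[0]

# Modified path: return only the CLIP text-encoder hidden states
# (for SD v1.x this has shape (batch_size * num_images_per_prompt, 77, 768)).
prompt_embeds = pipe('a photo of an astronaut riding a horse on mars', return_prompts_only=True)
print(prompt_embeds.shape)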
"""Helper file defining some common loss functions.""" from typing import Optional import torch from gill import utils def l1_loss(u: torch.Tensor, v: torch.Tensor) -> torch.Tensor: """ Args: u: (N, D) tensor. v: (N, D) tensor. Returns: l1_loss: (N,) tensor of summed L1 loss. """ assert u.shape == v.shape, (u.shape, v.shape) return torch.abs(u - v).sum(dim=-1) def l2_loss(u: torch.Tensor, v: torch.Tensor) -> torch.Tensor: """ Args: u: (N, T, D) tensor. v: (N, T, D) tensor. Returns: l1_loss: (N,) tensor of summed L1 loss. """ assert u.shape == v.shape, (u.shape, v.shape) return ((u - v) ** 2).sum(dim=-1) ** 0.5 def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: return torch.nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device)) def contrastive_acc(logits: torch.Tensor, target: Optional[torch.Tensor] = None, topk=(1,)) -> torch.Tensor: """ Args: logits: (N, N) predictions. target: (N, num_correct_answers) labels. """ assert len(logits.shape) == 2, logits.shape batch_size = logits.shape[0] if target is None: target = torch.arange(len(logits), device=logits.device) return utils.accuracy(logits, target, -1, topk) else: assert len(target.shape) == 2, target.shape with torch.no_grad(): maxk = max(topk) if logits.shape[-1] < maxk: print(f"[WARNING] Less than {maxk} predictions available. Using {logits.shape[-1]} for topk.") maxk = min(maxk, logits.shape[-1]) # Take topk along the last dimension. _, pred = logits.topk(maxk, -1, True, True) # (N, topk) assert pred.shape == (batch_size, maxk) target_expand = target[:, :, None].repeat(1, 1, maxk) # (N, num_correct_answers, topk) pred_expand = pred[:, None, :].repeat(1, target.shape[1], 1) # (N, num_correct_answers, topk) correct = pred_expand.eq(target_expand) # (N, num_correct_answers, topk) correct = torch.any(correct, dim=1) # (N, topk) res = [] for k in topk: any_k_correct = torch.clamp(correct[:, :k].sum(1), max=1) # (N,) correct_k = any_k_correct.float().sum(0, keepdim=True) res.append(correct_k.mul_(100.0 / batch_size)) return res
gill-main
gill/losses.py
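In `validate.py`, `contrastive_loss` and `contrastive_acc` are applied to the full image-text similarity matrix accumulated over the validation set. A self-contained toy sketch with random features (the dimensions are arbitrary):

import torch
from gill import losses as losses_utils

image_feats = torch.nn.functional.normalize(torch.randn(8, 256), dim=-1)
text_feats = torch.nn.functional.normalize(torch.randn(8, 256), dim=-1)

logits_per_image = image_feats @ text_feats.t()  # (8, 8); diagonal entries are the matching pairs
logits_per_text = logits_per_image.t()

loss = (losses_utils.contrastive_loss(logits_per_image) +
        losses_utils.contrastive_loss(logits_per_text)) / 2.0
acc1, acc5 = losses_utils.contrastive_acc(logits_per_image, topk=(1, 5))
print(loss.item(), acc1.item(), acc5.item())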
import torch
from torch import nn


class TextFcLayer(nn.Module):
  """Layers used in mapping text embeddings to visual outputs."""

  def __init__(self, in_dim: int, out_dim: int, num_input_tokens: int = 1,
               num_output_tokens: int = 1, mode: str = 'linear'):
    super().__init__()

    self.num_input_tokens = num_input_tokens
    self.num_output_tokens = num_output_tokens
    self.mode = mode

    if mode == 'linear':
      self.model = nn.Linear(in_dim, out_dim)
    elif mode == 'gill_mapper':
      hidden_dim = 512
      self.fc = nn.Linear(in_dim, hidden_dim)
      self.tfm = nn.Transformer(batch_first=True, norm_first=True,
                                d_model=hidden_dim, num_encoder_layers=4, num_decoder_layers=4,
                                dim_feedforward=hidden_dim * 4, dropout=0.0, nhead=4)
      self.model = nn.Linear(hidden_dim, out_dim)
      self.query_embs = nn.Parameter(torch.randn(1, num_output_tokens, hidden_dim))
    else:
      raise NotImplementedError(mode)

  def forward(self, x: torch.Tensor, input_embs: torch.Tensor) -> torch.Tensor:
    outputs = None

    if self.mode == 'gill_mapper':
      x = x + input_embs

    if isinstance(self.model, nn.ModuleList):
      assert len(self.model) == x.shape[1] == self.num_input_tokens, (len(self.model), x.shape, self.num_input_tokens)
      outputs = []
      for i in range(self.num_input_tokens):
        outputs.append(self.model[i](x[:, i, :]))  # (N, D)
      outputs = torch.stack(outputs, dim=1)  # (N, T, D)
    else:
      if self.mode == 'gill_mapper':
        x = self.fc(x)
        x = self.tfm(x, self.query_embs.repeat(x.shape[0], 1, 1))
      outputs = self.model(x)

      if outputs.shape[1] != self.num_output_tokens and self.mode == 'linear':
        if self.mode == 'linear':
          outputs = outputs[:, :self.num_output_tokens, :]
        else:
          raise NotImplementedError

    assert outputs.shape[1] == 1 or (outputs.shape[1] * outputs.shape[2] == self.num_output_tokens * 768), (
        outputs.shape, self.num_output_tokens)
    return outputs  # (N, T, D)
gill-main
gill/layers.py
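A quick shape check for the `gill_mapper` mode above: the two inputs are summed, projected to the transformer width, and decoded into `num_output_tokens` vectors through the learned query embeddings (in GILL, `x` holds the LLM hidden states at the [IMG] positions and `input_embs` the corresponding input embeddings). The dimensions below are illustrative only:

import torch
from gill.layers import TextFcLayer

mapper = TextFcLayer(in_dim=4096, out_dim=768,
                     num_input_tokens=8, num_output_tokens=77,
                     mode='gill_mapper')

hidden_states = torch.randn(2, 8, 4096)
input_embs = torch.randn(2, 8, 4096)
out = mapper(hidden_states, input_embs)
print(out.shape)  # torch.Size([2, 77, 768])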
"""Modified from https://github.com/mlfoundations/open_clip""" from typing import Optional, Tuple, List import collections import logging import os import numpy as np import pandas as pd import torch import torchvision.datasets as datasets from torchvision import transforms as T from PIL import Image, ImageFont from torch.utils.data import Dataset from gill import utils def collate_fn(batch): batch = list(filter(lambda x: x is not None, batch)) return torch.utils.data.dataloader.default_collate(batch) def get_dataset(args, split: str, tokenizer, precision: str = 'fp32') -> Dataset: assert split in ['train', 'val' ], 'Expected split to be one of "train" or "val", got {split} instead.' dataset_paths = [] image_data_dirs = [] train = split == 'train' # Default configs for datasets. # Folder structure should look like: if split == 'train': if 'cc3m' in args.dataset: dataset_paths.append(os.path.join(args.dataset_dir, 'cc3m_train.tsv')) image_data_dirs.append(os.path.join(args.image_dir, 'cc3m/training/')) else: raise NotImplementedError elif split == 'val': if 'cc3m' in args.val_dataset: dataset_paths.append(os.path.join(args.dataset_dir, 'cc3m_val.tsv')) image_data_dirs.append(os.path.join(args.image_dir, 'cc3m/validation')) else: raise NotImplementedError assert len(dataset_paths) == len(image_data_dirs) == 1, (dataset_paths, image_data_dirs) else: raise NotImplementedError if len(dataset_paths) > 1: print(f'{len(dataset_paths)} datasets requested: {dataset_paths}') dataset = torch.utils.data.ConcatDataset([ CsvDataset(path, image_dir, tokenizer, 'image', 'caption', args.visual_model, train=train, max_len=args.max_len, precision=args.precision, image_size=args.image_size, retrieval_token_idx=args.retrieval_token_idx, gen_token_idx=args.gen_token_idx, num_tokens=args.num_tokens, num_clip_tokens=args.num_clip_tokens) for (path, image_dir) in zip(dataset_paths, image_data_dirs)]) elif len(dataset_paths) == 1: dataset = CsvDataset(dataset_paths[0], image_data_dirs[0], tokenizer, 'image', 'caption', args.visual_model, train=train, max_len=args.max_len, precision=args.precision, image_size=args.image_size, retrieval_token_idx=args.retrieval_token_idx, gen_token_idx=args.gen_token_idx, num_tokens=args.num_tokens, num_clip_tokens=args.num_clip_tokens) else: raise ValueError(f'There should be at least one valid dataset, got train={args.dataset}, val={args.val_dataset} instead.') return dataset class CsvDataset(Dataset): def __init__(self, input_filename, base_image_dir, tokenizer, img_key, caption_key, feature_extractor_model: str, train: bool = True, max_len: int = 32, sep="\t", precision: str = 'fp32', image_size: int = 224, retrieval_token_idx: List[int] = [-1], gen_token_idx: List[int] = [-1], num_tokens: int = 1, num_clip_tokens: int = 1): logging.debug(f'Loading tsv data from {input_filename}.') df = pd.read_csv(input_filename, sep=sep) self.base_image_dir = base_image_dir self.images = df[img_key].tolist() self.captions = df[caption_key].tolist() assert len(self.images) == len(self.captions) self.feature_extractor_model = feature_extractor_model self.feature_extractor = utils.get_feature_extractor_for_model( feature_extractor_model, image_size=image_size, train=False) self.image_size = image_size self.tokenizer = tokenizer self.max_len = max_len self.precision = precision self.retrieval_token_idx = retrieval_token_idx self.gen_token_idx = gen_token_idx self.num_tokens = num_tokens self.num_clip_tokens = num_clip_tokens self.font = None logging.debug('Done loading data.') def __len__(self): 
return len(self.captions) def __getitem__(self, idx): while True: image_path = os.path.join(self.base_image_dir, str(self.images[idx])) caption = str(self.captions[idx]) clip_l_path = os.path.join(self.base_image_dir, 'clip_embs', str(self.images[idx]) + '.npy') try: img = Image.open(image_path) images = utils.get_pixel_values_for_model(self.feature_extractor, img) # Only load if we are in generation mode. with open(clip_l_path, 'rb') as f: clip_emb = np.load(f, allow_pickle=True) # (num_clip_tokens, 768) clip_emb = clip_emb[:self.num_clip_tokens, :] # Generation mode. caption = caption for i in range(self.num_tokens): caption += f'[IMG{i}]' tokenized_data = self.tokenizer( caption, return_tensors="pt", padding='max_length', truncation=True, max_length=self.max_len) tokens = tokenized_data.input_ids[0] caption_len = tokenized_data.attention_mask[0].sum() # If IMG tokens are overridden by padding, replace them with the correct token. if tokens[-1] not in [self.tokenizer.pad_token_id, self.gen_token_idx[-1]]: tokens[-self.num_tokens:] = torch.tensor(self.gen_token_idx).to(dtype=tokens.dtype, device=tokens.device) decode_caption = self.tokenizer.decode(tokens, skip_special_tokens=False) self.font = self.font or ImageFont.load_default() cap_img = utils.create_image_of_text(decode_caption.encode('ascii', 'ignore'), width=self.image_size, nrows=2, font=self.font) return image_path, images, cap_img, tokens, caption_len, tokens, caption_len, clip_emb except Exception as e: print(f'Error reading for {image_path} with caption {caption}: {e}') # Pick a new example at random. idx = np.random.randint(0, len(self)-1)
gill-main
gill/data.py
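A minimal sketch of instantiating the `CsvDataset` above directly, assuming a local CC3M-style layout with a `clip_embs/` subdirectory of precomputed `.npy` caption embeddings (which `__getitem__` expects). The tokenizer name, paths, and `[IMG]` token indices below are placeholders, not values prescribed by the repo.

```python
from transformers import AutoTokenizer
from gill.data import CsvDataset

# Hypothetical paths and special-token ids; adjust to your setup.
tokenizer = AutoTokenizer.from_pretrained("facebook/opt-6.7b")
dataset = CsvDataset(
    "datasets/cc3m_val.tsv", "data/cc3m/validation/", tokenizer,
    img_key="image", caption_key="caption",
    feature_extractor_model="openai/clip-vit-large-patch14",
    train=False, max_len=32, precision="fp32", image_size=224,
    retrieval_token_idx=[50266], gen_token_idx=[50267],
    num_tokens=1, num_clip_tokens=77)

# __getitem__ returns (path, pixel_values, caption_image,
#                      ret_tokens, ret_len, gen_tokens, gen_len, clip_emb).
path, pixels, cap_img, ret_tok, ret_len, gen_tok, gen_len, clip_emb = dataset[0]
print(pixels.shape, clip_emb.shape)
```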
import collections import json import os import PIL from tqdm import tqdm from gill import utils # Download the Visual Storytelling SIS dataset from https://visionandlanguage.net/VIST/json_files/story-in-sequence/SIS-with-labels.tar.gz # Extract the files (there should be three sets: train, val, and test). # We use the val set for reporting results. vist_val_json_path = 'sis/val.story-in-sequence.json' # Output directory to save images. output_dir = 'sis/val_images/' os.makedirs(output_dir, exist_ok=True) # Path to save formatted annotations to. val_formatted_path = 'sis/val_formatted.json' if __name__ == '__main__': # Load VIST data. with open(vist_val_json_path, 'r') as f: vist_data_raw = json.load(f) # Format into a dictionary of {story_id: data} items. vist_data = { 'annotations': collections.defaultdict(list) } used_image_ids = [] for ann in vist_data_raw['annotations']: assert len(ann) == 1 ann = ann[0] story_id = ann['story_id'] vist_data['annotations'][story_id].append({ 'caption': ann['text'], 'image_id': ann['photo_flickr_id'], 'sequence_index': ann['worker_arranged_photo_order'], }) used_image_ids.append(ann['photo_flickr_id']) used_image_ids = set(used_image_ids) # Save formatted annotations. with open(val_formatted_path, 'w') as wf: json.dump(vist_data, wf) # Map image ids to urls. id2url = {} for image_data in vist_data_raw['images']: image_id = image_data['id'] if image_id in used_image_ids: image_url = image_data.get('url_o', None) if image_url is not None: id2url[image_id] = image_url # Download images. processed_images = set() print("Saving images to", output_dir) for story_idx, (story_id, story_data) in tqdm(enumerate(vist_data['annotations'].items()), total=len(vist_data['annotations'])): for s in story_data: image_id = s['image_id'] if image_id not in processed_images: output_path = os.path.join(output_dir, f'{image_id}.png') # Save image if we have the url and it doesn't already exist. if image_id in id2url and not os.path.exists(output_path): try: image = utils.get_image_from_url(id2url[image_id]) # Save image to output dir. with open(output_path, 'wb') as wf: image.save(wf) except PIL.UnidentifiedImageError: print("Error saving image", image_id) processed_images.add(image_id)
gill-main
evals/download_vist_images.py
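For reference, a small sketch of reading back the formatted annotation file written above; each story id maps to an ordered list of caption/image-id entries.

```python
import json

with open('sis/val_formatted.json', 'r') as f:
    vist_data = json.load(f)

# Print the first story in sequence order.
story_id, story = next(iter(vist_data['annotations'].items()))
for step in sorted(story, key=lambda s: s['sequence_index']):
    print(step['sequence_index'], step['image_id'], step['caption'][:60])
```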
import json import os import numpy as np from PIL import Image from transformers import CLIPProcessor, CLIPModel from tqdm import tqdm # Define the paths to the groundtruth / generated image directories. gen_img_dir = 'gill_vist_outputs/' gt_img_dir = "sis/val_images/" vist_data_path = 'sis/val_formatted.json' if __name__ == "__main__": # Load CLIP model. device = 'cuda' model_name = "openai/clip-vit-large-patch14" clip_model = CLIPModel.from_pretrained(model_name) clip_processor = CLIPProcessor.from_pretrained(model_name) clip_model.to(device) with open(vist_data_path, 'r') as f: vist_data = json.load(f) all_scores = [] for story_idx, (story_id, story_data) in tqdm(enumerate(vist_data['annotations'].items()), total=len(vist_data['annotations'])): gt_image_id = story_data[-1]['image_id'] gt_img_path = os.path.join(gt_img_dir, gt_image_id + '.png') gen_img_path = os.path.join(gen_img_dir, gt_image_id + '.png') if not os.path.exists(gt_img_path) or not os.path.exists(gen_img_path): print(f'Skipping example {story_id} because one of {gt_img_path} or {gen_img_path} does not exist.') else: # Load groundtruth image and compute its CLIP image features with open(gt_img_path, 'rb') as f: img = Image.open(f) inputs = clip_processor(images=img, return_tensors="pt") inputs = {k: v.cuda() for k, v in inputs.items()} gt_feat = clip_model.get_image_features(**inputs) # Compute generated image features. with open(gen_img_path, 'rb') as f: img = Image.open(f) inputs = clip_processor(images=img, return_tensors="pt") inputs = {k: v.cuda() for k, v in inputs.items()} image_feat = clip_model.get_image_features(**inputs) # Compute cosine similarity. score = ((image_feat / image_feat.norm()) @ (gt_feat / gt_feat.norm()).T).item() all_scores.append(score) score = np.mean(all_scores) print('CLIP similarity:', score) with open('vist_clip_similarity.txt', 'w') as wf: wf.write(str(score))
gill-main
evals/compute_clip_similarity_vist.py
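The per-pair score above can be factored into a small helper; this is a sketch of the same CLIP cosine-similarity computation for two arbitrary image paths, using the same `openai/clip-vit-large-patch14` backbone (kept on CPU here for simplicity).

```python
import torch
from PIL import Image
from transformers import CLIPModel, CLIPProcessor

model_name = "openai/clip-vit-large-patch14"
clip_model = CLIPModel.from_pretrained(model_name).eval()
clip_processor = CLIPProcessor.from_pretrained(model_name)

@torch.no_grad()
def clip_similarity(path_a: str, path_b: str) -> float:
    feats = []
    for path in (path_a, path_b):
        inputs = clip_processor(images=Image.open(path), return_tensors="pt")
        feat = clip_model.get_image_features(**inputs)
        feats.append(feat / feat.norm())
    return (feats[0] @ feats[1].T).item()
```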
import json import os import numpy as np from PIL import Image from transformers import CLIPProcessor, CLIPModel from tqdm import tqdm # Define the paths to the groundtruth / generated image directories. gen_img_dir = 'gill_visdial_outputs/' visdial_dir = 'VisualDialog/' if __name__ == "__main__": # Load CLIP model. device = 'cuda' model_name = "openai/clip-vit-large-patch14" clip_model = CLIPModel.from_pretrained(model_name) clip_processor = CLIPProcessor.from_pretrained(model_name) clip_model.to(device) # Load VisDial data. split = 'val' gt_img_dir = os.path.join(visdial_dir, f'VisualDialog_{split}2018/') with open(os.path.join(visdial_dir, f'visdial_1.0_{split}.json'), 'r') as f: visdial_data = json.load(f) with open(os.path.join(visdial_dir, f'visdial_1.0_{split}_dense_annotations.json'), 'r') as f: dense_data = json.load(f) # Sanity check for VisDial dialogues. assert len(dense_data) == len(visdial_data['data']['dialogs']) for i in range(len(dense_data)): assert dense_data[i]['image_id'] == visdial_data['data']['dialogs'][i]['image_id'] questions = visdial_data['data']['questions'] answers = visdial_data['data']['answers'] dialogs = visdial_data['data']['dialogs'] all_scores = [] for example_idx in tqdm(range(len(dialogs))): dialog = dialogs[example_idx] gt_image_id = str(dialog['image_id']).rjust(12, '0') gt_img_path = os.path.join(gt_img_dir, f'VisualDialog_{split}2018_{gt_image_id}.jpg') gen_img_path = os.path.join(gen_img_dir, f'{gt_image_id}.png') if not os.path.exists(gt_img_path) or not os.path.exists(gen_img_path): print(f'Skipping example {example_idx} because one of {gt_img_path} or {gen_img_path} does not exist.') else: # Load groundtruth image and compute its CLIP image features with open(gt_img_path, 'rb') as f: img = Image.open(f) inputs = clip_processor(images=img, return_tensors="pt") inputs = {k: v.cuda() for k, v in inputs.items()} gt_feat = clip_model.get_image_features(**inputs) # Compute generated image features. with open(gen_img_path, 'rb') as f: img = Image.open(f) inputs = clip_processor(images=img, return_tensors="pt") inputs = {k: v.cuda() for k, v in inputs.items()} image_feat = clip_model.get_image_features(**inputs) # Compute cosine similarity. score = ((image_feat / image_feat.norm()) @ (gt_feat / gt_feat.norm()).T).item() all_scores.append(score) score = np.mean(all_scores) print('CLIP similarity:', score) with open('visdial_clip_similarity.txt', 'w') as wf: wf.write(str(score))
gill-main
evals/compute_clip_similarity_visdial.py
"""Uses GILL to generate images for VIST interleaved image + text sequences. Example usage: python generate_vist_images.py gill_vist_outputs """ from collections import namedtuple import json import os import pickle as pkl import sys from PIL import Image import torch from tqdm import tqdm from gill import models # Path containing the VIST images. vist_image_dir = 'sis/val_images/' # Path containing the formatted VIST annotations. vist_data_path = 'sis/val_formatted.json' if __name__ == '__main__': output_dir = sys.argv[1] os.makedirs(output_dir, exist_ok=True) print('Saving to', output_dir) model = models.load_gill('checkpoints/gill_opt/', load_ret_embs=False) g_cuda = torch.Generator(device='cuda').manual_seed(42) # Fix the random seed. # Load VIST data. with open(vist_data_path, 'r') as f: vist_data = json.load(f) story_ids = list(vist_data['annotations'].keys()) for story_idx, (story_id, story_data) in tqdm(enumerate(vist_data['annotations'].items()), total=len(vist_data['annotations'])): # Load all images except the last (we're generating the last one) image_paths = [os.path.join(vist_image_dir, s['image_id'] + '.png') for s in story_data][:-1] gt_image_id = story_data[-1]['image_id'] captions = [s['caption'] for s in story_data] assert (len(image_paths) == len(captions) - 1) or (len(image_paths) == len(captions)) should_process = True for path in image_paths: if not os.path.exists(path): print(f'Image not found: {path}. Skipping story {story_id}') should_process = False break if should_process: caption_range = range(len(captions)) input_data = [] for i_i, i in enumerate(caption_range): caption = captions[i] input_data.append(caption) if i < len(captions) - 1: # Use first n-1 images with open(image_paths[i], 'rb') as f: img = Image.open(f).convert('RGB').resize((224, 224)) input_data.append(img) # Print outputs for first 3 examples as a sanity check. if story_idx < 3: print(input_data) # Set a really high ret scale so that we force the model to generate an image # This is equivalent to explicitly appending the [IMG] tokens to the input. return_outputs = model.generate_for_images_and_texts( input_data, num_words=2, gen_scale_factor=1e5, generator=g_cuda) # Save the generated image. generated_img = return_outputs[1]['gen'][0][0] with open(os.path.join(output_dir, f'{gt_image_id}.png'), 'wb') as f: generated_img.save(f) print("Saving to", os.path.join(output_dir, f'{gt_image_id}.png'))
gill-main
evals/generate_vist_images.py
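A minimal sketch of calling GILL on a short interleaved image + text context outside the VIST loop, assuming the same checkpoint directory as above; the prompt text and image path are placeholders, and the output indexing follows the script (`return_outputs[1]['gen'][0][0]`).

```python
import torch
from PIL import Image
from gill import models

model = models.load_gill('checkpoints/gill_opt/', load_ret_embs=False)
g_cuda = torch.Generator(device='cuda').manual_seed(42)

img = Image.open('example.png').convert('RGB').resize((224, 224))  # placeholder image
outputs = model.generate_for_images_and_texts(
    ['A dog playing on the beach.', img, 'The same dog at sunset.'],
    num_words=2, gen_scale_factor=1e5, generator=g_cuda)
outputs[1]['gen'][0][0].save('generated.png')
```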
import argparse import os import lpips import torchvision from tqdm import tqdm import numpy as np import ssl ssl._create_default_https_context = ssl._create_unverified_context parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('-d0','--dir0', type=str, default='./imgs/ex_dir0') parser.add_argument('-d1','--dir1', type=str, default='./imgs/ex_dir1') parser.add_argument('-o','--out', type=str, default='./imgs/example_dists.txt') parser.add_argument('-v','--version', type=str, default='0.1') parser.add_argument('--use_gpu', action='store_true', help='turn on flag to use GPU') opt = parser.parse_args() ## Initializing the model loss_fn = lpips.LPIPS(net='alex',version=opt.version) if(opt.use_gpu): loss_fn.cuda() # crawl directories f = open(opt.out,'w') files = os.listdir(opt.dir0) all_scores = [] for file in tqdm(files, total=len(files)): if(os.path.exists(os.path.join(opt.dir1,file))): # Load images img0 = lpips.im2tensor(lpips.load_image(os.path.join(opt.dir0,file))) # RGB image from [-1,1] img1 = lpips.im2tensor(lpips.load_image(os.path.join(opt.dir1,file))) img0 = torchvision.transforms.functional.resize(img0, (256, 256), antialias=True) img1 = torchvision.transforms.functional.resize(img1, (256, 256), antialias=True) if(opt.use_gpu): img0 = img0.cuda() img1 = img1.cuda() # Compute distance dist01 = loss_fn.forward(img0,img1) all_scores.append(dist01.item()) #print('%s: %.3f'%(file,dist01)) f.writelines('%s: %.6f\n'%(file,dist01)) f.writelines(f'Average: {np.mean(all_scores)}') f.close()
gill-main
evals/lpips_2dirs.py
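LPIPS usage in isolation, as a sketch: the metric expects (N, 3, H, W) tensors scaled to [-1, 1], which is what `lpips.im2tensor` produces above; lower scores mean more perceptually similar images.

```python
import torch
import lpips

loss_fn = lpips.LPIPS(net='alex')

img0 = torch.rand(1, 3, 256, 256) * 2 - 1  # random images in [-1, 1]
img1 = torch.rand(1, 3, 256, 256) * 2 - 1
print(loss_fn(img0, img1).item())
```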
"""Uses GILL to generate images for VisDial dialogue sequences. Example usage: python generate_visdial_images.py gill_visdial_outputs """ from collections import namedtuple import json import os import pickle as pkl import sys from PIL import Image import torch from tqdm import tqdm from gill import models # Download the VisDial validation annotations (https://www.dropbox.com/s/ibs3a0zhw74zisc/visdial_1.0_val.zip?dl=0), # the dense answer annotations (https://www.dropbox.com/s/3knyk09ko4xekmc/visdial_1.0_val_dense_annotations.json?dl=0) # and the images (https://www.dropbox.com/s/twmtutniktom7tu/VisualDialog_val2018.zip?dl=0). # Extract everything to the `VisualDialog` folder. visdial_dir = 'VisualDialog/' if __name__ == '__main__': output_dir = sys.argv[1] os.makedirs(output_dir, exist_ok=True) print('Saving to', output_dir) model = models.load_gill('checkpoints/gill_opt/', load_ret_embs=False) g_cuda = torch.Generator(device='cuda').manual_seed(42) # Fix the random seed. # Load VisDial data. split = 'val' img_dir = os.path.join(visdial_dir, f'VisualDialog_{split}2018/') with open(os.path.join(visdial_dir, f'visdial_1.0_{split}.json'), 'r') as f: visdial_data = json.load(f) with open(os.path.join(visdial_dir, f'visdial_1.0_{split}_dense_annotations.json'), 'r') as f: dense_data = json.load(f) # Sanity check for VisDial dialogues. assert len(dense_data) == len(visdial_data['data']['dialogs']) for i in range(len(dense_data)): assert dense_data[i]['image_id'] == visdial_data['data']['dialogs'][i]['image_id'] questions = visdial_data['data']['questions'] answers = visdial_data['data']['answers'] dialogs = visdial_data['data']['dialogs'] for example_idx in tqdm(range(len(dialogs))): dialog = dialogs[example_idx] image_id = str(dialog['image_id']).rjust(12, '0') contexts = [] for i in range(len(dialog['dialog'])): contexts.append('Q: ' + questions[dialog['dialog'][i]['question']] + '?') contexts.append('A: ' + answers[dialog['dialog'][i]['answer']]) cond_caption = '\n'.join(contexts) # Print inputs for the first few examples. if example_idx < 3: print(cond_caption) return_outputs = model.generate_for_images_and_texts( [cond_caption], num_words=2, gen_scale_factor=1e5, generator=g_cuda) gen_img = return_outputs[1]['gen'][0][0] with open(os.path.join(output_dir, f'{image_id}.png'), 'wb') as wf: gen_img.save(wf)
gill-main
evals/generate_visdial_images.py
"""Runs Stable Diffusion v1.5 to generate images for PartiPrompts. Example usage: python scripts/generate_sd_p2_images.py data/PartiPromptsAllDecisions.tsv partiprompts_sd_v1.5_outputs """ import os import sys import numpy as np import torch from tqdm import tqdm from diffusers import StableDiffusionPipeline p2_fn = sys.argv[1] output_dir = sys.argv[2] batch_size = 16 if __name__ == "__main__": os.makedirs(output_dir, exist_ok=True) pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16) pipe = pipe.to("cuda") # NOTE: You may have to uncomment this line to override the safety checker # to avoid getting empty images for certain prompts. # pipe.safety_checker = lambda images, clip_input: (images, False) # Load PartiPrompts. with open(p2_fn, 'r') as f: captions = [] filenames = [] for i, line in enumerate(f.readlines()[1:]): data = line.strip().split('\t') captions.append(data[0]) filenames.append(f'{i}.png') g_cuda = torch.Generator(device='cuda').manual_seed(1337) # Generate images in batches. num_batches = int(np.ceil(len(filenames) / batch_size)) for i in tqdm(range(num_batches)): start_idx = i * batch_size end_idx = start_idx + batch_size images = pipe(captions[start_idx:end_idx], generator=g_cuda).images for j, fn in enumerate(filenames[start_idx:end_idx]): with open(os.path.join(output_dir, fn), 'wb') as wf: images[j].save(wf)
gill-main
scripts/generate_sd_p2_images.py
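A single-prompt sketch of the same diffusers pipeline with a fixed seed; the prompt is a placeholder.

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16).to("cuda")
g_cuda = torch.Generator(device="cuda").manual_seed(1337)

image = pipe("a watercolor painting of a fox in the snow", generator=g_cuda).images[0]
image.save("fox.png")
```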
"""Prunes model weights to keep just the necessary trained weights. Example usage: python scripts/prune_model_ckpt.py runs/gill_exp """ import json import os import sys import torch if __name__ == '__main__': model_dir = sys.argv[1] with open(os.path.join(model_dir, 'ckpt_best.pth.tar'), 'rb') as f: checkpoint = torch.load(f) with open(os.path.join(model_dir, 'model_args.json'), 'rb') as f: model_args = json.load(f) del checkpoint['epoch'] del checkpoint['best_acc1'] del checkpoint['optimizer'] del checkpoint['scheduler'] state_dict = {} for k, v in checkpoint['state_dict'].items(): state_dict[k.replace('module.', '')] = v.detach().clone() checkpoint['state_dict'] = state_dict finetuned_tokens = checkpoint['state_dict']['model.input_embeddings.weight'][-model_args['num_gen_tokens']:, :].detach().clone() checkpoint['state_dict']['model.input_embeddings.weight'] = finetuned_tokens with open(os.path.join(model_dir, 'pretrained_ckpt.pth.tar'), 'wb') as f: torch.save(checkpoint, f)
gill-main
scripts/prune_model_ckpt.py
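A sketch of inspecting the pruned checkpoint afterwards; per the script above, the `module.` prefix has been stripped from every key and `model.input_embeddings.weight` has been truncated to just the learned `[IMG]` token rows.

```python
import torch

checkpoint = torch.load('runs/gill_exp/pretrained_ckpt.pth.tar', map_location='cpu')

for name, tensor in checkpoint['state_dict'].items():
    print(name, tuple(tensor.shape))
```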
"""Preprocesses annotated PartiPrompts decisions to keep only those with high inter-annotator agreement. Example usage: python scripts/process_p2_annotations.py """ import collections if __name__ == "__main__": # Load the annotated PartiPrompts. id2decision = {} with open('data/PartiPromptsAllDecisions_Alignment.tsv', 'r') as f: outputs = f.readlines() for i in range(1, len(outputs)): votes = outputs[i].split('\t')[-1].strip().split(',') id2decision[i] = votes # # Filter Confident Examples # Set examples *without* high inter-annotator agreement to have the 'same' vote. id2vote = {} for p_id in id2decision: counts = collections.Counter(id2decision[p_id]) if (counts['gen'] >= 4 or counts['ret'] >= 4) or \ (counts['gen'] == 3 and counts['ret'] <= 1) or \ (counts['ret'] == 3 and counts['gen'] <= 1): id2vote[p_id] = counts.most_common(1)[0][0] else: id2vote[p_id] = 'same' print(collections.Counter(id2vote.values())) output_path = 'data/PartiPromptsDecisionsConfident.tsv' with open(output_path, 'w') as wf: wf.write(outputs[0].replace('\tDecisions\n', '\tDecision\n')) for i in range(1, len(outputs)): # Remove last column. curr_data = outputs[i].split('\t') curr_data = '\t'.join(curr_data[:-1]) # Add majority vote into the new column. curr_data += f'\t{id2vote[i]}' wf.write(curr_data + '\n') print('Saved to', output_path)
gill-main
scripts/process_p2_annotations.py
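The agreement rule above in isolation, as a toy sketch: an example keeps its majority label only when at least four annotators agree, or when three agree and at most one picks the opposing label; everything else falls back to 'same'.

```python
import collections

def majority_or_same(votes):
    counts = collections.Counter(votes)
    if (counts['gen'] >= 4 or counts['ret'] >= 4) or \
       (counts['gen'] == 3 and counts['ret'] <= 1) or \
       (counts['ret'] == 3 and counts['gen'] <= 1):
        return counts.most_common(1)[0][0]
    return 'same'

print(majority_or_same(['gen', 'gen', 'gen', 'gen', 'ret']))   # gen
print(majority_or_same(['ret', 'ret', 'ret', 'same', 'gen']))  # ret
print(majority_or_same(['gen', 'gen', 'ret', 'ret', 'same']))  # same
```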
"""This script extracts text embeddings from the text encoder of Stable Diffusion for a given dataset of captions, and saves them to disk. The outputs are used in training GILL. Example usage: python scripts/preprocess_sd_embeddings.py datasets/cc3m_val.tsv data/cc3m/validation/clip_embs """ import numpy as np import os import sys from joblib import Parallel, delayed from tqdm import tqdm import torch # Load a slightly modified version of the Stable Diffusion pipeline. # This allows us to extract text embeddings directly (without generating images). from gill.custom_sd import StableDiffusionPipeline # Default arguments for running preprocessing. model_id = "runwayml/stable-diffusion-v1-5" batch_size = 128 input_captions_fp = sys.argv[1] # tab separated file of captions and image ids clip_output_dir = sys.argv[2] # output directory to save clip embeddings in os.makedirs(clip_output_dir, exist_ok=True) def save_to_path(emb, path): """Save embeddings to disk.""" try: with open(path, 'wb') as wf: np.save(wf, emb) except: print("Error with", path) return path if __name__ == '__main__': dtype = torch.float16 if torch.cuda.is_available() else torch.float32 pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=dtype) if not torch.cuda.is_available(): print('WARNING: using CPU, this will be slow!') else: pipe = pipe.to("cuda") # Get existing files, so that we don't recompute them. existing_files = set([f.strip('.npy') for f in os.listdir(clip_output_dir)]) # Load captions and associated image ids. with open(input_captions_fp, 'r') as f: data = f.readlines() examples = data[1:] captions = [] image_ids = [] for x in examples: d = x.strip().split('\t') if d[1] not in existing_files: captions.append(d[0]) image_ids.append(d[1]) assert len(captions) == len(image_ids) # Extract embeddings in batches. num_batches = int(np.ceil(len(captions) / batch_size)) for i in tqdm(range(num_batches)): start_idx = i * batch_size end_idx = start_idx + batch_size batch_captions = captions[start_idx:end_idx] batch_ids = image_ids[start_idx:end_idx] prompt_embeds = pipe(batch_captions, return_prompts_only=True).detach().cpu().numpy() # Save embeddings to disk in parallel. Parallel(n_jobs=8)(delayed(save_to_path)( prompt_embeds[j, :, ...], os.path.join(clip_output_dir, f'{batch_ids[j]}.npy') ) for j in range(prompt_embeds.shape[0]))
gill-main
scripts/preprocess_sd_embeddings.py
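A sketch of loading one of the saved embeddings back as a sanity check; for Stable Diffusion v1.5 each per-caption prompt embedding is expected to be (77, 768), which the GILL dataloader later slices down to `num_clip_tokens`. The image id in the path is a placeholder.

```python
import numpy as np

emb = np.load('data/cc3m/validation/clip_embs/0.npy')  # hypothetical image id
print(emb.shape, emb.dtype)  # expected: (77, 768), float16 when computed on GPU
```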
from setuptools import setup, find_packages setup( name = 'flash-attention-jax', packages = find_packages(exclude=[]), version = '0.2.0', license='MIT', description = 'Flash Attention - in Jax', author = 'Phil Wang', author_email = '[email protected]', long_description_content_type = 'text/markdown', url = 'https://github.com/lucidrains/flash-attention-jax', keywords = [ 'artificial intelligence', 'deep learning', 'transformers', 'attention mechanism', 'jax' ], install_requires=[ 'einops', 'jax>=0.2.20' ], classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'Topic :: Scientific/Engineering :: Artificial Intelligence', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 3.6', ], )
flash-attention-jax-main
setup.py
import jax from jax import nn from jax import jit, numpy as jnp from jax.numpy import einsum from einops import rearrange EPSILON = 1e-10 MASK_VALUE = -1e10 COSINE_SIM_SCALE = 10 @jit def attention(q, k, v, key_mask): dim, k_len = q.shape[-1], k.shape[-2] scale = 1 / jnp.sqrt(dim) q = q * scale sim = einsum('... i d, ... j d -> ... i j', q, k) key_mask = rearrange(key_mask, 'b j -> b 1 1 j') sim = jnp.where(key_mask, sim, MASK_VALUE) attn = nn.softmax(sim, axis = -1) return attn @ v @jit def causal_attention(q, k, v): q_len, dim, k_len = *q.shape[-2:], k.shape[-2] scale = 1 / jnp.sqrt(dim) q = q * scale sim = einsum('... i d, ... j d -> ... i j', q, k) causal_mask = jnp.triu(jnp.ones((q_len, k_len)), k_len - q_len + 1) sim = jnp.where(causal_mask, MASK_VALUE, sim) attn = nn.softmax(sim, axis = -1) return einsum('... i j, ... j d -> ... i d', attn, v) # cosine sim attention @jit def l2norm(t): return t / (jnp.linalg.norm(t) + EPSILON) @jit def cosine_sim_attention(q, k, v, key_mask): dim, k_len = q.shape[-1], k.shape[-2] q, k = map(l2norm, (q, k)) sim = einsum('... i d, ... j d -> ... i j', q, k) * COSINE_SIM_SCALE key_mask = rearrange(key_mask, 'b j -> b 1 1 j') sim = jnp.where(key_mask, sim, MASK_VALUE) attn = nn.softmax(sim, axis = -1) return einsum('... i j, ... j d -> ... i d', attn, v)
flash-attention-jax-main
flash_attention_jax/attention.py
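A shape-level sketch of the reference (non-flash) kernels above: q/k/v are laid out as (batch, heads, seq, dim) and the key mask is (batch, key_seq).

```python
from jax import random, numpy as jnp
from flash_attention_jax import attention, causal_attention

k1, k2, k3 = random.split(random.PRNGKey(0), 3)
q = random.normal(k1, (2, 4, 128, 64))   # (batch, heads, q_len, dim)
k = random.normal(k2, (2, 4, 256, 64))
v = random.normal(k3, (2, 4, 256, 64))
key_mask = jnp.ones((2, 256), dtype=bool)

out = attention(q, k, v, key_mask)       # (2, 4, 128, 64)
causal_out = causal_attention(q, k, v)   # (2, 4, 128, 64)
print(out.shape, causal_out.shape)
```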
import math from functools import partial import jax from jax import lax, numpy as jnp, jit # constants HIGHEST_PRECISION = jax.lax.Precision.HIGHEST einsum = partial(jnp.einsum, precision = HIGHEST_PRECISION) # Figure 1 from https://arxiv.org/abs/2112.05682 # cleaned up def _query_chunk_attention(q, k, v, k_chunk_size = 4096): q_len, k_len, dim, v_dim = q.shape[-2], *k.shape, v.shape[-1] k_chunk_size = min(k_chunk_size, k_len) q = q / jnp.sqrt(dim) @partial(jax.checkpoint, prevent_cse = False) def summarize_chunk(q, k, v): attn_weights = einsum('qd, kd -> qk', q, k) max_score = jnp.max(attn_weights, axis = -1, keepdims = True) max_score = jax.lax.stop_gradient(max_score) exp_weights = jnp.exp(attn_weights - max_score) exp_values = einsum('vf, qv -> qf', v, exp_weights) return (exp_values, exp_weights.sum(axis = -1), max_score.reshape((q_len,))) def chunk_scanner(chunk_idx): k_chunk = lax.dynamic_slice(k, (chunk_idx, 0), slice_sizes=(k_chunk_size, dim)) v_chunk = lax.dynamic_slice(v, (chunk_idx, 0), slice_sizes=(k_chunk_size, v_dim)) return summarize_chunk(q, k_chunk, v_chunk) chunk_values, chunk_weights, chunk_max = jax.lax.map(chunk_scanner, xs = jnp.arange(0, k_len, k_chunk_size)) global_max = jnp.max(chunk_max, axis = 0, keepdims = True) max_diffs = jnp.exp(chunk_max - global_max) chunk_values *= jnp.expand_dims(max_diffs, axis=-1) chunk_weights *= max_diffs all_values = chunk_values.sum(axis = 0) all_weights = jnp.expand_dims(chunk_weights, -1).sum(axis = 0) return all_values / all_weights @jit def rabe_attention(q, k, v, q_chunk_size = 1024, k_chunk_size = 4096): q_len, dim, v_dim = *q.shape, v.shape[-1] def chunk_scanner(chunk_idx, _): q_chunk = lax.dynamic_slice(q, (chunk_idx, 0), slice_sizes = (min(q_chunk_size, q_len), dim)) return (chunk_idx + q_chunk_size, _query_chunk_attention(q_chunk, k, v, k_chunk_size = k_chunk_size)) _, res = jax.lax.scan(chunk_scanner, init = 0, xs = None, length = math.ceil(q_len / q_chunk_size)) return res.reshape(q_len, v_dim)
flash-attention-jax-main
flash_attention_jax/rabe_attention.py
from flash_attention_jax.flash_attention import flash_attention from flash_attention_jax.cosine_sim_flash_attention import cosine_sim_flash_attention from flash_attention_jax.causal_flash_attention import causal_flash_attention from flash_attention_jax.rabe_attention import rabe_attention from flash_attention_jax.attention import attention, causal_attention, cosine_sim_attention from flash_attention_jax.utils import value_and_grad_difference, PRNGKeyGenerator plain_attention = attention
flash-attention-jax-main
flash_attention_jax/__init__.py
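The package also exports a small utility for checking that the memory-efficient kernels match the reference in both outputs and gradients; a sketch with reduced sizes (the utility's defaults use much longer sequences).

```python
from flash_attention_jax import (flash_attention, plain_attention,
                                 value_and_grad_difference)

diff, (dq_diff, dk_diff, dv_diff) = value_and_grad_difference(
    plain_attention, flash_attention,
    seed=42, batch=1, heads=2, q_seq_len=2048, k_seq_len=2048, dim=64)
print('max output difference:', diff)
print('max gradient differences:', dq_diff, dk_diff, dv_diff)
```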
import math import jax from functools import partial from jax import nn from jax import custom_vjp from jax import numpy as jnp, lax, jit # constants EPSILON = 1e-10 MASK_VALUE = -1e10 Q_CHUNK_SIZE = 1024 K_CHUNK_SIZE = 1024 COSINE_SIM_SCALE = 10 # this may need to be a function of log(sequence length), but 16 was sufficient for 2048 and 4096 in my tests # flash attention def _query_chunk_flash_attention(chunk_idx, q, k, v, key_mask): q_len, k_len, dim, v_dim = q.shape[-2], *k.shape, v.shape[-1] def chunk_scanner(carries, _): chunk_idx, out, row_sum = carries k_chunk_sizes = min(K_CHUNK_SIZE, k_len) k_chunk = lax.dynamic_slice(k, (chunk_idx, 0), slice_sizes=(k_chunk_sizes, dim)) v_chunk = lax.dynamic_slice(v, (chunk_idx, 0), slice_sizes=(k_chunk_sizes, v_dim)) key_mask_chunk = lax.dynamic_slice(key_mask, (chunk_idx,), slice_sizes=(k_chunk_sizes,)) attn_weights = (q @ k_chunk.transpose() * COSINE_SIM_SCALE) - COSINE_SIM_SCALE # the output of this will range from [-2 * scale, 0], and the row sums are now bounded by key/value sequence length - you can also shift this more if you wish to tailor the normalization constant (in the case of extreme sequence lengths) attn_weights = jnp.where(key_mask_chunk, attn_weights, MASK_VALUE) exp_weights = jnp.exp(attn_weights) exp_weights = jnp.where(key_mask_chunk, exp_weights, 0.) block_row_sum = jnp.sum(exp_weights, axis = -1, keepdims = True) exp_values = exp_weights @ v_chunk chunk_out = exp_values / k_len return (chunk_idx + k_chunk_sizes, out + chunk_out, row_sum + block_row_sum), None out = jnp.zeros((q_len, dim)) row_sum = jnp.zeros((q_len, 1)) (_, out, row_sum), _ = lax.scan(chunk_scanner, init = (0, out, row_sum), xs = None, length = math.ceil(k_len / K_CHUNK_SIZE)) out = out * (k_len / (row_sum + EPSILON)) # renormalize after acquiring all the correct row sums out = out.reshape(q_len, v_dim) row_sum = row_sum.reshape(q_len) return out, row_sum @jit def l2norm(t): return t / (jnp.linalg.norm(t) + EPSILON) @jit def cosine_sim_flash_attention(q, k, v, key_mask): q, k = map(l2norm, (q, k)) return cosine_sim_flash_attention_after_l2norm(q, k, v, key_mask) def _cosine_sim_flash_attention_after_l2norm(q, k, v, key_mask): q_len, dim, v_dim = *q.shape, v.shape[-1] def chunk_scanner(chunk_idx, _): chunk_sizes = min(Q_CHUNK_SIZE, q_len) q_chunk = lax.dynamic_slice(q, (chunk_idx, 0), slice_sizes = (chunk_sizes, dim)) return (chunk_idx + chunk_sizes, _query_chunk_flash_attention(chunk_idx, q_chunk, k, v, key_mask)) _, (out, row_sum) = lax.scan(chunk_scanner, init = 0, xs = None, length = math.ceil(q_len / Q_CHUNK_SIZE)) out = out.reshape(q_len, v_dim) row_sum = row_sum.reshape(q_len) return out, (row_sum,) @custom_vjp def cosine_sim_flash_attention_after_l2norm(q, k, v, key_mask): out, _ = _cosine_sim_flash_attention_after_l2norm(q, k, v, key_mask) return out @jit def flash_attention_forward(q, k, v, key_mask): out, (row_sum,) = _cosine_sim_flash_attention_after_l2norm(q, k, v, key_mask) return out, (q, k, v, key_mask, out, row_sum) def _query_chunk_flash_attention_backward(q, k, v, key_mask,o, do, l): q_len, dim, k_len, v_dim = *q.shape, *v.shape def chunk_scanner(carries, _): chunk_idx, dq = carries k_chunk_sizes = min(K_CHUNK_SIZE, k_len) k_chunk = lax.dynamic_slice(k, (chunk_idx, 0), slice_sizes=(k_chunk_sizes, dim)) v_chunk = lax.dynamic_slice(v, (chunk_idx, 0), slice_sizes=(k_chunk_sizes, v_dim)) key_mask_chunk = lax.dynamic_slice(key_mask, (chunk_idx,), slice_sizes=(k_chunk_sizes,)) attn_weights = q @ k_chunk.transpose() * COSINE_SIM_SCALE - 
COSINE_SIM_SCALE exp_attn_weights = jnp.exp(attn_weights) exp_attn_weights = jnp.where(key_mask_chunk, exp_attn_weights, 0.) p = exp_attn_weights / (l + EPSILON) dv_chunk = p.transpose() @ do dp = do @ v_chunk.transpose() D = jnp.sum(do * o, axis = -1, keepdims = True) ds = p * COSINE_SIM_SCALE * (dp - D) dq_chunk = ds @ k_chunk dk_chunk = ds.transpose() @ q return (chunk_idx + k_chunk_sizes, dq + dq_chunk), (dk_chunk, dv_chunk) dq = jnp.zeros_like(q) (_, dq), (dk, dv) = lax.scan(chunk_scanner, init = (0, dq), xs = None, length = math.ceil(k_len / K_CHUNK_SIZE)) dq = dq.reshape(q_len, dim) dk = dk.reshape(k_len, v_dim) dv = dv.reshape(k_len, v_dim) return dq, dk, dv @jit def flash_attention_backward(res, do): q, k, v, key_mask, o, l = res q_len, dim = q.shape dk = jnp.zeros_like(k) dv = jnp.zeros_like(v) l = l.reshape(q_len, 1) def chunk_scanner(carries, _): chunk_idx, dk, dv = carries chunk_sizes = min(Q_CHUNK_SIZE, q_len) q_chunk = lax.dynamic_slice(q, (chunk_idx, 0), slice_sizes = (chunk_sizes, q.shape[-1])) l_chunk = lax.dynamic_slice(l, (chunk_idx, 0), slice_sizes = (chunk_sizes, 1)) o_chunk = lax.dynamic_slice(o, (chunk_idx, 0), slice_sizes = (chunk_sizes, o.shape[-1])) do_chunk = lax.dynamic_slice(do, (chunk_idx, 0), slice_sizes = (chunk_sizes, do.shape[-1])) dq_chunk, dk_chunk, dv_chunk = _query_chunk_flash_attention_backward(q_chunk, k, v, key_mask, o_chunk, do_chunk, l_chunk) return (chunk_idx + chunk_sizes, dk + dk_chunk, dv + dv_chunk), dq_chunk (_, dk, dv), dq = lax.scan(chunk_scanner, init = (0, dk, dv), xs = None, length = math.ceil(q_len / Q_CHUNK_SIZE)) dq = dq.reshape(q_len, dim) return dq, dk, dv, None cosine_sim_flash_attention_after_l2norm.defvjp(flash_attention_forward, flash_attention_backward)
flash-attention-jax-main
flash_attention_jax/cosine_sim_flash_attention.py
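Note that, unlike the batched `flash_attention`, this cosine-sim variant appears to operate on un-batched (seq, dim) arrays with a 1-D key mask; a shape-level sketch:

```python
from jax import random, numpy as jnp
from flash_attention_jax import cosine_sim_flash_attention

k1, k2, k3 = random.split(random.PRNGKey(0), 3)
q = random.normal(k1, (1024, 64))    # (q_len, dim)
k = random.normal(k2, (2048, 64))    # (k_len, dim)
v = random.normal(k3, (2048, 64))
key_mask = jnp.ones((2048,), dtype=bool)

out = cosine_sim_flash_attention(q, k, v, key_mask)
print(out.shape)                     # (1024, 64)
```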
import math import jax from functools import partial from jax import nn from jax import custom_vjp from jax import numpy as jnp, lax, jit # constants EPSILON = 1e-10 MASK_VALUE = -1e10 Q_CHUNK_SIZE = 1024 K_CHUNK_SIZE = 1024 # flash attention def _query_chunk_flash_attention(q_range_chunk, k_range, q, k, v): q_len, k_len, dim, v_dim = q.shape[-2], *k.shape, v.shape[-1] scale = 1 / jnp.sqrt(dim) q_scaled = q * scale def chunk_scanner(carries, _): key_chunk_idx, out, row_sum, row_max = carries k_chunk_sizes = min(K_CHUNK_SIZE, k_len) k_chunk = lax.dynamic_slice(k, (key_chunk_idx, 0), slice_sizes=(k_chunk_sizes, dim)) v_chunk = lax.dynamic_slice(v, (key_chunk_idx, 0), slice_sizes=(k_chunk_sizes, v_dim)) k_range_chunk = lax.dynamic_slice(k_range, (0, key_chunk_idx), slice_sizes=(1, k_chunk_sizes)) causal_mask = q_range_chunk < k_range_chunk attn_weights = q_scaled @ k_chunk.transpose() attn_weights = jnp.where(causal_mask, MASK_VALUE, attn_weights) block_row_max = jnp.max(attn_weights, axis = -1, keepdims = True) exp_weights = jnp.exp(attn_weights - block_row_max) exp_weights = jnp.where(causal_mask, 0., exp_weights) block_row_sum = jnp.sum(exp_weights, axis = -1, keepdims = True) + EPSILON exp_values = exp_weights @ v_chunk new_row_max = jnp.maximum(block_row_max, row_max) exp_row_max_diff = jnp.exp(row_max - new_row_max) exp_block_row_max_diff = jnp.exp(block_row_max - new_row_max) new_row_sum = exp_row_max_diff * row_sum + exp_block_row_max_diff * block_row_sum out = (row_sum / new_row_sum) * exp_row_max_diff * out + \ (exp_block_row_max_diff / new_row_sum) * exp_values return (key_chunk_idx + k_chunk_sizes, out, new_row_sum, new_row_max), None out = jnp.zeros((q_len, dim)) row_sum = jnp.zeros((q_len, 1)) row_max = jnp.ones((q_len, 1)) * -1e6 (_, out, row_sum, row_max), _ = lax.scan(chunk_scanner, init = (0, out, row_sum, row_max), xs = None, length = math.ceil(k_len / K_CHUNK_SIZE)) out = out.reshape(q_len, v_dim) row_sum = row_sum.reshape(q_len) row_max = row_max.reshape(q_len) return out, row_sum, row_max def _causal_flash_attention(q, k, v): q_len, dim, k_len, v_dim = *q.shape, *v.shape q_range = jnp.arange(q_len).reshape(q_len, 1) + (k_len - q_len) k_range = jnp.arange(k_len).reshape(1, k_len) def chunk_scanner(chunk_idx, _): chunk_sizes = min(Q_CHUNK_SIZE, q_len) q_chunk = lax.dynamic_slice(q, (chunk_idx, 0), slice_sizes = (chunk_sizes, dim)) q_range_chunk = lax.dynamic_slice(q_range, (chunk_idx, 0), slice_sizes = (chunk_sizes, 1)) return (chunk_idx + chunk_sizes, _query_chunk_flash_attention(q_range_chunk, k_range, q_chunk, k, v)) _, (out, row_sum, row_max) = lax.scan(chunk_scanner, init = 0, xs = None, length = math.ceil(q_len / Q_CHUNK_SIZE)) out = out.reshape(q_len, v_dim) row_sum = row_sum.reshape(q_len) row_max = row_max.reshape(q_len) return out, (row_sum, row_max) @custom_vjp def causal_flash_attention(q, k, v): out, _ = _causal_flash_attention(q, k, v) return out @jit def flash_attention_forward(q, k, v): out, (row_sum, row_max) = _causal_flash_attention(q, k, v) return out, (q, k, v, out, row_sum, row_max) def _query_chunk_flash_attention_backward(query_range_chunk, key_range, q, k, v, o, do, l, m): q_len, dim, k_len, v_dim = *q.shape, *v.shape scale = 1 / jnp.sqrt(dim) q_scaled = q * scale def chunk_scanner(carries, _): key_chunk_idx, dq = carries k_chunk_sizes = min(K_CHUNK_SIZE, k_len) k_chunk = lax.dynamic_slice(k, (key_chunk_idx, 0), slice_sizes=(k_chunk_sizes, dim)) v_chunk = lax.dynamic_slice(v, (key_chunk_idx, 0), slice_sizes=(k_chunk_sizes, v_dim)) key_range_chunk = 
lax.dynamic_slice(key_range, (0, key_chunk_idx), slice_sizes=(1, k_chunk_sizes)) causal_mask = query_range_chunk < key_range_chunk attn_weights = q_scaled @ k_chunk.transpose() attn_weights = jnp.where(causal_mask, MASK_VALUE, attn_weights) exp_attn_weights = jnp.exp(attn_weights - m) exp_attn_weights = jnp.where(causal_mask, 0., exp_attn_weights) p = exp_attn_weights / l dv_chunk = p.transpose() @ do dp = do @ v_chunk.transpose() D = jnp.sum(do * o, axis = -1, keepdims = True) ds = p * scale * (dp - D) dq_chunk = ds @ k_chunk dk_chunk = ds.transpose() @ q return (key_chunk_idx + k_chunk_sizes, dq + dq_chunk), (dk_chunk, dv_chunk) dq = jnp.zeros_like(q) (_, dq), (dk, dv) = lax.scan(chunk_scanner, init = (0, dq), xs = None, length = math.ceil(k_len / K_CHUNK_SIZE)) dq = dq.reshape(q_len, dim) dk = dk.reshape(k_len, v_dim) dv = dv.reshape(k_len, v_dim) return dq, dk, dv @jit def flash_attention_backward(res, do): q, k, v, o, l, m = res q_len, dim, k_len, v_dim = *q.shape, *v.shape dk = jnp.zeros_like(k) dv = jnp.zeros_like(v) m = m.reshape(q_len, 1) l = l.reshape(q_len, 1) q_range = jnp.arange(q_len).reshape(q_len, 1) + (k_len - q_len) k_range = jnp.arange(k_len).reshape(1, k_len) def chunk_scanner(carries, _): chunk_idx, dk, dv = carries chunk_sizes = min(Q_CHUNK_SIZE, q_len) q_chunk = lax.dynamic_slice(q, (chunk_idx, 0), slice_sizes = (chunk_sizes, q.shape[-1])) q_range_chunk = lax.dynamic_slice(q_range, (chunk_idx, 0), slice_sizes = (chunk_sizes, 1)) m_chunk = lax.dynamic_slice(m, (chunk_idx, 0), slice_sizes = (chunk_sizes, 1)) l_chunk = lax.dynamic_slice(l, (chunk_idx, 0), slice_sizes = (chunk_sizes, 1)) o_chunk = lax.dynamic_slice(o, (chunk_idx, 0), slice_sizes = (chunk_sizes, o.shape[-1])) do_chunk = lax.dynamic_slice(do, (chunk_idx, 0), slice_sizes = (chunk_sizes, do.shape[-1])) dq_chunk, dk_chunk, dv_chunk = _query_chunk_flash_attention_backward(q_range_chunk, k_range, q_chunk, k, v, o_chunk, do_chunk, l_chunk, m_chunk) return (chunk_idx + chunk_sizes, dk + dk_chunk, dv + dv_chunk), dq_chunk (_, dk, dv), dq = lax.scan(chunk_scanner, init = (0, dk, dv), xs = None, length = math.ceil(q_len / Q_CHUNK_SIZE)) dq = dq.reshape(q_len, dim) return dq, dk, dv causal_flash_attention.defvjp(flash_attention_forward, flash_attention_backward)
flash-attention-jax-main
flash_attention_jax/causal_flash_attention.py
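The causal variant is likewise head-less here; a quick numerical check against the reference `causal_attention` above, which should agree up to float32 accumulation differences:

```python
from jax import random, numpy as jnp
from flash_attention_jax import causal_attention, causal_flash_attention

k1, k2, k3 = random.split(random.PRNGKey(0), 3)
q = random.normal(k1, (1024, 64))
k = random.normal(k2, (1024, 64))
v = random.normal(k3, (1024, 64))

out = causal_flash_attention(q, k, v)
ref = causal_attention(q, k, v)
print(jnp.max(jnp.abs(out - ref)))   # expected to be small (float32 rounding only)
```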
import jax from functools import partial import jax.numpy as jnp from jax import random from jax import value_and_grad def value_and_grad_wrapper(fn, **kwargs): @partial(value_and_grad, **kwargs) def inner(*args, **kwargs): return jnp.sum(fn(*args, **kwargs)) return inner def diff(t1, t2): return jnp.max(jnp.abs(t1 - t2)) def PRNGKeyGenerator(seed = 42): key = random.PRNGKey(seed) while True: sub_key, key = random.split(key) yield sub_key def value_and_grad_difference( fn1, fn2, seed = 42, batch = 2, heads = 4, q_seq_len = 4096, k_seq_len = 8192, add_key_mask = True, dim = 512 ): key_gen = PRNGKeyGenerator(seed) q = random.normal(next(key_gen), (batch, heads, q_seq_len, dim)) k = random.normal(next(key_gen), (batch, heads, k_seq_len, dim)) v = random.normal(next(key_gen), (batch, heads, k_seq_len, dim)) key_mask = random.randint(next(key_gen), (batch, k_seq_len), 0, 2) == 1 fn1_value_and_grad, fn2_value_and_grad = map(partial(value_and_grad_wrapper, argnums = (0, 1, 2)), (fn1, fn2)) args = (q, k, v) if add_key_mask: args = (*args, key_mask) o1, grads1 = fn1_value_and_grad(*args) o2, grads2 = fn2_value_and_grad(*args) return diff(o1, o2), [diff(*args) for args in zip(grads1, grads2)]
flash-attention-jax-main
flash_attention_jax/utils.py
import math import jax from functools import partial from jax import nn from jax import custom_vjp from jax import numpy as jnp, lax, jit from jax.numpy import einsum from einops import rearrange # constants EPSILON = 1e-10 MASK_VALUE = -1e10 Q_CHUNK_SIZE = 1024 K_CHUNK_SIZE = 1024 # flash attention def _query_chunk_flash_attention(chunk_idx, q, k, v, key_mask): q_len, batch, heads, dim, k_len, v_dim = *q.shape, k.shape[0], v.shape[-1] scale = 1 / jnp.sqrt(dim) q_scaled = q * scale def chunk_scanner(carries, _): chunk_idx, out, row_sum, row_max = carries k_chunk_sizes = min(K_CHUNK_SIZE, k_len) k_chunk = lax.dynamic_slice(k, (chunk_idx, 0, 0, 0), slice_sizes=(k_chunk_sizes, batch, heads, dim)) v_chunk = lax.dynamic_slice(v, (chunk_idx, 0, 0, 0), slice_sizes=(k_chunk_sizes, batch, heads, v_dim)) key_mask_chunk = lax.dynamic_slice(key_mask, (chunk_idx, 0), slice_sizes=(k_chunk_sizes, batch)) attn_weights = einsum('i ... d, j ... d -> i ... j', q_scaled, k_chunk) key_mask_chunk = rearrange(key_mask_chunk, 'j b -> 1 b 1 j') attn_weights = jnp.where(key_mask_chunk, attn_weights, MASK_VALUE) block_row_max = jnp.max(attn_weights, axis = -1, keepdims = True) exp_weights = jnp.exp(attn_weights - block_row_max) exp_weights = jnp.where(key_mask_chunk, exp_weights, 0.) block_row_sum = jnp.sum(exp_weights, axis = -1, keepdims = True) + EPSILON exp_values = einsum('i ... j, j ... d -> i ... d', exp_weights, v_chunk) new_row_max = jnp.maximum(block_row_max, row_max) exp_row_max_diff = jnp.exp(row_max - new_row_max) exp_block_row_max_diff = jnp.exp(block_row_max - new_row_max) new_row_sum = exp_row_max_diff * row_sum + exp_block_row_max_diff * block_row_sum out = (row_sum / new_row_sum) * exp_row_max_diff * out + \ (exp_block_row_max_diff / new_row_sum) * exp_values return (chunk_idx + k_chunk_sizes, out, new_row_sum, new_row_max), None out = jnp.zeros((q_len, batch, heads, dim)) row_sum = jnp.zeros((q_len, batch, heads, 1)) row_max = jnp.ones((q_len, batch, heads, 1)) * -1e6 (_, out, row_sum, row_max), _ = lax.scan(chunk_scanner, init = (0, out, row_sum, row_max), xs = None, length = math.ceil(k_len / K_CHUNK_SIZE)) row_sum = rearrange(row_sum, 'n ... 1 -> n ...') row_max = rearrange(row_max, 'n ... 
1 -> n ...') lse = jnp.log(row_sum) + row_max return out, lse def _flash_attention(q, k, v, key_mask): batch, heads, q_len, dim, v_dim = *q.shape, v.shape[-1] def chunk_scanner(chunk_idx, _): chunk_sizes = min(Q_CHUNK_SIZE, q_len) q_chunk = lax.dynamic_slice(q, (chunk_idx, 0, 0, 0), slice_sizes = (chunk_sizes, batch, heads, dim)) return (chunk_idx + chunk_sizes, _query_chunk_flash_attention(chunk_idx, q_chunk, k, v, key_mask)) q, k, v = map(lambda t: rearrange(t, 'b h n d -> n b h d'), (q, k, v)) key_mask = rearrange(key_mask, 'b j -> j b') _, (out, lse) = lax.scan(chunk_scanner, init = 0, xs = None, length = math.ceil(q_len / Q_CHUNK_SIZE)) out = rearrange(out, 'c n b h d -> b h (c n) d') lse = rearrange(lse, 'c n b h -> b h (c n)') return out, lse @custom_vjp @jit def flash_attention(q, k, v, key_mask): out, _ = _flash_attention(q, k, v, key_mask) return out @jit def flash_attention_forward(q, k, v, key_mask): out, lse = _flash_attention(q, k, v, key_mask) return out, (q, k, v, key_mask, out, lse) def _query_chunk_flash_attention_backward(q, k, v, key_mask, o, do, lse): q_len, batch, heads, dim, k_len, v_dim = *q.shape, v.shape[0], v.shape[-1] scale = 1 / jnp.sqrt(dim) q_scaled = q * scale def chunk_scanner(carries, _): chunk_idx, dq = carries k_chunk_sizes = min(K_CHUNK_SIZE, k_len) k_chunk = lax.dynamic_slice(k, (chunk_idx, batch, heads, 0), slice_sizes=(k_chunk_sizes, batch, heads, dim)) v_chunk = lax.dynamic_slice(v, (chunk_idx, batch, heads, 0), slice_sizes=(k_chunk_sizes, batch, heads, v_dim)) key_mask_chunk = lax.dynamic_slice(key_mask, (chunk_idx, batch), slice_sizes=(k_chunk_sizes, batch)) attn_weights = einsum('i ... d, j ... d -> i ... j', q_scaled, k_chunk) p = jnp.exp(attn_weights - lse) key_mask_chunk = rearrange(key_mask_chunk, 'j b -> 1 b 1 j') p = jnp.where(key_mask_chunk, p, 0.) dv_chunk = einsum('i ... j, i ... d -> j ... d', p, do) dp = einsum('i ... d, j ... d -> i ... j', do, v_chunk) D = jnp.sum(do * o, axis = -1, keepdims = True) ds = p * scale * (dp - D) dq_chunk = einsum('i ... j, j ... d -> i ... d', ds, k_chunk) dk_chunk = einsum('i ... j, i ... d -> j ... d', ds, q) return (chunk_idx + k_chunk_sizes, dq + dq_chunk), (dk_chunk, dv_chunk) dq = jnp.zeros_like(q) (_, dq), (dk, dv) = lax.scan(chunk_scanner, init = (0, dq), xs = None, length = math.ceil(k_len / K_CHUNK_SIZE)) dk = rearrange(dk, 'c n ... -> (c n) ...') dv = rearrange(dv, 'c n ... 
-> (c n) ...') return dq, dk, dv @jit def flash_attention_backward(res, do): q, k, v, key_mask, o, lse = res batch, heads, q_len, dim = q.shape lse = rearrange(lse, 'b h n -> n b h 1') q, k, v, o, do = map(lambda t: rearrange(t, 'b h n d -> n b h d'), (q, k, v, o, do)) key_mask = rearrange(key_mask, 'b j -> j b') dk = jnp.zeros_like(k) dv = jnp.zeros_like(v) def chunk_scanner(carries, _): chunk_idx, dk, dv = carries chunk_sizes = min(Q_CHUNK_SIZE, q_len) q_chunk = lax.dynamic_slice(q, (chunk_idx, batch, heads, 0), slice_sizes = (chunk_sizes, batch, heads, q.shape[-1])) lse_chunk = lax.dynamic_slice(lse, (chunk_idx, batch, heads, 0), slice_sizes = (chunk_sizes, batch, heads, 1)) o_chunk = lax.dynamic_slice(o, (chunk_idx, batch, heads, 0), slice_sizes = (chunk_sizes, batch, heads, o.shape[-1])) do_chunk = lax.dynamic_slice(do, (chunk_idx, batch, heads, 0), slice_sizes = (chunk_sizes, batch, heads, do.shape[-1])) dq_chunk, dk_chunk, dv_chunk = _query_chunk_flash_attention_backward(q_chunk, k, v, key_mask, o_chunk, do_chunk, lse_chunk) return (chunk_idx + chunk_sizes, dk + dk_chunk, dv + dv_chunk), dq_chunk (_, dk, dv), dq = lax.scan(chunk_scanner, init = (0, dk, dv), xs = None, length = math.ceil(q_len / Q_CHUNK_SIZE)) dq = rearrange(dq, 'c n b h d -> b h (c n) d') dk, dv = map(lambda t: rearrange(t, 'n b h d -> b h n d'), (dk, dv)) return dq, dk, dv, None flash_attention.defvjp(flash_attention_forward, flash_attention_backward)
flash-attention-jax-main
flash_attention_jax/flash_attention.py
Shuurai-main
example.py
Shuurai-main
shuurai/__init__.py
import torch
from kosmos.model import Kosmos2

# usage
img = torch.randn(1, 3, 256, 256)
text = torch.randint(0, 20000, (1, 4096))
model = Kosmos2()
output = model(img, text)
Kosmos-2-main
example.py
from kosmos.model import Kosmos2, Kosmos2Tokenizer
Kosmos-2-main
kosmos/__init__.py
import logging import torch from transformers import AutoTokenizer, CLIPProcessor from kosmos.transformer import Decoder, Transformer, ViTransformerWrapper, Encoder logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s') # Implement classes with type hints and error handling class Kosmos2Tokenizer: """ A tokenizer class for the kosmos model Attributes: processor(CLIPProcessor): The processor to tokenize images tokenizer: (AutoTokenizer): The tokenizer to tokenize text im_idx: (int): The Index of the "<image>" token. im_end_idx (int): The index of the "</image>" token. """ def __init__(self): try: self.processor = CLIPProcessor.from_pretrained("laion/CLIP-ViT-L-14-laion2B-s32B-b82K") self.tokenizer = AutoTokenizer.from_pretrained( "EleutherAI/gpt-neox-20b", additional_special_tokens=["<image>", "</image>"], eos_token="<eos>", pad_token="<pad>", extra_ids=0, model_max_length=8192 ) except Exception as e: logging.error(f"Failed to initialize KosmosTokenizer: {e}") raise self.im_idx, self.im_end_idx = self.tokenizer.convert_tokens_to_ids(["<image>", "</image>"]) def tokenize_texts(self, texts: str): try: texts = self.tokenizer(texts, return_tensors="pt", padding=True, truncation=True).input_ids # Add image tokens to text as "<s> <image> </image> text </s>" image_tokens = torch.tensor([[self.im_idx, self.im_end_idx]] * texts.shape[0]) return torch.cat([texts[:, 0:1], image_tokens, texts[:, 1:]], dim=1), texts except Exception as e: logging.error(f"Failed to tokenize texts: {e}") raise def tokenize_images(self, images): try: return self.processor(images=images, return_tensors="pt").pixel_values except Exception as e: logging.error(f"Failed to tokenize images: {e}") raise def tokenize(self, sample): try: text_tokens, only_text_tokens = self.tokenize_texts(sample["target_text"]) attention_mask = text_tokens != self.tokenizer.pad_token_id dummy_image_features = torch.ones((text_tokens.shape[0], 64)) attention_mask = torch.cat([dummy_image_features, attention_mask], dim=1) return { "text_tokens": text_tokens, "images": self.tokenize_images(sample["image"]), "labels": only_text_tokens, "attention_mask": attention_mask, } except Exception as e: logging.error(f"Failed to tokenize sample: {e}") raise class Kosmos2(torch.nn.Module): def __init__(self, image_size=256, patch_size=32, encoder_dim=512, encoder_depth=6, encoder_heads=8, num_tokens=20000, max_seq_len=1024, decoder_dim=512, decoder_depth=6, decoder_heads=8, alibi_num_heads=4, use_abs_pos_emb=False, cross_attend=True, alibi_pos_bias=True, rotary_xpos=True, attn_flash=True, qk_norm=True): super(Kosmos2, self).__init__() self.encoder = ViTransformerWrapper( image_size=image_size, patch_size=patch_size, attn_layers=Encoder( dim=encoder_dim, depth=encoder_depth, heads=encoder_heads ) ) self.decoder = Transformer( num_tokens=num_tokens, max_seq_len=max_seq_len, use_abs_pos_emb=use_abs_pos_emb, attn_layers=Decoder( dim=decoder_dim, depth=decoder_depth, heads=decoder_heads, cross_attend=cross_attend, alibi_pos_bias=alibi_pos_bias, alibi_num_heads=alibi_num_heads, rotary_xpos=rotary_xpos, attn_flash=attn_flash, qk_norm=qk_norm, ) ) def forward(self, img, text): try: encoded = self.encoder(img, return_embeddings=True) return self.decoder(text, context=encoded) except Exception as error: print(f"Failed in forward method: {error}") raise
Kosmos-2-main
kosmos/model.py
from functools import partial from typing import Optional import torch from torch import nn, einsum, Tensor import torch.nn.functional as F from collections import namedtuple from functools import wraps from packaging import version from dataclasses import dataclass from einops import rearrange # constants EfficientAttentionConfig = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient']) @dataclass class Intermediates: qk_similarities: Optional[Tensor] = None pre_softmax_attn: Optional[Tensor] = None post_softmax_attn: Optional[Tensor] = None def to_tuple(self): return (self.qk_similarities, self.pre_softmax_attn, self.post_softmax_attn) # helpers def exists(val): return val is not None def default(val, d): return val if exists(val) else d def compact(arr): return [*filter(exists, arr)] def once(fn): called = False @wraps(fn) def inner(x): nonlocal called if called: return called = True return fn(x) return inner print_once = once(print) # functions for creating causal mask # need a special one for onnx cpu (no support for .triu) def create_causal_mask(i, j, device): return torch.ones((i, j), device = device, dtype = torch.bool).triu(j - i + 1) def onnx_create_causal_mask(i, j, device): r = torch.arange(i, device = device) causal_mask = rearrange(r, 'i -> i 1') < rearrange(r, 'j -> 1 j') causal_mask = F.pad(causal_mask, (j - i, 0), value = False) return causal_mask # main class class Attend(nn.Module): def __init__( self, *, dropout = 0., causal = False, heads = None, talking_heads = False, sparse_topk = None, scale = None, qk_norm = False, flash = False, add_zero_kv = False, onnxable = False ): super().__init__() self.scale = scale self.qk_norm = qk_norm self.causal = causal self.create_causal_mask = onnx_create_causal_mask if onnxable else create_causal_mask self.attn_fn = partial(F.softmax, dtype = torch.float32) if not qk_norm else F.softmax self.dropout = dropout self.attn_dropout = nn.Dropout(dropout) # talking heads assert not (flash and talking_heads), 'talking heads not compatible with flash attention' self.talking_heads = talking_heads if talking_heads: self.pre_softmax_talking_heads = nn.Conv2d(heads, heads, 1, bias = False) self.post_softmax_talking_heads = nn.Conv2d(heads, heads, 1, bias = False) # sparse topk assert not (flash and sparse_topk), 'sparse topk not compatible with flash attention' self.sparse_topk = sparse_topk # add a key / value token composed of zeros # in case this helps controlling outliers, proposed by https://www.evanmiller.org/attention-is-off-by-one.html self.add_zero_kv = add_zero_kv # flash attention self.flash = flash assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above' # determine efficient attention configs for cuda and cpu self.cpu_config = EfficientAttentionConfig(True, True, True) self.cuda_config = None if not torch.cuda.is_available() or not flash: return device_properties = torch.cuda.get_device_properties(torch.device('cuda')) if device_properties.major == 8 and device_properties.minor == 0: print_once('A100 GPU detected, using flash attention if input tensor is on cuda') self.cuda_config = EfficientAttentionConfig(True, False, False) else: print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda') self.cuda_config = EfficientAttentionConfig(False, True, True) def flash_attn( self, q, k, v, mask = None, attn_bias = None ): batch, heads, q_len, _, k_len, 
is_cuda, device = *q.shape, k.shape[-2], q.is_cuda, q.device # Recommended for multi-query single-key-value attention by Tri Dao # kv shape torch.Size([1, 512, 64]) -> torch.Size([1, 8, 512, 64]) if k.ndim == 3: k = rearrange(k, 'b ... -> b 1 ...').expand_as(q) if v.ndim == 3: v = rearrange(v, 'b ... -> b 1 ...').expand_as(q) # handle scale - by default they scale by dim_head ** -0.5, but need to take care if using cosine sim attention if self.qk_norm: default_scale = q.shape[-1] ** -0.5 q = q * (default_scale / self.scale) # Check if mask exists and expand to compatible shape # The mask is B L, so it would have to be expanded to B H N L causal = self.causal if exists(mask): assert mask.ndim == 4 mask = mask.expand(batch, heads, q_len, k_len) # manually handle causal mask, if another mask was given if causal: causal_mask = self.create_causal_mask(q_len, k_len, device = device) mask = mask & ~causal_mask causal = False # handle alibi positional bias # convert from bool to float if exists(attn_bias): attn_bias = rearrange(attn_bias, 'h i j -> 1 h i j').expand(batch, heads, -1, -1) # if mask given, the mask would already contain the causal mask from above logic # otherwise, if no mask given but still causal, mask out alibi positional bias to a large negative number mask_value = -torch.finfo(q.dtype).max if exists(mask): attn_bias = attn_bias.masked_fill(~mask, mask_value // 2) elif causal: causal_mask = self.create_causal_mask(q_len, k_len, device = device) attn_bias = attn_bias.masked_fill(causal_mask, mask_value // 2) causal = False # scaled_dot_product_attention handles attn_mask either as bool or additive bias # make it an additive bias here mask = attn_bias # Check if there is a compatible device for flash attention config = self.cuda_config if is_cuda else self.cpu_config # pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale with torch.backends.cuda.sdp_kernel(**config._asdict()): out = F.scaled_dot_product_attention( q, k, v, attn_mask = mask, dropout_p = self.dropout if self.training else 0., is_causal = causal ) return out, Intermediates() def forward( self, q, k, v, mask = None, attn_bias = None, prev_attn = None ): """ einstein notation b - batch h - heads n, i, j - sequence length (base sequence length, source, target) d - feature dimension """ n, device = q.shape[-2], q.device scale = default(self.scale, q.shape[-1] ** -0.5) if self.add_zero_kv: k, v = map(lambda t: F.pad(t, (0, 0, 1, 0), value = 0.), (k, v)) if exists(mask): mask = F.pad(mask, (1, 0), value = True) if exists(attn_bias): attn_bias = F.pad(attn_bias, (1, 0), value = 0.) 
if self.flash: assert not exists(prev_attn), 'residual attention not compatible with flash attention' return self.flash_attn(q, k, v, mask = mask, attn_bias = attn_bias) kv_einsum_eq = 'b j d' if k.ndim == 3 else 'b h j d' dots = einsum(f'b h i d, {kv_einsum_eq} -> b h i j', q, k) * scale if exists(prev_attn): dots = dots + prev_attn qk_similarities = dots.clone() if self.talking_heads: dots = self.pre_softmax_talking_heads(dots) if exists(attn_bias): dots = dots + attn_bias i, j, dtype = *dots.shape[-2:], dots.dtype mask_value = -torch.finfo(dots.dtype).max if exists(self.sparse_topk) and self.sparse_topk < j: top_values, _ = dots.topk(self.sparse_topk, dim = -1) sparse_topk_mask = dots < top_values[..., -1:] mask = (mask & sparse_topk_mask) if exists(mask) else sparse_topk_mask if exists(mask): dots = dots.masked_fill(~mask, mask_value) if self.causal: causal_mask = self.create_causal_mask(i, j, device = device) dots = dots.masked_fill(causal_mask, mask_value) pre_softmax_attn = dots.clone() attn = self.attn_fn(dots, dim = -1) attn = attn.type(dtype) post_softmax_attn = attn.clone() attn = self.attn_dropout(attn) if self.talking_heads: attn = self.post_softmax_talking_heads(attn) out = einsum(f'b h i j, {kv_einsum_eq} -> b h i d', attn, v) intermediates = Intermediates( qk_similarities = qk_similarities, pre_softmax_attn = pre_softmax_attn, post_softmax_attn = post_softmax_attn ) return out, intermediates # cascading heads logic def to_single_heads(t, dim = 1): heads = t.unbind(dim = dim) return tuple(head.unsqueeze(dim) for head in heads) class CascadingHeads(nn.Module): def __init__(self, attend: Attend): super().__init__() self.attend = attend def forward( self, q, k, v, mask = None, attn_bias = None, prev_attn = None ): assert q.shape[-1] == v.shape[-1], 'cascading heads can only be done if query / key and value head dimensions are the same' # split inputs into per-head inputs heads = q.shape[1] queries = to_single_heads(q) keys = to_single_heads(k) if k.ndim == 4 else ((k,) * heads) values = to_single_heads(v) if v.ndim == 4 else ((v,) * heads) mask = (mask,) * heads attn_bias = to_single_heads(attn_bias, dim = 0) if exists(attn_bias) else ((None,) * heads) prev_attn = to_single_heads(prev_attn) if exists(prev_attn) else ((None,) * heads) # now loop through each head, without output of previous head summed with the next head # thus cascading all_outs = [] all_intermediates = [] prev_head_out = None for h_q, h_k, h_v, h_mask, h_attn_bias, h_prev_attn in zip(queries, keys, values, mask, attn_bias, prev_attn): if exists(prev_head_out): h_q = h_q + prev_head_out out, intermediates = self.attend( h_q, h_k, h_v, mask = h_mask, attn_bias = h_attn_bias, prev_attn = h_prev_attn ) prev_head_out = out all_outs.append(out) all_intermediates.append(intermediates) # cat all output heads all_outs = torch.cat(all_outs, dim = 1) # cat all intermediates, if they exist qk_similarities, pre_softmax_attn, post_softmax_attn = zip(*map(lambda i: i.to_tuple(), all_intermediates)) qk_similarities, pre_softmax_attn, post_softmax_attn = map(compact, (qk_similarities, pre_softmax_attn, post_softmax_attn)) aggregated_intermediates = Intermediates( qk_similarities = torch.cat(qk_similarities, dim = 1) if len(qk_similarities) > 0 else None, pre_softmax_attn = torch.cat(pre_softmax_attn, dim = 1) if len(pre_softmax_attn) > 0 else None, post_softmax_attn = torch.cat(post_softmax_attn, dim = 1) if len(post_softmax_attn) > 0 else None ) return all_outs, aggregated_intermediates
Kosmos-2-main
kosmos/attend.py
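A minimal usage sketch for the Attend module defined in kosmos/attend.py above. The constructor is only partially visible in this excerpt, so the keyword arguments, tensor shapes, and sizes below are illustrative assumptions rather than values from the repository.

# Hedged sketch: exercising Attend directly with already head-split q/k/v tensors.
import torch
from kosmos.attend import Attend

attend = Attend(
    heads = 8,          # assumed: matches the heads kwarg the Attention layer passes in
    causal = True,
    dropout = 0.1,
    flash = False,      # True would route through F.scaled_dot_product_attention instead
)

b, h, n, d = 2, 8, 128, 64
q = torch.randn(b, h, n, d)
k = torch.randn(b, h, n, d)
v = torch.randn(b, h, n, d)

# Returns the attended values plus an Intermediates record
# (qk similarities and pre/post-softmax attention maps when not using flash).
out, intermediates = attend(q, k, v)
assert out.shape == (b, h, n, d)

In the transformer code that follows, this module is constructed once per Attention layer and receives the projected, head-split queries, keys, and values.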
import math from dataclasses import dataclass from functools import partial, wraps from inspect import isfunction # constants from math import ceil from random import random from typing import Callable, List, Optional import torch import torch.nn.functional as F from einops import pack, rearrange, reduce, repeat, unpack from torch import Tensor, einsum, nn from kosmos.attend import Attend, Intermediates def exists(val): return val is not None def eval_decorator(fn): def inner(self, *args, **kwargs): was_training = self.training self.eval() out = fn(self, *args, **kwargs) self.train(was_training) return out return inner # nucleus def top_p(logits, thres = 0.9): sorted_logits, sorted_indices = torch.sort(logits, descending=True) cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1) sorted_indices_to_remove = cum_probs > (1 - thres) sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone() sorted_indices_to_remove[:, 0] = 0 sorted_logits[sorted_indices_to_remove] = float('-inf') return sorted_logits.scatter(1, sorted_indices, sorted_logits) # topk def top_k(logits, thres = 0.9): k = ceil((1 - thres) * logits.shape[-1]) val, ind = torch.topk(logits, k) probs = torch.full_like(logits, float('-inf')) probs.scatter_(1, ind, val) return probs # top_a def top_a(logits, min_p_pow=2.0, min_p_ratio=0.02): probs = F.softmax(logits, dim=-1) limit = torch.pow(torch.max(probs), min_p_pow) * min_p_ratio logits[probs < limit] = float('-inf') logits[probs >= limit] = 1 return logits # autoregressive wrapper class class AutoregressiveWrapper(nn.Module): def __init__( self, net, ignore_index = -100, pad_value = 0, mask_prob = 0. ): super().__init__() self.pad_value = pad_value self.ignore_index = ignore_index self.net = net self.max_seq_len = net.max_seq_len # paper shows masking (MLM) in conjunction with autoregressive decoder-only training leads to big improvements https://arxiv.org/abs/2210.13432 assert mask_prob < 1. 
self.mask_prob = mask_prob @torch.no_grad() @eval_decorator def generate( self, start_tokens, seq_len, eos_token = None, temperature = 1., filter_logits_fn = top_k, filter_thres = 0.9, min_p_pow = 2.0, min_p_ratio = 0.02, **kwargs ): start_tokens, ps = pack([start_tokens], '* n') b, t = start_tokens.shape out = start_tokens for _ in range(seq_len): x = out[:, -self.max_seq_len:] logits = self.net(x, **kwargs)[:, -1] if filter_logits_fn in {top_k, top_p}: filtered_logits = filter_logits_fn(logits, thres = filter_thres) probs = F.softmax(filtered_logits / temperature, dim=-1) elif filter_logits_fn is top_a: filtered_logits = filter_logits_fn(logits, min_p_pow = min_p_pow, min_p_ratio= min_p_ratio) probs = F.softmax(filtered_logits / temperature, dim=-1) sample = torch.multinomial(probs, 1) out = torch.cat((out, sample), dim=-1) if exists(eos_token): is_eos_tokens = (out == eos_token) if is_eos_tokens.any(dim = -1).all(): # mask out everything after the eos tokens shifted_is_eos_tokens = F.pad(is_eos_tokens, (1, -1)) mask = shifted_is_eos_tokens.float().cumsum(dim = -1) >= 1 out = out.masked_fill(mask, self.pad_value) break out = out[:, t:] out, = unpack(out, ps, '* n') return out def forward(self, x, return_loss=True, **kwargs): seq, ignore_index = x.shape[1], self.ignore_index inp, target = x[:, :-1], x[:, 1:] if self.mask_prob > 0.: rand = torch.randn(inp.shape, device = x.device) rand[:, 0] = -torch.finfo(rand.dtype).max # first token should not be masked out num_mask = min(int(seq * self.mask_prob), seq - 1) indices = rand.topk(num_mask, dim = -1).indices mask = ~torch.zeros_like(inp).scatter(1, indices, 1.).bool() kwargs.update(self_attn_context_mask = mask) logits = self.net(inp, **kwargs) loss = F.cross_entropy( rearrange(logits, 'b n c -> b c n'), target, ignore_index = ignore_index ) if return_loss: return logits, loss return logits DEFAULT_DIM_HEAD = 64 @dataclass class LayerIntermediates: hiddens: Optional[List[Tensor]] = None attn_intermediates: Optional[List[Intermediates]] = None layer_hiddens: Optional[List[Tensor]] = None attn_z_loss: Optional[Tensor] = None # helpers def exists(val): return val is not None def default(val, d): if exists(val): return val return d() if isfunction(d) else d def cast_tuple(val, depth): return val if isinstance(val, tuple) else (val,) * depth def maybe(fn): @wraps(fn) def inner(x, *args, **kwargs): if not exists(x): return x return fn(x, *args, **kwargs) return inner class always(): def __init__(self, val): self.val = val def __call__(self, *args, **kwargs): return self.val class not_equals(): def __init__(self, val): self.val = val def __call__(self, x, *args, **kwargs): return x != self.val class equals(): def __init__(self, val): self.val = val def __call__(self, x, *args, **kwargs): return x == self.val def Sequential(*modules): return nn.Sequential(*filter(exists, modules)) # tensor helpers def max_neg_value(tensor): return -torch.finfo(tensor.dtype).max def l2norm(t, groups = 1): t = rearrange(t, '... (g d) -> ... g d', g = groups) t = F.normalize(t, p = 2, dim = -1) return rearrange(t, '... g d -> ... (g d)') def pad_at_dim(t, pad, dim = -1, value = 0.): dims_from_right = (- dim - 1) if dim < 0 else (t.ndim - dim - 1) zeros = ((0, 0) * dims_from_right) return F.pad(t, (*zeros, *pad), value = value) def or_reduce(masks): head, *body = masks for rest in body: head = head | rest return head # auxiliary loss helpers def calc_z_loss( pre_softmax_attns: List[Tensor], mask = None, weight = 1. 
): # the same loss applied to the mixture of experts router logits in https://arxiv.org/abs/2202.08906 # in the paper, in a tiny footnote, they mention using it on attention logits with stabilizing effects # also used in PaLM as one of the measures lse = 0. for attn in pre_softmax_attns: lse = lse + attn.logsumexp(dim = -1) loss = torch.square(lse) loss = reduce(loss, 'b h n -> b n', 'sum') if not exists(mask): return loss.mean() * weight loss = loss[mask].sum() / mask.sum().clamp(min = 1e-5) return loss * weight # init helpers def init_zero_(layer): nn.init.constant_(layer.weight, 0.) if exists(layer.bias): nn.init.constant_(layer.bias, 0.) # keyword argument helpers def pick_and_pop(keys, d): values = list(map(lambda key: d.pop(key), keys)) return dict(zip(keys, values)) def group_dict_by_key(cond, d): return_val = [dict(),dict()] for key in d.keys(): match = bool(cond(key)) ind = int(not match) return_val[ind][key] = d[key] return (*return_val,) def string_begins_with(prefix, str): return str.startswith(prefix) def group_by_key_prefix(prefix, d): return group_dict_by_key(partial(string_begins_with, prefix), d) def groupby_prefix_and_trim(prefix, d): kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d) kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items()))) return kwargs_without_prefix, kwargs # initializations def deepnorm_init( transformer, beta, module_name_match_list = ['.ff.', '.to_v', '.to_out'] ): for name, module in transformer.named_modules(): if type(module) != nn.Linear: continue needs_beta_gain = any(map(lambda substr: substr in name, module_name_match_list)) gain = beta if needs_beta_gain else 1 nn.init.xavier_normal_(module.weight.data, gain = gain) if exists(module.bias): nn.init.constant_(module.bias.data, 0) # structured dropout, more effective than traditional attention dropouts def dropout_seq(seq, mask, dropout): b, n, *_, device = *seq.shape, seq.device logits = torch.randn(b, n, device = device) if exists(mask): mask_value = max_neg_value(logits) logits = logits.masked_fill(~mask, mask_value) keep_prob = 1. - dropout num_keep = max(1, int(keep_prob * n)) keep_indices = logits.topk(num_keep, dim = 1).indices batch_indices = torch.arange(b, device = device) batch_indices = rearrange(batch_indices, 'b -> b 1') seq = seq[batch_indices, keep_indices] if exists(mask): seq_counts = mask.sum(dim = -1) seq_keep_counts = torch.ceil(seq_counts * keep_prob).int() keep_mask = torch.arange(num_keep, device = device) < rearrange(seq_keep_counts, 'b -> b 1') mask = mask[batch_indices, keep_indices] & keep_mask return seq, mask # activations class ReluSquared(nn.Module): def forward(self, x): return F.relu(x) ** 2 # embedding class TokenEmbedding(nn.Module): def __init__(self, dim, num_tokens, l2norm_embed = False): super().__init__() self.l2norm_embed = l2norm_embed self.emb = nn.Embedding(num_tokens, dim) def forward(self, x): token_emb = self.emb(x) return l2norm(token_emb) if self.l2norm_embed else token_emb # positional embeddings class AbsolutePositionalEmbedding(nn.Module): def __init__(self, dim, max_seq_len, l2norm_embed = False): super().__init__() self.scale = dim ** -0.5 if not l2norm_embed else 1. 
self.max_seq_len = max_seq_len self.l2norm_embed = l2norm_embed self.emb = nn.Embedding(max_seq_len, dim) def forward(self, x, pos = None): seq_len, device = x.shape[1], x.device assert seq_len <= self.max_seq_len, f'you are passing in a sequence length of {seq_len} but your absolute positional embedding has a max sequence length of {self.max_seq_len}' if not exists(pos): pos = torch.arange(seq_len, device = device) pos_emb = self.emb(pos) pos_emb = pos_emb * self.scale return l2norm(pos_emb) if self.l2norm_embed else pos_emb class ScaledSinusoidalEmbedding(nn.Module): def __init__(self, dim, theta = 10000): super().__init__() assert (dim % 2) == 0 self.scale = nn.Parameter(torch.ones(1) * dim ** -0.5) half_dim = dim // 2 freq_seq = torch.arange(half_dim).float() / half_dim inv_freq = theta ** -freq_seq self.register_buffer('inv_freq', inv_freq, persistent = False) def forward(self, x, pos = None): seq_len, device = x.shape[1], x.device if not exists(pos): pos = torch.arange(seq_len, device = device) emb = einsum('i, j -> i j', pos, self.inv_freq) emb = torch.cat((emb.sin(), emb.cos()), dim = -1) return emb * self.scale class RelativePositionBias(nn.Module): def __init__(self, scale, causal = False, num_buckets = 32, max_distance = 128, heads = 8): super().__init__() self.scale = scale self.causal = causal self.num_buckets = num_buckets self.max_distance = max_distance self.relative_attention_bias = nn.Embedding(num_buckets, heads) @staticmethod def _relative_position_bucket(relative_position, causal = True, num_buckets = 32, max_distance = 128): ret = 0 n = -relative_position if not causal: num_buckets //= 2 ret += (n < 0).long() * num_buckets n = torch.abs(n) else: n = torch.max(n, torch.zeros_like(n)) max_exact = num_buckets // 2 is_small = n < max_exact val_if_large = max_exact + ( torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).long() val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1)) ret += torch.where(is_small, n, val_if_large) return ret @property def device(self): return next(self.parameters()).device def forward(self, i, j): device = self.device q_pos = torch.arange(j - i, j, dtype = torch.long, device = device) k_pos = torch.arange(j, dtype = torch.long, device = device) rel_pos = k_pos[None, :] - q_pos[:, None] rp_bucket = self._relative_position_bucket(rel_pos, causal = self.causal, num_buckets = self.num_buckets, max_distance = self.max_distance) values = self.relative_attention_bias(rp_bucket) bias = rearrange(values, 'i j h -> h i j') return bias * self.scale class DynamicPositionBias(nn.Module): def __init__(self, dim, *, heads, depth, log_distance = False, norm = False): super().__init__() assert depth >= 1, 'depth for dynamic position bias MLP must be greater or equal to 1' self.log_distance = log_distance self.mlp = nn.ModuleList([]) self.mlp.append(Sequential( nn.Linear(1, dim), nn.LayerNorm(dim) if norm else None, nn.SiLU() )) for _ in range(depth - 1): self.mlp.append(Sequential( nn.Linear(dim, dim), nn.LayerNorm(dim) if norm else None, nn.SiLU() )) self.mlp.append(nn.Linear(dim, heads)) @property def device(self): return next(self.parameters()).device def forward(self, i, j): assert i == j n, device = j, self.device # get the (n x n) matrix of distances seq_arange = torch.arange(n, device = device) context_arange = torch.arange(n, device = device) indices = rearrange(seq_arange, 'i -> i 1') - rearrange(context_arange, 'j -> 1 j') indices += (n - 1) # input to continuous positions 
MLP pos = torch.arange(-n + 1, n, device = device).float() pos = rearrange(pos, '... -> ... 1') if self.log_distance: pos = torch.sign(pos) * torch.log(pos.abs() + 1) # log of distance is sign(rel_pos) * log(abs(rel_pos) + 1) for layer in self.mlp: pos = layer(pos) # get position biases bias = pos[indices] bias = rearrange(bias, 'i j h -> h i j') return bias class AlibiPositionalBias(nn.Module): def __init__(self, heads, total_heads, **kwargs): super().__init__() self.heads = heads self.total_heads = total_heads slopes = Tensor(self._get_slopes(heads)) slopes = rearrange(slopes, 'h -> h 1 1') self.register_buffer('slopes', slopes, persistent = False) self.register_buffer('bias', None, persistent = False) def get_bias(self, i, j, device): i_arange = torch.arange(j - i, j, device = device) j_arange = torch.arange(j, device = device) bias = -torch.abs(rearrange(j_arange, 'j -> 1 1 j') - rearrange(i_arange, 'i -> 1 i 1')) return bias @staticmethod def _get_slopes(heads): def get_slopes_power_of_2(n): start = (2**(-2**-(math.log2(n)-3))) ratio = start return [start*ratio**i for i in range(n)] if math.log2(heads).is_integer(): return get_slopes_power_of_2(heads) closest_power_of_2 = 2 ** math.floor(math.log2(heads)) return get_slopes_power_of_2(closest_power_of_2) + get_slopes_power_of_2(2 * closest_power_of_2)[0::2][:heads-closest_power_of_2] @property def device(self): return next(self.buffers()).device def forward(self, i, j): h, device = self.total_heads, self.device if exists(self.bias) and self.bias.shape[-1] >= j and self.bias.shape[-2] >= i: return self.bias[..., :i, :j] bias = self.get_bias(i, j, device) bias = bias * self.slopes num_heads_unalibied = h - bias.shape[0] bias = pad_at_dim(bias, (0, num_heads_unalibied), dim = 0) self.register_buffer('bias', bias, persistent = False) return self.bias class RotaryEmbedding(nn.Module): def __init__( self, dim, use_xpos = False, scale_base = 512, interpolation_factor = 1., base = 10000, base_rescale_factor = 1. ): super().__init__() # proposed by reddit user bloc97, to rescale rotary embeddings to longer sequence length without fine-tuning # has some connection to NTK literature # https://www.reddit.com/r/LocalLLaMA/comments/14lz7j5/ntkaware_scaled_rope_allows_llama_models_to_have/ base *= base_rescale_factor ** (dim / (dim - 2)) inv_freq = 1. / (base ** (torch.arange(0, dim, 2).float() / dim)) self.register_buffer('inv_freq', inv_freq) assert interpolation_factor >= 1. self.interpolation_factor = interpolation_factor if not use_xpos: self.register_buffer('scale', None) return scale = (torch.arange(0, dim, 2) + 0.4 * dim) / (1.4 * dim) self.scale_base = scale_base self.register_buffer('scale', scale) def forward(self, seq_len, device): t = torch.arange(seq_len, device = device).type_as(self.inv_freq) t = t / self.interpolation_factor freqs = torch.einsum('i , j -> i j', t, self.inv_freq) freqs = torch.cat((freqs, freqs), dim = -1) if not exists(self.scale): return freqs, 1. power = (torch.arange(seq_len, device = device) - (seq_len // 2)) / self.scale_base scale = self.scale ** rearrange(power, 'n -> n 1') scale = torch.cat((scale, scale), dim = -1) return freqs, scale def rotate_half(x): x = rearrange(x, '... (j d) -> ... 
j d', j = 2) x1, x2 = x.unbind(dim = -2) return torch.cat((-x2, x1), dim = -1) def apply_rotary_pos_emb(t, freqs, scale = 1): seq_len = t.shape[-2] freqs = freqs[-seq_len:, :] return (t * freqs.cos() * scale) + (rotate_half(t) * freqs.sin() * scale) # norms class Scale(nn.Module): def __init__(self, value, fn): super().__init__() self.value = value self.fn = fn def forward(self, x, **kwargs): out = self.fn(x, **kwargs) scale_fn = lambda t: t * self.value if not isinstance(out, tuple): return scale_fn(out) return (scale_fn(out[0]), *out[1:]) class ScaleNorm(nn.Module): def __init__(self, dim, eps = 1e-5): super().__init__() self.eps = eps self.g = nn.Parameter(torch.ones(1) * (dim ** -0.5)) def forward(self, x): norm = torch.norm(x, dim = -1, keepdim = True) return x / norm.clamp(min = self.eps) * self.g class RMSNorm(nn.Module): def __init__(self, dim): super().__init__() self.scale = dim ** 0.5 self.g = nn.Parameter(torch.ones(dim)) def forward(self, x): return F.normalize(x, dim = -1) * self.scale * self.g class SimpleRMSNorm(nn.Module): def __init__(self, dim): super().__init__() self.scale = dim ** 0.5 def forward(self, x): return F.normalize(x, dim = -1) * self.scale # residual and residual gates class Residual(nn.Module): def __init__(self, dim, scale_residual = False, scale_residual_constant = 1.): super().__init__() self.residual_scale = nn.Parameter(torch.ones(dim)) if scale_residual else None self.scale_residual_constant = scale_residual_constant def forward(self, x, residual): if exists(self.residual_scale): residual = residual * self.residual_scale if self.scale_residual_constant != 1: residual = residual * self.scale_residual_constant return x + residual class GRUGating(nn.Module): def __init__(self, dim, scale_residual = False, **kwargs): super().__init__() self.gru = nn.GRUCell(dim, dim) self.residual_scale = nn.Parameter(torch.ones(dim)) if scale_residual else None def forward(self, x, residual): if exists(self.residual_scale): residual = residual * self.residual_scale gated_output = self.gru( rearrange(x, 'b n d -> (b n) d'), rearrange(residual, 'b n d -> (b n) d') ) return gated_output.reshape_as(x) # token shifting def shift(t, amount, mask = None): if amount == 0: return t else: amount = min(amount, t.shape[1]) if exists(mask): t = t.masked_fill(~mask[..., None], 0.) return pad_at_dim(t, (amount, -amount), dim = - 2, value = 0.) class ShiftTokens(nn.Module): def __init__(self, shifts, fn): super().__init__() self.fn = fn self.shifts = tuple(shifts) def forward(self, x, **kwargs): mask = kwargs.get('mask', None) shifts = self.shifts segments = len(shifts) feats_per_shift = x.shape[-1] // segments splitted = x.split(feats_per_shift, dim = -1) segments_to_shift, rest = splitted[:segments], splitted[segments:] segments_to_shift = list(map(lambda args: shift(*args, mask = mask), zip(segments_to_shift, shifts))) x = torch.cat((*segments_to_shift, *rest), dim = -1) return self.fn(x, **kwargs) # feedforward class GLU(nn.Module): def __init__( self, dim_in, dim_out, activation: Callable, mult_bias = False ): super().__init__() self.act = activation self.proj = nn.Linear(dim_in, dim_out * 2) self.mult_bias = nn.Parameter(torch.ones(dim_out)) if mult_bias else 1. 
def forward(self, x): x, gate = self.proj(x).chunk(2, dim = -1) return x * self.act(gate) * self.mult_bias class FeedForward(nn.Module): def __init__( self, dim, dim_out = None, mult = 4, glu = False, glu_mult_bias = False, swish = False, relu_squared = False, post_act_ln = False, dropout = 0., no_bias = False, zero_init_output = False ): super().__init__() inner_dim = int(dim * mult) dim_out = default(dim_out, dim) if relu_squared: activation = ReluSquared() elif swish: activation = nn.SiLU() else: activation = nn.GELU() if glu: project_in = GLU(dim, inner_dim, activation, mult_bias = glu_mult_bias) else: project_in = nn.Sequential( nn.Linear(dim, inner_dim, bias = not no_bias), activation ) self.ff = Sequential( project_in, nn.LayerNorm(inner_dim) if post_act_ln else None, nn.Dropout(dropout), nn.Linear(inner_dim, dim_out, bias = not no_bias) ) # init last linear layer to 0 if zero_init_output: init_zero_(self.ff[-1]) def forward(self, x): return self.ff(x) # attention. it is all we need class Attention(nn.Module): def __init__( self, dim, dim_head = DEFAULT_DIM_HEAD, heads = 8, causal = False, flash = False, talking_heads = False, head_scale = False, sparse_topk = None, num_mem_kv = 0, dropout = 0., on_attn = False, gate_values = False, zero_init_output = False, max_attend_past = None, qk_norm = False, qk_norm_groups = 1, qk_norm_scale = 10, qk_norm_dim_scale = False, one_kv_head = False, shared_kv = False, value_dim_head = None, tensor_product = False, # https://arxiv.org/abs/2208.06061 cascading_heads = False, add_zero_kv = False, # same as add_zero_attn in pytorch onnxable = False ): super().__init__() self.scale = dim_head ** -0.5 self.heads = heads self.causal = causal self.max_attend_past = max_attend_past value_dim_head = default(value_dim_head, dim_head) q_dim = k_dim = dim_head * heads v_dim = out_dim = value_dim_head * heads self.one_kv_head = one_kv_head if one_kv_head: k_dim = dim_head v_dim = value_dim_head out_dim = v_dim * heads self.to_q = nn.Linear(dim, q_dim, bias = False) self.to_k = nn.Linear(dim, k_dim, bias = False) # shared key / values, for further memory savings during inference assert not (shared_kv and value_dim_head != dim_head), 'key and value head dimensions must be equal for shared key / values' self.to_v = nn.Linear(dim, v_dim, bias = False) if not shared_kv else None # relations projection from tp-attention self.to_r = nn.Linear(dim, v_dim, bias = False) if tensor_product else None # add GLU gating for aggregated values, from alphafold2 self.to_v_gate = None if gate_values: self.to_v_gate = nn.Linear(dim, out_dim) nn.init.constant_(self.to_v_gate.weight, 0) nn.init.constant_(self.to_v_gate.bias, 1) # cosine sim attention self.qk_norm = qk_norm self.qk_norm_groups = qk_norm_groups self.qk_norm_scale = qk_norm_scale # whether to use the rmsnorm (equivalent to cosine sim attention when scale is equal to 1) - https://arxiv.org/abs/2302.05442 self.qk_norm_dim_scale = qk_norm_dim_scale self.qk_norm_q_scale = self.qk_norm_k_scale = 1 if qk_norm and qk_norm_dim_scale: self.qk_norm_q_scale = nn.Parameter(torch.ones(dim_head)) self.qk_norm_k_scale = nn.Parameter(torch.ones(dim_head)) assert (not qk_norm) or (dim_head % qk_norm_groups) == 0, 'dimension per attention head must be divisible by the qk norm groups' assert not (qk_norm and (dim_head // qk_norm_groups) <= 2), 'the group dimension may be too small (2 was too small in my tests, but 4 still works, surprisingly)' # attend class - includes core attention algorithm + talking heads self.attend = Attend( heads = 
heads, causal = causal, talking_heads = talking_heads, dropout = dropout, sparse_topk = sparse_topk, qk_norm = qk_norm, scale = qk_norm_scale if qk_norm else self.scale, add_zero_kv = add_zero_kv, flash = flash, onnxable = onnxable ) # head scaling self.head_scale = head_scale if head_scale: self.head_scale_params = nn.Parameter(torch.ones(1, heads, 1, 1)) # explicit topk sparse attention self.sparse_topk = sparse_topk # add memory key / values self.num_mem_kv = num_mem_kv if num_mem_kv > 0: self.mem_k = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head)) self.mem_v = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head)) # attention on attention self.attn_on_attn = on_attn self.to_out = nn.Sequential(nn.Linear(out_dim, dim * 2, bias = False), nn.GLU()) if on_attn else nn.Linear(out_dim, dim, bias = False) # init output projection 0 if zero_init_output: init_zero_(self.to_out) def forward( self, x, context = None, mask = None, context_mask = None, attn_mask = None, rel_pos = None, rotary_pos_emb = None, prev_attn = None, mem = None ): b, n, _, h, head_scale, device, has_context = *x.shape, self.heads, self.head_scale, x.device, exists(context) kv_input = default(context, x) q_input = x k_input = kv_input v_input = kv_input r_input = x if exists(mem): k_input = torch.cat((mem, k_input), dim = -2) v_input = torch.cat((mem, v_input), dim = -2) q = self.to_q(q_input) k = self.to_k(k_input) v = self.to_v(v_input) if exists(self.to_v) else k r = self.to_r(r_input) if exists(self.to_r) else None q = rearrange(q, 'b n (h d) -> b h n d', h = h) if not self.one_kv_head: k, v, r = map(lambda t: maybe(rearrange)(t, 'b n (h d) -> b h n d', h = h), (k, v, r)) if self.qk_norm: qk_l2norm = partial(l2norm, groups = self.qk_norm_groups) q, k = map(qk_l2norm, (q, k)) scale = self.qk_norm_scale q = q * self.qk_norm_q_scale k = k * self.qk_norm_k_scale if exists(rotary_pos_emb) and not has_context: freqs, xpos_scale = rotary_pos_emb l = freqs.shape[-1] q_xpos_scale, k_xpos_scale = (xpos_scale, xpos_scale ** -1.) if exists(xpos_scale) else (1., 1.) 
(ql, qr), (kl, kr), (vl, vr) = map(lambda t: (t[..., :l], t[..., l:]), (q, k, v)) ql, kl, vl = map(lambda arg: apply_rotary_pos_emb(arg[0], freqs, arg[1]), ((ql, q_xpos_scale), (kl, k_xpos_scale), (vl, k_xpos_scale))) q, k, v = map(lambda t: torch.cat(t, dim = -1), ((ql, qr), (kl, kr), (vl, vr))) input_mask = context_mask if has_context else mask if self.num_mem_kv > 0: mem_k, mem_v = map(lambda t: repeat(t, 'h n d -> b h n d', b = b), (self.mem_k, self.mem_v)) if self.qk_norm: mem_k = l2norm(mem_k) mem_k = mem_k * self.qk_norm_k_scale k = torch.cat((mem_k, k), dim = -2) v = torch.cat((mem_v, v), dim = -2) if exists(input_mask): input_mask = pad_at_dim(input_mask, (self.num_mem_kv, 0), dim = -1, value = True) i, j = map(lambda t: t.shape[-2], (q, k)) # determine masking mask_value = max_neg_value(q) masks = [] final_attn_mask = None if exists(input_mask): input_mask = rearrange(input_mask, 'b j -> b 1 1 j') masks.append(~input_mask) if exists(attn_mask): assert 2 <= attn_mask.ndim <= 4, 'attention mask must have greater than 2 dimensions but less than or equal to 4' if attn_mask.ndim == 2: attn_mask = rearrange(attn_mask, 'i j -> 1 1 i j') elif attn_mask.ndim == 3: attn_mask = rearrange(attn_mask, 'h i j -> 1 h i j') masks.append(~attn_mask) if exists(self.max_attend_past): range_q = torch.arange(j - i, j, device = device) range_k = torch.arange(j, device = device) dist = rearrange(range_q, 'i -> 1 1 i 1') - rearrange(range_k, 'j -> 1 1 1 j') max_attend_past_mask = dist > self.max_attend_past masks.append(max_attend_past_mask) if len(masks) > 0: final_attn_mask = ~or_reduce(masks) # prepare relative positional bias, if needed attn_bias = None if exists(rel_pos): attn_bias = rel_pos(i, j) # attention is all we need out, intermediates = self.attend( q, k, v, mask = final_attn_mask, attn_bias = attn_bias, prev_attn = prev_attn ) # https://arxiv.org/abs/2208.06061 proposes to add a residual for better gradients if exists(r): out = out * r + out # normformer scaling of heads if head_scale: out = out * self.head_scale_params # merge heads out = rearrange(out, 'b h n d -> b n (h d)') # alphafold2 styled gating of the values if exists(self.to_v_gate): gates = self.to_v_gate(x) out = out * gates.sigmoid() # combine the heads out = self.to_out(out) if exists(mask): mask = rearrange(mask, 'b n -> b n 1') out = out.masked_fill(~mask, 0.) 
return out, intermediates class AttentionLayers(nn.Module): def __init__( self, dim, depth, heads = 8, causal = False, cross_attend = False, only_cross = False, use_scalenorm = False, use_rmsnorm = False, use_simple_rmsnorm = False, alibi_pos_bias = False, alibi_num_heads = None, rel_pos_bias = False, rel_pos_num_buckets = 32, rel_pos_max_distance = 128, dynamic_pos_bias = False, dynamic_pos_bias_log_distance = False, dynamic_pos_bias_mlp_depth = 2, dynamic_pos_bias_norm = False, rotary_pos_emb = False, rotary_emb_dim = None, rotary_xpos = False, rotary_interpolation_factor = 1., rotary_xpos_scale_base = 512, rotary_base_rescale_factor = 1., custom_layers = None, sandwich_coef = None, par_ratio = None, residual_attn = False, cross_residual_attn = False, macaron = False, pre_norm = True, pre_norm_has_final_norm = True, gate_residual = False, scale_residual = False, scale_residual_constant = 1., deepnorm = False, shift_tokens = 0, sandwich_norm = False, resi_dual = False, resi_dual_scale = 1., zero_init_branch_output = False, layer_dropout = 0., cross_attn_tokens_dropout = 0., **kwargs ): super().__init__() rotary_pos_emb = rotary_pos_emb or rotary_xpos ff_kwargs, kwargs = groupby_prefix_and_trim('ff_', kwargs) attn_kwargs, kwargs = groupby_prefix_and_trim('attn_', kwargs) dim_head = attn_kwargs.get('dim_head', DEFAULT_DIM_HEAD) self.dim = dim self.depth = depth self.layers = nn.ModuleList([]) self.has_pos_emb = rel_pos_bias or rotary_pos_emb rotary_emb_dim = max(default(rotary_emb_dim, dim_head // 2), 32) assert not (rotary_xpos and not causal), 'rotary xpos is not compatible with bidirectional attention' self.rotary_pos_emb = RotaryEmbedding(rotary_emb_dim, use_xpos = rotary_xpos, scale_base = rotary_xpos_scale_base, interpolation_factor = rotary_interpolation_factor, base_rescale_factor = rotary_base_rescale_factor) if rotary_pos_emb else None assert not (alibi_pos_bias and rel_pos_bias), 'you can only choose Alibi positional bias or T5 relative positional bias, not both' assert rel_pos_num_buckets <= rel_pos_max_distance, 'number of relative position buckets must be less than the relative position max distance' # relative positional bias flash_attn = attn_kwargs.get('flash', False) assert (int(rel_pos_bias) + int(dynamic_pos_bias) + int(alibi_pos_bias)) <= 1, 'you can only choose up to one of t5, alibi, or dynamic positional bias' self.rel_pos = None if rel_pos_bias: assert not flash_attn, 'flash attention not compatible with t5 relative positional bias' self.rel_pos = RelativePositionBias(scale = dim_head ** 0.5, causal = causal, heads = heads, num_buckets = rel_pos_num_buckets, max_distance = rel_pos_max_distance) elif dynamic_pos_bias: assert not flash_attn, 'flash attention not compatible with dynamic positional bias' self.rel_pos = DynamicPositionBias(dim = dim // 4, heads = heads, log_distance = dynamic_pos_bias_log_distance, depth = dynamic_pos_bias_mlp_depth, norm = dynamic_pos_bias_norm) elif alibi_pos_bias: alibi_num_heads = default(alibi_num_heads, heads) assert alibi_num_heads <= heads, 'number of ALiBi heads must be less than the total number of heads' self.rel_pos = AlibiPositionalBias(heads = alibi_num_heads, total_heads = heads) # determine deepnorm and residual scale if deepnorm: assert scale_residual_constant == 1, 'scale residual constant is being overridden by deep norm settings' pre_norm = sandwich_norm = resi_dual = False scale_residual = True scale_residual_constant = (2 * depth) ** 0.25 assert (int(sandwich_norm) + int(resi_dual)) <= 1, 'either sandwich norm or 
resiDual is selected, but not both' assert not (not pre_norm and sandwich_norm), 'sandwich norm cannot be used when not using prenorm' if resi_dual: pre_norm = False self.pre_norm = pre_norm self.sandwich_norm = sandwich_norm self.resi_dual = resi_dual assert 0 < resi_dual_scale <= 1., 'resiDual prenorm residual must be scaled by a factor greater than 0 and less than or equal to 1.' self.resi_dual_scale = resi_dual_scale self.residual_attn = residual_attn self.cross_residual_attn = cross_residual_attn assert not (flash_attn and (residual_attn or cross_residual_attn)), 'flash attention is not compatible with residual attention' self.cross_attend = cross_attend assert (int(use_scalenorm) + int(use_rmsnorm) + int(use_simple_rmsnorm)) <= 1, 'you can only use either scalenorm, rmsnorm, or simple rmsnorm' if use_scalenorm: norm_class = ScaleNorm elif use_rmsnorm: norm_class = RMSNorm elif use_simple_rmsnorm: norm_class = SimpleRMSNorm else: norm_class = nn.LayerNorm norm_fn = partial(norm_class, dim) if cross_attend and not only_cross: default_block = ('a', 'c', 'f') elif cross_attend and only_cross: default_block = ('c', 'f') else: default_block = ('a', 'f') if macaron: default_block = ('f',) + default_block # zero init if zero_init_branch_output: attn_kwargs = {**attn_kwargs, 'zero_init_output': True} ff_kwargs = {**ff_kwargs, 'zero_init_output': True} # calculate layer block order if exists(custom_layers): layer_types = custom_layers elif exists(par_ratio): par_depth = depth * len(default_block) assert 1 < par_ratio <= par_depth, 'par ratio out of range' default_block = tuple(filter(not_equals('f'), default_block)) par_attn = par_depth // par_ratio depth_cut = par_depth * 2 // 3 # 2 / 3 attention layer cutoff suggested by PAR paper par_width = (depth_cut + depth_cut // par_attn) // par_attn assert len(default_block) <= par_width, 'default block is too large for par_ratio' par_block = default_block + ('f',) * (par_width - len(default_block)) par_head = par_block * par_attn layer_types = par_head + ('f',) * (par_depth - len(par_head)) elif exists(sandwich_coef): assert sandwich_coef > 0 and sandwich_coef <= depth, 'sandwich coefficient should be less than the depth' layer_types = ('a',) * sandwich_coef + default_block * (depth - sandwich_coef) + ('f',) * sandwich_coef else: layer_types = default_block * depth self.layer_types = layer_types self.num_attn_layers = len(list(filter(equals('a'), layer_types))) # stochastic depth self.layer_dropouts = cast_tuple(layer_dropout, len(layer_types)) # structured dropout for cross attending self.cross_attn_tokens_dropout = cross_attn_tokens_dropout # calculate token shifting shift_tokens = cast_tuple(shift_tokens, len(layer_types)) # whether it has post norm self.final_norm = norm_fn() if pre_norm or resi_dual else nn.Identity() # iterate and construct layers for ind, (layer_type, layer_shift_tokens) in enumerate(zip(self.layer_types, shift_tokens)): is_last_layer = ind == (len(self.layer_types) - 1) if layer_type == 'a': layer = Attention(dim, heads = heads, causal = causal, **attn_kwargs) elif layer_type == 'c': layer = Attention(dim, heads = heads, **attn_kwargs) elif layer_type == 'f': layer = FeedForward(dim, **ff_kwargs) layer = layer if not macaron else Scale(0.5, layer) else: raise Exception(f'invalid layer type {layer_type}') if layer_shift_tokens > 0: shift_range_upper = layer_shift_tokens + 1 shift_range_lower = -layer_shift_tokens if not causal else 0 layer = ShiftTokens(range(shift_range_lower, shift_range_upper), layer) residual_fn = 
GRUGating if gate_residual else Residual residual = residual_fn(dim, scale_residual = scale_residual, scale_residual_constant = scale_residual_constant) pre_branch_norm = norm_fn() if pre_norm else None post_branch_norm = norm_fn() if sandwich_norm else None post_main_norm = norm_fn() if not pre_norm else None norms = nn.ModuleList([ pre_branch_norm, post_branch_norm, post_main_norm ]) self.layers.append(nn.ModuleList([ norms, layer, residual ])) if deepnorm: init_gain = (8 * depth) ** -0.25 deepnorm_init(self, init_gain) def forward( self, x, context = None, mask = None, context_mask = None, attn_mask = None, self_attn_context_mask = None, mems = None, return_hiddens = False ): assert not (self.cross_attend ^ exists(context)), 'context must be passed in if cross_attend is set to True' hiddens = [] layer_hiddens = [] intermediates = [] prev_attn = None prev_cross_attn = None mems = mems.copy() if exists(mems) else [None] * self.num_attn_layers rotary_pos_emb = None if exists(self.rotary_pos_emb): max_rotary_emb_length = max(list(map(lambda m: (m.shape[1] if exists(m) else 0) + x.shape[1], mems))) rotary_pos_emb = self.rotary_pos_emb(max_rotary_emb_length, x.device) outer_residual = x * self.resi_dual_scale for ind, (layer_type, (norm, block, residual_fn), layer_dropout) in enumerate(zip(self.layer_types, self.layers, self.layer_dropouts)): is_last = ind == (len(self.layers) - 1) if self.training and layer_dropout > 0. and random() < layer_dropout: continue if layer_type == 'a': if return_hiddens: hiddens.append(x) layer_mem = mems.pop(0) if mems else None if layer_type == 'c': if self.training and self.cross_attn_tokens_dropout > 0.: context, context_mask = dropout_seq(context, context_mask, self.cross_attn_tokens_dropout) inner_residual = x if return_hiddens: layer_hiddens.append(x) pre_norm, post_branch_norm, post_main_norm = norm if exists(pre_norm): x = pre_norm(x) if layer_type == 'a': out, inter = block(x, mask = mask, context_mask = self_attn_context_mask, attn_mask = attn_mask, rel_pos = self.rel_pos, rotary_pos_emb = rotary_pos_emb, prev_attn = prev_attn, mem = layer_mem) elif layer_type == 'c': out, inter = block(x, context = context, mask = mask, context_mask = context_mask, prev_attn = prev_cross_attn) elif layer_type == 'f': out = block(x) if self.resi_dual: outer_residual = outer_residual + out * self.resi_dual_scale if exists(post_branch_norm): out = post_branch_norm(out) x = residual_fn(out, inner_residual) if layer_type in ('a', 'c') and return_hiddens: intermediates.append(inter) if layer_type == 'a' and self.residual_attn: prev_attn = inter.pre_softmax_attn elif layer_type == 'c' and self.cross_residual_attn: prev_cross_attn = inter.pre_softmax_attn if exists(post_main_norm): x = post_main_norm(x) if return_hiddens: layer_hiddens.append(x) if self.resi_dual: x = x + self.final_norm(outer_residual) else: x = self.final_norm(x) if return_hiddens: intermediates = LayerIntermediates( hiddens = hiddens, attn_intermediates = intermediates, layer_hiddens = layer_hiddens ) return x, intermediates return x class Encoder(AttentionLayers): def __init__(self, **kwargs): assert 'causal' not in kwargs, 'cannot set causality on encoder' super().__init__(causal = False, **kwargs) class Decoder(AttentionLayers): def __init__(self, **kwargs): assert 'causal' not in kwargs, 'cannot set causality on decoder' super().__init__(causal = True, **kwargs) class CrossAttender(AttentionLayers): def __init__(self, **kwargs): super().__init__(cross_attend = True, only_cross = True, **kwargs) class 
ViTransformerWrapper(nn.Module): def __init__( self, *, image_size, patch_size, attn_layers, channels = 3, num_classes = None, post_emb_norm = False, emb_dropout = 0. ): super().__init__() assert isinstance(attn_layers, Encoder), 'attention layers must be an Encoder' assert image_size % patch_size == 0, 'image dimensions must be divisible by the patch size' dim = attn_layers.dim num_patches = (image_size // patch_size) ** 2 patch_dim = channels * patch_size ** 2 self.patch_size = patch_size self.pos_embedding = nn.Parameter(torch.randn(1, num_patches, dim)) self.patch_to_embedding = nn.Sequential( nn.LayerNorm(patch_dim), nn.Linear(patch_dim, dim), nn.LayerNorm(dim) ) self.post_emb_norm = nn.LayerNorm(dim) if post_emb_norm else nn.Identity() self.dropout = nn.Dropout(emb_dropout) self.attn_layers = attn_layers self.mlp_head = nn.Linear(dim, num_classes) if exists(num_classes) else nn.Identity() def forward( self, img, return_embeddings = False ): p = self.patch_size x = rearrange(img, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = p, p2 = p) x = self.patch_to_embedding(x) n = x.shape[1] x = x + self.pos_embedding[:, :n] x = self.post_emb_norm(x) x = self.dropout(x) x = self.attn_layers(x) if not exists(self.mlp_head) or return_embeddings: return x x = x.mean(dim = -2) return self.mlp_head(x) class Transformer(nn.Module): def __init__( self, *, num_tokens, max_seq_len, attn_layers, emb_dim = None, max_mem_len = 0, shift_mem_down = 0, emb_dropout = 0., post_emb_norm = False, num_memory_tokens = None, tie_embedding = False, logits_dim = None, use_abs_pos_emb = True, scaled_sinu_pos_emb = False, l2norm_embed = False, emb_frac_gradient = 1., # GLM-130B and Cogview successfully used this, set at 0.1 attn_z_loss_weight = 1e-4 ): super().__init__() assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder' dim = attn_layers.dim emb_dim = default(emb_dim, dim) self.emb_dim = emb_dim self.num_tokens = num_tokens self.max_seq_len = max_seq_len self.max_mem_len = max_mem_len self.shift_mem_down = shift_mem_down self.l2norm_embed = l2norm_embed self.token_emb = TokenEmbedding(emb_dim, num_tokens, l2norm_embed = l2norm_embed) if not (use_abs_pos_emb and not attn_layers.has_pos_emb): self.pos_emb = always(0) elif scaled_sinu_pos_emb: self.pos_emb = ScaledSinusoidalEmbedding(emb_dim) else: self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len, l2norm_embed = l2norm_embed) self.emb_frac_gradient = emb_frac_gradient # fraction of the gradient that should go to the embedding, https://arxiv.org/abs/2105.13290 self.post_emb_norm = nn.LayerNorm(emb_dim) if post_emb_norm else nn.Identity() self.emb_dropout = nn.Dropout(emb_dropout) self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity() self.attn_layers = attn_layers self.init_() logits_dim = default(logits_dim, num_tokens) self.to_logits = nn.Linear(dim, logits_dim) if not tie_embedding else lambda t: t @ self.token_emb.emb.weight.t() # memory tokens (like [cls]) from Memory Transformers paper num_memory_tokens = default(num_memory_tokens, 0) self.num_memory_tokens = num_memory_tokens if num_memory_tokens > 0: self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim)) def init_(self): if self.l2norm_embed: nn.init.normal_(self.token_emb.emb.weight, std = 1e-5) if not isinstance(self.pos_emb, always): nn.init.normal_(self.pos_emb.emb.weight, std = 1e-5) return nn.init.kaiming_normal_(self.token_emb.emb.weight) def forward( self, x, return_embeddings = False, 
return_logits_and_embeddings = False, return_intermediates = False, mask = None, return_mems = False, return_attn = False, mems = None, pos = None, prepend_embeds = None, sum_embeds = None, return_attn_z_loss = False, attn_z_loss_weight = 1e-4, **kwargs ): b, n, device, num_mem, emb_frac_gradient = *x.shape, x.device, self.num_memory_tokens, self.emb_frac_gradient return_hiddens = return_mems | return_attn | return_intermediates | return_attn_z_loss # absolute positional embedding external_pos_emb = exists(pos) and pos.dtype != torch.long pos_emb = self.pos_emb(x, pos = pos) if not external_pos_emb else pos x = self.token_emb(x) + pos_emb # for summing embeddings passed externally - needs this for self-conditioning in non-autoregressive training if exists(sum_embeds): x = x + sum_embeds # post embedding norm, purportedly leads to greater stabilization x = self.post_emb_norm(x) # whether to append embeds, as in PaLI, for image embeddings if exists(prepend_embeds): prepend_seq, prepend_dim = prepend_embeds.shape[1:] assert prepend_dim == x.shape[-1], 'prepended embeddings need to have same dimensions as text model dimensions' x = torch.cat((prepend_embeds, x), dim = -2) # whether to reduce the gradient going to the embedding, from cogview paper, corroborated by GLM-130B model if emb_frac_gradient < 1: assert emb_frac_gradient > 0 x = x * emb_frac_gradient + x.detach() * (1 - emb_frac_gradient) # embedding dropout x = self.emb_dropout(x) x = self.project_emb(x) if num_mem > 0: mem = repeat(self.memory_tokens, 'n d -> b n d', b = b) x = torch.cat((mem, x), dim = 1) # auto-handle masking after appending memory tokens if exists(mask): mask = pad_at_dim(mask, (num_mem, 0), dim = -1, value = True) if self.shift_mem_down and exists(mems): mems_l, mems_r = mems[:self.shift_mem_down], mems[self.shift_mem_down:] mems = [*mems_r, *mems_l] if return_hiddens: x, intermediates = self.attn_layers(x, mask = mask, mems = mems, return_hiddens = True, **kwargs) else: x = self.attn_layers(x, mask = mask, mems = mems, **kwargs) mem, x = x[:, :num_mem], x[:, num_mem:] if return_logits_and_embeddings: out = (self.to_logits(x), x) elif return_embeddings: out = x else: out = self.to_logits(x) if return_attn_z_loss: pre_softmax_attns = list(map(lambda t: t.pre_softmax_attn, intermediates.attn_intermediates)) intermediates.attn_z_loss = calc_z_loss(pre_softmax_attns, weight = attn_z_loss_weight) return_intermediates = True if return_intermediates: return out, intermediates if return_mems: hiddens = intermediates.hiddens new_mems = list(map(lambda pair: torch.cat(pair, dim = -2), zip(mems, hiddens))) if exists(mems) else hiddens new_mems = list(map(lambda t: t[..., -self.max_mem_len:, :].detach(), new_mems)) return out, new_mems if return_attn: attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates)) return out, attn_maps return out
Kosmos-2-main
kosmos/transformer.py
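A hedged composition sketch for kosmos/transformer.py above: a causal Decoder stack wrapped in Transformer for token embedding and logits, then AutoregressiveWrapper for the shifted next-token loss and sampling. All dimensions, the vocabulary size, and the generation settings are placeholder assumptions, not Kosmos-2 hyperparameters.

# Hedged sketch: wiring Decoder -> Transformer -> AutoregressiveWrapper.
import torch
from kosmos.transformer import AutoregressiveWrapper, Decoder, Transformer

model = Transformer(
    num_tokens = 20000,            # placeholder vocabulary size
    max_seq_len = 1024,
    attn_layers = Decoder(
        dim = 512,
        depth = 6,
        heads = 8,
        rotary_pos_emb = True,     # rotary positions, as supported by AttentionLayers
    ),
)

wrapper = AutoregressiveWrapper(model)

tokens = torch.randint(0, 20000, (2, 1024))
logits, loss = wrapper(tokens)     # next-token cross-entropy on shifted targets
loss.backward()

prompt = tokens[:, :16]
sampled = wrapper.generate(prompt, seq_len = 32, temperature = 0.8)  # (2, 32)

The same file also defines ViTransformerWrapper, which patch-embeds an image and runs it through an Encoder; called with return_embeddings = True it yields per-patch features that can be handed to Transformer via its prepend_embeds argument.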
import torch
from alpha_dev.model import AlphaDev

model = AlphaDev().cuda()

x = torch.randint(0, 256, (1, 1024)).cuda()
model(x) # (1, 1024, 20000)
AlphaDev-main
example.py
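A short hedged follow-up to example.py above, decoding greedily from the returned logits. The (1, 1024, 20000) output shape is taken from the comment in example.py; the rest (CUDA usage, raw-logits assumption) mirrors the example and is not guaranteed by this excerpt.

# Hedged follow-up: greedy next-token pick from the AlphaDev logits.
import torch
from alpha_dev.model import AlphaDev

model = AlphaDev().cuda()
x = torch.randint(0, 256, (1, 1024)).cuda()

with torch.no_grad():
    logits = model(x)                        # (1, 1024, 20000), per example.py

next_token = logits[:, -1].argmax(dim = -1)  # greedy choice at the final position
print(next_token.shape)                      # torch.Size([1])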
import collections import functools import math from typing import Any, Callable, Dict, NamedTuple, Optional, Sequence import chex import haiku as hk import jax import jax.lax import jax.numpy as jnp import ml_collections import numpy import optax ############################ ###### 1. Environment ###### class TaskSpec(NamedTuple): max_program_size: int num_inputs: int num_funcs: int num_locations: int num_actions: int correct_reward: float correctness_reward_weight: float latency_reward_weight: float latency_quantile: float class AssemblyGame(object): """The environment AlphaDev is interacting with.""" class AssemblyInstruction(object): pass class AssemblySimulator(object): # pylint: disable-next=unused-argument def apply(self, instruction): return {} def measure_latency(self, program) -> float: pass def __init__(self, task_spec): self.task_spec = task_spec self.program = [] self.simulator = self.AssemblySimulator(task_spec) self.previous_correct_items = 0 def step(self, action): instruction = self.AssemblyInstruction(action) self.program.append(instruction) self.execution_state = self.simulator.apply(instruction) return self.observation(), self.correctness_reward() def observation(self): return { 'program': self.program, 'program_length': len(self.program), 'memory': self.execution_state.memory, 'registers': self.execution_state.registers, } def correctness_reward(self) -> float: """Computes a reward based on the correctness of the output.""" make_expected_outputs = lambda: [] expected_outputs = make_expected_outputs() state = self.execution_state # Weighted sum of correctly placed items correct_items = 0 for output, expected in zip(state.memory, expected_outputs): correct_items += output.weight * sum( output[i] == expected[i] for i in range(len(output)) ) reward = self.task_spec.correctness_reward_weight * ( correct_items - self.previous_correct_items ) self.previous_correct_items = correct_items # Bonus for fully correct programs all_correct = all( output == expected for output, expected in zip(state.memory, expected_outputs) ) reward += self.task_spec.correct_reward * all_correct return reward def latency_reward(self) -> float: latency_samples = [ self.simulator.measure_latency(self.program) for _ in range(self.task_spec.num_latency_simulation) ] return ( numpy.quantile(latency_samples, self.task_spec.latency_quantile) * self.task_spec.latency_reward_weight ) def clone(self): pass class Action(object): """Action representation.""" def __init__(self, index: int): self.index = index def __hash__(self): return self.index def __eq__(self, other): return self.index == other.index def __gt__(self, other): return self.index > other.index class NetworkOutput(NamedTuple): value: float correctness_value_logits: jnp.ndarray latency_value_logits: jnp.ndarray policy_logits: Dict[Action, float] class Network(object): """Wrapper around Representation and Prediction networks.""" def __init__(self, hparams: ml_collections.ConfigDict, task_spec: TaskSpec): self.representation = hk.transform(RepresentationNet( hparams, task_spec, hparams.embedding_dim )) self.prediction = hk.transform(PredictionNet( task_spec=task_spec, value_max=hparams.value.max, value_num_bins=hparams.value.num_bins, embedding_dim=hparams.embedding_dim, )) rep_key, pred_key = jax.random.PRNGKey(42).split() self.params = { 'representation': self.representation.init(rep_key), 'prediction': self.prediction.init(pred_key), } def inference(self, params: Any, observation: jnp.array) -> NetworkOutput: # representation + prediction function 
embedding = self.representation.apply(params['representation'], observation) return self.prediction.apply(params['prediction'], embedding) def get_params(self): # Returns the weights of this network. return self.params def update_params(self, updates: Any) -> None: # Update network weights internally. self.params = jax.tree_map(lambda p, u: p + u, self.params, updates) def training_steps(self) -> int: # How many steps / batches the network has been trained for. return 0 class UniformNetwork(object): """Network representation that returns uniform output.""" # pylint: disable-next=unused-argument def inference(self, observation) -> NetworkOutput: # representation + prediction function return NetworkOutput(0, 0, 0, {}) def get_params(self): # Returns the weights of this network. return self.params def update_params(self, updates: Any) -> None: # Update network weights internally. self.params = jax.tree_map(lambda p, u: p + u, self.params, updates) def training_steps(self) -> int: # How many steps / batches the network has been trained for. return 0 ######## 2.2 Representation Network ######## class RepresentationNet(hk.Module): """Representation network.""" def __init__( self, hparams: ml_collections.ConfigDict, task_spec: TaskSpec, embedding_dim: int, name: str = 'representation', ): super().__init__(name=name) self._hparams = hparams self._task_spec = task_spec self._embedding_dim = embedding_dim def __call__(self, inputs): batch_size = inputs['program'].shape[0] program_encoding = None if self._hparams.representation.use_program: program_encoding = self._encode_program(inputs, batch_size) if ( self._hparams.representation.use_locations and self._hparams.representation.use_locations_binary ): raise ValueError( 'only one of `use_locations` and `use_locations_binary` may be used.' 
) locations_encoding = None if self._hparams.representation.use_locations: locations_encoding = self._make_locations_encoding_onehot( inputs, batch_size ) elif self._hparams.representation.use_locations_binary: locations_encoding = self._make_locations_encoding_binary( inputs, batch_size ) permutation_embedding = None if self._hparams.representation.use_permutation_embedding: permutation_embedding = self.make_permutation_embedding(batch_size) return self.aggregate_locations_program( locations_encoding, permutation_embedding, program_encoding, batch_size ) def _encode_program(self, inputs, batch_size): program = inputs['program'] max_program_size = inputs['program'].shape[1] program_length = inputs['program_length'].astype(jnp.int32) program_onehot = self.make_program_onehot( program, batch_size, max_program_size ) program_encoding = self.apply_program_mlp_embedder(program_onehot) program_encoding = self.apply_program_attention_embedder(program_encoding) return self.pad_program_encoding( program_encoding, batch_size, program_length, max_program_size ) def aggregate_locations_program( self, locations_encoding, unused_permutation_embedding, program_encoding, batch_size, ): locations_embedder = hk.Sequential( [ hk.Linear(self._embedding_dim), hk.LayerNorm(axis=-1), jax.nn.relu, hk.Linear(self._embedding_dim), ], name='per_locations_embedder', ) # locations_encoding.shape == [B, P, D] so map embedder across locations to # share weights locations_embedding = hk.vmap( locations_embedder, in_axes=1, out_axes=1, split_rng=False )(locations_encoding) program_encoded_repeat = self.repeat_program_encoding( program_encoding, batch_size ) grouped_representation = jnp.concatenate( [locations_embedding, program_encoded_repeat], axis=-1 ) return self.apply_joint_embedder(grouped_representation, batch_size) def repeat_program_encoding(self, program_encoding, batch_size): return jnp.broadcast_to( program_encoding, [batch_size, self._task_spec.num_inputs, program_encoding.shape[-1]], ) def apply_joint_embedder(self, grouped_representation, batch_size): all_locations_net = hk.Sequential( [ hk.Linear(self._embedding_dim), hk.LayerNorm(axis=-1), jax.nn.relu, hk.Linear(self._embedding_dim), ], name='per_element_embedder', ) joint_locations_net = hk.Sequential( [ hk.Linear(self._embedding_dim), hk.LayerNorm(axis=-1), jax.nn.relu, hk.Linear(self._embedding_dim), ], name='joint_embedder', ) joint_resnet = [ ResBlockV2(self._embedding_dim, name=f'joint_resblock_{i}') for i in range(self._hparams.representation.repr_net_res_blocks) ] chex.assert_shape( grouped_representation, (batch_size, self._task_spec.num_inputs, None) ) permutations_encoded = all_locations_net(grouped_representation) # Combine all permutations into a single vector. 
joint_encoding = joint_locations_net(jnp.mean(permutations_encoded, axis=1)) for net in joint_resnet: joint_encoding = net(joint_encoding) return joint_encoding def make_program_onehot(self, program, batch_size, max_program_size): func = program[:, :, 0] arg1 = program[:, :, 1] arg2 = program[:, :, 2] func_onehot = jax.nn.one_hot(func, self._task_spec.num_funcs) arg1_onehot = jax.nn.one_hot(arg1, self._task_spec.num_locations) arg2_onehot = jax.nn.one_hot(arg2, self._task_spec.num_locations) program_onehot = jnp.concatenate( [func_onehot, arg1_onehot, arg2_onehot], axis=-1 ) chex.assert_shape(program_onehot, (batch_size, max_program_size, None)) return program_onehot def pad_program_encoding( self, program_encoding, batch_size, program_length, max_program_size ): """Pads the program encoding to account for state-action stagger.""" chex.assert_shape(program_encoding, (batch_size, max_program_size, None)) empty_program_output = jnp.zeros( [batch_size, program_encoding.shape[-1]], ) program_encoding = jnp.concatenate( [empty_program_output[:, None, :], program_encoding], axis=1 ) program_length_onehot = jax.nn.one_hot(program_length, max_program_size + 1) program_encoding = jnp.einsum( 'bnd,bNn->bNd', program_encoding, program_length_onehot ) return program_encoding def apply_program_mlp_embedder(self, program_encoding): program_embedder = hk.Sequential( [ hk.Linear(self._embedding_dim), hk.LayerNorm(axis=-1), jax.nn.relu, hk.Linear(self._embedding_dim), ], name='per_instruction_program_embedder', ) program_encoding = program_embedder(program_encoding) return program_encoding def apply_program_attention_embedder(self, program_encoding): attention_params = self._hparams.representation.attention make_attention_block = functools.partial( MultiQueryAttentionBlock, attention_params, causal_mask=False ) attention_encoders = [ make_attention_block(name=f'attention_program_sequencer_{i}') for i in range(self._hparams.representation.attention_num_layers) ] *_, seq_size, feat_size = program_encoding.shape position_encodings = jnp.broadcast_to( MultiQueryAttentionBlock.sinusoid_position_encoding( seq_size, feat_size ), program_encoding.shape, ) program_encoding += position_encodings for e in attention_encoders: program_encoding, _ = e(program_encoding, encoded_state=None) return program_encoding def _make_locations_encoding_onehot(self, inputs, batch_size): """Creates location encoding using onehot representation.""" memory = inputs['memory'] registers = inputs['registers'] locations = jnp.concatenate([memory, registers], axis=-1) # [B, H, P, D] locations = jnp.transpose(locations, [0, 2, 1, 3]) # [B, P, H, D] # One-hot encode the values in the memory and average everything across # permutations. 
locations_onehot = jax.nn.one_hot( locations, self._task_spec.num_location_values, dtype=jnp.int32 ) locations_onehot = locations_onehot.reshape( [batch_size, self._task_spec.num_inputs, -1] ) return locations_onehot def _make_locations_encoding_binary(self, inputs, batch_size): """Creates location encoding using binary representation.""" memory_binary = int2bin(inputs['memory']).astype(jnp.float32) registers_binary = int2bin(inputs['registers']).astype(jnp.float32) # Note the extra I dimension for the length of the binary integer (32) locations = jnp.concatenate( [memory_binary, registers_binary], axis=-1 ) # [B, H, P, D*I] locations = jnp.transpose(locations, [0, 2, 1, 3]) # [B, P, H, D*I] locations = locations.reshape([batch_size, self._task_spec.num_inputs, -1]) return locations ######## 2.3 Prediction Network ######## class DistributionSupport(object): def __init__(self, value_max: float, num_bins: int): self.value_max = value_max self.num_bins = num_bins def mean(self, logits: jnp.ndarray) -> float: pass def scalar_to_two_hot(self, scalar: float) -> jnp.ndarray: pass class CategoricalHead(hk.Module): """A head that represents continuous values by a categorical distribution.""" def __init__( self, embedding_dim: int, support: DistributionSupport, name: str = 'CategoricalHead', ): super().__init__(name=name) self._value_support = support self._embedding_dim = embedding_dim self._head = make_head_network( embedding_dim, output_size=self._value_support.num_bins ) def __call__(self, x: jnp.ndarray): # For training returns the logits, for inference the mean. logits = self._head(x) probs = jax.nn.softmax(logits) mean = jax.vmap(self._value_support.mean)(probs) return dict(logits=logits, mean=mean) class PredictionNet(hk.Module): """MuZero prediction network.""" def __init__( self, task_spec: TaskSpec, value_max: float, value_num_bins: int, embedding_dim: int, name: str = 'prediction', ): super().__init__(name=name) self.task_spec = task_spec self.support = DistributionSupport(self.value_max, self.value_num_bins) self.embedding_dim = embedding_dim def __call__(self, embedding: jnp.ndarray): policy_head = make_head_network( self.embedding_dim, self.task_spec.num_actions ) value_head = CategoricalHead(self.embedding_dim, self.support) latency_value_head = CategoricalHead(self.embedding_dim, self.support) correctness_value = value_head(embedding) latency_value = latency_value_head(embedding) return NetworkOutput( value=correctness_value['mean'] + latency_value['mean'], correctness_value_logits=correctness_value['logits'], latency_value_logits=latency_value['logits'], policy=policy_head(embedding), ) class MinMaxStats(object): """A class that holds the min-max values of the tree.""" def __init__(self, known_bounds: Optional[KnownBounds]): self.maximum = known_bounds.max if known_bounds else -MAXIMUM_FLOAT_VALUE self.minimum = known_bounds.min if known_bounds else MAXIMUM_FLOAT_VALUE def update(self, value: float): self.maximum = max(self.maximum, value) self.minimum = min(self.minimum, value) def normalize(self, value: float) -> float: if self.maximum > self.minimum: # We normalize only when we have set the maximum and minimum values. 
return (value - self.minimum) / (self.maximum - self.minimum) return value class Player(object): pass class Node(object): """MCTS node.""" def __init__(self, prior: float): self.visit_count = 0 self.to_play = -1 self.prior = prior self.value_sum = 0 self.children = {} self.hidden_state = None self.reward = 0 def expanded(self) -> bool: return bool(self.children) def value(self) -> float: if self.visit_count == 0: return 0 return self.value_sum / self.visit_count class ActionHistory(object): """Simple history container used inside the search. Only used to keep track of the actions executed. """ def __init__(self, history: Sequence[Action], action_space_size: int): self.history = list(history) self.action_space_size = action_space_size def clone(self): return ActionHistory(self.history, self.action_space_size) def add_action(self, action: Action): self.history.append(action) def last_action(self) -> Action: return self.history[-1] def action_space(self) -> Sequence[Action]: return [Action(i) for i in range(self.action_space_size)] def to_play(self) -> Player: return Player() class Target(NamedTuple): correctness_value: float latency_value: float policy: Sequence[int] bootstrap_discount: float class Sample(NamedTuple): observation: Dict[str, jnp.ndarray] bootstrap_observation: Dict[str, jnp.ndarray] target: Target class Game(object): """A single episode of interaction with the environment.""" def __init__( self, action_space_size: int, discount: float, task_spec: TaskSpec ): self.task_spec = task_spec self.environment = AssemblyGame(task_spec) self.history = [] self.rewards = [] self.latency_reward = 0 self.child_visits = [] self.root_values = [] self.action_space_size = action_space_size self.discount = discount def terminal(self) -> bool: # Game specific termination rules. # For sorting, a game is terminal if we sort all sequences correctly or # we reached the end of the buffer. pass def is_correct(self) -> bool: # Whether the current algorithm solves the game. pass def legal_actions(self) -> Sequence[Action]: # Game specific calculation of legal actions. return [] def apply(self, action: Action): _, reward = self.environment.step(action) self.rewards.append(reward) self.history.append(action) if self.terminal() and self.is_correct(): self.latency_reward = self.environment.latency_reward() def store_search_statistics(self, root: Node): sum_visits = sum(child.visit_count for child in root.children.values()) action_space = (Action(index) for index in range(self.action_space_size)) self.child_visits.append( [ root.children[a].visit_count / sum_visits if a in root.children else 0 for a in action_space ] ) self.root_values.append(root.value()) def make_observation(self, state_index: int): if state_index == -1: return self.environment.observation() env = AssemblyGame(self.task_spec) for action in self.history[:state_index]: observation, _ = env.step(action) return observation def make_target( # pylint: disable-next=unused-argument self, state_index: int, td_steps: int, to_play: Player ) -> Target: """Creates the value target for training.""" # The value target is the discounted sum of all rewards until N steps # into the future, to which we will add the discounted boostrapped future # value. 
bootstrap_index = state_index + td_steps for i, reward in enumerate(self.rewards[state_index:bootstrap_index]): value += reward * self.discount**i # pytype: disable=unsupported-operands if bootstrap_index < len(self.root_values): bootstrap_discount = self.discount**td_steps else: bootstrap_discount = 0 return Target( value, self.latency_reward, self.child_visits[state_index], bootstrap_discount, ) def to_play(self) -> Player: return Player() def action_history(self) -> ActionHistory: return ActionHistory(self.history, self.action_space_size) class ReplayBuffer(object): """Replay buffer object storing games for training.""" def __init__(self, config: AlphaDevConfig): self.window_size = config.window_size self.batch_size = config.batch_size self.buffer = [] def save_game(self, game): if len(self.buffer) > self.window_size: self.buffer.pop(0) self.buffer.append(game) def sample_batch(self, td_steps: int) -> Sequence[Sample]: games = [self.sample_game() for _ in range(self.batch_size)] game_pos = [(g, self.sample_position(g)) for g in games] # pylint: disable=g-complex-comprehension return [ Sample( observation=g.make_observation(i), bootstrap_observation=g.make_observation(i + td_steps), target=g.make_target(i, td_steps, g.to_play()), ) for (g, i) in game_pos ] # pylint: enable=g-complex-comprehension def sample_game(self) -> Game: # Sample game from buffer either uniformly or according to some priority. return self.buffer[0] # pylint: disable-next=unused-argument def sample_position(self, game) -> int: # Sample position from game either uniformly or according to some priority. return -1 class SharedStorage(object): """Controls which network is used at inference.""" def __init__(self): self._networks = {} def latest_network(self) -> Network: if self._networks: return self._networks[max(self._networks.keys())] else: # policy -> uniform, value -> 0, reward -> 0 return make_uniform_network() def save_network(self, step: int, network: Network): self._networks[step] = network ##### End Helpers ######## ########################## # AlphaDev training is split into two independent parts: Network training and # self-play data generation. # These two parts only communicate by transferring the latest network checkpoint # from the training to the self-play, and the finished games from the self-play # to the training. def alphadev(config: AlphaDevConfig): storage = SharedStorage() replay_buffer = ReplayBuffer(config) for _ in range(config.num_actors): launch_job(run_selfplay, config, storage, replay_buffer) train_network(config, storage, replay_buffer) return storage.latest_network() ##################################### ####### 4. Part 1: Self-Play ######## # Each self-play job is independent of all others; it takes the latest network # snapshot, produces a game and makes it available to the training job by # writing it to a shared replay buffer. def run_selfplay( config: AlphaDevConfig, storage: SharedStorage, replay_buffer: ReplayBuffer ): while True: network = storage.latest_network() game = play_game(config, network) replay_buffer.save_game(game) def play_game(config: AlphaDevConfig, network: Network) -> Game: """Plays an AlphaDev game. Each game is produced by starting at the initial empty program, then repeatedly executing a Monte Carlo Tree Search to generate moves until the end of the game is reached. Args: config: An instance of the AlphaDev configuration. network: Networks used for inference. Returns: The played game. 
""" game = config.new_game() while not game.terminal() and len(game.history) < config.max_moves: min_max_stats = MinMaxStats(config.known_bounds) # Initialisation of the root node and addition of exploration noise root = Node(0) current_observation = game.make_observation(-1) network_output = network.inference(current_observation) _expand_node( root, game.to_play(), game.legal_actions(), network_output, reward=0 ) _backpropagate( [root], network_output.value, game.to_play(), config.discount, min_max_stats, ) _add_exploration_noise(config, root) # We then run a Monte Carlo Tree Search using the environment. run_mcts( config, root, game.action_history(), network, min_max_stats, game.environment, ) action = _select_action(config, len(game.history), root, network) game.apply(action) game.store_search_statistics(root) return game def run_mcts( config: AlphaDevConfig, root: Node, action_history: ActionHistory, network: Network, min_max_stats: MinMaxStats, env: AssemblyGame, ): """Runs the Monte Carlo Tree Search algorithm. To decide on an action, we run N simulations, always starting at the root of the search tree and traversing the tree according to the UCB formula until we reach a leaf node. Args: config: AlphaDev configuration root: The root node of the MCTS tree from which we start the algorithm action_history: history of the actions taken so far. network: instances of the networks that will be used. min_max_stats: min-max statistics for the tree. env: an instance of the AssemblyGame. """ for _ in range(config.num_simulations): history = action_history.clone() node = root search_path = [node] sim_env = env.clone() while node.expanded(): action, node = _select_child(config, node, min_max_stats) sim_env.step(action) history.add_action(action) search_path.append(node) # Inside the search tree we use the environment to obtain the next # observation and reward given an action. 
observation, reward = sim_env.step(action) network_output = network.inference(observation) _expand_node( node, history.to_play(), history.action_space(), network_output, reward ) _backpropagate( search_path, network_output.value, history.to_play(), config.discount, min_max_stats, ) def _select_action( # pylint: disable-next=unused-argument config: AlphaDevConfig, num_moves: int, node: Node, network: Network ): visit_counts = [ (child.visit_count, action) for action, child in node.children.items() ] t = config.visit_softmax_temperature_fn( training_steps=network.training_steps() ) _, action = softmax_sample(visit_counts, t) return action def _select_child( config: AlphaDevConfig, node: Node, min_max_stats: MinMaxStats ): """Selects the child with the highest UCB score.""" _, action, child = max( (_ucb_score(config, node, child, min_max_stats), action, child) for action, child in node.children.items() ) return action, child def _ucb_score( config: AlphaDevConfig, parent: Node, child: Node, min_max_stats: MinMaxStats, ) -> float: """Computes the UCB score based on its value + exploration based on prior.""" pb_c = ( math.log((parent.visit_count + config.pb_c_base + 1) / config.pb_c_base) + config.pb_c_init ) pb_c *= math.sqrt(parent.visit_count) / (child.visit_count + 1) prior_score = pb_c * child.prior if child.visit_count > 0: value_score = min_max_stats.normalize( child.reward + config.discount * child.value() ) else: value_score = 0 return prior_score + value_score def _expand_node( node: Node, to_play: Player, actions: Sequence[Action], network_output: NetworkOutput, reward: float, ): """Expands the node using value, reward and policy predictions from the NN.""" node.to_play = to_play node.hidden_state = network_output.hidden_state node.reward = reward policy = {a: math.exp(network_output.policy_logits[a]) for a in actions} policy_sum = sum(policy.values()) for action, p in policy.items(): node.children[action] = Node(p / policy_sum) def _backpropagate( search_path: Sequence[Node], value: float, to_play: Player, discount: float, min_max_stats: MinMaxStats, ): """Propagates the evaluation all the way up the tree to the root.""" for node in reversed(search_path): node.value_sum += value if node.to_play == to_play else -value node.visit_count += 1 min_max_stats.update(node.value()) value = node.reward + discount * value def _add_exploration_noise(config: AlphaDevConfig, node: Node): """Adds dirichlet noise to the prior of the root to encourage exploration.""" actions = list(node.children.keys()) noise = numpy.random.dirichlet([config.root_dirichlet_alpha] * len(actions)) frac = config.root_exploration_fraction for a, n in zip(actions, noise): node.children[a].prior = node.children[a].prior * (1 - frac) + n * frac def train_network( config: AlphaDevConfig, storage: SharedStorage, replay_buffer: ReplayBuffer ): """Trains the network on data stored in the replay buffer.""" network = Network(config.hparams, config.task_spec) target_network = Network(config.hparams, config.task_spec) optimizer = optax.sgd(config.lr_init, config.momentum) optimizer_state = optimizer.init(network.get_params()) for i in range(config.training_steps): if i % config.checkpoint_interval == 0: storage.save_network(i, network) if i % config.target_network_interval == 0: target_network = network.copy() batch = replay_buffer.sample_batch(config.num_unroll_steps, config.td_steps) optimizer_state = _update_weights( optimizer, optimizer_state, network, target_network, batch) storage.save_network(config.training_steps, network)
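# NOTE: `softmax_sample` is referenced by `_select_action` above but is not defined in
# this copy of the pseudocode. The sketch below is one plausible stand-in, assuming the
# intent is to sample an action in proportion to softmax(visit_count / temperature),
# with a greedy argmax at temperature 0; the exact scheme used by AlphaDev is not
# specified here.
import numpy


def softmax_sample(visit_counts, temperature: float):
  """Returns one (visit_count, action) pair sampled from a softmax over visit counts."""
  counts = numpy.array([count for count, _ in visit_counts], dtype=numpy.float64)
  if temperature == 0.0:
    return visit_counts[int(numpy.argmax(counts))]
  logits = counts / temperature
  probs = numpy.exp(logits - logits.max())
  probs /= probs.sum()
  return visit_counts[int(numpy.random.choice(len(visit_counts), p=probs))]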
AlphaDev-main
alpha_dev/pseudocode.py
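# The `DistributionSupport` class in alpha_dev/pseudocode.py above declares `mean` and
# `scalar_to_two_hot` but leaves both unimplemented. Below is one plausible completion,
# assuming `num_bins` evenly spaced atoms on [0, value_max] and the usual MuZero-style
# "two-hot" projection of a scalar onto its two neighbouring atoms; the spacing and any
# value transform are assumptions, not taken from the pseudocode.
import jax.numpy as jnp


class DistributionSupportSketch:
  """Categorical value support over evenly spaced atoms (illustrative only)."""

  def __init__(self, value_max: float, num_bins: int):
    self.value_max = value_max
    self.num_bins = num_bins
    self.atoms = jnp.linspace(0.0, value_max, num_bins)  # assumed atom locations

  def mean(self, probs: jnp.ndarray) -> jnp.ndarray:
    # Expected value of a categorical distribution over the atoms.
    return jnp.sum(probs * self.atoms, axis=-1)

  def scalar_to_two_hot(self, scalar: jnp.ndarray) -> jnp.ndarray:
    # Spread a scalar target over the two atoms that bracket it.
    scalar = jnp.clip(scalar, 0.0, self.value_max)
    step = self.value_max / (self.num_bins - 1)
    lower = jnp.clip(jnp.floor(scalar / step).astype(jnp.int32), 0, self.num_bins - 2)
    upper_weight = scalar / step - lower
    two_hot = jnp.zeros(self.num_bins)
    two_hot = two_hot.at[lower].set(1.0 - upper_weight)
    return two_hot.at[lower + 1].set(upper_weight)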
from alpha_dev.model import AlphaDev
AlphaDev-main
alpha_dev/__init__.py
from torch.nn import Module

from alpha_dev.transformer import AutoregressiveWrapper, Decoder, Transformer


class AlphaDev(Module):
    """
    AlphaDev is a decoder-only transformer model. It wraps a Transformer in an
    AutoregressiveWrapper, built with default or user-specified parameters.

    Args:
    - num_tokens: Number of tokens in the vocabulary
    - max_seq_len: Maximum sequence length
    - dim: Dimension of the model
    - depth: Number of decoder layers
    - dim_head: Dimension of each attention head
    - heads: Number of attention heads
    - use_abs_pos_emb: Whether to use absolute position embeddings
    - alibi_pos_bias: Whether to use ALiBi positional bias
    - alibi_num_heads: Number of heads that receive the ALiBi bias
    - rotary_xpos: Whether to use rotary (xpos) position embeddings
    - attn_flash: Whether to use flash attention
    - attn_one_kv_head: Whether to share a single key/value head (multi-query attention)
    - qk_norm: Query-key normalization
    - attn_qk_norm: Attention query-key normalization
    - attn_qk_norm_dim_scale: Attention query-key normalization dimension scale
    """

    def __init__(
        self,
        num_tokens=50432,
        max_seq_len=8192,
        dim=2560,
        depth=32,
        dim_head=128,
        heads=24,
        use_abs_pos_emb=False,
        alibi_pos_bias=True,
        alibi_num_heads=12,
        rotary_xpos=True,
        attn_flash=True,
        attn_one_kv_head=True,
        qk_norm=True,
        attn_qk_norm=True,
        attn_qk_norm_dim_scale=True,
    ):
        super().__init__()

        try:
            self.decoder = Transformer(
                num_tokens=num_tokens,
                max_seq_len=max_seq_len,
                use_abs_pos_emb=use_abs_pos_emb,
                attn_layers=Decoder(
                    dim=dim,
                    depth=depth,
                    dim_head=dim_head,
                    heads=heads,
                    alibi_pos_bias=alibi_pos_bias,
                    alibi_num_heads=alibi_num_heads,
                    rotary_xpos=rotary_xpos,
                    attn_flash=attn_flash,
                    # The bundled Attention layer exposes `one_kv_head` (multi-query
                    # attention) rather than a grouped `kv_heads` count.
                    attn_one_kv_head=attn_one_kv_head,
                    qk_norm=qk_norm,
                    attn_qk_norm=attn_qk_norm,
                    attn_qk_norm_dim_scale=attn_qk_norm_dim_scale,
                ),
            )
            self.decoder = AutoregressiveWrapper(self.decoder)
        except Exception as e:
            print("Failed to initialize AlphaDev: ", e)
            raise

    def forward(self, text_tokens, **kwargs):
        # Delegate to the AutoregressiveWrapper, which shifts the tokens internally
        # and returns (logits, loss).
        try:
            return self.decoder(text_tokens, **kwargs)
        except Exception as e:
            print("Failed in forward method: ", e)
            raise
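# Minimal usage sketch (not from the original sources): build a small illustrative
# configuration and run one forward pass. Sizes are much smaller than the defaults,
# and flash attention is disabled so the example does not require PyTorch 2.x.
if __name__ == "__main__":
    import torch

    model = AlphaDev(
        num_tokens=1000,
        max_seq_len=256,
        dim=128,
        depth=2,
        heads=4,
        alibi_num_heads=2,
        attn_flash=False,
    )
    tokens = torch.randint(0, 1000, (2, 64))  # (batch, sequence) of token ids
    logits, loss = model(tokens)              # AutoregressiveWrapper returns (logits, loss)
    print(logits.shape, loss.item())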
AlphaDev-main
alpha_dev/model.py
from functools import partial from typing import Optional import torch from torch import nn, einsum, Tensor import torch.nn.functional as F from collections import namedtuple from functools import wraps from packaging import version from dataclasses import dataclass from einops import rearrange # constants EfficientAttentionConfig = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient']) @dataclass class Intermediates: qk_similarities: Optional[Tensor] = None pre_softmax_attn: Optional[Tensor] = None post_softmax_attn: Optional[Tensor] = None def to_tuple(self): return (self.qk_similarities, self.pre_softmax_attn, self.post_softmax_attn) # helpers def exists(val): return val is not None def default(val, d): return val if exists(val) else d def compact(arr): return [*filter(exists, arr)] def once(fn): called = False @wraps(fn) def inner(x): nonlocal called if called: return called = True return fn(x) return inner print_once = once(print) # functions for creating causal mask # need a special one for onnx cpu (no support for .triu) def create_causal_mask(i, j, device): return torch.ones((i, j), device = device, dtype = torch.bool).triu(j - i + 1) def onnx_create_causal_mask(i, j, device): r = torch.arange(i, device = device) causal_mask = rearrange(r, 'i -> i 1') < rearrange(r, 'j -> 1 j') causal_mask = F.pad(causal_mask, (j - i, 0), value = False) return causal_mask # main class class Attend(nn.Module): def __init__( self, *, dropout = 0., causal = False, heads = None, talking_heads = False, sparse_topk = None, scale = None, qk_norm = False, flash = False, add_zero_kv = False, onnxable = False ): super().__init__() self.scale = scale self.qk_norm = qk_norm self.causal = causal self.create_causal_mask = onnx_create_causal_mask if onnxable else create_causal_mask self.attn_fn = partial(F.softmax, dtype = torch.float32) if not qk_norm else F.softmax self.dropout = dropout self.attn_dropout = nn.Dropout(dropout) # talking heads assert not (flash and talking_heads), 'talking heads not compatible with flash attention' self.talking_heads = talking_heads if talking_heads: self.pre_softmax_talking_heads = nn.Conv2d(heads, heads, 1, bias = False) self.post_softmax_talking_heads = nn.Conv2d(heads, heads, 1, bias = False) # sparse topk assert not (flash and sparse_topk), 'sparse topk not compatible with flash attention' self.sparse_topk = sparse_topk # add a key / value token composed of zeros # in case this helps controlling outliers, proposed by https://www.evanmiller.org/attention-is-off-by-one.html self.add_zero_kv = add_zero_kv # flash attention self.flash = flash assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above' # determine efficient attention configs for cuda and cpu self.cpu_config = EfficientAttentionConfig(True, True, True) self.cuda_config = None if not torch.cuda.is_available() or not flash: return device_properties = torch.cuda.get_device_properties(torch.device('cuda')) if device_properties.major == 8 and device_properties.minor == 0: print_once('A100 GPU detected, using flash attention if input tensor is on cuda') self.cuda_config = EfficientAttentionConfig(True, False, False) else: print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda') self.cuda_config = EfficientAttentionConfig(False, True, True) def flash_attn( self, q, k, v, mask = None, attn_bias = None ): batch, heads, q_len, _, k_len, 
is_cuda, device = *q.shape, k.shape[-2], q.is_cuda, q.device # Recommended for multi-query single-key-value attention by Tri Dao # kv shape torch.Size([1, 512, 64]) -> torch.Size([1, 8, 512, 64]) if k.ndim == 3: k = rearrange(k, 'b ... -> b 1 ...').expand_as(q) if v.ndim == 3: v = rearrange(v, 'b ... -> b 1 ...').expand_as(q) # handle scale - by default they scale by dim_head ** -0.5, but need to take care if using cosine sim attention if self.qk_norm: default_scale = q.shape[-1] ** -0.5 q = q * (default_scale / self.scale) # Check if mask exists and expand to compatible shape # The mask is B L, so it would have to be expanded to B H N L causal = self.causal if exists(mask): assert mask.ndim == 4 mask = mask.expand(batch, heads, q_len, k_len) # manually handle causal mask, if another mask was given if causal: causal_mask = self.create_causal_mask(q_len, k_len, device = device) mask = mask & ~causal_mask causal = False # handle alibi positional bias # convert from bool to float if exists(attn_bias): attn_bias = rearrange(attn_bias, 'h i j -> 1 h i j').expand(batch, heads, -1, -1) # if mask given, the mask would already contain the causal mask from above logic # otherwise, if no mask given but still causal, mask out alibi positional bias to a large negative number mask_value = -torch.finfo(q.dtype).max if exists(mask): attn_bias = attn_bias.masked_fill(~mask, mask_value // 2) elif causal: causal_mask = self.create_causal_mask(q_len, k_len, device = device) attn_bias = attn_bias.masked_fill(causal_mask, mask_value // 2) causal = False # scaled_dot_product_attention handles attn_mask either as bool or additive bias # make it an additive bias here mask = attn_bias # Check if there is a compatible device for flash attention config = self.cuda_config if is_cuda else self.cpu_config # pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale with torch.backends.cuda.sdp_kernel(**config._asdict()): out = F.scaled_dot_product_attention( q, k, v, attn_mask = mask, dropout_p = self.dropout if self.training else 0., is_causal = causal ) return out, Intermediates() def forward( self, q, k, v, mask = None, attn_bias = None, prev_attn = None ): """ einstein notation b - batch h - heads n, i, j - sequence length (base sequence length, source, target) d - feature dimension """ n, device = q.shape[-2], q.device scale = default(self.scale, q.shape[-1] ** -0.5) if self.add_zero_kv: k, v = map(lambda t: F.pad(t, (0, 0, 1, 0), value = 0.), (k, v)) if exists(mask): mask = F.pad(mask, (1, 0), value = True) if exists(attn_bias): attn_bias = F.pad(attn_bias, (1, 0), value = 0.) 
if self.flash: assert not exists(prev_attn), 'residual attention not compatible with flash attention' return self.flash_attn(q, k, v, mask = mask, attn_bias = attn_bias) kv_einsum_eq = 'b j d' if k.ndim == 3 else 'b h j d' dots = einsum(f'b h i d, {kv_einsum_eq} -> b h i j', q, k) * scale if exists(prev_attn): dots = dots + prev_attn qk_similarities = dots.clone() if self.talking_heads: dots = self.pre_softmax_talking_heads(dots) if exists(attn_bias): dots = dots + attn_bias i, j, dtype = *dots.shape[-2:], dots.dtype mask_value = -torch.finfo(dots.dtype).max if exists(self.sparse_topk) and self.sparse_topk < j: top_values, _ = dots.topk(self.sparse_topk, dim = -1) sparse_topk_mask = dots < top_values[..., -1:] mask = (mask & sparse_topk_mask) if exists(mask) else sparse_topk_mask if exists(mask): dots = dots.masked_fill(~mask, mask_value) if self.causal: causal_mask = self.create_causal_mask(i, j, device = device) dots = dots.masked_fill(causal_mask, mask_value) pre_softmax_attn = dots.clone() attn = self.attn_fn(dots, dim = -1) attn = attn.type(dtype) post_softmax_attn = attn.clone() attn = self.attn_dropout(attn) if self.talking_heads: attn = self.post_softmax_talking_heads(attn) out = einsum(f'b h i j, {kv_einsum_eq} -> b h i d', attn, v) intermediates = Intermediates( qk_similarities = qk_similarities, pre_softmax_attn = pre_softmax_attn, post_softmax_attn = post_softmax_attn ) return out, intermediates # cascading heads logic def to_single_heads(t, dim = 1): heads = t.unbind(dim = dim) return tuple(head.unsqueeze(dim) for head in heads) class CascadingHeads(nn.Module): def __init__(self, attend: Attend): super().__init__() self.attend = attend def forward( self, q, k, v, mask = None, attn_bias = None, prev_attn = None ): assert q.shape[-1] == v.shape[-1], 'cascading heads can only be done if query / key and value head dimensions are the same' # split inputs into per-head inputs heads = q.shape[1] queries = to_single_heads(q) keys = to_single_heads(k) if k.ndim == 4 else ((k,) * heads) values = to_single_heads(v) if v.ndim == 4 else ((v,) * heads) mask = (mask,) * heads attn_bias = to_single_heads(attn_bias, dim = 0) if exists(attn_bias) else ((None,) * heads) prev_attn = to_single_heads(prev_attn) if exists(prev_attn) else ((None,) * heads) # now loop through each head, without output of previous head summed with the next head # thus cascading all_outs = [] all_intermediates = [] prev_head_out = None for h_q, h_k, h_v, h_mask, h_attn_bias, h_prev_attn in zip(queries, keys, values, mask, attn_bias, prev_attn): if exists(prev_head_out): h_q = h_q + prev_head_out out, intermediates = self.attend( h_q, h_k, h_v, mask = h_mask, attn_bias = h_attn_bias, prev_attn = h_prev_attn ) prev_head_out = out all_outs.append(out) all_intermediates.append(intermediates) # cat all output heads all_outs = torch.cat(all_outs, dim = 1) # cat all intermediates, if they exist qk_similarities, pre_softmax_attn, post_softmax_attn = zip(*map(lambda i: i.to_tuple(), all_intermediates)) qk_similarities, pre_softmax_attn, post_softmax_attn = map(compact, (qk_similarities, pre_softmax_attn, post_softmax_attn)) aggregated_intermediates = Intermediates( qk_similarities = torch.cat(qk_similarities, dim = 1) if len(qk_similarities) > 0 else None, pre_softmax_attn = torch.cat(pre_softmax_attn, dim = 1) if len(pre_softmax_attn) > 0 else None, post_softmax_attn = torch.cat(post_softmax_attn, dim = 1) if len(post_softmax_attn) > 0 else None ) return all_outs, aggregated_intermediates
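# Quick usage sketch of the Attend module above (shapes are illustrative, not taken
# from the original sources): q, k and v are (batch, heads, seq_len, dim_head); with
# flash = False this exercises the plain einsum path and returns the output together
# with the Intermediates dataclass.
if __name__ == '__main__':
    attend = Attend(causal = True, dropout = 0., flash = False)

    q = torch.randn(2, 8, 128, 64)
    k = torch.randn(2, 8, 128, 64)
    v = torch.randn(2, 8, 128, 64)

    out, intermediates = attend(q, k, v)
    print(out.shape, intermediates.pre_softmax_attn.shape)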
AlphaDev-main
alpha_dev/attend.py
import math from dataclasses import dataclass from functools import partial, wraps from inspect import isfunction # constants from math import ceil from random import random from typing import Callable, List, Optional import torch import torch.nn.functional as F from einops import pack, rearrange, reduce, repeat, unpack from torch import Tensor, einsum, nn from alpha_dev.attend import Attend, Intermediates def eval_decorator(fn): def inner(self, *args, **kwargs): was_training = self.training self.eval() out = fn(self, *args, **kwargs) self.train(was_training) return out return inner # nucleus def top_p(logits, thres = 0.9): sorted_logits, sorted_indices = torch.sort(logits, descending=True) cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1) sorted_indices_to_remove = cum_probs > (1 - thres) sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone() sorted_indices_to_remove[:, 0] = 0 sorted_logits[sorted_indices_to_remove] = float('-inf') return sorted_logits.scatter(1, sorted_indices, sorted_logits) # topk def top_k(logits, thres = 0.9): k = ceil((1 - thres) * logits.shape[-1]) val, ind = torch.topk(logits, k) probs = torch.full_like(logits, float('-inf')) probs.scatter_(1, ind, val) return probs # top_a def top_a(logits, min_p_pow=2.0, min_p_ratio=0.02): probs = F.softmax(logits, dim=-1) limit = torch.pow(torch.max(probs), min_p_pow) * min_p_ratio logits[probs < limit] = float('-inf') logits[probs >= limit] = 1 return logits # autoregressive wrapper class class AutoregressiveWrapper(nn.Module): def __init__( self, net, ignore_index = -100, pad_value = 0, mask_prob = 0. ): super().__init__() self.pad_value = pad_value self.ignore_index = ignore_index self.net = net self.max_seq_len = net.max_seq_len # paper shows masking (MLM) in conjunction with autoregressive decoder-only training leads to big improvements https://arxiv.org/abs/2210.13432 assert mask_prob < 1. 
self.mask_prob = mask_prob @torch.no_grad() @eval_decorator def generate( self, start_tokens, seq_len, eos_token = None, temperature = 1., filter_logits_fn = top_k, filter_thres = 0.9, min_p_pow = 2.0, min_p_ratio = 0.02, **kwargs ): start_tokens, ps = pack([start_tokens], '* n') b, t = start_tokens.shape out = start_tokens for _ in range(seq_len): x = out[:, -self.max_seq_len:] logits = self.net(x, **kwargs)[:, -1] if filter_logits_fn in {top_k, top_p}: filtered_logits = filter_logits_fn(logits, thres = filter_thres) probs = F.softmax(filtered_logits / temperature, dim=-1) elif filter_logits_fn is top_a: filtered_logits = filter_logits_fn(logits, min_p_pow = min_p_pow, min_p_ratio= min_p_ratio) probs = F.softmax(filtered_logits / temperature, dim=-1) sample = torch.multinomial(probs, 1) out = torch.cat((out, sample), dim=-1) if exists(eos_token): is_eos_tokens = (out == eos_token) if is_eos_tokens.any(dim = -1).all(): # mask out everything after the eos tokens shifted_is_eos_tokens = F.pad(is_eos_tokens, (1, -1)) mask = shifted_is_eos_tokens.float().cumsum(dim = -1) >= 1 out = out.masked_fill(mask, self.pad_value) break out = out[:, t:] out, = unpack(out, ps, '* n') return out def forward(self, x, return_loss=True, **kwargs): seq, ignore_index = x.shape[1], self.ignore_index inp, target = x[:, :-1], x[:, 1:] if self.mask_prob > 0.: rand = torch.randn(inp.shape, device = x.device) rand[:, 0] = -torch.finfo(rand.dtype).max # first token should not be masked out num_mask = min(int(seq * self.mask_prob), seq - 1) indices = rand.topk(num_mask, dim = -1).indices mask = ~torch.zeros_like(inp).scatter(1, indices, 1.).bool() kwargs.update(self_attn_context_mask = mask) logits = self.net(inp, **kwargs) loss = F.cross_entropy( rearrange(logits, 'b n c -> b c n'), target, ignore_index = ignore_index ) if return_loss: return logits, loss return logits DEFAULT_DIM_HEAD = 64 @dataclass class LayerIntermediates: hiddens: Optional[List[Tensor]] = None attn_intermediates: Optional[List[Intermediates]] = None layer_hiddens: Optional[List[Tensor]] = None attn_z_loss: Optional[Tensor] = None # helpers def exists(val): return val is not None def default(val, d): if exists(val): return val return d() if isfunction(d) else d def cast_tuple(val, depth): return val if isinstance(val, tuple) else (val,) * depth def maybe(fn): @wraps(fn) def inner(x, *args, **kwargs): if not exists(x): return x return fn(x, *args, **kwargs) return inner class always(): def __init__(self, val): self.val = val def __call__(self, *args, **kwargs): return self.val class not_equals(): def __init__(self, val): self.val = val def __call__(self, x, *args, **kwargs): return x != self.val class equals(): def __init__(self, val): self.val = val def __call__(self, x, *args, **kwargs): return x == self.val def Sequential(*modules): return nn.Sequential(*filter(exists, modules)) # tensor helpers def max_neg_value(tensor): return -torch.finfo(tensor.dtype).max def l2norm(t, groups = 1): t = rearrange(t, '... (g d) -> ... g d', g = groups) t = F.normalize(t, p = 2, dim = -1) return rearrange(t, '... g d -> ... (g d)') def pad_at_dim(t, pad, dim = -1, value = 0.): dims_from_right = (- dim - 1) if dim < 0 else (t.ndim - dim - 1) zeros = ((0, 0) * dims_from_right) return F.pad(t, (*zeros, *pad), value = value) def or_reduce(masks): head, *body = masks for rest in body: head = head | rest return head # auxiliary loss helpers def calc_z_loss( pre_softmax_attns: List[Tensor], mask = None, weight = 1. 
): # the same loss applied to the mixture of experts router logits in https://arxiv.org/abs/2202.08906 # in the paper, in a tiny footnote, they mention using it on attention logits with stabilizing effects # also used in PaLM as one of the measures lse = 0. for attn in pre_softmax_attns: lse = lse + attn.logsumexp(dim = -1) loss = torch.square(lse) loss = reduce(loss, 'b h n -> b n', 'sum') if not exists(mask): return loss.mean() * weight loss = loss[mask].sum() / mask.sum().clamp(min = 1e-5) return loss * weight # init helpers def init_zero_(layer): nn.init.constant_(layer.weight, 0.) if exists(layer.bias): nn.init.constant_(layer.bias, 0.) # keyword argument helpers def pick_and_pop(keys, d): values = list(map(lambda key: d.pop(key), keys)) return dict(zip(keys, values)) def group_dict_by_key(cond, d): return_val = [dict(),dict()] for key in d.keys(): match = bool(cond(key)) ind = int(not match) return_val[ind][key] = d[key] return (*return_val,) def string_begins_with(prefix, str): return str.startswith(prefix) def group_by_key_prefix(prefix, d): return group_dict_by_key(partial(string_begins_with, prefix), d) def groupby_prefix_and_trim(prefix, d): kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d) kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items()))) return kwargs_without_prefix, kwargs # initializations def deepnorm_init( transformer, beta, module_name_match_list = ['.ff.', '.to_v', '.to_out'] ): for name, module in transformer.named_modules(): if type(module) != nn.Linear: continue needs_beta_gain = any(map(lambda substr: substr in name, module_name_match_list)) gain = beta if needs_beta_gain else 1 nn.init.xavier_normal_(module.weight.data, gain = gain) if exists(module.bias): nn.init.constant_(module.bias.data, 0) # structured dropout, more effective than traditional attention dropouts def dropout_seq(seq, mask, dropout): b, n, *_, device = *seq.shape, seq.device logits = torch.randn(b, n, device = device) if exists(mask): mask_value = max_neg_value(logits) logits = logits.masked_fill(~mask, mask_value) keep_prob = 1. - dropout num_keep = max(1, int(keep_prob * n)) keep_indices = logits.topk(num_keep, dim = 1).indices batch_indices = torch.arange(b, device = device) batch_indices = rearrange(batch_indices, 'b -> b 1') seq = seq[batch_indices, keep_indices] if exists(mask): seq_counts = mask.sum(dim = -1) seq_keep_counts = torch.ceil(seq_counts * keep_prob).int() keep_mask = torch.arange(num_keep, device = device) < rearrange(seq_keep_counts, 'b -> b 1') mask = mask[batch_indices, keep_indices] & keep_mask return seq, mask # activations class ReluSquared(nn.Module): def forward(self, x): return F.relu(x) ** 2 # embedding class TokenEmbedding(nn.Module): def __init__(self, dim, num_tokens, l2norm_embed = False): super().__init__() self.l2norm_embed = l2norm_embed self.emb = nn.Embedding(num_tokens, dim) def forward(self, x): token_emb = self.emb(x) return l2norm(token_emb) if self.l2norm_embed else token_emb # positional embeddings class AbsolutePositionalEmbedding(nn.Module): def __init__(self, dim, max_seq_len, l2norm_embed = False): super().__init__() self.scale = dim ** -0.5 if not l2norm_embed else 1. 
self.max_seq_len = max_seq_len self.l2norm_embed = l2norm_embed self.emb = nn.Embedding(max_seq_len, dim) def forward(self, x, pos = None): seq_len, device = x.shape[1], x.device assert seq_len <= self.max_seq_len, f'you are passing in a sequence length of {seq_len} but your absolute positional embedding has a max sequence length of {self.max_seq_len}' if not exists(pos): pos = torch.arange(seq_len, device = device) pos_emb = self.emb(pos) pos_emb = pos_emb * self.scale return l2norm(pos_emb) if self.l2norm_embed else pos_emb class ScaledSinusoidalEmbedding(nn.Module): def __init__(self, dim, theta = 10000): super().__init__() assert (dim % 2) == 0 self.scale = nn.Parameter(torch.ones(1) * dim ** -0.5) half_dim = dim // 2 freq_seq = torch.arange(half_dim).float() / half_dim inv_freq = theta ** -freq_seq self.register_buffer('inv_freq', inv_freq, persistent = False) def forward(self, x, pos = None): seq_len, device = x.shape[1], x.device if not exists(pos): pos = torch.arange(seq_len, device = device) emb = einsum('i, j -> i j', pos, self.inv_freq) emb = torch.cat((emb.sin(), emb.cos()), dim = -1) return emb * self.scale class RelativePositionBias(nn.Module): def __init__(self, scale, causal = False, num_buckets = 32, max_distance = 128, heads = 8): super().__init__() self.scale = scale self.causal = causal self.num_buckets = num_buckets self.max_distance = max_distance self.relative_attention_bias = nn.Embedding(num_buckets, heads) @staticmethod def _relative_position_bucket(relative_position, causal = True, num_buckets = 32, max_distance = 128): ret = 0 n = -relative_position if not causal: num_buckets //= 2 ret += (n < 0).long() * num_buckets n = torch.abs(n) else: n = torch.max(n, torch.zeros_like(n)) max_exact = num_buckets // 2 is_small = n < max_exact val_if_large = max_exact + ( torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).long() val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1)) ret += torch.where(is_small, n, val_if_large) return ret @property def device(self): return next(self.parameters()).device def forward(self, i, j): device = self.device q_pos = torch.arange(j - i, j, dtype = torch.long, device = device) k_pos = torch.arange(j, dtype = torch.long, device = device) rel_pos = k_pos[None, :] - q_pos[:, None] rp_bucket = self._relative_position_bucket(rel_pos, causal = self.causal, num_buckets = self.num_buckets, max_distance = self.max_distance) values = self.relative_attention_bias(rp_bucket) bias = rearrange(values, 'i j h -> h i j') return bias * self.scale class AlibiPositionalBias(nn.Module): def __init__(self, heads, total_heads, **kwargs): super().__init__() self.heads = heads self.total_heads = total_heads slopes = Tensor(self._get_slopes(heads)) slopes = rearrange(slopes, 'h -> h 1 1') self.register_buffer('slopes', slopes, persistent = False) self.register_buffer('bias', None, persistent = False) def get_bias(self, i, j, device): i_arange = torch.arange(j - i, j, device = device) j_arange = torch.arange(j, device = device) bias = -torch.abs(rearrange(j_arange, 'j -> 1 1 j') - rearrange(i_arange, 'i -> 1 i 1')) return bias @staticmethod def _get_slopes(heads): def get_slopes_power_of_2(n): start = (2**(-2**-(math.log2(n)-3))) ratio = start return [start*ratio**i for i in range(n)] if math.log2(heads).is_integer(): return get_slopes_power_of_2(heads) closest_power_of_2 = 2 ** math.floor(math.log2(heads)) return get_slopes_power_of_2(closest_power_of_2) + get_slopes_power_of_2(2 * 
closest_power_of_2)[0::2][:heads-closest_power_of_2] @property def device(self): return next(self.buffers()).device def forward(self, i, j): h, device = self.total_heads, self.device if exists(self.bias) and self.bias.shape[-1] >= j and self.bias.shape[-2] >= i: return self.bias[..., :i, :j] bias = self.get_bias(i, j, device) bias = bias * self.slopes num_heads_unalibied = h - bias.shape[0] bias = pad_at_dim(bias, (0, num_heads_unalibied), dim = 0) self.register_buffer('bias', bias, persistent = False) return self.bias class RotaryEmbedding(nn.Module): def __init__( self, dim, use_xpos = False, scale_base = 512, interpolation_factor = 1., base = 10000, base_rescale_factor = 1. ): super().__init__() # proposed by reddit user bloc97, to rescale rotary embeddings to longer sequence length without fine-tuning # has some connection to NTK literature # https://www.reddit.com/r/LocalLLaMA/comments/14lz7j5/ntkaware_scaled_rope_allows_llama_models_to_have/ base *= base_rescale_factor ** (dim / (dim - 2)) inv_freq = 1. / (base ** (torch.arange(0, dim, 2).float() / dim)) self.register_buffer('inv_freq', inv_freq) assert interpolation_factor >= 1. self.interpolation_factor = interpolation_factor if not use_xpos: self.register_buffer('scale', None) return scale = (torch.arange(0, dim, 2) + 0.4 * dim) / (1.4 * dim) self.scale_base = scale_base self.register_buffer('scale', scale) def forward(self, seq_len, device): t = torch.arange(seq_len, device = device).type_as(self.inv_freq) t = t / self.interpolation_factor freqs = torch.einsum('i , j -> i j', t, self.inv_freq) freqs = torch.cat((freqs, freqs), dim = -1) if not exists(self.scale): return freqs, 1. power = (torch.arange(seq_len, device = device) - (seq_len // 2)) / self.scale_base scale = self.scale ** rearrange(power, 'n -> n 1') scale = torch.cat((scale, scale), dim = -1) return freqs, scale def rotate_half(x): x = rearrange(x, '... (j d) -> ... 
j d', j = 2) x1, x2 = x.unbind(dim = -2) return torch.cat((-x2, x1), dim = -1) def apply_rotary_pos_emb(t, freqs, scale = 1): seq_len = t.shape[-2] freqs = freqs[-seq_len:, :] return (t * freqs.cos() * scale) + (rotate_half(t) * freqs.sin() * scale) # norms class Scale(nn.Module): def __init__(self, value, fn): super().__init__() self.value = value self.fn = fn def forward(self, x, **kwargs): out = self.fn(x, **kwargs) scale_fn = lambda t: t * self.value if not isinstance(out, tuple): return scale_fn(out) return (scale_fn(out[0]), *out[1:]) class ScaleNorm(nn.Module): def __init__(self, dim, eps = 1e-5): super().__init__() self.eps = eps self.g = nn.Parameter(torch.ones(1) * (dim ** -0.5)) def forward(self, x): norm = torch.norm(x, dim = -1, keepdim = True) return x / norm.clamp(min = self.eps) * self.g class RMSNorm(nn.Module): def __init__(self, dim): super().__init__() self.scale = dim ** 0.5 self.g = nn.Parameter(torch.ones(dim)) def forward(self, x): return F.normalize(x, dim = -1) * self.scale * self.g class SimpleRMSNorm(nn.Module): def __init__(self, dim): super().__init__() self.scale = dim ** 0.5 def forward(self, x): return F.normalize(x, dim = -1) * self.scale # residual and residual gates class Residual(nn.Module): def __init__(self, dim, scale_residual = False, scale_residual_constant = 1.): super().__init__() self.residual_scale = nn.Parameter(torch.ones(dim)) if scale_residual else None self.scale_residual_constant = scale_residual_constant def forward(self, x, residual): if exists(self.residual_scale): residual = residual * self.residual_scale if self.scale_residual_constant != 1: residual = residual * self.scale_residual_constant return x + residual # feedforward class GLU(nn.Module): def __init__( self, dim_in, dim_out, activation: Callable, mult_bias = False ): super().__init__() self.act = activation self.proj = nn.Linear(dim_in, dim_out * 2) self.mult_bias = nn.Parameter(torch.ones(dim_out)) if mult_bias else 1. def forward(self, x): x, gate = self.proj(x).chunk(2, dim = -1) return x * self.act(gate) * self.mult_bias class FeedForward(nn.Module): def __init__( self, dim, dim_out = None, mult = 4, glu = False, glu_mult_bias = False, swish = False, relu_squared = False, post_act_ln = False, dropout = 0., no_bias = False, zero_init_output = False ): super().__init__() inner_dim = int(dim * mult) dim_out = default(dim_out, dim) if relu_squared: activation = ReluSquared() elif swish: activation = nn.SiLU() else: activation = nn.GELU() if glu: project_in = GLU(dim, inner_dim, activation, mult_bias = glu_mult_bias) else: project_in = nn.Sequential( nn.Linear(dim, inner_dim, bias = not no_bias), activation ) self.ff = Sequential( project_in, nn.LayerNorm(inner_dim) if post_act_ln else None, nn.Dropout(dropout), nn.Linear(inner_dim, dim_out, bias = not no_bias) ) # init last linear layer to 0 if zero_init_output: init_zero_(self.ff[-1]) def forward(self, x): return self.ff(x) # attention. 
it is all we need class Attention(nn.Module): def __init__( self, dim, dim_head = DEFAULT_DIM_HEAD, heads = 8, causal = False, flash = False, talking_heads = False, head_scale = False, sparse_topk = None, num_mem_kv = 0, dropout = 0., on_attn = False, gate_values = False, zero_init_output = False, max_attend_past = None, qk_norm = False, qk_norm_groups = 1, qk_norm_scale = 10, qk_norm_dim_scale = False, one_kv_head = False, shared_kv = False, value_dim_head = None, tensor_product = False, # https://arxiv.org/abs/2208.06061 cascading_heads = False, add_zero_kv = False, # same as add_zero_attn in pytorch onnxable = False ): super().__init__() self.scale = dim_head ** -0.5 self.heads = heads self.causal = causal self.max_attend_past = max_attend_past value_dim_head = default(value_dim_head, dim_head) q_dim = k_dim = dim_head * heads v_dim = out_dim = value_dim_head * heads self.one_kv_head = one_kv_head if one_kv_head: k_dim = dim_head v_dim = value_dim_head out_dim = v_dim * heads self.to_q = nn.Linear(dim, q_dim, bias = False) self.to_k = nn.Linear(dim, k_dim, bias = False) # shared key / values, for further memory savings during inference assert not (shared_kv and value_dim_head != dim_head), 'key and value head dimensions must be equal for shared key / values' self.to_v = nn.Linear(dim, v_dim, bias = False) if not shared_kv else None # relations projection from tp-attention self.to_r = nn.Linear(dim, v_dim, bias = False) if tensor_product else None # add GLU gating for aggregated values, from alphafold2 self.to_v_gate = None if gate_values: self.to_v_gate = nn.Linear(dim, out_dim) nn.init.constant_(self.to_v_gate.weight, 0) nn.init.constant_(self.to_v_gate.bias, 1) # cosine sim attention self.qk_norm = qk_norm self.qk_norm_groups = qk_norm_groups self.qk_norm_scale = qk_norm_scale # whether to use the rmsnorm (equivalent to cosine sim attention when scale is equal to 1) - https://arxiv.org/abs/2302.05442 self.qk_norm_dim_scale = qk_norm_dim_scale self.qk_norm_q_scale = self.qk_norm_k_scale = 1 if qk_norm and qk_norm_dim_scale: self.qk_norm_q_scale = nn.Parameter(torch.ones(dim_head)) self.qk_norm_k_scale = nn.Parameter(torch.ones(dim_head)) assert (not qk_norm) or (dim_head % qk_norm_groups) == 0, 'dimension per attention head must be divisible by the qk norm groups' assert not (qk_norm and (dim_head // qk_norm_groups) <= 2), 'the group dimension may be too small (2 was too small in my tests, but 4 still works, surprisingly)' # attend class - includes core attention algorithm + talking heads self.attend = Attend( heads = heads, causal = causal, talking_heads = talking_heads, dropout = dropout, sparse_topk = sparse_topk, qk_norm = qk_norm, scale = qk_norm_scale if qk_norm else self.scale, add_zero_kv = add_zero_kv, flash = flash, onnxable = onnxable ) # head scaling self.head_scale = head_scale if head_scale: self.head_scale_params = nn.Parameter(torch.ones(1, heads, 1, 1)) # explicit topk sparse attention self.sparse_topk = sparse_topk # add memory key / values self.num_mem_kv = num_mem_kv if num_mem_kv > 0: self.mem_k = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head)) self.mem_v = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head)) # attention on attention self.attn_on_attn = on_attn self.to_out = nn.Sequential(nn.Linear(out_dim, dim * 2, bias = False), nn.GLU()) if on_attn else nn.Linear(out_dim, dim, bias = False) # init output projection 0 if zero_init_output: init_zero_(self.to_out) def forward( self, x, context = None, mask = None, context_mask = None, attn_mask = None, 
rel_pos = None, rotary_pos_emb = None, prev_attn = None, mem = None ): b, n, _, h, head_scale, device, has_context = *x.shape, self.heads, self.head_scale, x.device, exists(context) kv_input = default(context, x) q_input = x k_input = kv_input v_input = kv_input r_input = x if exists(mem): k_input = torch.cat((mem, k_input), dim = -2) v_input = torch.cat((mem, v_input), dim = -2) q = self.to_q(q_input) k = self.to_k(k_input) v = self.to_v(v_input) if exists(self.to_v) else k r = self.to_r(r_input) if exists(self.to_r) else None q = rearrange(q, 'b n (h d) -> b h n d', h = h) if not self.one_kv_head: k, v, r = map(lambda t: maybe(rearrange)(t, 'b n (h d) -> b h n d', h = h), (k, v, r)) if self.qk_norm: qk_l2norm = partial(l2norm, groups = self.qk_norm_groups) q, k = map(qk_l2norm, (q, k)) scale = self.qk_norm_scale q = q * self.qk_norm_q_scale k = k * self.qk_norm_k_scale if exists(rotary_pos_emb) and not has_context: freqs, xpos_scale = rotary_pos_emb l = freqs.shape[-1] q_xpos_scale, k_xpos_scale = (xpos_scale, xpos_scale ** -1.) if exists(xpos_scale) else (1., 1.) (ql, qr), (kl, kr), (vl, vr) = map(lambda t: (t[..., :l], t[..., l:]), (q, k, v)) ql, kl, vl = map(lambda arg: apply_rotary_pos_emb(arg[0], freqs, arg[1]), ((ql, q_xpos_scale), (kl, k_xpos_scale), (vl, k_xpos_scale))) q, k, v = map(lambda t: torch.cat(t, dim = -1), ((ql, qr), (kl, kr), (vl, vr))) input_mask = context_mask if has_context else mask if self.num_mem_kv > 0: mem_k, mem_v = map(lambda t: repeat(t, 'h n d -> b h n d', b = b), (self.mem_k, self.mem_v)) if self.qk_norm: mem_k = l2norm(mem_k) mem_k = mem_k * self.qk_norm_k_scale k = torch.cat((mem_k, k), dim = -2) v = torch.cat((mem_v, v), dim = -2) if exists(input_mask): input_mask = pad_at_dim(input_mask, (self.num_mem_kv, 0), dim = -1, value = True) i, j = map(lambda t: t.shape[-2], (q, k)) # determine masking mask_value = max_neg_value(q) masks = [] final_attn_mask = None if exists(input_mask): input_mask = rearrange(input_mask, 'b j -> b 1 1 j') masks.append(~input_mask) if exists(attn_mask): assert 2 <= attn_mask.ndim <= 4, 'attention mask must have greater than 2 dimensions but less than or equal to 4' if attn_mask.ndim == 2: attn_mask = rearrange(attn_mask, 'i j -> 1 1 i j') elif attn_mask.ndim == 3: attn_mask = rearrange(attn_mask, 'h i j -> 1 h i j') masks.append(~attn_mask) if exists(self.max_attend_past): range_q = torch.arange(j - i, j, device = device) range_k = torch.arange(j, device = device) dist = rearrange(range_q, 'i -> 1 1 i 1') - rearrange(range_k, 'j -> 1 1 1 j') max_attend_past_mask = dist > self.max_attend_past masks.append(max_attend_past_mask) if len(masks) > 0: final_attn_mask = ~or_reduce(masks) # prepare relative positional bias, if needed attn_bias = None if exists(rel_pos): attn_bias = rel_pos(i, j) # attention is all we need out, intermediates = self.attend( q, k, v, mask = final_attn_mask, attn_bias = attn_bias, prev_attn = prev_attn ) # https://arxiv.org/abs/2208.06061 proposes to add a residual for better gradients if exists(r): out = out * r + out # normformer scaling of heads if head_scale: out = out * self.head_scale_params # merge heads out = rearrange(out, 'b h n d -> b n (h d)') # alphafold2 styled gating of the values if exists(self.to_v_gate): gates = self.to_v_gate(x) out = out * gates.sigmoid() # combine the heads out = self.to_out(out) if exists(mask): mask = rearrange(mask, 'b n -> b n 1') out = out.masked_fill(~mask, 0.) 
return out, intermediates class AttentionLayers(nn.Module): def __init__( self, dim, depth, heads = 8, causal = False, cross_attend = False, only_cross = False, use_scalenorm = False, use_rmsnorm = False, use_simple_rmsnorm = False, alibi_pos_bias = False, alibi_num_heads = None, rel_pos_bias = False, rel_pos_num_buckets = 32, rel_pos_max_distance = 128, dynamic_pos_bias = False, dynamic_pos_bias_log_distance = False, dynamic_pos_bias_mlp_depth = 2, dynamic_pos_bias_norm = False, rotary_pos_emb = False, rotary_emb_dim = None, rotary_xpos = False, rotary_interpolation_factor = 1., rotary_xpos_scale_base = 512, rotary_base_rescale_factor = 1., custom_layers = None, sandwich_coef = None, par_ratio = None, residual_attn = False, cross_residual_attn = False, macaron = False, pre_norm = True, pre_norm_has_final_norm = True, gate_residual = False, scale_residual = False, scale_residual_constant = 1., deepnorm = False, shift_tokens = 0, sandwich_norm = False, resi_dual = False, resi_dual_scale = 1., zero_init_branch_output = False, layer_dropout = 0., cross_attn_tokens_dropout = 0., **kwargs ): super().__init__() rotary_pos_emb = rotary_pos_emb or rotary_xpos ff_kwargs, kwargs = groupby_prefix_and_trim('ff_', kwargs) attn_kwargs, kwargs = groupby_prefix_and_trim('attn_', kwargs) dim_head = attn_kwargs.get('dim_head', DEFAULT_DIM_HEAD) self.dim = dim self.depth = depth self.layers = nn.ModuleList([]) self.has_pos_emb = rel_pos_bias or rotary_pos_emb rotary_emb_dim = max(default(rotary_emb_dim, dim_head // 2), 32) assert not (rotary_xpos and not causal), 'rotary xpos is not compatible with bidirectional attention' self.rotary_pos_emb = RotaryEmbedding(rotary_emb_dim, use_xpos = rotary_xpos, scale_base = rotary_xpos_scale_base, interpolation_factor = rotary_interpolation_factor, base_rescale_factor = rotary_base_rescale_factor) if rotary_pos_emb else None assert not (alibi_pos_bias and rel_pos_bias), 'you can only choose Alibi positional bias or T5 relative positional bias, not both' assert rel_pos_num_buckets <= rel_pos_max_distance, 'number of relative position buckets must be less than the relative position max distance' # relative positional bias flash_attn = attn_kwargs.get('flash', False) assert (int(rel_pos_bias) + int(dynamic_pos_bias) + int(alibi_pos_bias)) <= 1, 'you can only choose up to one of t5, alibi, or dynamic positional bias' self.rel_pos = None if rel_pos_bias: assert not flash_attn, 'flash attention not compatible with t5 relative positional bias' self.rel_pos = RelativePositionBias(scale = dim_head ** 0.5, causal = causal, heads = heads, num_buckets = rel_pos_num_buckets, max_distance = rel_pos_max_distance) # elif dynamic_pos_bias: # assert not flash_attn, 'flash attention not compatible with dynamic positional bias' # self.rel_pos = DynamicPositionBias(dim = dim // 4, heads = heads, log_distance = dynamic_pos_bias_log_distance, depth = dynamic_pos_bias_mlp_depth, norm = dynamic_pos_bias_norm) elif alibi_pos_bias: alibi_num_heads = default(alibi_num_heads, heads) assert alibi_num_heads <= heads, 'number of ALiBi heads must be less than the total number of heads' self.rel_pos = AlibiPositionalBias(heads = alibi_num_heads, total_heads = heads) # determine deepnorm and residual scale if deepnorm: assert scale_residual_constant == 1, 'scale residual constant is being overridden by deep norm settings' pre_norm = sandwich_norm = resi_dual = False scale_residual = True scale_residual_constant = (2 * depth) ** 0.25 assert (int(sandwich_norm) + int(resi_dual)) <= 1, 'either sandwich norm 
or resiDual is selected, but not both' assert not (not pre_norm and sandwich_norm), 'sandwich norm cannot be used when not using prenorm' if resi_dual: pre_norm = False self.pre_norm = pre_norm self.sandwich_norm = sandwich_norm self.resi_dual = resi_dual assert 0 < resi_dual_scale <= 1., 'resiDual prenorm residual must be scaled by a factor greater than 0 and less than or equal to 1.' self.resi_dual_scale = resi_dual_scale self.residual_attn = residual_attn self.cross_residual_attn = cross_residual_attn assert not (flash_attn and (residual_attn or cross_residual_attn)), 'flash attention is not compatible with residual attention' self.cross_attend = cross_attend assert (int(use_scalenorm) + int(use_rmsnorm) + int(use_simple_rmsnorm)) <= 1, 'you can only use either scalenorm, rmsnorm, or simple rmsnorm' if use_scalenorm: norm_class = ScaleNorm elif use_rmsnorm: norm_class = RMSNorm elif use_simple_rmsnorm: norm_class = SimpleRMSNorm else: norm_class = nn.LayerNorm norm_fn = partial(norm_class, dim) if cross_attend and not only_cross: default_block = ('a', 'c', 'f') elif cross_attend and only_cross: default_block = ('c', 'f') else: default_block = ('a', 'f') if macaron: default_block = ('f',) + default_block # zero init if zero_init_branch_output: attn_kwargs = {**attn_kwargs, 'zero_init_output': True} ff_kwargs = {**ff_kwargs, 'zero_init_output': True} # calculate layer block order if exists(custom_layers): layer_types = custom_layers elif exists(par_ratio): par_depth = depth * len(default_block) assert 1 < par_ratio <= par_depth, 'par ratio out of range' default_block = tuple(filter(not_equals('f'), default_block)) par_attn = par_depth // par_ratio depth_cut = par_depth * 2 // 3 # 2 / 3 attention layer cutoff suggested by PAR paper par_width = (depth_cut + depth_cut // par_attn) // par_attn assert len(default_block) <= par_width, 'default block is too large for par_ratio' par_block = default_block + ('f',) * (par_width - len(default_block)) par_head = par_block * par_attn layer_types = par_head + ('f',) * (par_depth - len(par_head)) elif exists(sandwich_coef): assert sandwich_coef > 0 and sandwich_coef <= depth, 'sandwich coefficient should be less than the depth' layer_types = ('a',) * sandwich_coef + default_block * (depth - sandwich_coef) + ('f',) * sandwich_coef else: layer_types = default_block * depth self.layer_types = layer_types self.num_attn_layers = len(list(filter(equals('a'), layer_types))) # stochastic depth self.layer_dropouts = cast_tuple(layer_dropout, len(layer_types)) # structured dropout for cross attending self.cross_attn_tokens_dropout = cross_attn_tokens_dropout # calculate token shifting shift_tokens = cast_tuple(shift_tokens, len(layer_types)) # whether it has post norm self.final_norm = norm_fn() if pre_norm or resi_dual else nn.Identity() # iterate and construct layers for ind, (layer_type, layer_shift_tokens) in enumerate(zip(self.layer_types, shift_tokens)): is_last_layer = ind == (len(self.layer_types) - 1) if layer_type == 'a': layer = Attention(dim, heads = heads, causal = causal, **attn_kwargs) elif layer_type == 'c': layer = Attention(dim, heads = heads, **attn_kwargs) elif layer_type == 'f': layer = FeedForward(dim, **ff_kwargs) layer = layer if not macaron else Scale(0.5, layer) else: raise Exception(f'invalid layer type {layer_type}') # if layer_shift_tokens > 0: # shift_range_upper = layer_shift_tokens + 1 # shift_range_lower = -layer_shift_tokens if not causal else 0 # layer = ShiftTokens(range(shift_range_lower, shift_range_upper), layer) residual_fn 
= Residual residual = residual_fn(dim, scale_residual = scale_residual, scale_residual_constant = scale_residual_constant) pre_branch_norm = norm_fn() if pre_norm else None post_branch_norm = norm_fn() if sandwich_norm else None post_main_norm = norm_fn() if not pre_norm else None norms = nn.ModuleList([ pre_branch_norm, post_branch_norm, post_main_norm ]) self.layers.append(nn.ModuleList([ norms, layer, residual ])) if deepnorm: init_gain = (8 * depth) ** -0.25 deepnorm_init(self, init_gain) def forward( self, x, context = None, mask = None, context_mask = None, attn_mask = None, self_attn_context_mask = None, mems = None, return_hiddens = False ): assert not (self.cross_attend ^ exists(context)), 'context must be passed in if cross_attend is set to True' hiddens = [] layer_hiddens = [] intermediates = [] prev_attn = None prev_cross_attn = None mems = mems.copy() if exists(mems) else [None] * self.num_attn_layers rotary_pos_emb = None if exists(self.rotary_pos_emb): max_rotary_emb_length = max(list(map(lambda m: (m.shape[1] if exists(m) else 0) + x.shape[1], mems))) rotary_pos_emb = self.rotary_pos_emb(max_rotary_emb_length, x.device) outer_residual = x * self.resi_dual_scale for ind, (layer_type, (norm, block, residual_fn), layer_dropout) in enumerate(zip(self.layer_types, self.layers, self.layer_dropouts)): is_last = ind == (len(self.layers) - 1) if self.training and layer_dropout > 0. and random() < layer_dropout: continue if layer_type == 'a': if return_hiddens: hiddens.append(x) layer_mem = mems.pop(0) if mems else None if layer_type == 'c': if self.training and self.cross_attn_tokens_dropout > 0.: context, context_mask = dropout_seq(context, context_mask, self.cross_attn_tokens_dropout) inner_residual = x if return_hiddens: layer_hiddens.append(x) pre_norm, post_branch_norm, post_main_norm = norm if exists(pre_norm): x = pre_norm(x) if layer_type == 'a': out, inter = block(x, mask = mask, context_mask = self_attn_context_mask, attn_mask = attn_mask, rel_pos = self.rel_pos, rotary_pos_emb = rotary_pos_emb, prev_attn = prev_attn, mem = layer_mem) elif layer_type == 'c': out, inter = block(x, context = context, mask = mask, context_mask = context_mask, prev_attn = prev_cross_attn) elif layer_type == 'f': out = block(x) if self.resi_dual: outer_residual = outer_residual + out * self.resi_dual_scale if exists(post_branch_norm): out = post_branch_norm(out) x = residual_fn(out, inner_residual) if layer_type in ('a', 'c') and return_hiddens: intermediates.append(inter) if layer_type == 'a' and self.residual_attn: prev_attn = inter.pre_softmax_attn elif layer_type == 'c' and self.cross_residual_attn: prev_cross_attn = inter.pre_softmax_attn if exists(post_main_norm): x = post_main_norm(x) if return_hiddens: layer_hiddens.append(x) if self.resi_dual: x = x + self.final_norm(outer_residual) else: x = self.final_norm(x) if return_hiddens: intermediates = LayerIntermediates( hiddens = hiddens, attn_intermediates = intermediates, layer_hiddens = layer_hiddens ) return x, intermediates return x class Encoder(AttentionLayers): def __init__(self, **kwargs): assert 'causal' not in kwargs, 'cannot set causality on encoder' super().__init__(causal = False, **kwargs) class Decoder(AttentionLayers): def __init__(self, **kwargs): assert 'causal' not in kwargs, 'cannot set causality on decoder' super().__init__(causal = True, **kwargs) class CrossAttender(AttentionLayers): def __init__(self, **kwargs): super().__init__(cross_attend = True, only_cross = True, **kwargs) class ViTransformerWrapper(nn.Module): 
def __init__( self, *, image_size, patch_size, attn_layers, channels = 3, num_classes = None, post_emb_norm = False, emb_dropout = 0. ): super().__init__() assert isinstance(attn_layers, Encoder), 'attention layers must be an Encoder' assert image_size % patch_size == 0, 'image dimensions must be divisible by the patch size' dim = attn_layers.dim num_patches = (image_size // patch_size) ** 2 patch_dim = channels * patch_size ** 2 self.patch_size = patch_size self.pos_embedding = nn.Parameter(torch.randn(1, num_patches, dim)) self.patch_to_embedding = nn.Sequential( nn.LayerNorm(patch_dim), nn.Linear(patch_dim, dim), nn.LayerNorm(dim) ) self.post_emb_norm = nn.LayerNorm(dim) if post_emb_norm else nn.Identity() self.dropout = nn.Dropout(emb_dropout) self.attn_layers = attn_layers self.mlp_head = nn.Linear(dim, num_classes) if exists(num_classes) else nn.Identity() def forward( self, img, return_embeddings = False ): p = self.patch_size x = rearrange(img, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = p, p2 = p) x = self.patch_to_embedding(x) n = x.shape[1] x = x + self.pos_embedding[:, :n] x = self.post_emb_norm(x) x = self.dropout(x) x = self.attn_layers(x) if not exists(self.mlp_head) or return_embeddings: return x x = x.mean(dim = -2) return self.mlp_head(x) class Transformer(nn.Module): def __init__( self, *, num_tokens, max_seq_len, attn_layers, emb_dim = None, max_mem_len = 0, shift_mem_down = 0, emb_dropout = 0., post_emb_norm = False, num_memory_tokens = None, tie_embedding = False, logits_dim = None, use_abs_pos_emb = True, scaled_sinu_pos_emb = False, l2norm_embed = False, emb_frac_gradient = 1., # GLM-130B and Cogview successfully used this, set at 0.1 attn_z_loss_weight = 1e-4 ): super().__init__() assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder' dim = attn_layers.dim emb_dim = default(emb_dim, dim) self.emb_dim = emb_dim self.num_tokens = num_tokens self.max_seq_len = max_seq_len self.max_mem_len = max_mem_len self.shift_mem_down = shift_mem_down self.l2norm_embed = l2norm_embed self.token_emb = TokenEmbedding(emb_dim, num_tokens, l2norm_embed = l2norm_embed) if not (use_abs_pos_emb and not attn_layers.has_pos_emb): self.pos_emb = always(0) elif scaled_sinu_pos_emb: self.pos_emb = ScaledSinusoidalEmbedding(emb_dim) else: self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len, l2norm_embed = l2norm_embed) self.emb_frac_gradient = emb_frac_gradient # fraction of the gradient that should go to the embedding, https://arxiv.org/abs/2105.13290 self.post_emb_norm = nn.LayerNorm(emb_dim) if post_emb_norm else nn.Identity() self.emb_dropout = nn.Dropout(emb_dropout) self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity() self.attn_layers = attn_layers self.init_() logits_dim = default(logits_dim, num_tokens) self.to_logits = nn.Linear(dim, logits_dim) if not tie_embedding else lambda t: t @ self.token_emb.emb.weight.t() # memory tokens (like [cls]) from Memory Transformers paper num_memory_tokens = default(num_memory_tokens, 0) self.num_memory_tokens = num_memory_tokens if num_memory_tokens > 0: self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim)) def init_(self): if self.l2norm_embed: nn.init.normal_(self.token_emb.emb.weight, std = 1e-5) if not isinstance(self.pos_emb, always): nn.init.normal_(self.pos_emb.emb.weight, std = 1e-5) return nn.init.kaiming_normal_(self.token_emb.emb.weight) def forward( self, x, return_embeddings = False, return_logits_and_embeddings = False, 
return_intermediates = False, mask = None, return_mems = False, return_attn = False, mems = None, pos = None, prepend_embeds = None, sum_embeds = None, return_attn_z_loss = False, attn_z_loss_weight = 1e-4, **kwargs ): b, n, device, num_mem, emb_frac_gradient = *x.shape, x.device, self.num_memory_tokens, self.emb_frac_gradient return_hiddens = return_mems | return_attn | return_intermediates | return_attn_z_loss # absolute positional embedding external_pos_emb = exists(pos) and pos.dtype != torch.long pos_emb = self.pos_emb(x, pos = pos) if not external_pos_emb else pos x = self.token_emb(x) + pos_emb # for summing embeddings passed externally - needs this for self-conditioning in non-autoregressive training if exists(sum_embeds): x = x + sum_embeds # post embedding norm, purportedly leads to greater stabilization x = self.post_emb_norm(x) # whether to append embeds, as in PaLI, for image embeddings if exists(prepend_embeds): prepend_seq, prepend_dim = prepend_embeds.shape[1:] assert prepend_dim == x.shape[-1], 'prepended embeddings need to have same dimensions as text model dimensions' x = torch.cat((prepend_embeds, x), dim = -2) # whether to reduce the gradient going to the embedding, from cogview paper, corroborated by GLM-130B model if emb_frac_gradient < 1: assert emb_frac_gradient > 0 x = x * emb_frac_gradient + x.detach() * (1 - emb_frac_gradient) # embedding dropout x = self.emb_dropout(x) x = self.project_emb(x) if num_mem > 0: mem = repeat(self.memory_tokens, 'n d -> b n d', b = b) x = torch.cat((mem, x), dim = 1) # auto-handle masking after appending memory tokens if exists(mask): mask = pad_at_dim(mask, (num_mem, 0), dim = -1, value = True) if self.shift_mem_down and exists(mems): mems_l, mems_r = mems[:self.shift_mem_down], mems[self.shift_mem_down:] mems = [*mems_r, *mems_l] if return_hiddens: x, intermediates = self.attn_layers(x, mask = mask, mems = mems, return_hiddens = True, **kwargs) else: x = self.attn_layers(x, mask = mask, mems = mems, **kwargs) mem, x = x[:, :num_mem], x[:, num_mem:] if return_logits_and_embeddings: out = (self.to_logits(x), x) elif return_embeddings: out = x else: out = self.to_logits(x) if return_attn_z_loss: pre_softmax_attns = list(map(lambda t: t.pre_softmax_attn, intermediates.attn_intermediates)) intermediates.attn_z_loss = calc_z_loss(pre_softmax_attns, weight = attn_z_loss_weight) return_intermediates = True if return_intermediates: return out, intermediates if return_mems: hiddens = intermediates.hiddens new_mems = list(map(lambda pair: torch.cat(pair, dim = -2), zip(mems, hiddens))) if exists(mems) else hiddens new_mems = list(map(lambda t: t[..., -self.max_mem_len:, :].detach(), new_mems)) return out, new_mems if return_attn: attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates)) return out, attn_maps return out
AlphaDev-main
alpha_dev/transformer.py
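A minimal usage sketch for the wrappers defined above, pairing a Decoder stack with the Transformer token/positional embedding wrapper. The class names come from this module; the vocabulary size, depth, and other hyperparameters are illustrative placeholders, not AlphaDev's actual configuration.

import torch

# assumes Transformer and Decoder are importable from the module above
model = Transformer(
    num_tokens = 20000,      # vocabulary size (placeholder)
    max_seq_len = 1024,      # cap for the absolute positional embedding
    attn_layers = Decoder(
        dim = 512,           # model width
        depth = 6,           # number of attention + feedforward blocks
        heads = 8,           # attention heads per block
    ),
)

x = torch.randint(0, 20000, (2, 1024))          # (batch, seq) of token ids
logits = model(x)                               # (2, 1024, 20000)
embeds = model(x, return_embeddings = True)     # (2, 1024, 512), skips to_logits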
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import argparse import os import random import numpy as np import torch import torch.backends.cudnn as cudnn import lavis.tasks as tasks from lavis.common.config import Config from lavis.common.dist_utils import get_rank, init_distributed_mode from lavis.common.logger import setup_logger from lavis.common.optims import ( LinearWarmupCosineLRScheduler, LinearWarmupStepLRScheduler, ) from lavis.common.registry import registry from lavis.common.utils import now # imports modules for registration from lavis.datasets.builders import * from lavis.models import * from lavis.processors import * from lavis.runners import * from lavis.tasks import * def parse_args(): parser = argparse.ArgumentParser(description="Training") parser.add_argument("--cfg-path", required=True, help="path to configuration file.") parser.add_argument( "--options", nargs="+", help="override some settings in the used config, the key-value pair " "in xxx=yyy format will be merged into config file (deprecate), " "change to --cfg-options instead.", ) # replace some settings in the used config parser.add_argument("--replace_cfg", nargs="+", help="replace some settings in the used config", default=None) parser.add_argument("--job_id", default=None, help="job id") # python train.py --cfg-path configs/cont_train.yaml \ # --replace-cfg run_cfg.seed=1 run_cfg.local_rank=0 --job-id 1 args = parser.parse_args() # if 'LOCAL_RANK' not in os.environ: # os.environ['LOCAL_RANK'] = str(args.local_rank) return args def setup_seeds(config): seed = config.run_cfg.seed + get_rank() random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) cudnn.benchmark = False cudnn.deterministic = True def get_runner_class(cfg): """ Get runner class from config. Default to epoch-based runner. """ runner_cls = registry.get_runner_class(cfg.run_cfg.get("runner", "runner_base")) return runner_cls def main(): # allow auto-dl completes on main process without timeout when using NCCL backend. # os.environ["NCCL_BLOCKING_WAIT"] = "1" # set before init_distributed_mode() to ensure the same job_id shared across all ranks. args = parse_args() torch.multiprocessing.set_start_method("spawn") job_id = now() if args.job_id is None else args.job_id cfg = Config(args) init_distributed_mode(cfg.run_cfg) setup_seeds(cfg) # set after init_distributed_mode() to only log on master. setup_logger() cfg.pretty_print() # with torch.cuda.amp.autocast(dtype=torch.float32): task = tasks.setup_task(cfg) datasets = task.build_datasets(cfg) model = task.build_model(cfg) runner = get_runner_class(cfg)(cfg=cfg, job_id=job_id, task=task, model=model, datasets=datasets) runner.train() if __name__ == "__main__": main()
3D-LLM-main
3DLLM_BLIP2-base/train.py
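For reference, a hedged example of launching this entry point with the flags as the parser actually defines them (--replace_cfg and --job_id, with underscores, unlike the hyphenated spellings in the inline comment); the config path is the one named in that comment and may not exist in every checkout:

python train.py --cfg-path configs/cont_train.yaml --replace_cfg run_cfg.seed=1 run_cfg.local_rank=0 --job_id 1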
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import argparse import random import numpy as np import torch import torch.backends.cudnn as cudnn import lavis.tasks as tasks from lavis.common.config import Config from lavis.common.dist_utils import get_rank, init_distributed_mode from lavis.common.logger import setup_logger from lavis.common.optims import ( LinearWarmupCosineLRScheduler, LinearWarmupStepLRScheduler, ) from lavis.common.utils import now # imports modules for registration from lavis.datasets.builders import * from lavis.models import * from lavis.processors import * from lavis.runners.runner_base import RunnerBase from lavis.tasks import * def parse_args(): parser = argparse.ArgumentParser(description="Training") parser.add_argument("--cfg-path", required=True, help="path to configuration file.") parser.add_argument( "--options", nargs="+", help="override some settings in the used config, the key-value pair " "in xxx=yyy format will be merged into config file (deprecate), " "change to --cfg-options instead.", ) args = parser.parse_args() # if 'LOCAL_RANK' not in os.environ: # os.environ['LOCAL_RANK'] = str(args.local_rank) return args def setup_seeds(config): seed = config.run_cfg.seed + get_rank() random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) cudnn.benchmark = False cudnn.deterministic = True def main(): # allow auto-dl completes on main process without timeout when using NCCL backend. # os.environ["NCCL_BLOCKING_WAIT"] = "1" # set before init_distributed_mode() to ensure the same job_id shared across all ranks. job_id = now() cfg = Config(parse_args()) init_distributed_mode(cfg.run_cfg) setup_seeds(cfg) # set after init_distributed_mode() to only log on master. setup_logger() cfg.pretty_print() task = tasks.setup_task(cfg) datasets = task.build_datasets(cfg) model = task.build_model(cfg) runner = RunnerBase(cfg=cfg, job_id=job_id, task=task, model=model, datasets=datasets) runner.evaluate(skip_reload=True) if __name__ == "__main__": main()
3D-LLM-main
3DLLM_BLIP2-base/evaluate.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import os import sys from omegaconf import OmegaConf from lavis.common.registry import registry from lavis.datasets.builders import * from lavis.models import * from lavis.processors import * from lavis.tasks import * root_dir = os.path.dirname(os.path.abspath(__file__)) default_cfg = OmegaConf.load(os.path.join(root_dir, "configs/default.yaml")) registry.register_path("library_root", root_dir) repo_root = os.path.join(root_dir, "..") registry.register_path("repo_root", repo_root) cache_root = os.path.join(repo_root, default_cfg.env.cache_root) registry.register_path("cache_root", cache_root) registry.register("MAX_INT", sys.maxsize) registry.register("SPLIT_NAMES", ["train", "val", "test"])
3D-LLM-main
3DLLM_BLIP2-base/lavis/__init__.py
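Because importing the package runs this __init__ and populates the registry, other modules look these paths up instead of recomputing them. A short sketch, assuming the working directory is 3DLLM_BLIP2-base so that lavis is importable; the printed paths are whatever the checkout resolves them to.

import os
import lavis  # side effect: registers library_root, repo_root, cache_root
from lavis.common.registry import registry

cache_root = registry.get_path("cache_root")
coco_gt_root = os.path.join(cache_root, "coco_gt")   # where the caption/dialogue tasks cache COCO ground truth
print(cache_root, registry.get("MAX_INT"))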
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import logging import os import torch import torch.distributed as dist from lavis.common.dist_utils import get_rank, get_world_size, is_main_process, is_dist_avail_and_initialized from lavis.common.logger import MetricLogger, SmoothedValue from lavis.common.registry import registry from lavis.datasets.data_utils import prepare_sample class BaseTask: def __init__(self, **kwargs): super().__init__() self.inst_id_key = "instance_id" @classmethod def setup_task(cls, **kwargs): return cls() def build_model(self, cfg): model_config = cfg.model_cfg model_cls = registry.get_model_class(model_config.arch) return model_cls.from_config(model_config) def build_datasets(self, cfg): """ Build a dictionary of datasets, keyed by split 'train', 'valid', 'test'. Download dataset and annotations automatically if not exist. Args: cfg (common.config.Config): _description_ Returns: dict: Dictionary of torch.utils.data.Dataset objects by split. """ datasets = dict() datasets_config = cfg.datasets_cfg assert len(datasets_config) > 0, "At least one dataset has to be specified." for name in datasets_config: dataset_config = datasets_config[name] builder = registry.get_builder_class(name)(dataset_config) dataset = builder.build_datasets() datasets[name] = dataset return datasets def train_step(self, model, samples): loss = model(samples)["loss"] return loss def valid_step(self, model, samples): raise NotImplementedError def before_evaluation(self, model, dataset, **kwargs): model.before_evaluation(dataset=dataset, task_type=type(self)) def after_evaluation(self, **kwargs): pass def inference_step(self): raise NotImplementedError def evaluation(self, model, data_loader, cuda_enabled=True): metric_logger = MetricLogger(delimiter=" ") header = "Evaluation" # TODO make it configurable print_freq = 10 results = [] for samples in metric_logger.log_every(data_loader, print_freq, header): samples = prepare_sample(samples, cuda_enabled=cuda_enabled) eval_output = self.valid_step(model=model, samples=samples) results.extend(eval_output) if is_dist_avail_and_initialized(): dist.barrier() return results def train_epoch( self, epoch, model, data_loader, optimizer, lr_scheduler, scaler=None, cuda_enabled=False, log_freq=50, accum_grad_iters=1, ): return self._train_inner_loop( epoch=epoch, iters_per_epoch=len(data_loader), model=model, data_loader=data_loader, optimizer=optimizer, scaler=scaler, lr_scheduler=lr_scheduler, log_freq=log_freq, cuda_enabled=cuda_enabled, accum_grad_iters=accum_grad_iters, ) def train_iters( self, epoch, start_iters, iters_per_inner_epoch, model, data_loader, optimizer, lr_scheduler, scaler=None, cuda_enabled=False, log_freq=50, accum_grad_iters=1, ): return self._train_inner_loop( epoch=epoch, start_iters=start_iters, iters_per_epoch=iters_per_inner_epoch, model=model, data_loader=data_loader, optimizer=optimizer, scaler=scaler, lr_scheduler=lr_scheduler, log_freq=log_freq, cuda_enabled=cuda_enabled, accum_grad_iters=accum_grad_iters, ) def _train_inner_loop( self, epoch, iters_per_epoch, model, data_loader, optimizer, lr_scheduler, scaler=None, start_iters=None, log_freq=50, cuda_enabled=False, accum_grad_iters=1, ): """ An inner training loop compatible with both epoch-based and iter-based training. 
When using epoch-based, training stops after one epoch; when using iter-based, training stops after #iters_per_epoch iterations. """ use_amp = scaler is not None if not hasattr(data_loader, "__next__"): # convert to iterator if not already data_loader = iter(data_loader) metric_logger = MetricLogger(delimiter=" ") metric_logger.add_meter("lr", SmoothedValue(window_size=1, fmt="{value:.6f}")) metric_logger.add_meter("loss", SmoothedValue(window_size=1, fmt="{value:.4f}")) # if iter-based runner, schedule lr based on inner epoch. logging.info("Start training epoch {}, {} iters per inner epoch.".format(epoch, iters_per_epoch)) header = "Train: data epoch: [{}]".format(epoch) if start_iters is None: # epoch-based runner inner_epoch = epoch else: # In iter-based runner, we schedule the learning rate based on iterations. inner_epoch = start_iters // iters_per_epoch header = header + "; inner epoch [{}]".format(inner_epoch) for i in metric_logger.log_every(range(iters_per_epoch), log_freq, header): # if using iter-based runner, we stop after iters_per_epoch iterations. if i >= iters_per_epoch: break samples = next(data_loader) samples = prepare_sample(samples, cuda_enabled=cuda_enabled) samples.update( { "epoch": inner_epoch, "num_iters_per_epoch": iters_per_epoch, "iters": i, } ) lr_scheduler.step(cur_epoch=inner_epoch, cur_step=i) with torch.cuda.amp.autocast(enabled=use_amp): loss = self.train_step(model=model, samples=samples) # after_train_step() if use_amp: scaler.scale(loss).backward() else: loss.backward() # update gradients every accum_grad_iters iterations if (i + 1) % accum_grad_iters == 0: if use_amp: scaler.step(optimizer) scaler.update() else: optimizer.step() optimizer.zero_grad() metric_logger.update(loss=loss.item()) metric_logger.update(lr=optimizer.param_groups[0]["lr"]) # after train_epoch() # gather the stats from all processes metric_logger.synchronize_between_processes() logging.info("Averaged stats: " + str(metric_logger.global_avg())) return {k: "{:.3f}".format(meter.global_avg) for k, meter in metric_logger.meters.items()} @staticmethod def save_result(result, result_dir, filename, remove_duplicate=""): import json result_file = os.path.join(result_dir, "%s_rank%d.json" % (filename, get_rank())) final_result_file = os.path.join(result_dir, "%s.json" % filename) json.dump(result, open(result_file, "w")) if is_dist_avail_and_initialized(): dist.barrier() if is_main_process(): logging.warning("rank %d starts merging results." % get_rank()) # combine results from all processes result = [] for rank in range(get_world_size()): result_file = os.path.join(result_dir, "%s_rank%d.json" % (filename, rank)) res = json.load(open(result_file, "r")) result += res if remove_duplicate: result_new = [] id_list = [] for res in result: if res[remove_duplicate] not in id_list: id_list.append(res[remove_duplicate]) result_new.append(res) result = result_new json.dump(result, open(final_result_file, "w")) print("result file saved to %s" % final_result_file) return final_result_file
3D-LLM-main
3DLLM_BLIP2-base/lavis/tasks/base_task.py
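A minimal custom-task sketch built on the BaseTask API above: registration mirrors the decorators used by the bundled tasks, valid_step returns a flat list that evaluation() accumulates, and after_evaluation reduces it. The task name and the model.predict call are illustrative (the latter follows the pattern used by the multimodal classification task later in this package).

from lavis.common.registry import registry
from lavis.tasks.base_task import BaseTask


@registry.register_task("toy_classification")  # hypothetical task name
class ToyClassificationTask(BaseTask):
    def valid_step(self, model, samples):
        # evaluation() extends a flat list with whatever this returns
        outputs = model.predict(samples)                 # assumed model API
        preds = outputs["predictions"].argmax(dim=1)
        ids = samples[self.inst_id_key]                  # "instance_id", set in BaseTask.__init__
        return [{"instance_id": int(i), "prediction": int(p)} for i, p in zip(ids, preds)]

    def after_evaluation(self, val_result, **kwargs):
        # following the "agg_metrics" convention used by the bundled tasks
        return {"agg_metrics": float(len(val_result))}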
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ from lavis.common.registry import registry from lavis.tasks.base_task import BaseTask from lavis.tasks.captioning import CaptionTask from lavis.tasks.image_text_pretrain import ImageTextPretrainTask from lavis.tasks.multimodal_classification import ( MultimodalClassificationTask, ) from lavis.tasks.retrieval import RetrievalTask from lavis.tasks.vqa import VQATask, GQATask, ThreeDVQATask from lavis.tasks.vqa_reading_comprehension import VQARCTask, GQARCTask from lavis.tasks.dialogue import DialogueTask def setup_task(cfg): assert "task" in cfg.run_cfg, "Task name must be provided." task_name = cfg.run_cfg.task task = registry.get_task_class(task_name).setup_task(cfg=cfg) assert task is not None, "Task {} not properly registered.".format(task_name) return task __all__ = [ "BaseTask", "ThreeDVQATask", "RetrievalTask", "CaptionTask", "VQATask", "GQATask", "VQARCTask", "GQARCTask", "MultimodalClassificationTask", # "VideoQATask", # "VisualEntailmentTask", "ImageTextPretrainTask", "DialogueTask", ]
3D-LLM-main
3DLLM_BLIP2-base/lavis/tasks/__init__.py
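A short sketch of how setup_task resolves a task name through the registry. A plain OmegaConf dict stands in for the lavis Config object here (an assumption for brevity); only run_cfg.task is needed for the lookup itself.

from omegaconf import OmegaConf
from lavis.tasks import setup_task

cfg = OmegaConf.create({"run_cfg": {"task": "image_text_pretrain"}})
task = setup_task(cfg)              # registry lookup + <TaskClass>.setup_task(cfg=cfg)
print(type(task).__name__)          # ImageTextPretrainTask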
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import json import logging import os import numpy as np import torch from lavis.common.dist_utils import is_main_process from lavis.common.registry import registry from lavis.tasks.base_task import BaseTask @registry.register_task("retrieval") class RetrievalTask(BaseTask): def __init__(self, cfg): super().__init__() self.cfg = cfg @classmethod def setup_task(cls, cfg): run_cfg = cfg.run_cfg return cls(cfg=run_cfg) def evaluation(self, model, data_loader, **kwargs): # score_i2t, score_t2i = model.compute_sim_matrix(model, data_loader) score_i2t, score_t2i = model.compute_sim_matrix(data_loader, task_cfg=self.cfg) if is_main_process(): eval_result = self._report_metrics( score_i2t, score_t2i, data_loader.dataset.txt2img, data_loader.dataset.img2txt, ) logging.info(eval_result) else: eval_result = None return eval_result def after_evaluation(self, val_result, **kwargs): return val_result @staticmethod @torch.no_grad() def _report_metrics(scores_i2t, scores_t2i, txt2img, img2txt): # Images->Text ranks = np.zeros(scores_i2t.shape[0]) for index, score in enumerate(scores_i2t): inds = np.argsort(score)[::-1] # Score rank = 1e20 for i in img2txt[index]: tmp = np.where(inds == i)[0][0] if tmp < rank: rank = tmp ranks[index] = rank # Compute metrics tr1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks) tr5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks) tr10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks) # Text->Images ranks = np.zeros(scores_t2i.shape[0]) for index, score in enumerate(scores_t2i): inds = np.argsort(score)[::-1] ranks[index] = np.where(inds == txt2img[index])[0][0] # Compute metrics ir1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks) ir5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks) ir10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks) tr_mean = (tr1 + tr5 + tr10) / 3 ir_mean = (ir1 + ir5 + ir10) / 3 r_mean = (tr_mean + ir_mean) / 2 agg_metrics = (tr1 + tr5 + tr10) / 3 eval_result = { "txt_r1": tr1, "txt_r5": tr5, "txt_r10": tr10, "txt_r_mean": tr_mean, "img_r1": ir1, "img_r5": ir5, "img_r10": ir10, "img_r_mean": ir_mean, "r_mean": r_mean, "agg_metrics": agg_metrics, } with open(os.path.join(registry.get_path("output_dir"), "evaluate.txt"), "a") as f: f.write(json.dumps(eval_result) + "\n") return eval_result
3D-LLM-main
3DLLM_BLIP2-base/lavis/tasks/retrieval.py
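A small worked example of the Images->Text ranking inside _report_metrics: each image gets the rank of its best-ranked ground-truth caption, and txt_r1/r5/r10 are the fractions of images whose best caption lands in the top 1/5/10. The scores and the img2txt mapping below are made up for illustration.

import numpy as np

scores_i2t = np.array([[0.9, 0.1, 0.3],    # image 0 scored against 3 captions
                       [0.2, 0.8, 0.4]])   # image 1 scored against 3 captions
img2txt = {0: [1], 1: [1, 2]}              # ground-truth caption indices per image

ranks = np.zeros(scores_i2t.shape[0])
for index, score in enumerate(scores_i2t):
    inds = np.argsort(score)[::-1]                                  # best caption first
    ranks[index] = min(np.where(inds == i)[0][0] for i in img2txt[index])

tr1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
print(ranks, tr1)   # [2. 0.] 50.0 -> only image 1 has a ground-truth caption ranked first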
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ from lavis.common.registry import registry from lavis.tasks.base_task import BaseTask @registry.register_task("image_text_pretrain") class ImageTextPretrainTask(BaseTask): def __init__(self): super().__init__() def evaluation(self, model, data_loader, cuda_enabled=True): pass
3D-LLM-main
3DLLM_BLIP2-base/lavis/tasks/image_text_pretrain.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import json import os from lavis.common.dist_utils import main_process from lavis.common.logger import MetricLogger from lavis.common.registry import registry from lavis.tasks.base_task import BaseTask from lavis.datasets.data_utils import prepare_sample import numpy as np @registry.register_task("dialogue") class DialogueTask(BaseTask): def __init__(self, num_beams, max_len, min_len, evaluate, report_metric=True): super().__init__() self.num_beams = num_beams self.max_len = max_len self.min_len = min_len self.evaluate = evaluate self.report_metric = report_metric @classmethod def setup_task(cls, cfg): run_cfg = cfg.run_cfg num_beams = run_cfg.num_beams max_len = run_cfg.max_len min_len = run_cfg.min_len evaluate = run_cfg.evaluate report_metric = run_cfg.get("report_metric", True) return cls( num_beams=num_beams, max_len=max_len, min_len=min_len, evaluate=evaluate, report_metric=report_metric, ) def valid_step(self, model, samples): results = [] loss = model(samples)["loss"].item() return [loss] def after_evaluation(self, val_result, split_name, epoch, **kwargs): if self.report_metric: avg_loss = np.mean(val_result) metrics = {"agg_metrics": avg_loss} else: metrics = {"agg_metrics": 0.0} return metrics @main_process def _report_metrics(self, eval_result_file, split_name): # TODO better way to define this coco_gt_root = os.path.join(registry.get_path("cache_root"), "coco_gt") coco_val = coco_dialogue_eval(coco_gt_root, eval_result_file, split_name) agg_metrics = coco_val.eval["CIDEr"] + coco_val.eval["Bleu_4"] log_stats = {split_name: {k: v for k, v in coco_val.eval.items()}} with open(os.path.join(registry.get_path("output_dir"), "evaluate.txt"), "a") as f: f.write(json.dumps(log_stats) + "\n") coco_res = {k: v for k, v in coco_val.eval.items()} coco_res["agg_metrics"] = agg_metrics return coco_res # TODO better structure for this. from pycocoevalcap.eval import COCOEvalCap from pycocotools.coco import COCO from torchvision.datasets.utils import download_url def coco_dialogue_eval(coco_gt_root, results_file, split): urls = { "val": "https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_val_gt.json", "test": "https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_test_gt.json", } filenames = { "val": "coco_karpathy_val_gt.json", "test": "coco_karpathy_test_gt.json", } download_url(urls[split], coco_gt_root) annotation_file = os.path.join(coco_gt_root, filenames[split]) # create coco object and coco_result object coco = COCO(annotation_file) coco_result = coco.loadRes(results_file) # create coco_eval object by taking coco and coco_result coco_eval = COCOEvalCap(coco, coco_result) # evaluate on a subset of images by setting # coco_eval.params['image_id'] = coco_result.getImgIds() # please remove this line when evaluating the full validation set # coco_eval.params['image_id'] = coco_result.getImgIds() # evaluate results # SPICE will take a few minutes the first time, but speeds up due to caching coco_eval.evaluate() # print output evaluation scores for metric, score in coco_eval.eval.items(): print(f"{metric}: {score:.3f}") return coco_eval
3D-LLM-main
3DLLM_BLIP2-base/lavis/tasks/dialogue.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import json import os from lavis.common.dist_utils import main_process from lavis.common.registry import registry from lavis.tasks.base_task import BaseTask @registry.register_task("captioning") class CaptionTask(BaseTask): def __init__(self, num_beams, max_len, min_len, evaluate, report_metric=True): super().__init__() self.num_beams = num_beams self.max_len = max_len self.min_len = min_len self.evaluate = evaluate self.report_metric = report_metric @classmethod def setup_task(cls, cfg): run_cfg = cfg.run_cfg num_beams = run_cfg.num_beams max_len = run_cfg.max_len min_len = run_cfg.min_len evaluate = run_cfg.evaluate report_metric = run_cfg.get("report_metric", True) return cls( num_beams=num_beams, max_len=max_len, min_len=min_len, evaluate=evaluate, report_metric=report_metric, ) def valid_step(self, model, samples): results = [] # run_cfg = slf.cfg.run_cfg captions = model.generate( samples, use_nucleus_sampling=False, num_beams=self.num_beams, max_length=self.max_len, min_length=self.min_len, ) img_ids = samples["image_id"] for caption, img_id in zip(captions, img_ids): results.append({"caption": caption, "image_id": int(img_id)}) return results def after_evaluation(self, val_result, split_name, epoch, **kwargs): eval_result_file = self.save_result( result=val_result, result_dir=registry.get_path("result_dir"), filename="{}_epoch{}".format(split_name, epoch), remove_duplicate="image_id", ) # if self.report_metric: # metrics = self._report_metrics( # eval_result_file=eval_result_file, split_name=split_name # ) # else: # metrics = {"agg_metrics": 0.0} # return metrics @main_process def _report_metrics(self, eval_result_file, split_name): # TODO better way to define this coco_gt_root = os.path.join(registry.get_path("cache_root"), "coco_gt") coco_val = coco_caption_eval(coco_gt_root, eval_result_file, split_name) agg_metrics = coco_val.eval["CIDEr"] + coco_val.eval["Bleu_4"] log_stats = {split_name: {k: v for k, v in coco_val.eval.items()}} with open(os.path.join(registry.get_path("output_dir"), "evaluate.txt"), "a") as f: f.write(json.dumps(log_stats) + "\n") coco_res = {k: v for k, v in coco_val.eval.items()} coco_res["agg_metrics"] = agg_metrics return coco_res # TODO better structure for this. 
from pycocoevalcap.eval import COCOEvalCap from pycocotools.coco import COCO from torchvision.datasets.utils import download_url def coco_caption_eval(coco_gt_root, results_file, split): urls = { "val": "https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_val_gt.json", "test": "https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_test_gt.json", } filenames = { "val": "coco_karpathy_val_gt.json", "test": "coco_karpathy_test_gt.json", } download_url(urls[split], coco_gt_root) annotation_file = os.path.join(coco_gt_root, filenames[split]) # create coco object and coco_result object coco = COCO(annotation_file) coco_result = coco.loadRes(results_file) # create coco_eval object by taking coco and coco_result coco_eval = COCOEvalCap(coco, coco_result) # evaluate on a subset of images by setting # coco_eval.params['image_id'] = coco_result.getImgIds() # please remove this line when evaluating the full validation set # coco_eval.params['image_id'] = coco_result.getImgIds() # evaluate results # SPICE will take a few minutes the first time, but speeds up due to caching coco_eval.evaluate() # print output evaluation scores for metric, score in coco_eval.eval.items(): print(f"{metric}: {score:.3f}") return coco_eval
3D-LLM-main
3DLLM_BLIP2-base/lavis/tasks/captioning.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import logging import json import os import lavis.common.dist_utils as dist_utils from lavis.common.registry import registry from lavis.common.vqa_tools.vqa import VQA from lavis.common.vqa_tools.vqa_eval import VQAEval from lavis.tasks.base_task import BaseTask @registry.register_task("vqa") class VQATask(BaseTask): def __init__( self, num_beams, max_len, min_len, evaluate, num_ans_candidates, inference_method="rank", prompt="", ): super().__init__() self.num_beams = num_beams self.max_len = max_len self.min_len = min_len self.evaluate = evaluate self.inference_method = inference_method self.num_ans_candidates = num_ans_candidates self.prompt = prompt self.answer_list = None self.ques_files = dict() self.anno_files = dict() @classmethod def setup_task(cls, cfg): run_cfg = cfg.run_cfg num_beams = run_cfg.get("num_beams", 3) max_len = run_cfg.get("max_len", 10) min_len = run_cfg.get("min_len", 1) evaluate = run_cfg.get("evaluate", False) inference_method = run_cfg.get("inference_method", "rank") num_ans_candidates = run_cfg.get("num_ans_candidates", 128) prompt = run_cfg.get("prompt", "") return cls( num_beams=num_beams, max_len=max_len, min_len=min_len, evaluate=evaluate, num_ans_candidates=num_ans_candidates, inference_method=inference_method, prompt=prompt, ) def build_datasets(self, cfg): datasets = super().build_datasets(cfg) # get question file, annotation file and anwser list in COCO format for dataset in datasets.values(): for split in dataset: if hasattr(dataset[split], "coco_fmt_qust_file") and dataset[split].coco_fmt_qust_file is not None: self.ques_files[split] = dataset[split].coco_fmt_qust_file self.anno_files[split] = dataset[split].coco_fmt_anno_file try: self.answer_list = dataset[split].answer_list except AttributeError: # if answer_list is not provided, then set it to None pass if len(self.ques_files) > 0: assert len(self.ques_files) == len(self.anno_files), "Only support one split for evaluation." return datasets def valid_step(self, model, samples): answers = model.predict_answers( samples=samples, answer_list=self.answer_list, inference_method=self.inference_method, num_beams=self.num_beams, max_len=self.max_len, min_len=self.min_len, num_ans_candidates=self.num_ans_candidates, prompt=self.prompt, ) pred_qa_pairs = [] question_id = samples["question_id"] for answer, ques_id in zip(answers, question_id): ques_id = int(ques_id.item()) pred_qa_pairs.append({"question_id": ques_id, "answer": answer}) return pred_qa_pairs def after_evaluation(self, val_result, split_name, epoch): result_file = self.save_result( val_result, result_dir=registry.get_path("result_dir"), filename=f"{split_name}_%d_vqa_result" % epoch, remove_duplicate="question_id", ) metrics = self._report_metrics(result_file=result_file, split=split_name) return metrics @dist_utils.main_process def _report_metrics(self, result_file, split): """ Use official VQA evaluation script to report metrics. 
""" metrics = {} if split in self.ques_files and split in self.anno_files: vqa = VQA(self.anno_files[split], self.ques_files[split]) vqa_result = vqa.loadRes(resFile=result_file, quesFile=self.ques_files[split]) # create vqaEval object by taking vqa and vqaRes # n is precision of accuracy (number of places after decimal), default is 2 vqa_scorer = VQAEval(vqa, vqa_result, n=2) logging.info("Start VQA evaluation.") vqa_scorer.evaluate() # print accuracies overall_acc = vqa_scorer.accuracy["overall"] metrics["agg_metrics"] = overall_acc logging.info("Overall Accuracy is: %.02f\n" % overall_acc) logging.info("Per Answer Type Accuracy is the following:") for ans_type in vqa_scorer.accuracy["perAnswerType"]: logging.info("%s : %.02f" % (ans_type, vqa_scorer.accuracy["perAnswerType"][ans_type])) metrics[ans_type] = vqa_scorer.accuracy["perAnswerType"][ans_type] with open(os.path.join(registry.get_path("output_dir"), "evaluate.txt"), "a") as f: f.write(json.dumps(metrics) + "\n") return metrics # @registry.register_task("gqa") # GQATask = VQATask # @registry.register_task("aok_vqa") # AOKVQATask = VQATask @registry.register_task("gqa") class GQATask(VQATask): pass # def valid_step(self, model, samples): # answers = model.predict_answers( # samples=samples, # answer_list=self.answer_list, # inference_method=self.inference_method, # num_beams=self.num_beams, # max_len=self.max_len, # min_len=self.min_len, # num_ans_candidates=self.num_ans_candidates, # prompt=self.prompt, # ) # pred_qa_pairs = [] # question_id = samples["question_id"] # gt_answers = samples["answer"] # for answer, ques_id, gt_answer in zip(answers, question_id, gt_answers): # ques_id = int(ques_id.item()) # pred_qa_pairs.append({"question_id": ques_id, "pred_ans": answer, "gt_ans": gt_answer}) # return pred_qa_pairs # @dist_utils.main_process # def _report_metrics(self, result_file, split): # """ # TODO: add other evaluation metrics for GQA # """ # results = json.load(open(result_file, "r")) # acc = [] # vqa_tool = VQAEval() # for res in results: # if res["gt_ans"] is None: # prepare test results for leaderboard evaluation # self._save_result_leaderboard(results) # return # gt_ans = res["gt_ans"] # pred = res["pred_ans"] # if self.inference_method == "generate": # pred = vqa_tool.processPunctuation(pred) # pred = vqa_tool.processDigitArticle(pred) # vqa_acc = 1 if pred == gt_ans else 0 # acc.append(vqa_acc) # accuracy = sum(acc) / len(acc) * 100 # metrics = {"agg_metrics": accuracy, "acc": accuracy} # with open( # os.path.join(registry.get_path("output_dir"), "evaluate.txt"), "a" # ) as f: # f.write(json.dumps(metrics) + "\n") # logging.info(metrics) # return metrics @registry.register_task("3d_vqa") class ThreeDVQATask(VQATask): pass # def valid_step(self, model, samples): # answers = model.predict_answers( # samples=samples, # answer_list=self.answer_list, # inference_method=self.inference_method, # num_beams=self.num_beams, # max_len=self.max_len, # min_len=self.min_len, # num_ans_candidates=self.num_ans_candidates, # ) # pred_qa_pairs = [] # question_id = samples["question_id"] # gt_answers = samples["direct_answers"] # for pred_answer, ques_id, gt_answer in zip(answers, question_id, gt_answers): # pred_qa_pairs.append( # {"question_id": ques_id, "pred_ans": pred_answer, "gt_ans": gt_answer} # ) # return pred_qa_pairs # @dist_utils.main_process # def _report_metrics(self, result_file, split): # """ # Implementing accuracy computation for AOKVQA, see # 
https://github.com/allenai/aokvqa/blob/main/evaluation/eval_predictions.py#L45 for details. # """ # TODO add evaluation for multi-choice # results = json.load(open(result_file, "r")) # acc = [] # for res in results: # if res["gt_ans"] is None: # prepare test results for leaderboard evaluation # self._save_result_leaderboard(results) # return # pred = res["pred_ans"] # gt_ans = res["gt_ans"] # num_match = sum([pred == gt for gt in gt_ans]) # vqa_acc = min(1.0, num_match / 3.0) # acc.append(vqa_acc) # # accuracy = sum(acc) / len(acc) * 100 # metrics = {"agg_metrics": accuracy, "acc": accuracy} # with open( # os.path.join(registry.get_path("output_dir"), "evaluate.txt"), "a" # ) as f: # f.write(json.dumps(metrics) + "\n") # logging.info(metrics) # return metrics # @dist_utils.main_process # def _save_result_leaderboard(self, results): # """ # Saving the results in the format required for leaderboard evaluation. # [TODO] add support for multi-choice. # """ # result_leaderboard = dict() # for res in results: # result_leaderboard[res["question_id"]] = { # "direct_answer": res["pred_ans"], # "multiple_choice": "", # } # result_file = registry.get_path("result_dir") + "_leaderboard.json" # with open(result_file, "w") as f: # json.dump(result_leaderboard, f) # logging.info(f"Saved results for leaderboard evaluation at {result_file}")
3D-LLM-main
3DLLM_BLIP2-base/lavis/tasks/vqa.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import logging import json import os import torch import torch.distributed as dist from itertools import chain import lavis.common.dist_utils as dist_utils from lavis.common.dist_utils import get_rank, get_world_size, is_main_process from lavis.common.registry import registry from lavis.common.vqa_tools.vqa_eval import VQAEval as VQATool from lavis.tasks.vqa import VQATask @registry.register_task("vqa_reading_comprehension") class VQARCTask(VQATask): def __init__( self, num_beams, max_len, min_len, evaluate, num_ans_candidates, inference_method="rank", **kwargs, ): super().__init__(num_beams, max_len, min_len, evaluate, num_ans_candidates, inference_method) self.config = kwargs.get("config") @classmethod def setup_task(cls, cfg): run_cfg = cfg.run_cfg num_beams = run_cfg.get("num_beams", 3) max_len = run_cfg.get("max_len", 10) min_len = run_cfg.get("min_len", 1) evaluate = run_cfg.get("evaluate", False) inference_method = run_cfg.get("inference_method", "rank") num_ans_candidates = run_cfg.get("num_ans_candidates", 128) return cls( num_beams=num_beams, max_len=max_len, min_len=min_len, evaluate=evaluate, num_ans_candidates=num_ans_candidates, inference_method=inference_method, config=run_cfg, ) def valid_step(self, model, samples): answers, captions, gradcams = model.predict_answers( samples=samples, inference_method=self.inference_method, num_beams=self.num_beams, max_len=self.max_len, min_len=self.min_len, internal_bsz_fid=self.config["internal_bsz_fid"], num_captions=self.config["num_captions"], num_captions_fid=self.config["num_captions_fid"], cap_max_length=self.config["cap_max_length"], cap_min_length=self.config["cap_min_length"], top_k=self.config["top_k"], top_p=self.config["top_p"], repetition_penalty=self.config["repetition_penalty"], num_patches=self.config["num_patches"], block_num=self.config["block_num"], ) pred_qa_pairs = [] sample_captions = [] sample_gradcams = [] question_id = samples["question_id"] for answer, caption, gradcam, ques_id in zip(answers, captions, gradcams, question_id): ques_id = int(ques_id.item()) pred_qa_pairs.append({"question_id": ques_id, "answer": answer}) sample_captions.append({"question_id": ques_id, "caption": caption}) sample_gradcams.append({"question_id": ques_id, "gradcam": gradcam}) return [sample_gradcams, sample_captions, pred_qa_pairs] def after_evaluation(self, val_result, split_name, **kwargs): result_ = list(chain(*val_result[0::3])) result_file = self.save_gradcam( result_, result_dir=registry.get_path("result_dir"), filename=f"{split_name}_gradcam_result", remove_duplicate="question_id", ) result_ = list(chain(*val_result[1::3])) result_file = self.save_result( result_, result_dir=registry.get_path("result_dir"), filename=f"{split_name}_caption_result", remove_duplicate="question_id", ) result_ = list(chain(*val_result[2::3])) result_file = self.save_result( result_, result_dir=registry.get_path("result_dir"), filename=f"{split_name}_vqa_result", remove_duplicate="question_id", ) metrics = self._report_metrics(result_file=result_file, split=split_name) return metrics def save_gradcam(self, result, result_dir, filename, remove_duplicate=""): result_file = os.path.join(result_dir, "%s_rank%d.pth" % (filename, get_rank())) final_result_file = os.path.join(result_dir, "%s.pth" % filename) torch.save({"result": result}, 
result_file) dist.barrier() if is_main_process(): logging.warning("rank %d starts merging results." % get_rank()) # combine results from all processes result = [] for rank in range(get_world_size()): result_file = os.path.join(result_dir, "%s_rank%d.pth" % (filename, rank)) res_ckpt = torch.load(result_file, map_location="cpu") res = res_ckpt["result"] result += res if remove_duplicate: result_new = [] id_list = [] for res in result: if res[remove_duplicate] not in id_list: id_list.append(res[remove_duplicate]) result_new.append(res) result = result_new torch.save({"result": result}, final_result_file) print("result file saved to %s" % final_result_file) return final_result_file @registry.register_task("gqa_reading_comprehension") class GQARCTask(VQARCTask): def valid_step(self, model, samples): answers, captions, gradcams = model.predict_answers( samples=samples, inference_method=self.inference_method, num_beams=self.num_beams, max_len=self.max_len, min_len=self.min_len, internal_bsz_fid=self.config["internal_bsz_fid"], num_captions=self.config["num_captions"], num_captions_fid=self.config["num_captions_fid"], cap_max_length=self.config["cap_max_length"], cap_min_length=self.config["cap_min_length"], top_k=self.config["top_k"], top_p=self.config["top_p"], repetition_penalty=self.config["repetition_penalty"], num_patches=self.config["num_patches"], block_num=self.config["block_num"], ) pred_qa_pairs = [] sample_captions = [] sample_gradcams = [] question_id = samples["question_id"] gt_answers = samples["answer"] for pred_answer, caption, gradcam, ques_id, gt_answer in zip( answers, captions, gradcams, question_id, gt_answers ): ques_id = int(ques_id.item()) pred_qa_pairs.append({"question_id": ques_id, "pred_ans": pred_answer, "gt_ans": gt_answer}) sample_captions.append({"question_id": ques_id, "caption": caption}) sample_gradcams.append({"question_id": ques_id, "gradcam": gradcam}) return [sample_gradcams, sample_captions, pred_qa_pairs] @dist_utils.main_process def _report_metrics(self, result_file, split): """ TODO: add other evaluation metrics for GQA """ results = json.load(open(result_file, "r")) acc = [] vqa_tool = VQATool() for res in results: if res["gt_ans"] is None: # prepare test results for leaderboard evaluation self._save_result_leaderboard(results) return gt_ans = res["gt_ans"] pred = res["pred_ans"] if self.inference_method == "generate": pred = vqa_tool.processPunctuation(pred) pred = vqa_tool.processDigitArticle(pred) vqa_acc = 1 if pred == gt_ans else 0 acc.append(vqa_acc) accuracy = sum(acc) / len(acc) * 100 metrics = {"agg_metrics": accuracy, "acc": accuracy} with open(os.path.join(registry.get_path("output_dir"), "evaluate.txt"), "a") as f: f.write(json.dumps(metrics) + "\n") logging.info(metrics) return metrics @dist_utils.main_process def _save_result_leaderboard(self, results): """ Saving the results in the format required for leaderboard evaluation. """ result_leaderboard = [] for res in results: result_leaderboard.append( { "questionId": str(res["question_id"]), "prediction": str(res["pred_ans"]), } ) result_file = registry.get_path("result_dir") + "_leaderboard.json" with open(result_file, "w") as f: json.dump(result_leaderboard, f) logging.info(f"Saved results for leaderboard evaluation at {result_file}")
3D-LLM-main
3DLLM_BLIP2-base/lavis/tasks/vqa_reading_comprehension.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import json import os import logging import numpy as np import torch from lavis.common.dist_utils import main_process from lavis.common.registry import registry from lavis.tasks.base_task import BaseTask @registry.register_task("multimodal_classification") class MultimodalClassificationTask(BaseTask): def __init__(self): super().__init__() def valid_step(self, model, samples): results = [] outputs = model.predict(samples) predictions = outputs["predictions"] targets = outputs["targets"] predictions = predictions.max(1)[1].cpu().numpy() targets = targets.cpu().numpy() indices = samples[self.inst_id_key] for pred, tgt, index in zip(predictions, targets, indices): if isinstance(index, torch.Tensor): index = index.item() results.append( { self.inst_id_key: index, "prediction": pred.item(), "target": tgt.item(), } ) return results def after_evaluation(self, val_result, split_name, epoch, **kwargs): eval_result_file = self.save_result( result=val_result, result_dir=registry.get_path("result_dir"), filename="{}_epoch{}".format(split_name, epoch), remove_duplicate=self.inst_id_key, ) metrics = self._report_metrics(eval_result_file=eval_result_file, split_name=split_name) return metrics @main_process def _report_metrics(self, eval_result_file, split_name): results = json.load(open(eval_result_file)) predictions = np.array([res["prediction"] for res in results]) targets = np.array([res["target"] for res in results]) accuracy = (targets == predictions).sum() / targets.shape[0] metrics = {"agg_metrics": accuracy, "acc": accuracy} log_stats = {split_name: {k: v for k, v in metrics.items()}} with open(os.path.join(registry.get_path("output_dir"), "evaluate.txt"), "a") as f: f.write(json.dumps(log_stats) + "\n") logging.info(metrics) return metrics
3D-LLM-main
3DLLM_BLIP2-base/lavis/tasks/multimodal_classification.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import gzip import logging import os import random as rnd import tarfile import zipfile import decord import webdataset as wds import numpy as np import torch from torch.utils.data.dataset import IterableDataset, ChainDataset from decord import VideoReader from lavis.common.registry import registry from lavis.datasets.datasets.base_dataset import ConcatDataset from tqdm import tqdm decord.bridge.set_bridge("torch") MAX_INT = registry.get("MAX_INT") def load_video(video_path, n_frms=MAX_INT, height=-1, width=-1, sampling="uniform"): vr = VideoReader(uri=video_path, height=height, width=width) vlen = len(vr) start, end = 0, vlen n_frms = min(n_frms, vlen) if sampling == "uniform": indices = np.arange(start, end, vlen / n_frms).astype(int) elif sampling == "headtail": indices_h = sorted(rnd.sample(range(vlen // 2), n_frms // 2)) indices_t = sorted(rnd.sample(range(vlen // 2, vlen), n_frms // 2)) indices = indices_h + indices_t else: raise NotImplementedError # get_batch -> T, H, W, C frms = vr.get_batch(indices).permute(3, 0, 1, 2).float() # (C, T, H, W) return frms def apply_to_sample(f, sample): if len(sample) == 0: return {} def _apply(x): if torch.is_tensor(x): return f(x) elif isinstance(x, dict): return {key: _apply(value) for key, value in x.items()} elif isinstance(x, list): return [_apply(x) for x in x] else: return x return _apply(sample) def move_to_cuda(sample): def _move_to_cuda(tensor): return tensor.cuda() return apply_to_sample(_move_to_cuda, sample) def prepare_sample(samples, cuda_enabled=True): if cuda_enabled: samples = move_to_cuda(samples) # TODO fp16 support return samples def reorg_datasets_by_split(datasets): """ Organizes datasets by split. Args: datasets: dict of torch.utils.data.Dataset objects by name. Returns: Dict of datasets by split {split_name: List[Datasets]}. """ # if len(datasets) == 1: # return datasets[list(datasets.keys())[0]] # else: reorg_datasets = dict() # reorganize by split for _, dataset in datasets.items(): for split_name, dataset_split in dataset.items(): if split_name not in reorg_datasets: reorg_datasets[split_name] = [dataset_split] else: reorg_datasets[split_name].append(dataset_split) return reorg_datasets def concat_datasets(datasets): """ Concatenates multiple datasets into a single dataset. It supports may-style datasets and DataPipeline from WebDataset. Currently, does not support generic IterableDataset because it requires creating separate samplers. Now only supports conctenating training datasets and assuming validation and testing have only a single dataset. This is because metrics should not be computed on the concatenated datasets. Args: datasets: dict of torch.utils.data.Dataset objects by split. Returns: Dict of concatenated datasets by split, "train" is the concatenation of multiple datasets, "val" and "test" remain the same. If the input training datasets contain both map-style and DataPipeline datasets, returns a tuple, where the first element is a concatenated map-style dataset and the second element is a chained DataPipeline dataset. 
""" # concatenate datasets in the same split for split_name in datasets: if split_name != "train": assert len(datasets[split_name]) == 1, "Do not support multiple {} datasets.".format(split_name) datasets[split_name] = datasets[split_name][0] else: iterable_datasets, map_datasets = [], [] for dataset in datasets[split_name]: if isinstance(dataset, wds.DataPipeline): logging.info("Dataset {} is IterableDataset, can't be concatenated.".format(dataset)) iterable_datasets.append(dataset) elif isinstance(dataset, IterableDataset): raise NotImplementedError("Do not support concatenation of generic IterableDataset.") else: map_datasets.append(dataset) # if len(iterable_datasets) > 0: # concatenate map-style datasets and iterable-style datasets separately chained_datasets = ChainDataset(iterable_datasets) if len(iterable_datasets) > 0 else None concat_datasets = ConcatDataset(map_datasets) if len(map_datasets) > 0 else None train_datasets = concat_datasets, chained_datasets train_datasets = tuple([x for x in train_datasets if x is not None]) train_datasets = train_datasets[0] if len(train_datasets) == 1 else train_datasets datasets[split_name] = train_datasets return datasets def extract_archive(from_path, to_path=None, overwrite=False): """Extract archive. Args: from_path: the path of the archive. to_path: the root path of the extracted files (directory of from_path) overwrite: overwrite existing files (False) Returns: List of paths to extracted files even if not overwritten. Examples: >>> url = 'http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/validation.tar.gz' >>> from_path = './validation.tar.gz' >>> to_path = './' >>> torchtext.utils.download_from_url(url, from_path) >>> torchtext.utils.extract_archive(from_path, to_path) >>> ['.data/val.de', '.data/val.en'] >>> torchtext.utils.download_from_url(url, from_path) >>> torchtext.utils.extract_archive(from_path, to_path) >>> ['.data/val.de', '.data/val.en'] """ if to_path is None: to_path = os.path.dirname(from_path) if from_path.endswith((".tar.gz", ".tgz")): logging.info("Opening tar file {} to {}.".format(from_path, to_path)) with tarfile.open(from_path, "r") as tar: files = [] for file_ in tqdm(tar): file_path = os.path.join(to_path, file_.name) if file_.isfile(): files.append(file_path) if os.path.exists(file_path): logging.info("{} already extracted.".format(file_path)) if not overwrite: continue tar.extract(file_, to_path) logging.info("Finished extracting tar file {}.".format(from_path)) return files elif from_path.endswith(".zip"): assert zipfile.is_zipfile(from_path), from_path logging.info("Opening zip file {} to {}.".format(from_path, to_path)) with zipfile.ZipFile(from_path, "r") as zfile: files = [] for file_ in tqdm(zfile.namelist()): file_path = os.path.join(to_path, file_) files.append(file_path) if os.path.exists(file_path): logging.info("{} already extracted.".format(file_path)) if not overwrite: continue zfile.extract(file_, to_path) files = [f for f in files if os.path.isfile(f)] logging.info("Finished extracting zip file {}.".format(from_path)) return files elif from_path.endswith(".gz"): logging.info("Opening gz file {} to {}.".format(from_path, to_path)) default_block_size = 65536 filename = from_path[:-3] files = [filename] with gzip.open(from_path, "rb") as gzfile, open(filename, "wb") as d_file: while True: block = gzfile.read(default_block_size) if not block: break else: d_file.write(block) d_file.write(block) logging.info("Finished extracting gz file {}.".format(from_path)) return files else: raise 
NotImplementedError("We currently only support tar.gz, .tgz, .gz and zip achives.") def save_frames_grid(img_array, out_path): import torch from PIL import Image from torchvision.utils import make_grid if len(img_array.shape) == 3: img_array = img_array.unsqueeze(0) elif len(img_array.shape) == 5: b, t, c, h, w = img_array.shape img_array = img_array.view(-1, c, h, w) elif len(img_array.shape) == 4: pass else: raise NotImplementedError("Supports only (b,t,c,h,w)-shaped inputs. First two dimensions can be ignored.") assert img_array.shape[1] == 3, "Exepcting input shape of (H, W, 3), i.e. RGB-only." grid = make_grid(img_array) ndarr = grid.permute(1, 2, 0).to("cpu", torch.uint8).numpy() img = Image.fromarray(ndarr) img.save(out_path)
3D-LLM-main
3DLLM_BLIP2-base/lavis/datasets/data_utils.py
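A quick check of the "uniform" sampling branch in load_video above: with an arbitrary clip length and frame budget, the chosen indices are evenly spaced by vlen / n_frms before being cast to int.

import numpy as np

vlen, n_frms = 30, 4                                    # illustrative clip length / frame budget
indices = np.arange(0, vlen, vlen / n_frms).astype(int)
print(indices)                                          # [ 0  7 15 22]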
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import os from lavis.common.registry import registry from lavis.datasets.builders.base_dataset_builder import BaseDatasetBuilder from lavis.datasets.datasets.imagefolder_dataset import ImageFolderDataset @registry.register_builder("imagenet") class ImageNetBuilder(BaseDatasetBuilder): train_dataset_cls = ImageFolderDataset eval_dataset_cls = ImageFolderDataset DATASET_CONFIG_DICT = {"default": "configs/datasets/imagenet/defaults.yaml"} def _download_ann(self): pass def build(self): self.build_processors() build_info = self.config.build_info vis_info = build_info.get(self.data_type) datasets = dict() for split in build_info.splits: assert split in [ "train", "val", ], "Invalid split name {}, must be one of 'train', 'val' and 'test'." is_train = split == "train" vis_processor = self.vis_processors["train"] if is_train else self.vis_processors["eval"] vis_path = os.path.join(vis_info.storage, split) # create datasets dataset_cls = self.train_dataset_cls if is_train else self.eval_dataset_cls datasets[split] = dataset_cls( vis_processor=vis_processor, vis_root=vis_path, classnames=imagenet_classnames, ) return datasets imagenet_classnames = [ "tench", "goldfish", "great white shark", "tiger shark", "hammerhead shark", "electric ray", "stingray", "rooster", "hen", "ostrich", "brambling", "goldfinch", "house finch", "junco", "indigo bunting", "American robin", "bulbul", "jay", "magpie", "chickadee", "American dipper", "kite (bird of prey)", "bald eagle", "vulture", "great grey owl", "fire salamander", "smooth newt", "newt", "spotted salamander", "axolotl", "American bullfrog", "tree frog", "tailed frog", "loggerhead sea turtle", "leatherback sea turtle", "mud turtle", "terrapin", "box turtle", "banded gecko", "green iguana", "Carolina anole", "desert grassland whiptail lizard", "agama", "frilled-necked lizard", "alligator lizard", "Gila monster", "European green lizard", "chameleon", "Komodo dragon", "Nile crocodile", "American alligator", "triceratops", "worm snake", "ring-necked snake", "eastern hog-nosed snake", "smooth green snake", "kingsnake", "garter snake", "water snake", "vine snake", "night snake", "boa constrictor", "African rock python", "Indian cobra", "green mamba", "sea snake", "Saharan horned viper", "eastern diamondback rattlesnake", "sidewinder rattlesnake", "trilobite", "harvestman", "scorpion", "yellow garden spider", "barn spider", "European garden spider", "southern black widow", "tarantula", "wolf spider", "tick", "centipede", "black grouse", "ptarmigan", "ruffed grouse", "prairie grouse", "peafowl", "quail", "partridge", "african grey parrot", "macaw", "sulphur-crested cockatoo", "lorikeet", "coucal", "bee eater", "hornbill", "hummingbird", "jacamar", "toucan", "duck", "red-breasted merganser", "goose", "black swan", "tusker", "echidna", "platypus", "wallaby", "koala", "wombat", "jellyfish", "sea anemone", "brain coral", "flatworm", "nematode", "conch", "snail", "slug", "sea slug", "chiton", "chambered nautilus", "Dungeness crab", "rock crab", "fiddler crab", "red king crab", "American lobster", "spiny lobster", "crayfish", "hermit crab", "isopod", "white stork", "black stork", "spoonbill", "flamingo", "little blue heron", "great egret", "bittern bird", "crane bird", "limpkin", "common gallinule", "American coot", "bustard", "ruddy turnstone", "dunlin", 
"common redshank", "dowitcher", "oystercatcher", "pelican", "king penguin", "albatross", "grey whale", "killer whale", "dugong", "sea lion", "Chihuahua", "Japanese Chin", "Maltese", "Pekingese", "Shih Tzu", "King Charles Spaniel", "Papillon", "toy terrier", "Rhodesian Ridgeback", "Afghan Hound", "Basset Hound", "Beagle", "Bloodhound", "Bluetick Coonhound", "Black and Tan Coonhound", "Treeing Walker Coonhound", "English foxhound", "Redbone Coonhound", "borzoi", "Irish Wolfhound", "Italian Greyhound", "Whippet", "Ibizan Hound", "Norwegian Elkhound", "Otterhound", "Saluki", "Scottish Deerhound", "Weimaraner", "Staffordshire Bull Terrier", "American Staffordshire Terrier", "Bedlington Terrier", "Border Terrier", "Kerry Blue Terrier", "Irish Terrier", "Norfolk Terrier", "Norwich Terrier", "Yorkshire Terrier", "Wire Fox Terrier", "Lakeland Terrier", "Sealyham Terrier", "Airedale Terrier", "Cairn Terrier", "Australian Terrier", "Dandie Dinmont Terrier", "Boston Terrier", "Miniature Schnauzer", "Giant Schnauzer", "Standard Schnauzer", "Scottish Terrier", "Tibetan Terrier", "Australian Silky Terrier", "Soft-coated Wheaten Terrier", "West Highland White Terrier", "Lhasa Apso", "Flat-Coated Retriever", "Curly-coated Retriever", "Golden Retriever", "Labrador Retriever", "Chesapeake Bay Retriever", "German Shorthaired Pointer", "Vizsla", "English Setter", "Irish Setter", "Gordon Setter", "Brittany dog", "Clumber Spaniel", "English Springer Spaniel", "Welsh Springer Spaniel", "Cocker Spaniel", "Sussex Spaniel", "Irish Water Spaniel", "Kuvasz", "Schipperke", "Groenendael dog", "Malinois", "Briard", "Australian Kelpie", "Komondor", "Old English Sheepdog", "Shetland Sheepdog", "collie", "Border Collie", "Bouvier des Flandres dog", "Rottweiler", "German Shepherd Dog", "Dobermann", "Miniature Pinscher", "Greater Swiss Mountain Dog", "Bernese Mountain Dog", "Appenzeller Sennenhund", "Entlebucher Sennenhund", "Boxer", "Bullmastiff", "Tibetan Mastiff", "French Bulldog", "Great Dane", "St. 
Bernard", "husky", "Alaskan Malamute", "Siberian Husky", "Dalmatian", "Affenpinscher", "Basenji", "pug", "Leonberger", "Newfoundland dog", "Great Pyrenees dog", "Samoyed", "Pomeranian", "Chow Chow", "Keeshond", "brussels griffon", "Pembroke Welsh Corgi", "Cardigan Welsh Corgi", "Toy Poodle", "Miniature Poodle", "Standard Poodle", "Mexican hairless dog (xoloitzcuintli)", "grey wolf", "Alaskan tundra wolf", "red wolf or maned wolf", "coyote", "dingo", "dhole", "African wild dog", "hyena", "red fox", "kit fox", "Arctic fox", "grey fox", "tabby cat", "tiger cat", "Persian cat", "Siamese cat", "Egyptian Mau", "cougar", "lynx", "leopard", "snow leopard", "jaguar", "lion", "tiger", "cheetah", "brown bear", "American black bear", "polar bear", "sloth bear", "mongoose", "meerkat", "tiger beetle", "ladybug", "ground beetle", "longhorn beetle", "leaf beetle", "dung beetle", "rhinoceros beetle", "weevil", "fly", "bee", "ant", "grasshopper", "cricket insect", "stick insect", "cockroach", "praying mantis", "cicada", "leafhopper", "lacewing", "dragonfly", "damselfly", "red admiral butterfly", "ringlet butterfly", "monarch butterfly", "small white butterfly", "sulphur butterfly", "gossamer-winged butterfly", "starfish", "sea urchin", "sea cucumber", "cottontail rabbit", "hare", "Angora rabbit", "hamster", "porcupine", "fox squirrel", "marmot", "beaver", "guinea pig", "common sorrel horse", "zebra", "pig", "wild boar", "warthog", "hippopotamus", "ox", "water buffalo", "bison", "ram (adult male sheep)", "bighorn sheep", "Alpine ibex", "hartebeest", "impala (antelope)", "gazelle", "arabian camel", "llama", "weasel", "mink", "European polecat", "black-footed ferret", "otter", "skunk", "badger", "armadillo", "three-toed sloth", "orangutan", "gorilla", "chimpanzee", "gibbon", "siamang", "guenon", "patas monkey", "baboon", "macaque", "langur", "black-and-white colobus", "proboscis monkey", "marmoset", "white-headed capuchin", "howler monkey", "titi monkey", "Geoffroy's spider monkey", "common squirrel monkey", "ring-tailed lemur", "indri", "Asian elephant", "African bush elephant", "red panda", "giant panda", "snoek fish", "eel", "silver salmon", "rock beauty fish", "clownfish", "sturgeon", "gar fish", "lionfish", "pufferfish", "abacus", "abaya", "academic gown", "accordion", "acoustic guitar", "aircraft carrier", "airliner", "airship", "altar", "ambulance", "amphibious vehicle", "analog clock", "apiary", "apron", "trash can", "assault rifle", "backpack", "bakery", "balance beam", "balloon", "ballpoint pen", "Band-Aid", "banjo", "baluster / handrail", "barbell", "barber chair", "barbershop", "barn", "barometer", "barrel", "wheelbarrow", "baseball", "basketball", "bassinet", "bassoon", "swimming cap", "bath towel", "bathtub", "station wagon", "lighthouse", "beaker", "military hat (bearskin or shako)", "beer bottle", "beer glass", "bell tower", "baby bib", "tandem bicycle", "bikini", "ring binder", "binoculars", "birdhouse", "boathouse", "bobsleigh", "bolo tie", "poke bonnet", "bookcase", "bookstore", "bottle cap", "hunting bow", "bow tie", "brass memorial plaque", "bra", "breakwater", "breastplate", "broom", "bucket", "buckle", "bulletproof vest", "high-speed train", "butcher shop", "taxicab", "cauldron", "candle", "cannon", "canoe", "can opener", "cardigan", "car mirror", "carousel", "tool kit", "cardboard box / carton", "car wheel", "automated teller machine", "cassette", "cassette player", "castle", "catamaran", "CD player", "cello", "mobile phone", "chain", "chain-link fence", "chain mail", "chainsaw", 
"storage chest", "chiffonier", "bell or wind chime", "china cabinet", "Christmas stocking", "church", "movie theater", "cleaver", "cliff dwelling", "cloak", "clogs", "cocktail shaker", "coffee mug", "coffeemaker", "spiral or coil", "combination lock", "computer keyboard", "candy store", "container ship", "convertible", "corkscrew", "cornet", "cowboy boot", "cowboy hat", "cradle", "construction crane", "crash helmet", "crate", "infant bed", "Crock Pot", "croquet ball", "crutch", "cuirass", "dam", "desk", "desktop computer", "rotary dial telephone", "diaper", "digital clock", "digital watch", "dining table", "dishcloth", "dishwasher", "disc brake", "dock", "dog sled", "dome", "doormat", "drilling rig", "drum", "drumstick", "dumbbell", "Dutch oven", "electric fan", "electric guitar", "electric locomotive", "entertainment center", "envelope", "espresso machine", "face powder", "feather boa", "filing cabinet", "fireboat", "fire truck", "fire screen", "flagpole", "flute", "folding chair", "football helmet", "forklift", "fountain", "fountain pen", "four-poster bed", "freight car", "French horn", "frying pan", "fur coat", "garbage truck", "gas mask or respirator", "gas pump", "goblet", "go-kart", "golf ball", "golf cart", "gondola", "gong", "gown", "grand piano", "greenhouse", "radiator grille", "grocery store", "guillotine", "hair clip", "hair spray", "half-track", "hammer", "hamper", "hair dryer", "hand-held computer", "handkerchief", "hard disk drive", "harmonica", "harp", "combine harvester", "hatchet", "holster", "home theater", "honeycomb", "hook", "hoop skirt", "gymnastic horizontal bar", "horse-drawn vehicle", "hourglass", "iPod", "clothes iron", "carved pumpkin", "jeans", "jeep", "T-shirt", "jigsaw puzzle", "rickshaw", "joystick", "kimono", "knee pad", "knot", "lab coat", "ladle", "lampshade", "laptop computer", "lawn mower", "lens cap", "letter opener", "library", "lifeboat", "lighter", "limousine", "ocean liner", "lipstick", "slip-on shoe", "lotion", "music speaker", "loupe magnifying glass", "sawmill", "magnetic compass", "messenger bag", "mailbox", "tights", "one-piece bathing suit", "manhole cover", "maraca", "marimba", "mask", "matchstick", "maypole", "maze", "measuring cup", "medicine cabinet", "megalith", "microphone", "microwave oven", "military uniform", "milk can", "minibus", "miniskirt", "minivan", "missile", "mitten", "mixing bowl", "mobile home", "ford model t", "modem", "monastery", "monitor", "moped", "mortar and pestle", "graduation cap", "mosque", "mosquito net", "vespa", "mountain bike", "tent", "computer mouse", "mousetrap", "moving van", "muzzle", "metal nail", "neck brace", "necklace", "baby pacifier", "notebook computer", "obelisk", "oboe", "ocarina", "odometer", "oil filter", "pipe organ", "oscilloscope", "overskirt", "bullock cart", "oxygen mask", "product packet / packaging", "paddle", "paddle wheel", "padlock", "paintbrush", "pajamas", "palace", "pan flute", "paper towel", "parachute", "parallel bars", "park bench", "parking meter", "railroad car", "patio", "payphone", "pedestal", "pencil case", "pencil sharpener", "perfume", "Petri dish", "photocopier", "plectrum", "Pickelhaube", "picket fence", "pickup truck", "pier", "piggy bank", "pill bottle", "pillow", "ping-pong ball", "pinwheel", "pirate ship", "drink pitcher", "block plane", "planetarium", "plastic bag", "plate rack", "farm plow", "plunger", "Polaroid camera", "pole", "police van", "poncho", "pool table", "soda bottle", "plant pot", "potter's wheel", "power drill", "prayer rug", "printer", "prison", 
"missile", "projector", "hockey puck", "punching bag", "purse", "quill", "quilt", "race car", "racket", "radiator", "radio", "radio telescope", "rain barrel", "recreational vehicle", "fishing casting reel", "reflex camera", "refrigerator", "remote control", "restaurant", "revolver", "rifle", "rocking chair", "rotisserie", "eraser", "rugby ball", "ruler measuring stick", "sneaker", "safe", "safety pin", "salt shaker", "sandal", "sarong", "saxophone", "scabbard", "weighing scale", "school bus", "schooner", "scoreboard", "CRT monitor", "screw", "screwdriver", "seat belt", "sewing machine", "shield", "shoe store", "shoji screen / room divider", "shopping basket", "shopping cart", "shovel", "shower cap", "shower curtain", "ski", "balaclava ski mask", "sleeping bag", "slide rule", "sliding door", "slot machine", "snorkel", "snowmobile", "snowplow", "soap dispenser", "soccer ball", "sock", "solar thermal collector", "sombrero", "soup bowl", "keyboard space bar", "space heater", "space shuttle", "spatula", "motorboat", "spider web", "spindle", "sports car", "spotlight", "stage", "steam locomotive", "through arch bridge", "steel drum", "stethoscope", "scarf", "stone wall", "stopwatch", "stove", "strainer", "tram", "stretcher", "couch", "stupa", "submarine", "suit", "sundial", "sunglasses", "sunglasses", "sunscreen", "suspension bridge", "mop", "sweatshirt", "swim trunks / shorts", "swing", "electrical switch", "syringe", "table lamp", "tank", "tape player", "teapot", "teddy bear", "television", "tennis ball", "thatched roof", "front curtain", "thimble", "threshing machine", "throne", "tile roof", "toaster", "tobacco shop", "toilet seat", "torch", "totem pole", "tow truck", "toy store", "tractor", "semi-trailer truck", "tray", "trench coat", "tricycle", "trimaran", "tripod", "triumphal arch", "trolleybus", "trombone", "hot tub", "turnstile", "typewriter keyboard", "umbrella", "unicycle", "upright piano", "vacuum cleaner", "vase", "vaulted or arched ceiling", "velvet fabric", "vending machine", "vestment", "viaduct", "violin", "volleyball", "waffle iron", "wall clock", "wallet", "wardrobe", "military aircraft", "sink", "washing machine", "water bottle", "water jug", "water tower", "whiskey jug", "whistle", "hair wig", "window screen", "window shade", "Windsor tie", "wine bottle", "airplane wing", "wok", "wooden spoon", "wool", "split-rail fence", "shipwreck", "sailboat", "yurt", "website", "comic book", "crossword", "traffic or street sign", "traffic light", "dust jacket", "menu", "plate", "guacamole", "consomme", "hot pot", "trifle", "ice cream", "popsicle", "baguette", "bagel", "pretzel", "cheeseburger", "hot dog", "mashed potatoes", "cabbage", "broccoli", "cauliflower", "zucchini", "spaghetti squash", "acorn squash", "butternut squash", "cucumber", "artichoke", "bell pepper", "cardoon", "mushroom", "Granny Smith apple", "strawberry", "orange", "lemon", "fig", "pineapple", "banana", "jackfruit", "cherimoya (custard apple)", "pomegranate", "hay", "carbonara", "chocolate syrup", "dough", "meatloaf", "pizza", "pot pie", "burrito", "red wine", "espresso", "tea cup", "eggnog", "mountain", "bubble", "cliff", "coral reef", "geyser", "lakeshore", "promontory", "sandbar", "beach", "valley", "volcano", "baseball player", "bridegroom", "scuba diver", "rapeseed", "daisy", "yellow lady's slipper", "corn", "acorn", "rose hip", "horse chestnut seed", "coral fungus", "agaric", "gyromitra", "stinkhorn mushroom", "earth star fungus", "hen of the woods mushroom", "bolete", "corn cob", "toilet paper", ]
3D-LLM-main
3DLLM_BLIP2-base/lavis/datasets/builders/imagefolder_builder.py
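A minimal usage sketch for the "imagenet" builder above, assuming its default YAML (configs/datasets/imagenet/defaults.yaml) lists the splits to build and points the configured data_type storage at an ImageFolder-style directory with train/ and val/ subfolders; the lookup goes through the same registry that load_dataset() uses:

from lavis.common.registry import registry

builder_cls = registry.get_builder_class("imagenet")  # resolves to ImageNetBuilder
builder = builder_cls()                                # cfg=None -> falls back to the default config
datasets = builder.build_datasets()                    # e.g. {"train": ImageFolderDataset, "val": ImageFolderDataset}
print({split: len(ds) for split, ds in datasets.items()})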
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import logging import os import shutil import warnings import lavis.common.utils as utils import torch.distributed as dist from lavis.common.dist_utils import is_dist_avail_and_initialized, is_main_process from lavis.common.registry import registry from lavis.datasets.data_utils import extract_archive from lavis.processors.base_processor import BaseProcessor from omegaconf import OmegaConf from torchvision.datasets.utils import download_url class BaseDatasetBuilder: train_dataset_cls, eval_dataset_cls = None, None def __init__(self, cfg=None): super().__init__() if cfg is None: # help to create datasets from default config. self.config = load_dataset_config(self.default_config_path()) elif isinstance(cfg, str): self.config = load_dataset_config(cfg) else: # when called from task.build_dataset() self.config = cfg self.data_type = self.config.data_type self.vis_processors = {"train": BaseProcessor(), "eval": BaseProcessor()} self.text_processors = {"train": BaseProcessor(), "eval": BaseProcessor()} def build_datasets(self): # download, split, etc... # only called on 1 GPU/TPU in distributed # if is_main_process(): # self._download_data() if is_dist_avail_and_initialized(): dist.barrier() # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") datasets = self.build() # dataset['train'/'val'/'test'] return datasets def build_processors(self): vis_proc_cfg = self.config.get("vis_processor") txt_proc_cfg = self.config.get("text_processor") if vis_proc_cfg is not None: vis_train_cfg = vis_proc_cfg.get("train") vis_eval_cfg = vis_proc_cfg.get("eval") self.vis_processors["train"] = self._build_proc_from_cfg(vis_train_cfg) self.vis_processors["eval"] = self._build_proc_from_cfg(vis_eval_cfg) if txt_proc_cfg is not None: txt_train_cfg = txt_proc_cfg.get("train") txt_eval_cfg = txt_proc_cfg.get("eval") self.text_processors["train"] = self._build_proc_from_cfg(txt_train_cfg) self.text_processors["eval"] = self._build_proc_from_cfg(txt_eval_cfg) @staticmethod def _build_proc_from_cfg(cfg): return registry.get_processor_class(cfg.name).from_config(cfg) if cfg is not None else None @classmethod def default_config_path(cls, type="default"): return utils.get_abs_path(cls.DATASET_CONFIG_DICT[type]) def _download_data(self): self._download_ann() self._download_vis() def _download_ann(self): """ Download annotation files if necessary. All the vision-language datasets should have annotations of unified format. storage_path can be: (1) relative/absolute: will be prefixed with env.cache_root to make full path if relative. (2) basename/dirname: will be suffixed with base name of URL if dirname is provided. Local annotation paths should be relative. """ anns = self.config.build_info.annotations splits = anns.keys() cache_root = registry.get_path("cache_root") for split in splits: info = anns[split] urls, storage_paths = info.get("url", None), info.storage if isinstance(urls, str): urls = [urls] if isinstance(storage_paths, str): storage_paths = [storage_paths] assert len(urls) == len(storage_paths) for url_or_filename, storage_path in zip(urls, storage_paths): # if storage_path is relative, make it full by prefixing with cache_root. 
if not os.path.isabs(storage_path): storage_path = os.path.join(cache_root, storage_path) dirname = os.path.dirname(storage_path) if not os.path.exists(dirname): os.makedirs(dirname) if os.path.isfile(url_or_filename): src, dst = url_or_filename, storage_path if not os.path.exists(dst): shutil.copyfile(src=src, dst=dst) else: logging.info("Using existing file {}.".format(dst)) else: if os.path.isdir(storage_path): # if only dirname is provided, suffix with basename of URL. raise ValueError( "Expecting storage_path to be a file path, got directory {}".format(storage_path) ) else: filename = os.path.basename(storage_path) download_url(url=url_or_filename, root=dirname, filename=filename) def _download_vis(self): storage_path = self.config.build_info.get(self.data_type).storage storage_path = utils.get_cache_path(storage_path) if not os.path.exists(storage_path): warnings.warn( f""" The specified path {storage_path} for visual inputs does not exist. Please provide a correct path to the visual inputs or refer to datasets/download_scripts/README.md for downloading instructions. """ ) def build(self): """ Create by split datasets inheriting torch.utils.data.Datasets. # build() can be dataset-specific. Overwrite to customize. """ self.build_processors() build_info = self.config.build_info ann_info = build_info.annotations vis_info = build_info.get(self.data_type) datasets = dict() for split in ann_info.keys(): if split not in ["train", "val", "test"]: continue is_train = split == "train" # processors vis_processor = self.vis_processors["train"] if is_train else self.vis_processors["eval"] text_processor = self.text_processors["train"] if is_train else self.text_processors["eval"] # annotation path ann_paths = ann_info.get(split).storage if isinstance(ann_paths, str): ann_paths = [ann_paths] abs_ann_paths = [] for ann_path in ann_paths: if not os.path.isabs(ann_path) and not ann_path.startswith("."): ann_path = utils.get_cache_path(ann_path) abs_ann_paths.append(ann_path) ann_paths = abs_ann_paths # visual data storage path vis_path = vis_info.storage if not os.path.isabs(vis_path) and not vis_path.startswith("."): # vis_path = os.path.join(utils.get_cache_path(), vis_path) vis_path = utils.get_cache_path(vis_path) if not os.path.exists(vis_path): warnings.warn("storage path {} does not exist.".format(vis_path)) # create datasets dataset_cls = self.train_dataset_cls if is_train else self.eval_dataset_cls datasets[split] = dataset_cls( vis_processor=vis_processor, text_processor=text_processor, ann_paths=ann_paths, vis_root=vis_path, ) return datasets def load_dataset_config(cfg_path): cfg = OmegaConf.load(cfg_path).datasets cfg = cfg[list(cfg.keys())[0]] return cfg
3D-LLM-main
3DLLM_BLIP2-base/lavis/datasets/builders/base_dataset_builder.py
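The build() method above only reads a handful of config keys: data_type, build_info.annotations.<split>.{url, storage}, build_info.<data_type>.storage, and the two processor blocks. Below is a hedged sketch of that layout with hypothetical names and paths (not copied from any real defaults.yaml), run through the same selection logic as load_dataset_config():

from omegaconf import OmegaConf

cfg_yaml = """
datasets:
  my_caption_dataset:                 # hypothetical dataset name
    data_type: images
    vis_processor:
      train: {name: "blip_image_train"}
      eval: {name: "blip_image_eval"}
    text_processor:
      train: {name: "blip_caption"}
      eval: {name: "blip_caption"}
    build_info:
      annotations:
        train: {url: "http://example.com/train.json", storage: "my_dataset/annotations/train.json"}
        val: {url: "http://example.com/val.json", storage: "my_dataset/annotations/val.json"}
      images:
        storage: "my_dataset/images/"
"""

cfg = OmegaConf.create(cfg_yaml)["datasets"]
cfg = cfg[list(cfg.keys())[0]]        # same "first (and only) dataset" selection as load_dataset_config()
print(cfg.data_type, list(cfg.build_info.annotations.keys()))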
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ from lavis.common.registry import registry from lavis.common.utils import get_cache_path from lavis.datasets.builders.base_dataset_builder import BaseDatasetBuilder from lavis.datasets.datasets.video_vqa_datasets import VideoQADataset class VideoQABuilder(BaseDatasetBuilder): train_dataset_cls = VideoQADataset eval_dataset_cls = VideoQADataset def build(self): datasets = super().build() ans2label = self.config.build_info.annotations.get("ans2label") if ans2label is None: raise ValueError("ans2label is not specified in build_info.") ans2label = get_cache_path(ans2label.storage) for split in datasets: datasets[split]._build_class_labels(ans2label) return datasets @registry.register_builder("msrvtt_qa") class MSRVTTQABuilder(VideoQABuilder): DATASET_CONFIG_DICT = { "default": "configs/datasets/msrvtt/defaults_qa.yaml", } @registry.register_builder("msvd_qa") class MSVDQABuilder(VideoQABuilder): DATASET_CONFIG_DICT = { "default": "configs/datasets/msvd/defaults_qa.yaml", }
3D-LLM-main
3DLLM_BLIP2-base/lavis/datasets/builders/video_qa_builder.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ from lavis.datasets.builders.base_dataset_builder import load_dataset_config from lavis.datasets.builders.caption_builder import ( COCOCapBuilder, MSRVTTCapBuilder, MSVDCapBuilder, VATEXCapBuilder, ) from lavis.datasets.builders.image_text_pair_builder import ( ConceptualCaption12MBuilder, ConceptualCaption3MBuilder, VGCaptionBuilder, SBUCaptionBuilder, ) from lavis.datasets.builders.classification_builder import ( NLVRBuilder, SNLIVisualEntailmentBuilder, ) from lavis.datasets.builders.imagefolder_builder import ImageNetBuilder from lavis.datasets.builders.video_qa_builder import MSRVTTQABuilder, MSVDQABuilder from lavis.datasets.builders.vqa_builder import ThreeDVQABuilder from lavis.datasets.builders.retrieval_builder import ( MSRVTTRetrievalBuilder, DiDeMoRetrievalBuilder, COCORetrievalBuilder, Flickr30kBuilder, ) from lavis.datasets.builders.dialogue_builder import AVSDDialBuilder from lavis.common.registry import registry __all__ = [ "COCOCapBuilder", "COCORetrievalBuilder", "ConceptualCaption12MBuilder", "ConceptualCaption3MBuilder", "DiDeMoRetrievalBuilder", "Flickr30kBuilder", "ImageNetBuilder", "MSRVTTCapBuilder", "MSRVTTQABuilder", "MSRVTTRetrievalBuilder", "MSVDCapBuilder", "MSVDQABuilder", "NLVRBuilder", "SBUCaptionBuilder", "SNLIVisualEntailmentBuilder", "VATEXCapBuilder", "VGCaptionBuilder", "AVSDDialBuilder", ] def load_dataset(name, cfg_path=None, vis_path=None, data_type=None): """ Example >>> dataset = load_dataset("coco_caption", cfg=None) >>> splits = dataset.keys() >>> print([len(dataset[split]) for split in splits]) """ if cfg_path is None: cfg = None else: cfg = load_dataset_config(cfg_path) try: builder = registry.get_builder_class(name)(cfg) except TypeError: print(f"Dataset {name} not found. Available datasets:\n" + ", ".join([str(k) for k in dataset_zoo.get_names()])) exit(1) if vis_path is not None: if data_type is None: # use default data type in the config data_type = builder.config.data_type assert data_type in builder.config.build_info, f"Invalid data_type {data_type} for {name}." builder.config.build_info.get(data_type).storage = vis_path dataset = builder.build_datasets() return dataset class DatasetZoo: def __init__(self) -> None: self.dataset_zoo = { k: list(v.DATASET_CONFIG_DICT.keys()) for k, v in sorted(registry.mapping["builder_name_mapping"].items()) } def get_names(self): return list(self.dataset_zoo.keys()) dataset_zoo = DatasetZoo()
3D-LLM-main
3DLLM_BLIP2-base/lavis/datasets/builders/__init__.py
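Expanding the docstring example in load_dataset() above into a runnable snippet (it assumes the coco_caption data referenced by its default config has already been downloaded):

from lavis.datasets.builders import load_dataset, dataset_zoo

print(dataset_zoo.get_names()[:5])                       # names registered via @registry.register_builder
dataset = load_dataset("coco_caption")                   # cfg_path=None -> use the builder's default config
print({split: len(dataset[split]) for split in dataset.keys()})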
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ from lavis.datasets.builders.base_dataset_builder import BaseDatasetBuilder from lavis.datasets.datasets.retrieval_datasets import ( RetrievalDataset, RetrievalEvalDataset, VideoRetrievalDataset, VideoRetrievalEvalDataset, ) from lavis.common.registry import registry @registry.register_builder("msrvtt_retrieval") class MSRVTTRetrievalBuilder(BaseDatasetBuilder): train_dataset_cls = VideoRetrievalDataset eval_dataset_cls = VideoRetrievalEvalDataset DATASET_CONFIG_DICT = {"default": "configs/datasets/msrvtt/defaults_ret.yaml"} @registry.register_builder("didemo_retrieval") class DiDeMoRetrievalBuilder(BaseDatasetBuilder): train_dataset_cls = VideoRetrievalDataset eval_dataset_cls = VideoRetrievalEvalDataset DATASET_CONFIG_DICT = {"default": "configs/datasets/didemo/defaults_ret.yaml"} @registry.register_builder("coco_retrieval") class COCORetrievalBuilder(BaseDatasetBuilder): train_dataset_cls = RetrievalDataset eval_dataset_cls = RetrievalEvalDataset DATASET_CONFIG_DICT = {"default": "configs/datasets/coco/defaults_ret.yaml"} @registry.register_builder("flickr30k") class Flickr30kBuilder(BaseDatasetBuilder): train_dataset_cls = RetrievalDataset eval_dataset_cls = RetrievalEvalDataset DATASET_CONFIG_DICT = {"default": "configs/datasets/flickr30k/defaults.yaml"}
3D-LLM-main
3DLLM_BLIP2-base/lavis/datasets/builders/retrieval_builder.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ from lavis.datasets.builders.base_dataset_builder import BaseDatasetBuilder from lavis.common.registry import registry from lavis.datasets.datasets.threedvqa_datasets import ThreeDVQADataset, ThreeDVQAEvalDataset @registry.register_builder("3d_vqa") class ThreeDVQABuilder(BaseDatasetBuilder): train_dataset_cls = ThreeDVQADataset eval_dataset_cls = ThreeDVQAEvalDataset DATASET_CONFIG_DICT = {"default": "configs/datasets/3dvqa/defaults.yaml"}
3D-LLM-main
3DLLM_BLIP2-base/lavis/datasets/builders/vqa_builder.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ from lavis.common.registry import registry from lavis.datasets.builders.base_dataset_builder import BaseDatasetBuilder from lavis.datasets.datasets.avsd_dialogue_datasets import ( AVSDDialDataset, AVSDDialEvalDataset, ) @registry.register_builder("avsd_dialogue") class AVSDDialBuilder(BaseDatasetBuilder): train_dataset_cls = AVSDDialDataset eval_dataset_cls = AVSDDialEvalDataset DATASET_CONFIG_DICT = {"default": "configs/datasets/avsd/defaults_dial.yaml"}
3D-LLM-main
3DLLM_BLIP2-base/lavis/datasets/builders/dialogue_builder.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import os from lavis.common.registry import registry from lavis.datasets.builders.base_dataset_builder import BaseDatasetBuilder from lavis.datasets.datasets.image_text_pair_datasets import ImageTextPairDataset from lavis.datasets.datasets.laion_dataset import LaionDataset @registry.register_builder("conceptual_caption_3m") class ConceptualCaption3MBuilder(BaseDatasetBuilder): train_dataset_cls = ImageTextPairDataset DATASET_CONFIG_DICT = {"default": "configs/datasets/conceptual_caption/defaults_3m.yaml"} @registry.register_builder("conceptual_caption_12m") class ConceptualCaption12MBuilder(BaseDatasetBuilder): train_dataset_cls = ImageTextPairDataset DATASET_CONFIG_DICT = {"default": "configs/datasets/conceptual_caption/defaults_12m.yaml"} @registry.register_builder("sbu_caption") class SBUCaptionBuilder(BaseDatasetBuilder): train_dataset_cls = ImageTextPairDataset DATASET_CONFIG_DICT = {"default": "configs/datasets/sbu_caption/defaults.yaml"} @registry.register_builder("vg_caption") class VGCaptionBuilder(BaseDatasetBuilder): train_dataset_cls = ImageTextPairDataset DATASET_CONFIG_DICT = {"default": "configs/datasets/vg/defaults_caption.yaml"} @registry.register_builder("laion2B_multi") class Laion2BMultiBuilder(BaseDatasetBuilder): train_dataset_cls = LaionDataset DATASET_CONFIG_DICT = {"default": "configs/datasets/laion/defaults_2B_multi.yaml"} def _download_ann(self): pass def _download_vis(self): pass def build(self): self.build_processors() build_info = self.config.build_info datasets = dict() split = "train" # laion dataset only has train split # create datasets # [NOTE] return inner_datasets (wds.DataPipeline) dataset_cls = self.train_dataset_cls datasets[split] = dataset_cls( vis_processor=self.vis_processors[split], text_processor=self.text_processors[split], location=build_info.storage, ).inner_dataset return datasets
3D-LLM-main
3DLLM_BLIP2-base/lavis/datasets/builders/image_text_pair_builder.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ from lavis.datasets.builders.base_dataset_builder import BaseDatasetBuilder from lavis.datasets.datasets.coco_caption_datasets import ( COCOCapDataset, COCOCapEvalDataset, NoCapsEvalDataset, ) from lavis.common.registry import registry from lavis.datasets.datasets.video_caption_datasets import ( VideoCaptionDataset, VideoCaptionEvalDataset, ) @registry.register_builder("coco_caption") class COCOCapBuilder(BaseDatasetBuilder): train_dataset_cls = COCOCapDataset eval_dataset_cls = COCOCapEvalDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco/defaults_cap.yaml", } @registry.register_builder("nocaps") class COCOCapBuilder(BaseDatasetBuilder): eval_dataset_cls = NoCapsEvalDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/nocaps/defaults.yaml", } @registry.register_builder("msrvtt_caption") class MSRVTTCapBuilder(BaseDatasetBuilder): train_dataset_cls = VideoCaptionDataset eval_dataset_cls = VideoCaptionEvalDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/msrvtt/defaults_cap.yaml", } @registry.register_builder("msvd_caption") class MSVDCapBuilder(BaseDatasetBuilder): train_dataset_cls = VideoCaptionDataset eval_dataset_cls = VideoCaptionEvalDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/msvd/defaults_cap.yaml", } @registry.register_builder("vatex_caption") class VATEXCapBuilder(BaseDatasetBuilder): train_dataset_cls = VideoCaptionDataset eval_dataset_cls = VideoCaptionEvalDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/vatex/defaults_cap.yaml", }
3D-LLM-main
3DLLM_BLIP2-base/lavis/datasets/builders/caption_builder.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ from lavis.common.registry import registry from lavis.datasets.builders.base_dataset_builder import BaseDatasetBuilder from lavis.datasets.datasets.nlvr_datasets import NLVRDataset, NLVREvalDataset from lavis.datasets.datasets.snli_ve_datasets import SNLIVisualEntialmentDataset @registry.register_builder("nlvr") class NLVRBuilder(BaseDatasetBuilder): train_dataset_cls = NLVRDataset eval_dataset_cls = NLVREvalDataset DATASET_CONFIG_DICT = {"default": "configs/datasets/nlvr/defaults.yaml"} @registry.register_builder("snli_ve") class SNLIVisualEntailmentBuilder(BaseDatasetBuilder): train_dataset_cls = SNLIVisualEntialmentDataset eval_dataset_cls = SNLIVisualEntialmentDataset DATASET_CONFIG_DICT = {"default": "configs/datasets/snli_ve/defaults.yaml"}
3D-LLM-main
3DLLM_BLIP2-base/lavis/datasets/builders/classification_builder.py
import json
from typing import Iterable

from torch.utils.data import Dataset, ConcatDataset
from torch.utils.data.dataloader import default_collate


class BaseDataset(Dataset):
    def __init__(self, vis_processor=None, text_processor=None, vis_root=None, ann_paths=[]):
        self.vis_root = vis_root

        self.annotation = []
        for ann_path in ann_paths:
            self.annotation.extend(json.load(open(ann_path, "r")))

        self.vis_processor = vis_processor
        self.text_processor = text_processor

        self._add_instance_ids()

    def __len__(self):
        return len(self.annotation)

    def collater(self, samples):
        # samples is a list of dicts
        if "answer" in samples[0] and isinstance(samples[0]["answer"], list):
            max_length = max(len(s["answer"]) for s in samples)
            for s in samples:
                s["answer"] += [s["answer"][0]] * (max_length - len(s["answer"]))
        return default_collate(samples)

    def set_processors(self, vis_processor, text_processor):
        self.vis_processor = vis_processor
        self.text_processor = text_processor

    def _add_instance_ids(self, key="instance_id"):
        for idx, ann in enumerate(self.annotation):
            ann[key] = str(idx)


class ConcatDataset(ConcatDataset):
    def __init__(self, datasets: Iterable[Dataset]) -> None:
        super().__init__(datasets)

    def collater(self, samples):
        all_keys = set()
        for s in samples:
            all_keys.update(s)

        shared_keys = all_keys
        for s in samples:
            shared_keys = shared_keys & set(s.keys())

        samples_shared_keys = []
        for s in samples:
            samples_shared_keys.append({k: s[k] for k in s.keys() if k in shared_keys})

        return self.datasets[0].collater(samples_shared_keys)
3D-LLM-main
3DLLM_BLIP2-base/lavis/datasets/datasets/base_dataset.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import time import random import torch from lavis.datasets.data_utils import move_to_cuda from torch.utils.data import DataLoader class MultiIterLoader: """ A simple wrapper for iterating over multiple iterators. Args: loaders (List[Loader]): List of Iterator loaders. ratios (List[float]): List of ratios to sample from each loader. If None, all loaders are sampled uniformly. """ def __init__(self, loaders, ratios=None): # assert all loaders has __next__ method for loader in loaders: assert hasattr(loader, "__next__"), "Loader {} has no __next__ method.".format(loader) if ratios is None: ratios = [1.0] * len(loaders) else: assert len(ratios) == len(loaders) ratios = [float(ratio) / sum(ratios) for ratio in ratios] self.loaders = loaders self.ratios = ratios def __next__(self): # random sample from each loader by ratio loader_idx = random.choices(range(len(self.loaders)), self.ratios, k=1)[0] return next(self.loaders[loader_idx]) class PrefetchLoader(object): """ Modified from https://github.com/ChenRocks/UNITER. overlap compute and cuda data transfer (copied and then modified from nvidia apex) """ def __init__(self, loader): self.loader = loader self.stream = torch.cuda.Stream() def __iter__(self): loader_it = iter(self.loader) self.preload(loader_it) batch = self.next(loader_it) while batch is not None: is_tuple = isinstance(batch, tuple) if is_tuple: task, batch = batch if is_tuple: yield task, batch else: yield batch batch = self.next(loader_it) def __len__(self): return len(self.loader) def preload(self, it): try: self.batch = next(it) except StopIteration: self.batch = None return # if record_stream() doesn't work, another option is to make sure # device inputs are created on the main stream. # self.next_input_gpu = torch.empty_like(self.next_input, # device='cuda') # self.next_target_gpu = torch.empty_like(self.next_target, # device='cuda') # Need to make sure the memory allocated for next_* is not still in use # by the main stream at the time we start copying to next_*: # self.stream.wait_stream(torch.cuda.current_stream()) with torch.cuda.stream(self.stream): self.batch = move_to_cuda(self.batch) # more code for the alternative if record_stream() doesn't work: # copy_ will record the use of the pinned source tensor in this # side stream. # self.next_input_gpu.copy_(self.next_input, non_blocking=True) # self.next_target_gpu.copy_(self.next_target, non_blocking=True) # self.next_input = self.next_input_gpu # self.next_target = self.next_target_gpu def next(self, it): torch.cuda.current_stream().wait_stream(self.stream) batch = self.batch if batch is not None: record_cuda_stream(batch) self.preload(it) return batch def __getattr__(self, name): method = self.loader.__getattribute__(name) return method def record_cuda_stream(batch): if isinstance(batch, torch.Tensor): batch.record_stream(torch.cuda.current_stream()) elif isinstance(batch, list) or isinstance(batch, tuple): for t in batch: record_cuda_stream(t) elif isinstance(batch, dict): for t in batch.values(): record_cuda_stream(t) else: pass class IterLoader: """ A wrapper to convert DataLoader as an infinite iterator. 
Modified from: https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/iter_based_runner.py """ def __init__(self, dataloader: DataLoader, use_distributed: bool = False): self._dataloader = dataloader self.iter_loader = iter(self._dataloader) self._use_distributed = use_distributed self._epoch = 0 @property def epoch(self) -> int: return self._epoch def __next__(self): try: data = next(self.iter_loader) except StopIteration: self._epoch += 1 if hasattr(self._dataloader.sampler, "set_epoch") and self._use_distributed: self._dataloader.sampler.set_epoch(self._epoch) time.sleep(2) # Prevent possible deadlock during epoch transition self.iter_loader = iter(self._dataloader) data = next(self.iter_loader) return data def __iter__(self): return self def __len__(self): return len(self._dataloader)
3D-LLM-main
3DLLM_BLIP2-base/lavis/datasets/datasets/dataloader_utils.py
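The three wrappers above compose naturally: IterLoader makes a finite DataLoader infinite, MultiIterLoader samples among several such loaders by ratio, and PrefetchLoader overlaps host-to-GPU copies (it needs CUDA, so it is left out of this CPU-only sketch). TensorDataset stands in for real LAVIS datasets:

import torch
from torch.utils.data import DataLoader, TensorDataset
from lavis.datasets.datasets.dataloader_utils import IterLoader, MultiIterLoader

loader_a = IterLoader(DataLoader(TensorDataset(torch.zeros(8, 4)), batch_size=2))
loader_b = IterLoader(DataLoader(TensorDataset(torch.ones(8, 4)), batch_size=2))

mixed = MultiIterLoader([loader_a, loader_b], ratios=[2, 1])   # roughly 2/3 of batches come from loader_a
for _ in range(5):
    (batch,) = next(mixed)        # TensorDataset batches are 1-tuples
    print(batch.mean().item())    # 0.0 for loader_a batches, 1.0 for loader_b batches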
import torch

from lavis.datasets.datasets.base_dataset import BaseDataset


class VQADataset(BaseDataset):
    def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
        super().__init__(vis_processor, text_processor, vis_root, ann_paths)

    def collater(self, samples):
        pc_list, points_list, question_list, answer_list, weight_list = [], [], [], [], []

        num_answers = []

        for sample in samples:
            pc_list.append(sample["pc_feat"])
            points_list.append(sample["pc"])
            question_list.append(sample["text_input"])
            weight_list.extend(sample["weight"])

            answers = sample["answer"]
            answer_list.extend(answers)
            num_answers.append(len(answers))

        return {
            "pc_feat": torch.stack(pc_list, dim=0),
            "pc": torch.stack(points_list, dim=0),
            "text_input": question_list,
            "answer": answer_list,
            "weight": torch.Tensor(weight_list),
            "n_answers": torch.LongTensor(num_answers),
        }


class VQAEvalDataset(BaseDataset):
    def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
        super().__init__(vis_processor, text_processor, vis_root, ann_paths)
3D-LLM-main
3DLLM_BLIP2-base/lavis/datasets/datasets/vqa_datasets.py
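A quick shape check for VQADataset.collater above, using two fabricated samples; the sizes follow the 3D-VQA datasets later in this dump (1408-dim scene features, 3-dim voxel coordinates, 10000 points per scene), and lavis must be importable:

import torch
from lavis.datasets.datasets.vqa_datasets import VQADataset

samples = [
    {
        "pc_feat": torch.zeros(10000, 1408),
        "pc": torch.zeros(10000, 3),
        "text_input": "what color is the chair?",
        "answer": ["brown"],
        "weight": [1.0],
    }
    for _ in range(2)
]
batch = VQADataset.collater(None, samples)   # the method never reads `self`
print(batch["pc_feat"].shape, batch["pc"].shape, batch["n_answers"])
# torch.Size([2, 10000, 1408]) torch.Size([2, 10000, 3]) tensor([1, 1])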
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import os import json import torch import numpy as np from PIL import Image from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True from lavis.datasets.datasets.vqa_datasets import VQADataset, VQAEvalDataset class __DisplMixin: def displ_item(self, index): sample, ann = self.__getitem__(index), self.annotation[index] return OrderedDict( { "file": ann["image"], "question": ann["question"], "question_id": ann["question_id"], "answer": "; ".join(ann["answers"]), "pc_feat": sample["pc_feat"], "pc": sample["pc"], } ) class ThreeDVQADataset(VQADataset, __DisplMixin): def __init__(self, vis_processor, text_processor, vis_root, ann_paths): """ vis_root (string): Root directory of images (e.g. coco/images/) ann_root (string): directory to store the annotation file """ super().__init__(vis_processor, text_processor, vis_root, ann_paths) self.scene_ids = {} n = 0 new_annotation = [] for ann in self.annotation: try: img_id = ann["scene_id"] if img_id not in self.scene_ids.keys(): self.scene_ids[img_id] = n n += 1 new_annotation.append(ann) except: pass self.annotation = new_annotation self.pc_feat_root = "examples/voxelized_features_sam_nonzero" # 2 self.voxel_root = "examples/voxelized_voxels_sam_nonzero" # flatten self.annotation = [ ann for ann in self.annotation if os.path.exists(os.path.join(self.pc_feat_root, ann["scene_id"] + ".pt")) ] def __getitem__(self, index): ann = self.annotation[index] caption = self.text_processor(ann["question"]) scene_id = ann["scene_id"] pc_feat = torch.load(os.path.join(self.pc_feat_root, f"{scene_id}.pt"), map_location="cpu") pc = np.load(os.path.join(self.voxel_root, f"{scene_id}.npy")) pc = torch.tensor(pc).float().cpu() # sample 10000 points: [N, 1408] -> [10000, 1408] if pc_feat.shape[0] > 10000: idxes = torch.sort(torch.randperm(pc_feat.shape[0])[:10000])[1] pc_feat = pc_feat[idxes] pc = pc[idxes] else: pc_feat = torch.cat([pc_feat, torch.zeros(10000 - pc_feat.shape[0], 1408)], dim=0) pc = torch.cat([pc, torch.zeros(10000 - pc.shape[0], 3)], dim=0) answer_weight = {} for answer in ann["answers"]: if answer in answer_weight.keys(): answer_weight[answer] += 1 / len(ann["answers"]) else: answer_weight[answer] = 1 / len(ann["answers"]) answers = list(answer_weight.keys()) weights = list(answer_weight.values()) return { "pc_feat": pc_feat, "pc": pc, "text_input": caption, "answer": answers, "weight": weights, "scene_id": self.scene_ids[ann["scene_id"]], "question_id": index, } def __len__(self): return len(self.annotation) class ThreeDVQAEvalDataset(VQAEvalDataset): def __init__(self, vis_processor, text_processor, vis_root, ann_paths): """ vis_root (string): Root directory of images (e.g. 
coco/images/) ann_root (string): directory to store the annotation file split (string): val or test """ super().__init__(vis_processor, text_processor, vis_root, ann_paths) self.scene_ids = {} n = 0 new_annotation = [] for ann in self.annotation: try: img_id = ann["scene_id"] if img_id not in self.scene_ids.keys(): self.scene_ids[img_id] = n n += 1 new_annotation.append(ann) except: pass self.annotation = new_annotation self.pc_feat_root = "examples/voxelized_features_sam_nonzero" self.voxel_root = "examples/voxelized_voxels_sam_nonzero" self.annotation = [ ann for ann in self.annotation if os.path.exists(os.path.join(self.pc_feat_root, ann["scene_id"] + ".pt")) ] def __getitem__(self, index): ann = self.annotation[index] caption = self.text_processor(ann["question"]) scene_id = ann["scene_id"] pc_feat = torch.load(os.path.join(self.pc_feat_root, f"{scene_id}.pt"), map_location="cpu") # [N, 1408] pc = np.load(os.path.join(self.voxel_root, f"{scene_id}.npy")) pc = torch.tensor(pc).float().cpu() # sample 10000 points: [N, 1408] -> [10000, 1408] if pc_feat.shape[0] > 10000: idxes = torch.sort(torch.randperm(pc_feat.shape[0])[:10000])[1] pc_feat = pc_feat[idxes] pc = pc[idxes] else: pc_feat = torch.cat([pc_feat, torch.zeros(10000 - pc_feat.shape[0], 1408)], dim=0) pc = torch.cat([pc, torch.zeros(10000 - pc.shape[0], 3)], dim=0) return { "pc_feat": pc_feat, "pc": pc, "text_input": caption, "image_id": self.scene_ids[scene_id], "instance_id": scene_id, "question_id": index, } def __len__(self): return len(self.annotation) class NoCapsEvalDataset(VQAEvalDataset): def __init__(self, vis_processor, text_processor, vis_root, ann_paths): """ vis_root (string): Root directory of images (e.g. coco/images/) ann_root (string): directory to store the annotation file split (string): val or test """ super().__init__(vis_processor, text_processor, vis_root, ann_paths) def __getitem__(self, index): ann = self.annotation[index] scene_id = ann["scene_id"] pc_feat = torch.load(os.path.join(self.pc_feat_root, f"{scene_id}.pt"), map_location="cpu") # sample 10000 points: [N, 1408] -> [10000, 1408] if pc_feat.shape[0] > 10000: pc_feat = pc_feat[torch.randperm(pc_feat.shape[0])[:10000]] else: pc_feat = torch.cat([pc_feat, torch.zeros(10000 - pc_feat.shape[0], 1408)], dim=0) caption = self.text_processor(ann["question"]) return { "pc_feat": pc_feat, "text_input": caption, "instance_id": scene_id, }
3D-LLM-main
3DLLM_BLIP2-base/lavis/datasets/datasets/threedvqa_datasets.py
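ThreeDVQADataset.__getitem__ above cuts or zero-pads every scene to exactly 10000 points. Below is a standalone sketch of that step (not imported from the dataset); note that the original indexes with torch.sort(torch.randperm(N)[:10000])[1], which returns argsort positions in [0, 10000) rather than the sampled point indices, so this sketch uses the sorted values instead:

import torch

def sample_or_pad(pc_feat: torch.Tensor, pc: torch.Tensor, num_points: int = 10000):
    """[N, C] features and [N, 3] coords -> exactly [num_points, C] and [num_points, 3]."""
    n = pc_feat.shape[0]
    if n > num_points:
        idxes = torch.sort(torch.randperm(n)[:num_points]).values   # sorted random subset of row indices
        return pc_feat[idxes], pc[idxes]
    pad_feat = torch.zeros(num_points - n, pc_feat.shape[1])
    pad_pc = torch.zeros(num_points - n, pc.shape[1])
    return torch.cat([pc_feat, pad_feat], dim=0), torch.cat([pc, pad_pc], dim=0)

feat, voxels = sample_or_pad(torch.randn(12000, 1408), torch.randn(12000, 3))
print(feat.shape, voxels.shape)   # torch.Size([10000, 1408]) torch.Size([10000, 3])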
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause Based on huggingface code base https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert """ import math import os import warnings from dataclasses import dataclass from typing import Optional, Tuple import torch from torch import Tensor, device import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss import torch.nn.functional as F from transformers import BatchEncoding, PreTrainedTokenizer from transformers.activations import ACT2FN from transformers.file_utils import ( ModelOutput, ) from transformers.modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from transformers.modeling_utils import ( PreTrainedModel, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer, ) from transformers.utils import logging from transformers.models.bert.configuration_bert import BertConfig from lavis.common.utils import get_abs_path from lavis.models.base_model import BaseEncoder logging.set_verbosity_error() logger = logging.get_logger(__name__) class BertEmbeddings(nn.Module): """Construct the embeddings from word and position embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) if config.add_type_embeddings: self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.config = config def forward( self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0, ): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) if token_type_ids is not None: token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings else: embeddings = inputs_embeds if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class BertSelfAttention(nn.Module): def __init__(self, config, is_cross_attention): super().__init__() self.config = config if config.hidden_size % 
config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( "The hidden size (%d) is not a multiple of the number of attention " "heads (%d)" % (config.hidden_size, config.num_attention_heads) ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) if is_cross_attention: self.key = nn.Linear(config.encoder_width, self.all_head_size) self.value = nn.Linear(config.encoder_width, self.all_head_size) else: self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.save_attention = False def save_attn_gradients(self, attn_gradients): self.attn_gradients = attn_gradients def get_attn_gradients(self): return self.attn_gradients def save_attention_map(self, attention_map): self.attention_map = attention_map def get_attention_map(self): return self.attention_map def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + ( self.num_attention_heads, self.attention_head_size, ) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None if is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. 
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": seq_length = hidden_states.size()[1] position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility if self.position_embedding_type == "relative_key": relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == "relative_key_query": relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in BertModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.Softmax(dim=-1)(attention_scores) if is_cross_attention and self.save_attention: self.save_attention_map(attention_probs) attention_probs.register_hook(self.save_attn_gradients) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs_dropped = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs_dropped = attention_probs_dropped * head_mask context_layer = torch.matmul(attention_probs_dropped, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) outputs = outputs + (past_key_value,) return outputs class BertSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertAttention(nn.Module): def __init__(self, config, is_cross_attention=False): super().__init__() self.self = BertSelfAttention(config, is_cross_attention) self.output = BertSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads, ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): self_outputs = self.self( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs class BertIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class BertOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertLayer(nn.Module): def __init__(self, config, layer_num): super().__init__() self.config = 
config self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = BertAttention(config) self.layer_num = layer_num # compatibility for ALBEF and BLIP try: # ALBEF & ALPRO fusion_layer = self.config.fusion_layer add_cross_attention = fusion_layer <= layer_num and self.config.add_cross_attention self.fusion_layer = fusion_layer except AttributeError: # BLIP self.fusion_layer = self.config.num_hidden_layers add_cross_attention = self.config.add_cross_attention # if self.config.add_cross_attention: if add_cross_attention: self.crossattention = BertAttention(config, is_cross_attention=self.config.add_cross_attention) self.intermediate = BertIntermediate(config) self.output = BertOutput(config) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, mode=None, ): # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] # TODO line 482 in albef/models/xbert.py # compatibility for ALBEF and BLIP if mode in ["multimodal", "fusion"] and hasattr(self, "crossattention"): assert encoder_hidden_states is not None, "encoder_hidden_states must be given for cross-attention layers" if isinstance(encoder_hidden_states, list): cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states[(self.layer_num - self.fusion_layer) % len(encoder_hidden_states)], encoder_attention_mask[(self.layer_num - self.fusion_layer) % len(encoder_hidden_states)], output_attentions=output_attentions, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] else: cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, output_attentions=output_attentions, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output, ) outputs = (layer_output,) + outputs outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class BertEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([BertLayer(config, i) for i in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=False, output_hidden_states=False, return_dict=True, mode="multimodal", ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None 
next_decoder_cache = () if use_cache else None try: # ALBEF fusion_layer = self.config.fusion_layer except AttributeError: # BLIP fusion_layer = self.config.num_hidden_layers if mode == "text": start_layer = 0 # output_layer = self.config.fusion_layer output_layer = fusion_layer elif mode == "fusion": # start_layer = self.config.fusion_layer start_layer = fusion_layer output_layer = self.config.num_hidden_layers elif mode == "multimodal": start_layer = 0 output_layer = self.config.num_hidden_layers # compatibility for ALBEF and BLIP # for i in range(self.config.num_hidden_layers): for i in range(start_layer, output_layer): layer_module = self.layer[i] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None # TODO pay attention to this. if self.gradient_checkpointing and self.training: if use_cache: logger.warn( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, past_key_value, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, mode=mode, ) else: layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, mode=mode, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) class BertPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class BertPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class BertLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = BertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. 
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states class BertOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = BertLMPredictionHead(config) def forward(self, sequence_output): prediction_scores = self.predictions(sequence_output) return prediction_scores class BertPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = BertConfig base_model_prefix = "bert" _keys_to_ignore_on_load_missing = [r"position_ids"] def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() class BertModel(BertPreTrainedModel): """ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in `Attention is all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an input to the forward pass. """ def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = BertEmbeddings(config) self.encoder = BertEncoder(config) self.pooler = BertPooler(config) if add_pooling_layer else None self.init_weights() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def get_extended_attention_mask( self, attention_mask: Tensor, input_shape: Tuple[int], device: device, is_decoder: bool, ) -> Tensor: """ Makes broadcastable attention and causal masks so that future and masked tokens are ignored. Arguments: attention_mask (:obj:`torch.Tensor`): Mask with ones indicating tokens to attend to, zeros for tokens to ignore. input_shape (:obj:`Tuple[int]`): The shape of the input to the model. device: (:obj:`torch.device`): The device of the input to the model. Returns: :obj:`torch.Tensor` The extended attention mask, with a the same dtype as :obj:`attention_mask.dtype`. """ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
if attention_mask.dim() == 3: extended_attention_mask = attention_mask[:, None, :, :] elif attention_mask.dim() == 2: # Provided a padding mask of dimensions [batch_size, seq_length] # - if the model is a decoder, apply a causal mask in addition to the padding mask # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length] if is_decoder: batch_size, seq_length = input_shape seq_ids = torch.arange(seq_length, device=device) causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None] # in case past_key_values are used we need to add a prefix ones mask to the causal mask # causal and attention masks must have same type with pytorch version < 1.3 causal_mask = causal_mask.to(attention_mask.dtype) if causal_mask.shape[1] < attention_mask.shape[1]: prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1] causal_mask = torch.cat( [ torch.ones( (batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype, ), causal_mask, ], axis=-1, ) extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :] else: extended_attention_mask = attention_mask[:, None, None, :] else: raise ValueError( "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format( input_shape, attention_mask.shape ) ) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 return extended_attention_mask def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, is_decoder=False, mode="multimodal", ): r""" encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. 
use_cache (:obj:`bool`, `optional`): If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up decoding (see :obj:`past_key_values`). """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() batch_size, seq_length = input_shape device = input_ids.device elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] batch_size, seq_length = input_shape device = inputs_embeds.device elif encoder_embeds is not None: input_shape = encoder_embeds.size()[:-1] batch_size, seq_length = input_shape device = encoder_embeds.device else: raise ValueError("You have to specify either input_ids or inputs_embeds or encoder_embeds") # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask: torch.Tensor = self.get_extended_attention_mask( attention_mask, input_shape, device, is_decoder ) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if encoder_hidden_states is not None: if type(encoder_hidden_states) == list: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size() else: ( encoder_batch_size, encoder_sequence_length, _, ) = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if type(encoder_attention_mask) == list: encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask] elif encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) if encoder_embeds is None: embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, ) else: embedding_output = encoder_embeds encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, 
encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, mode=mode, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) class BertForMaskedLM(BertPreTrainedModel): _keys_to_ignore_on_load_unexpected = [r"pooler"] _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"] def __init__(self, config): super().__init__(config) self.bert = BertModel(config, add_pooling_layer=False) self.cls = BertOnlyMLMHead(config) self.init_weights() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings def forward( self, input_ids=None, attention_mask=None, # token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, is_decoder=False, mode="multimodal", soft_labels=None, alpha=0, return_logits=False, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert( input_ids, attention_mask=attention_mask, # token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_embeds=encoder_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, is_decoder=is_decoder, mode=mode, ) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) if return_logits: return prediction_scores masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() # -100 index = padding token masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if soft_labels is not None: loss_distill = -torch.sum(F.log_softmax(prediction_scores, dim=-1) * soft_labels, dim=-1) loss_distill = loss_distill[labels != -100].mean() masked_lm_loss = (1 - alpha) * masked_lm_loss + alpha * loss_distill if not return_dict: output = (prediction_scores,) + outputs[2:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs): input_shape = input_ids.shape effective_batch_size = input_shape[0] # add a dummy token assert 
self.config.pad_token_id is not None, "The PAD token should be defined for generation" attention_mask = torch.cat( [attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1, ) dummy_token = torch.full( (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device, ) input_ids = torch.cat([input_ids, dummy_token], dim=1) return {"input_ids": input_ids, "attention_mask": attention_mask} class BertLMHeadModel(BertPreTrainedModel): _keys_to_ignore_on_load_unexpected = [r"pooler"] _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"] def __init__(self, config): super().__init__(config) self.bert = BertModel(config, add_pooling_layer=False) self.cls = BertOnlyMLMHead(config) self.init_weights() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings def forward( self, input_ids=None, attention_mask=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, labels=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, return_logits=False, is_decoder=True, reduction="mean", mode="multimodal", soft_labels=None, alpha=0, ): r""" encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels n ``[0, ..., config.vocab_size]`` past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. use_cache (:obj:`bool`, `optional`): If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up decoding (see :obj:`past_key_values`). 
Returns: Example:: >>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig >>> import torch >>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased') >>> config = BertConfig.from_pretrained("bert-base-cased") >>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config) >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> prediction_logits = outputs.logits """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False outputs = self.bert( input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, is_decoder=is_decoder, mode=mode, ) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) if return_logits: return prediction_scores[:, :-1, :].contiguous() lm_loss = None if labels is not None: # we are doing next-token prediction; shift prediction scores and input ids by one shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() labels = labels[:, 1:].contiguous() loss_fct = CrossEntropyLoss(reduction=reduction, label_smoothing=0.1) lm_loss = loss_fct( shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1), ) if reduction == "none": lm_loss = lm_loss.view(prediction_scores.size(0), -1).sum(1) if soft_labels is not None: loss_distill = -torch.sum(F.log_softmax(shifted_prediction_scores, dim=-1) * soft_labels, dim=-1) loss_distill = (loss_distill * (labels != -100)).sum(1) lm_loss = (1 - alpha) * lm_loss + alpha * loss_distill if not return_dict: output = (prediction_scores,) + outputs[2:] return ((lm_loss,) + output) if lm_loss is not None else output return CausalLMOutputWithCrossAttentions( loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs): input_shape = input_ids.shape # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly if attention_mask is None: attention_mask = input_ids.new_ones(input_shape) # cut decoder_input_ids if past is used if past is not None: input_ids = input_ids[:, -1:] return { "input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past, "encoder_hidden_states": model_kwargs.get("encoder_hidden_states", None), "encoder_attention_mask": model_kwargs.get("encoder_attention_mask", None), "is_decoder": True, } def _reorder_cache(self, past, beam_idx): reordered_past = () for layer_past in past: reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),) return reordered_past class XBertLMHeadDecoder(BertLMHeadModel): """ This class decouples the decoder forward logic from the VL model. In this way, different VL models can share this decoder as long as they feed encoder_embeds as required. 
""" @classmethod def from_config(cls, cfg, from_pretrained=False): med_config_path = get_abs_path(cfg.get("med_config_path")) med_config = BertConfig.from_json_file(med_config_path) if from_pretrained: return cls.from_pretrained("bert-base-uncased", config=med_config) else: return cls(config=med_config) def generate_from_encoder( self, tokenized_prompt, visual_embeds, sep_token_id, pad_token_id, use_nucleus_sampling=False, num_beams=3, max_length=30, min_length=10, top_p=0.9, repetition_penalty=1.0, **kwargs ): if not use_nucleus_sampling: num_beams = num_beams visual_embeds = visual_embeds.repeat_interleave(num_beams, dim=0) image_atts = torch.ones(visual_embeds.size()[:-1], dtype=torch.long).to(self.device) model_kwargs = { "encoder_hidden_states": visual_embeds, "encoder_attention_mask": image_atts, } if use_nucleus_sampling: # nucleus sampling outputs = self.generate( input_ids=tokenized_prompt.input_ids, max_length=max_length, min_length=min_length, do_sample=True, top_p=top_p, num_return_sequences=1, eos_token_id=sep_token_id, pad_token_id=pad_token_id, repetition_penalty=1.1, **model_kwargs ) else: # beam search outputs = self.generate( input_ids=tokenized_prompt.input_ids, max_length=max_length, min_length=min_length, num_beams=num_beams, eos_token_id=sep_token_id, pad_token_id=pad_token_id, repetition_penalty=repetition_penalty, **model_kwargs ) return outputs class XBertEncoder(BertModel, BaseEncoder): @classmethod def from_config(cls, cfg, from_pretrained=False): med_config_path = get_abs_path(cfg.get("med_config_path")) med_config = BertConfig.from_json_file(med_config_path) if from_pretrained: return cls.from_pretrained("bert-base-uncased", config=med_config, add_pooling_layer=False) else: return cls(config=med_config, add_pooling_layer=False) def forward_automask(self, tokenized_text, visual_embeds, **kwargs): image_atts = torch.ones(visual_embeds.size()[:-1], dtype=torch.long).to(self.device) text = tokenized_text text_output = super().forward( text.input_ids, attention_mask=text.attention_mask, encoder_hidden_states=visual_embeds, encoder_attention_mask=image_atts, return_dict=True, ) return text_output def forward_text(self, tokenized_text, **kwargs): text = tokenized_text token_type_ids = kwargs.get("token_type_ids", None) text_output = super().forward( text.input_ids, attention_mask=text.attention_mask, token_type_ids=token_type_ids, return_dict=True, mode="text", ) return text_output
3D-LLM-main
3DLLM_BLIP2-base/lavis/models/med.py
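A minimal standalone sketch of how BertModel.get_extended_attention_mask in the med.py file above turns a 2D padding mask into the additive decoder mask. It is pure torch; the tensor shapes and the -10000.0 fill value follow the code above, while the helper name and toy inputs are only illustrative.

import torch

def extended_decoder_mask(attention_mask: torch.Tensor) -> torch.Tensor:
    """attention_mask: (batch, seq_len) with 1 = attend, 0 = padding."""
    batch_size, seq_length = attention_mask.shape
    seq_ids = torch.arange(seq_length, device=attention_mask.device)
    # causal[b, i, j] is True when position j is not in the future of position i
    causal = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
    causal = causal.to(attention_mask.dtype)
    # combine with the padding mask and broadcast over heads: (batch, 1, seq, seq)
    mask = causal[:, None, :, :] * attention_mask[:, None, None, :]
    # 0.0 where attention is allowed, -10000.0 where it is masked (added to raw attention scores)
    return (1.0 - mask.float()) * -10000.0

if __name__ == "__main__":
    pad_mask = torch.tensor([[1, 1, 1, 0]])       # one sequence of length 4, last token padded
    print(extended_decoder_mask(pad_mask)[0, 0])  # 4x4 additive mask: causal + padding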
from collections import OrderedDict from itertools import repeat import collections.abc import math import torch import torch.nn.functional as F from torch import nn from fairscale.nn.checkpoint.checkpoint_activations import checkpoint_wrapper from lavis.models.eva_vit import convert_weights_to_fp16 from lavis.common.dist_utils import download_cached_file class Bottleneck(nn.Module): expansion = 4 def __init__(self, inplanes, planes, stride=1): super().__init__() # all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1 self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.relu1 = nn.ReLU(inplace=True) self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.relu2 = nn.ReLU(inplace=True) self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity() self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False) self.bn3 = nn.BatchNorm2d(planes * self.expansion) self.relu3 = nn.ReLU(inplace=True) self.downsample = None self.stride = stride if stride > 1 or inplanes != planes * Bottleneck.expansion: # downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1 self.downsample = nn.Sequential( OrderedDict( [ ("-1", nn.AvgPool2d(stride)), ("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)), ("1", nn.BatchNorm2d(planes * self.expansion)), ] ) ) def forward(self, x: torch.Tensor): identity = x out = self.relu1(self.bn1(self.conv1(x))) out = self.relu2(self.bn2(self.conv2(out))) out = self.avgpool(out) out = self.bn3(self.conv3(out)) if self.downsample is not None: identity = self.downsample(x) out += identity out = self.relu3(out) return out class AttentionPool2d(nn.Module): def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None): super().__init__() self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5) self.k_proj = nn.Linear(embed_dim, embed_dim) self.q_proj = nn.Linear(embed_dim, embed_dim) self.v_proj = nn.Linear(embed_dim, embed_dim) self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim) self.num_heads = num_heads def forward(self, x): x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1) # NCHW -> (HW)NC x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC x, _ = F.multi_head_attention_forward( query=x, key=x, value=x, embed_dim_to_check=x.shape[-1], num_heads=self.num_heads, q_proj_weight=self.q_proj.weight, k_proj_weight=self.k_proj.weight, v_proj_weight=self.v_proj.weight, in_proj_weight=None, in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]), bias_k=None, bias_v=None, add_zero_attn=False, dropout_p=0, out_proj_weight=self.c_proj.weight, out_proj_bias=self.c_proj.bias, use_separate_proj_weight=True, training=self.training, need_weights=False, ) return x[0] class LayerNorm(nn.LayerNorm): """Subclass torch's LayerNorm to handle fp16.""" def forward(self, x: torch.Tensor): orig_type = x.dtype ret = super().forward(x.type(torch.float32)) return ret.type(orig_type) class QuickGELU(nn.Module): def forward(self, x: torch.Tensor): return x * torch.sigmoid(1.702 * x) class ResidualAttentionBlock(nn.Module): def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None, use_grad_checkpointing=False): super().__init__() self.attn = 
nn.MultiheadAttention(d_model, n_head) self.ln_1 = LayerNorm(d_model) self.mlp = nn.Sequential( OrderedDict( [ ("c_fc", nn.Linear(d_model, d_model * 4)), ("gelu", QuickGELU()), ("c_proj", nn.Linear(d_model * 4, d_model)), ] ) ) self.ln_2 = LayerNorm(d_model) self.attn_mask = attn_mask if use_grad_checkpointing: self.attn = checkpoint_wrapper(self.attn) self.mlp = checkpoint_wrapper(self.mlp) def attention(self, x: torch.Tensor): self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0] def forward(self, x: torch.Tensor): x = x + self.attention(self.ln_1(x)) x = x + self.mlp(self.ln_2(x)) return x class Transformer(nn.Module): def __init__( self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None, use_grad_checkpointing=False ): super().__init__() self.width = width self.layers = layers self.resblocks = nn.Sequential( *[ResidualAttentionBlock(width, heads, attn_mask, use_grad_checkpointing and i > 12) for i in range(layers)] ) def forward(self, x: torch.Tensor): return self.resblocks(x) class VisionTransformer(nn.Module): def __init__( self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, use_grad_checkpointing: bool ): super().__init__() self.input_resolution = input_resolution self.num_features = width self.num_heads = heads self.num_patches = (input_resolution // patch_size) ** 2 self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False) scale = width ** -0.5 self.class_embedding = nn.Parameter(scale * torch.randn(width)) self.positional_embedding = nn.Parameter(scale * torch.randn(self.num_patches + 1, width)) self.ln_pre = LayerNorm(width) self.transformer = Transformer(width, layers - 1, heads, use_grad_checkpointing=use_grad_checkpointing) # self.ln_final = LayerNorm(width) def forward(self, x: torch.Tensor): x = self.conv1(x) # shape = [*, width, grid, grid] x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2] x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width] x = torch.cat( [ self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x, ], dim=1, ) # shape = [*, grid ** 2 + 1, width] x = x + self.positional_embedding.to(x.dtype) x = self.ln_pre(x) x = x.permute(1, 0, 2) # NLD -> LND x = self.transformer(x) x = x.permute(1, 0, 2) # LND -> NLD # x = self.ln_final(x) return x # From PyTorch internals def _ntuple(n): def parse(x): if isinstance(x, collections.abc.Iterable): return x return tuple(repeat(x, n)) return parse to_2tuple = _ntuple(2) def interpolate_pos_embed(model, state_dict, interpolation: str = "bicubic", seq_dim=1): # Rescale the grid of position embeddings when loading from state_dict old_pos_embed = state_dict.get("positional_embedding", None) grid_size = round((model.positional_embedding.shape[0] - 1) ** 0.5) if old_pos_embed is None: return grid_size = to_2tuple(grid_size) extra_tokens = 1 # FIXME detect different token configs (ie no class token, or more) new_seq_len = grid_size[0] * grid_size[1] + extra_tokens if new_seq_len == old_pos_embed.shape[0]: return if extra_tokens: pos_emb_tok, pos_emb_img = old_pos_embed[:extra_tokens], old_pos_embed[extra_tokens:] else: pos_emb_tok, pos_emb_img = None, old_pos_embed old_grid_size = to_2tuple(int(math.sqrt(len(pos_emb_img)))) print("Resizing position embedding grid-size from %s to %s", old_grid_size, grid_size) pos_emb_img = 
pos_emb_img.reshape(1, old_grid_size[0], old_grid_size[1], -1).permute(0, 3, 1, 2) pos_emb_img = F.interpolate( pos_emb_img, size=grid_size, mode=interpolation, align_corners=True, ) pos_emb_img = pos_emb_img.permute(0, 2, 3, 1).reshape(1, grid_size[0] * grid_size[1], -1)[0] if pos_emb_tok is not None: new_pos_embed = torch.cat([pos_emb_tok, pos_emb_img], dim=0) else: new_pos_embed = pos_emb_img state_dict["positional_embedding"] = new_pos_embed def create_clip_vit_L(img_size=224, use_checkpoint=False, precision="fp16"): model = VisionTransformer( input_resolution=img_size, patch_size=14, width=1024, layers=22, heads=16, use_grad_checkpointing=use_checkpoint, ) url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/clip_vit_L.pth" cached_file = download_cached_file(url, check_hash=False, progress=True) state_dict = torch.load(cached_file, map_location="cpu") interpolate_pos_embed(model, state_dict) incompatible_keys = model.load_state_dict(state_dict, strict=False) # print(incompatible_keys) if precision == "fp16": convert_weights_to_fp16(model) return model
3D-LLM-main
3DLLM_BLIP2-base/lavis/models/clip_vit.py
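A minimal standalone sketch of the position-embedding resizing performed by interpolate_pos_embed in clip_vit.py above: the class-token row is kept, the per-patch grid is resized with bicubic interpolation (align_corners=True, as in this file; the eva_vit.py variant uses align_corners=False), and the two parts are concatenated again. The helper name and toy sizes are illustrative, not part of the repo.

import torch
import torch.nn.functional as F

def resize_pos_embed(old_pos_embed: torch.Tensor, new_grid: int) -> torch.Tensor:
    """old_pos_embed: (old_grid**2 + 1, width) with the class token at index 0."""
    cls_tok, grid_tok = old_pos_embed[:1], old_pos_embed[1:]
    old_grid = int(grid_tok.shape[0] ** 0.5)
    width = grid_tok.shape[-1]
    grid_tok = grid_tok.reshape(1, old_grid, old_grid, width).permute(0, 3, 1, 2)  # 1, C, H, W
    grid_tok = F.interpolate(grid_tok, size=(new_grid, new_grid), mode="bicubic", align_corners=True)
    grid_tok = grid_tok.permute(0, 2, 3, 1).reshape(new_grid * new_grid, width)
    return torch.cat([cls_tok, grid_tok], dim=0)

if __name__ == "__main__":
    old = torch.randn(16 * 16 + 1, 1024)      # e.g. a 224/14 = 16x16 grid plus [CLS]
    new = resize_pos_embed(old, new_grid=24)  # e.g. for 336x336 inputs with patch size 14
    print(new.shape)                          # torch.Size([577, 1024])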
# Based on EVA, BEIT, timm and DeiT code bases # https://github.com/baaivision/EVA # https://github.com/rwightman/pytorch-image-models/tree/master/timm # https://github.com/microsoft/unilm/tree/master/beit # https://github.com/facebookresearch/deit/ # https://github.com/facebookresearch/dino # --------------------------------------------------------' import math from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint as checkpoint from timm.models.layers import drop_path, to_2tuple, trunc_normal_ from timm.models.registry import register_model from lavis.common.dist_utils import download_cached_file def _cfg(url="", **kwargs): return { "url": url, "num_classes": 1000, "input_size": (3, 224, 224), "pool_size": None, "crop_pct": 0.9, "interpolation": "bicubic", "mean": (0.5, 0.5, 0.5), "std": (0.5, 0.5, 0.5), **kwargs, } class DropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" def __init__(self, drop_prob=None): super(DropPath, self).__init__() self.drop_prob = drop_prob def forward(self, x): return drop_path(x, self.drop_prob, self.training) def extra_repr(self) -> str: return "p={}".format(self.drop_prob) class Mlp(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.act = act_layer() self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, x): x = self.fc1(x) x = self.act(x) # x = self.drop(x) # commit this for the orignal BERT implement x = self.fc2(x) x = self.drop(x) return x class Attention(nn.Module): def __init__( self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0, window_size=None, attn_head_dim=None, ): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads if attn_head_dim is not None: head_dim = attn_head_dim all_head_dim = head_dim * self.num_heads self.scale = qk_scale or head_dim ** -0.5 self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False) if qkv_bias: self.q_bias = nn.Parameter(torch.zeros(all_head_dim)) self.v_bias = nn.Parameter(torch.zeros(all_head_dim)) else: self.q_bias = None self.v_bias = None if window_size: self.window_size = window_size self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3 self.relative_position_bias_table = nn.Parameter( torch.zeros(self.num_relative_distance, num_heads) ) # 2*Wh-1 * 2*Ww-1, nH # cls to token & token 2 cls & cls to cls # get pair-wise relative position index for each token inside the window coords_h = torch.arange(window_size[0]) coords_w = torch.arange(window_size[1]) coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0 relative_coords[:, :, 1] += window_size[1] - 1 relative_coords[:, :, 0] *= 2 * window_size[1] - 1 relative_position_index = torch.zeros( size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype ) relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww relative_position_index[0, 0:] = 
self.num_relative_distance - 3 relative_position_index[0:, 0] = self.num_relative_distance - 2 relative_position_index[0, 0] = self.num_relative_distance - 1 self.register_buffer("relative_position_index", relative_position_index) else: self.window_size = None self.relative_position_bias_table = None self.relative_position_index = None self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(all_head_dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x, rel_pos_bias=None): B, N, C = x.shape qkv_bias = None if self.q_bias is not None: qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias)) # qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias) qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) q = q * self.scale attn = q @ k.transpose(-2, -1) if self.relative_position_bias_table is not None: relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view( self.window_size[0] * self.window_size[1] + 1, self.window_size[0] * self.window_size[1] + 1, -1 ) # Wh*Ww,Wh*Ww,nH relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww attn = attn + relative_position_bias.unsqueeze(0) if rel_pos_bias is not None: attn = attn + rel_pos_bias attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, -1) x = self.proj(x) x = self.proj_drop(x) return x class Block(nn.Module): def __init__( self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, init_values=None, act_layer=nn.GELU, norm_layer=nn.LayerNorm, window_size=None, attn_head_dim=None, ): super().__init__() self.norm1 = norm_layer(dim) self.attn = Attention( dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop, window_size=window_size, attn_head_dim=attn_head_dim, ) # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) if init_values is not None and init_values > 0: self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True) self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True) else: self.gamma_1, self.gamma_2 = None, None def forward(self, x, rel_pos_bias=None): if self.gamma_1 is None: x = x + self.drop_path(self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias)) x = x + self.drop_path(self.mlp(self.norm2(x))) else: x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias)) x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x))) return x class PatchEmbed(nn.Module): """Image to Patch Embedding""" def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768): super().__init__() img_size = to_2tuple(img_size) patch_size = to_2tuple(patch_size) num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0]) self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) self.img_size = img_size self.patch_size = patch_size self.num_patches = num_patches self.proj = 
nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) def forward(self, x, **kwargs): B, C, H, W = x.shape # FIXME look at relaxing size constraints assert ( H == self.img_size[0] and W == self.img_size[1] ), f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." x = self.proj(x).flatten(2).transpose(1, 2) return x class RelativePositionBias(nn.Module): def __init__(self, window_size, num_heads): super().__init__() self.window_size = window_size self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3 self.relative_position_bias_table = nn.Parameter( torch.zeros(self.num_relative_distance, num_heads) ) # 2*Wh-1 * 2*Ww-1, nH # cls to token & token 2 cls & cls to cls # get pair-wise relative position index for each token inside the window coords_h = torch.arange(window_size[0]) coords_w = torch.arange(window_size[1]) coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0 relative_coords[:, :, 1] += window_size[1] - 1 relative_coords[:, :, 0] *= 2 * window_size[1] - 1 relative_position_index = torch.zeros( size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype ) relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww relative_position_index[0, 0:] = self.num_relative_distance - 3 relative_position_index[0:, 0] = self.num_relative_distance - 2 relative_position_index[0, 0] = self.num_relative_distance - 1 self.register_buffer("relative_position_index", relative_position_index) # trunc_normal_(self.relative_position_bias_table, std=.02) def forward(self): relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view( self.window_size[0] * self.window_size[1] + 1, self.window_size[0] * self.window_size[1] + 1, -1 ) # Wh*Ww,Wh*Ww,nH return relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww class VisionTransformer(nn.Module): """Vision Transformer with support for patch or hybrid CNN input stage""" def __init__( self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4.0, qkv_bias=False, qk_scale=None, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, norm_layer=nn.LayerNorm, init_values=None, use_abs_pos_emb=True, use_rel_pos_bias=False, use_shared_rel_pos_bias=False, use_mean_pooling=True, init_scale=0.001, use_checkpoint=False, ): super().__init__() self.image_size = img_size self.num_classes = num_classes self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models self.patch_embed = PatchEmbed(img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) num_patches = self.patch_embed.num_patches self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if use_abs_pos_emb: self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) else: self.pos_embed = None self.pos_drop = nn.Dropout(p=drop_rate) if use_shared_rel_pos_bias: self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.patch_shape, num_heads=num_heads) else: self.rel_pos_bias = None self.use_checkpoint = use_checkpoint dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # 
stochastic depth decay rule self.use_rel_pos_bias = use_rel_pos_bias self.blocks = nn.ModuleList( [ Block( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, init_values=init_values, window_size=self.patch_embed.patch_shape if use_rel_pos_bias else None, ) for i in range(depth) ] ) # self.norm = nn.Identity() if use_mean_pooling else norm_layer(embed_dim) # self.fc_norm = norm_layer(embed_dim) if use_mean_pooling else None # self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() if self.pos_embed is not None: trunc_normal_(self.pos_embed, std=0.02) trunc_normal_(self.cls_token, std=0.02) # trunc_normal_(self.mask_token, std=.02) # if isinstance(self.head, nn.Linear): # trunc_normal_(self.head.weight, std=.02) self.apply(self._init_weights) self.fix_init_weight() # if isinstance(self.head, nn.Linear): # self.head.weight.data.mul_(init_scale) # self.head.bias.data.mul_(init_scale) def fix_init_weight(self): def rescale(param, layer_id): param.div_(math.sqrt(2.0 * layer_id)) for layer_id, layer in enumerate(self.blocks): rescale(layer.attn.proj.weight.data, layer_id + 1) rescale(layer.mlp.fc2.weight.data, layer_id + 1) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=0.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) def get_classifier(self): return self.head def reset_classifier(self, num_classes, global_pool=""): self.num_classes = num_classes self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() def forward_features(self, x): x = self.patch_embed(x) batch_size, seq_len, _ = x.size() cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks x = torch.cat((cls_tokens, x), dim=1) if self.pos_embed is not None: x = x + self.pos_embed x = self.pos_drop(x) rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None for blk in self.blocks: if self.use_checkpoint: x = checkpoint.checkpoint(blk, x, rel_pos_bias) else: x = blk(x, rel_pos_bias) return x # x = self.norm(x) # if self.fc_norm is not None: # t = x[:, 1:, :] # return self.fc_norm(t.mean(1)) # else: # return x[:, 0] def forward(self, x): x = self.forward_features(x) # x = self.head(x) return x def get_intermediate_layers(self, x): x = self.patch_embed(x) batch_size, seq_len, _ = x.size() cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks x = torch.cat((cls_tokens, x), dim=1) if self.pos_embed is not None: x = x + self.pos_embed x = self.pos_drop(x) features = [] rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None for blk in self.blocks: x = blk(x, rel_pos_bias) features.append(x) return features def interpolate_pos_embed(model, checkpoint_model): if "pos_embed" in checkpoint_model: pos_embed_checkpoint = checkpoint_model["pos_embed"].float() embedding_size = pos_embed_checkpoint.shape[-1] num_patches = model.patch_embed.num_patches num_extra_tokens = model.pos_embed.shape[-2] - num_patches # height (== width) for the checkpoint position embedding orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5) # height (== width) for the new position embedding new_size = int(num_patches ** 0.5) # class_token and dist_token are kept 
unchanged if orig_size != new_size: print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size)) extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens] # only the position tokens are interpolated pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:] pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2) pos_tokens = torch.nn.functional.interpolate( pos_tokens, size=(new_size, new_size), mode="bicubic", align_corners=False ) pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2) new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1) checkpoint_model["pos_embed"] = new_pos_embed def convert_weights_to_fp16(model: nn.Module): """Convert applicable model parameters to fp16""" def _convert_weights_to_fp16(l): if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)): l.weight.data = l.weight.data.half() if l.bias is not None: l.bias.data = l.bias.data.half() # if isinstance(l, (nn.MultiheadAttention, Attention)): # for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]: # tensor = getattr(l, attr) # if tensor is not None: # tensor.data = tensor.data.half() model.apply(_convert_weights_to_fp16) def create_eva_vit_g(img_size=224, drop_path_rate=0.4, use_checkpoint=False, precision="fp16"): model = VisionTransformer( img_size=img_size, patch_size=14, use_mean_pooling=False, embed_dim=1408, depth=39, num_heads=1408 // 88, mlp_ratio=4.3637, qkv_bias=True, drop_path_rate=drop_path_rate, norm_layer=partial(nn.LayerNorm, eps=1e-6), use_checkpoint=use_checkpoint, ) url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/eva_vit_g.pth" cached_file = download_cached_file(url, check_hash=False, progress=True) state_dict = torch.load(cached_file, map_location="cpu") interpolate_pos_embed(model, state_dict) incompatible_keys = model.load_state_dict(state_dict, strict=False) # print(incompatible_keys) if precision == "fp16": # model.to("cuda") convert_weights_to_fp16(model) return model
3D-LLM-main
3DLLM_BLIP2-base/lavis/models/eva_vit.py
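A minimal standalone sketch of the relative-position index table built in RelativePositionBias (and in Attention when window_size is set) in eva_vit.py above, shown for a tiny 2x2 window so the indexing is easy to inspect. Only the index construction is reproduced here; the learned bias table itself is omitted.

import torch

def relative_position_index(window_size=(2, 2)) -> torch.Tensor:
    num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
    coords_h = torch.arange(window_size[0])
    coords_w = torch.arange(window_size[1])
    coords = torch.stack(torch.meshgrid([coords_h, coords_w]))       # 2, Wh, Ww
    coords_flatten = torch.flatten(coords, 1)                        # 2, Wh*Ww
    relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
    relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
    relative_coords[:, :, 0] += window_size[0] - 1                   # shift to start from 0
    relative_coords[:, :, 1] += window_size[1] - 1
    relative_coords[:, :, 0] *= 2 * window_size[1] - 1
    index = torch.zeros((window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype)
    index[1:, 1:] = relative_coords.sum(-1)   # patch-to-patch relative offsets
    index[0, 0:] = num_relative_distance - 3  # cls-to-token
    index[0:, 0] = num_relative_distance - 2  # token-to-cls
    index[0, 0] = num_relative_distance - 1   # cls-to-cls
    return index

if __name__ == "__main__":
    print(relative_position_index())  # 5x5 index into a bias table with 12 rows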
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import logging import torch from omegaconf import OmegaConf from lavis.common.registry import registry from lavis.models.base_model import BaseModel from lavis.models.blip2_models.blip2 import Blip2Base from lavis.models.blip2_models.blip2_opt import Blip2OPT from lavis.models.blip2_models.blip2_t5 import Blip2T5 from lavis.models.blip2_models.blip2_qformer import Blip2Qformer from lavis.models.blip2_models.blip2_image_text_matching import Blip2ITM from lavis.models.med import XBertLMHeadDecoder from lavis.models.vit import VisionTransformerEncoder from lavis.processors.base_processor import BaseProcessor __all__ = [ "load_model", "Blip2Qformer", "Blip2Base", "Blip2ITM", "Blip2OPT", "Blip2T5", ] def load_model(name, model_type, is_eval=False, device="cpu", checkpoint=None): """ Load supported models. To list all available models and types in registry: >>> from lavis.models import model_zoo >>> print(model_zoo) Args: name (str): name of the model. model_type (str): type of the model. is_eval (bool): whether the model is in eval mode. Default: False. device (str): device to use. Default: "cpu". checkpoint (str): path or to checkpoint. Default: None. Note that expecting the checkpoint to have the same keys in state_dict as the model. Returns: model (torch.nn.Module): model. """ model = registry.get_model_class(name).from_pretrained(model_type=model_type) if checkpoint is not None: model.load_checkpoint(checkpoint) if is_eval: model.eval() if device == "cpu": model = model.float() return model.to(device) def load_preprocess(config): """ Load preprocessor configs and construct preprocessors. If no preprocessor is specified, return BaseProcessor, which does not do any preprocessing. Args: config (dict): preprocessor configs. Returns: vis_processors (dict): preprocessors for visual inputs. txt_processors (dict): preprocessors for text inputs. Key is "train" or "eval" for processors used in training and evaluation respectively. """ def _build_proc_from_cfg(cfg): return registry.get_processor_class(cfg.name).from_config(cfg) if cfg is not None else BaseProcessor() vis_processors = dict() txt_processors = dict() vis_proc_cfg = config.get("vis_processor") txt_proc_cfg = config.get("text_processor") if vis_proc_cfg is not None: vis_train_cfg = vis_proc_cfg.get("train") vis_eval_cfg = vis_proc_cfg.get("eval") else: vis_train_cfg = None vis_eval_cfg = None vis_processors["train"] = _build_proc_from_cfg(vis_train_cfg) vis_processors["eval"] = _build_proc_from_cfg(vis_eval_cfg) if txt_proc_cfg is not None: txt_train_cfg = txt_proc_cfg.get("train") txt_eval_cfg = txt_proc_cfg.get("eval") else: txt_train_cfg = None txt_eval_cfg = None txt_processors["train"] = _build_proc_from_cfg(txt_train_cfg) txt_processors["eval"] = _build_proc_from_cfg(txt_eval_cfg) return vis_processors, txt_processors def load_model_and_preprocess(name, model_type, is_eval=False, device="cpu"): """ Load model and its related preprocessors. List all available models and types in registry: >>> from lavis.models import model_zoo >>> print(model_zoo) Args: name (str): name of the model. model_type (str): type of the model. is_eval (bool): whether the model is in eval mode. Default: False. device (str): device to use. Default: "cpu". Returns: model (torch.nn.Module): model. 
vis_processors (dict): preprocessors for visual inputs. txt_processors (dict): preprocessors for text inputs. """ model_cls = registry.get_model_class(name) # load model model = model_cls.from_pretrained(model_type=model_type) if is_eval: model.eval() # load preprocess cfg = OmegaConf.load(model_cls.default_config_path(model_type)) if cfg is not None: preprocess_cfg = cfg.preprocess vis_processors, txt_processors = load_preprocess(preprocess_cfg) else: vis_processors, txt_processors = None, None logging.info( f"""No default preprocess for model {name} ({model_type}). This can happen if the model is not finetuned on downstream datasets, or it is not intended for direct use without finetuning. """ ) if device == "cpu" or device == torch.device("cpu"): model = model.float() return model.to(device), vis_processors, txt_processors class ModelZoo: """ A utility class to create string representation of available model architectures and types. >>> from lavis.models import model_zoo >>> # list all available models >>> print(model_zoo) >>> # show total number of models >>> print(len(model_zoo)) """ def __init__(self) -> None: self.model_zoo = { k: list(v.PRETRAINED_MODEL_CONFIG_DICT.keys()) for k, v in registry.mapping["model_name_mapping"].items() } def __str__(self) -> str: return ( "=" * 50 + "\n" + f"{'Architectures':<30} {'Types'}\n" + "=" * 50 + "\n" + "\n".join([f"{name:<30} {', '.join(types)}" for name, types in self.model_zoo.items()]) ) def __iter__(self): return iter(self.model_zoo.items()) def __len__(self): return sum([len(v) for v in self.model_zoo.values()]) model_zoo = ModelZoo()
3D-LLM-main
3DLLM_BLIP2-base/lavis/models/__init__.py
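A hedged usage sketch for the loader API defined in the __init__.py above. It assumes the LAVIS-style package from this repo is importable and that a model registered under the chosen name/type exists; the "blip2_t5" / "pretrain_flant5xl" strings are placeholders taken from upstream LAVIS and may differ in this repo.

import torch
from lavis.models import model_zoo, load_model_and_preprocess

print(model_zoo)  # lists every registered architecture and its model types

device = "cuda" if torch.cuda.is_available() else "cpu"
model, vis_processors, txt_processors = load_model_and_preprocess(
    name="blip2_t5",                 # placeholder architecture name
    model_type="pretrain_flant5xl",  # placeholder model type
    is_eval=True,
    device=device,
)
print(type(model).__name__, list(vis_processors.keys()))  # e.g. Blip2T5 ['train', 'eval']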
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import logging import os import numpy as np import torch import torch.nn as nn from lavis.common.dist_utils import download_cached_file, is_dist_avail_and_initialized from lavis.common.utils import get_abs_path, is_url from omegaconf import OmegaConf class BaseModel(nn.Module): """Base class for models.""" def __init__(self): super().__init__() @property def device(self): return list(self.parameters())[0].device def load_checkpoint(self, url_or_filename): """ Load from a finetuned checkpoint. This should expect no mismatch in the model keys and the checkpoint keys. """ if is_url(url_or_filename): cached_file = download_cached_file(url_or_filename, check_hash=False, progress=True) checkpoint = torch.load(cached_file, map_location="cpu") elif os.path.isfile(url_or_filename): checkpoint = torch.load(url_or_filename, map_location="cpu") else: raise RuntimeError("checkpoint url or path is invalid") if "model" in checkpoint.keys(): state_dict = checkpoint["model"] else: state_dict = checkpoint msg = self.load_state_dict(state_dict, strict=False) logging.info("Missing keys {}".format(msg.missing_keys)) logging.info("load checkpoint from %s" % url_or_filename) return msg @classmethod def from_pretrained(cls, model_type): """ Build a pretrained model from default configuration file, specified by model_type. Args: - model_type (str): model type, specifying architecture and checkpoints. Returns: - model (nn.Module): pretrained or finetuned model, depending on the configuration. """ model_cfg = OmegaConf.load(cls.default_config_path(model_type)).model model = cls.from_config(model_cfg) return model @classmethod def default_config_path(cls, model_type): assert model_type in cls.PRETRAINED_MODEL_CONFIG_DICT, "Unknown model type {}".format(model_type) return get_abs_path(cls.PRETRAINED_MODEL_CONFIG_DICT[model_type]) def load_checkpoint_from_config(self, cfg, **kwargs): """ Load checkpoint as specified in the config file. If load_finetuned is True, load the finetuned model; otherwise, load the pretrained model. When loading the pretrained model, each task-specific architecture may define their own load_from_pretrained() method. """ load_finetuned = cfg.get("load_finetuned", True) if load_finetuned: finetune_path = cfg.get("finetuned", None) assert finetune_path is not None, "Found load_finetuned is True, but finetune_path is None." self.load_checkpoint(url_or_filename=finetune_path) else: # load pre-trained weights pretrain_path = cfg.get("pretrained", None) assert "Found load_finetuned is False, but pretrain_path is None." self.load_from_pretrained(url_or_filename=pretrain_path, **kwargs) def before_evaluation(self, **kwargs): pass def show_n_params(self, return_str=True): tot = 0 for p in self.parameters(): w = 1 for x in p.shape: w *= x tot += w if return_str: if tot >= 1e6: return "{:.1f}M".format(tot / 1e6) else: return "{:.1f}K".format(tot / 1e3) else: return tot class BaseEncoder(nn.Module): """ Base class for primitive encoders, such as ViT, TimeSformer, etc. 
""" def __init__(self): super().__init__() def forward_features(self, samples, **kwargs): raise NotImplementedError @property def device(self): return list(self.parameters())[0].device class SharedQueueMixin: @torch.no_grad() def _dequeue_and_enqueue(self, image_feat, text_feat, idxs=None): # gather keys before updating queue image_feats = concat_all_gather(image_feat) text_feats = concat_all_gather(text_feat) batch_size = image_feats.shape[0] ptr = int(self.queue_ptr) assert self.queue_size % batch_size == 0 # for simplicity # replace the keys at ptr (dequeue and enqueue) self.image_queue[:, ptr : ptr + batch_size] = image_feats.T self.text_queue[:, ptr : ptr + batch_size] = text_feats.T if idxs is not None: idxs = concat_all_gather(idxs) self.idx_queue[:, ptr : ptr + batch_size] = idxs.T ptr = (ptr + batch_size) % self.queue_size # move pointer self.queue_ptr[0] = ptr class MomentumDistilationMixin: @torch.no_grad() def copy_params(self): for model_pair in self.model_pairs: for param, param_m in zip(model_pair[0].parameters(), model_pair[1].parameters()): param_m.data.copy_(param.data) # initialize param_m.requires_grad = False # not update by gradient @torch.no_grad() def _momentum_update(self): for model_pair in self.model_pairs: for param, param_m in zip(model_pair[0].parameters(), model_pair[1].parameters()): param_m.data = param_m.data * self.momentum + param.data * (1.0 - self.momentum) class GatherLayer(torch.autograd.Function): """ Gather tensors from all workers with support for backward propagation: This implementation does not cut the gradients as torch.distributed.all_gather does. """ @staticmethod def forward(ctx, x): output = [torch.zeros_like(x) for _ in range(torch.distributed.get_world_size())] torch.distributed.all_gather(output, x) return tuple(output) @staticmethod def backward(ctx, *grads): all_gradients = torch.stack(grads) torch.distributed.all_reduce(all_gradients) return all_gradients[torch.distributed.get_rank()] def all_gather_with_grad(tensors): """ Performs all_gather operation on the provided tensors. Graph remains connected for backward grad computation. """ # Queue the gathered tensors world_size = torch.distributed.get_world_size() # There is no need for reduction in the single-proc case if world_size == 1: return tensors # tensor_all = GatherLayer.apply(tensors) tensor_all = GatherLayer.apply(tensors) return torch.cat(tensor_all, dim=0) @torch.no_grad() def concat_all_gather(tensor): """ Performs all_gather operation on the provided tensors. *** Warning ***: torch.distributed.all_gather has no gradient. """ # if use distributed training if not is_dist_avail_and_initialized(): return tensor tensors_gather = [torch.ones_like(tensor) for _ in range(torch.distributed.get_world_size())] torch.distributed.all_gather(tensors_gather, tensor, async_op=False) output = torch.cat(tensors_gather, dim=0) return output def tile(x, dim, n_tile): init_dim = x.size(dim) repeat_idx = [1] * x.dim() repeat_idx[dim] = n_tile x = x.repeat(*(repeat_idx)) order_index = torch.LongTensor(np.concatenate([init_dim * np.arange(n_tile) + i for i in range(init_dim)])) return torch.index_select(x, dim, order_index.to(x.device))
3D-LLM-main
3DLLM_BLIP2-base/lavis/models/base_model.py
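Two notes on base_model.py above. First, in load_checkpoint_from_config the non-finetuned branch asserts on a bare string ("Found load_finetuned is False, but pretrain_path is None."), which is always truthy, so a missing "pretrained" path is not actually caught there; a guarded form would be assert pretrain_path is not None, "Found load_finetuned is False, but pretrain_path is None.". Second, a self-contained check of the tile() helper, which behaves like torch.repeat_interleave along the chosen dimension; the import path follows the file header above.

import torch
from lavis.models.base_model import tile

x = torch.tensor([[1, 2],
                  [3, 4]])
y = tile(x, dim=0, n_tile=3)   # each row is repeated 3 times consecutively
print(y)
# tensor([[1, 2], [1, 2], [1, 2], [3, 4], [3, 4], [3, 4]])
assert torch.equal(y, x.repeat_interleave(3, dim=0))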
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause Based on timm code base https://github.com/rwightman/pytorch-image-models/tree/master/timm """ import math import torch import torch.nn as nn import torch.nn.functional as F from functools import partial from timm.models.vision_transformer import _cfg, PatchEmbed from timm.models.registry import register_model from timm.models.layers import trunc_normal_, DropPath from timm.models.helpers import named_apply, adapt_input_conv from fairscale.nn.checkpoint.checkpoint_activations import checkpoint_wrapper from lavis.models.base_model import BaseEncoder class Mlp(nn.Module): """MLP as used in Vision Transformer, MLP-Mixer and related networks""" def __init__( self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0, ): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.act = act_layer() self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x class Attention(nn.Module): def __init__( self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0, ): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights self.scale = qk_scale or head_dim ** -0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) self.attn_gradients = None self.attention_map = None def save_attn_gradients(self, attn_gradients): self.attn_gradients = attn_gradients def get_attn_gradients(self): return self.attn_gradients def save_attention_map(self, attention_map): self.attention_map = attention_map def get_attention_map(self): return self.attention_map def forward(self, x, register_hook=False): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) q, k, v = ( qkv[0], qkv[1], qkv[2], ) # make torchscript happy (cannot use tensor as tuple) attn = (q @ k.transpose(-2, -1)) * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) if register_hook: self.save_attention_map(attn) attn.register_hook(self.save_attn_gradients) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class Block(nn.Module): def __init__( self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn.GELU, norm_layer=nn.LayerNorm, use_grad_checkpointing=False, ): super().__init__() self.norm1 = norm_layer(dim) self.attn = Attention( dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop, ) # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp( in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop, ) if use_grad_checkpointing: self.attn = checkpoint_wrapper(self.attn) self.mlp = 
checkpoint_wrapper(self.mlp) def forward(self, x, register_hook=False): x = x + self.drop_path(self.attn(self.norm1(x), register_hook=register_hook)) x = x + self.drop_path(self.mlp(self.norm2(x))) return x class VisionTransformer(nn.Module): """Vision Transformer A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` - https://arxiv.org/abs/2010.11929 """ def __init__( self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4.0, qkv_bias=True, qk_scale=None, representation_size=None, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, norm_layer=None, use_grad_checkpointing=False, ckpt_layer=0, ): """ Args: img_size (int, tuple): input image size patch_size (int, tuple): patch size in_chans (int): number of input channels num_classes (int): number of classes for classification head embed_dim (int): embedding dimension depth (int): depth of transformer num_heads (int): number of attention heads mlp_ratio (int): ratio of mlp hidden dim to embedding dim qkv_bias (bool): enable bias for qkv if True qk_scale (float): override default qk scale of head_dim ** -0.5 if set representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set drop_rate (float): dropout rate attn_drop_rate (float): attention dropout rate drop_path_rate (float): stochastic depth rate norm_layer: (nn.Module): normalization layer """ super().__init__() self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) self.patch_embed = PatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, ) num_patches = self.patch_embed.num_patches self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) self.pos_drop = nn.Dropout(p=drop_rate) dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule self.blocks = nn.ModuleList( [ Block( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, use_grad_checkpointing=(use_grad_checkpointing and i >= depth - ckpt_layer), ) for i in range(depth) ] ) self.norm = norm_layer(embed_dim) trunc_normal_(self.pos_embed, std=0.02) trunc_normal_(self.cls_token, std=0.02) self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=0.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) @torch.jit.ignore def no_weight_decay(self): return {"pos_embed", "cls_token"} def forward(self, x, register_blk=-1): B = x.shape[0] x = self.patch_embed(x) cls_tokens = self.cls_token.expand(B, -1, -1) # stole cls_tokens impl from Phil Wang, thanks x = torch.cat((cls_tokens, x), dim=1) x = x + self.pos_embed[:, : x.size(1), :] x = self.pos_drop(x) for i, blk in enumerate(self.blocks): x = blk(x, register_blk == i) x = self.norm(x) return x @torch.jit.ignore() def load_pretrained(self, checkpoint_path, prefix=""): _load_weights(self, checkpoint_path, prefix) @torch.no_grad() def _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = ""): """Load weights from .npz checkpoints for official Google Brain Flax 
implementation""" import numpy as np def _n2p(w, t=True): if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1: w = w.flatten() if t: if w.ndim == 4: w = w.transpose([3, 2, 0, 1]) elif w.ndim == 3: w = w.transpose([2, 0, 1]) elif w.ndim == 2: w = w.transpose([1, 0]) return torch.from_numpy(w) w = np.load(checkpoint_path) if not prefix and "opt/target/embedding/kernel" in w: prefix = "opt/target/" if hasattr(model.patch_embed, "backbone"): # hybrid backbone = model.patch_embed.backbone stem_only = not hasattr(backbone, "stem") stem = backbone if stem_only else backbone.stem stem.conv.weight.copy_(adapt_input_conv(stem.conv.weight.shape[1], _n2p(w[f"{prefix}conv_root/kernel"]))) stem.norm.weight.copy_(_n2p(w[f"{prefix}gn_root/scale"])) stem.norm.bias.copy_(_n2p(w[f"{prefix}gn_root/bias"])) if not stem_only: for i, stage in enumerate(backbone.stages): for j, block in enumerate(stage.blocks): bp = f"{prefix}block{i + 1}/unit{j + 1}/" for r in range(3): getattr(block, f"conv{r + 1}").weight.copy_(_n2p(w[f"{bp}conv{r + 1}/kernel"])) getattr(block, f"norm{r + 1}").weight.copy_(_n2p(w[f"{bp}gn{r + 1}/scale"])) getattr(block, f"norm{r + 1}").bias.copy_(_n2p(w[f"{bp}gn{r + 1}/bias"])) if block.downsample is not None: block.downsample.conv.weight.copy_(_n2p(w[f"{bp}conv_proj/kernel"])) block.downsample.norm.weight.copy_(_n2p(w[f"{bp}gn_proj/scale"])) block.downsample.norm.bias.copy_(_n2p(w[f"{bp}gn_proj/bias"])) embed_conv_w = _n2p(w[f"{prefix}embedding/kernel"]) else: embed_conv_w = adapt_input_conv(model.patch_embed.proj.weight.shape[1], _n2p(w[f"{prefix}embedding/kernel"])) model.patch_embed.proj.weight.copy_(embed_conv_w) model.patch_embed.proj.bias.copy_(_n2p(w[f"{prefix}embedding/bias"])) model.cls_token.copy_(_n2p(w[f"{prefix}cls"], t=False)) pos_embed_w = _n2p(w[f"{prefix}Transformer/posembed_input/pos_embedding"], t=False) if pos_embed_w.shape != model.pos_embed.shape: pos_embed_w = resize_pos_embed( # resize pos embedding when different size from pretrained weights pos_embed_w, model.pos_embed, getattr(model, "num_tokens", 1), model.patch_embed.grid_size, ) model.pos_embed.copy_(pos_embed_w) model.norm.weight.copy_(_n2p(w[f"{prefix}Transformer/encoder_norm/scale"])) model.norm.bias.copy_(_n2p(w[f"{prefix}Transformer/encoder_norm/bias"])) # if isinstance(model.head, nn.Linear) and model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]: # model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel'])) # model.head.bias.copy_(_n2p(w[f'{prefix}head/bias'])) # if isinstance(getattr(model.pre_logits, 'fc', None), nn.Linear) and f'{prefix}pre_logits/bias' in w: # model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel'])) # model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias'])) for i, block in enumerate(model.blocks.children()): block_prefix = f"{prefix}Transformer/encoderblock_{i}/" mha_prefix = block_prefix + "MultiHeadDotProductAttention_1/" block.norm1.weight.copy_(_n2p(w[f"{block_prefix}LayerNorm_0/scale"])) block.norm1.bias.copy_(_n2p(w[f"{block_prefix}LayerNorm_0/bias"])) block.attn.qkv.weight.copy_( torch.cat([_n2p(w[f"{mha_prefix}{n}/kernel"], t=False).flatten(1).T for n in ("query", "key", "value")]) ) block.attn.qkv.bias.copy_( torch.cat([_n2p(w[f"{mha_prefix}{n}/bias"], t=False).reshape(-1) for n in ("query", "key", "value")]) ) block.attn.proj.weight.copy_(_n2p(w[f"{mha_prefix}out/kernel"]).flatten(1)) block.attn.proj.bias.copy_(_n2p(w[f"{mha_prefix}out/bias"])) for r in range(2): getattr(block.mlp, f"fc{r + 
1}").weight.copy_(_n2p(w[f"{block_prefix}MlpBlock_3/Dense_{r}/kernel"])) getattr(block.mlp, f"fc{r + 1}").bias.copy_(_n2p(w[f"{block_prefix}MlpBlock_3/Dense_{r}/bias"])) block.norm2.weight.copy_(_n2p(w[f"{block_prefix}LayerNorm_2/scale"])) block.norm2.bias.copy_(_n2p(w[f"{block_prefix}LayerNorm_2/bias"])) def resize_pos_embed(posemb, posemb_new, num_tokens=1, gs_new=()): # Rescale the grid of position embeddings when loading from state_dict. Adapted from # https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224 print("Resized position embedding: %s to %s", posemb.shape, posemb_new.shape) ntok_new = posemb_new.shape[1] if num_tokens: posemb_tok, posemb_grid = posemb[:, :num_tokens], posemb[0, num_tokens:] ntok_new -= num_tokens else: posemb_tok, posemb_grid = posemb[:, :0], posemb[0] gs_old = int(math.sqrt(len(posemb_grid))) if not len(gs_new): # backwards compatibility gs_new = [int(math.sqrt(ntok_new))] * 2 assert len(gs_new) >= 2 print("Position embedding grid-size from %s to %s", [gs_old, gs_old], gs_new) posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2) posemb_grid = F.interpolate(posemb_grid, size=gs_new, mode="bicubic", align_corners=False) posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new[0] * gs_new[1], -1) posemb = torch.cat([posemb_tok, posemb_grid], dim=1) return def interpolate_pos_embed(pos_embed_checkpoint, visual_encoder): # interpolate position embedding embedding_size = pos_embed_checkpoint.shape[-1] num_patches = visual_encoder.patch_embed.num_patches num_extra_tokens = visual_encoder.pos_embed.shape[-2] - num_patches # height (== width) for the checkpoint position embedding orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5) # height (== width) for the new position embedding new_size = int(num_patches ** 0.5) if orig_size != new_size: # class_token and dist_token are kept unchanged extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens] # only the position tokens are interpolated pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:] pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2) pos_tokens = torch.nn.functional.interpolate( pos_tokens, size=(new_size, new_size), mode="bicubic", align_corners=False ) pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2) new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1) print("reshape position embedding from %d to %d" % (orig_size ** 2, new_size ** 2)) return new_pos_embed else: return pos_embed_checkpoint class VisionTransformerEncoder(VisionTransformer, BaseEncoder): @classmethod def from_config(cls, cfg, from_pretrained=False): vit_type = cfg.get("vit_type", "base") image_size = cfg.get("image_size", 384) ckpt_layer = cfg.get("vit_ckpt_layer", 0) drop_path_rate = cfg.get("vit_drop_path_rate", 0) norm_layer_eps = cfg.get("vit_layer_norm_epsilon", -1) use_grad_checkpointing = cfg.get("vit_grad_ckpt", False) if norm_layer_eps == -1: norm_layer = None else: norm_layer = partial(nn.LayerNorm, eps=norm_layer_eps) # norm_layer=partial(nn.LayerNorm, eps=1e-6), assert vit_type in ["base", "large"], "vit parameter must be base or large" if vit_type == "base": vision_width = 768 visual_encoder = cls( img_size=image_size, patch_size=16, embed_dim=vision_width, depth=12, num_heads=12, use_grad_checkpointing=use_grad_checkpointing, ckpt_layer=ckpt_layer, drop_path_rate=0 or drop_path_rate, norm_layer=norm_layer, ) if from_pretrained: checkpoint = 
torch.hub.load_state_dict_from_url( url="https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth", map_location="cpu", check_hash=True, ) state_dict = checkpoint["model"] state_dict["pos_embed"] = interpolate_pos_embed(state_dict["pos_embed"], visual_encoder) msg = visual_encoder.load_state_dict(state_dict, strict=False) elif vit_type == "large": vision_width = 1024 visual_encoder = cls( img_size=image_size, patch_size=16, embed_dim=vision_width, depth=24, num_heads=16, use_grad_checkpointing=use_grad_checkpointing, ckpt_layer=ckpt_layer, drop_path_rate=0.1 or drop_path_rate, norm_layer=norm_layer, ) if from_pretrained: from timm.models.helpers import load_custom_pretrained from timm.models.vision_transformer import default_cfgs load_custom_pretrained(visual_encoder, default_cfgs["vit_large_patch16_224_in21k"]) visual_encoder.vision_width = vision_width return visual_encoder def forward_features(self, x, register_blk=-1): return super().forward(x, register_blk)
3D-LLM-main
3DLLM_BLIP2-base/lavis/models/vit.py
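A minimal sketch of building the ViT encoder through VisionTransformerEncoder.from_config defined in vit.py above; the config keys mirror the cfg.get(...) calls in that method and the values are illustrative. Note also that resize_pos_embed as excerpted ends in a bare return, whereas _load_weights expects it to return the resized embedding (upstream timm/LAVIS code returns posemb).

import torch
from omegaconf import OmegaConf
from lavis.models.vit import VisionTransformerEncoder

cfg = OmegaConf.create({
    "vit_type": "base",              # "base" (768-d, 12 blocks) or "large" (1024-d, 24 blocks)
    "image_size": 224,
    "vit_ckpt_layer": 0,             # checkpoint the last k blocks when vit_grad_ckpt is True
    "vit_drop_path_rate": 0.0,
    "vit_layer_norm_epsilon": 1e-6,  # -1 keeps the default LayerNorm eps
    "vit_grad_ckpt": False,
})
encoder = VisionTransformerEncoder.from_config(cfg, from_pretrained=False)
feats = encoder.forward_features(torch.randn(1, 3, 224, 224))
print(feats.shape)  # (1, 197, 768): CLS token + (224 // 16) ** 2 patch tokens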
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import torch import torch.nn.functional as F from lavis.common.registry import registry from lavis.models.blip2_models.blip2_qformer import Blip2Qformer @registry.register_model("blip2_image_text_matching") class Blip2ITM(Blip2Qformer): """ BLIP Image-Text Matching (ITM) model. Supported model types: - pretrained: pretrained model - coco: fintuned model on coco Usage: >>> from lavis.models import load_model >>> model = load_model("blip2_image_text_matching", "pretrained") >>> model = load_model("blip2_image_text_matching", "coco") """ def __init__( self, vit_model="eva_clip_g", img_size=224, drop_path_rate=0, use_grad_checkpoint=False, vit_precision="fp16", freeze_vit=True, num_query_token=32, cross_attention_freq=2, embed_dim=256, max_txt_len=32, ): super().__init__( vit_model=vit_model, img_size=img_size, drop_path_rate=drop_path_rate, use_grad_checkpoint=use_grad_checkpoint, vit_precision=vit_precision, freeze_vit=freeze_vit, num_query_token=num_query_token, cross_attention_freq=cross_attention_freq, embed_dim=embed_dim, max_txt_len=max_txt_len, ) def forward(self, samples, match_head="itm"): image = samples["image"] caption = samples["text_input"] with self.maybe_autocast(): image_embeds = self.ln_vision(self.visual_encoder(image)) image_embeds = image_embeds.float() image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(image.device) text = self.tokenizer( caption, truncation=True, max_length=self.max_txt_len, return_tensors="pt", ).to(image.device) if match_head == "itm": query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) query_atts = torch.ones(query_tokens.size()[:-1], dtype=torch.long).to(image.device) attention_mask = torch.cat([query_atts, text.attention_mask], dim=1) output_itm = self.Qformer.bert( text.input_ids, query_embeds=query_tokens, attention_mask=attention_mask, encoder_hidden_states=image_embeds, encoder_attention_mask=image_atts, return_dict=True, ) itm_embeddings = output_itm.last_hidden_state[:, : query_tokens.size(1), :] itm_logit = self.itm_head(itm_embeddings) itm_logit = itm_logit.mean(dim=1) return itm_logit elif match_head == "itc": query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) query_output = self.Qformer.bert( query_embeds=query_tokens, encoder_hidden_states=image_embeds, encoder_attention_mask=image_atts, return_dict=True, ) image_feats = F.normalize(self.vision_proj(query_output.last_hidden_state), dim=-1) text_output = self.Qformer.bert( text.input_ids, attention_mask=text.attention_mask, return_dict=True, ) text_feat = F.normalize(self.text_proj(text_output.last_hidden_state[:, 0, :]), dim=-1) sims = torch.bmm(image_feats, text_feat.unsqueeze(-1)) sim, _ = torch.max(sims, dim=1) return sim
3D-LLM-main
3DLLM_BLIP2-base/lavis/models/blip2_models/blip2_image_text_matching.py
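A usage sketch for the Blip2ITM heads defined above, following the load_model call shown in the class docstring. The image path is a placeholder, the "eval" preprocessor key and load_model_and_preprocess are assumptions carried over from the earlier sketch rather than details confirmed by this excerpt.

import torch
from PIL import Image
from lavis.models import load_model_and_preprocess  # assumed export, as above

device = "cuda" if torch.cuda.is_available() else "cpu"
model, vis_processors, _ = load_model_and_preprocess(
    "blip2_image_text_matching", "pretrained", is_eval=True, device=device
)
image = vis_processors["eval"](Image.open("example.jpg").convert("RGB")).unsqueeze(0).to(device)
samples = {"image": image, "text_input": "a photo of a cat"}

itm_logit = model(samples, match_head="itm")   # (1, 2) logits over {no match, match}
itm_score = torch.softmax(itm_logit, dim=1)[:, 1]
itc_sim = model(samples, match_head="itc")     # max-over-query-token similarity
print(itm_score.item(), itc_sim.item())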
import logging import torch import torch.nn as nn from torch.cuda.amp import autocast as autocast from transformers import T5TokenizerFast from lavis.common.registry import registry from lavis.models.blip2_models.blip2 import Blip2Base, disabled_train from lavis.models.blip2_models.modeling_t5 import T5Config, T5ForConditionalGeneration from positional_encodings.torch_encodings import PositionalEncoding1D @registry.register_model("blip2_t5") class Blip2T5(Blip2Base): """ BLIP2 T5 model. Supported model types: - pretrain_flant5xl: pretrained model with FlanT5-XL - pretrain_flant5xxl: pretrained model with FlanT5-XXL - caption_coco_flant5xl: fintuned image captioning model with FlanT5-XL Usage: >>> from lavis.models import load_model >>> model = load_model("blip2_t5", "pretrain_flant5xl") """ PRETRAINED_MODEL_CONFIG_DICT = { "pretrain_flant5xl": "configs/models/blip2/blip2_pretrain_flant5xl.yaml", "pretrain_flant5xxl": "configs/models/blip2/blip2_pretrain_flant5xxl.yaml", "caption_coco_flant5xl": "configs/models/blip2/blip2_caption_flant5xl.yaml", } def __init__( self, vit_model="eva_clip_g", img_size=224, drop_path_rate=0, use_grad_checkpoint=False, vit_precision="fp16", freeze_vit=True, num_query_token=32, t5_model="google/flan-t5-xl", prompt="", max_txt_len=32, apply_lemmatizer=False, ): """ apply_lemmatizer: when set to True, postprocess predict_answers() result with lemmas. """ super().__init__() self.tokenizer = self.init_tokenizer() self.Qformer, self.query_tokens = self.init_Qformer(num_query_token, 1408) self.Qformer.cls = None self.Qformer.bert.embeddings.word_embeddings = None self.Qformer.bert.embeddings.position_embeddings = None for layer in self.Qformer.bert.encoder.layer: layer.output = None layer.intermediate = None self.t5_tokenizer = T5TokenizerFast.from_pretrained(t5_model) location_tokens = [] for i in range(64): # pc -> x,y,z tokens location_tokens.append("<loc%d>" % i) # bbox_min: 500; bbox_max: 550; (xmin, ymin, zmin) (xmax, ymax, zmax) self.t5_tokenizer.add_special_tokens({"additional_special_tokens": location_tokens}) t5_config = T5Config.from_pretrained(t5_model) t5_config.dense_act_fn = "gelu" self.t5_model = T5ForConditionalGeneration.from_pretrained(t5_model, config=t5_config) self.t5_model.resize_token_embeddings(len(self.t5_tokenizer)) for name, param in self.t5_model.named_parameters(): param.requires_grad = False param.data = param.data self.t5_model.get_output_embeddings().requires_grad_(True) self.t5_model.get_input_embeddings().requires_grad_(True) self.t5_proj = nn.Linear(self.Qformer.config.hidden_size, self.t5_model.config.hidden_size) pos_model = PositionalEncoding1D(1408 // 3) x = torch.zeros(1, 256, 1408 // 3) self.pos_embedding = pos_model(x).squeeze().cuda() self.max_txt_len = max_txt_len self.prompt = "" self._apply_lemmatizer = apply_lemmatizer self._lemmatizer = None def forward(self, samples): with torch.cuda.amp.autocast(dtype=torch.float32): pc_embeds = samples["pc_feat"] with torch.cuda.amp.autocast(dtype=torch.float32): pc = samples["pc"].long() all_pcs = torch.zeros((pc_embeds.shape)) for j in range(pc.shape[0]): pcs = [] for i in range(3): pc_i = pc[j][:, i] pcs.append(self.pos_embedding[pc_i]) pcs = torch.cat(pcs, -1) all_pcs[j][:, :1407] = pcs all_pcs = all_pcs.cuda() image_atts = torch.ones(pc_embeds.size()[:-1], dtype=torch.long).to(pc_embeds.device) query_tokens = self.query_tokens.expand(pc_embeds.shape[0], -1, -1) # 768 query_output = self.Qformer.bert( query_embeds=query_tokens, encoder_hidden_states=pc_embeds, 
encoder_attention_mask=image_atts, return_dict=True, ) inputs_t5 = self.t5_proj(query_output.last_hidden_state) atts_t5 = torch.ones(inputs_t5.size()[:-1], dtype=torch.long).to(pc_embeds.device) if self.prompt: text_input = [self.prompt.format(question) for question in samples["text_input"]] else: text_input = samples["text_input"] with torch.cuda.amp.autocast(dtype=torch.float32): input_tokens = self.t5_tokenizer( text_input, padding="longest", truncation=True, max_length=300, return_tensors="pt", ).to(pc_embeds.device) output_tokens = self.t5_tokenizer( samples["answer"], padding="longest", truncation=True, max_length=300, return_tensors="pt", ).to(pc_embeds.device) batch_input_tokens_input_ids = [] batch_input_tokens_atts = [] batch_atts_t5 = [] batch_inputs_t5 = [] for b, n in enumerate(samples["n_answers"]): batch_input_tokens_input_ids += [input_tokens.input_ids[b]] * n batch_input_tokens_atts += [input_tokens.attention_mask[b]] * n batch_atts_t5 += [atts_t5[b]] * n batch_inputs_t5 += [inputs_t5[b]] * n batch_input_tokens_input_ids = torch.stack(batch_input_tokens_input_ids, dim=0) batch_input_tokens_atts = torch.stack(batch_input_tokens_atts, dim=0) batch_atts_t5 = torch.stack(batch_atts_t5, dim=0) batch_inputs_t5 = torch.stack(batch_inputs_t5, dim=0) encoder_atts = torch.cat([batch_atts_t5, batch_input_tokens_atts], dim=1) targets = output_tokens.input_ids.masked_fill( output_tokens.input_ids == self.t5_tokenizer.pad_token_id, -100 ) inputs_embeds = self.t5_model.encoder.embed_tokens(batch_input_tokens_input_ids) inputs_embeds = torch.cat([batch_inputs_t5, inputs_embeds], dim=1) outputs = self.t5_model( inputs_embeds=inputs_embeds, attention_mask=encoder_atts, decoder_attention_mask=output_tokens.attention_mask, return_dict=True, labels=targets, ) loss = outputs.loss return {"loss": loss} @torch.no_grad() def generate( self, samples, use_nucleus_sampling=False, num_beams=5, max_length=30, min_length=1, top_p=0.9, repetition_penalty=1.0, length_penalty=1.0, num_captions=1, temperature=1, ): """ Args: samples (dict): A dictionary containing the following keys: - image (torch.Tensor): A tensor of shape (batch_size, 3, H, W) use_nucleus_sampling (bool): Whether to use nucleus sampling. If False, use top-k sampling. num_beams (int): Number of beams for beam search. 1 means no beam search. max_length (int): The maximum length of the sequence to be generated. min_length (int): The minimum length of the sequence to be generated. top_p (float): The cumulative probability for nucleus sampling. repetition_penalty (float): The parameter for repetition penalty. 1.0 means no penalty. num_captions (int): Number of captions to be generated for each image. Returns: captions (list): A list of strings of length batch_size * num_captions. 
""" with torch.cuda.amp.autocast(enabled=(self.device != torch.device("cpu")), dtype=torch.float32): pc_embeds = samples["pc_feat"] image_atts = torch.ones(pc_embeds.size()[:-1], dtype=torch.long).to(pc_embeds.device) query_tokens = self.query_tokens.expand(pc_embeds.shape[0], -1, -1) query_output = self.Qformer.bert( query_embeds=query_tokens, encoder_hidden_states=pc_embeds, encoder_attention_mask=image_atts, return_dict=True, ) inputs_t5 = self.t5_proj(query_output.last_hidden_state) atts_t5 = torch.ones(inputs_t5.size()[:-1], dtype=torch.long).to(pc_embeds.device) if "prompt" in samples.keys(): prompt = samples["prompt"] else: prompt = self.prompt if isinstance(prompt, str): prompt = [prompt] * pc_embeds.size(0) else: assert len(prompt) == pc_embeds.size(0), "The number of prompts must be equal to the batch size." input_tokens = self.t5_tokenizer(prompt, padding="longest", return_tensors="pt").to(pc_embeds.device) encoder_atts = torch.cat([atts_t5, input_tokens.attention_mask], dim=1) device_type = "cuda" if "cuda" in str(self.device) else "cpu" with torch.cuda.amp.autocast(enabled=(self.device != torch.device("cpu")), dtype=torch.float32): inputs_embeds = self.t5_model.encoder.embed_tokens(input_tokens.input_ids) inputs_embeds = torch.cat([inputs_t5, inputs_embeds], dim=1) outputs = self.t5_model.generate( inputs_embeds=inputs_embeds, attention_mask=encoder_atts, do_sample=use_nucleus_sampling, top_p=top_p, temperature=temperature, num_beams=num_beams, max_new_tokens=max_length, min_length=min_length, repetition_penalty=repetition_penalty, length_penalty=length_penalty, num_return_sequences=num_captions, ) output_text = self.t5_tokenizer.batch_decode(outputs, skip_special_tokens=True) return output_text def predict_answers( self, samples, num_beams=5, inference_method="generate", max_len=10, min_len=1, num_ans_candidates=128, answer_list=None, prompt="", length_penalty=-1, **kwargs, ): with torch.cuda.amp.autocast(enabled=(self.device != torch.device("cpu")), dtype=torch.float32): pc_embeds = samples["pc_feat"] with torch.cuda.amp.autocast(dtype=torch.float32): pc = samples["pc"].long() all_pcs = torch.zeros((pc_embeds.shape)) for j in range(pc.shape[0]): pcs = [] for i in range(3): pc_i = pc[j][:, i] pcs.append(self.pos_embedding[pc_i]) pcs = torch.cat(pcs, -1) all_pcs[j][:, :1407] = pcs all_pcs = all_pcs.cuda() image_atts = torch.ones(pc_embeds.size()[:-1], dtype=torch.long).to(pc_embeds.device) query_tokens = self.query_tokens.expand(pc_embeds.shape[0], -1, -1) query_output = self.Qformer.bert( query_embeds=query_tokens, encoder_hidden_states=pc_embeds, encoder_attention_mask=image_atts, return_dict=True, ) inputs_t5 = self.t5_proj(query_output.last_hidden_state) atts_t5 = torch.ones(inputs_t5.size()[:-1], dtype=torch.long).to(pc_embeds.device) if isinstance(samples["text_input"], str): samples["text_input"] = [samples["text_input"]] prompt = self.prompt if prompt: text_input = [prompt.format(question) for question in samples["text_input"]] else: text_input = samples["text_input"] input_tokens = self.t5_tokenizer(text_input, padding="longest", return_tensors="pt").to(pc_embeds.device) encoder_atts = torch.cat([atts_t5, input_tokens.attention_mask], dim=1) num_beams = 1 device_type = "cuda" if "cuda" in str(self.device) else "cpu" with torch.cuda.amp.autocast(enabled=(self.device != torch.device("cpu")), dtype=torch.float32): inputs_embeds = self.t5_model.encoder.embed_tokens(input_tokens.input_ids) inputs_embeds = torch.cat([inputs_t5, inputs_embeds], dim=1) outputs = 
self.t5_model.generate( inputs_embeds=inputs_embeds, attention_mask=encoder_atts, do_sample=False, num_beams=num_beams, max_new_tokens=max_len, min_length=1, length_penalty=-1, ) output_text = self.t5_tokenizer.batch_decode(outputs, skip_special_tokens=False) if self._apply_lemmatizer: output_text_new = self._lemmatize(output_text) output_text = output_text_new # if output_text_new!=output_text: # print("old: %s, new: %s\n"%(output_text, output_text_new)) # import pdb; pdb.set_trace() return output_text def _lemmatize(self, answers): def apply(answer): doc = self.lemmatizer(answer) words = [] for token in doc: if token.pos_ in ["NOUN", "VERB"]: words.append(token.lemma_) else: words.append(token.text) answer = " ".join(words) return answer return [apply(answer) for answer in answers] @property def lemmatizer(self): if self._lemmatizer is None: try: import spacy self._lemmatizer = spacy.load("en_core_web_sm") except ImportError: logging.error( """ Please install spacy and en_core_web_sm model to apply lemmatization. python -m spacy download en_core_web_sm OR import spacy.cli spacy.cli.download("en_core_web_sm") """ ) exit(1) return self._lemmatizer @classmethod def from_config(cls, cfg): img_size = cfg.get("image_size") num_query_token = cfg.get("num_query_token") t5_model = cfg.get("t5_model") drop_path_rate = cfg.get("drop_path_rate", 0) use_grad_checkpoint = cfg.get("use_grad_checkpoint", False) vit_precision = cfg.get("vit_precision", "fp16") freeze_vit = cfg.get("freeze_vit", True) prompt = cfg.get("prompt", "") max_txt_len = cfg.get("max_txt_len", 32) apply_lemmatizer = cfg.get("apply_lemmatizer", False) model = cls( img_size=img_size, drop_path_rate=drop_path_rate, use_grad_checkpoint=use_grad_checkpoint, vit_precision=vit_precision, freeze_vit=freeze_vit, num_query_token=num_query_token, t5_model=t5_model, prompt=prompt, max_txt_len=max_txt_len, apply_lemmatizer=apply_lemmatizer, ) model.load_checkpoint_from_config(cfg) return model
3D-LLM-main
3DLLM_BLIP2-base/lavis/models/blip2_models/blip2_t5.py
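A sketch of the sample dict that Blip2T5.generate() above consumes. It reads "pc_feat", a batch of frozen 3D scene features whose last dimension must match the Q-Former encoder width of 1408 set in __init__, plus an optional "prompt"; forward() and predict_answers() additionally expect "pc" with quantized point coordinates and, for training, "answer" / "n_answers". The shapes below are illustrative. The module calls .cuda() on its positional-encoding table during __init__, so a GPU is assumed, and loading the registered pretrain_flant5xl config pulls BLIP-2 weights; actual 3D-LLM inference would load a finetuned checkpoint instead.

import torch
from lavis.models import load_model

model = load_model("blip2_t5", "pretrain_flant5xl", is_eval=True, device="cuda")

samples = {
    "pc_feat": torch.randn(1, 256, 1408, device="cuda"),  # (B, N_points, 1408) frozen 3D features
    "prompt": "Question: what color is the sofa in the room? Answer:",
}
answers = model.generate(samples, num_beams=5, max_length=30)
print(answers)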
""" Copyright (c) 2023, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import logging import torch import torch.distributed as dist import torch.nn as nn from torch.cuda.amp import autocast as autocast from torch.nn import functional as F from lavis.common.registry import registry from lavis.models.base_model import all_gather_with_grad, concat_all_gather from lavis.models.blip2_models.blip2 import ( Blip2Base, compute_sim_matrix, disabled_train, ) from lavis.models.blip_models.blip_outputs import BlipOutput, BlipOutputFeatures @registry.register_model("blip2") @registry.register_model("blip2_feature_extractor") class Blip2Qformer(Blip2Base): """ BLIP2 first-stage model with Q-former and ViT. Supported model types: - pretrained: pretrained model with vit-g - pretrain_vitL: pretrained model with vit-large - coco: fintuned model on coco Usage: >>> from lavis.models import load_model >>> model = load_model("blip2", "pretrain") """ PRETRAINED_MODEL_CONFIG_DICT = { "pretrain": "configs/models/blip2/blip2_pretrain.yaml", "pretrain_vitL": "configs/models/blip2/blip2_pretrain_vitL.yaml", "coco": "configs/models/blip2/blip2_coco.yaml", } def __init__( self, vit_model="eva_clip_g", img_size=224, drop_path_rate=0, use_grad_checkpoint=False, vit_precision="fp16", freeze_vit=True, num_query_token=32, cross_attention_freq=2, embed_dim=256, max_txt_len=32, ): super().__init__() self.tokenizer = self.init_tokenizer() self.visual_encoder, self.ln_vision = self.init_vision_encoder( vit_model, img_size, drop_path_rate, use_grad_checkpoint, vit_precision ) if freeze_vit: for name, param in self.visual_encoder.named_parameters(): param.requires_grad = False self.visual_encoder = self.visual_encoder.eval() self.visual_encoder.train = disabled_train logging.info("freeze vision encoder") self.Qformer, self.query_tokens = self.init_Qformer( num_query_token, self.visual_encoder.num_features, cross_attention_freq ) self.Qformer.resize_token_embeddings(len(self.tokenizer)) state_dict = self.Qformer.state_dict() for name, param in self.Qformer.named_parameters(): if "_query" in name: key_orig = name.replace("_query", "") param.data.copy_(state_dict[key_orig]) self.vision_proj = nn.Linear(self.Qformer.config.hidden_size, embed_dim) self.text_proj = nn.Linear(self.Qformer.config.hidden_size, embed_dim) self.itm_head = nn.Linear(self.Qformer.config.hidden_size, 2) self.temp = nn.Parameter(0.07 * torch.ones([])) self.max_txt_len = max_txt_len def forward(self, samples): image = samples["image"] text = samples["text_input"] image_embeds = self.ln_vision(self.visual_encoder(image)) image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(image.device) query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) query_output = self.Qformer.bert( query_embeds=query_tokens, encoder_hidden_states=image_embeds, encoder_attention_mask=image_atts, use_cache=True, return_dict=True, ) image_feats = F.normalize(self.vision_proj(query_output.last_hidden_state), dim=-1) text_tokens = self.tokenizer( text, padding="max_length", truncation=True, max_length=self.max_txt_len, return_tensors="pt", ).to(image.device) text_output = self.Qformer.bert( text_tokens.input_ids, attention_mask=text_tokens.attention_mask, return_dict=True, ) text_feat = F.normalize(self.text_proj(text_output.last_hidden_state[:, 0, :]), dim=-1) ###============== Image-text Contrastive 
===================### image_feats_all = concat_all_gather(image_feats) # [batch_size*num_gpu, num_query_tokens, embed_dim] text_feat_all = concat_all_gather(text_feat) # [batch_size*num_gpu, embed_dim] sim_q2t = torch.matmul(image_feats.unsqueeze(1), text_feat_all.unsqueeze(-1)).squeeze() # [batch_size, batch_size*num_gpu, num_query_tokens] # image-text similarity: aggregate across all query tokens sim_i2t, _ = sim_q2t.max(-1) sim_i2t = sim_i2t / self.temp # text-query similarity: [batch_size, batch_size*num_gpu, num_query_tokens] sim_t2q = torch.matmul(text_feat.unsqueeze(1).unsqueeze(1), image_feats_all.permute(0, 2, 1)).squeeze() # text-image similarity: aggregate across all query tokens sim_t2i, _ = sim_t2q.max(-1) sim_t2i = sim_t2i / self.temp # [batch_size, batch_size*num_gpu] rank = dist.get_rank() bs = image.size(0) targets = torch.linspace(rank * bs, rank * bs + bs - 1, bs, dtype=int).to(image.device) loss_itc = ( F.cross_entropy(sim_i2t, targets, label_smoothing=0.1) + F.cross_entropy(sim_t2i, targets, label_smoothing=0.1) ) / 2 ###============== Image-text Matching ===================### text_input_ids_world = concat_all_gather(text_tokens.input_ids) text_attention_mask_world = concat_all_gather(text_tokens.attention_mask) image_embeds_world = all_gather_with_grad(image_embeds) with torch.no_grad(): weights_t2i = F.softmax(sim_t2i, dim=1) + 1e-4 weights_t2i[:, rank * bs : rank * bs + bs].fill_diagonal_(0) weights_i2t = F.softmax(sim_i2t, dim=1) + 1e-4 weights_i2t[:, rank * bs : rank * bs + bs].fill_diagonal_(0) # select a negative image for each text image_embeds_neg = [] for b in range(bs): neg_idx = torch.multinomial(weights_t2i[b], 1).item() image_embeds_neg.append(image_embeds_world[neg_idx]) image_embeds_neg = torch.stack(image_embeds_neg, dim=0) # select a negative text for each image text_ids_neg = [] text_atts_neg = [] for b in range(bs): neg_idx = torch.multinomial(weights_i2t[b], 1).item() text_ids_neg.append(text_input_ids_world[neg_idx]) text_atts_neg.append(text_attention_mask_world[neg_idx]) text_ids_neg = torch.stack(text_ids_neg, dim=0) text_atts_neg = torch.stack(text_atts_neg, dim=0) text_ids_all = torch.cat([text_tokens.input_ids, text_tokens.input_ids, text_ids_neg], dim=0) # pos, pos, neg text_atts_all = torch.cat( [text_tokens.attention_mask, text_tokens.attention_mask, text_atts_neg], dim=0, ) query_tokens_itm = self.query_tokens.expand(text_ids_all.shape[0], -1, -1) query_atts_itm = torch.ones(query_tokens_itm.size()[:-1], dtype=torch.long).to(image.device) attention_mask_all = torch.cat([query_atts_itm, text_atts_all], dim=1) image_embeds_all = torch.cat([image_embeds, image_embeds_neg, image_embeds], dim=0) # pos, neg, pos image_atts_all = torch.ones(image_embeds_all.size()[:-1], dtype=torch.long).to(image.device) output_itm = self.Qformer.bert( text_ids_all, query_embeds=query_tokens_itm, attention_mask=attention_mask_all, encoder_hidden_states=image_embeds_all, encoder_attention_mask=image_atts_all, return_dict=True, ) vl_embeddings = output_itm.last_hidden_state[:, : query_tokens_itm.size(1), :] vl_output = self.itm_head(vl_embeddings) logits = vl_output.mean(dim=1) itm_labels = torch.cat( [torch.ones(bs, dtype=torch.long), torch.zeros(2 * bs, dtype=torch.long)], dim=0, ).to(image.device) loss_itm = F.cross_entropy(logits, itm_labels) ##================= Image Captioning ========================## decoder_input_ids = text_tokens.input_ids.clone() decoder_input_ids[:, 0] = self.tokenizer.bos_token_id labels = 
decoder_input_ids.masked_fill(decoder_input_ids == self.tokenizer.pad_token_id, -100) query_atts = torch.ones(query_tokens.size()[:-1], dtype=torch.long).to(image.device) attention_mask = torch.cat([query_atts, text_tokens.attention_mask], dim=1) lm_output = self.Qformer( decoder_input_ids, attention_mask=attention_mask, past_key_values=query_output.past_key_values, return_dict=True, labels=labels, ) loss_lm = lm_output.loss return BlipOutput( loss=loss_itc + loss_itm + loss_lm, loss_itc=loss_itc, loss_itm=loss_itm, loss_lm=loss_lm, ) @torch.no_grad() def generate( self, samples, use_nucleus_sampling=False, num_beams=3, max_length=30, min_length=10, top_p=0.9, repetition_penalty=1.0, ): """ Args: samples (dict): A dictionary containing the following keys: - image (torch.Tensor): A tensor of shape (batch_size, 3, H, W) use_nucleus_sampling (bool): Whether to use nucleus sampling. If False, use top-k sampling. num_beams (int): Number of beams for beam search. 1 means no beam search. max_length (int): The maximum length of the sequence to be generated. min_length (int): The minimum length of the sequence to be generated. top_p (float): The cumulative probability for nucleus sampling. repetition_penalty (float): The parameter for repetition penalty. 1.0 means no penalty. num_captions (int): Number of captions to be generated for each image. Returns: captions (list): A list of strings of length batch_size * num_captions. """ image = samples["image"] image_embeds = self.ln_vision(self.visual_encoder(image)) if not use_nucleus_sampling: image_embeds = image_embeds.repeat_interleave(num_beams, dim=0) else: num_beams = 1 image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(image.device) model_kwargs = { "encoder_hidden_states": image_embeds, "encoder_attention_mask": image_atts, } input_ids = torch.LongTensor(image.size(0), 1).fill_(self.tokenizer.bos_token_id).to(image.device) query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) outputs = self.Qformer.generate( input_ids=input_ids, query_embeds=query_tokens, max_length=max_length, min_length=min_length, num_beams=num_beams, do_sample=use_nucleus_sampling, top_p=top_p, eos_token_id=self.tokenizer.sep_token_id, pad_token_id=self.tokenizer.pad_token_id, **model_kwargs, ) captions = self.tokenizer.batch_decode(outputs, skip_special_tokens=True) return captions def forward_image(self, image): image_embeds = self.ln_vision(self.visual_encoder(image)) image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(image.device) query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) query_output = self.Qformer.bert( query_embeds=query_tokens, encoder_hidden_states=image_embeds, encoder_attention_mask=image_atts, return_dict=True, ) return query_output.last_hidden_state, image_embeds def forward_text(self, text_tokens): text_output = self.Qformer.bert( text_tokens.input_ids, attention_mask=text_tokens.attention_mask, return_dict=True, ) return text_output.last_hidden_state[:, 0, :] def compute_itm(self, image_inputs, text_ids, text_atts): image_atts = torch.ones(image_inputs.size()[:-1], dtype=torch.long).to(image_inputs.device) query_tokens = self.query_tokens.expand(image_inputs.shape[0], -1, -1) query_atts = torch.ones(query_tokens.size()[:-1], dtype=torch.long).to(image_inputs.device) attention_mask = torch.cat([query_atts, text_atts], dim=1) output_itm = self.Qformer.bert( text_ids, query_embeds=query_tokens, attention_mask=attention_mask, encoder_hidden_states=image_inputs, 
encoder_attention_mask=image_atts, return_dict=True, ) vl_embeddings = output_itm.last_hidden_state[:, : query_tokens.size(1), :] itm_logit = self.itm_head(vl_embeddings) itm_logit = itm_logit[:, :, 1].mean(dim=1) return itm_logit @torch.no_grad() def extract_features(self, samples, mode="multimodal"): """ Extract features for multimodal or unimodal samples. Args: samples (dict): A dictionary of samples, containing the following keys: - image (torch.Tensor): A tensor of shape (B, C, H, W) containing the image. Raw images should be preprocessed before being passed to feature extractor. - text_input (list): A list of strings containing the text, length B. mode (str): The mode of feature extraction. Can be either "multimodal", "text" or "image". If "multimodal", return image features and multimodal features; if "text", return text features; if "image", return image features. Default: "multimodal". Returns: BlipOutputFeatures: A BlipOutputFeatures object containing the features. See lavis/models/blip_models/blip_outputs.py for more details. """ image = samples.get("image") caption = samples.get("text_input") # assert mode is one of "image", "text", "multimodal" assert mode in [ "image", "text", "multimodal", ], "mode must be one of 'image', 'text', 'multimodal'" # initalize output image_embeds, text_embeds, multimodal_embeds = None, None, None image_features, text_features = None, None if mode == "image": assert image is not None, "Image is not provided for mode 'image' or 'multimodal'" # return query features with self.maybe_autocast(): image_embeds_frozen = self.ln_vision(self.visual_encoder(image)) image_embeds_frozen = image_embeds_frozen.float() image_atts = torch.ones(image_embeds_frozen.size()[:-1], dtype=torch.long).to(self.device) query_tokens = self.query_tokens.expand(image_embeds_frozen.shape[0], -1, -1) query_output = self.Qformer.bert( query_embeds=query_tokens, encoder_hidden_states=image_embeds_frozen, encoder_attention_mask=image_atts, return_dict=True, ) image_embeds = query_output.last_hidden_state image_features = F.normalize(self.vision_proj(image_embeds), dim=-1) elif mode == "text": assert caption is not None, "text input is None for mode 'text' or 'multimodal'" # return text features text = self.tokenizer(caption, return_tensors="pt", padding=True).to(self.device) text_output = self.Qformer.bert( text.input_ids, attention_mask=text.attention_mask, return_dict=True, ) text_embeds = text_output.last_hidden_state text_features = self.text_proj(text_embeds) text_features = F.normalize(text_features, dim=-1) elif mode == "multimodal": # return multimodel query features with self.maybe_autocast(): image_embeds_frozen = self.ln_vision(self.visual_encoder(image)) image_embeds_frozen = image_embeds_frozen.float() image_atts = torch.ones(image_embeds_frozen.size()[:-1], dtype=torch.long).to(self.device) query_tokens = self.query_tokens.expand(image_embeds_frozen.shape[0], -1, -1) query_atts = torch.ones(query_tokens.size()[:-1], dtype=torch.long).to(self.device) text = self.tokenizer(caption, return_tensors="pt", padding=True).to(self.device) attention_mask = torch.cat([query_atts, text.attention_mask], dim=1) output = self.Qformer.bert( text.input_ids, query_embeds=query_tokens, attention_mask=attention_mask, encoder_hidden_states=image_embeds_frozen, encoder_attention_mask=image_atts, return_dict=True, ) multimodal_embeds = output.last_hidden_state[:, : query_tokens.size(1), :] return BlipOutputFeatures( image_embeds=image_embeds, image_embeds_proj=image_features, 
text_embeds=text_embeds, text_embeds_proj=text_features, multimodal_embeds=multimodal_embeds, ) @classmethod def from_config(cls, cfg): vit_model = cfg.get("vit_model", "eva_clip_g") img_size = cfg.get("image_size") num_query_token = cfg.get("num_query_token") cross_attention_freq = cfg.get("cross_attention_freq", 2) drop_path_rate = cfg.get("drop_path_rate", 0) use_grad_checkpoint = cfg.get("use_grad_checkpoint", False) vit_precision = cfg.get("vit_precision", "fp16") freeze_vit = cfg.get("freeze_vit", True) max_txt_len = cfg.get("max_txt_len", 32) model = cls( vit_model=vit_model, img_size=img_size, drop_path_rate=drop_path_rate, use_grad_checkpoint=use_grad_checkpoint, vit_precision=vit_precision, freeze_vit=freeze_vit, num_query_token=num_query_token, cross_attention_freq=cross_attention_freq, max_txt_len=max_txt_len, ) model.load_checkpoint_from_config(cfg) return model def compute_sim_matrix(self, data_loader, task_cfg): """ Compute similarity i2t, t2i matrix for the given data loader. """ k_test = task_cfg.k_test return compute_sim_matrix(model=self, data_loader=data_loader, k_test=k_test)
3D-LLM-main
3DLLM_BLIP2-base/lavis/models/blip2_models/blip2_qformer.py
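A self-contained sketch of the image-text contrastive (ITC) scoring used in Blip2Qformer.forward above, run on random tensors in a single process so that concat_all_gather reduces to the identity. Each image is represented by num_query query embeddings; the pairwise similarity is the maximum over query tokens, scaled by the learned temperature, and the in-batch targets sit on the diagonal.

import torch
import torch.nn.functional as F

bs, num_query, embed_dim, temp = 4, 32, 256, 0.07
image_feats = F.normalize(torch.randn(bs, num_query, embed_dim), dim=-1)
text_feat = F.normalize(torch.randn(bs, embed_dim), dim=-1)

# image -> text: one score per (image, text, query token), aggregated with a max over query tokens
sim_q2t = torch.matmul(image_feats.unsqueeze(1), text_feat.unsqueeze(-1)).squeeze()  # (bs, bs, num_query)
sim_i2t = sim_q2t.max(-1).values / temp                                              # (bs, bs)

# text -> image: same aggregation from the text side
sim_t2q = torch.matmul(
    text_feat.unsqueeze(1).unsqueeze(1), image_feats.permute(0, 2, 1)
).squeeze()                                                                          # (bs, bs, num_query)
sim_t2i = sim_t2q.max(-1).values / temp

targets = torch.arange(bs)  # single process, rank 0: matched pairs lie on the diagonal
loss_itc = (
    F.cross_entropy(sim_i2t, targets, label_smoothing=0.1)
    + F.cross_entropy(sim_t2i, targets, label_smoothing=0.1)
) / 2
print(loss_itc.item())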
3D-LLM-main
3DLLM_BLIP2-base/lavis/models/blip2_models/__init__.py
""" Copyright (c) 2023, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import logging import torch from torch.cuda.amp import autocast as autocast import torch.nn as nn from lavis.common.registry import registry from lavis.models.blip2_models.blip2 import Blip2Base, disabled_train from lavis.models.blip2_models.modeling_opt import OPTForCausalLM, OPTConfig from transformers import AutoTokenizer from positional_encodings.torch_encodings import PositionalEncoding1D @registry.register_model("blip2_opt") class Blip2OPT(Blip2Base): """ BLIP2 OPT model. Supported model types: - pretrained_opt2.7b: pretrained model with OPT2.7b - pretrained_opt6.7b: pretrained model with OPT6.7b - caption_coco_opt2.7b: fintuned image captioning model with OPT2.7b - caption_coco_opt6.7b: fintuned image captioning model with OPT6.7b Usage: >>> from lavis.models import load_model >>> model = load_model("blip2_opt", "caption_coco_opt2.7b") """ PRETRAINED_MODEL_CONFIG_DICT = { "pretrain_opt2.7b": "configs/models/blip2/blip2_pretrain_opt2.7b.yaml", "pretrain_opt6.7b": "configs/models/blip2/blip2_pretrain_opt6.7b.yaml", "caption_coco_opt2.7b": "configs/models/blip2/blip2_caption_opt2.7b.yaml", "caption_coco_opt6.7b": "configs/models/blip2/blip2_caption_opt6.7b.yaml", } def __init__( self, vit_model="eva_clip_g", img_size=224, drop_path_rate=0, use_grad_checkpoint=False, vit_precision="fp16", freeze_vit=True, num_query_token=32, opt_model="facebook/opt-2.7b", prompt="", max_txt_len=32, ): super().__init__() self.tokenizer = self.init_tokenizer() self.Qformer, self.query_tokens = self.init_Qformer(num_query_token, 1408) self.Qformer.cls = None self.Qformer.bert.embeddings.word_embeddings = None self.Qformer.bert.embeddings.position_embeddings = None for layer in self.Qformer.bert.encoder.layer: layer.output = None layer.intermediate = None self.opt_tokenizer = AutoTokenizer.from_pretrained(opt_model, use_fast=False) location_tokens = [] for i in range(64): location_tokens.append("<loc%d>" % i) self.opt_tokenizer.add_special_tokens({"additional_special_tokens": location_tokens}) self.opt_model = OPTForCausalLM.from_pretrained(opt_model, torch_dtype=torch.float16) self.opt_model.resize_token_embeddings(len(self.opt_tokenizer)) for name, param in self.opt_model.named_parameters(): param.requires_grad = False self.eos_token_id = self.opt_tokenizer("\n", add_special_tokens=False).input_ids[0] self.opt_proj = nn.Linear(self.Qformer.config.hidden_size, self.opt_model.config.hidden_size) self.max_txt_len = max_txt_len self.prompt = "" prompt_tokens = self.opt_tokenizer(self.prompt, return_tensors="pt") self.prompt_length = prompt_tokens.attention_mask.sum(1) pos_model = PositionalEncoding1D(1408 // 3) x = torch.zeros(1, 256, 1408 // 3) self.pos_embedding = pos_model(x).squeeze().cuda() def forward(self, samples): pc_embeds = samples["pc_feat"] pc = samples["pc"].long() all_pcs = torch.zeros((pc_embeds.shape)) for j in range(pc.shape[0]): pcs = [] for i in range(3): pc_i = pc[j][:, i] pcs.append(self.pos_embedding[pc_i]) pcs = torch.cat(pcs, -1) all_pcs[j][:, :1407] = pcs all_pcs = all_pcs.cuda() pc_embeds = torch.cat([pc_embeds, all_pcs], 1) image_atts = torch.ones(pc_embeds.size()[:-1], dtype=torch.long).to(pc_embeds.device) query_tokens = self.query_tokens.expand(pc_embeds.shape[0], -1, -1) query_output = self.Qformer.bert( query_embeds=query_tokens, encoder_hidden_states=pc_embeds, 
encoder_attention_mask=image_atts, return_dict=True, ) inputs_opt = self.opt_proj(query_output.last_hidden_state) atts_opt = torch.ones(inputs_opt.size()[:-1], dtype=torch.long).to(pc_embeds.device) self.opt_tokenizer.padding_side = "left" prompt = self.prompt if self.prompt: text_input = [self.prompt.format(question) for question in samples["text_input"]] else: text_input = samples["text_input"] text = [t for t in text_input] answers = [(t + a + "\n") for (t, a) in zip(text, samples["answer"])] opt_tokens = self.opt_tokenizer( answers, return_tensors="pt", padding="longest", truncation=True, max_length=self.max_txt_len, ).to(pc_embeds.device) idxes = (torch.where(opt_tokens.input_ids == 1948)[1] + 2).cpu().numpy().tolist() output_tokens = self.opt_tokenizer( answers, padding="longest", truncation=True, max_length=self.max_txt_len, return_tensors="pt", ).to(pc_embeds.device) targets = output_tokens.input_ids.masked_fill(output_tokens.input_ids == self.opt_tokenizer.pad_token_id, -100) empty_targets = torch.ones(atts_opt.size(), dtype=torch.long).to(pc_embeds.device).fill_(-100) targets = torch.cat([empty_targets, targets], dim=1) inputs_embeds = self.opt_model.model.decoder.embed_tokens(opt_tokens.input_ids) inputs_embeds = torch.cat([inputs_opt, inputs_embeds], dim=1) attention_mask = torch.cat([atts_opt, opt_tokens.attention_mask], dim=1) with self.maybe_autocast(): outputs = self.opt_model( inputs_embeds=inputs_embeds, attention_mask=attention_mask, return_dict=True, labels=targets, ) loss = outputs.loss return {"loss": loss} @torch.no_grad() def generate( self, samples, use_nucleus_sampling=False, num_beams=5, max_length=30, min_length=1, top_p=0.9, repetition_penalty=1.0, length_penalty=1.0, num_captions=1, temperature=1, ): """ Args: samples (dict): A dictionary containing the following keys: - image (torch.Tensor): A tensor of shape (batch_size, 3, H, W) use_nucleus_sampling (bool): Whether to use nucleus sampling. If False, use top-k sampling. num_beams (int): Number of beams for beam search. 1 means no beam search. max_length (int): The maximum length of the sequence to be generated. min_length (int): The minimum length of the sequence to be generated. top_p (float): The cumulative probability for nucleus sampling. repetition_penalty (float): The parameter for repetition penalty. 1.0 means no penalty. num_captions (int): Number of captions to be generated for each image. Returns: captions (list): A list of strings of length batch_size * num_captions. 
""" with self.maybe_autocast(): pc_embeds = samples["pc_feat"] image_atts = torch.ones(pc_embeds.size()[:-1], dtype=torch.long).to(pc_embeds.device) query_tokens = self.query_tokens.expand(pc_embeds.shape[0], -1, -1) query_output = self.Qformer.bert( query_embeds=query_tokens, encoder_hidden_states=pc_embeds, encoder_attention_mask=image_atts, return_dict=True, ) inputs_opt = self.opt_proj(query_output.last_hidden_state) atts_opt = torch.ones(inputs_opt.size()[:-1], dtype=torch.long).to(pc_embeds.device) if "prompt" in samples.keys(): prompt = samples["prompt"] else: prompt = self.prompt prompt = [prompt] * pc_embeds.size(0) opt_tokens = self.opt_tokenizer(prompt, return_tensors="pt").to(pc_embeds.device) input_ids = opt_tokens.input_ids attention_mask = torch.cat([atts_opt, opt_tokens.attention_mask], dim=1) if use_nucleus_sampling: query_embeds = inputs_opt.repeat_interleave(num_captions, dim=0) num_beams = 1 else: query_embeds = inputs_opt.repeat_interleave(num_beams, dim=0) outputs = self.opt_model.generate( input_ids=input_ids, query_embeds=query_embeds, attention_mask=attention_mask, do_sample=use_nucleus_sampling, top_p=top_p, temperature=temperature, num_beams=num_beams, max_new_tokens=max_length, min_length=min_length, eos_token_id=self.eos_token_id, repetition_penalty=repetition_penalty, length_penalty=length_penalty, num_return_sequences=num_captions, ) prompt_length = opt_tokens.input_ids.shape[1] output_text = self.opt_tokenizer.batch_decode(outputs[:, prompt_length:], skip_special_tokens=False) output_text = [text.strip() for text in output_text] return output_text def predict_answers( self, samples, num_beams=5, inference_method="generate", use_nucleus_sampling=False, max_len=10, min_len=1, num_ans_candidates=128, answer_list=None, prompt="", length_penalty=-1, repetition_penalty=1.0, **kwargs, ): with self.maybe_autocast(): pc_embeds = samples["pc_feat"] pc = samples["pc"].long() all_pcs = torch.zeros((pc_embeds.shape)) for j in range(pc.shape[0]): pcs = [] for i in range(3): pc_i = pc[j][:, i] pcs.append(self.pos_embedding[pc_i]) pcs = torch.cat(pcs, -1) all_pcs[j][:, :1407] = pcs all_pcs = all_pcs.cuda() pc_embeds = torch.cat([pc_embeds, all_pcs], 1) image_atts = torch.ones(pc_embeds.size()[:-1], dtype=torch.long).to(pc_embeds.device) query_tokens = self.query_tokens.expand(pc_embeds.shape[0], -1, -1) query_output = self.Qformer.bert( query_embeds=query_tokens, encoder_hidden_states=pc_embeds, encoder_attention_mask=image_atts, return_dict=True, ) inputs_opt = self.opt_proj(query_output.last_hidden_state) atts_opt = torch.ones(inputs_opt.size()[:-1], dtype=torch.long).to(pc_embeds.device) if isinstance(samples["text_input"], str): samples["text_input"] = [samples["text_input"]] if self.prompt: text_input = [self.prompt.format(question) for question in samples["text_input"]] else: text_input = samples["text_input"] text = [t for t in text_input] input_tokens = self.opt_tokenizer(text, padding="longest", return_tensors="pt").to(pc_embeds.device) input_ids = input_tokens.input_ids encoder_atts = torch.cat([atts_opt, input_tokens.attention_mask], dim=1) if use_nucleus_sampling: input_embeds = inputs_opt.repeat_interleave(1, dim=0) # num_beams = 1 else: input_embeds = inputs_opt.repeat_interleave(num_beams, dim=0) device_type = "cuda" if "cuda" in str(self.device) else "cpu" with self.maybe_autocast(dtype=torch.float16): outputs = self.opt_model.generate( input_ids=input_ids, query_embeds=input_embeds, attention_mask=encoder_atts, do_sample=use_nucleus_sampling, 
num_beams=num_beams, max_new_tokens=max_len, min_length=min_len, length_penalty=length_penalty, # eos_token_id=self.eos_token_id, ) prompt_length = input_tokens.input_ids.shape[1] output_text = self.opt_tokenizer.batch_decode(outputs[:, prompt_length:], skip_special_tokens=False) return output_text @classmethod def from_config(cls, cfg): vit_model = cfg.get("vit_model", "eva_clip_g") img_size = cfg.get("image_size") num_query_token = cfg.get("num_query_token") opt_model = cfg.get("opt_model") drop_path_rate = cfg.get("drop_path_rate", 0) use_grad_checkpoint = cfg.get("use_grad_checkpoint", False) vit_precision = cfg.get("vit_precision", "fp16") freeze_vit = cfg.get("freeze_vit", True) prompt = cfg.get("prompt", "") max_txt_len = cfg.get("max_txt_len", 32) model = cls( vit_model=vit_model, img_size=img_size, drop_path_rate=drop_path_rate, use_grad_checkpoint=use_grad_checkpoint, vit_precision=vit_precision, freeze_vit=freeze_vit, num_query_token=num_query_token, opt_model=opt_model, prompt=prompt, max_txt_len=max_txt_len, ) model.load_checkpoint_from_config(cfg) return model
3D-LLM-main
3DLLM_BLIP2-base/lavis/models/blip2_models/blip2_opt.py
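The `blip2_opt.py` record above conditions a frozen OPT decoder on 3D features by projecting the Q-Former query outputs into OPT's embedding space, prepending them to the token embeddings, and masking the prefix (and padding) out of the loss with `-100` labels. Below is a minimal, hedged sketch of that prefix-conditioning pattern against a stock Hugging Face `OPTForCausalLM`; the random `query_feats` tensor merely stands in for `self.opt_proj(query_output.last_hidden_state)` and `facebook/opt-125m` is only a small placeholder checkpoint, so this illustrates the mechanism rather than reproducing the repository's training code.

```python
# Sketch only: prefix-conditioning a causal LM on projected query features,
# mirroring the forward() pattern in blip2_opt.py above. Assumptions noted inline.
import torch
from transformers import AutoTokenizer, OPTForCausalLM

tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m")  # small stand-in checkpoint
model = OPTForCausalLM.from_pretrained("facebook/opt-125m")
tokenizer.padding_side = "left"

batch_size, num_query_tokens, hidden = 2, 32, model.config.hidden_size
# Stand-in for self.opt_proj(query_output.last_hidden_state): one embedding per query token.
query_feats = torch.randn(batch_size, num_query_tokens, hidden)
atts_query = torch.ones(query_feats.shape[:-1], dtype=torch.long)

answers = [
    "Question: what object is in the room? Answer: a chair\n",
    "Question: how many chairs are there? Answer: four\n",
]
tokens = tokenizer(answers, return_tensors="pt", padding="longest")

# Supervise only real text tokens: pad positions and the visual prefix get label -100.
targets = tokens.input_ids.masked_fill(tokens.input_ids == tokenizer.pad_token_id, -100)
prefix_targets = torch.full(atts_query.shape, -100, dtype=torch.long)
labels = torch.cat([prefix_targets, targets], dim=1)

# Prepend the projected query features to the token embeddings and extend the mask.
token_embeds = model.model.decoder.embed_tokens(tokens.input_ids)
inputs_embeds = torch.cat([query_feats, token_embeds], dim=1)
attention_mask = torch.cat([atts_query, tokens.attention_mask], dim=1)

loss = model(inputs_embeds=inputs_embeds, attention_mask=attention_mask, labels=labels).loss
print(float(loss))
```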
# Copyright 2018 Mesh TensorFlow authors, T5 Authors and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch T5 model.""" import copy import math import os import warnings from typing import Optional, Tuple, Union import torch from torch import nn from torch.nn import CrossEntropyLoss from torch.utils.checkpoint import checkpoint from transformers.activations import ACT2FN from transformers.modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, ) from transformers.modeling_utils import PreTrainedModel from transformers.pytorch_utils import ( ALL_LAYERNORM_LAYERS, find_pruneable_heads_and_indices, prune_linear_layer, ) from transformers.utils import ( DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, add_start_docstrings_to_model_forward, is_torch_fx_proxy, logging, replace_return_docstrings, ) from transformers.utils.model_parallel_utils import assert_device_map, get_device_map from transformers.models.t5.configuration_t5 import T5Config logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "T5Config" _TOKENIZER_FOR_DOC = "T5Tokenizer" _CHECKPOINT_FOR_DOC = "t5-small" #################################################### # This dict contains ids and associated url # for the pretrained weights provided with the models #################################################### T5_PRETRAINED_MODEL_ARCHIVE_LIST = [ "t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b", # See all T5 models at https://huggingface.co/models?filter=t5 ] #################################################### # This is a conversion method from TF 1.0 to PyTorch # More details: https://medium.com/huggingface/from-tensorflow-to-pytorch-265f40ef2a28 #################################################### def load_tf_weights_in_t5(model, config, tf_checkpoint_path): """Load tf checkpoints in a pytorch model.""" try: import re import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." 
) raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info(f"Converting TensorFlow checkpoint from {tf_path}") # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] tf_weights = {} for name, shape in init_vars: logger.info(f"Loading TF weight {name} with shape {shape}") array = tf.train.load_variable(tf_path, name) names.append(name) tf_weights[name] = array for txt_name in names: name = txt_name.split("/") # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # which are not required for using pretrained model if any( n in [ "adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step", ] for n in name ): logger.info(f"Skipping {'/'.join(name)}") tf_weights.pop(txt_name, None) continue if "_slot_" in name[-1]: logger.info(f"Skipping {'/'.join(name)}") tf_weights.pop(txt_name, None) continue pointer = model array = tf_weights[txt_name] for m_name in name: if re.fullmatch(r"[A-Za-z]+_\d+", m_name): scope_names = re.split(r"_(\d+)", m_name) else: scope_names = [m_name] if scope_names[0] in ["kernel", "scale", "embedding"]: pointer = getattr(pointer, "weight") elif scope_names[0] == "self_attention": pointer = getattr(pointer, "layer") pointer = pointer[0] elif scope_names[0] == "enc_dec_attention": pointer = getattr(pointer, "layer") pointer = pointer[1] elif scope_names[0] == "dense_relu_dense": pointer = getattr(pointer, "layer") pointer = pointer[2] elif scope_names[0] == "rms_norm": if hasattr(pointer, "layer_norm"): pointer = getattr(pointer, "layer_norm") elif hasattr(pointer, "final_layer_norm"): pointer = getattr(pointer, "final_layer_norm") elif scope_names[0] == "scale": pointer = getattr(pointer, "weight") elif scope_names[0] == "output_bias" or scope_names[0] == "beta": pointer = getattr(pointer, "bias") elif scope_names[0] == "squad": pointer = getattr(pointer, "classifier") elif scope_names[0] == "decoder" and name[1] == "logits": continue elif scope_names[0] == "logits": pointer = getattr(pointer, "lm_head") elif scope_names[0] == "wi" and len(scope_names) > 1 and scope_names[1].isdigit(): pointer = getattr(pointer, f"wi_{scope_names[1]}") continue else: try: pointer = getattr(pointer, scope_names[0]) except AttributeError: logger.info(f"Skipping {'/'.join(name)}") continue if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] if scope_names[0] not in ["kernel", "scale", "embedding"]: pointer = getattr(pointer, "weight") if scope_names[0] != "embedding": logger.info(f"Transposing numpy weight of shape {array.shape} for {name}") array = np.transpose(array) try: assert ( pointer.shape == array.shape ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" except AssertionError as e: e.args += (pointer.shape, array.shape) raise logger.info(f"Initialize PyTorch weight {name}") pointer.data = torch.from_numpy(array.astype(np.float32)) tf_weights.pop(txt_name, None) logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}.") return model #################################################### # PyTorch Models are constructed by sub-classing # - torch.nn.Module for the layers and # - PreTrainedModel for the models (it-self a sub-class of nn.Module) #################################################### PARALLELIZE_DOCSTRING = r""" This is an experimental feature and is a subject to change at a moment's notice. Uses a device map to distribute attention modules of the model across several devices. 
If no device map is given, it will evenly distribute blocks across all devices. Args: device_map (`Dict[int, list]`, optional, defaults to None): A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always automatically mapped to the first device (for esoteric reasons). That means that the first device should have fewer attention modules mapped to it than other devices. For reference, the t5 models have the following number of attention modules: - t5-small: 6 - t5-base: 12 - t5-large: 24 - t5-3b: 24 - t5-11b: 24 Example: ```python # Here is an example of a device map on a machine with 4 GPUs using t5-3b, which has a total of 24 attention modules: model = T5ForConditionalGeneration.from_pretrained("t5-3b") device_map = { 0: [0, 1, 2], 1: [3, 4, 5, 6, 7, 8, 9], 2: [10, 11, 12, 13, 14, 15, 16], 3: [17, 18, 19, 20, 21, 22, 23], } model.parallelize(device_map) ``` """ DEPARALLELIZE_DOCSTRING = r""" Moves the model to cpu from a model parallel state. Example: ```python # On a 4 GPU machine with t5-3b: model = T5ForConditionalGeneration.from_pretrained("t5-3b") device_map = { 0: [0, 1, 2], 1: [3, 4, 5, 6, 7, 8, 9], 2: [10, 11, 12, 13, 14, 15, 16], 3: [17, 18, 19, 20, 21, 22, 23], } model.parallelize(device_map) # Splits the model across several devices model.deparallelize() # Put the model back on cpu and cleans memory by calling torch.cuda.empty_cache() ``` """ class T5LayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ Construct a layernorm module in the T5 style. No bias and no subtraction of mean. """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus varience is calculated # w/o mean and there is no bias. 
Additionally we want to make sure that the accumulation for # half-precision inputs is done in fp32 variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) # convert into half-precision if necessary if self.weight.dtype in [torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(self.weight.dtype) return self.weight * hidden_states try: from apex.normalization import FusedRMSNorm T5LayerNorm = FusedRMSNorm # noqa logger.info("Discovered apex.normalization.FusedRMSNorm - will use it instead of T5LayerNorm") except ImportError: # using the normal T5LayerNorm pass except Exception: logger.warning("discovered apex but it failed to load, falling back to T5LayerNorm") pass ALL_LAYERNORM_LAYERS.append(T5LayerNorm) class T5DenseActDense(nn.Module): def __init__(self, config: T5Config): super().__init__() self.wi = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_states = self.wi(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.wo(hidden_states) return hidden_states class T5DenseGatedActDense(nn.Module): def __init__(self, config: T5Config): super().__init__() self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_gelu = self.act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) hidden_states = self.wo(hidden_states) return hidden_states class T5LayerFF(nn.Module): def __init__(self, config: T5Config): super().__init__() if config.is_gated_act: self.DenseReluDense = T5DenseGatedActDense(config) else: self.DenseReluDense = T5DenseActDense(config) self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward(self, hidden_states): forwarded_states = self.layer_norm(hidden_states) forwarded_states = self.DenseReluDense(forwarded_states) hidden_states = hidden_states + self.dropout(forwarded_states) return hidden_states class T5Attention(nn.Module): def __init__(self, config: T5Config, has_relative_attention_bias=False): super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets self.relative_attention_max_distance = config.relative_attention_max_distance self.d_model = config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim # Mesh TensorFlow initialization to avoid scaling before softmax self.q = nn.Linear(self.d_model, self.inner_dim, bias=False) self.k = nn.Linear(self.d_model, self.inner_dim, bias=False) self.v = nn.Linear(self.d_model, self.inner_dim, bias=False) self.o = nn.Linear(self.inner_dim, self.d_model, bias=False) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) 
self.pruned_heads = set() self.gradient_checkpointing = False def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices(heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads) # Prune linear layers self.q = prune_linear_layer(self.q, index) self.k = prune_linear_layer(self.k, index) self.v = prune_linear_layer(self.v, index) self.o = prune_linear_layer(self.o, index, dim=1) # Update hyper params self.n_heads = self.n_heads - len(heads) self.inner_dim = self.key_value_proj_dim * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) @staticmethod def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): """ Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) """ relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets += (relative_position > 0).to(torch.long) * num_buckets relative_position = torch.abs(relative_position) else: relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) # now relative_position is in the range [0, inf) # half of the buckets are for exact increments in positions max_exact = num_buckets // 2 is_small = relative_position < max_exact # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance relative_position_if_large = max_exact + ( torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long) relative_position_if_large = torch.min( relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1), ) relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) return relative_buckets def compute_bias(self, query_length, key_length, device=None): """Compute binned relative position bias""" if device is None: device = self.relative_attention_bias.weight.device context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None] memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :] relative_position = memory_position - context_position # shape (query_length, key_length) relative_position_bucket = self._relative_position_bucket( relative_position, # shape (query_length, key_length) bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) values = self.relative_attention_bias(relative_position_bucket) # shape 
(query_length, key_length, num_heads) values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length) return values def forward( self, hidden_states, mask=None, key_value_states=None, position_bias=None, past_key_value=None, layer_head_mask=None, query_length=None, use_cache=False, output_attentions=False, ): """ Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states). """ # Input is (batch_size, seq_length, dim) # Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length) # past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head) batch_size, seq_length = hidden_states.shape[:2] real_seq_length = seq_length if past_key_value is not None: assert ( len(past_key_value) == 2 ), f"past_key_value should have 2 past states: keys and values. Got { len(past_key_value)} past states" real_seq_length += past_key_value[0].shape[2] if query_length is None else query_length key_length = real_seq_length if key_value_states is None else key_value_states.shape[1] def shape(states): """projection""" return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) def unshape(states): """reshape""" return states.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim) def project(hidden_states, proj_layer, key_value_states, past_key_value): """projects hidden states correctly to key/query states""" if key_value_states is None: # self-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(hidden_states)) elif past_key_value is None: # cross-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(key_value_states)) if past_key_value is not None: if key_value_states is None: # self-attn # (batch_size, n_heads, key_length, dim_per_head) hidden_states = torch.cat([past_key_value, hidden_states], dim=2) else: # cross-attn hidden_states = past_key_value return hidden_states # get query states query_states = shape(self.q(hidden_states)) # (batch_size, n_heads, seq_length, dim_per_head) # get key/value states key_states = project( hidden_states, self.k, key_value_states, past_key_value[0] if past_key_value is not None else None, ) value_states = project( hidden_states, self.v, key_value_states, past_key_value[1] if past_key_value is not None else None, ) # compute scores scores = torch.matmul( query_states, key_states.transpose(3, 2) ) # equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9 if position_bias is None: if not self.has_relative_attention_bias: position_bias = torch.zeros( (1, self.n_heads, real_seq_length, key_length), device=scores.device, dtype=scores.dtype, ) if self.gradient_checkpointing and self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device) # if key and values are already calculated # we want only the last query position bias if past_key_value is not None: position_bias = position_bias[:, :, -hidden_states.size(1) :, :] if mask is not None: position_bias = position_bias + mask # (batch_size, n_heads, seq_length, key_length) if self.pruned_heads: mask = torch.ones(position_bias.shape[1]) mask[list(self.pruned_heads)] = 0 position_bias_masked = position_bias[:, mask.bool()] else: position_bias_masked = position_bias scores += position_bias_masked attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as( scores ) # (batch_size, n_heads, 
seq_length, key_length) attn_weights = nn.functional.dropout( attn_weights, p=self.dropout, training=self.training ) # (batch_size, n_heads, seq_length, key_length) # Mask heads if we want to if layer_head_mask is not None: attn_weights = attn_weights * layer_head_mask attn_output = unshape(torch.matmul(attn_weights, value_states)) # (batch_size, seq_length, dim) attn_output = self.o(attn_output) present_key_value_state = (key_states, value_states) if (self.is_decoder and use_cache) else None outputs = (attn_output,) + (present_key_value_state,) + (position_bias,) if output_attentions: outputs = outputs + (attn_weights,) return outputs class T5LayerSelfAttention(nn.Module): def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.SelfAttention = T5Attention(config, has_relative_attention_bias=has_relative_attention_bias) self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.SelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them return outputs class T5LayerCrossAttention(nn.Module): def __init__(self, config): super().__init__() self.EncDecAttention = T5Attention(config, has_relative_attention_bias=False) self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, key_value_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, query_length=None, output_attentions=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.EncDecAttention( normed_hidden_states, mask=attention_mask, key_value_states=key_value_states, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, query_length=query_length, output_attentions=output_attentions, ) layer_output = hidden_states + self.dropout(attention_output[0]) outputs = (layer_output,) + attention_output[1:] # add attentions if we output them return outputs class T5Block(nn.Module): def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.is_decoder = config.is_decoder self.layer = nn.ModuleList() self.layer.append(T5LayerSelfAttention(config, has_relative_attention_bias=has_relative_attention_bias)) if self.is_decoder: self.layer.append(T5LayerCrossAttention(config)) self.layer.append(T5LayerFF(config)) def forward( self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, cross_attn_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, return_dict=True, ): if past_key_value is not None: if not self.is_decoder: logger.warning("`past_key_values` is passed to the encoder. 
Please make sure this is intended.") expected_num_past_key_values = 2 if encoder_hidden_states is None else 4 if len(past_key_value) != expected_num_past_key_values: raise ValueError( f"There should be {expected_num_past_key_values} past states. " f"{'2 (past / key) for cross attention. ' if expected_num_past_key_values == 4 else ''}" f"Got {len(past_key_value)} past key / value states" ) self_attn_past_key_value = past_key_value[:2] cross_attn_past_key_value = past_key_value[2:] else: self_attn_past_key_value, cross_attn_past_key_value = None, None self_attention_outputs = self.layer[0]( hidden_states, attention_mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=self_attn_past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states, present_key_value_state = self_attention_outputs[:2] attention_outputs = self_attention_outputs[2:] # Keep self-attention outputs and relative position weights # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) do_cross_attention = self.is_decoder and encoder_hidden_states is not None if do_cross_attention: # the actual query length is unknown for cross attention # if using past key value states. Need to inject it here if present_key_value_state is not None: query_length = present_key_value_state[0].shape[2] else: query_length = None cross_attention_outputs = self.layer[1]( hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, position_bias=encoder_decoder_position_bias, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, query_length=query_length, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = cross_attention_outputs[0] # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) # Combine self attn and cross attn key value states if present_key_value_state is not None: present_key_value_state = present_key_value_state + cross_attention_outputs[1] # Keep cross-attention outputs and relative position weights attention_outputs = attention_outputs + cross_attention_outputs[2:] # Apply Feed Forward layer hidden_states = self.layer[-1](hidden_states) # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if use_cache: outputs = outputs + (present_key_value_state,) + attention_outputs else: outputs = outputs + attention_outputs return outputs # hidden-states, present_key_value_states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights) class T5PreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = T5Config load_tf_weights = load_tf_weights_in_t5 base_model_prefix = "transformer" is_parallelizable = True supports_gradient_checkpointing = True _no_split_modules = ["T5Block"] @property def dummy_inputs(self): input_ids = torch.tensor(DUMMY_INPUTS) input_mask = torch.tensor(DUMMY_MASK) dummy_inputs = { "decoder_input_ids": input_ids, "input_ids": input_ids, "decoder_attention_mask": input_mask, } return dummy_inputs def _init_weights(self, module): """Initialize the weights""" factor = self.config.initializer_factor # Used for testing weights initialization if isinstance(module, T5LayerNorm): module.weight.data.fill_(factor * 1.0) elif isinstance(module, (T5Model, T5ForConditionalGeneration, T5EncoderModel)): # Mesh TensorFlow embeddings initialization # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624 module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0) if hasattr(module, "lm_head") and not self.config.tie_word_embeddings: module.lm_head.weight.data.normal_(mean=0.0, std=factor * 1.0) elif isinstance(module, T5DenseActDense): # Mesh TensorFlow FF initialization # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56 # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89 module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi, "bias") and module.wi.bias is not None: module.wi.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, "bias") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, T5DenseGatedActDense): module.wi_0.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi_0, "bias") and module.wi_0.bias is not None: module.wi_0.bias.data.zero_() module.wi_1.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi_1, "bias") and module.wi_1.bias is not None: module.wi_1.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, "bias") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, T5Attention): # Mesh TensorFlow attention initialization to avoid scaling before softmax # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136 d_model = self.config.d_model key_value_proj_dim = self.config.d_kv n_heads = self.config.num_heads module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5)) module.k.weight.data.normal_(mean=0.0, std=factor * (d_model ** -0.5)) module.v.weight.data.normal_(mean=0.0, std=factor * (d_model ** -0.5)) module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5)) if module.has_relative_attention_bias: module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5)) def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, (T5Attention, T5Stack)): module.gradient_checkpointing = value def _shift_right(self, input_ids): decoder_start_token_id = self.config.decoder_start_token_id pad_token_id = self.config.pad_token_id assert decoder_start_token_id is not None, ( "self.model.config.decoder_start_token_id has to be defined. 
In T5 it is usually set to the pad_token_id." " See T5 docs for more information" ) # shift inputs to the right if is_torch_fx_proxy(input_ids): # Item assignment is not supported natively for proxies. shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id) shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1) else: shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0] = decoder_start_token_id assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined." # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids class T5Stack(T5PreTrainedModel): def __init__(self, config, embed_tokens=None): super().__init__(config) self.embed_tokens = embed_tokens self.is_decoder = config.is_decoder self.block = nn.ModuleList( [T5Block(config, has_relative_attention_bias=bool(i == 0)) for i in range(config.num_layers)] ) self.final_layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) # Initialize weights and apply final processing self.post_init() # Model parallel self.model_parallel = False self.device_map = None self.gradient_checkpointing = False @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): # Check validity of device_map self.device_map = ( get_device_map(len(self.block), range(torch.cuda.device_count())) if device_map is None else device_map ) assert_device_map(self.device_map, len(self.block)) self.model_parallel = True self.first_device = "cpu" if "cpu" in self.device_map.keys() else "cuda:" + str(min(self.device_map.keys())) self.last_device = "cuda:" + str(max(self.device_map.keys())) # Load onto devices for k, v in self.device_map.items(): for layer in v: cuda_device = "cuda:" + str(k) self.block[layer] = self.block[layer].to(cuda_device) # Set embed_tokens to first layer self.embed_tokens = self.embed_tokens.to(self.first_device) # Set final layer norm to last device self.final_layer_norm = self.final_layer_norm.to(self.last_device) @add_start_docstrings(PARALLELIZE_DOCSTRING) def deparallelize(self): self.model_parallel = False self.device_map = None self.first_device = "cpu" self.last_device = "cpu" for i in range(len(self.block)): self.block[i] = self.block[i].to("cpu") self.embed_tokens = self.embed_tokens.to("cpu") self.final_layer_norm = self.final_layer_norm.to("cpu") torch.cuda.empty_cache() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, new_embeddings): self.embed_tokens = new_embeddings def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): # Model parallel if self.model_parallel: torch.cuda.set_device(self.first_device) self.embed_tokens = self.embed_tokens.to(self.first_device) use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict 
if input_ids is not None and inputs_embeds is not None: err_msg_prefix = "decoder_" if self.is_decoder else "" raise ValueError( f"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time" ) elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: err_msg_prefix = "decoder_" if self.is_decoder else "" raise ValueError(f"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds") if inputs_embeds is None: assert self.embed_tokens is not None, "You have to initialize the model with valid token embeddings" inputs_embeds = self.embed_tokens(input_ids) batch_size, seq_length = input_shape # required mask seq length can be calculated via length of past mask_seq_length = past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length if use_cache is True: assert self.is_decoder, f"`use_cache` can only be set to `True` if {self} is used as a decoder" if attention_mask is None: attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device) if self.is_decoder and encoder_attention_mask is None and encoder_hidden_states is not None: encoder_seq_length = encoder_hidden_states.shape[1] encoder_attention_mask = torch.ones( batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long, ) # initialize past_key_values with `None` if past does not exist if past_key_values is None: past_key_values = [None] * len(self.block) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.is_decoder and encoder_hidden_states is not None: ( encoder_batch_size, encoder_sequence_length, _, ) = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed head_mask = self.get_head_mask(head_mask, self.config.num_layers) cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers) present_key_value_states = () if use_cache else None all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None all_cross_attentions = () if (output_attentions and self.is_decoder) else None position_bias = None encoder_decoder_position_bias = None hidden_states = self.dropout(inputs_embeds) for i, (layer_module, past_key_value) in enumerate(zip(self.block, past_key_values)): layer_head_mask = head_mask[i] cross_attn_layer_head_mask = cross_attn_head_mask[i] # Model parallel if self.model_parallel: torch.cuda.set_device(hidden_states.device) # Ensure that attention_mask is always on the same device as hidden_states if attention_mask is not None: attention_mask = attention_mask.to(hidden_states.device) if position_bias is not None: position_bias = position_bias.to(hidden_states.device) if encoder_hidden_states is not None: 
encoder_hidden_states = encoder_hidden_states.to(hidden_states.device) if encoder_extended_attention_mask is not None: encoder_extended_attention_mask = encoder_extended_attention_mask.to(hidden_states.device) if encoder_decoder_position_bias is not None: encoder_decoder_position_bias = encoder_decoder_position_bias.to(hidden_states.device) if layer_head_mask is not None: layer_head_mask = layer_head_mask.to(hidden_states.device) if cross_attn_layer_head_mask is not None: cross_attn_layer_head_mask = cross_attn_layer_head_mask.to(hidden_states.device) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: if use_cache: logger.warning( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False def create_custom_forward(module): def custom_forward(*inputs): return tuple(module(*inputs, use_cache, output_attentions)) return custom_forward layer_outputs = checkpoint( create_custom_forward(layer_module), hidden_states, extended_attention_mask, position_bias, encoder_hidden_states, encoder_extended_attention_mask, encoder_decoder_position_bias, layer_head_mask, cross_attn_layer_head_mask, None, # past_key_value is always None with gradient checkpointing ) else: layer_outputs = layer_module( hidden_states, attention_mask=extended_attention_mask, position_bias=position_bias, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, encoder_decoder_position_bias=encoder_decoder_position_bias, layer_head_mask=layer_head_mask, cross_attn_layer_head_mask=cross_attn_layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) # layer_outputs is a tuple with: # hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights) if use_cache is False: layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:] hidden_states, present_key_value_state = layer_outputs[:2] # We share the position biases between the layers - the first layer store them # layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights), # (cross-attention position bias), (cross-attention weights) position_bias = layer_outputs[2] if self.is_decoder and encoder_hidden_states is not None: encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3] # append next layer key value states if use_cache: present_key_value_states = present_key_value_states + (present_key_value_state,) if output_attentions: all_attentions = all_attentions + (layer_outputs[3],) if self.is_decoder: all_cross_attentions = all_cross_attentions + (layer_outputs[5],) # Model Parallel: If it's the last layer for that device, put things on the next device if self.model_parallel: for k, v in self.device_map.items(): if i == v[-1] and "cuda:" + str(k) != self.last_device: hidden_states = hidden_states.to("cuda:" + str(k + 1)) hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, present_key_value_states, all_hidden_states, all_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=present_key_value_states, 
hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) T5_START_DOCSTRING = r""" The T5 model was proposed in [Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://arxiv.org/abs/1910.10683) by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu. It's an encoder decoder transformer pre-trained in a text-to-text denoising generative setting. This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`T5Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ T5_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. [What are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining take a look a [T5 Training](./t5#training). attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) T5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take a look at [T5 Training](./t5#training). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the decoder. 
Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be input (see `past_key_values`). This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value of `inputs_embeds`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ T5_ENCODER_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. To know more on how to prepare `input_ids` for pretraining take a look a [T5 Training](./t5#training). 
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ # Warning message for FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask __HEAD_MASK_WARNING_MSG = """ The input argument `head_mask` was split into two arguments `head_mask` and `decoder_head_mask`. Currently, `decoder_head_mask` is set to copy `head_mask`, but this feature is deprecated and will be removed in future versions. If you do not want to use any `decoder_head_mask` now, please set `decoder_head_mask = torch.ones(num_layers, num_heads)`. 
""" @add_start_docstrings( "The bare T5 Model transformer outputting raw hidden-states without any specific head on top.", T5_START_DOCSTRING, ) class T5Model(T5PreTrainedModel): _keys_to_ignore_on_load_missing = [ r"encoder.embed_tokens.weight", r"decoder.embed_tokens.weight", ] _keys_to_ignore_on_load_unexpected = [ r"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight", ] def __init__(self, config: T5Config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = T5Stack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.is_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = T5Stack(decoder_config, self.shared) # Initialize weights and apply final processing self.post_init() # Model parallel self.model_parallel = False self.device_map = None @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): self.device_map = ( get_device_map(len(self.encoder.block), range(torch.cuda.device_count())) if device_map is None else device_map ) assert_device_map(self.device_map, len(self.encoder.block)) self.encoder.parallelize(self.device_map) self.decoder.parallelize(self.device_map) self.model_parallel = True @add_start_docstrings(DEPARALLELIZE_DOCSTRING) def deparallelize(self): self.encoder.deparallelize() self.decoder.deparallelize() self.encoder = self.encoder.to("cpu") self.decoder = self.decoder.to("cpu") self.model_parallel = False self.device_map = None torch.cuda.empty_cache() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.Tensor] = None, decoder_inputs_embeds: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]: r""" Returns: Example: ```python >>> from transformers import T5Tokenizer, T5Model >>> tokenizer = T5Tokenizer.from_pretrained("t5-small") >>> model = T5Model.from_pretrained("t5-small") >>> input_ids = tokenizer( ... 
"Studies have been shown that owning a dog is good for you", return_tensors="pt" ... ).input_ids # Batch size 1 >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 >>> # preprocess: Prepend decoder_input_ids with start token which is pad token for T5Model. >>> # This is not needed for torch's T5ForConditionalGeneration as it does this internally using labels arg. >>> decoder_input_ids = model._shift_right(decoder_input_ids) >>> # forward pass >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) >>> last_hidden_states = outputs.last_hidden_state ```""" use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask if head_mask is not None and decoder_head_mask is None: if self.config.num_layers == self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask # Encode if needed (training, first prediction pass) if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) hidden_states = encoder_outputs[0] # Set device for model parallelism if self.model_parallel: torch.cuda.set_device(self.decoder.first_device) hidden_states = hidden_states.to(self.decoder.first_device) if decoder_input_ids is not None: decoder_input_ids = decoder_input_ids.to(self.decoder.first_device) if attention_mask is not None: attention_mask = attention_mask.to(self.decoder.first_device) if decoder_attention_mask is not None: decoder_attention_mask = decoder_attention_mask.to(self.decoder.first_device) # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @add_start_docstrings("""T5 Model with a `language modeling` head on top.""", T5_START_DOCSTRING) class T5ForConditionalGeneration(T5PreTrainedModel): _keys_to_ignore_on_load_missing = [ r"encoder.embed_tokens.weight", r"decoder.embed_tokens.weight", r"lm_head.weight", ] _keys_to_ignore_on_load_unexpected = [ r"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight", 
] def __init__(self, config: T5Config): super().__init__(config) self.model_dim = config.d_model self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = T5Stack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.is_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = T5Stack(decoder_config, self.shared) self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() # Model parallel self.model_parallel = False self.device_map = None @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): self.device_map = ( get_device_map(len(self.encoder.block), range(torch.cuda.device_count())) if device_map is None else device_map ) assert_device_map(self.device_map, len(self.encoder.block)) self.encoder.parallelize(self.device_map) self.decoder.parallelize(self.device_map) self.lm_head = self.lm_head.to(self.decoder.first_device) self.model_parallel = True @add_start_docstrings(DEPARALLELIZE_DOCSTRING) def deparallelize(self): self.encoder.deparallelize() self.decoder.deparallelize() self.encoder = self.encoder.to("cpu") self.decoder = self.decoder.to("cpu") self.lm_head = self.lm_head.to("cpu") self.model_parallel = False self.device_map = None torch.cuda.empty_cache() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def get_output_embeddings(self): return self.lm_head def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, reduction: Optional[str] = "mean", ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[-100, 0, ..., config.vocab_size - 1]`. 
All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` Returns: Examples: ```python >>> from transformers import T5Tokenizer, T5ForConditionalGeneration >>> tokenizer = T5Tokenizer.from_pretrained("t5-small") >>> model = T5ForConditionalGeneration.from_pretrained("t5-small") >>> # training >>> input_ids = tokenizer("The <extra_id_0> walks in <extra_id_1> park", return_tensors="pt").input_ids >>> labels = tokenizer("<extra_id_0> cute dog <extra_id_1> the <extra_id_2>", return_tensors="pt").input_ids >>> outputs = model(input_ids=input_ids, labels=labels) >>> loss = outputs.loss >>> logits = outputs.logits >>> # inference >>> input_ids = tokenizer( ... "summarize: studies have shown that owning a dog is good for you", return_tensors="pt" ... ).input_ids # Batch size 1 >>> outputs = model.generate(input_ids) >>> print(tokenizer.decode(outputs[0], skip_special_tokens=True)) >>> # studies have shown that owning a dog is good for you. ```""" use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask if head_mask is not None and decoder_head_mask is None: if self.config.num_layers == self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask # Encode if needed (training, first prediction pass) if encoder_outputs is None: # Convert encoder inputs in embeddings if needed encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) hidden_states = encoder_outputs[0] if self.model_parallel: torch.cuda.set_device(self.decoder.first_device) if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None: # get decoder inputs from shifting lm labels to the right decoder_input_ids = self._shift_right(labels) # Set device for model parallelism if self.model_parallel: torch.cuda.set_device(self.decoder.first_device) hidden_states = hidden_states.to(self.decoder.first_device) if decoder_input_ids is not None: decoder_input_ids = decoder_input_ids.to(self.decoder.first_device) if attention_mask is not None: attention_mask = attention_mask.to(self.decoder.first_device) if decoder_attention_mask is not None: decoder_attention_mask = decoder_attention_mask.to(self.decoder.first_device) # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = decoder_outputs[0] # Set device for model parallelism if self.model_parallel: torch.cuda.set_device(self.encoder.first_device) self.lm_head = 
self.lm_head.to(self.encoder.first_device) sequence_output = sequence_output.to(self.lm_head.weight.device) if self.config.tie_word_embeddings: # Rescale output before projecting on vocab # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 sequence_output = sequence_output * (self.model_dim ** -0.5) lm_logits = self.lm_head(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-100, reduction=reduction) loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1)) if reduction == "none": loss = loss.view(lm_logits.size(0), -1).sum(1) if not return_dict: output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs return ((loss,) + output) if loss is not None else output return Seq2SeqLMOutput( loss=loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, past=None, attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs, ): # cut decoder_input_ids if past is used if past is not None: input_ids = input_ids[:, -1:] return { "decoder_input_ids": input_ids, "past_key_values": past, "encoder_outputs": encoder_outputs, "attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, "use_cache": use_cache, } def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return self._shift_right(labels) def _reorder_cache(self, past, beam_idx): # if decoder past is not included in output # speedy decoding is disabled and no need to reorder if past is None: logger.warning("You might want to consider setting `use_cache=True` to speed up decoding") return past reordered_decoder_past = () for layer_past_states in past: # get the correct batch idx from layer past batch dim # batch dim of `past` is at 2nd position reordered_layer_past_states = () for layer_past_state in layer_past_states: # need to set correct `past` for each of the four key / value states reordered_layer_past_states = reordered_layer_past_states + ( layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)), ) assert reordered_layer_past_states[0].shape == layer_past_states[0].shape assert len(reordered_layer_past_states) == len(layer_past_states) reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,) return reordered_decoder_past @add_start_docstrings( "The bare T5 Model transformer outputting encoder's raw hidden-states without any specific head on top.", T5_START_DOCSTRING, ) class T5EncoderModel(T5PreTrainedModel): authorized_missing_keys = [ r"encoder.embed_tokens.weight", ] def __init__(self, config: T5Config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = T5Stack(encoder_config, self.shared) # Initialize weights and apply final processing self.post_init() # Model parallel self.model_parallel = False self.device_map = None 
@add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): self.device_map = ( get_device_map(len(self.encoder.block), range(torch.cuda.device_count())) if device_map is None else device_map ) assert_device_map(self.device_map, len(self.encoder.block)) self.encoder.parallelize(self.device_map) self.model_parallel = True @add_start_docstrings(DEPARALLELIZE_DOCSTRING) def deparallelize(self): self.encoder.deparallelize() self.encoder = self.encoder.to("cpu") self.model_parallel = False self.device_map = None torch.cuda.empty_cache() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) def get_encoder(self): return self.encoder def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.block[layer].layer[0].SelfAttention.prune_heads(heads) @add_start_docstrings_to_model_forward(T5_ENCODER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], BaseModelOutput]: r""" Returns: Example: ```python >>> from transformers import T5Tokenizer, T5EncoderModel >>> tokenizer = T5Tokenizer.from_pretrained("t5-small") >>> model = T5EncoderModel.from_pretrained("t5-small") >>> input_ids = tokenizer( ... "Studies have been shown that owning a dog is good for you", return_tensors="pt" ... ).input_ids # Batch size 1 >>> outputs = model(input_ids=input_ids) >>> last_hidden_states = outputs.last_hidden_state ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) return encoder_outputs
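# ---------------------------------------------------------------------------
# Usage sketch for the `reduction` argument that this file adds to
# T5ForConditionalGeneration.forward(). With reduction="none" the token-level
# cross-entropy is kept unreduced and then summed per sequence, so `loss`
# holds one value per example rather than a single scalar. Not taken from
# this repository: the "t5-small" checkpoint and the toy batch below are
# illustrative assumptions only.
from transformers import T5Tokenizer

tokenizer = T5Tokenizer.from_pretrained("t5-small")
model = T5ForConditionalGeneration.from_pretrained("t5-small")  # the class defined in this file

inputs = tokenizer(
    ["translate English to German: Hello.", "translate English to German: Good morning."],
    return_tensors="pt",
    padding=True,
)
labels = tokenizer(["Hallo.", "Guten Morgen."], return_tensors="pt", padding=True).input_ids

out = model(
    input_ids=inputs.input_ids,
    attention_mask=inputs.attention_mask,
    labels=labels,
    reduction="none",
)
print(out.loss.shape)  # torch.Size([2]) -- one summed loss per example
# ---------------------------------------------------------------------------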
3D-LLM-main
3DLLM_BLIP2-base/lavis/models/blip2_models/modeling_t5.py
# coding=utf-8 # Copyright 2022 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch OPT model.""" import random from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from transformers.activations import ACT2FN from transformers.modeling_outputs import ( BaseModelOutputWithPast, CausalLMOutputWithPast, ) from transformers.modeling_utils import PreTrainedModel from transformers.utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from transformers.models.opt.configuration_opt import OPTConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "facebook/opt-350m" _CONFIG_FOR_DOC = "OPTConfig" _TOKENIZER_FOR_DOC = "GPT2Tokenizer" # Base model docstring _EXPECTED_OUTPUT_SHAPE = [1, 8, 1024] # SequenceClassification docstring _CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION = "ArthurZ/opt-350m-dummy-sc" _SEQ_CLASS_EXPECTED_LOSS = 1.71 _SEQ_CLASS_EXPECTED_OUTPUT = "'LABEL_0'" # QuestionAnswering docstring _QA_EXPECTED_OUTPUT = "'a nice puppet'" _QA_EXPECTED_LOSS = 7.41 _QA_TARGET_START_INDEX = 14 _QA_TARGET_END_INDEX = 15 OPT_PRETRAINED_MODEL_ARCHIVE_LIST = [ "facebook/opt-125m", "facebook/opt-350m", "facebook/opt-1.3b", "facebook/opt-2.7b", "facebook/opt-6.7b", "facebook/opt-13b", "facebook/opt-30b", # See all OPT models at https://huggingface.co/models?filter=opt ] def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0): """ Make causal mask used for bi-directional self-attention. """ bsz, tgt_len = input_ids_shape mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min)) mask_cond = torch.arange(mask.size(-1)) mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) mask = mask.to(dtype) if past_key_values_length > 0: mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype), mask], dim=-1) return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. """ bsz, src_len = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) class OPTLearnedPositionalEmbedding(nn.Embedding): """ This module learns positional embeddings up to a fixed maximum size. """ def __init__(self, num_embeddings: int, embedding_dim: int): # OPT is set up so that if padding_idx is specified then offset the embedding ids by 2 # and adjust num_embeddings appropriately. 
Other models don't have this hack self.offset = 2 super().__init__(num_embeddings + self.offset, embedding_dim) def forward(self, attention_mask: torch.LongTensor, past_key_values_length: int = 0): """`input_ids_shape` is expected to be [bsz x seqlen].""" attention_mask = attention_mask.long() # create positions depending on attention_mask positions = (torch.cumsum(attention_mask, dim=1).type_as(attention_mask) * attention_mask).long() - 1 # cut positions if `past_key_values_length` is > 0 positions = positions[:, past_key_values_length:] return super().forward(positions + self.offset) class OPTAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." ) self.scaling = self.head_dim ** -0.5 self.is_decoder = is_decoder self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, _ = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. 
Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = torch.max(attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min)) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) # upcast to fp32 if the weights are in fp16. Please see https://github.com/huggingface/transformers/pull/17437 if attn_weights.dtype == torch.float16: attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(torch.float16) else: attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError( f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" f" {layer_head_mask.size()}" ) attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to be reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be # partitioned aross GPUs when using tensor-parallelism. 
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped, past_key_value class OPTDecoderLayer(nn.Module): def __init__(self, config: OPTConfig): super().__init__() self.embed_dim = config.hidden_size self.self_attn = OPTAttention( embed_dim=self.embed_dim, num_heads=config.num_attention_heads, dropout=config.attention_dropout, is_decoder=True, ) self.do_layer_norm_before = config.do_layer_norm_before self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.ffn_dim) self.fc2 = nn.Linear(config.ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, past_key_value: Optional[Tuple[torch.Tensor]] = None, ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`, *optional*): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). 
past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states """ residual = hidden_states # 125m, 1.7B, ..., 175B applies layer norm BEFORE attention if self.do_layer_norm_before: hidden_states = self.self_attn_layer_norm(hidden_states) # Self Attention hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, past_key_value=past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states # 350m applies layer norm AFTER attention if not self.do_layer_norm_before: hidden_states = self.self_attn_layer_norm(hidden_states) # Fully Connected hidden_states_shape = hidden_states.shape hidden_states = hidden_states.reshape(-1, hidden_states.size(-1)) residual = hidden_states # 125m, 1.7B, ..., 175B applies layer norm BEFORE attention if self.do_layer_norm_before: hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = (residual + hidden_states).view(hidden_states_shape) # 350m applies layer norm AFTER attention if not self.do_layer_norm_before: hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (present_key_value,) return outputs OPT_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`OPTConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ @add_start_docstrings( "The bare OPT Model outputting raw hidden-states without any specific head on top.", OPT_START_DOCSTRING, ) class OPTPreTrainedModel(PreTrainedModel): config_class = OPTConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["OPTDecoderLayer"] _keys_to_ignore_on_load_unexpected = [r"decoder\.version"] def _init_weights(self, module): std = self.config.init_std if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, (OPTDecoder)): module.gradient_checkpointing = value OPT_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. 
Indices can be obtained using [`GPT2Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) Indices can be obtained using [`OPTTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ class OPTDecoder(OPTPreTrainedModel): """ Transformer decoder consisting of *config.num_hidden_layers* layers. 
Each layer is a [`OPTDecoderLayer`] Args: config: OPTConfig """ def __init__(self, config: OPTConfig): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.layerdrop self.padding_idx = config.pad_token_id self.max_target_positions = config.max_position_embeddings self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.word_embed_proj_dim, self.padding_idx) self.embed_positions = OPTLearnedPositionalEmbedding(config.max_position_embeddings, config.hidden_size) if config.word_embed_proj_dim != config.hidden_size: self.project_out = nn.Linear(config.hidden_size, config.word_embed_proj_dim, bias=False) else: self.project_out = None if config.word_embed_proj_dim != config.hidden_size: self.project_in = nn.Linear(config.word_embed_proj_dim, config.hidden_size, bias=False) else: self.project_in = None # Note that the only purpose of `config._remove_final_layer_norm` is to keep backward compatibility # with checkpoints that have been fine-tuned before transformers v4.20.1 # see https://github.com/facebookresearch/metaseq/pull/164 if config.do_layer_norm_before and not config._remove_final_layer_norm: self.final_layer_norm = nn.LayerNorm(config.hidden_size) else: self.final_layer_norm = None self.layers = nn.ModuleList([OPTDecoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length): # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] combined_attention_mask = None if input_shape[-1] > 1: combined_attention_mask = _make_causal_mask( input_shape, inputs_embeds.dtype, past_key_values_length=past_key_values_length, ).to(inputs_embeds.device) if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to( inputs_embeds.device ) combined_attention_mask = ( expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask ) return combined_attention_mask def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, query_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPast]: r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`OPTTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. 
Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(num_hidden_layers, num_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if query_embeds is not None: inputs_embeds = torch.cat([query_embeds, inputs_embeds], dim=1) input_shape = inputs_embeds.size()[:-1] # embed positions if attention_mask is None: attention_mask = torch.ones(inputs_embeds.shape[:2], dtype=torch.bool, device=inputs_embeds.device) pos_embeds = self.embed_positions(attention_mask, past_key_values_length) attention_mask = self._prepare_decoder_attention_mask( attention_mask, input_shape, inputs_embeds, past_key_values_length ) if self.project_in is not None: inputs_embeds = self.project_in(inputs_embeds) hidden_states = inputs_embeds + pos_embeds # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None next_decoder_cache = () if use_cache else None # check if head_mask has a correct number of layers specified if desired for attn_mask, mask_name in zip([head_mask], ["head_mask"]): if attn_mask is not None: if attn_mask.size()[0] != (len(self.layers)): raise ValueError( f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for" f" {head_mask.size()[0]}." ) for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) dropout_probability = random.uniform(0, 1) if self.training and (dropout_probability < self.layerdrop): continue past_key_value = past_key_values[idx] if past_key_values is not None else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
) use_cache = False def create_custom_forward(module): def custom_forward(*inputs): # None for past_key_value return module(*inputs, output_attentions, None) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(decoder_layer), hidden_states, attention_mask, head_mask[idx] if head_mask is not None else None, None, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) if self.final_layer_norm is not None: hidden_states = self.final_layer_norm(hidden_states) if self.project_out is not None: hidden_states = self.project_out(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, ) @add_start_docstrings( "The bare OPT Model outputting raw hidden-states without any specific head on top.", OPT_START_DOCSTRING, ) class OPTModel(OPTPreTrainedModel): def __init__(self, config: OPTConfig): super().__init__(config) self.decoder = OPTDecoder(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.decoder.embed_tokens def set_input_embeddings(self, value): self.decoder.embed_tokens = value def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(OPT_INPUTS_DOCSTRING) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPast, config_class=_CONFIG_FOR_DOC, expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, query_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn) decoder_outputs = self.decoder( input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, query_embeds=query_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return decoder_outputs return BaseModelOutputWithPast( 
last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, hidden_states=decoder_outputs.hidden_states, attentions=decoder_outputs.attentions, ) class OPTForCausalLM(OPTPreTrainedModel): _keys_to_ignore_on_load_missing = [r"lm_head.weight"] def __init__(self, config): super().__init__(config) self.model = OPTModel(config) # the lm_head weight is automatically tied to the embed tokens weight self.lm_head = nn.Linear(config.word_embed_proj_dim, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.model.decoder.embed_tokens def set_input_embeddings(self, value): self.model.decoder.embed_tokens = value def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def set_decoder(self, decoder): self.model.decoder = decoder def get_decoder(self): return self.model.decoder @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, query_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, reduction: Optional[str] = "mean", ) -> Union[Tuple, CausalLMOutputWithPast]: r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`OPTTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(num_hidden_layers, num_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional tensors are only required when the model is used as a decoder in a Sequence to Sequence model. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. 
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. Returns: Example: ```python >>> from transformers import GPT2Tokenizer, OPTForCausalLM >>> model = OPTForCausalLM.from_pretrained("facebook/opt-350m") >>> tokenizer = GPT2Tokenizer.from_pretrained("facebook/opt-350m") >>> prompt = "Hey, are you consciours? Can you talk to me?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you consciours? Can you talk to me?\nI'm not consciours, but I can talk to you." 
```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model.decoder( input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, query_embeds=query_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) logits = self.lm_head(outputs[0]).contiguous() loss = None if labels is not None: logits = logits[:, -labels.size(1) :, :] # Shift so that tokens < n predict n shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss(reduction=reduction) loss = loss_fct(shift_logits.view(-1, self.config.vocab_size), shift_labels.view(-1)) if reduction == "none": loss = loss.view(shift_logits.size(0), -1).sum(1) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids=None, query_embeds=None, past=None, attention_mask=None, use_cache=None, **kwargs, ): # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly if attention_mask is None: if input_ids is not None: attention_mask = input_ids.new_ones(input_ids.shape) if past: input_ids = input_ids[:, -1:] query_embeds = None # first step, decoder_cached_states are empty return { "input_ids": input_ids, "query_embeds": query_embeds, "attention_mask": attention_mask, "past_key_values": past, "use_cache": use_cache, } @staticmethod def _reorder_cache(past, beam_idx): reordered_past = () for layer_past in past: reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),) return reordered_past
3D-LLM-main
3DLLM_BLIP2-base/lavis/models/blip2_models/modeling_opt.py
""" * Copyright (c) 2023, salesforce.com, inc. * All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause * By Junnan Li * Based on huggingface code base * https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert """ import math import os import warnings from dataclasses import dataclass from typing import Optional, Tuple, Dict, Any import torch from torch import Tensor, device, dtype, nn import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss import torch.nn.functional as F from transformers.activations import ACT2FN from transformers.file_utils import ( ModelOutput, ) from transformers.modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from transformers.modeling_utils import ( PreTrainedModel, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer, ) from transformers.utils import logging from transformers.models.bert.configuration_bert import BertConfig logger = logging.get_logger(__name__) class BertEmbeddings(nn.Module): """Construct the embeddings from word and position embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.config = config def forward( self, input_ids=None, position_ids=None, query_embeds=None, past_key_values_length=0, ): if input_ids is not None: seq_length = input_ids.size()[1] else: seq_length = 0 if position_ids is None: position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length].clone() if input_ids is not None: embeddings = self.word_embeddings(input_ids) if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings = embeddings + position_embeddings if query_embeds is not None: embeddings = torch.cat((query_embeds, embeddings), dim=1) else: embeddings = query_embeds embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class BertSelfAttention(nn.Module): def __init__(self, config, is_cross_attention): super().__init__() self.config = config if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( "The hidden size (%d) is not a multiple of the number of attention " "heads (%d)" % (config.hidden_size, config.num_attention_heads) ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = 
self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) if is_cross_attention: self.key = nn.Linear(config.encoder_width, self.all_head_size) self.value = nn.Linear(config.encoder_width, self.all_head_size) else: self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.save_attention = False def save_attn_gradients(self, attn_gradients): self.attn_gradients = attn_gradients def get_attn_gradients(self): return self.attn_gradients def save_attention_map(self, attention_map): self.attention_map = attention_map def get_attention_map(self): return self.attention_map def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + ( self.num_attention_heads, self.attention_head_size, ) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None if is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) mixed_query_layer = self.query(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. 
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": seq_length = hidden_states.size()[1] position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility if self.position_embedding_type == "relative_key": relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == "relative_key_query": relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in BertModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.Softmax(dim=-1)(attention_scores) if is_cross_attention and self.save_attention: self.save_attention_map(attention_probs) attention_probs.register_hook(self.save_attn_gradients) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs_dropped = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs_dropped = attention_probs_dropped * head_mask context_layer = torch.matmul(attention_probs_dropped, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) outputs = outputs + (past_key_value,) return outputs class BertSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertAttention(nn.Module): def __init__(self, config, is_cross_attention=False): super().__init__() self.self = BertSelfAttention(config, is_cross_attention) self.output = BertSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads, ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): self_outputs = self.self( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs class BertIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class BertOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertLayer(nn.Module): def __init__(self, config, layer_num): super().__init__() self.config = 
config self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = BertAttention(config) self.layer_num = layer_num if self.config.add_cross_attention and layer_num % self.config.cross_attention_freq == 0: self.crossattention = BertAttention(config, is_cross_attention=self.config.add_cross_attention) self.has_cross_attention = True else: self.has_cross_attention = False self.intermediate = BertIntermediate(config) self.output = BertOutput(config) self.intermediate_query = BertIntermediate(config) self.output_query = BertOutput(config) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, query_length=0, ): # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] if query_length > 0: query_attention_output = attention_output[:, :query_length, :] if self.has_cross_attention: assert ( encoder_hidden_states is not None ), "encoder_hidden_states must be given for cross-attention layers" cross_attention_outputs = self.crossattention( query_attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, output_attentions=output_attentions, ) query_attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights layer_output = apply_chunking_to_forward( self.feed_forward_chunk_query, self.chunk_size_feed_forward, self.seq_len_dim, query_attention_output, ) if attention_output.shape[1] > query_length: layer_output_text = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output[:, query_length:, :], ) layer_output = torch.cat([layer_output, layer_output_text], dim=1) else: layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output, ) outputs = (layer_output,) + outputs outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output def feed_forward_chunk_query(self, attention_output): intermediate_output = self.intermediate_query(attention_output) layer_output = self.output_query(intermediate_output, attention_output) return layer_output class BertEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([BertLayer(config, i) for i in range(config.num_hidden_layers)]) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=False, output_hidden_states=False, return_dict=True, query_length=0, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None next_decoder_cache = () if use_cache else 
None for i in range(self.config.num_hidden_layers): layer_module = self.layer[i] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if getattr(self.config, "gradient_checkpointing", False) and self.training: if use_cache: logger.warn( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, past_key_value, output_attentions, query_length) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, ) else: layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, query_length, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) class BertPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class BertPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class BertLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = BertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. 
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states class BertOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = BertLMPredictionHead(config) def forward(self, sequence_output): prediction_scores = self.predictions(sequence_output) return prediction_scores class BertPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = BertConfig base_model_prefix = "bert" _keys_to_ignore_on_load_missing = [r"position_ids"] def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() class BertModel(BertPreTrainedModel): """ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in `Attention is all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an input to the forward pass. """ def __init__(self, config, add_pooling_layer=False): super().__init__(config) self.config = config self.embeddings = BertEmbeddings(config) self.encoder = BertEncoder(config) self.pooler = BertPooler(config) if add_pooling_layer else None self.init_weights() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def get_extended_attention_mask( self, attention_mask: Tensor, input_shape: Tuple[int], device: device, is_decoder: bool, has_query: bool = False, ) -> Tensor: """ Makes broadcastable attention and causal masks so that future and masked tokens are ignored. Arguments: attention_mask (:obj:`torch.Tensor`): Mask with ones indicating tokens to attend to, zeros for tokens to ignore. input_shape (:obj:`Tuple[int]`): The shape of the input to the model. device: (:obj:`torch.device`): The device of the input to the model. Returns: :obj:`torch.Tensor` The extended attention mask, with a the same dtype as :obj:`attention_mask.dtype`. """ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
if attention_mask.dim() == 3: extended_attention_mask = attention_mask[:, None, :, :] elif attention_mask.dim() == 2: # Provided a padding mask of dimensions [batch_size, seq_length] # - if the model is a decoder, apply a causal mask in addition to the padding mask # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length] if is_decoder: batch_size, seq_length = input_shape seq_ids = torch.arange(seq_length, device=device) causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None] # add a prefix ones mask to the causal mask # causal and attention masks must have same type with pytorch version < 1.3 causal_mask = causal_mask.to(attention_mask.dtype) if causal_mask.shape[1] < attention_mask.shape[1]: prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1] if has_query: # UniLM style attention mask causal_mask = torch.cat( [ torch.zeros( (batch_size, prefix_seq_len, seq_length), device=device, dtype=causal_mask.dtype, ), causal_mask, ], axis=1, ) causal_mask = torch.cat( [ torch.ones( (batch_size, causal_mask.shape[1], prefix_seq_len), device=device, dtype=causal_mask.dtype, ), causal_mask, ], axis=-1, ) extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :] else: extended_attention_mask = attention_mask[:, None, None, :] else: raise ValueError( "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format( input_shape, attention_mask.shape ) ) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 return extended_attention_mask def forward( self, input_ids=None, attention_mask=None, position_ids=None, head_mask=None, query_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, is_decoder=False, ): r""" encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. 
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. use_cache (:obj:`bool`, `optional`): If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up decoding (see :obj:`past_key_values`). """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # use_cache = use_cache if use_cache is not None else self.config.use_cache if input_ids is None: assert query_embeds is not None, "You have to specify query_embeds when input_ids is None" # past_key_values_length past_key_values_length = ( past_key_values[0][0].shape[2] - self.config.query_length if past_key_values is not None else 0 ) query_length = query_embeds.shape[1] if query_embeds is not None else 0 embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, query_embeds=query_embeds, past_key_values_length=past_key_values_length, ) input_shape = embedding_output.size()[:-1] batch_size, seq_length = input_shape device = embedding_output.device if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. if is_decoder: extended_attention_mask = self.get_extended_attention_mask( attention_mask, input_ids.shape, device, is_decoder, has_query=(query_embeds is not None), ) else: extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device, is_decoder) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if encoder_hidden_states is not None: if type(encoder_hidden_states) == list: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size() else: ( encoder_batch_size, encoder_sequence_length, _, ) = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if type(encoder_attention_mask) == list: encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask] elif encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, 
encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, query_length=query_length, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) class BertLMHeadModel(BertPreTrainedModel): _keys_to_ignore_on_load_unexpected = [r"pooler"] _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"] def __init__(self, config): super().__init__(config) self.bert = BertModel(config, add_pooling_layer=False) self.cls = BertOnlyMLMHead(config) self.init_weights() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings def forward( self, input_ids=None, attention_mask=None, position_ids=None, head_mask=None, query_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, labels=None, past_key_values=None, use_cache=True, output_attentions=None, output_hidden_states=None, return_dict=None, return_logits=False, is_decoder=True, reduction="mean", ): r""" encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels n ``[0, ..., config.vocab_size]`` past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. use_cache (:obj:`bool`, `optional`): If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up decoding (see :obj:`past_key_values`). 
Returns: Example:: >>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig >>> import torch >>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased') >>> config = BertConfig.from_pretrained("bert-base-cased") >>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config) >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> prediction_logits = outputs.logits """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False if past_key_values is not None: query_embeds = None outputs = self.bert( input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, query_embeds=query_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, is_decoder=is_decoder, ) sequence_output = outputs[0] if query_embeds is not None: sequence_output = outputs[0][:, query_embeds.shape[1] :, :] prediction_scores = self.cls(sequence_output) if return_logits: return prediction_scores[:, :-1, :].contiguous() lm_loss = None if labels is not None: # we are doing next-token prediction; shift prediction scores and input ids by one shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() labels = labels[:, 1:].contiguous() loss_fct = CrossEntropyLoss(reduction=reduction, label_smoothing=0.1) lm_loss = loss_fct( shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1), ) if reduction == "none": lm_loss = lm_loss.view(prediction_scores.size(0), -1).sum(1) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((lm_loss,) + output) if lm_loss is not None else output return CausalLMOutputWithCrossAttentions( loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) def prepare_inputs_for_generation(self, input_ids, query_embeds, past=None, attention_mask=None, **model_kwargs): # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly if attention_mask is None: attention_mask = input_ids.new_ones(input_ids.shape) query_mask = input_ids.new_ones(query_embeds.shape[:-1]) attention_mask = torch.cat([query_mask, attention_mask], dim=-1) # cut decoder_input_ids if past is used if past is not None: input_ids = input_ids[:, -1:] return { "input_ids": input_ids, "query_embeds": query_embeds, "attention_mask": attention_mask, "past_key_values": past, "encoder_hidden_states": model_kwargs.get("encoder_hidden_states", None), "encoder_attention_mask": model_kwargs.get("encoder_attention_mask", None), "is_decoder": True, } def _reorder_cache(self, past, beam_idx): reordered_past = () for layer_past in past: reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),) return reordered_past class BertForMaskedLM(BertPreTrainedModel): _keys_to_ignore_on_load_unexpected = [r"pooler"] _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"] def __init__(self, config): super().__init__(config) self.bert = BertModel(config, add_pooling_layer=False) self.cls = BertOnlyMLMHead(config) self.init_weights() def get_output_embeddings(self): return self.cls.predictions.decoder def 
set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings def forward( self, input_ids=None, attention_mask=None, position_ids=None, head_mask=None, query_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, return_logits=False, is_decoder=False, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert( input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, query_embeds=query_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, is_decoder=is_decoder, ) if query_embeds is not None: sequence_output = outputs[0][:, query_embeds.shape[1] :, :] prediction_scores = self.cls(sequence_output) if return_logits: return prediction_scores masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() # -100 index = padding token masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
3D-LLM-main
3DLLM_BLIP2-base/lavis/models/blip2_models/Qformer.py
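A minimal usage sketch for the Q-Former variant of BertModel above, assuming this module is importable as lavis.models.blip2_models.Qformer (the import used in blip2.py below) and using 1408 as a stand-in for the frozen vision-encoder width; it mirrors how init_Qformer wires the config, but instantiates the model with random weights instead of loading bert-base-uncased.

import torch
import torch.nn as nn
from lavis.models.blip2_models.Qformer import BertConfig, BertLMHeadModel

cfg = BertConfig.from_pretrained("bert-base-uncased")
cfg.encoder_width = 1408          # width of the frozen image features (assumed value)
cfg.add_cross_attention = True    # cross-attention inserted every cross_attention_freq layers
cfg.cross_attention_freq = 2
cfg.query_length = 32             # number of learned query tokens

qformer = BertLMHeadModel(config=cfg)                  # randomly initialized for this sketch
query_tokens = nn.Parameter(torch.zeros(1, cfg.query_length, cfg.hidden_size))

image_feats = torch.randn(2, 257, cfg.encoder_width)   # e.g. ViT patch features, shape (B, N, D)
image_atts = torch.ones(image_feats.shape[:-1], dtype=torch.long)

out = qformer.bert(
    query_embeds=query_tokens.expand(image_feats.size(0), -1, -1),
    encoder_hidden_states=image_feats,
    encoder_attention_mask=image_atts,
    return_dict=True,
)
print(out.last_hidden_state.shape)  # torch.Size([2, 32, 768])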
""" Copyright (c) 2023, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import contextlib import logging import os import time import datetime import torch import torch.nn as nn import torch.distributed as dist import torch.nn.functional as F import lavis.common.dist_utils as dist_utils from lavis.common.dist_utils import download_cached_file from lavis.common.utils import is_url from lavis.common.logger import MetricLogger from lavis.models.base_model import BaseModel from lavis.models.blip2_models.Qformer import BertConfig, BertLMHeadModel from lavis.models.eva_vit import create_eva_vit_g from lavis.models.clip_vit import create_clip_vit_L from transformers import BertTokenizer class Blip2Base(BaseModel): @classmethod def init_tokenizer(cls): tokenizer = BertTokenizer.from_pretrained("bert-base-uncased") location_tokens = [] for i in range(11 * 11 * 11): location_tokens.append("<loc%d>" % i) tokenizer.add_special_tokens({"bos_token": "[DEC]", "additional_special_tokens": location_tokens}) return tokenizer def maybe_autocast(self, dtype=torch.float16): # if on cpu, don't use autocast # if on gpu, use autocast with dtype if provided, otherwise use torch.float16 enable_autocast = self.device != torch.device("cpu") if enable_autocast: return torch.cuda.amp.autocast(dtype=dtype) else: return contextlib.nullcontext() @classmethod def init_Qformer(cls, num_query_token, vision_width, cross_attention_freq=2): encoder_config = BertConfig.from_pretrained("bert-base-uncased") encoder_config.encoder_width = vision_width # insert cross-attention layer every other block encoder_config.add_cross_attention = True encoder_config.cross_attention_freq = cross_attention_freq encoder_config.query_length = num_query_token Qformer = BertLMHeadModel.from_pretrained("bert-base-uncased", config=encoder_config) query_tokens = nn.Parameter(torch.zeros(1, num_query_token, encoder_config.hidden_size)) query_tokens.data.normal_(mean=0.0, std=encoder_config.initializer_range) return Qformer, query_tokens @classmethod def init_vision_encoder(cls, model_name, img_size, drop_path_rate, use_grad_checkpoint, precision): assert model_name in [ "eva_clip_g", "clip_L", ], "vit model must be eva_clip_g or clip_L" if model_name == "eva_clip_g": visual_encoder = create_eva_vit_g(img_size, drop_path_rate, use_grad_checkpoint, precision) elif model_name == "clip_L": visual_encoder = create_clip_vit_L(img_size, use_grad_checkpoint, precision) ln_vision = LayerNorm(visual_encoder.num_features) return visual_encoder, ln_vision def load_from_pretrained(self, url_or_filename): if is_url(url_or_filename): cached_file = download_cached_file(url_or_filename, check_hash=False, progress=True) checkpoint = torch.load(cached_file, map_location="cpu") elif os.path.isfile(url_or_filename): checkpoint = torch.load(url_or_filename, map_location="cpu") else: raise RuntimeError("checkpoint url or path is invalid") state_dict = checkpoint["model"] msg = self.load_state_dict(state_dict, strict=False) # logging.info("Missing keys {}".format(msg.missing_keys)) logging.info("load checkpoint from %s" % url_or_filename) return msg def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self class LayerNorm(nn.LayerNorm): """Subclass torch's LayerNorm to handle fp16.""" def forward(self, x: torch.Tensor): orig_type = x.dtype ret 
= super().forward(x.type(torch.float32)) return ret.type(orig_type) def compute_sim_matrix(model, data_loader, **kwargs): k_test = kwargs.pop("k_test") metric_logger = MetricLogger(delimiter=" ") header = "Evaluation:" logging.info("Computing features for evaluation...") start_time = time.time() texts = data_loader.dataset.text num_text = len(texts) text_bs = 256 text_ids = [] text_embeds = [] text_atts = [] for i in range(0, num_text, text_bs): text = texts[i : min(num_text, i + text_bs)] text_input = model.tokenizer( text, padding="max_length", truncation=True, max_length=35, return_tensors="pt", ).to(model.device) text_feat = model.forward_text(text_input) text_embed = F.normalize(model.text_proj(text_feat)) text_embeds.append(text_embed) text_ids.append(text_input.input_ids) text_atts.append(text_input.attention_mask) text_embeds = torch.cat(text_embeds, dim=0) text_ids = torch.cat(text_ids, dim=0) text_atts = torch.cat(text_atts, dim=0) vit_feats = [] image_embeds = [] for samples in data_loader: image = samples["image"] image = image.to(model.device) image_feat, vit_feat = model.forward_image(image) image_embed = model.vision_proj(image_feat) image_embed = F.normalize(image_embed, dim=-1) vit_feats.append(vit_feat.cpu()) image_embeds.append(image_embed) vit_feats = torch.cat(vit_feats, dim=0) image_embeds = torch.cat(image_embeds, dim=0) sims_matrix = [] for image_embed in image_embeds: sim_q2t = image_embed @ text_embeds.t() sim_i2t, _ = sim_q2t.max(0) sims_matrix.append(sim_i2t) sims_matrix = torch.stack(sims_matrix, dim=0) score_matrix_i2t = torch.full((len(data_loader.dataset.image), len(texts)), -100.0).to(model.device) num_tasks = dist_utils.get_world_size() rank = dist_utils.get_rank() step = sims_matrix.size(0) // num_tasks + 1 start = rank * step end = min(sims_matrix.size(0), start + step) for i, sims in enumerate(metric_logger.log_every(sims_matrix[start:end], 50, header)): topk_sim, topk_idx = sims.topk(k=k_test, dim=0) image_inputs = vit_feats[start + i].repeat(k_test, 1, 1).to(model.device) score = model.compute_itm( image_inputs=image_inputs, text_ids=text_ids[topk_idx], text_atts=text_atts[topk_idx], ).float() score_matrix_i2t[start + i, topk_idx] = score + topk_sim sims_matrix = sims_matrix.t() score_matrix_t2i = torch.full((len(texts), len(data_loader.dataset.image)), -100.0).to(model.device) step = sims_matrix.size(0) // num_tasks + 1 start = rank * step end = min(sims_matrix.size(0), start + step) for i, sims in enumerate(metric_logger.log_every(sims_matrix[start:end], 50, header)): topk_sim, topk_idx = sims.topk(k=k_test, dim=0) image_inputs = vit_feats[topk_idx.cpu()].to(model.device) score = model.compute_itm( image_inputs=image_inputs, text_ids=text_ids[start + i].repeat(k_test, 1), text_atts=text_atts[start + i].repeat(k_test, 1), ).float() score_matrix_t2i[start + i, topk_idx] = score + topk_sim if dist_utils.is_dist_avail_and_initialized(): dist.barrier() torch.distributed.all_reduce(score_matrix_i2t, op=torch.distributed.ReduceOp.SUM) torch.distributed.all_reduce(score_matrix_t2i, op=torch.distributed.ReduceOp.SUM) total_time = time.time() - start_time total_time_str = str(datetime.timedelta(seconds=int(total_time))) logging.info("Evaluation time {}".format(total_time_str)) return score_matrix_i2t.cpu().numpy(), score_matrix_t2i.cpu().numpy()
3D-LLM-main
3DLLM_BLIP2-base/lavis/models/blip2_models/blip2.py
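A short sketch of the tokenizer helper above, assuming the module is importable as lavis.models.blip2_models.blip2 and that bert-base-uncased is available locally or downloadable; the 11 * 11 * 11 = 1331 <locN> tokens added by init_tokenizer are kept as single tokens, which is how 3D locations are written into text.

from lavis.models.blip2_models.blip2 import Blip2Base

tokenizer = Blip2Base.init_tokenizer()          # BERT tokenizer + "[DEC]" bos token + 1331 <locN> tokens
ids = tokenizer("the chair is at <loc37>", return_tensors="pt").input_ids
print(tokenizer.convert_ids_to_tokens(ids[0]))  # "<loc37>" survives as one token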
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import warnings import torch def _is_tensor_video_clip(clip): if not torch.is_tensor(clip): raise TypeError("clip should be Tensor. Got %s" % type(clip)) if not clip.ndimension() == 4: raise ValueError("clip should be 4D. Got %dD" % clip.dim()) return True def crop(clip, i, j, h, w): """ Args: clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W) """ if len(clip.size()) != 4: raise ValueError("clip should be a 4D tensor") return clip[..., i : i + h, j : j + w] def resize(clip, target_size, interpolation_mode): if len(target_size) != 2: raise ValueError(f"target size should be tuple (height, width), instead got {target_size}") return torch.nn.functional.interpolate(clip, size=target_size, mode=interpolation_mode, align_corners=False) def resized_crop(clip, i, j, h, w, size, interpolation_mode="bilinear"): """ Do spatial cropping and resizing to the video clip Args: clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W) i (int): i in (i,j) i.e coordinates of the upper left corner. j (int): j in (i,j) i.e coordinates of the upper left corner. h (int): Height of the cropped region. w (int): Width of the cropped region. size (tuple(int, int)): height and width of resized clip Returns: clip (torch.tensor): Resized and cropped clip. Size is (C, T, H, W) """ if not _is_tensor_video_clip(clip): raise ValueError("clip should be a 4D torch.tensor") clip = crop(clip, i, j, h, w) clip = resize(clip, size, interpolation_mode) return clip def center_crop(clip, crop_size): if not _is_tensor_video_clip(clip): raise ValueError("clip should be a 4D torch.tensor") h, w = clip.size(-2), clip.size(-1) th, tw = crop_size if h < th or w < tw: raise ValueError("height and width must be no smaller than crop_size") i = int(round((h - th) / 2.0)) j = int(round((w - tw) / 2.0)) return crop(clip, i, j, th, tw) def to_tensor(clip): """ Convert tensor data type from uint8 to float, divide value by 255.0 and permute the dimensions of clip tensor Args: clip (torch.tensor, dtype=torch.uint8): Size is (T, H, W, C) Return: clip (torch.tensor, dtype=torch.float): Size is (C, T, H, W) """ _is_tensor_video_clip(clip) if not clip.dtype == torch.uint8: raise TypeError("clip tensor should have data type uint8. Got %s" % str(clip.dtype)) return clip.float().permute(3, 0, 1, 2) / 255.0 def normalize(clip, mean, std, inplace=False): """ Args: clip (torch.tensor): Video clip to be normalized. Size is (C, T, H, W) mean (tuple): pixel RGB mean. Size is (3) std (tuple): pixel standard deviation. Size is (3) Returns: normalized clip (torch.tensor): Size is (C, T, H, W) """ if not _is_tensor_video_clip(clip): raise ValueError("clip should be a 4D torch.tensor") if not inplace: clip = clip.clone() mean = torch.as_tensor(mean, dtype=clip.dtype, device=clip.device) std = torch.as_tensor(std, dtype=clip.dtype, device=clip.device) clip.sub_(mean[:, None, None, None]).div_(std[:, None, None, None]) return clip def hflip(clip): """ Args: clip (torch.tensor): Video clip to be normalized. Size is (C, T, H, W) Returns: flipped clip (torch.tensor): Size is (C, T, H, W) """ if not _is_tensor_video_clip(clip): raise ValueError("clip should be a 4D torch.tensor") return clip.flip(-1)
3D-LLM-main
3DLLM_BLIP2-base/lavis/processors/functional_video.py
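A quick sketch exercising the clip helpers above on a random uint8 video, assuming the module is importable as lavis.processors.functional_video (the alias transforms_video.py uses); shapes follow the (T, H, W, C) -> (C, T, H, W) convention documented in to_tensor, and the mean/std values are CLIP statistics used only as placeholders.

import torch
from lavis.processors import functional_video as F

frames = torch.randint(0, 256, (8, 240, 320, 3), dtype=torch.uint8)   # (T, H, W, C)
clip = F.to_tensor(frames)                                            # float (C, T, H, W) in [0, 1]
clip = F.resized_crop(clip, 0, 0, 240, 240, (224, 224))               # crop a 240x240 window, resize to 224x224
clip = F.normalize(clip,
                   mean=(0.48145466, 0.4578275, 0.40821073),
                   std=(0.26862954, 0.26130258, 0.27577711))
clip = F.hflip(clip)
print(clip.shape)                                                      # torch.Size([3, 8, 224, 224])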
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import re from lavis.common.registry import registry from lavis.processors.base_processor import BaseProcessor from lavis.processors.randaugment import RandomAugment from omegaconf import OmegaConf from torchvision import transforms from torchvision.transforms.functional import InterpolationMode class BlipImageBaseProcessor(BaseProcessor): def __init__(self, mean=None, std=None): if mean is None: mean = (0.48145466, 0.4578275, 0.40821073) if std is None: std = (0.26862954, 0.26130258, 0.27577711) self.normalize = transforms.Normalize(mean, std) @registry.register_processor("blip_caption") class BlipCaptionProcessor(BaseProcessor): def __init__(self, prompt="", max_words=50): self.prompt = prompt self.max_words = max_words def __call__(self, caption): caption = self.prompt + self.pre_caption(caption) return caption @classmethod def from_config(cls, cfg=None): if cfg is None: cfg = OmegaConf.create() prompt = cfg.get("prompt", "") max_words = cfg.get("max_words", 50) return cls(prompt=prompt, max_words=max_words) def pre_caption(self, caption): caption = re.sub( r"([.!\"()*#:;~])", " ", caption.lower(), ) caption = re.sub( r"\s{2,}", " ", caption, ) caption = caption.rstrip("\n") caption = caption.strip(" ") # truncate caption caption_words = caption.split(" ") if len(caption_words) > self.max_words: caption = " ".join(caption_words[: self.max_words]) return caption @registry.register_processor("blip_question") class BlipQuestionProcessor(BaseProcessor): def __init__(self, max_words=50): self.max_words = max_words def __call__(self, question): return self.pre_question(question) @classmethod def from_config(cls, cfg=None): if cfg is None: cfg = OmegaConf.create() max_words = cfg.get("max_words", 50) return cls(max_words=max_words) def pre_question(self, question): question = re.sub( r"([.!\"()*#:;~])", "", question.lower(), ) question = question.rstrip(" ") # truncate question question_words = question.split(" ") if len(question_words) > self.max_words: question = " ".join(question_words[: self.max_words]) return question @registry.register_processor("blip_image_train") class BlipImageTrainProcessor(BlipImageBaseProcessor): def __init__(self, image_size=384, mean=None, std=None, min_scale=0.5, max_scale=1.0): super().__init__(mean=mean, std=std) self.transform = transforms.Compose( [ transforms.RandomResizedCrop( image_size, scale=(min_scale, max_scale), interpolation=InterpolationMode.BICUBIC, ), transforms.RandomHorizontalFlip(), RandomAugment( 2, 5, isPIL=True, augs=[ "Identity", "AutoContrast", "Brightness", "Sharpness", "Equalize", "ShearX", "ShearY", "TranslateX", "TranslateY", "Rotate", ], ), transforms.ToTensor(), self.normalize, ] ) def __call__(self, item): return self.transform(item) @classmethod def from_config(cls, cfg=None): if cfg is None: cfg = OmegaConf.create() image_size = cfg.get("image_size", 384) mean = cfg.get("mean", None) std = cfg.get("std", None) min_scale = cfg.get("min_scale", 0.5) max_scale = cfg.get("max_scale", 1.0) return cls( image_size=image_size, mean=mean, std=std, min_scale=min_scale, max_scale=max_scale, ) @registry.register_processor("blip_image_eval") class BlipImageEvalProcessor(BlipImageBaseProcessor): def __init__(self, image_size=384, mean=None, std=None): super().__init__(mean=mean, std=std) self.transform = transforms.Compose( [ 
transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC), transforms.ToTensor(), self.normalize, ] ) def __call__(self, item): return self.transform(item) @classmethod def from_config(cls, cfg=None): if cfg is None: cfg = OmegaConf.create() image_size = cfg.get("image_size", 384) mean = cfg.get("mean", None) std = cfg.get("std", None) return cls(image_size=image_size, mean=mean, std=std) @registry.register_processor("blip2_image_train") class Blip2ImageTrainProcessor(BlipImageBaseProcessor): def __init__(self, image_size=364, mean=None, std=None, min_scale=0.5, max_scale=1.0): super().__init__(mean=mean, std=std) self.transform = transforms.Compose( [ transforms.RandomResizedCrop( image_size, scale=(min_scale, max_scale), interpolation=InterpolationMode.BICUBIC, ), transforms.RandomHorizontalFlip(), transforms.ToTensor(), self.normalize, ] ) def __call__(self, item): return self.transform(item) @classmethod def from_config(cls, cfg=None): if cfg is None: cfg = OmegaConf.create() image_size = cfg.get("image_size", 364) mean = cfg.get("mean", None) std = cfg.get("std", None) min_scale = cfg.get("min_scale", 0.5) max_scale = cfg.get("max_scale", 1.0) return cls( image_size=image_size, mean=mean, std=std, min_scale=min_scale, max_scale=max_scale, )
3D-LLM-main
3DLLM_BLIP2-base/lavis/processors/blip_processors.py
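A small sketch of the two most common processors above, assuming lavis.processors.blip_processors is importable and that a blank PIL image stands in for real data; from_config accepts an OmegaConf node, and BlipCaptionProcessor lower-cases, strips punctuation, and prepends the prompt.

from PIL import Image
from omegaconf import OmegaConf
from lavis.processors.blip_processors import BlipImageEvalProcessor, BlipCaptionProcessor

img_proc = BlipImageEvalProcessor.from_config(OmegaConf.create({"image_size": 224}))
txt_proc = BlipCaptionProcessor(prompt="a photo of ", max_words=30)

image = Image.new("RGB", (640, 480))          # stand-in for a real image
pixels = img_proc(image)                      # tensor of shape (3, 224, 224)
caption = txt_proc("A BLACK dog; sitting!!")  # -> "a photo of a black dog sitting"
print(pixels.shape, caption)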
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ from lavis.processors.base_processor import BaseProcessor from lavis.processors.blip_processors import ( BlipImageTrainProcessor, Blip2ImageTrainProcessor, BlipImageEvalProcessor, BlipCaptionProcessor, ) from lavis.common.registry import registry __all__ = [ "BaseProcessor", # BLIP "BlipImageTrainProcessor", "Blip2ImageTrainProcessor", "BlipImageEvalProcessor", "BlipCaptionProcessor", ] def load_processor(name, cfg=None): """ Example >>> processor = load_processor("alpro_video_train", cfg=None) """ processor = registry.get_processor_class(name).from_config(cfg) return processor
3D-LLM-main
3DLLM_BLIP2-base/lavis/processors/__init__.py
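A sketch of load_processor, assuming the lavis registry is populated when this package is imported; the name must match a @registry.register_processor key from blip_processors.py.

from lavis.processors import load_processor

proc = load_processor("blip_caption", cfg=None)   # BlipCaptionProcessor with default prompt="" and max_words=50
print(proc("A small EXAMPLE caption."))           # -> "a small example caption"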
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ from omegaconf import OmegaConf class BaseProcessor: def __init__(self): self.transform = lambda x: x return def __call__(self, item): return self.transform(item) @classmethod def from_config(cls, cfg=None): return cls() def build(self, **kwargs): cfg = OmegaConf.create(kwargs) return self.from_config(cfg)
3D-LLM-main
3DLLM_BLIP2-base/lavis/processors/base_processor.py
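For completeness, a sketch of the default behaviour above: BaseProcessor is an identity transform, and build() simply round-trips keyword arguments through OmegaConf into from_config.

from lavis.processors.base_processor import BaseProcessor

proc = BaseProcessor()
assert proc("unchanged") == "unchanged"   # default transform is the identity
clone = proc.build(image_size=224)        # kwargs -> OmegaConf node -> from_config -> new BaseProcessor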
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import cv2 import numpy as np import torch ## aug functions def identity_func(img): return img def autocontrast_func(img, cutoff=0): """ same output as PIL.ImageOps.autocontrast """ n_bins = 256 def tune_channel(ch): n = ch.size cut = cutoff * n // 100 if cut == 0: high, low = ch.max(), ch.min() else: hist = cv2.calcHist([ch], [0], None, [n_bins], [0, n_bins]) low = np.argwhere(np.cumsum(hist) > cut) low = 0 if low.shape[0] == 0 else low[0] high = np.argwhere(np.cumsum(hist[::-1]) > cut) high = n_bins - 1 if high.shape[0] == 0 else n_bins - 1 - high[0] if high <= low: table = np.arange(n_bins) else: scale = (n_bins - 1) / (high - low) offset = -low * scale table = np.arange(n_bins) * scale + offset table[table < 0] = 0 table[table > n_bins - 1] = n_bins - 1 table = table.clip(0, 255).astype(np.uint8) return table[ch] channels = [tune_channel(ch) for ch in cv2.split(img)] out = cv2.merge(channels) return out def equalize_func(img): """ same output as PIL.ImageOps.equalize PIL's implementation is different from cv2.equalize """ n_bins = 256 def tune_channel(ch): hist = cv2.calcHist([ch], [0], None, [n_bins], [0, n_bins]) non_zero_hist = hist[hist != 0].reshape(-1) step = np.sum(non_zero_hist[:-1]) // (n_bins - 1) if step == 0: return ch n = np.empty_like(hist) n[0] = step // 2 n[1:] = hist[:-1] table = (np.cumsum(n) // step).clip(0, 255).astype(np.uint8) return table[ch] channels = [tune_channel(ch) for ch in cv2.split(img)] out = cv2.merge(channels) return out def rotate_func(img, degree, fill=(0, 0, 0)): """ like PIL, rotate by degree, not radians """ H, W = img.shape[0], img.shape[1] center = W / 2, H / 2 M = cv2.getRotationMatrix2D(center, degree, 1) out = cv2.warpAffine(img, M, (W, H), borderValue=fill) return out def solarize_func(img, thresh=128): """ same output as PIL.ImageOps.posterize """ table = np.array([el if el < thresh else 255 - el for el in range(256)]) table = table.clip(0, 255).astype(np.uint8) out = table[img] return out def color_func(img, factor): """ same output as PIL.ImageEnhance.Color """ ## implementation according to PIL definition, quite slow # degenerate = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)[:, :, np.newaxis] # out = blend(degenerate, img, factor) # M = ( # np.eye(3) * factor # + np.float32([0.114, 0.587, 0.299]).reshape(3, 1) * (1. 
- factor) # )[np.newaxis, np.newaxis, :] M = np.float32([[0.886, -0.114, -0.114], [-0.587, 0.413, -0.587], [-0.299, -0.299, 0.701]]) * factor + np.float32( [[0.114], [0.587], [0.299]] ) out = np.matmul(img, M).clip(0, 255).astype(np.uint8) return out def contrast_func(img, factor): """ same output as PIL.ImageEnhance.Contrast """ mean = np.sum(np.mean(img, axis=(0, 1)) * np.array([0.114, 0.587, 0.299])) table = np.array([(el - mean) * factor + mean for el in range(256)]).clip(0, 255).astype(np.uint8) out = table[img] return out def brightness_func(img, factor): """ same output as PIL.ImageEnhance.Contrast """ table = (np.arange(256, dtype=np.float32) * factor).clip(0, 255).astype(np.uint8) out = table[img] return out def sharpness_func(img, factor): """ The differences the this result and PIL are all on the 4 boundaries, the center areas are same """ kernel = np.ones((3, 3), dtype=np.float32) kernel[1][1] = 5 kernel /= 13 degenerate = cv2.filter2D(img, -1, kernel) if factor == 0.0: out = degenerate elif factor == 1.0: out = img else: out = img.astype(np.float32) degenerate = degenerate.astype(np.float32)[1:-1, 1:-1, :] out[1:-1, 1:-1, :] = degenerate + factor * (out[1:-1, 1:-1, :] - degenerate) out = out.astype(np.uint8) return out def shear_x_func(img, factor, fill=(0, 0, 0)): H, W = img.shape[0], img.shape[1] M = np.float32([[1, factor, 0], [0, 1, 0]]) out = cv2.warpAffine(img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR).astype(np.uint8) return out def translate_x_func(img, offset, fill=(0, 0, 0)): """ same output as PIL.Image.transform """ H, W = img.shape[0], img.shape[1] M = np.float32([[1, 0, -offset], [0, 1, 0]]) out = cv2.warpAffine(img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR).astype(np.uint8) return out def translate_y_func(img, offset, fill=(0, 0, 0)): """ same output as PIL.Image.transform """ H, W = img.shape[0], img.shape[1] M = np.float32([[1, 0, 0], [0, 1, -offset]]) out = cv2.warpAffine(img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR).astype(np.uint8) return out def posterize_func(img, bits): """ same output as PIL.ImageOps.posterize """ out = np.bitwise_and(img, np.uint8(255 << (8 - bits))) return out def shear_y_func(img, factor, fill=(0, 0, 0)): H, W = img.shape[0], img.shape[1] M = np.float32([[1, 0, 0], [factor, 1, 0]]) out = cv2.warpAffine(img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR).astype(np.uint8) return out def cutout_func(img, pad_size, replace=(0, 0, 0)): replace = np.array(replace, dtype=np.uint8) H, W = img.shape[0], img.shape[1] rh, rw = np.random.random(2) pad_size = pad_size // 2 ch, cw = int(rh * H), int(rw * W) x1, x2 = max(ch - pad_size, 0), min(ch + pad_size, H) y1, y2 = max(cw - pad_size, 0), min(cw + pad_size, W) out = img.copy() out[x1:x2, y1:y2, :] = replace return out ### level to args def enhance_level_to_args(MAX_LEVEL): def level_to_args(level): return ((level / MAX_LEVEL) * 1.8 + 0.1,) return level_to_args def shear_level_to_args(MAX_LEVEL, replace_value): def level_to_args(level): level = (level / MAX_LEVEL) * 0.3 if np.random.random() > 0.5: level = -level return (level, replace_value) return level_to_args def translate_level_to_args(translate_const, MAX_LEVEL, replace_value): def level_to_args(level): level = (level / MAX_LEVEL) * float(translate_const) if np.random.random() > 0.5: level = -level return (level, replace_value) return level_to_args def cutout_level_to_args(cutout_const, MAX_LEVEL, replace_value): def level_to_args(level): level = int((level / MAX_LEVEL) * cutout_const) return (level, 
replace_value) return level_to_args def solarize_level_to_args(MAX_LEVEL): def level_to_args(level): level = int((level / MAX_LEVEL) * 256) return (level,) return level_to_args def none_level_to_args(level): return () def posterize_level_to_args(MAX_LEVEL): def level_to_args(level): level = int((level / MAX_LEVEL) * 4) return (level,) return level_to_args def rotate_level_to_args(MAX_LEVEL, replace_value): def level_to_args(level): level = (level / MAX_LEVEL) * 30 if np.random.random() < 0.5: level = -level return (level, replace_value) return level_to_args func_dict = { "Identity": identity_func, "AutoContrast": autocontrast_func, "Equalize": equalize_func, "Rotate": rotate_func, "Solarize": solarize_func, "Color": color_func, "Contrast": contrast_func, "Brightness": brightness_func, "Sharpness": sharpness_func, "ShearX": shear_x_func, "TranslateX": translate_x_func, "TranslateY": translate_y_func, "Posterize": posterize_func, "ShearY": shear_y_func, } translate_const = 10 MAX_LEVEL = 10 replace_value = (128, 128, 128) arg_dict = { "Identity": none_level_to_args, "AutoContrast": none_level_to_args, "Equalize": none_level_to_args, "Rotate": rotate_level_to_args(MAX_LEVEL, replace_value), "Solarize": solarize_level_to_args(MAX_LEVEL), "Color": enhance_level_to_args(MAX_LEVEL), "Contrast": enhance_level_to_args(MAX_LEVEL), "Brightness": enhance_level_to_args(MAX_LEVEL), "Sharpness": enhance_level_to_args(MAX_LEVEL), "ShearX": shear_level_to_args(MAX_LEVEL, replace_value), "TranslateX": translate_level_to_args(translate_const, MAX_LEVEL, replace_value), "TranslateY": translate_level_to_args(translate_const, MAX_LEVEL, replace_value), "Posterize": posterize_level_to_args(MAX_LEVEL), "ShearY": shear_level_to_args(MAX_LEVEL, replace_value), } class RandomAugment(object): def __init__(self, N=2, M=10, isPIL=False, augs=[]): self.N = N self.M = M self.isPIL = isPIL if augs: self.augs = augs else: self.augs = list(arg_dict.keys()) def get_random_ops(self): sampled_ops = np.random.choice(self.augs, self.N) return [(op, 0.5, self.M) for op in sampled_ops] def __call__(self, img): if self.isPIL: img = np.array(img) ops = self.get_random_ops() for name, prob, level in ops: if np.random.random() > prob: continue args = arg_dict[name](level) img = func_dict[name](img, *args) return img class VideoRandomAugment(object): def __init__(self, N=2, M=10, p=0.0, tensor_in_tensor_out=True, augs=[]): self.N = N self.M = M self.p = p self.tensor_in_tensor_out = tensor_in_tensor_out if augs: self.augs = augs else: self.augs = list(arg_dict.keys()) def get_random_ops(self): sampled_ops = np.random.choice(self.augs, self.N, replace=False) return [(op, self.M) for op in sampled_ops] def __call__(self, frames): assert frames.shape[-1] == 3, "Expecting last dimension for 3-channels RGB (b, h, w, c)." if self.tensor_in_tensor_out: frames = frames.numpy().astype(np.uint8) num_frames = frames.shape[0] ops = num_frames * [self.get_random_ops()] apply_or_not = num_frames * [np.random.random(size=self.N) > self.p] frames = torch.stack(list(map(self._aug, frames, ops, apply_or_not)), dim=0).float() return frames def _aug(self, img, ops, apply_or_not): for i, (name, level) in enumerate(ops): if not apply_or_not[i]: continue args = arg_dict[name](level) img = func_dict[name](img, *args) return torch.from_numpy(img) if __name__ == "__main__": a = RandomAugment() img = np.random.randn(32, 32, 3) a(img)
3D-LLM-main
3DLLM_BLIP2-base/lavis/processors/randaugment.py
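A sketch of RandomAugment on a random HWC uint8 image, using the same (N=2, M=5) setting as BlipImageTrainProcessor; the op list here is an illustrative subset, and each sampled op is applied with probability 0.5 at magnitude M.

import numpy as np
from lavis.processors.randaugment import RandomAugment

aug = RandomAugment(2, 5, isPIL=False,
                    augs=["Identity", "Brightness", "ShearX", "Rotate"])  # illustrative subset
img = (np.random.rand(224, 224, 3) * 255).astype(np.uint8)                # HWC uint8, as the table-lookup ops expect
out = aug(img)
print(out.shape, out.dtype)                                               # (224, 224, 3) uint8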
#!/usr/bin/env python3 """ Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import numbers import random from torchvision.transforms import ( RandomCrop, RandomResizedCrop, ) import lavis.processors.functional_video as F __all__ = [ "RandomCropVideo", "RandomResizedCropVideo", "CenterCropVideo", "NormalizeVideo", "ToTensorVideo", "RandomHorizontalFlipVideo", ] class RandomCropVideo(RandomCrop): def __init__(self, size): if isinstance(size, numbers.Number): self.size = (int(size), int(size)) else: self.size = size def __call__(self, clip): """ Args: clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W) Returns: torch.tensor: randomly cropped/resized video clip. size is (C, T, OH, OW) """ i, j, h, w = self.get_params(clip, self.size) return F.crop(clip, i, j, h, w) def __repr__(self) -> str: return f"{self.__class__.__name__}(size={self.size})" class RandomResizedCropVideo(RandomResizedCrop): def __init__( self, size, scale=(0.08, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0), interpolation_mode="bilinear", ): if isinstance(size, tuple): if len(size) != 2: raise ValueError(f"size should be tuple (height, width), instead got {size}") self.size = size else: self.size = (size, size) self.interpolation_mode = interpolation_mode self.scale = scale self.ratio = ratio def __call__(self, clip): """ Args: clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W) Returns: torch.tensor: randomly cropped/resized video clip. size is (C, T, H, W) """ i, j, h, w = self.get_params(clip, self.scale, self.ratio) return F.resized_crop(clip, i, j, h, w, self.size, self.interpolation_mode) def __repr__(self) -> str: return f"{self.__class__.__name__}(size={self.size}, interpolation_mode={self.interpolation_mode}, scale={self.scale}, ratio={self.ratio})" class CenterCropVideo: def __init__(self, crop_size): if isinstance(crop_size, numbers.Number): self.crop_size = (int(crop_size), int(crop_size)) else: self.crop_size = crop_size def __call__(self, clip): """ Args: clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W) Returns: torch.tensor: central cropping of video clip. Size is (C, T, crop_size, crop_size) """ return F.center_crop(clip, self.crop_size) def __repr__(self) -> str: return f"{self.__class__.__name__}(crop_size={self.crop_size})" class NormalizeVideo: """ Normalize the video clip by mean subtraction and division by standard deviation Args: mean (3-tuple): pixel RGB mean std (3-tuple): pixel RGB standard deviation inplace (boolean): whether do in-place normalization """ def __init__(self, mean, std, inplace=False): self.mean = mean self.std = std self.inplace = inplace def __call__(self, clip): """ Args: clip (torch.tensor): video clip to be normalized. 
Size is (C, T, H, W) """ return F.normalize(clip, self.mean, self.std, self.inplace) def __repr__(self) -> str: return f"{self.__class__.__name__}(mean={self.mean}, std={self.std}, inplace={self.inplace})" class ToTensorVideo: """ Convert tensor data type from uint8 to float, divide value by 255.0 and permute the dimensions of clip tensor """ def __init__(self): pass def __call__(self, clip): """ Args: clip (torch.tensor, dtype=torch.uint8): Size is (T, H, W, C) Return: clip (torch.tensor, dtype=torch.float): Size is (C, T, H, W) """ return F.to_tensor(clip) def __repr__(self) -> str: return self.__class__.__name__ class RandomHorizontalFlipVideo: """ Flip the video clip along the horizontal direction with a given probability Args: p (float): probability of the clip being flipped. Default value is 0.5 """ def __init__(self, p=0.5): self.p = p def __call__(self, clip): """ Args: clip (torch.tensor): Size is (C, T, H, W) Return: clip (torch.tensor): Size is (C, T, H, W) """ if random.random() < self.p: clip = F.hflip(clip) return clip def __repr__(self) -> str: return f"{self.__class__.__name__}(p={self.p})"

3D-LLM-main
3DLLM_BLIP2-base/lavis/processors/transforms_video.py
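A sketch composing the video transforms above on a random uint8 clip; the mean/std values are ImageNet statistics used only as placeholders, and the module path follows this repo's layout.

import torch
from torchvision import transforms
from lavis.processors.transforms_video import (
    ToTensorVideo, RandomResizedCropVideo, RandomHorizontalFlipVideo, NormalizeVideo,
)

pipeline = transforms.Compose([
    ToTensorVideo(),                                   # (T, H, W, C) uint8 -> (C, T, H, W) float
    RandomResizedCropVideo(224, scale=(0.5, 1.0)),
    RandomHorizontalFlipVideo(p=0.5),
    NormalizeVideo(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
])

clip = torch.randint(0, 256, (16, 256, 256, 3), dtype=torch.uint8)
print(pipeline(clip).shape)                            # torch.Size([3, 16, 224, 224])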
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import math from lavis.common.registry import registry @registry.register_lr_scheduler("linear_warmup_step_lr") class LinearWarmupStepLRScheduler: def __init__( self, optimizer, max_epoch, min_lr, init_lr, decay_rate=1, warmup_start_lr=-1, warmup_steps=0, **kwargs ): self.optimizer = optimizer self.max_epoch = max_epoch self.min_lr = min_lr self.decay_rate = decay_rate self.init_lr = init_lr self.warmup_steps = warmup_steps self.warmup_start_lr = warmup_start_lr if warmup_start_lr >= 0 else init_lr def step(self, cur_epoch, cur_step): if cur_epoch == 0: warmup_lr_schedule( step=cur_step, optimizer=self.optimizer, max_step=self.warmup_steps, init_lr=self.warmup_start_lr, max_lr=self.init_lr, ) else: step_lr_schedule( epoch=cur_epoch, optimizer=self.optimizer, init_lr=self.init_lr, min_lr=self.min_lr, decay_rate=self.decay_rate, ) @registry.register_lr_scheduler("linear_warmup_cosine_lr") class LinearWarmupCosineLRScheduler: def __init__(self, optimizer, max_epoch, min_lr, init_lr, warmup_steps=0, warmup_start_lr=-1, **kwargs): self.optimizer = optimizer self.max_epoch = max_epoch self.min_lr = min_lr self.init_lr = init_lr self.warmup_steps = warmup_steps self.warmup_start_lr = warmup_start_lr if warmup_start_lr >= 0 else init_lr def step(self, cur_epoch, cur_step): # assuming the warmup iters less than one epoch if cur_epoch == 0: warmup_lr_schedule( step=cur_step, optimizer=self.optimizer, max_step=self.warmup_steps, init_lr=self.warmup_start_lr, max_lr=self.init_lr, ) else: cosine_lr_schedule( epoch=cur_epoch, optimizer=self.optimizer, max_epoch=self.max_epoch, init_lr=self.init_lr, min_lr=self.min_lr, ) def cosine_lr_schedule(optimizer, epoch, max_epoch, init_lr, min_lr): """Decay the learning rate""" lr = (init_lr - min_lr) * 0.5 * (1.0 + math.cos(math.pi * epoch / max_epoch)) + min_lr for param_group in optimizer.param_groups: param_group["lr"] = lr def warmup_lr_schedule(optimizer, step, max_step, init_lr, max_lr): """Warmup the learning rate""" lr = min(max_lr, init_lr + (max_lr - init_lr) * step / max(max_step, 1)) for param_group in optimizer.param_groups: param_group["lr"] = lr def step_lr_schedule(optimizer, epoch, init_lr, min_lr, decay_rate): """Decay the learning rate""" lr = max(min_lr, init_lr * (decay_rate ** epoch)) for param_group in optimizer.param_groups: param_group["lr"] = lr
3D-LLM-main
3DLLM_BLIP2-base/lavis/common/optims.py
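To make the schedule behaviour concrete, here is a dependency-free sketch that restates the warmup and cosine formulas from the functions above; the hyperparameter values are made up:

import math

init_lr, min_lr, warmup_start_lr = 1e-4, 1e-5, 1e-6   # hypothetical values
warmup_steps, max_epoch = 1000, 10

# warmup_lr_schedule (epoch 0): linear ramp from warmup_start_lr up to init_lr.
def warmup_lr(step):
    return min(init_lr, warmup_start_lr + (init_lr - warmup_start_lr) * step / max(warmup_steps, 1))

# cosine_lr_schedule (epoch >= 1): cosine decay from init_lr down to min_lr.
def cosine_lr(epoch):
    return (init_lr - min_lr) * 0.5 * (1.0 + math.cos(math.pi * epoch / max_epoch)) + min_lr

print([warmup_lr(s) for s in (0, 500, 1000)])  # approx. [1e-06, 5.05e-05, 1e-04]
print([cosine_lr(e) for e in (0, 5, 10)])      # approx. [1e-04, 5.5e-05, 1e-05]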
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import logging import json from typing import Dict from omegaconf import OmegaConf from lavis.common.registry import registry class Config: def __init__(self, args): self.config = {} self.args = args # Register the config and configuration for setup registry.register("configuration", self) user_config = self._build_opt_list(self.args.options) config = OmegaConf.load(self.args.cfg_path) # Replace the configuration with user options. if hasattr(args, "replace_cfg") and args.replace_cfg is not None: replace_cfg = [x.split("=") for x in args.replace_cfg] for keys, value in replace_cfg: keys = keys.split(".") d = config for key in keys[:-1]: d = d[key] d[keys[-1]] = value runner_config = self.build_runner_config(config) model_config = self.build_model_config(config, **user_config) dataset_config = self.build_dataset_config(config) # Validate the user-provided runner configuration # model and dataset configuration are supposed to be validated by the respective classes # [TODO] validate the model/dataset configuration # self._validate_runner_config(runner_config) # Override the default configuration with user options. self.config = OmegaConf.merge(runner_config, model_config, dataset_config, user_config) def _validate_runner_config(self, runner_config): """ This method validates the configuration, such that 1) all the user specified options are valid; 2) no type mismatches between the user specified options and the config. """ runner_config_validator = create_runner_config_validator() runner_config_validator.validate(runner_config) def _build_opt_list(self, opts): opts_dot_list = self._convert_to_dot_list(opts) return OmegaConf.from_dotlist(opts_dot_list) @staticmethod def build_model_config(config, **kwargs): model = config.get("model", None) assert model is not None, "Missing model configuration file." model_cls = registry.get_model_class(model.arch) assert model_cls is not None, f"Model '{model.arch}' has not been registered." model_type = kwargs.get("model.model_type", None) if not model_type: model_type = model.get("model_type", None) # else use the model type selected by user. assert model_type is not None, "Missing model_type." 
model_config_path = model_cls.default_config_path(model_type=model_type) model_config = OmegaConf.create() # hiararchy override, customized config > default config model_config = OmegaConf.merge( model_config, OmegaConf.load(model_config_path), {"model": config["model"]}, ) return model_config @staticmethod def build_runner_config(config): return {"run": config.run} @staticmethod def build_dataset_config(config): datasets = config.get("datasets", None) if datasets is None: raise KeyError("Expecting 'datasets' as the root key for dataset configuration.") dataset_config = OmegaConf.create() for dataset_name in datasets: builder_cls = registry.get_builder_class(dataset_name) dataset_config_type = datasets[dataset_name].get("type", "default") dataset_config_path = builder_cls.default_config_path(type=dataset_config_type) # hiararchy override, customized config > default config dataset_config = OmegaConf.merge( dataset_config, OmegaConf.load(dataset_config_path), {"datasets": {dataset_name: config["datasets"][dataset_name]}}, ) return dataset_config def _convert_to_dot_list(self, opts): if opts is None: opts = [] if len(opts) == 0: return opts has_equal = opts[0].find("=") != -1 if has_equal: return opts return [(opt + "=" + value) for opt, value in zip(opts[0::2], opts[1::2])] def get_config(self): return self.config @property def run_cfg(self): return self.config.run @property def datasets_cfg(self): return self.config.datasets @property def model_cfg(self): return self.config.model def pretty_print(self): logging.info("\n===== Running Parameters =====") logging.info(self._convert_node_to_json(self.config.run)) logging.info("\n====== Dataset Attributes ======") datasets = self.config.datasets for dataset in datasets: if dataset in self.config.datasets: logging.info(f"\n======== {dataset} =======") dataset_config = self.config.datasets[dataset] logging.info(self._convert_node_to_json(dataset_config)) else: logging.warning(f"No dataset named '{dataset}' in config. Skipping") logging.info(f"\n====== Model Attributes ======") logging.info(self._convert_node_to_json(self.config.model)) def _convert_node_to_json(self, node): container = OmegaConf.to_container(node, resolve=True) return json.dumps(container, indent=4, sort_keys=True) def to_dict(self): return OmegaConf.to_container(self.config) def node_to_dict(node): return OmegaConf.to_container(node) class ConfigValidator: """ This is a preliminary implementation to centralize and validate the configuration. May be altered in the future. A helper class to validate configurations from yaml file. This serves the following purposes: 1. Ensure all the options in the yaml are defined, raise error if not. 2. when type mismatches are found, the validator will raise an error. 3. a central place to store and display helpful messages for supported configurations. """ class _Argument: def __init__(self, name, choices=None, type=None, help=None): self.name = name self.val = None self.choices = choices self.type = type self.help = help def __str__(self): s = f"{self.name}={self.val}" if self.type is not None: s += f", ({self.type})" if self.choices is not None: s += f", choices: {self.choices}" if self.help is not None: s += f", ({self.help})" return s def __init__(self, description): self.description = description self.arguments = dict() self.parsed_args = None def __getitem__(self, key): assert self.parsed_args is not None, "No arguments parsed yet." 
return self.parsed_args[key] def __str__(self) -> str: return self.format_help() def add_argument(self, *args, **kwargs): """ Assume the first argument is the name of the argument. """ self.arguments[args[0]] = self._Argument(*args, **kwargs) def validate(self, config=None): """ Convert yaml config (dict-like) to list, required by argparse. """ for k, v in config.items(): assert ( k in self.arguments ), f"""{k} is not a valid argument. Support arguments are {self.format_arguments()}.""" if self.arguments[k].type is not None: try: self.arguments[k].val = self.arguments[k].type(v) except ValueError: raise ValueError(f"{k} is not a valid {self.arguments[k].type}.") if self.arguments[k].choices is not None: assert v in self.arguments[k].choices, f"""{k} must be one of {self.arguments[k].choices}.""" return config def format_arguments(self): return str([f"{k}" for k in sorted(self.arguments.keys())]) def format_help(self): # description + key-value pair string for each argument help_msg = str(self.description) return help_msg + ", available arguments: " + self.format_arguments() def print_help(self): # display help message print(self.format_help()) def create_runner_config_validator(): validator = ConfigValidator(description="Runner configurations") validator.add_argument( "runner", type=str, choices=["runner_base", "runner_iter"], help="""Runner to use. The "runner_base" uses epoch-based training while iter-based runner runs based on iters. Default: runner_base""", ) # add argumetns for training dataset ratios validator.add_argument( "train_dataset_ratios", type=Dict[str, float], help="""Ratios of training dataset. This is used in iteration-based runner. Do not support for epoch-based runner because how to define an epoch becomes tricky. Default: None""", ) validator.add_argument( "max_iters", type=float, help="Maximum number of iterations to run.", ) validator.add_argument( "max_epoch", type=int, help="Maximum number of epochs to run.", ) # add arguments for iters_per_inner_epoch validator.add_argument( "iters_per_inner_epoch", type=float, help="Number of iterations per inner epoch. This is required when runner is runner_iter.", ) lr_scheds_choices = registry.list_lr_schedulers() validator.add_argument( "lr_sched", type=str, choices=lr_scheds_choices, help="Learning rate scheduler to use, from {}".format(lr_scheds_choices), ) task_choices = registry.list_tasks() validator.add_argument( "task", type=str, choices=task_choices, help="Task to use, from {}".format(task_choices), ) # add arguments for init_lr validator.add_argument( "init_lr", type=float, help="Initial learning rate. This will be the learning rate after warmup and before decay.", ) # add arguments for min_lr validator.add_argument( "min_lr", type=float, help="Minimum learning rate (after decay).", ) # add arguments for warmup_lr validator.add_argument( "warmup_lr", type=float, help="Starting learning rate for warmup.", ) # add arguments for learning rate decay rate validator.add_argument( "lr_decay_rate", type=float, help="Learning rate decay rate. 
Required if using a decaying learning rate scheduler.", ) # add arguments for weight decay validator.add_argument( "weight_decay", type=float, help="Weight decay rate.", ) # add arguments for training batch size validator.add_argument( "batch_size_train", type=int, help="Training batch size.", ) # add arguments for evaluation batch size validator.add_argument( "batch_size_eval", type=int, help="Evaluation batch size, including validation and testing.", ) # add arguments for number of workers for data loading validator.add_argument( "num_workers", help="Number of workers for data loading.", ) # add arguments for warm up steps validator.add_argument( "warmup_steps", type=int, help="Number of warmup steps. Required if a warmup schedule is used.", ) # add arguments for random seed validator.add_argument( "seed", type=int, help="Random seed.", ) # add arguments for output directory validator.add_argument( "output_dir", type=str, help="Output directory to save checkpoints and logs.", ) # add arguments for whether only use evaluation validator.add_argument( "evaluate", help="Whether to only evaluate the model. If true, training will not be performed.", ) # add arguments for splits used for training, e.g. ["train", "val"] validator.add_argument( "train_splits", type=list, help="Splits to use for training.", ) # add arguments for splits used for validation, e.g. ["val"] validator.add_argument( "valid_splits", type=list, help="Splits to use for validation. If not provided, will skip the validation.", ) # add arguments for splits used for testing, e.g. ["test"] validator.add_argument( "test_splits", type=list, help="Splits to use for testing. If not provided, will skip the testing.", ) # add arguments for accumulating gradient for iterations validator.add_argument( "accum_grad_iters", type=int, help="Number of iterations to accumulate gradient for.", ) # ====== distributed training ====== validator.add_argument( "device", type=str, choices=["cpu", "cuda"], help="Device to use. Support 'cuda' or 'cpu' as for now.", ) validator.add_argument( "world_size", type=int, help="Number of processes participating in the job.", ) validator.add_argument("dist_url", type=str) validator.add_argument("distributed", type=bool) # add arguments to opt using distributed sampler during evaluation or not validator.add_argument( "use_dist_eval_sampler", type=bool, help="Whether to use distributed sampler during evaluation or not.", ) # ====== task specific ====== # generation task specific arguments # add arguments for maximal length of text output validator.add_argument( "max_len", type=int, help="Maximal length of text output.", ) # add arguments for minimal length of text output validator.add_argument( "min_len", type=int, help="Minimal length of text output.", ) # add arguments number of beams validator.add_argument( "num_beams", type=int, help="Number of beams used for beam search.", ) # vqa task specific arguments # add arguments for number of answer candidates validator.add_argument( "num_ans_candidates", type=int, help="""For ALBEF and BLIP, these models first rank answers according to likelihood to select answer candidates.""", ) # add arguments for inference method validator.add_argument( "inference_method", type=str, choices=["genearte", "rank"], help="""Inference method to use for question answering. If rank, requires a answer list.""", ) # ====== model specific ====== validator.add_argument( "k_test", type=int, help="Number of top k most similar samples from ITC/VTC selection to be tested.", ) return validator
3D-LLM-main
3DLLM_BLIP2-base/lavis/common/config.py
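A small sketch of the merge behaviour Config relies on, using OmegaConf directly; the keys and values below are made up for illustration:

from omegaconf import OmegaConf

# Defaults as they might come from a YAML file.
default_cfg = OmegaConf.create({"run": {"init_lr": 1e-4, "batch_size_train": 16, "evaluate": False}})

# Command-line options; Config._convert_to_dot_list turns ["run.batch_size_train", "8"]
# pairs into ["run.batch_size_train=8"] before handing them to OmegaConf.
user_opts = OmegaConf.from_dotlist(["run.batch_size_train=8", "run.evaluate=true"])

# In OmegaConf.merge the right-most argument wins, so user options override defaults.
merged = OmegaConf.merge(default_cfg, user_opts)
print(merged.run.batch_size_train, merged.run.evaluate)  # 8 True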
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ class Registry: mapping = { "builder_name_mapping": {}, "task_name_mapping": {}, "processor_name_mapping": {}, "model_name_mapping": {}, "lr_scheduler_name_mapping": {}, "runner_name_mapping": {}, "state": {}, "paths": {}, } @classmethod def register_builder(cls, name): r"""Register a dataset builder to registry with key 'name' Args: name: Key with which the builder will be registered. Usage: from lavis.common.registry import registry from lavis.datasets.base_dataset_builder import BaseDatasetBuilder """ def wrap(builder_cls): from lavis.datasets.builders.base_dataset_builder import BaseDatasetBuilder assert issubclass( builder_cls, BaseDatasetBuilder ), "All builders must inherit BaseDatasetBuilder class, found {}".format(builder_cls) if name in cls.mapping["builder_name_mapping"]: raise KeyError( "Name '{}' already registered for {}.".format(name, cls.mapping["builder_name_mapping"][name]) ) cls.mapping["builder_name_mapping"][name] = builder_cls return builder_cls return wrap @classmethod def register_task(cls, name): r"""Register a task to registry with key 'name' Args: name: Key with which the task will be registered. Usage: from lavis.common.registry import registry """ def wrap(task_cls): from lavis.tasks.base_task import BaseTask assert issubclass(task_cls, BaseTask), "All tasks must inherit BaseTask class" if name in cls.mapping["task_name_mapping"]: raise KeyError( "Name '{}' already registered for {}.".format(name, cls.mapping["task_name_mapping"][name]) ) cls.mapping["task_name_mapping"][name] = task_cls return task_cls return wrap @classmethod def register_model(cls, name): r"""Register a task to registry with key 'name' Args: name: Key with which the task will be registered. Usage: from lavis.common.registry import registry """ def wrap(model_cls): from lavis.models import BaseModel assert issubclass(model_cls, BaseModel), "All models must inherit BaseModel class" if name in cls.mapping["model_name_mapping"]: raise KeyError( "Name '{}' already registered for {}.".format(name, cls.mapping["model_name_mapping"][name]) ) cls.mapping["model_name_mapping"][name] = model_cls return model_cls return wrap @classmethod def register_processor(cls, name): r"""Register a processor to registry with key 'name' Args: name: Key with which the task will be registered. Usage: from lavis.common.registry import registry """ def wrap(processor_cls): from lavis.processors import BaseProcessor assert issubclass(processor_cls, BaseProcessor), "All processors must inherit BaseProcessor class" if name in cls.mapping["processor_name_mapping"]: raise KeyError( "Name '{}' already registered for {}.".format(name, cls.mapping["processor_name_mapping"][name]) ) cls.mapping["processor_name_mapping"][name] = processor_cls return processor_cls return wrap @classmethod def register_lr_scheduler(cls, name): r"""Register a model to registry with key 'name' Args: name: Key with which the task will be registered. 
Usage: from lavis.common.registry import registry """ def wrap(lr_sched_cls): if name in cls.mapping["lr_scheduler_name_mapping"]: raise KeyError( "Name '{}' already registered for {}.".format(name, cls.mapping["lr_scheduler_name_mapping"][name]) ) cls.mapping["lr_scheduler_name_mapping"][name] = lr_sched_cls return lr_sched_cls return wrap @classmethod def register_runner(cls, name): r"""Register a model to registry with key 'name' Args: name: Key with which the task will be registered. Usage: from lavis.common.registry import registry """ def wrap(runner_cls): if name in cls.mapping["runner_name_mapping"]: raise KeyError( "Name '{}' already registered for {}.".format(name, cls.mapping["runner_name_mapping"][name]) ) cls.mapping["runner_name_mapping"][name] = runner_cls return runner_cls return wrap @classmethod def register_path(cls, name, path): r"""Register a path to registry with key 'name' Args: name: Key with which the path will be registered. Usage: from lavis.common.registry import registry """ assert isinstance(path, str), "All path must be str." if name in cls.mapping["paths"]: raise KeyError("Name '{}' already registered.".format(name)) cls.mapping["paths"][name] = path @classmethod def register(cls, name, obj): r"""Register an item to registry with key 'name' Args: name: Key with which the item will be registered. Usage:: from lavis.common.registry import registry registry.register("config", {}) """ path = name.split(".") current = cls.mapping["state"] for part in path[:-1]: if part not in current: current[part] = {} current = current[part] current[path[-1]] = obj # @classmethod # def get_trainer_class(cls, name): # return cls.mapping["trainer_name_mapping"].get(name, None) @classmethod def get_builder_class(cls, name): return cls.mapping["builder_name_mapping"].get(name, None) @classmethod def get_model_class(cls, name): return cls.mapping["model_name_mapping"].get(name, None) @classmethod def get_task_class(cls, name): return cls.mapping["task_name_mapping"].get(name, None) @classmethod def get_processor_class(cls, name): return cls.mapping["processor_name_mapping"].get(name, None) @classmethod def get_lr_scheduler_class(cls, name): return cls.mapping["lr_scheduler_name_mapping"].get(name, None) @classmethod def get_runner_class(cls, name): return cls.mapping["runner_name_mapping"].get(name, None) @classmethod def list_runners(cls): return sorted(cls.mapping["runner_name_mapping"].keys()) @classmethod def list_models(cls): return sorted(cls.mapping["model_name_mapping"].keys()) @classmethod def list_tasks(cls): return sorted(cls.mapping["task_name_mapping"].keys()) @classmethod def list_processors(cls): return sorted(cls.mapping["processor_name_mapping"].keys()) @classmethod def list_lr_schedulers(cls): return sorted(cls.mapping["lr_scheduler_name_mapping"].keys()) @classmethod def list_datasets(cls): return sorted(cls.mapping["builder_name_mapping"].keys()) @classmethod def get_path(cls, name): return cls.mapping["paths"].get(name, None) @classmethod def get(cls, name, default=None, no_warning=False): r"""Get an item from registry with key 'name' Args: name (string): Key whose value needs to be retrieved. default: If passed and key is not in registry, default value will be returned with a warning. Default: None no_warning (bool): If passed as True, warning when key doesn't exist will not be generated. Useful for MMF's internal operations. 
Default: False """ original_name = name name = name.split(".") value = cls.mapping["state"] for subname in name: value = value.get(subname, default) if value is default: break if "writer" in cls.mapping["state"] and value == default and no_warning is False: cls.mapping["state"]["writer"].warning( "Key {} is not present in registry, returning default value " "of {}".format(original_name, default) ) return value @classmethod def unregister(cls, name): r"""Remove an item from registry with key 'name' Args: name: Key which needs to be removed. Usage:: from mmf.common.registry import registry config = registry.unregister("config") """ return cls.mapping["state"].pop(name, None) registry = Registry()
3D-LLM-main
3DLLM_BLIP2-base/lavis/common/registry.py
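A short usage sketch for the registry, assuming the repo and its dependencies are installed so the lavis package is importable; the "my_state.seed" key is an arbitrary example:

from lavis.common.registry import registry
import lavis.common.optims  # noqa: F401 -- runs the @registry.register_lr_scheduler decorators

# Arbitrary objects can be stored and fetched via dotted keys in the "state" mapping.
registry.register("my_state.seed", 42)
print(registry.get("my_state.seed"))  # 42

# Classes registered through the decorators are looked up by name.
sched_cls = registry.get_lr_scheduler_class("linear_warmup_cosine_lr")
print(sched_cls.__name__)  # LinearWarmupCosineLRScheduler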
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import datetime import logging import time from collections import defaultdict, deque import torch import torch.distributed as dist from lavis.common import dist_utils class SmoothedValue(object): """Track a series of values and provide access to smoothed values over a window or the global series average. """ def __init__(self, window_size=20, fmt=None): if fmt is None: fmt = "{median:.4f} ({global_avg:.4f})" self.deque = deque(maxlen=window_size) self.total = 0.0 self.count = 0 self.fmt = fmt def update(self, value, n=1): self.deque.append(value) self.count += n self.total += value * n def synchronize_between_processes(self): """ Warning: does not synchronize the deque! """ if not dist_utils.is_dist_avail_and_initialized(): return t = torch.tensor([self.count, self.total], dtype=torch.float64, device="cuda") dist.barrier() dist.all_reduce(t) t = t.tolist() self.count = int(t[0]) self.total = t[1] @property def median(self): d = torch.tensor(list(self.deque)) return d.median().item() @property def avg(self): d = torch.tensor(list(self.deque), dtype=torch.float32) return d.mean().item() @property def global_avg(self): return self.total / self.count @property def max(self): return max(self.deque) @property def value(self): return self.deque[-1] def __str__(self): return self.fmt.format( median=self.median, avg=self.avg, global_avg=self.global_avg, max=self.max, value=self.value, ) class MetricLogger(object): def __init__(self, delimiter="\t"): self.meters = defaultdict(SmoothedValue) self.delimiter = delimiter def update(self, **kwargs): for k, v in kwargs.items(): if isinstance(v, torch.Tensor): v = v.item() assert isinstance(v, (float, int)) self.meters[k].update(v) def __getattr__(self, attr): if attr in self.meters: return self.meters[attr] if attr in self.__dict__: return self.__dict__[attr] raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr)) def __str__(self): loss_str = [] for name, meter in self.meters.items(): loss_str.append("{}: {}".format(name, str(meter))) return self.delimiter.join(loss_str) def global_avg(self): loss_str = [] for name, meter in self.meters.items(): loss_str.append("{}: {:.4f}".format(name, meter.global_avg)) return self.delimiter.join(loss_str) def synchronize_between_processes(self): for meter in self.meters.values(): meter.synchronize_between_processes() def add_meter(self, name, meter): self.meters[name] = meter def log_every(self, iterable, print_freq, header=None): i = 0 if not header: header = "" start_time = time.time() end = time.time() iter_time = SmoothedValue(fmt="{avg:.4f}") data_time = SmoothedValue(fmt="{avg:.4f}") space_fmt = ":" + str(len(str(len(iterable)))) + "d" log_msg = [ header, "[{0" + space_fmt + "}/{1}]", "eta: {eta}", "{meters}", "time: {time}", "data: {data}", ] if torch.cuda.is_available(): log_msg.append("max mem: {memory:.0f}") log_msg = self.delimiter.join(log_msg) MB = 1024.0 * 1024.0 for obj in iterable: data_time.update(time.time() - end) yield obj iter_time.update(time.time() - end) if i % print_freq == 0 or i == len(iterable) - 1: eta_seconds = iter_time.global_avg * (len(iterable) - i) eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) if torch.cuda.is_available(): print( log_msg.format( i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), 
data=str(data_time), memory=torch.cuda.max_memory_allocated() / MB, ) ) else: print( log_msg.format( i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time), ) ) i += 1 end = time.time() total_time = time.time() - start_time total_time_str = str(datetime.timedelta(seconds=int(total_time))) print("{} Total time: {} ({:.4f} s / it)".format(header, total_time_str, total_time / len(iterable))) class AttrDict(dict): def __init__(self, *args, **kwargs): super(AttrDict, self).__init__(*args, **kwargs) self.__dict__ = self def setup_logger(): logging.basicConfig( level=logging.INFO if dist_utils.is_main_process() else logging.WARN, format="%(asctime)s [%(levelname)s] %(message)s", handlers=[logging.StreamHandler()], )
3D-LLM-main
3DLLM_BLIP2-base/lavis/common/logger.py
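A minimal training-loop sketch for MetricLogger and SmoothedValue, assuming the lavis package (and the timm dependency pulled in via dist_utils) is importable; the meters and the loop are illustrative only:

import torch
from lavis.common.logger import MetricLogger, SmoothedValue

metric_logger = MetricLogger(delimiter="  ")
metric_logger.add_meter("lr", SmoothedValue(window_size=1, fmt="{value:.6f}"))

# log_every wraps any iterable and prints smoothed stats every `print_freq` iterations.
for step in metric_logger.log_every(range(10), print_freq=5, header="Train:"):
    metric_logger.update(loss=torch.rand(()), lr=1e-4)

print("averaged stats:", metric_logger.global_avg())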
import os import sys from easydict import EasyDict CONF = EasyDict() # path CONF.PATH = EasyDict() CONF.PATH.BASE = "." # TODO: change this CONF.PATH.DATA = os.path.join(CONF.PATH.BASE, "data") CONF.PATH.SCANNET = os.path.join(CONF.PATH.DATA, "scannet") CONF.PATH.LIB = os.path.join(CONF.PATH.BASE, "lib") CONF.PATH.MODELS = os.path.join(CONF.PATH.BASE, "models") CONF.PATH.UTILS = os.path.join(CONF.PATH.BASE, "utils") CONF.PATH.WEIGHTS = os.path.join(CONF.PATH.BASE, "weights") # append to syspath for _, path in CONF.PATH.items(): sys.path.append(path) # scannet data CONF.PATH.SCANNET_SCANS = os.path.join(CONF.PATH.SCANNET, "scans") CONF.PATH.SCANNET_META = os.path.join(CONF.PATH.SCANNET, "meta_data") CONF.PATH.SCANNET_DATA = os.path.join(CONF.PATH.SCANNET, "scannet_data") # scanqa data CONF.PATH.SCANQA = os.path.join(CONF.PATH.DATA, "qa") # data CONF.SCANNET_DIR = os.path.join(CONF.PATH.BASE, "data/scannet/scans") CONF.SCANNET_FRAMES_ROOT = os.path.join(CONF.PATH.BASE, "data/frames_square") CONF.PROJECTION = os.path.join(CONF.PATH.BASE, "data/multiview_projection_scanrefer") CONF.ENET_FEATURES_ROOT = os.path.join(CONF.PATH.BASE, "data/enet_features") CONF.ENET_FEATURES_SUBROOT = os.path.join(CONF.ENET_FEATURES_ROOT, "{}") # scene_id CONF.ENET_FEATURES_PATH = os.path.join(CONF.ENET_FEATURES_SUBROOT, "{}.npy") # frame_id CONF.SCANNET_FRAMES = os.path.join(CONF.SCANNET_FRAMES_ROOT, "{}/{}") # scene_id, mode CONF.SCENE_NAMES = sorted(os.listdir(CONF.SCANNET_DIR)) CONF.ENET_WEIGHTS = os.path.join(CONF.PATH.BASE, "data/scannetv2_enet.pth") CONF.MULTIVIEW = os.path.join(CONF.PATH.SCANNET_DATA, "enet_feats_maxpool") CONF.NYU40_LABELS = os.path.join(CONF.PATH.SCANNET_META, "nyu40_labels.csv") # scannet CONF.SCANNETV2_TRAIN = os.path.join(CONF.PATH.SCANNET_META, "scannetv2_train.txt") CONF.SCANNETV2_VAL = os.path.join(CONF.PATH.SCANNET_META, "scannetv2_val.txt") CONF.SCANNETV2_TEST = os.path.join(CONF.PATH.SCANNET_META, "scannetv2_test.txt") CONF.SCANNETV2_LIST = os.path.join(CONF.PATH.SCANNET_META, "scannetv2.txt") # output CONF.PATH.OUTPUT = os.path.join(CONF.PATH.BASE, "outputs") # train CONF.TRAIN = EasyDict() CONF.TRAIN.MAX_TEXT_LEN = 36 # CONF.TRAIN.SEED = 42
3D-LLM-main
3DLLM_BLIP2-base/lavis/common/config_scanqa.py
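The "{}" placeholders in the path templates above are filled with scene and frame identifiers at runtime; a dependency-free sketch with hypothetical ids and roots:

import os

SCANNET_FRAMES_ROOT = "./data/frames_square"
SCANNET_FRAMES = os.path.join(SCANNET_FRAMES_ROOT, "{}/{}")                # scene_id, mode
ENET_FEATURES_PATH = os.path.join("./data/enet_features", "{}", "{}.npy")  # scene_id, frame_id

print(SCANNET_FRAMES.format("scene0000_00", "color"))       # ./data/frames_square/scene0000_00/color
print(ENET_FEATURES_PATH.format("scene0000_00", "000100"))  # ./data/enet_features/scene0000_00/000100.npy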
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import io import json import logging import os import pickle import re import shutil import urllib import urllib.error import urllib.request from typing import Optional from urllib.parse import urlparse import numpy as np import pandas as pd import yaml from iopath.common.download import download from iopath.common.file_io import file_lock, g_pathmgr from lavis.common.registry import registry from torch.utils.model_zoo import tqdm from torchvision.datasets.utils import ( check_integrity, download_file_from_google_drive, extract_archive, ) def now(): from datetime import datetime return datetime.now().strftime("%Y%m%d%H%M")[:-1] def is_url(url_or_filename): parsed = urlparse(url_or_filename) return parsed.scheme in ("http", "https") def get_cache_path(rel_path): return os.path.expanduser(os.path.join(registry.get_path("cache_root"), rel_path)) def get_abs_path(rel_path): return os.path.join(registry.get_path("library_root"), rel_path) def load_json(filename): with open(filename, "r") as f: return json.load(f) # The following are adapted from torchvision and vissl # torchvision: https://github.com/pytorch/vision # vissl: https://github.com/facebookresearch/vissl/blob/main/vissl/utils/download.py def makedir(dir_path): """ Create the directory if it does not exist. """ is_success = False try: if not g_pathmgr.exists(dir_path): g_pathmgr.mkdirs(dir_path) is_success = True except BaseException: print(f"Error creating directory: {dir_path}") return is_success def get_redirected_url(url: str): """ Given a URL, returns the URL it redirects to or the original URL in case of no indirection """ import requests with requests.Session() as session: with session.get(url, stream=True, allow_redirects=True) as response: if response.history: return response.url else: return url def to_google_drive_download_url(view_url: str) -> str: """ Utility function to transform a view URL of google drive to a download URL for google drive Example input: https://drive.google.com/file/d/137RyRjvTBkBiIfeYBNZBtViDHQ6_Ewsp/view Example output: https://drive.google.com/uc?export=download&id=137RyRjvTBkBiIfeYBNZBtViDHQ6_Ewsp """ splits = view_url.split("/") assert splits[-1] == "view" file_id = splits[-2] return f"https://drive.google.com/uc?export=download&id={file_id}" def download_google_drive_url(url: str, output_path: str, output_file_name: str): """ Download a file from google drive Downloading an URL from google drive requires confirmation when the file of the size is too big (google drive notifies that anti-viral checks cannot be performed on such files) """ import requests with requests.Session() as session: # First get the confirmation token and append it to the URL with session.get(url, stream=True, allow_redirects=True) as response: for k, v in response.cookies.items(): if k.startswith("download_warning"): url = url + "&confirm=" + v # Then download the content of the file with session.get(url, stream=True, verify=True) as response: makedir(output_path) path = os.path.join(output_path, output_file_name) total_size = int(response.headers.get("Content-length", 0)) with open(path, "wb") as file: from tqdm import tqdm with tqdm(total=total_size) as progress_bar: for block in response.iter_content(chunk_size=io.DEFAULT_BUFFER_SIZE): file.write(block) progress_bar.update(len(block)) def 
_get_google_drive_file_id(url: str) -> Optional[str]: parts = urlparse(url) if re.match(r"(drive|docs)[.]google[.]com", parts.netloc) is None: return None match = re.match(r"/file/d/(?P<id>[^/]*)", parts.path) if match is None: return None return match.group("id") def _urlretrieve(url: str, filename: str, chunk_size: int = 1024) -> None: with open(filename, "wb") as fh: with urllib.request.urlopen(urllib.request.Request(url, headers={"User-Agent": "vissl"})) as response: with tqdm(total=response.length) as pbar: for chunk in iter(lambda: response.read(chunk_size), ""): if not chunk: break pbar.update(chunk_size) fh.write(chunk) def download_url( url: str, root: str, filename: Optional[str] = None, md5: Optional[str] = None, ) -> None: """Download a file from a url and place it in root. Args: url (str): URL to download file from root (str): Directory to place downloaded file in filename (str, optional): Name to save the file under. If None, use the basename of the URL. md5 (str, optional): MD5 checksum of the download. If None, do not check """ root = os.path.expanduser(root) if not filename: filename = os.path.basename(url) fpath = os.path.join(root, filename) makedir(root) # check if file is already present locally if check_integrity(fpath, md5): print("Using downloaded and verified file: " + fpath) return # expand redirect chain if needed url = get_redirected_url(url) # check if file is located on Google Drive file_id = _get_google_drive_file_id(url) if file_id is not None: return download_file_from_google_drive(file_id, root, filename, md5) # download the file try: print("Downloading " + url + " to " + fpath) _urlretrieve(url, fpath) except (urllib.error.URLError, IOError) as e: # type: ignore[attr-defined] if url[:5] == "https": url = url.replace("https:", "http:") print("Failed download. Trying https -> http instead." " Downloading " + url + " to " + fpath) _urlretrieve(url, fpath) else: raise e # check integrity of downloaded file if not check_integrity(fpath, md5): raise RuntimeError("File not found or corrupted.") def download_and_extract_archive( url: str, download_root: str, extract_root: Optional[str] = None, filename: Optional[str] = None, md5: Optional[str] = None, remove_finished: bool = False, ) -> None: download_root = os.path.expanduser(download_root) if extract_root is None: extract_root = download_root if not filename: filename = os.path.basename(url) download_url(url, download_root, filename, md5) archive = os.path.join(download_root, filename) print("Extracting {} to {}".format(archive, extract_root)) extract_archive(archive, extract_root, remove_finished) def cache_url(url: str, cache_dir: str) -> str: """ This implementation downloads the remote resource and caches it locally. The resource will only be downloaded if not previously requested. """ parsed_url = urlparse(url) dirname = os.path.join(cache_dir, os.path.dirname(parsed_url.path.lstrip("/"))) makedir(dirname) filename = url.split("/")[-1] cached = os.path.join(dirname, filename) with file_lock(cached): if not os.path.isfile(cached): logging.info(f"Downloading {url} to {cached} ...") cached = download(url, dirname, filename=filename) logging.info(f"URL {url} cached in {cached}") return cached # TODO (prigoyal): convert this into RAII-style API def create_file_symlink(file1, file2): """ Simply create the symlinks for a given file1 to file2. Useful during model checkpointing to symlinks to the latest successful checkpoint. 
""" try: if g_pathmgr.exists(file2): g_pathmgr.rm(file2) g_pathmgr.symlink(file1, file2) except Exception as e: logging.info(f"Could NOT create symlink. Error: {e}") def save_file(data, filename, append_to_json=True, verbose=True): """ Common i/o utility to handle saving data to various file formats. Supported: .pkl, .pickle, .npy, .json Specifically for .json, users have the option to either append (default) or rewrite by passing in Boolean value to append_to_json. """ if verbose: logging.info(f"Saving data to file: {filename}") file_ext = os.path.splitext(filename)[1] if file_ext in [".pkl", ".pickle"]: with g_pathmgr.open(filename, "wb") as fopen: pickle.dump(data, fopen, pickle.HIGHEST_PROTOCOL) elif file_ext == ".npy": with g_pathmgr.open(filename, "wb") as fopen: np.save(fopen, data) elif file_ext == ".json": if append_to_json: with g_pathmgr.open(filename, "a") as fopen: fopen.write(json.dumps(data, sort_keys=True) + "\n") fopen.flush() else: with g_pathmgr.open(filename, "w") as fopen: fopen.write(json.dumps(data, sort_keys=True) + "\n") fopen.flush() elif file_ext == ".yaml": with g_pathmgr.open(filename, "w") as fopen: dump = yaml.dump(data) fopen.write(dump) fopen.flush() else: raise Exception(f"Saving {file_ext} is not supported yet") if verbose: logging.info(f"Saved data to file: {filename}") def load_file(filename, mmap_mode=None, verbose=True, allow_pickle=False): """ Common i/o utility to handle loading data from various file formats. Supported: .pkl, .pickle, .npy, .json For the npy files, we support reading the files in mmap_mode. If the mmap_mode of reading is not successful, we load data without the mmap_mode. """ if verbose: logging.info(f"Loading data from file: {filename}") file_ext = os.path.splitext(filename)[1] if file_ext == ".txt": with g_pathmgr.open(filename, "r") as fopen: data = fopen.readlines() elif file_ext in [".pkl", ".pickle"]: with g_pathmgr.open(filename, "rb") as fopen: data = pickle.load(fopen, encoding="latin1") elif file_ext == ".npy": if mmap_mode: try: with g_pathmgr.open(filename, "rb") as fopen: data = np.load( fopen, allow_pickle=allow_pickle, encoding="latin1", mmap_mode=mmap_mode, ) except ValueError as e: logging.info(f"Could not mmap {filename}: {e}. Trying without g_pathmgr") data = np.load( filename, allow_pickle=allow_pickle, encoding="latin1", mmap_mode=mmap_mode, ) logging.info("Successfully loaded without g_pathmgr") except Exception: logging.info("Could not mmap without g_pathmgr. Trying without mmap") with g_pathmgr.open(filename, "rb") as fopen: data = np.load(fopen, allow_pickle=allow_pickle, encoding="latin1") else: with g_pathmgr.open(filename, "rb") as fopen: data = np.load(fopen, allow_pickle=allow_pickle, encoding="latin1") elif file_ext == ".json": with g_pathmgr.open(filename, "r") as fopen: data = json.load(fopen) elif file_ext == ".yaml": with g_pathmgr.open(filename, "r") as fopen: data = yaml.load(fopen, Loader=yaml.FullLoader) elif file_ext == ".csv": with g_pathmgr.open(filename, "r") as fopen: data = pd.read_csv(fopen) else: raise Exception(f"Reading from {file_ext} is not supported yet") return data def abspath(resource_path: str): """ Make a path absolute, but take into account prefixes like "http://" or "manifold://" """ regex = re.compile(r"^\w+://") if regex.match(resource_path) is None: return os.path.abspath(resource_path) else: return resource_path def makedir(dir_path): """ Create the directory if it does not exist. 
""" is_success = False try: if not g_pathmgr.exists(dir_path): g_pathmgr.mkdirs(dir_path) is_success = True except BaseException: logging.info(f"Error creating directory: {dir_path}") return is_success def is_url(input_url): """ Check if an input string is a url. look for http(s):// and ignoring the case """ is_url = re.match(r"^(?:http)s?://", input_url, re.IGNORECASE) is not None return is_url def cleanup_dir(dir): """ Utility for deleting a directory. Useful for cleaning the storage space that contains various training artifacts like checkpoints, data etc. """ if os.path.exists(dir): logging.info(f"Deleting directory: {dir}") shutil.rmtree(dir) logging.info(f"Deleted contents of directory: {dir}") def get_file_size(filename): """ Given a file, get the size of file in MB """ size_in_mb = os.path.getsize(filename) / float(1024 ** 2) return size_in_mb
3D-LLM-main
3DLLM_BLIP2-base/lavis/common/utils.py
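A small round-trip through the JSON branch of save_file/load_file, assuming the lavis package and its iopath dependency are installed; the path and record are arbitrary:

from lavis.common.utils import save_file, load_file

record = {"step": 1, "loss": 0.5}

# append_to_json=False overwrites instead of appending, so repeated runs stay valid JSON.
save_file(record, "/tmp/metrics.json", append_to_json=False)

data = load_file("/tmp/metrics.json")
print(data)  # {'loss': 0.5, 'step': 1}  (keys are sorted on save)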
import numpy as np from matplotlib import pyplot as plt from scipy.ndimage import filters from skimage import transform as skimage_transform def getAttMap(img, attMap, blur=True, overlap=True): attMap -= attMap.min() if attMap.max() > 0: attMap /= attMap.max() attMap = skimage_transform.resize(attMap, (img.shape[:2]), order=3, mode="constant") if blur: attMap = filters.gaussian_filter(attMap, 0.02 * max(img.shape[:2])) attMap -= attMap.min() attMap /= attMap.max() cmap = plt.get_cmap("jet") attMapV = cmap(attMap) attMapV = np.delete(attMapV, 3, 2) if overlap: attMap = ( 1 * (1 - attMap ** 0.7).reshape(attMap.shape + (1,)) * img + (attMap ** 0.7).reshape(attMap.shape + (1,)) * attMapV ) return attMap
3D-LLM-main
3DLLM_BLIP2-base/lavis/common/gradcam.py
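A usage sketch for getAttMap with dummy inputs (real inputs would be an RGB image in [0, 1] and a Grad-CAM map, e.g. one score per ViT patch); assumes matplotlib, scipy and scikit-image are installed, as the module requires:

import numpy as np
from lavis.common.gradcam import getAttMap

img = np.random.rand(224, 224, 3)  # RGB image scaled to [0, 1]
att = np.random.rand(24, 24)       # low-resolution attention map

# Upsamples and blurs the map, colorises it with the jet colormap, and blends it over the image.
overlay = getAttMap(img, att, blur=True, overlap=True)
print(overlay.shape)  # (224, 224, 3)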
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import datetime import functools import os import torch import torch.distributed as dist import timm.models.hub as timm_hub def setup_for_distributed(is_master): """ This function disables printing when not in master process """ import builtins as __builtin__ builtin_print = __builtin__.print def print(*args, **kwargs): force = kwargs.pop("force", False) if is_master or force: builtin_print(*args, **kwargs) __builtin__.print = print def is_dist_avail_and_initialized(): if not dist.is_available(): return False if not dist.is_initialized(): return False return True def get_world_size(): if not is_dist_avail_and_initialized(): return 1 return dist.get_world_size() def get_rank(): if not is_dist_avail_and_initialized(): return 0 return dist.get_rank() def is_main_process(): return get_rank() == 0 def init_distributed_mode(args): if "RANK" in os.environ and "WORLD_SIZE" in os.environ: args.rank = int(os.environ["RANK"]) args.world_size = int(os.environ["WORLD_SIZE"]) args.gpu = int(os.environ["LOCAL_RANK"]) elif "SLURM_PROCID" in os.environ: args.rank = int(os.environ["SLURM_PROCID"]) args.gpu = args.rank % torch.cuda.device_count() else: print("Not using distributed mode") args.distributed = False return args.distributed = True torch.cuda.set_device(args.gpu) args.dist_backend = "nccl" print( "| distributed init (rank {}, world {}): {}".format(args.rank, args.world_size, args.dist_url), flush=True, ) torch.distributed.init_process_group( backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank, timeout=datetime.timedelta(days=365), # allow auto-downloading and de-compressing ) torch.distributed.barrier() setup_for_distributed(args.rank == 0) def get_dist_info(): if torch.__version__ < "1.0": initialized = dist._initialized else: initialized = dist.is_initialized() if initialized: rank = dist.get_rank() world_size = dist.get_world_size() else: # non-distributed training rank = 0 world_size = 1 return rank, world_size def main_process(func): @functools.wraps(func) def wrapper(*args, **kwargs): rank, _ = get_dist_info() if rank == 0: return func(*args, **kwargs) return wrapper def download_cached_file(url, check_hash=True, progress=False): """ Download a file from a URL and cache it locally. If the file already exists, it is not downloaded again. If distributed, only the main process downloads the file, and the other processes wait for the file to be downloaded. """ def get_cached_file_path(): # a hack to sync the file path across processes parts = torch.hub.urlparse(url) filename = os.path.basename(parts.path) cached_file = os.path.join(timm_hub.get_cache_dir(), filename) return cached_file if is_main_process(): timm_hub.download_cached_file(url, check_hash, progress) if is_dist_avail_and_initialized(): dist.barrier() return get_cached_file_path()
3D-LLM-main
3DLLM_BLIP2-base/lavis/common/dist_utils.py
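A sketch of the single-process fallbacks and the main_process decorator, assuming the lavis package and timm are importable; the checkpoint path is arbitrary:

from lavis.common.dist_utils import get_rank, get_world_size, is_main_process, main_process

# Without torch.distributed initialised, the helpers fall back to rank 0 / world size 1.
print(get_rank(), get_world_size(), is_main_process())  # 0 1 True

# main_process runs the wrapped function only on rank 0 (useful for checkpointing and logging).
@main_process
def save_checkpoint(path):
    print(f"would save checkpoint to {path}")

save_checkpoint("/tmp/ckpt.pth")  # runs here, and would be skipped on non-zero ranks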
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ __author__ = "aagrawal"
3D-LLM-main
3DLLM_BLIP2-base/lavis/common/vqa_tools/__init__.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ # coding=utf-8 __author__ = "aagrawal" # This code is based on the code written by Tsung-Yi Lin for MSCOCO Python API available at the following link: # (https://github.com/tylin/coco-caption/blob/master/pycocoevalcap/eval.py). import sys import re class VQAEval: def __init__(self, vqa=None, vqaRes=None, n=2): self.n = n self.accuracy = {} self.evalQA = {} self.evalQuesType = {} self.evalAnsType = {} self.vqa = vqa self.vqaRes = vqaRes if vqa is not None: self.params = {"question_id": vqa.getQuesIds()} self.contractions = { "aint": "ain't", "arent": "aren't", "cant": "can't", "couldve": "could've", "couldnt": "couldn't", "couldn'tve": "couldn't've", "couldnt've": "couldn't've", "didnt": "didn't", "doesnt": "doesn't", "dont": "don't", "hadnt": "hadn't", "hadnt've": "hadn't've", "hadn'tve": "hadn't've", "hasnt": "hasn't", "havent": "haven't", "hed": "he'd", "hed've": "he'd've", "he'dve": "he'd've", "hes": "he's", "howd": "how'd", "howll": "how'll", "hows": "how's", "Id've": "I'd've", "I'dve": "I'd've", "Im": "I'm", "Ive": "I've", "isnt": "isn't", "itd": "it'd", "itd've": "it'd've", "it'dve": "it'd've", "itll": "it'll", "let's": "let's", "maam": "ma'am", "mightnt": "mightn't", "mightnt've": "mightn't've", "mightn'tve": "mightn't've", "mightve": "might've", "mustnt": "mustn't", "mustve": "must've", "neednt": "needn't", "notve": "not've", "oclock": "o'clock", "oughtnt": "oughtn't", "ow's'at": "'ow's'at", "'ows'at": "'ow's'at", "'ow'sat": "'ow's'at", "shant": "shan't", "shed've": "she'd've", "she'dve": "she'd've", "she's": "she's", "shouldve": "should've", "shouldnt": "shouldn't", "shouldnt've": "shouldn't've", "shouldn'tve": "shouldn't've", "somebody'd": "somebodyd", "somebodyd've": "somebody'd've", "somebody'dve": "somebody'd've", "somebodyll": "somebody'll", "somebodys": "somebody's", "someoned": "someone'd", "someoned've": "someone'd've", "someone'dve": "someone'd've", "someonell": "someone'll", "someones": "someone's", "somethingd": "something'd", "somethingd've": "something'd've", "something'dve": "something'd've", "somethingll": "something'll", "thats": "that's", "thered": "there'd", "thered've": "there'd've", "there'dve": "there'd've", "therere": "there're", "theres": "there's", "theyd": "they'd", "theyd've": "they'd've", "they'dve": "they'd've", "theyll": "they'll", "theyre": "they're", "theyve": "they've", "twas": "'twas", "wasnt": "wasn't", "wed've": "we'd've", "we'dve": "we'd've", "weve": "we've", "werent": "weren't", "whatll": "what'll", "whatre": "what're", "whats": "what's", "whatve": "what've", "whens": "when's", "whered": "where'd", "wheres": "where's", "whereve": "where've", "whod": "who'd", "whod've": "who'd've", "who'dve": "who'd've", "wholl": "who'll", "whos": "who's", "whove": "who've", "whyll": "why'll", "whyre": "why're", "whys": "why's", "wont": "won't", "wouldve": "would've", "wouldnt": "wouldn't", "wouldnt've": "wouldn't've", "wouldn'tve": "wouldn't've", "yall": "y'all", "yall'll": "y'all'll", "y'allll": "y'all'll", "yall'd've": "y'all'd've", "y'alld've": "y'all'd've", "y'all'dve": "y'all'd've", "youd": "you'd", "youd've": "you'd've", "you'dve": "you'd've", "youll": "you'll", "youre": "you're", "youve": "you've", } self.manualMap = { "none": "0", "zero": "0", "one": "1", "two": "2", "three": "3", "four": "4", "five": "5", "six": "6", "seven": "7", "eight": 
"8", "nine": "9", "ten": "10", } self.articles = ["a", "an", "the"] self.periodStrip = re.compile("(?!<=\d)(\.)(?!\d)") self.commaStrip = re.compile("(\d)(,)(\d)") self.punct = [ ";", r"/", "[", "]", '"', "{", "}", "(", ")", "=", "+", "\\", "_", "-", ">", "<", "@", "`", ",", "?", "!", ] def evaluate(self, quesIds=None): if quesIds == None: quesIds = [quesId for quesId in self.params["question_id"]] gts = {} res = {} for quesId in quesIds: gts[quesId] = self.vqa.qa[quesId] res[quesId] = self.vqaRes.qa[quesId] # ================================================= # Compute accuracy # ================================================= accQA = [] accQuesType = {} accAnsType = {} print("computing accuracy") step = 0 for quesId in quesIds: resAns = res[quesId]["answer"] resAns = resAns.replace("\n", " ") resAns = resAns.replace("\t", " ") resAns = resAns.strip() resAns = self.processPunctuation(resAns) resAns = self.processDigitArticle(resAns) gtAcc = [] gtAnswers = [ans["answer"] for ans in gts[quesId]["answers"]] if len(set(gtAnswers)) > 1: for ansDic in gts[quesId]["answers"]: ansDic["answer"] = self.processPunctuation(ansDic["answer"]) for gtAnsDatum in gts[quesId]["answers"]: otherGTAns = [item for item in gts[quesId]["answers"] if item != gtAnsDatum] matchingAns = [item for item in otherGTAns if item["answer"] == resAns] acc = min(1, float(len(matchingAns)) / 3) gtAcc.append(acc) quesType = gts[quesId]["question_type"] ansType = gts[quesId]["answer_type"] avgGTAcc = float(sum(gtAcc)) / len(gtAcc) accQA.append(avgGTAcc) if quesType not in accQuesType: accQuesType[quesType] = [] accQuesType[quesType].append(avgGTAcc) if ansType not in accAnsType: accAnsType[ansType] = [] accAnsType[ansType].append(avgGTAcc) self.setEvalQA(quesId, avgGTAcc) self.setEvalQuesType(quesId, quesType, avgGTAcc) self.setEvalAnsType(quesId, ansType, avgGTAcc) if step % 100 == 0: self.updateProgress(step / float(len(quesIds))) step = step + 1 self.setAccuracy(accQA, accQuesType, accAnsType) print("Done computing accuracy") def processPunctuation(self, inText): outText = inText for p in self.punct: if (p + " " in inText or " " + p in inText) or (re.search(self.commaStrip, inText) != None): outText = outText.replace(p, "") else: outText = outText.replace(p, " ") outText = self.periodStrip.sub("", outText, re.UNICODE) return outText def processDigitArticle(self, inText): outText = [] tempText = inText.lower().split() for word in tempText: word = self.manualMap.setdefault(word, word) if word not in self.articles: outText.append(word) else: pass for wordId, word in enumerate(outText): if word in self.contractions: outText[wordId] = self.contractions[word] outText = " ".join(outText) return outText def setAccuracy(self, accQA, accQuesType, accAnsType): self.accuracy["overall"] = round(100 * float(sum(accQA)) / len(accQA), self.n) self.accuracy["perQuestionType"] = { quesType: round( 100 * float(sum(accQuesType[quesType])) / len(accQuesType[quesType]), self.n, ) for quesType in accQuesType } self.accuracy["perAnswerType"] = { ansType: round(100 * float(sum(accAnsType[ansType])) / len(accAnsType[ansType]), self.n) for ansType in accAnsType } def setEvalQA(self, quesId, acc): self.evalQA[quesId] = round(100 * acc, self.n) def setEvalQuesType(self, quesId, quesType, acc): if quesType not in self.evalQuesType: self.evalQuesType[quesType] = {} self.evalQuesType[quesType][quesId] = round(100 * acc, self.n) def setEvalAnsType(self, quesId, ansType, acc): if ansType not in self.evalAnsType: self.evalAnsType[ansType] = {} 
self.evalAnsType[ansType][quesId] = round(100 * acc, self.n) def updateProgress(self, progress): barLength = 20 status = "" if isinstance(progress, int): progress = float(progress) if not isinstance(progress, float): progress = 0 status = "error: progress var must be float\r\n" if progress < 0: progress = 0 status = "Halt...\r\n" if progress >= 1: progress = 1 status = "Done...\r\n" block = int(round(barLength * progress)) text = "\rFinished Percent: [{0}] {1}% {2}".format( "#" * block + "-" * (barLength - block), int(progress * 100), status ) sys.stdout.write(text) sys.stdout.flush()
3D-LLM-main
3DLLM_BLIP2-base/lavis/common/vqa_tools/vqa_eval.py
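A sketch of the answer normalisation used inside the accuracy computation; VQAEval is constructed without annotations here, which the constructor allows, and the example answer is made up:

from lavis.common.vqa_tools.vqa_eval import VQAEval

evaluator = VQAEval(vqa=None, vqaRes=None, n=2)

ans = "Isnt it two?"
ans = evaluator.processPunctuation(ans)   # punctuation replaced -> "Isnt it two "
ans = evaluator.processDigitArticle(ans)  # lowercase, map number words to digits, expand contractions
print(ans)  # isn't it 2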
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ __author__ = "aagrawal" __version__ = "0.9" # Interface for accessing the VQA dataset. # This code is based on the code written by Tsung-Yi Lin for MSCOCO Python API available at the following link: # (https://github.com/pdollar/coco/blob/master/PythonAPI/pycocotools/coco.py). # The following functions are defined: # VQA - VQA class that loads VQA annotation file and prepares data structures. # getQuesIds - Get question ids that satisfy given filter conditions. # getImgIds - Get image ids that satisfy given filter conditions. # loadQA - Load questions and answers with the specified question ids. # showQA - Display the specified questions and answers. # loadRes - Load result file and create result object. # Help on each function can be accessed by: "help(COCO.function)" import json import datetime import copy class VQA: def __init__(self, annotation_file=None, question_file=None): """ Constructor of VQA helper class for reading and visualizing questions and answers. :param annotation_file (str): location of VQA annotation file :return: """ # load dataset self.dataset = {} self.questions = {} self.qa = {} self.qqa = {} self.imgToQA = {} if not annotation_file == None and not question_file == None: print("loading VQA annotations and questions into memory...") time_t = datetime.datetime.utcnow() dataset = json.load(open(annotation_file, "r")) questions = json.load(open(question_file, "r")) self.dataset = dataset self.questions = questions self.createIndex() def createIndex(self): # create index print("creating index...") imgToQA = {ann["image_id"]: [] for ann in self.dataset["annotations"]} qa = {ann["question_id"]: [] for ann in self.dataset["annotations"]} qqa = {ann["question_id"]: [] for ann in self.dataset["annotations"]} for ann in self.dataset["annotations"]: imgToQA[ann["image_id"]] += [ann] qa[ann["question_id"]] = ann for ques in self.questions["questions"]: qqa[ques["question_id"]] = ques print("index created!") # create class members self.qa = qa self.qqa = qqa self.imgToQA = imgToQA def info(self): """ Print information about the VQA annotation file. :return: """ for key, value in self.datset["info"].items(): print("%s: %s" % (key, value)) def getQuesIds(self, imgIds=[], quesTypes=[], ansTypes=[]): """ Get question ids that satisfy given filter conditions. 
default skips that filter :param imgIds (int array) : get question ids for given imgs quesTypes (str array) : get question ids for given question types ansTypes (str array) : get question ids for given answer types :return: ids (int array) : integer array of question ids """ imgIds = imgIds if type(imgIds) == list else [imgIds] quesTypes = quesTypes if type(quesTypes) == list else [quesTypes] ansTypes = ansTypes if type(ansTypes) == list else [ansTypes] if len(imgIds) == len(quesTypes) == len(ansTypes) == 0: anns = self.dataset["annotations"] else: if not len(imgIds) == 0: anns = sum( [self.imgToQA[imgId] for imgId in imgIds if imgId in self.imgToQA], [], ) else: anns = self.dataset["annotations"] anns = anns if len(quesTypes) == 0 else [ann for ann in anns if ann["question_type"] in quesTypes] anns = anns if len(ansTypes) == 0 else [ann for ann in anns if ann["answer_type"] in ansTypes] ids = [ann["question_id"] for ann in anns] return ids def getImgIds(self, quesIds=[], quesTypes=[], ansTypes=[]): """ Get image ids that satisfy given filter conditions. default skips that filter :param quesIds (int array) : get image ids for given question ids quesTypes (str array) : get image ids for given question types ansTypes (str array) : get image ids for given answer types :return: ids (int array) : integer array of image ids """ quesIds = quesIds if type(quesIds) == list else [quesIds] quesTypes = quesTypes if type(quesTypes) == list else [quesTypes] ansTypes = ansTypes if type(ansTypes) == list else [ansTypes] if len(quesIds) == len(quesTypes) == len(ansTypes) == 0: anns = self.dataset["annotations"] else: if not len(quesIds) == 0: anns = sum([self.qa[quesId] for quesId in quesIds if quesId in self.qa], []) else: anns = self.dataset["annotations"] anns = anns if len(quesTypes) == 0 else [ann for ann in anns if ann["question_type"] in quesTypes] anns = anns if len(ansTypes) == 0 else [ann for ann in anns if ann["answer_type"] in ansTypes] ids = [ann["image_id"] for ann in anns] return ids def loadQA(self, ids=[]): """ Load questions and answers with the specified question ids. :param ids (int array) : integer ids specifying question ids :return: qa (object array) : loaded qa objects """ if type(ids) == list: return [self.qa[id] for id in ids] elif type(ids) == int: return [self.qa[ids]] def showQA(self, anns): """ Display the specified annotations. :param anns (array of object): annotations to display :return: None """ if len(anns) == 0: return 0 for ann in anns: quesId = ann["question_id"] print("Question: %s" % (self.qqa[quesId]["question"])) for ans in ann["answers"]: print("Answer %d: %s" % (ans["answer_id"], ans["answer"])) def loadRes(self, resFile, quesFile): """ Load result file and return a result object. :param resFile (str) : file name of result file :return: res (obj) : result api object """ res = VQA() res.questions = json.load(open(quesFile)) res.dataset["info"] = copy.deepcopy(self.questions["info"]) res.dataset["task_type"] = copy.deepcopy(self.questions["task_type"]) res.dataset["data_type"] = copy.deepcopy(self.questions["data_type"]) res.dataset["data_subtype"] = copy.deepcopy(self.questions["data_subtype"]) res.dataset["license"] = copy.deepcopy(self.questions["license"]) print("Loading and preparing results... 
") time_t = datetime.datetime.utcnow() anns = json.load(open(resFile)) assert type(anns) == list, "results is not an array of objects" annsQuesIds = [ann["question_id"] for ann in anns] assert set(annsQuesIds) == set( self.getQuesIds() ), "Results do not correspond to current VQA set. Either the results do not have predictions for all question ids in annotation file or there is atleast one question id that does not belong to the question ids in the annotation file." for ann in anns: quesId = ann["question_id"] if res.dataset["task_type"] == "Multiple Choice": assert ( ann["answer"] in self.qqa[quesId]["multiple_choices"] ), "predicted answer is not one of the multiple choices" qaAnn = self.qa[quesId] ann["image_id"] = qaAnn["image_id"] ann["question_type"] = qaAnn["question_type"] ann["answer_type"] = qaAnn["answer_type"] print("DONE (t=%0.2fs)" % ((datetime.datetime.utcnow() - time_t).total_seconds())) res.dataset["annotations"] = anns res.createIndex() return res
3D-LLM-main
3DLLM_BLIP2-base/lavis/common/vqa_tools/vqa.py
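A minimal end-to-end sketch of the VQA helper, assuming the lavis package is importable; the tiny annotation and question files below are hypothetical but follow the expected layout:

import json
from lavis.common.vqa_tools.vqa import VQA

anns = {"annotations": [{"image_id": 1, "question_id": 10,
                         "question_type": "is the", "answer_type": "yes/no",
                         "answers": [{"answer": "yes", "answer_id": 1}]}]}
ques = {"questions": [{"image_id": 1, "question_id": 10, "question": "Is the cat black?"}]}
with open("/tmp/anns.json", "w") as f:
    json.dump(anns, f)
with open("/tmp/ques.json", "w") as f:
    json.dump(ques, f)

vqa = VQA("/tmp/anns.json", "/tmp/ques.json")
print(vqa.getQuesIds(imgIds=[1]))  # [10]
vqa.showQA(vqa.loadQA([10]))       # Question: Is the cat black? / Answer 1: yes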
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ from lavis.runners.runner_base import RunnerBase from lavis.runners.runner_iter import RunnerIter __all__ = ["RunnerBase", "RunnerIter"]
3D-LLM-main
3DLLM_BLIP2-base/lavis/runners/__init__.py
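This __init__.py only re-exports the two runner classes, so downstream code can import them from the package root; a trivial sketch:

from lavis.runners import RunnerBase, RunnerIter

# Both names come from __all__. The concrete runner is typically chosen via the run
# config and resolved through the registry decorators seen in runner_base.py and
# runner_iter.py.
print(RunnerBase, RunnerIter)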
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import datetime import json import logging import os import time from pathlib import Path import torch import torch.distributed as dist import webdataset as wds from lavis.common.dist_utils import ( download_cached_file, get_rank, get_world_size, is_main_process, main_process, ) from lavis.common.registry import registry from lavis.common.utils import is_url from lavis.datasets.data_utils import concat_datasets, reorg_datasets_by_split from lavis.datasets.datasets.dataloader_utils import ( IterLoader, MultiIterLoader, PrefetchLoader, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.data import DataLoader, DistributedSampler from torch.utils.data.dataset import ChainDataset @registry.register_runner("runner_base") class RunnerBase: """ A runner class to train and evaluate a model given a task and datasets. The runner uses pytorch distributed data parallel by default. Future release will support other distributed frameworks. """ def __init__(self, cfg, task, model, datasets, job_id): self.config = cfg self.job_id = job_id self.task = task self.datasets = datasets self._model = model self._wrapped_model = None self._device = None self._optimizer = None self._scaler = None self._dataloaders = None self._lr_sched = None self.start_epoch = 0 # self.setup_seeds() self.setup_output_dir() @property def device(self): if self._device is None: self._device = torch.device(self.config.run_cfg.device) return self._device @property def use_distributed(self): return self.config.run_cfg.distributed @property def model(self): """ A property to get the DDP-wrapped model on the device. """ # move model to device if self._model.device != self.device: self._model = self._model.to(self.device) # distributed training wrapper if self.use_distributed: if self._wrapped_model is None: self._wrapped_model = DDP( self._model, device_ids=[self.config.run_cfg.gpu], find_unused_parameters=False ) else: self._wrapped_model = self._model return self._wrapped_model @property def optimizer(self): # TODO make optimizer class and configurations if self._optimizer is None: num_parameters = 0 p_wd, p_non_wd = [], [] for n, p in self.model.named_parameters(): if not p.requires_grad: continue # frozen weights if p.ndim < 2 or "bias" in n or "ln" in n or "bn" in n: p_non_wd.append(p) else: p_wd.append(p) num_parameters += p.data.nelement() logging.info("number of trainable parameters: %d" % num_parameters) optim_params = [ { "params": p_wd, "weight_decay": float(self.config.run_cfg.weight_decay), }, {"params": p_non_wd, "weight_decay": 0}, ] beta2 = self.config.run_cfg.get("beta2", 0.999) self._optimizer = torch.optim.AdamW( optim_params, lr=float(self.config.run_cfg.init_lr), weight_decay=float(self.config.run_cfg.weight_decay), betas=(0.9, beta2), ) return self._optimizer @property def scaler(self): amp = self.config.run_cfg.get("amp", False) if amp: if self._scaler is None: self._scaler = torch.cuda.amp.GradScaler() return self._scaler @property def lr_scheduler(self): """ A property to get and create learning rate scheduler by split just in need. 
""" if self._lr_sched is None: lr_sched_cls = registry.get_lr_scheduler_class(self.config.run_cfg.lr_sched) # max_epoch = self.config.run_cfg.max_epoch max_epoch = self.max_epoch # min_lr = self.config.run_cfg.min_lr min_lr = self.min_lr # init_lr = self.config.run_cfg.init_lr init_lr = self.init_lr # optional parameters decay_rate = self.config.run_cfg.get("lr_decay_rate", None) warmup_start_lr = self.config.run_cfg.get("warmup_lr", -1) warmup_steps = self.config.run_cfg.get("warmup_steps", 0) self._lr_sched = lr_sched_cls( optimizer=self.optimizer, max_epoch=max_epoch, min_lr=min_lr, init_lr=init_lr, decay_rate=decay_rate, warmup_start_lr=warmup_start_lr, warmup_steps=warmup_steps, ) return self._lr_sched @property def dataloaders(self) -> dict: """ A property to get and create dataloaders by split just in need. If no train_dataset_ratio is provided, concatenate map-style datasets and chain wds.DataPipe datasets separately. Training set becomes a tuple (ConcatDataset, ChainDataset), both are optional but at least one of them is required. The resultant ConcatDataset and ChainDataset will be sampled evenly. If train_dataset_ratio is provided, create a MultiIterLoader to sample each dataset by ratios during training. Currently do not support multiple datasets for validation and test. Returns: dict: {split_name: (tuples of) dataloader} """ if self._dataloaders is None: # reoganize datasets by split and concatenate/chain if necessary dataset_ratios = self.config.run_cfg.get("train_dataset_ratios", None) # concatenate map-style datasets and chain wds.DataPipe datasets separately # training set becomes a tuple (ConcatDataset, ChainDataset), both are # optional but at least one of them is required. The resultant ConcatDataset # and ChainDataset will be sampled evenly. logging.info( "dataset_ratios not specified, datasets will be concatenated (map-style datasets) or chained (webdataset.DataPipeline)." 
) datasets = reorg_datasets_by_split(self.datasets) self.datasets = concat_datasets(datasets) # print dataset statistics after concatenation/chaining for split_name in self.datasets: if isinstance(self.datasets[split_name], tuple) or isinstance(self.datasets[split_name], list): # mixed wds.DataPipeline and torch.utils.data.Dataset num_records = sum( [ len(d) if not type(d) in [wds.DataPipeline, ChainDataset] else 0 for d in self.datasets[split_name] ] ) else: if hasattr(self.datasets[split_name], "__len__"): # a single map-style dataset num_records = len(self.datasets[split_name]) else: # a single wds.DataPipeline num_records = -1 logging.info("Only a single wds.DataPipeline dataset, no __len__ attribute.") if num_records >= 0: logging.info("Loaded {} records for {} split from the dataset.".format(num_records, split_name)) # create dataloaders split_names = sorted(self.datasets.keys()) datasets = [self.datasets[split] for split in split_names] is_trains = [split in self.train_splits for split in split_names] batch_sizes = [ self.config.run_cfg.batch_size_train if split == "train" else self.config.run_cfg.batch_size_eval for split in split_names ] collate_fns = [] for dataset in datasets: if isinstance(dataset, tuple) or isinstance(dataset, list): collate_fns.append([getattr(d, "collater", None) for d in dataset]) else: collate_fns.append(getattr(dataset, "collater", None)) dataloaders = self.create_loaders( datasets=datasets, num_workers=self.config.run_cfg.num_workers, batch_sizes=batch_sizes, is_trains=is_trains, collate_fns=collate_fns, dataset_ratios=dataset_ratios, ) self._dataloaders = {k: v for k, v in zip(split_names, dataloaders)} return self._dataloaders @property def cuda_enabled(self): return self.device.type == "cuda" @property def max_epoch(self): return int(self.config.run_cfg.max_epoch) @property def log_freq(self): log_freq = self.config.run_cfg.get("log_freq", 50) return int(log_freq) @property def init_lr(self): return float(self.config.run_cfg.init_lr) @property def min_lr(self): return float(self.config.run_cfg.min_lr) @property def accum_grad_iters(self): return int(self.config.run_cfg.get("accum_grad_iters", 1)) @property def valid_splits(self): valid_splits = self.config.run_cfg.get("valid_splits", []) if len(valid_splits) == 0: logging.info("No validation splits found.") return valid_splits @property def test_splits(self): test_splits = self.config.run_cfg.get("test_splits", []) return test_splits @property def train_splits(self): train_splits = self.config.run_cfg.get("train_splits", []) if len(train_splits) == 0: logging.info("Empty train splits.") return train_splits @property def evaluate_only(self): """ Set to True to skip training. 
""" return self.config.run_cfg.evaluate @property def use_dist_eval_sampler(self): return self.config.run_cfg.get("use_dist_eval_sampler", True) @property def resume_ckpt_path(self): return self.config.run_cfg.get("resume_ckpt_path", None) @property def train_loader(self): train_dataloader = self.dataloaders["train"] return train_dataloader def setup_output_dir(self): lib_root = Path(registry.get_path("library_root")) output_dir = lib_root / self.config.run_cfg.output_dir / self.job_id result_dir = output_dir / "result" output_dir.mkdir(parents=True, exist_ok=True) result_dir.mkdir(parents=True, exist_ok=True) registry.register_path("result_dir", str(result_dir)) registry.register_path("output_dir", str(output_dir)) self.result_dir = result_dir self.output_dir = output_dir def train(self): start_time = time.time() best_agg_metric = 0 best_epoch = 0 self.log_config() # resume from checkpoint if specified if not self.evaluate_only and self.resume_ckpt_path is not None: self._load_checkpoint(self.resume_ckpt_path) for cur_epoch in range(self.start_epoch, self.max_epoch): # training phase if not self.evaluate_only: logging.info("Start training") train_stats = self.train_epoch(cur_epoch) self.log_stats(split_name="train", stats=train_stats) # evaluation phase if len(self.valid_splits) > 0: for split_name in self.valid_splits: logging.info("Evaluating on {}.".format(split_name)) val_log = self.eval_epoch(split_name=split_name, cur_epoch=cur_epoch) self._save_checkpoint(cur_epoch, is_best=False) if self.evaluate_only: break dist.barrier() total_time = time.time() - start_time total_time_str = str(datetime.timedelta(seconds=int(total_time))) logging.info("Training time {}".format(total_time_str)) def evaluate(self, cur_epoch="best", skip_reload=False): test_logs = dict() if len(self.test_splits) > 0: for split_name in self.test_splits: test_logs[split_name] = self.eval_epoch( split_name=split_name, cur_epoch=cur_epoch, skip_reload=skip_reload ) return test_logs def train_epoch(self, epoch): # train self.model.train() return self.task.train_epoch( epoch=epoch, model=self.model, data_loader=self.train_loader, optimizer=self.optimizer, scaler=self.scaler, lr_scheduler=self.lr_scheduler, cuda_enabled=self.cuda_enabled, log_freq=self.log_freq, accum_grad_iters=self.accum_grad_iters, ) @torch.no_grad() def eval_epoch(self, split_name, cur_epoch, skip_reload=False): """ Evaluate the model on a given split. Args: split_name (str): name of the split to evaluate on. cur_epoch (int): current epoch. skip_reload_best (bool): whether to skip reloading the best checkpoint. During training, we will reload the best checkpoint for validation. During testing, we will use provided weights and skip reloading the best checkpoint . 
""" data_loader = self.dataloaders.get(split_name, None) assert data_loader, "data_loader for split {} is None.".format(split_name) # TODO In validation, you need to compute loss as well as metrics # TODO consider moving to model.before_evaluation() model = self.unwrap_dist_model(self.model) if not skip_reload and cur_epoch == "best": model = self._reload_best_model(model) model.eval() self.task.before_evaluation( model=model, dataset=self.datasets[split_name], ) results = self.task.evaluation(model, data_loader) if results is not None: return self.task.after_evaluation( val_result=results, split_name=split_name, epoch=cur_epoch, ) def unwrap_dist_model(self, model): if self.use_distributed: return model.module else: return model def create_loaders( self, datasets, num_workers, batch_sizes, is_trains, collate_fns, dataset_ratios=None, ): """ Create dataloaders for training and validation. """ def _create_loader(dataset, num_workers, bsz, is_train, collate_fn): # create a single dataloader for each split if isinstance(dataset, ChainDataset) or isinstance(dataset, wds.DataPipeline): # wds.WebdDataset instance are chained together # webdataset.DataPipeline has its own sampler and collate_fn loader = iter( DataLoader( dataset, batch_size=bsz, num_workers=num_workers, pin_memory=True, ) ) else: # map-style dataset are concatenated together # setup distributed sampler if self.use_distributed: sampler = DistributedSampler( dataset, shuffle=is_train, num_replicas=get_world_size(), rank=get_rank(), ) if not self.use_dist_eval_sampler: # e.g. retrieval evaluation sampler = sampler if is_train else None else: sampler = None loader = DataLoader( dataset, batch_size=bsz, num_workers=num_workers, pin_memory=True, sampler=sampler, shuffle=sampler is None and is_train, collate_fn=collate_fn, drop_last=True if is_train else False, ) loader = PrefetchLoader(loader) if is_train: loader = IterLoader(loader, use_distributed=self.use_distributed) return loader loaders = [] for dataset, bsz, is_train, collate_fn in zip(datasets, batch_sizes, is_trains, collate_fns): if isinstance(dataset, list) or isinstance(dataset, tuple): loader = MultiIterLoader( loaders=[ _create_loader(d, num_workers, bsz, is_train, collate_fn[i]) for i, d in enumerate(dataset) ], ratios=dataset_ratios, ) else: loader = _create_loader(dataset, num_workers, bsz, is_train, collate_fn) loaders.append(loader) return loaders @main_process def _save_checkpoint(self, cur_epoch, is_best=False): """ Save the checkpoint at the current epoch. """ model_no_ddp = self.unwrap_dist_model(self.model) param_grad_dic = {k: v.requires_grad for (k, v) in model_no_ddp.named_parameters()} state_dict = model_no_ddp.state_dict() for k in list(state_dict.keys()): if k in param_grad_dic.keys() and not param_grad_dic[k]: # delete parameters that do not require gradient del state_dict[k] save_obj = { "model": state_dict, "optimizer": self.optimizer.state_dict(), "config": self.config.to_dict(), "scaler": self.scaler.state_dict() if self.scaler else None, "epoch": cur_epoch, } save_to = os.path.join( self.output_dir, "checkpoint_{}.pth".format("best" if is_best else cur_epoch), ) logging.info("Saving checkpoint at epoch {} to {}.".format(cur_epoch, save_to)) torch.save(save_obj, save_to) def _reload_best_model(self, model): """ Load the best checkpoint for evaluation. 
""" checkpoint_path = os.path.join(self.output_dir, "checkpoint_best.pth") logging.info("Loading checkpoint from {}.".format(checkpoint_path)) checkpoint = torch.load(checkpoint_path, map_location="cpu") try: model.load_state_dict(checkpoint["model"]) except RuntimeError as e: logging.warning( """ Key mismatch when loading checkpoint. This is expected if only part of the model is saved. Trying to load the model with strict=False. """ ) model.load_state_dict(checkpoint["model"], strict=False) return model def _load_checkpoint(self, url_or_filename): """ Resume from a checkpoint. """ if is_url(url_or_filename): cached_file = download_cached_file(url_or_filename, check_hash=False, progress=True) checkpoint = torch.load(cached_file, map_location=self.device) elif os.path.isfile(url_or_filename): checkpoint = torch.load(url_or_filename, map_location=self.device) else: raise RuntimeError("checkpoint url or path is invalid") state_dict = checkpoint["model"] self.unwrap_dist_model(self.model).load_state_dict(state_dict, strict=False) self.optimizer.load_state_dict(checkpoint["optimizer"]) if self.scaler and "scaler" in checkpoint: self.scaler.load_state_dict(checkpoint["scaler"]) self.start_epoch = checkpoint["epoch"] + 1 logging.info("Resume checkpoint from {}".format(url_or_filename)) @main_process def log_stats(self, stats, split_name): if isinstance(stats, dict): log_stats = {**{f"{split_name}_{k}": v for k, v in stats.items()}} with open(os.path.join(self.output_dir, "log.txt"), "a") as f: f.write(json.dumps(log_stats) + "\n") elif isinstance(stats, list): pass @main_process def log_config(self): with open(os.path.join(self.output_dir, "log.txt"), "a") as f: f.write(json.dumps(self.config.to_dict(), indent=4) + "\n")
3D-LLM-main
3DLLM_BLIP2-base/lavis/runners/runner_base.py
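One self-contained piece of RunnerBase worth isolating is the optimizer property's parameter grouping: 1-D tensors and any parameter whose name contains bias/ln/bn are excluded from weight decay. Below is a minimal standalone sketch of that grouping; the toy module, learning rate, and weight-decay value are illustrative, not taken from a LAVIS config.

import torch
import torch.nn as nn

# Toy module standing in for the wrapped model; layer names are illustrative.
model = nn.Sequential(nn.Linear(16, 32), nn.LayerNorm(32), nn.Linear(32, 4))

p_wd, p_non_wd = [], []
for n, p in model.named_parameters():
    if not p.requires_grad:
        continue  # frozen weights are skipped entirely
    # 1-D tensors (biases, norm scales) and names containing bias/ln/bn get no weight decay.
    if p.ndim < 2 or "bias" in n or "ln" in n or "bn" in n:
        p_non_wd.append(p)
    else:
        p_wd.append(p)

optimizer = torch.optim.AdamW(
    [
        {"params": p_wd, "weight_decay": 0.05},  # illustrative value
        {"params": p_non_wd, "weight_decay": 0.0},
    ],
    lr=1e-4,  # illustrative value
    betas=(0.9, 0.999),
)
print(sum(p.numel() for g in optimizer.param_groups for p in g["params"]), "trainable parameters")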
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import datetime import logging import os import time import torch import torch.distributed as dist import webdataset as wds from lavis.common.dist_utils import download_cached_file, is_main_process, main_process from lavis.common.registry import registry from lavis.common.utils import is_url from lavis.datasets.data_utils import concat_datasets, reorg_datasets_by_split from lavis.runners.runner_base import RunnerBase from torch.utils.data.dataset import ChainDataset @registry.register_runner("runner_iter") class RunnerIter(RunnerBase): """ Run training based on the number of iterations. This is common when the training dataset size is large. Underhood logic is similar to epoch-based training by considering every #iters_per_inner_epoch as an inner epoch. In iter-based runner, after every #iters_per_inner_epoch steps, we 1) do a validation epoch; 2) schedule the learning rate; 3) save the checkpoint. We refer every #iters_per_inner_epoch steps as an inner epoch. """ def __init__(self, cfg, task, model, datasets, job_id): super().__init__(cfg, task, model, datasets, job_id) self.start_iters = 0 self.max_iters = int(self.config.run_cfg.get("max_iters", -1)) assert self.max_iters > 0, "max_iters must be greater than 0." self.iters_per_inner_epoch = int(self.config.run_cfg.get("iters_per_inner_epoch", -1)) assert self.iters_per_inner_epoch > 0, "iters_per_inner_epoch must be greater than 0." @property def max_epoch(self): return int(self.max_iters / self.iters_per_inner_epoch) @property def cur_epoch(self): try: return self.train_loader.epoch except AttributeError: # pipeline data (e.g. LAION) is streaming, have no concept of epoch return 0 def _progress(self, cur_iters): return "{}_iters={}".format(self.cur_epoch, cur_iters) def train(self): start_time = time.time() best_agg_metric = 0 best_iters = 0 self.log_config() # resume from checkpoint if specified if not self.evaluate_only and self.resume_ckpt_path is not None: self._load_checkpoint(self.resume_ckpt_path) for start_iters in range(self.start_iters, self.max_iters, self.iters_per_inner_epoch): end_iters = start_iters + self.iters_per_inner_epoch # training phase if not self.evaluate_only: logging.info( "Start training, max_iters={}, in total {} inner epochs.".format( self.max_iters, int(self.max_iters / self.iters_per_inner_epoch) ) ) train_stats = self.train_iters(self.cur_epoch, start_iters) self.log_stats(split_name="train", stats=train_stats) # evaluation phase if len(self.valid_splits) > 0: for split_name in self.valid_splits: logging.info("Evaluating on {}.".format(split_name)) val_log = self.eval_epoch(split_name=split_name, cur_epoch=self._progress(end_iters)) if val_log is not None: if is_main_process(): assert "agg_metrics" in val_log, "No agg_metrics found in validation log." agg_metrics = val_log["agg_metrics"] if agg_metrics > best_agg_metric and split_name == "val": best_iters, best_agg_metric = end_iters, agg_metrics self._save_checkpoint(end_iters, is_best=True) val_log.update({"best_iters": best_iters}) self.log_stats(val_log, split_name) else: # if no validation split is provided, we just save the checkpoint at the end of each inner epoch. 
if not self.evaluate_only: self._save_checkpoint(end_iters, is_best=False) if self.evaluate_only: break dist.barrier() # testing phase self.evaluate(cur_epoch=self.cur_epoch) total_time = time.time() - start_time total_time_str = str(datetime.timedelta(seconds=int(total_time))) logging.info("Training time {}".format(total_time_str)) def train_iters(self, epoch, start_iters): # train by iterations self.model.train() return self.task.train_iters( epoch=epoch, start_iters=start_iters, iters_per_inner_epoch=self.iters_per_inner_epoch, model=self.model, data_loader=self.train_loader, optimizer=self.optimizer, scaler=self.scaler, lr_scheduler=self.lr_scheduler, cuda_enabled=self.cuda_enabled, log_freq=self.log_freq, accum_grad_iters=self.accum_grad_iters, ) @main_process def _save_checkpoint(self, cur_iters, is_best=False): save_obj = { "model": self.unwrap_dist_model(self.model).state_dict(), "optimizer": self.optimizer.state_dict(), "config": self.config.to_dict(), "scaler": self.scaler.state_dict() if self.scaler else None, "iters": cur_iters, } save_to = os.path.join( self.output_dir, "checkpoint_{}.pth".format("best" if is_best else cur_iters), ) logging.info("Saving checkpoint at iters {} to {}.".format(cur_iters, save_to)) torch.save(save_obj, save_to) def _load_checkpoint(self, url_or_filename): """ Resume from a checkpoint. """ if is_url(url_or_filename): cached_file = download_cached_file(url_or_filename, check_hash=False, progress=True) checkpoint = torch.load(cached_file, map_location=self.device) elif os.path.isfile(url_or_filename): checkpoint = torch.load(url_or_filename, map_location=self.device) else: raise RuntimeError("checkpoint url or path is invalid") state_dict = checkpoint["model"] self.unwrap_dist_model(self.model).load_state_dict(state_dict) self.optimizer.load_state_dict(checkpoint["optimizer"]) if self.scaler and "scaler" in checkpoint: self.scaler.load_state_dict(checkpoint["scaler"]) self.start_iters = checkpoint["iters"] + 1 logging.info("Resume checkpoint from {}".format(url_or_filename)) @property def dataloaders(self) -> dict: """ A property to get and create dataloaders by split just in need. If no train_dataset_ratio is provided, concatenate map-style datasets and chain wds.DataPipe datasets separately. Training set becomes a tuple (ConcatDataset, ChainDataset), both are optional but at least one of them is required. The resultant ConcatDataset and ChainDataset will be sampled evenly. If train_dataset_ratio is provided, create a MultiIterLoader to sample each dataset by ratios during training. Currently do not support multiple datasets for validation and test. Returns: dict: {split_name: (tuples of) dataloader} """ if self._dataloaders is None: # reoganize datasets by split and concatenate/chain if necessary dataset_ratios = self.config.run_cfg.get("train_dataset_ratios", None) if dataset_ratios is None: # concatenate map-style datasets and chain wds.DataPipe datasets separately # training set becomes a tuple (ConcatDataset, ChainDataset), both are # optional but at least one of them is required. The resultant ConcatDataset # and ChainDataset will be sampled evenly. logging.info( "dataset_ratios not specified, datasets will be concatenated (map-style datasets) or chained (webdataset.DataPipeline)." 
) datasets = reorg_datasets_by_split(self.datasets) self.datasets = concat_datasets(datasets) else: # create multi-loader with the provided ratios, without concatenating or chaining missing_keys = [k for k in dataset_ratios if k not in self.datasets] if len(missing_keys) > 0: raise ValueError("Datasets with the following split names are not found: {}".format(missing_keys)) unexpected_keys = [k for k in self.datasets if k not in dataset_ratios] if len(unexpected_keys) > 0: raise ValueError( "Datasets with the following split names are not expected: {}".format(unexpected_keys) ) dataset_ratios = [float(dataset_ratios[k]) for k in self.datasets] self.datasets = reorg_datasets_by_split(self.datasets) # to keep the same structure as return value of concat_datasets self.datasets = {k: v[0] if len(v) == 1 else v for k, v in datasets.items()} # print dataset statistics after concatenation/chaining for split_name in self.datasets: if isinstance(self.datasets[split_name], tuple) or isinstance(self.datasets[split_name], list): # mixed wds.DataPipeline and torch.utils.data.Dataset num_records = sum( [ len(d) if not type(d) in [wds.DataPipeline, ChainDataset] else 0 for d in self.datasets[split_name] ] ) else: try: # a single map-style dataset num_records = len(self.datasets[split_name]) except TypeError: # a single wds.DataPipeline or ChainDataset num_records = -1 logging.info("Only a single wds.DataPipeline dataset, no __len__ attribute.") if num_records >= 0: logging.info("Loaded {} records for {} split from the dataset.".format(num_records, split_name)) # create dataloaders split_names = sorted(self.datasets.keys()) datasets = [self.datasets[split] for split in split_names] is_trains = [split in self.train_splits for split in split_names] batch_sizes = [ self.config.run_cfg.batch_size_train if split == "train" else self.config.run_cfg.batch_size_eval for split in split_names ] collate_fns = [] for dataset in datasets: if isinstance(dataset, tuple) or isinstance(dataset, list): collate_fns.append([getattr(d, "collater", None) for d in dataset]) else: collate_fns.append(getattr(dataset, "collater", None)) dataloaders = self.create_loaders( datasets=datasets, num_workers=self.config.run_cfg.num_workers, batch_sizes=batch_sizes, is_trains=is_trains, collate_fns=collate_fns, dataset_ratios=dataset_ratios, ) self._dataloaders = {k: v for k, v in zip(split_names, dataloaders)} return self._dataloaders
3D-LLM-main
3DLLM_BLIP2-base/lavis/runners/runner_iter.py
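RunnerIter's schedule is easiest to see with concrete numbers: max_iters is cut into inner epochs of iters_per_inner_epoch steps, and each inner epoch ends with validation and a checkpoint tagged via _progress(). The values below are illustrative only.

# Sketch of the inner-epoch schedule used by RunnerIter (numbers are made up).
max_iters = 10000
iters_per_inner_epoch = 2500

inner_epochs = max_iters // iters_per_inner_epoch  # what max_epoch returns
for start_iters in range(0, max_iters, iters_per_inner_epoch):
    end_iters = start_iters + iters_per_inner_epoch
    # train_iters(cur_epoch, start_iters) would run here, followed by eval_epoch
    # with cur_epoch set to the "{epoch}_iters={end_iters}" progress tag.
    print(f"inner epoch ends at iter {end_iters} of {max_iters} ({inner_epochs} inner epochs total)")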
# Copyright (c) Facebook, Inc. and its affiliates. # Copied from: https://github.com/facebookresearch/detectron2/blob/master/demo/predictor.py import atexit import bisect import multiprocessing as mp from collections import deque import cv2 import torch from detectron2.data import MetadataCatalog from detectron2.engine.defaults import DefaultPredictor from detectron2.utils.video_visualizer import VideoVisualizer from detectron2.utils.visualizer import ColorMode, Visualizer class VisualizationDemo(object): def __init__(self, cfg, instance_mode=ColorMode.IMAGE, parallel=False): """ Args: cfg (CfgNode): instance_mode (ColorMode): parallel (bool): whether to run the model in different processes from visualization. Useful since the visualization logic can be slow. """ self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else "__unused") self.cpu_device = torch.device("cpu") self.instance_mode = instance_mode self.parallel = parallel if parallel: num_gpu = torch.cuda.device_count() self.predictor = AsyncPredictor(cfg, num_gpus=num_gpu) else: self.predictor = DefaultPredictor(cfg) def run_on_image(self, image): """ Args: image (np.ndarray): an image of shape (H, W, C) (in BGR order). This is the format used by OpenCV. Returns: predictions (dict): the output of the model. vis_output (VisImage): the visualized image output. """ vis_output = None predictions = self.predictor(image) print(predictions) # Convert image from OpenCV BGR format to Matplotlib RGB format. image = image[:, :, ::-1] visualizer = Visualizer(image, self.metadata, instance_mode=self.instance_mode) if "panoptic_seg" in predictions: panoptic_seg, segments_info = predictions["panoptic_seg"] vis_output = visualizer.draw_panoptic_seg_predictions(panoptic_seg.to(self.cpu_device), segments_info) else: if "sem_seg" in predictions: vis_output = visualizer.draw_sem_seg(predictions["sem_seg"].argmax(dim=0).to(self.cpu_device)) if "instances" in predictions: instances = predictions["instances"].to(self.cpu_device) vis_output = visualizer.draw_instance_predictions(predictions=instances) return predictions, vis_output def _frame_from_video(self, video): while video.isOpened(): success, frame = video.read() if success: yield frame else: break def run_on_video(self, video): """ Visualizes predictions on frames of the input video. Args: video (cv2.VideoCapture): a :class:`VideoCapture` object, whose source can be either a webcam or a video file. Yields: ndarray: BGR visualizations of each video frame. 
""" video_visualizer = VideoVisualizer(self.metadata, self.instance_mode) def process_predictions(frame, predictions): frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) if "panoptic_seg" in predictions: panoptic_seg, segments_info = predictions["panoptic_seg"] vis_frame = video_visualizer.draw_panoptic_seg_predictions( frame, panoptic_seg.to(self.cpu_device), segments_info ) elif "instances" in predictions: predictions = predictions["instances"].to(self.cpu_device) vis_frame = video_visualizer.draw_instance_predictions(frame, predictions) elif "sem_seg" in predictions: vis_frame = video_visualizer.draw_sem_seg( frame, predictions["sem_seg"].argmax(dim=0).to(self.cpu_device) ) # Converts Matplotlib RGB format to OpenCV BGR format vis_frame = cv2.cvtColor(vis_frame.get_image(), cv2.COLOR_RGB2BGR) return vis_frame frame_gen = self._frame_from_video(video) if self.parallel: buffer_size = self.predictor.default_buffer_size frame_data = deque() for cnt, frame in enumerate(frame_gen): frame_data.append(frame) self.predictor.put(frame) if cnt >= buffer_size: frame = frame_data.popleft() predictions = self.predictor.get() yield process_predictions(frame, predictions) while len(frame_data): frame = frame_data.popleft() predictions = self.predictor.get() yield process_predictions(frame, predictions) else: for frame in frame_gen: yield process_predictions(frame, self.predictor(frame)) class AsyncPredictor: """ A predictor that runs the model asynchronously, possibly on >1 GPUs. Because rendering the visualization takes considerably amount of time, this helps improve throughput a little bit when rendering videos. """ class _StopToken: pass class _PredictWorker(mp.Process): def __init__(self, cfg, task_queue, result_queue): self.cfg = cfg self.task_queue = task_queue self.result_queue = result_queue super().__init__() def run(self): predictor = DefaultPredictor(self.cfg) while True: task = self.task_queue.get() if isinstance(task, AsyncPredictor._StopToken): break idx, data = task result = predictor(data) self.result_queue.put((idx, result)) def __init__(self, cfg, num_gpus: int = 1): """ Args: cfg (CfgNode): num_gpus (int): if 0, will run on CPU """ num_workers = max(num_gpus, 1) self.task_queue = mp.Queue(maxsize=num_workers * 3) self.result_queue = mp.Queue(maxsize=num_workers * 3) self.procs = [] for gpuid in range(max(num_gpus, 1)): cfg = cfg.clone() cfg.defrost() cfg.MODEL.DEVICE = "cuda:{}".format(gpuid) if num_gpus > 0 else "cpu" self.procs.append(AsyncPredictor._PredictWorker(cfg, self.task_queue, self.result_queue)) self.put_idx = 0 self.get_idx = 0 self.result_rank = [] self.result_data = [] for p in self.procs: p.start() atexit.register(self.shutdown) def put(self, image): self.put_idx += 1 self.task_queue.put((self.put_idx, image)) def get(self): self.get_idx += 1 # the index needed for this request if len(self.result_rank) and self.result_rank[0] == self.get_idx: res = self.result_data[0] del self.result_data[0], self.result_rank[0] return res while True: # make sure the results are returned in the correct order idx, res = self.result_queue.get() if idx == self.get_idx: return res insert = bisect.bisect(self.result_rank, idx) self.result_rank.insert(insert, idx) self.result_data.insert(insert, res) def __len__(self): return self.put_idx - self.get_idx def __call__(self, image): self.put(image) return self.get() def shutdown(self): for _ in self.procs: self.task_queue.put(AsyncPredictor._StopToken()) @property def default_buffer_size(self): return len(self.procs) * 5
3D-LLM-main
three_steps_3d_feature/first_step/predictor.py
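The subtle part of AsyncPredictor is how get() hands results back in submission order even though workers finish out of order: completed items are buffered by request index with bisect and released only when their index matches the next expected one. The sketch below replays that logic against a fake, out-of-order arrival queue; the data is made up and no detectron2 or multiprocessing is needed.

import bisect
from collections import deque

# Simulated worker output: (request_index, result), deliberately out of order.
arrival_queue = deque([(2, "res-2"), (1, "res-1"), (4, "res-4"), (3, "res-3")])

result_rank, result_data = [], []  # sorted buffer of indices / results waiting to be released
get_idx = 0                        # index of the next result the caller expects

ordered = []
while len(ordered) < 4:
    get_idx += 1
    # Fast path: the next expected result is already buffered.
    if result_rank and result_rank[0] == get_idx:
        ordered.append(result_data.pop(0))
        result_rank.pop(0)
        continue
    # Otherwise keep pulling from the (out-of-order) queue until it shows up,
    # buffering everything else in sorted position.
    while True:
        idx, res = arrival_queue.popleft()
        if idx == get_idx:
            ordered.append(res)
            break
        insert = bisect.bisect(result_rank, idx)
        result_rank.insert(insert, idx)
        result_data.insert(insert, res)

print(ordered)  # ['res-1', 'res-2', 'res-3', 'res-4']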