python_code | repo_name | file_path
---|---|---|
import torch
import types
import os
from tqdm import tqdm
import numpy as np
from detectron2.config import get_cfg
from detectron2.data.detection_utils import read_image
from detectron2.projects.deeplab import add_deeplab_config
import glob
from mask2former import add_maskformer2_config
from predictor import VisualizationDemo
import matplotlib
import matplotlib.pyplot as plt
import argparse
import json
def setup_cfg(args):
# load config from file and command-line arguments
cfg = get_cfg()
add_deeplab_config(cfg)
add_maskformer2_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
return cfg
MASK2FORMER_CONFIG_FILE = "./maskformer2_swin_large_IN21k_384_bs16_100ep.yaml"
MASK2FORMER_WEIGHTS_FILE = "./model_final_e5f453.pkl"
if __name__ == "__main__":
torch.autograd.set_grad_enabled(False)
parser = argparse.ArgumentParser(description="Specify dirs")
parser.add_argument("--scene_dir_path", default="./masked_rdp_data/", type=str)
parser.add_argument("--save_dir_path", default="./maskformer_masks/", type=str)
args = parser.parse_args()
scene_dir = args.scene_dir_path
save_dir = args.save_dir_path
os.makedirs(os.path.join(save_dir), exist_ok=True)
for scan in tqdm(os.listdir(scene_dir)):
os.makedirs(os.path.join(save_dir, scan), exist_ok=True)
rgb_list = glob.glob(os.path.join(scene_dir, scan, "*png"))
for img2_idx in range(len(rgb_list)):
IMGFILE = os.path.join(scene_dir, scan, str(img2_idx) + ".png")
MASK_LOAD_FILE = os.path.join(save_dir, scan, str(img2_idx) + ".pt")
LOAD_IMG_HEIGHT = 512
LOAD_IMG_WIDTH = 512
cfgargs = types.SimpleNamespace()
cfgargs.imgfile = IMGFILE
cfgargs.config_file = MASK2FORMER_CONFIG_FILE
cfgargs.opts = ["MODEL.WEIGHTS", MASK2FORMER_WEIGHTS_FILE]
cfg = setup_cfg(cfgargs)
demo = VisualizationDemo(cfg)
img = read_image(IMGFILE, format="BGR")
predictions, visualized_output = demo.run_on_image(img)
masks = torch.nn.functional.interpolate(
predictions["instances"].pred_masks.unsqueeze(0), [LOAD_IMG_HEIGHT, LOAD_IMG_WIDTH], mode="nearest"
)
masks = masks.half()
torch.save(masks[0].detach().cpu(), MASK_LOAD_FILE)
| 3D-LLM-main | three_steps_3d_feature/first_step/maskformer_mask.py |
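A minimal sketch (not part of the repository) of reading the masks saved by the script above; the scene and frame names are hypothetical examples.
import torch

# Each file holds the interpolated instance masks for one frame:
# a float16 tensor of shape [num_instances, 512, 512] with values in {0, 1}.
masks = torch.load("./maskformer_masks/scene_0/0.pt")
binary_masks = masks > 0.5  # boolean per-instance masks
print(masks.shape, masks.dtype, binary_masks.sum(dim=(1, 2)))  # per-instance pixel counts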
import os
from pathlib import Path
import cv2
import numpy as np
import open_clip
import torch
from segment_anything import SamAutomaticMaskGenerator, SamPredictor, sam_model_registry
from tqdm import tqdm, trange
import glob
import argparse
def main():
parser = argparse.ArgumentParser(description="Specify dirs")
parser.add_argument("--scene_dir_path", default="./masked_rdp_data/", type=str)
parser.add_argument("--save_dir_path", default="./sam_masks/", type=str)
args = parser.parse_args()
torch.autograd.set_grad_enabled(False)
sam = sam_model_registry["vit_h"](checkpoint=Path("sam_vit_h_4b8939.pth"))
sam.to(device="cuda")
mask_generator = SamAutomaticMaskGenerator(
model=sam,
points_per_side=8,
pred_iou_thresh=0.92,
crop_n_layers=1,
crop_n_points_downscale_factor=2,
)
save_dir = args.save_dir_path
os.makedirs(save_dir, exist_ok=True)
print("Extracting SAM masks...")
room_list = os.listdir(args.scene_dir_path)
# NOTE: the OpenCLIP model below is loaded but not used further in this script.
model, _, preprocess = open_clip.create_model_and_transforms("ViT-H-14", "laion2b_s32b_b79k")
model.cuda()
model.eval()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("Device:", device)
dataset_dir = args.scene_dir_path
for room in tqdm(os.listdir(dataset_dir)):
os.makedirs(save_dir + room, exist_ok=True)
dataset_path = dataset_dir + room + "/*png"
data_list = glob.glob(dataset_path)
for img_name in data_list:
img_base_name = os.path.basename(img_name)
try:
savefile = os.path.join(
save_dir,
room,
os.path.basename(img_name).replace(".png", ".pt"),
)
if os.path.exists(savefile):
continue
imgfile = img_name
img = cv2.imread(imgfile)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
masks = mask_generator.generate(img)
cur_mask = masks[0]["segmentation"]
_savefile = os.path.join(
save_dir,
room,
os.path.splitext(os.path.basename(imgfile))[0] + ".pt",
)
mask_list = []
for mask_item in masks:
mask_list.append(mask_item["segmentation"])
mask_np = np.asarray(mask_list)
mask_torch = torch.from_numpy(mask_np)
torch.save(mask_torch, _savefile)
except:
pass
if __name__ == "__main__":
main()
| 3D-LLM-main | three_steps_3d_feature/first_step/sam_mask.py |
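A minimal sketch (not part of the repository) of reading the SAM masks saved above; the room and image names are hypothetical examples.
import torch

masks = torch.load("./sam_masks/room_0/0.pt")      # boolean tensor, shape [num_masks, H, W]
areas = masks.view(masks.shape[0], -1).sum(dim=1)  # pixel area of each mask
print(masks.shape, areas.tolist())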
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import logging
from itertools import count
import numpy as np
import torch
from fvcore.transforms import HFlipTransform
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from detectron2.data.detection_utils import read_image
from detectron2.modeling import DatasetMapperTTA
__all__ = [
"SemanticSegmentorWithTTA",
]
class SemanticSegmentorWithTTA(nn.Module):
"""
A SemanticSegmentor with test-time augmentation enabled.
Its :meth:`__call__` method has the same interface as :meth:`SemanticSegmentor.forward`.
"""
def __init__(self, cfg, model, tta_mapper=None, batch_size=1):
"""
Args:
cfg (CfgNode):
model (SemanticSegmentor): a SemanticSegmentor to apply TTA on.
tta_mapper (callable): takes a dataset dict and returns a list of
augmented versions of the dataset dict. Defaults to
`DatasetMapperTTA(cfg)`.
batch_size (int): batch the augmented images into this batch size for inference.
"""
super().__init__()
if isinstance(model, DistributedDataParallel):
model = model.module
self.cfg = cfg.clone()
self.model = model
if tta_mapper is None:
tta_mapper = DatasetMapperTTA(cfg)
self.tta_mapper = tta_mapper
self.batch_size = batch_size
def __call__(self, batched_inputs):
"""
Same input/output format as :meth:`SemanticSegmentor.forward`
"""
def _maybe_read_image(dataset_dict):
ret = copy.copy(dataset_dict)
if "image" not in ret:
image = read_image(ret.pop("file_name"), self.model.input_format)
image = torch.from_numpy(np.ascontiguousarray(image.transpose(2, 0, 1))) # CHW
ret["image"] = image
if "height" not in ret and "width" not in ret:
ret["height"] = image.shape[1]
ret["width"] = image.shape[2]
return ret
processed_results = []
for x in batched_inputs:
result = self._inference_one_image(_maybe_read_image(x))
processed_results.append(result)
return processed_results
def _inference_one_image(self, input):
"""
Args:
input (dict): one dataset dict with "image" field being a CHW tensor
Returns:
dict: one output dict
"""
orig_shape = (input["height"], input["width"])
augmented_inputs, tfms = self._get_augmented_inputs(input)
final_predictions = None
count_predictions = 0
for input, tfm in zip(augmented_inputs, tfms):
count_predictions += 1
with torch.no_grad():
if final_predictions is None:
if any(isinstance(t, HFlipTransform) for t in tfm.transforms):
final_predictions = self.model([input])[0].pop("sem_seg").flip(dims=[2])
else:
final_predictions = self.model([input])[0].pop("sem_seg")
else:
if any(isinstance(t, HFlipTransform) for t in tfm.transforms):
final_predictions += self.model([input])[0].pop("sem_seg").flip(dims=[2])
else:
final_predictions += self.model([input])[0].pop("sem_seg")
final_predictions = final_predictions / count_predictions
return {"sem_seg": final_predictions}
def _get_augmented_inputs(self, input):
augmented_inputs = self.tta_mapper(input)
tfms = [x.pop("transforms") for x in augmented_inputs]
return augmented_inputs, tfms
| 3D-LLM-main | three_steps_3d_feature/first_step/mask2former/test_time_augmentation.py |
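A minimal usage sketch, assuming a built SemanticSegmentor `model` and its `cfg` are already available; the file name and sizes are hypothetical examples.
tta_model = SemanticSegmentorWithTTA(cfg, model)
outputs = tta_model([{"file_name": "example.png", "height": 512, "width": 512}])
print(outputs[0]["sem_seg"].shape)  # per-pixel class scores averaged over the augmentations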
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
from detectron2.config import CfgNode as CN
def add_maskformer2_config(cfg):
"""
Add config for MASK_FORMER.
"""
# NOTE: configs from original maskformer
# data config
# select the dataset mapper
cfg.INPUT.DATASET_MAPPER_NAME = "mask_former_semantic"
# Color augmentation
cfg.INPUT.COLOR_AUG_SSD = False
# We retry random cropping until no single category in semantic segmentation GT occupies more
# than `SINGLE_CATEGORY_MAX_AREA` part of the crop.
cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA = 1.0
# Pad image and segmentation GT in dataset mapper.
cfg.INPUT.SIZE_DIVISIBILITY = -1
# solver config
# weight decay on embedding
cfg.SOLVER.WEIGHT_DECAY_EMBED = 0.0
# optimizer
cfg.SOLVER.OPTIMIZER = "ADAMW"
cfg.SOLVER.BACKBONE_MULTIPLIER = 0.1
# mask_former model config
cfg.MODEL.MASK_FORMER = CN()
# loss
cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION = True
cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT = 0.1
cfg.MODEL.MASK_FORMER.CLASS_WEIGHT = 1.0
cfg.MODEL.MASK_FORMER.DICE_WEIGHT = 1.0
cfg.MODEL.MASK_FORMER.MASK_WEIGHT = 20.0
# transformer config
cfg.MODEL.MASK_FORMER.NHEADS = 8
cfg.MODEL.MASK_FORMER.DROPOUT = 0.1
cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD = 2048
cfg.MODEL.MASK_FORMER.ENC_LAYERS = 0
cfg.MODEL.MASK_FORMER.DEC_LAYERS = 6
cfg.MODEL.MASK_FORMER.PRE_NORM = False
cfg.MODEL.MASK_FORMER.HIDDEN_DIM = 256
cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES = 100
cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE = "res5"
cfg.MODEL.MASK_FORMER.ENFORCE_INPUT_PROJ = False
# mask_former inference config
cfg.MODEL.MASK_FORMER.TEST = CN()
cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON = True
cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON = False
cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON = False
cfg.MODEL.MASK_FORMER.TEST.OBJECT_MASK_THRESHOLD = 0.0
cfg.MODEL.MASK_FORMER.TEST.OVERLAP_THRESHOLD = 0.0
cfg.MODEL.MASK_FORMER.TEST.SEM_SEG_POSTPROCESSING_BEFORE_INFERENCE = False
# Sometimes `backbone.size_divisibility` is set to 0 for some backbones (e.g. ResNet);
# you can use this config to override it.
cfg.MODEL.MASK_FORMER.SIZE_DIVISIBILITY = 32
# pixel decoder config
cfg.MODEL.SEM_SEG_HEAD.MASK_DIM = 256
# adding transformer in pixel decoder
cfg.MODEL.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS = 0
# pixel decoder
cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME = "BasePixelDecoder"
# swin transformer backbone
cfg.MODEL.SWIN = CN()
cfg.MODEL.SWIN.PRETRAIN_IMG_SIZE = 224
cfg.MODEL.SWIN.PATCH_SIZE = 4
cfg.MODEL.SWIN.EMBED_DIM = 96
cfg.MODEL.SWIN.DEPTHS = [2, 2, 6, 2]
cfg.MODEL.SWIN.NUM_HEADS = [3, 6, 12, 24]
cfg.MODEL.SWIN.WINDOW_SIZE = 7
cfg.MODEL.SWIN.MLP_RATIO = 4.0
cfg.MODEL.SWIN.QKV_BIAS = True
cfg.MODEL.SWIN.QK_SCALE = None
cfg.MODEL.SWIN.DROP_RATE = 0.0
cfg.MODEL.SWIN.ATTN_DROP_RATE = 0.0
cfg.MODEL.SWIN.DROP_PATH_RATE = 0.3
cfg.MODEL.SWIN.APE = False
cfg.MODEL.SWIN.PATCH_NORM = True
cfg.MODEL.SWIN.OUT_FEATURES = ["res2", "res3", "res4", "res5"]
cfg.MODEL.SWIN.USE_CHECKPOINT = False
# NOTE: maskformer2 extra configs
# transformer module
cfg.MODEL.MASK_FORMER.TRANSFORMER_DECODER_NAME = "MultiScaleMaskedTransformerDecoder"
# LSJ aug
cfg.INPUT.IMAGE_SIZE = 1024
cfg.INPUT.MIN_SCALE = 0.1
cfg.INPUT.MAX_SCALE = 2.0
# MSDeformAttn encoder configs
cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_IN_FEATURES = ["res3", "res4", "res5"]
cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_POINTS = 4
cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_HEADS = 8
# point loss configs
# Number of points sampled during training for a mask point head.
cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS = 112 * 112
# Oversampling parameter for PointRend point sampling during training. Parameter `k` in the
# original paper.
cfg.MODEL.MASK_FORMER.OVERSAMPLE_RATIO = 3.0
# Importance sampling parameter for PointRend point sampling during training. Parameter `beta` in
# the original paper.
cfg.MODEL.MASK_FORMER.IMPORTANCE_SAMPLE_RATIO = 0.75
| 3D-LLM-main | three_steps_3d_feature/first_step/mask2former/config.py |
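A minimal sketch of plugging add_maskformer2_config into a detectron2 config, mirroring setup_cfg() in maskformer_mask.py above; the YAML file name is the one used there.
from detectron2.config import get_cfg
from detectron2.projects.deeplab import add_deeplab_config
from mask2former import add_maskformer2_config

cfg = get_cfg()
add_deeplab_config(cfg)
add_maskformer2_config(cfg)
cfg.merge_from_file("./maskformer2_swin_large_IN21k_384_bs16_100ep.yaml")
cfg.freeze()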
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import Tuple
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.data import MetadataCatalog
from detectron2.modeling import META_ARCH_REGISTRY, build_backbone, build_sem_seg_head
from detectron2.modeling.backbone import Backbone
from detectron2.modeling.postprocessing import sem_seg_postprocess
from detectron2.structures import Boxes, ImageList, Instances, BitMasks
from detectron2.utils.memory import retry_if_cuda_oom
from .modeling.criterion import SetCriterion
from .modeling.matcher import HungarianMatcher
@META_ARCH_REGISTRY.register()
class MaskFormer(nn.Module):
"""
Main class for mask classification semantic segmentation architectures.
"""
@configurable
def __init__(
self,
*,
backbone: Backbone,
sem_seg_head: nn.Module,
criterion: nn.Module,
num_queries: int,
object_mask_threshold: float,
overlap_threshold: float,
metadata,
size_divisibility: int,
sem_seg_postprocess_before_inference: bool,
pixel_mean: Tuple[float],
pixel_std: Tuple[float],
# inference
semantic_on: bool,
panoptic_on: bool,
instance_on: bool,
test_topk_per_image: int,
):
"""
Args:
backbone: a backbone module, must follow detectron2's backbone interface
sem_seg_head: a module that predicts semantic segmentation from backbone features
criterion: a module that defines the loss
num_queries: int, number of queries
object_mask_threshold: float, threshold to filter query based on classification score
for panoptic segmentation inference
overlap_threshold: overlap threshold used in general inference for panoptic segmentation
metadata: dataset meta, get `thing` and `stuff` category names for panoptic
segmentation inference
size_divisibility: Some backbones require the input height and width to be divisible by a
specific integer. We can use this to override such requirement.
sem_seg_postprocess_before_inference: whether to resize the prediction back
to original input size before semantic segmentation inference or after.
For a high-resolution dataset like Mapillary, resizing predictions before
inference will cause an OOM error.
pixel_mean, pixel_std: list or tuple with #channels element, representing
the per-channel mean and std to be used to normalize the input image
semantic_on: bool, whether to output semantic segmentation prediction
instance_on: bool, whether to output instance segmentation prediction
panoptic_on: bool, whether to output panoptic segmentation prediction
test_topk_per_image: int, instance segmentation parameter, keep topk instances per image
"""
super().__init__()
self.backbone = backbone
self.sem_seg_head = sem_seg_head
self.criterion = criterion
self.num_queries = num_queries
self.overlap_threshold = overlap_threshold
self.object_mask_threshold = object_mask_threshold
self.metadata = metadata
if size_divisibility < 0:
# use backbone size_divisibility if not set
size_divisibility = self.backbone.size_divisibility
self.size_divisibility = size_divisibility
self.sem_seg_postprocess_before_inference = sem_seg_postprocess_before_inference
self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False)
self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False)
# additional args
self.semantic_on = semantic_on
self.instance_on = instance_on
self.panoptic_on = panoptic_on
self.test_topk_per_image = test_topk_per_image
if not self.semantic_on:
assert self.sem_seg_postprocess_before_inference
@classmethod
def from_config(cls, cfg):
backbone = build_backbone(cfg)
sem_seg_head = build_sem_seg_head(cfg, backbone.output_shape())
# Loss parameters:
deep_supervision = cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION
no_object_weight = cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT
# loss weights
class_weight = cfg.MODEL.MASK_FORMER.CLASS_WEIGHT
dice_weight = cfg.MODEL.MASK_FORMER.DICE_WEIGHT
mask_weight = cfg.MODEL.MASK_FORMER.MASK_WEIGHT
# building criterion
matcher = HungarianMatcher(
cost_class=class_weight,
cost_mask=mask_weight,
cost_dice=dice_weight,
num_points=cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS,
)
weight_dict = {"loss_ce": class_weight, "loss_mask": mask_weight, "loss_dice": dice_weight}
if deep_supervision:
dec_layers = cfg.MODEL.MASK_FORMER.DEC_LAYERS
aux_weight_dict = {}
for i in range(dec_layers - 1):
aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()})
weight_dict.update(aux_weight_dict)
losses = ["labels", "masks"]
criterion = SetCriterion(
sem_seg_head.num_classes,
matcher=matcher,
weight_dict=weight_dict,
eos_coef=no_object_weight,
losses=losses,
num_points=cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS,
oversample_ratio=cfg.MODEL.MASK_FORMER.OVERSAMPLE_RATIO,
importance_sample_ratio=cfg.MODEL.MASK_FORMER.IMPORTANCE_SAMPLE_RATIO,
)
return {
"backbone": backbone,
"sem_seg_head": sem_seg_head,
"criterion": criterion,
"num_queries": cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES,
"object_mask_threshold": cfg.MODEL.MASK_FORMER.TEST.OBJECT_MASK_THRESHOLD,
"overlap_threshold": cfg.MODEL.MASK_FORMER.TEST.OVERLAP_THRESHOLD,
"metadata": MetadataCatalog.get(cfg.DATASETS.TRAIN[0]),
"size_divisibility": cfg.MODEL.MASK_FORMER.SIZE_DIVISIBILITY,
"sem_seg_postprocess_before_inference": (
cfg.MODEL.MASK_FORMER.TEST.SEM_SEG_POSTPROCESSING_BEFORE_INFERENCE
or cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON
or cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON
),
"pixel_mean": cfg.MODEL.PIXEL_MEAN,
"pixel_std": cfg.MODEL.PIXEL_STD,
# inference
"semantic_on": cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON,
"instance_on": cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON,
"panoptic_on": cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON,
"test_topk_per_image": cfg.TEST.DETECTIONS_PER_IMAGE,
}
@property
def device(self):
return self.pixel_mean.device
def forward(self, batched_inputs):
"""
Args:
batched_inputs: a list, batched outputs of :class:`DatasetMapper`.
Each item in the list contains the inputs for one image.
For now, each item in the list is a dict that contains:
* "image": Tensor, image in (C, H, W) format.
* "instances": per-region ground truth
* Other information that's included in the original dicts, such as:
"height", "width" (int): the output resolution of the model (may be different
from input resolution), used in inference.
Returns:
list[dict]:
each dict has the results for one image. The dict contains the following keys:
* "sem_seg":
A Tensor that represents the
per-pixel segmentation predicted by the head.
The prediction has shape KxHxW that represents the logits of
each class for each pixel.
* "panoptic_seg":
A tuple that represents the panoptic output
panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment.
segments_info (list[dict]): Describe each segment in `panoptic_seg`.
Each dict contains keys "id", "category_id", "isthing".
"""
images = [x["image"].to(self.device) for x in batched_inputs]
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, self.size_divisibility)
features = self.backbone(images.tensor)
outputs = self.sem_seg_head(features)
if self.training:
# mask classification target
if "instances" in batched_inputs[0]:
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
targets = self.prepare_targets(gt_instances, images)
else:
targets = None
# bipartite matching-based loss
losses = self.criterion(outputs, targets)
for k in list(losses.keys()):
if k in self.criterion.weight_dict:
losses[k] *= self.criterion.weight_dict[k]
else:
# remove this loss if not specified in `weight_dict`
losses.pop(k)
return losses
else:
mask_cls_results = outputs["pred_logits"]
mask_pred_results = outputs["pred_masks"]
# upsample masks
mask_pred_results = F.interpolate(
mask_pred_results,
size=(images.tensor.shape[-2], images.tensor.shape[-1]),
mode="bilinear",
align_corners=False,
)
del outputs
processed_results = []
for mask_cls_result, mask_pred_result, input_per_image, image_size in zip(
mask_cls_results, mask_pred_results, batched_inputs, images.image_sizes
):
height = input_per_image.get("height", image_size[0])
width = input_per_image.get("width", image_size[1])
processed_results.append({})
if self.sem_seg_postprocess_before_inference:
mask_pred_result = retry_if_cuda_oom(sem_seg_postprocess)(
mask_pred_result, image_size, height, width
)
mask_cls_result = mask_cls_result.to(mask_pred_result)
# semantic segmentation inference
if self.semantic_on:
r = retry_if_cuda_oom(self.semantic_inference)(mask_cls_result, mask_pred_result)
if not self.sem_seg_postprocess_before_inference:
r = retry_if_cuda_oom(sem_seg_postprocess)(r, image_size, height, width)
processed_results[-1]["sem_seg"] = r
# panoptic segmentation inference
if self.panoptic_on:
panoptic_r = retry_if_cuda_oom(self.panoptic_inference)(mask_cls_result, mask_pred_result)
processed_results[-1]["panoptic_seg"] = panoptic_r
# instance segmentation inference
if self.instance_on:
instance_r = retry_if_cuda_oom(self.instance_inference)(mask_cls_result, mask_pred_result)
processed_results[-1]["instances"] = instance_r
return processed_results
def prepare_targets(self, targets, images):
h_pad, w_pad = images.tensor.shape[-2:]
new_targets = []
for targets_per_image in targets:
# pad gt
gt_masks = targets_per_image.gt_masks
padded_masks = torch.zeros((gt_masks.shape[0], h_pad, w_pad), dtype=gt_masks.dtype, device=gt_masks.device)
padded_masks[:, : gt_masks.shape[1], : gt_masks.shape[2]] = gt_masks
new_targets.append(
{
"labels": targets_per_image.gt_classes,
"masks": padded_masks,
}
)
return new_targets
def semantic_inference(self, mask_cls, mask_pred):
mask_cls = F.softmax(mask_cls, dim=-1)[..., :-1]
mask_pred = mask_pred.sigmoid()
semseg = torch.einsum("qc,qhw->chw", mask_cls, mask_pred)
return semseg
def panoptic_inference(self, mask_cls, mask_pred):
scores, labels = F.softmax(mask_cls, dim=-1).max(-1)
mask_pred = mask_pred.sigmoid()
keep = labels.ne(self.sem_seg_head.num_classes) & (scores > self.object_mask_threshold)
cur_scores = scores[keep]
cur_classes = labels[keep]
cur_masks = mask_pred[keep]
cur_mask_cls = mask_cls[keep]
cur_mask_cls = cur_mask_cls[:, :-1]
cur_prob_masks = cur_scores.view(-1, 1, 1) * cur_masks
h, w = cur_masks.shape[-2:]
panoptic_seg = torch.zeros((h, w), dtype=torch.int32, device=cur_masks.device)
segments_info = []
current_segment_id = 0
if cur_masks.shape[0] == 0:
# We didn't detect any mask :(
return panoptic_seg, segments_info
else:
# take argmax
cur_mask_ids = cur_prob_masks.argmax(0)
stuff_memory_list = {}
for k in range(cur_classes.shape[0]):
pred_class = cur_classes[k].item()
isthing = pred_class in self.metadata.thing_dataset_id_to_contiguous_id.values()
mask_area = (cur_mask_ids == k).sum().item()
original_area = (cur_masks[k] >= 0.5).sum().item()
mask = (cur_mask_ids == k) & (cur_masks[k] >= 0.5)
if mask_area > 0 and original_area > 0 and mask.sum().item() > 0:
if mask_area / original_area < self.overlap_threshold:
continue
# merge stuff regions
if not isthing:
if int(pred_class) in stuff_memory_list.keys():
panoptic_seg[mask] = stuff_memory_list[int(pred_class)]
continue
else:
stuff_memory_list[int(pred_class)] = current_segment_id + 1
current_segment_id += 1
panoptic_seg[mask] = current_segment_id
segments_info.append(
{
"id": current_segment_id,
"isthing": bool(isthing),
"category_id": int(pred_class),
}
)
return panoptic_seg, segments_info
def instance_inference(self, mask_cls, mask_pred):
# mask_pred is already processed to have the same shape as original input
image_size = mask_pred.shape[-2:]
# [Q, K]
scores = F.softmax(mask_cls, dim=-1)[:, :-1]
labels = torch.arange(self.sem_seg_head.num_classes, device=self.device).unsqueeze(0).repeat(self.num_queries, 1).flatten(0, 1)
# scores_per_image, topk_indices = scores.flatten(0, 1).topk(self.num_queries, sorted=False)
scores_per_image, topk_indices = scores.flatten(0, 1).topk(self.test_topk_per_image, sorted=False)
labels_per_image = labels[topk_indices]
topk_indices = topk_indices // self.sem_seg_head.num_classes
# mask_pred = mask_pred.unsqueeze(1).repeat(1, self.sem_seg_head.num_classes, 1).flatten(0, 1)
mask_pred = mask_pred[topk_indices]
# if this is panoptic segmentation, we only keep the "thing" classes
if self.panoptic_on:
keep = torch.zeros_like(scores_per_image).bool()
for i, lab in enumerate(labels_per_image):
keep[i] = lab in self.metadata.thing_dataset_id_to_contiguous_id.values()
scores_per_image = scores_per_image[keep]
labels_per_image = labels_per_image[keep]
mask_pred = mask_pred[keep]
result = Instances(image_size)
# mask (before sigmoid)
result.pred_masks = (mask_pred > 0).float()
result.pred_boxes = Boxes(torch.zeros(mask_pred.size(0), 4))
# Uncomment the following to get boxes from masks (this is slow)
# result.pred_boxes = BitMasks(mask_pred > 0).get_bounding_boxes()
# calculate average mask prob
mask_scores_per_image = (mask_pred.sigmoid().flatten(1) * result.pred_masks.flatten(1)).sum(1) / (result.pred_masks.flatten(1).sum(1) + 1e-6)
result.scores = scores_per_image * mask_scores_per_image
result.pred_classes = labels_per_image
return result
| 3D-LLM-main | three_steps_3d_feature/first_step/mask2former/maskformer_model.py |
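A standalone sketch (made-up shapes) of the einsum used in semantic_inference above: per-query class probabilities are combined with per-query mask probabilities into per-pixel class scores.
import torch

Q, C, H, W = 100, 133, 64, 64                          # queries, classes, height, width (example values)
mask_cls = torch.rand(Q, C + 1).softmax(-1)[..., :-1]  # drop the trailing "no object" class
mask_pred = torch.rand(Q, H, W).sigmoid()
semseg = torch.einsum("qc,qhw->chw", mask_cls, mask_pred)
print(semseg.shape)  # torch.Size([133, 64, 64])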
# Copyright (c) Facebook, Inc. and its affiliates.
from . import data # register all new datasets
from . import modeling
# config
from .config import add_maskformer2_config
# dataset loading
from .data.dataset_mappers.coco_instance_new_baseline_dataset_mapper import COCOInstanceNewBaselineDatasetMapper
from .data.dataset_mappers.coco_panoptic_new_baseline_dataset_mapper import COCOPanopticNewBaselineDatasetMapper
from .data.dataset_mappers.mask_former_instance_dataset_mapper import (
MaskFormerInstanceDatasetMapper,
)
from .data.dataset_mappers.mask_former_panoptic_dataset_mapper import (
MaskFormerPanopticDatasetMapper,
)
from .data.dataset_mappers.mask_former_semantic_dataset_mapper import (
MaskFormerSemanticDatasetMapper,
)
# models
from .maskformer_model import MaskFormer
from .test_time_augmentation import SemanticSegmentorWithTTA
# evaluation
from .evaluation.instance_evaluation import InstanceSegEvaluator
| 3D-LLM-main | three_steps_3d_feature/first_step/mask2former/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/facebookresearch/detr/blob/master/util/misc.py
"""
Misc functions, including distributed helpers.
Mostly copy-paste from torchvision references.
"""
from typing import List, Optional
import torch
import torch.distributed as dist
import torchvision
from torch import Tensor
def _max_by_axis(the_list):
# type: (List[List[int]]) -> List[int]
maxes = the_list[0]
for sublist in the_list[1:]:
for index, item in enumerate(sublist):
maxes[index] = max(maxes[index], item)
return maxes
class NestedTensor(object):
def __init__(self, tensors, mask: Optional[Tensor]):
self.tensors = tensors
self.mask = mask
def to(self, device):
# type: (Device) -> NestedTensor # noqa
cast_tensor = self.tensors.to(device)
mask = self.mask
if mask is not None:
assert mask is not None
cast_mask = mask.to(device)
else:
cast_mask = None
return NestedTensor(cast_tensor, cast_mask)
def decompose(self):
return self.tensors, self.mask
def __repr__(self):
return str(self.tensors)
def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):
# TODO make this more general
if tensor_list[0].ndim == 3:
if torchvision._is_tracing():
# nested_tensor_from_tensor_list() does not export well to ONNX
# call _onnx_nested_tensor_from_tensor_list() instead
return _onnx_nested_tensor_from_tensor_list(tensor_list)
# TODO make it support different-sized images
max_size = _max_by_axis([list(img.shape) for img in tensor_list])
# min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))
batch_shape = [len(tensor_list)] + max_size
b, c, h, w = batch_shape
dtype = tensor_list[0].dtype
device = tensor_list[0].device
tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
mask = torch.ones((b, h, w), dtype=torch.bool, device=device)
for img, pad_img, m in zip(tensor_list, tensor, mask):
pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
m[: img.shape[1], : img.shape[2]] = False
else:
raise ValueError("not supported")
return NestedTensor(tensor, mask)
# _onnx_nested_tensor_from_tensor_list() is an implementation of
# nested_tensor_from_tensor_list() that is supported by ONNX tracing.
@torch.jit.unused
def _onnx_nested_tensor_from_tensor_list(tensor_list: List[Tensor]) -> NestedTensor:
max_size = []
for i in range(tensor_list[0].dim()):
max_size_i = torch.max(
torch.stack([img.shape[i] for img in tensor_list]).to(torch.float32)
).to(torch.int64)
max_size.append(max_size_i)
max_size = tuple(max_size)
# work around for
# pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
# m[: img.shape[1], :img.shape[2]] = False
# which is not yet supported in onnx
padded_imgs = []
padded_masks = []
for img in tensor_list:
padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))]
padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0]))
padded_imgs.append(padded_img)
m = torch.zeros_like(img[0], dtype=torch.int, device=img.device)
padded_mask = torch.nn.functional.pad(m, (0, padding[2], 0, padding[1]), "constant", 1)
padded_masks.append(padded_mask.to(torch.bool))
tensor = torch.stack(padded_imgs)
mask = torch.stack(padded_masks)
return NestedTensor(tensor, mask=mask)
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
| 3D-LLM-main | three_steps_3d_feature/first_step/mask2former/utils/misc.py |
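A standalone sketch (made-up shapes) of batching two differently sized images with nested_tensor_from_tensor_list defined above.
import torch

imgs = [torch.rand(3, 480, 640), torch.rand(3, 512, 512)]
nt = nested_tensor_from_tensor_list(imgs)
tensors, mask = nt.decompose()
print(tensors.shape)  # torch.Size([2, 3, 512, 640]); each image is zero-padded to the max H and W
print(mask.shape)     # torch.Size([2, 512, 640]); True marks padded pixels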
# Copyright (c) Facebook, Inc. and its affiliates.
| 3D-LLM-main | three_steps_3d_feature/first_step/mask2former/utils/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/facebookresearch/detr/blob/master/models/matcher.py
"""
Modules to compute the matching cost and solve the corresponding LSAP.
"""
import torch
import torch.nn.functional as F
from scipy.optimize import linear_sum_assignment
from torch import nn
from torch.cuda.amp import autocast
from detectron2.projects.point_rend.point_features import point_sample
def batch_dice_loss(inputs: torch.Tensor, targets: torch.Tensor):
"""
Compute the DICE loss, similar to generalized IOU for masks
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
"""
inputs = inputs.sigmoid()
inputs = inputs.flatten(1)
numerator = 2 * torch.einsum("nc,mc->nm", inputs, targets)
denominator = inputs.sum(-1)[:, None] + targets.sum(-1)[None, :]
loss = 1 - (numerator + 1) / (denominator + 1)
return loss
batch_dice_loss_jit = torch.jit.script(
batch_dice_loss
) # type: torch.jit.ScriptModule
def batch_sigmoid_ce_loss(inputs: torch.Tensor, targets: torch.Tensor):
"""
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
Returns:
Loss tensor
"""
hw = inputs.shape[1]
pos = F.binary_cross_entropy_with_logits(
inputs, torch.ones_like(inputs), reduction="none"
)
neg = F.binary_cross_entropy_with_logits(
inputs, torch.zeros_like(inputs), reduction="none"
)
loss = torch.einsum("nc,mc->nm", pos, targets) + torch.einsum(
"nc,mc->nm", neg, (1 - targets)
)
return loss / hw
batch_sigmoid_ce_loss_jit = torch.jit.script(
batch_sigmoid_ce_loss
) # type: torch.jit.ScriptModule
class HungarianMatcher(nn.Module):
"""This class computes an assignment between the targets and the predictions of the network
For efficiency reasons, the targets don't include the no_object. Because of this, in general,
there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,
while the others are un-matched (and thus treated as non-objects).
"""
def __init__(self, cost_class: float = 1, cost_mask: float = 1, cost_dice: float = 1, num_points: int = 0):
"""Creates the matcher
Params:
cost_class: This is the relative weight of the classification error in the matching cost
cost_mask: This is the relative weight of the focal loss of the binary mask in the matching cost
cost_dice: This is the relative weight of the dice loss of the binary mask in the matching cost
"""
super().__init__()
self.cost_class = cost_class
self.cost_mask = cost_mask
self.cost_dice = cost_dice
assert cost_class != 0 or cost_mask != 0 or cost_dice != 0, "all costs can't be 0"
self.num_points = num_points
@torch.no_grad()
def memory_efficient_forward(self, outputs, targets):
"""More memory-friendly matching"""
bs, num_queries = outputs["pred_logits"].shape[:2]
indices = []
# Iterate through batch size
for b in range(bs):
out_prob = outputs["pred_logits"][b].softmax(-1) # [num_queries, num_classes]
tgt_ids = targets[b]["labels"]
# Compute the classification cost. Contrary to the loss, we don't use the NLL,
# but approximate it in 1 - proba[target class].
# The 1 is a constant that doesn't change the matching, so it can be omitted.
cost_class = -out_prob[:, tgt_ids]
out_mask = outputs["pred_masks"][b] # [num_queries, H_pred, W_pred]
# gt masks are already padded when preparing target
tgt_mask = targets[b]["masks"].to(out_mask)
out_mask = out_mask[:, None]
tgt_mask = tgt_mask[:, None]
# all masks share the same set of points for efficient matching!
point_coords = torch.rand(1, self.num_points, 2, device=out_mask.device)
# get gt labels
tgt_mask = point_sample(
tgt_mask,
point_coords.repeat(tgt_mask.shape[0], 1, 1),
align_corners=False,
).squeeze(1)
out_mask = point_sample(
out_mask,
point_coords.repeat(out_mask.shape[0], 1, 1),
align_corners=False,
).squeeze(1)
with autocast(enabled=False):
out_mask = out_mask.float()
tgt_mask = tgt_mask.float()
# Compute the focal loss between masks
cost_mask = batch_sigmoid_ce_loss_jit(out_mask, tgt_mask)
# Compute the dice loss between masks
cost_dice = batch_dice_loss_jit(out_mask, tgt_mask)
# Final cost matrix
C = (
self.cost_mask * cost_mask
+ self.cost_class * cost_class
+ self.cost_dice * cost_dice
)
C = C.reshape(num_queries, -1).cpu()
indices.append(linear_sum_assignment(C))
return [
(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64))
for i, j in indices
]
@torch.no_grad()
def forward(self, outputs, targets):
"""Performs the matching
Params:
outputs: This is a dict that contains at least these entries:
"pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits
"pred_masks": Tensor of dim [batch_size, num_queries, H_pred, W_pred] with the predicted masks
targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:
"labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth
objects in the target) containing the class labels
"masks": Tensor of dim [num_target_boxes, H_gt, W_gt] containing the target masks
Returns:
A list of size batch_size, containing tuples of (index_i, index_j) where:
- index_i is the indices of the selected predictions (in order)
- index_j is the indices of the corresponding selected targets (in order)
For each batch element, it holds:
len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
"""
return self.memory_efficient_forward(outputs, targets)
def __repr__(self, _repr_indent=4):
head = "Matcher " + self.__class__.__name__
body = [
"cost_class: {}".format(self.cost_class),
"cost_mask: {}".format(self.cost_mask),
"cost_dice: {}".format(self.cost_dice),
]
lines = [head] + [" " * _repr_indent + line for line in body]
return "\n".join(lines)
| 3D-LLM-main | three_steps_3d_feature/first_step/mask2former/modeling/matcher.py |
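A standalone sketch (made-up shapes and costs) of running the HungarianMatcher above on dummy predictions and targets.
import torch

matcher = HungarianMatcher(cost_class=2.0, cost_mask=5.0, cost_dice=5.0, num_points=128)
outputs = {
    "pred_logits": torch.rand(2, 100, 81),     # [batch, queries, num_classes + 1]
    "pred_masks": torch.rand(2, 100, 64, 64),  # [batch, queries, H, W]
}
targets = [
    {"labels": torch.tensor([3, 7]), "masks": torch.rand(2, 64, 64) > 0.5},
    {"labels": torch.tensor([1]), "masks": torch.rand(1, 64, 64) > 0.5},
]
indices = matcher(outputs, targets)  # one (pred_idx, tgt_idx) pair of index tensors per image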
# Copyright (c) Facebook, Inc. and its affiliates.
from .backbone.swin import D2SwinTransformer
from .pixel_decoder.fpn import BasePixelDecoder
from .pixel_decoder.msdeformattn import MSDeformAttnPixelDecoder
from .meta_arch.mask_former_head import MaskFormerHead
from .meta_arch.per_pixel_baseline import PerPixelBaselineHead, PerPixelBaselinePlusHead
| 3D-LLM-main | three_steps_3d_feature/first_step/mask2former/modeling/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/facebookresearch/detr/blob/master/models/detr.py
"""
MaskFormer criterion.
"""
import logging
import torch
import torch.nn.functional as F
from torch import nn
from detectron2.utils.comm import get_world_size
from detectron2.projects.point_rend.point_features import (
get_uncertain_point_coords_with_randomness,
point_sample,
)
from ..utils.misc import is_dist_avail_and_initialized, nested_tensor_from_tensor_list
def dice_loss(
inputs: torch.Tensor,
targets: torch.Tensor,
num_masks: float,
):
"""
Compute the DICE loss, similar to generalized IOU for masks
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
"""
inputs = inputs.sigmoid()
inputs = inputs.flatten(1)
numerator = 2 * (inputs * targets).sum(-1)
denominator = inputs.sum(-1) + targets.sum(-1)
loss = 1 - (numerator + 1) / (denominator + 1)
return loss.sum() / num_masks
dice_loss_jit = torch.jit.script(
dice_loss
) # type: torch.jit.ScriptModule
def sigmoid_ce_loss(
inputs: torch.Tensor,
targets: torch.Tensor,
num_masks: float,
):
"""
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
Returns:
Loss tensor
"""
loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
return loss.mean(1).sum() / num_masks
sigmoid_ce_loss_jit = torch.jit.script(
sigmoid_ce_loss
) # type: torch.jit.ScriptModule
def calculate_uncertainty(logits):
"""
We estimate uncertainty as L1 distance between 0.0 and the logit prediction in 'logits' for the
foreground class in `classes`.
Args:
logits (Tensor): A tensor of shape (R, 1, ...) for class-specific or
class-agnostic, where R is the total number of predicted masks in all images.
The values are logits.
Returns:
scores (Tensor): A tensor of shape (R, 1, ...) that contains uncertainty scores with
the most uncertain locations having the highest uncertainty score.
"""
assert logits.shape[1] == 1
gt_class_logits = logits.clone()
return -(torch.abs(gt_class_logits))
class SetCriterion(nn.Module):
"""This class computes the loss for DETR.
The process happens in two steps:
1) we compute the Hungarian assignment between ground truth boxes and the outputs of the model
2) we supervise each pair of matched ground-truth / prediction (supervise class and box)
"""
def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses,
num_points, oversample_ratio, importance_sample_ratio):
"""Create the criterion.
Parameters:
num_classes: number of object categories, omitting the special no-object category
matcher: module able to compute a matching between targets and proposals
weight_dict: dict containing as key the names of the losses and as values their relative weight.
eos_coef: relative classification weight applied to the no-object category
losses: list of all the losses to be applied. See get_loss for list of available losses.
"""
super().__init__()
self.num_classes = num_classes
self.matcher = matcher
self.weight_dict = weight_dict
self.eos_coef = eos_coef
self.losses = losses
empty_weight = torch.ones(self.num_classes + 1)
empty_weight[-1] = self.eos_coef
self.register_buffer("empty_weight", empty_weight)
# pointwise mask loss parameters
self.num_points = num_points
self.oversample_ratio = oversample_ratio
self.importance_sample_ratio = importance_sample_ratio
def loss_labels(self, outputs, targets, indices, num_masks):
"""Classification loss (NLL)
targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
"""
assert "pred_logits" in outputs
src_logits = outputs["pred_logits"].float()
idx = self._get_src_permutation_idx(indices)
target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)])
target_classes = torch.full(
src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device
)
target_classes[idx] = target_classes_o
loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight)
losses = {"loss_ce": loss_ce}
return losses
def loss_masks(self, outputs, targets, indices, num_masks):
"""Compute the losses related to the masks: the focal loss and the dice loss.
targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w]
"""
assert "pred_masks" in outputs
src_idx = self._get_src_permutation_idx(indices)
tgt_idx = self._get_tgt_permutation_idx(indices)
src_masks = outputs["pred_masks"]
src_masks = src_masks[src_idx]
masks = [t["masks"] for t in targets]
# TODO use valid to mask invalid areas due to padding in loss
target_masks, valid = nested_tensor_from_tensor_list(masks).decompose()
target_masks = target_masks.to(src_masks)
target_masks = target_masks[tgt_idx]
# No need to upsample predictions as we are using normalized coordinates :)
# N x 1 x H x W
src_masks = src_masks[:, None]
target_masks = target_masks[:, None]
with torch.no_grad():
# sample point_coords
point_coords = get_uncertain_point_coords_with_randomness(
src_masks,
lambda logits: calculate_uncertainty(logits),
self.num_points,
self.oversample_ratio,
self.importance_sample_ratio,
)
# get gt labels
point_labels = point_sample(
target_masks,
point_coords,
align_corners=False,
).squeeze(1)
point_logits = point_sample(
src_masks,
point_coords,
align_corners=False,
).squeeze(1)
losses = {
"loss_mask": sigmoid_ce_loss_jit(point_logits, point_labels, num_masks),
"loss_dice": dice_loss_jit(point_logits, point_labels, num_masks),
}
del src_masks
del target_masks
return losses
def _get_src_permutation_idx(self, indices):
# permute predictions following indices
batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])
src_idx = torch.cat([src for (src, _) in indices])
return batch_idx, src_idx
def _get_tgt_permutation_idx(self, indices):
# permute targets following indices
batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])
tgt_idx = torch.cat([tgt for (_, tgt) in indices])
return batch_idx, tgt_idx
def get_loss(self, loss, outputs, targets, indices, num_masks):
loss_map = {
'labels': self.loss_labels,
'masks': self.loss_masks,
}
assert loss in loss_map, f"do you really want to compute {loss} loss?"
return loss_map[loss](outputs, targets, indices, num_masks)
def forward(self, outputs, targets):
"""This performs the loss computation.
Parameters:
outputs: dict of tensors, see the output specification of the model for the format
targets: list of dicts, such that len(targets) == batch_size.
The expected keys in each dict depend on the losses applied; see each loss' doc
"""
outputs_without_aux = {k: v for k, v in outputs.items() if k != "aux_outputs"}
# Retrieve the matching between the outputs of the last layer and the targets
indices = self.matcher(outputs_without_aux, targets)
# Compute the average number of target boxes across all nodes, for normalization purposes
num_masks = sum(len(t["labels"]) for t in targets)
num_masks = torch.as_tensor(
[num_masks], dtype=torch.float, device=next(iter(outputs.values())).device
)
if is_dist_avail_and_initialized():
torch.distributed.all_reduce(num_masks)
num_masks = torch.clamp(num_masks / get_world_size(), min=1).item()
# Compute all the requested losses
losses = {}
for loss in self.losses:
losses.update(self.get_loss(loss, outputs, targets, indices, num_masks))
# In case of auxiliary losses, we repeat this process with the output of each intermediate layer.
if "aux_outputs" in outputs:
for i, aux_outputs in enumerate(outputs["aux_outputs"]):
indices = self.matcher(aux_outputs, targets)
for loss in self.losses:
l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_masks)
l_dict = {k + f"_{i}": v for k, v in l_dict.items()}
losses.update(l_dict)
return losses
def __repr__(self):
head = "Criterion " + self.__class__.__name__
body = [
"matcher: {}".format(self.matcher.__repr__(_repr_indent=8)),
"losses: {}".format(self.losses),
"weight_dict: {}".format(self.weight_dict),
"num_classes: {}".format(self.num_classes),
"eos_coef: {}".format(self.eos_coef),
"num_points: {}".format(self.num_points),
"oversample_ratio: {}".format(self.oversample_ratio),
"importance_sample_ratio: {}".format(self.importance_sample_ratio),
]
_repr_indent = 4
lines = [head] + [" " * _repr_indent + line for line in body]
return "\n".join(lines)
| 3D-LLM-main | three_steps_3d_feature/first_step/mask2former/modeling/criterion.py |
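A minimal sketch (made-up class count and weights) of building SetCriterion together with the HungarianMatcher from the matcher module above, mirroring MaskFormer.from_config() earlier in this dump; the import path assumes the sketch is run from the first_step directory like maskformer_mask.py.
from mask2former.modeling.matcher import HungarianMatcher

matcher = HungarianMatcher(cost_class=2.0, cost_mask=5.0, cost_dice=5.0, num_points=112 * 112)
weight_dict = {"loss_ce": 2.0, "loss_mask": 5.0, "loss_dice": 5.0}
criterion = SetCriterion(
    num_classes=80,                  # example value; taken from cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES in practice
    matcher=matcher,
    weight_dict=weight_dict,
    eos_coef=0.1,
    losses=["labels", "masks"],
    num_points=112 * 112,
    oversample_ratio=3.0,
    importance_sample_ratio=0.75,
)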
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from: https://github.com/facebookresearch/detr/blob/master/models/detr.py
import fvcore.nn.weight_init as weight_init
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.layers import Conv2d
from detectron2.utils.registry import Registry
from .position_encoding import PositionEmbeddingSine
from .transformer import Transformer
TRANSFORMER_DECODER_REGISTRY = Registry("TRANSFORMER_MODULE")
TRANSFORMER_DECODER_REGISTRY.__doc__ = """
Registry for transformer module in MaskFormer.
"""
def build_transformer_decoder(cfg, in_channels, mask_classification=True):
"""
Build a transformer decoder module from `cfg.MODEL.MASK_FORMER.TRANSFORMER_DECODER_NAME`.
"""
name = cfg.MODEL.MASK_FORMER.TRANSFORMER_DECODER_NAME
return TRANSFORMER_DECODER_REGISTRY.get(name)(cfg, in_channels, mask_classification)
@TRANSFORMER_DECODER_REGISTRY.register()
class StandardTransformerDecoder(nn.Module):
@configurable
def __init__(
self,
in_channels,
mask_classification=True,
*,
num_classes: int,
hidden_dim: int,
num_queries: int,
nheads: int,
dropout: float,
dim_feedforward: int,
enc_layers: int,
dec_layers: int,
pre_norm: bool,
deep_supervision: bool,
mask_dim: int,
enforce_input_project: bool,
):
"""
NOTE: this interface is experimental.
Args:
in_channels: channels of the input features
mask_classification: whether to add mask classifier or not
num_classes: number of classes
hidden_dim: Transformer feature dimension
num_queries: number of queries
nheads: number of heads
dropout: dropout in Transformer
dim_feedforward: feature dimension in feedforward network
enc_layers: number of Transformer encoder layers
dec_layers: number of Transformer decoder layers
pre_norm: whether to use pre-LayerNorm or not
deep_supervision: whether to add supervision to every decoder layers
mask_dim: mask feature dimension
enforce_input_project: add input project 1x1 conv even if input
channels and hidden dim is identical
"""
super().__init__()
self.mask_classification = mask_classification
# positional encoding
N_steps = hidden_dim // 2
self.pe_layer = PositionEmbeddingSine(N_steps, normalize=True)
transformer = Transformer(
d_model=hidden_dim,
dropout=dropout,
nhead=nheads,
dim_feedforward=dim_feedforward,
num_encoder_layers=enc_layers,
num_decoder_layers=dec_layers,
normalize_before=pre_norm,
return_intermediate_dec=deep_supervision,
)
self.num_queries = num_queries
self.transformer = transformer
hidden_dim = transformer.d_model
self.query_embed = nn.Embedding(num_queries, hidden_dim)
if in_channels != hidden_dim or enforce_input_project:
self.input_proj = Conv2d(in_channels, hidden_dim, kernel_size=1)
weight_init.c2_xavier_fill(self.input_proj)
else:
self.input_proj = nn.Sequential()
self.aux_loss = deep_supervision
# output FFNs
if self.mask_classification:
self.class_embed = nn.Linear(hidden_dim, num_classes + 1)
self.mask_embed = MLP(hidden_dim, hidden_dim, mask_dim, 3)
@classmethod
def from_config(cls, cfg, in_channels, mask_classification):
ret = {}
ret["in_channels"] = in_channels
ret["mask_classification"] = mask_classification
ret["num_classes"] = cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES
ret["hidden_dim"] = cfg.MODEL.MASK_FORMER.HIDDEN_DIM
ret["num_queries"] = cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES
# Transformer parameters:
ret["nheads"] = cfg.MODEL.MASK_FORMER.NHEADS
ret["dropout"] = cfg.MODEL.MASK_FORMER.DROPOUT
ret["dim_feedforward"] = cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD
ret["enc_layers"] = cfg.MODEL.MASK_FORMER.ENC_LAYERS
ret["dec_layers"] = cfg.MODEL.MASK_FORMER.DEC_LAYERS
ret["pre_norm"] = cfg.MODEL.MASK_FORMER.PRE_NORM
ret["deep_supervision"] = cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION
ret["enforce_input_project"] = cfg.MODEL.MASK_FORMER.ENFORCE_INPUT_PROJ
ret["mask_dim"] = cfg.MODEL.SEM_SEG_HEAD.MASK_DIM
return ret
def forward(self, x, mask_features, mask=None):
if mask is not None:
mask = F.interpolate(mask[None].float(), size=x.shape[-2:]).to(torch.bool)[0]
pos = self.pe_layer(x, mask)
src = x
hs, memory = self.transformer(self.input_proj(src), mask, self.query_embed.weight, pos)
if self.mask_classification:
outputs_class = self.class_embed(hs)
out = {"pred_logits": outputs_class[-1]}
else:
out = {}
if self.aux_loss:
# [l, bs, queries, embed]
mask_embed = self.mask_embed(hs)
outputs_seg_masks = torch.einsum("lbqc,bchw->lbqhw", mask_embed, mask_features)
out["pred_masks"] = outputs_seg_masks[-1]
out["aux_outputs"] = self._set_aux_loss(
outputs_class if self.mask_classification else None, outputs_seg_masks
)
else:
# FIXME h_boxes takes the last one computed, keep this in mind
# [bs, queries, embed]
mask_embed = self.mask_embed(hs[-1])
outputs_seg_masks = torch.einsum("bqc,bchw->bqhw", mask_embed, mask_features)
out["pred_masks"] = outputs_seg_masks
return out
@torch.jit.unused
def _set_aux_loss(self, outputs_class, outputs_seg_masks):
# this is a workaround to make torchscript happy, as torchscript
# doesn't support dictionary with non-homogeneous values, such
# as a dict having both a Tensor and a list.
if self.mask_classification:
return [
{"pred_logits": a, "pred_masks": b}
for a, b in zip(outputs_class[:-1], outputs_seg_masks[:-1])
]
else:
return [{"pred_masks": b} for b in outputs_seg_masks[:-1]]
class MLP(nn.Module):
"""Very simple multi-layer perceptron (also called FFN)"""
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(
nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])
)
def forward(self, x):
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
| 3D-LLM-main | three_steps_3d_feature/first_step/mask2former/modeling/transformer_decoder/maskformer_transformer_decoder.py |
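A standalone sketch (made-up dimensions) of the small MLP defined above, which the decoder uses to project query embeddings to mask embeddings.
import torch

mlp = MLP(input_dim=256, hidden_dim=256, output_dim=256, num_layers=3)
queries = torch.rand(4, 100, 256)  # [batch, num_queries, hidden_dim]
mask_embed = mlp(queries)          # linear layers act on the last dimension only
print(mask_embed.shape)            # torch.Size([4, 100, 256])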
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from: https://github.com/facebookresearch/detr/blob/master/models/detr.py
import logging
import fvcore.nn.weight_init as weight_init
from typing import Optional
import torch
from torch import nn, Tensor
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.layers import Conv2d
from .position_encoding import PositionEmbeddingSine
from .maskformer_transformer_decoder import TRANSFORMER_DECODER_REGISTRY
class SelfAttentionLayer(nn.Module):
def __init__(self, d_model, nhead, dropout=0.0,
activation="relu", normalize_before=False):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.norm = nn.LayerNorm(d_model)
self.dropout = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
self._reset_parameters()
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self, tgt,
tgt_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
q = k = self.with_pos_embed(tgt, query_pos)
tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout(tgt2)
tgt = self.norm(tgt)
return tgt
def forward_pre(self, tgt,
tgt_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
tgt2 = self.norm(tgt)
q = k = self.with_pos_embed(tgt2, query_pos)
tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout(tgt2)
return tgt
def forward(self, tgt,
tgt_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
if self.normalize_before:
return self.forward_pre(tgt, tgt_mask,
tgt_key_padding_mask, query_pos)
return self.forward_post(tgt, tgt_mask,
tgt_key_padding_mask, query_pos)
class CrossAttentionLayer(nn.Module):
def __init__(self, d_model, nhead, dropout=0.0,
activation="relu", normalize_before=False):
super().__init__()
self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.norm = nn.LayerNorm(d_model)
self.dropout = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
self._reset_parameters()
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self, tgt, memory,
memory_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)[0]
tgt = tgt + self.dropout(tgt2)
tgt = self.norm(tgt)
return tgt
def forward_pre(self, tgt, memory,
memory_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
tgt2 = self.norm(tgt)
tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)[0]
tgt = tgt + self.dropout(tgt2)
return tgt
def forward(self, tgt, memory,
memory_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
if self.normalize_before:
return self.forward_pre(tgt, memory, memory_mask,
memory_key_padding_mask, pos, query_pos)
return self.forward_post(tgt, memory, memory_mask,
memory_key_padding_mask, pos, query_pos)
class FFNLayer(nn.Module):
def __init__(self, d_model, dim_feedforward=2048, dropout=0.0,
activation="relu", normalize_before=False):
super().__init__()
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm = nn.LayerNorm(d_model)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
self._reset_parameters()
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self, tgt):
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout(tgt2)
tgt = self.norm(tgt)
return tgt
def forward_pre(self, tgt):
tgt2 = self.norm(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
tgt = tgt + self.dropout(tgt2)
return tgt
def forward(self, tgt):
if self.normalize_before:
return self.forward_pre(tgt)
return self.forward_post(tgt)
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(F"activation should be relu/gelu, not {activation}.")
class MLP(nn.Module):
""" Very simple multi-layer perceptron (also called FFN)"""
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
def forward(self, x):
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
@TRANSFORMER_DECODER_REGISTRY.register()
class MultiScaleMaskedTransformerDecoder(nn.Module):
_version = 2
def _load_from_state_dict(
self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
):
version = local_metadata.get("version", None)
if version is None or version < 2:
# Do not warn if train from scratch
scratch = True
logger = logging.getLogger(__name__)
for k in list(state_dict.keys()):
newk = k
if "static_query" in k:
newk = k.replace("static_query", "query_feat")
if newk != k:
state_dict[newk] = state_dict[k]
del state_dict[k]
scratch = False
if not scratch:
logger.warning(
f"Weight format of {self.__class__.__name__} have changed! "
"Please upgrade your models. Applying automatic conversion now ..."
)
@configurable
def __init__(
self,
in_channels,
mask_classification=True,
*,
num_classes: int,
hidden_dim: int,
num_queries: int,
nheads: int,
dim_feedforward: int,
dec_layers: int,
pre_norm: bool,
mask_dim: int,
enforce_input_project: bool,
):
"""
NOTE: this interface is experimental.
Args:
in_channels: channels of the input features
mask_classification: whether to add mask classifier or not
num_classes: number of classes
hidden_dim: Transformer feature dimension
num_queries: number of queries
nheads: number of heads
dim_feedforward: feature dimension in feedforward network
dec_layers: number of Transformer decoder layers
pre_norm: whether to use pre-LayerNorm or not
mask_dim: mask feature dimension
enforce_input_project: add input project 1x1 conv even if input
channels and hidden dim is identical
"""
super().__init__()
assert mask_classification, "Only support mask classification model"
self.mask_classification = mask_classification
# positional encoding
N_steps = hidden_dim // 2
self.pe_layer = PositionEmbeddingSine(N_steps, normalize=True)
# define Transformer decoder here
self.num_heads = nheads
self.num_layers = dec_layers
self.transformer_self_attention_layers = nn.ModuleList()
self.transformer_cross_attention_layers = nn.ModuleList()
self.transformer_ffn_layers = nn.ModuleList()
for _ in range(self.num_layers):
self.transformer_self_attention_layers.append(
SelfAttentionLayer(
d_model=hidden_dim,
nhead=nheads,
dropout=0.0,
normalize_before=pre_norm,
)
)
self.transformer_cross_attention_layers.append(
CrossAttentionLayer(
d_model=hidden_dim,
nhead=nheads,
dropout=0.0,
normalize_before=pre_norm,
)
)
self.transformer_ffn_layers.append(
FFNLayer(
d_model=hidden_dim,
dim_feedforward=dim_feedforward,
dropout=0.0,
normalize_before=pre_norm,
)
)
self.decoder_norm = nn.LayerNorm(hidden_dim)
self.num_queries = num_queries
# learnable query features
self.query_feat = nn.Embedding(num_queries, hidden_dim)
# learnable query p.e.
self.query_embed = nn.Embedding(num_queries, hidden_dim)
# level embedding (we always use 3 scales)
self.num_feature_levels = 3
self.level_embed = nn.Embedding(self.num_feature_levels, hidden_dim)
self.input_proj = nn.ModuleList()
for _ in range(self.num_feature_levels):
if in_channels != hidden_dim or enforce_input_project:
self.input_proj.append(Conv2d(in_channels, hidden_dim, kernel_size=1))
weight_init.c2_xavier_fill(self.input_proj[-1])
else:
self.input_proj.append(nn.Sequential())
# output FFNs
if self.mask_classification:
self.class_embed = nn.Linear(hidden_dim, num_classes + 1)
self.mask_embed = MLP(hidden_dim, hidden_dim, mask_dim, 3)
@classmethod
def from_config(cls, cfg, in_channels, mask_classification):
ret = {}
ret["in_channels"] = in_channels
ret["mask_classification"] = mask_classification
ret["num_classes"] = cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES
ret["hidden_dim"] = cfg.MODEL.MASK_FORMER.HIDDEN_DIM
ret["num_queries"] = cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES
# Transformer parameters:
ret["nheads"] = cfg.MODEL.MASK_FORMER.NHEADS
ret["dim_feedforward"] = cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD
# NOTE: because we add learnable query features, which require supervision,
# we subtract 1 from the number of decoder layers to stay consistent with our loss
# implementation: the number of auxiliary losses always equals the number of
# decoder layers. With learnable query features, the number of auxiliary losses
# equals the number of decoder layers plus 1.
assert cfg.MODEL.MASK_FORMER.DEC_LAYERS >= 1
ret["dec_layers"] = cfg.MODEL.MASK_FORMER.DEC_LAYERS - 1
ret["pre_norm"] = cfg.MODEL.MASK_FORMER.PRE_NORM
ret["enforce_input_project"] = cfg.MODEL.MASK_FORMER.ENFORCE_INPUT_PROJ
ret["mask_dim"] = cfg.MODEL.SEM_SEG_HEAD.MASK_DIM
return ret
def forward(self, x, mask_features, mask = None):
# x is a list of multi-scale feature
assert len(x) == self.num_feature_levels
src = []
pos = []
size_list = []
# disable mask, it does not affect performance
del mask
for i in range(self.num_feature_levels):
size_list.append(x[i].shape[-2:])
pos.append(self.pe_layer(x[i], None).flatten(2))
src.append(self.input_proj[i](x[i]).flatten(2) + self.level_embed.weight[i][None, :, None])
# flatten NxCxHxW to HWxNxC
pos[-1] = pos[-1].permute(2, 0, 1)
src[-1] = src[-1].permute(2, 0, 1)
_, bs, _ = src[0].shape
# QxNxC
query_embed = self.query_embed.weight.unsqueeze(1).repeat(1, bs, 1)
output = self.query_feat.weight.unsqueeze(1).repeat(1, bs, 1)
predictions_class = []
predictions_mask = []
# prediction heads on learnable query features
outputs_class, outputs_mask, attn_mask = self.forward_prediction_heads(output, mask_features, attn_mask_target_size=size_list[0])
predictions_class.append(outputs_class)
predictions_mask.append(outputs_mask)
for i in range(self.num_layers):
level_index = i % self.num_feature_levels
attn_mask[torch.where(attn_mask.sum(-1) == attn_mask.shape[-1])] = False
# attention: cross-attention first
output = self.transformer_cross_attention_layers[i](
output, src[level_index],
memory_mask=attn_mask,
memory_key_padding_mask=None, # here we do not apply masking on padded region
pos=pos[level_index], query_pos=query_embed
)
output = self.transformer_self_attention_layers[i](
output, tgt_mask=None,
tgt_key_padding_mask=None,
query_pos=query_embed
)
# FFN
output = self.transformer_ffn_layers[i](
output
)
outputs_class, outputs_mask, attn_mask = self.forward_prediction_heads(output, mask_features, attn_mask_target_size=size_list[(i + 1) % self.num_feature_levels])
predictions_class.append(outputs_class)
predictions_mask.append(outputs_mask)
assert len(predictions_class) == self.num_layers + 1
out = {
'pred_logits': predictions_class[-1],
'pred_masks': predictions_mask[-1],
'aux_outputs': self._set_aux_loss(
predictions_class if self.mask_classification else None, predictions_mask
)
}
return out
def forward_prediction_heads(self, output, mask_features, attn_mask_target_size):
decoder_output = self.decoder_norm(output)
decoder_output = decoder_output.transpose(0, 1)
outputs_class = self.class_embed(decoder_output)
mask_embed = self.mask_embed(decoder_output)
outputs_mask = torch.einsum("bqc,bchw->bqhw", mask_embed, mask_features)
# NOTE: prediction is of higher-resolution
# [B, Q, H, W] -> [B, Q, H*W] -> [B, h, Q, H*W] -> [B*h, Q, HW]
attn_mask = F.interpolate(outputs_mask, size=attn_mask_target_size, mode="bilinear", align_corners=False)
# must use bool type
# If a BoolTensor is provided, positions with ``True`` are not allowed to attend while ``False`` values will be unchanged.
attn_mask = (attn_mask.sigmoid().flatten(2).unsqueeze(1).repeat(1, self.num_heads, 1, 1).flatten(0, 1) < 0.5).bool()
attn_mask = attn_mask.detach()
return outputs_class, outputs_mask, attn_mask
@torch.jit.unused
def _set_aux_loss(self, outputs_class, outputs_seg_masks):
# this is a workaround to make torchscript happy, as torchscript
# doesn't support dictionary with non-homogeneous values, such
# as a dict having both a Tensor and a list.
if self.mask_classification:
return [
{"pred_logits": a, "pred_masks": b}
for a, b in zip(outputs_class[:-1], outputs_seg_masks[:-1])
]
else:
return [{"pred_masks": b} for b in outputs_seg_masks[:-1]]
| 3D-LLM-main | three_steps_3d_feature/first_step/mask2former/modeling/transformer_decoder/mask2former_transformer_decoder.py |
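Illustrative sketch (added by the editor, not part of the repository): the mask-prediction step in forward_prediction_heads above reduces to an einsum between per-query embeddings and per-pixel mask features, followed by thresholding the downsampled, sigmoid-ed masks at 0.5 to build the boolean cross-attention mask. All sizes below (B, Q, C, H, W, num_heads) are hypothetical.
import torch
import torch.nn.functional as F
B, Q, C, H, W, num_heads = 2, 100, 256, 32, 32, 8
mask_embed = torch.randn(B, Q, C)        # per-query output of the MLP mask head
mask_features = torch.randn(B, C, H, W)  # per-pixel features from the pixel decoder
# dot product between query embeddings and per-pixel features -> one mask per query
outputs_mask = torch.einsum("bqc,bchw->bqhw", mask_embed, mask_features)
# downsample to the next attention resolution; True means the position is NOT attended to by that query
attn_mask = F.interpolate(outputs_mask, size=(16, 16), mode="bilinear", align_corners=False)
attn_mask = (attn_mask.sigmoid().flatten(2).unsqueeze(1).repeat(1, num_heads, 1, 1).flatten(0, 1) < 0.5).bool()
print(attn_mask.shape)  # torch.Size([16, 100, 256]) i.e. (B * num_heads, Q, 16 * 16)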
# Copyright (c) Facebook, Inc. and its affiliates.
# # Modified by Bowen Cheng from: https://github.com/facebookresearch/detr/blob/master/models/position_encoding.py
"""
Various positional encodings for the transformer.
"""
import math
import torch
from torch import nn
class PositionEmbeddingSine(nn.Module):
"""
This is a more standard version of the position embedding, very similar to the one
used by the Attention is all you need paper, generalized to work on images.
"""
def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
super().__init__()
self.num_pos_feats = num_pos_feats
self.temperature = temperature
self.normalize = normalize
if scale is not None and normalize is False:
raise ValueError("normalize should be True if scale is passed")
if scale is None:
scale = 2 * math.pi
self.scale = scale
def forward(self, x, mask=None):
if mask is None:
mask = torch.zeros((x.size(0), x.size(2), x.size(3)), device=x.device, dtype=torch.bool)
not_mask = ~mask
y_embed = not_mask.cumsum(1, dtype=torch.float32)
x_embed = not_mask.cumsum(2, dtype=torch.float32)
if self.normalize:
eps = 1e-6
y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
pos_x = x_embed[:, :, :, None] / dim_t
pos_y = y_embed[:, :, :, None] / dim_t
pos_x = torch.stack(
(pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4
).flatten(3)
pos_y = torch.stack(
(pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4
).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
return pos
def __repr__(self, _repr_indent=4):
head = "Positional encoding " + self.__class__.__name__
body = [
"num_pos_feats: {}".format(self.num_pos_feats),
"temperature: {}".format(self.temperature),
"normalize: {}".format(self.normalize),
"scale: {}".format(self.scale),
]
# _repr_indent = 4
lines = [head] + [" " * _repr_indent + line for line in body]
return "\n".join(lines)
| 3D-LLM-main | three_steps_3d_feature/first_step/mask2former/modeling/transformer_decoder/position_encoding.py |
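A minimal usage sketch (editor illustration): for an (N, C, H, W) feature map, PositionEmbeddingSine returns an (N, 2 * num_pos_feats, H, W) positional encoding. The class defined in position_encoding.py above is assumed to be in scope; sizes are hypothetical.
import torch
pe = PositionEmbeddingSine(num_pos_feats=128, normalize=True)
feat = torch.randn(2, 256, 32, 32)
pos = pe(feat)        # mask defaults to all-False, i.e. nothing is treated as padding
print(pos.shape)      # torch.Size([2, 256, 32, 32])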
# Copyright (c) Facebook, Inc. and its affiliates.
from .maskformer_transformer_decoder import StandardTransformerDecoder
from .mask2former_transformer_decoder import MultiScaleMaskedTransformerDecoder
| 3D-LLM-main | three_steps_3d_feature/first_step/mask2former/modeling/transformer_decoder/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from: https://github.com/facebookresearch/detr/blob/master/models/transformer.py
"""
Transformer class.
Copy-paste from torch.nn.Transformer with modifications:
* positional encodings are passed in MHattention
* extra LN at the end of encoder is removed
* decoder returns a stack of activations from all decoding layers
"""
import copy
from typing import List, Optional
import torch
import torch.nn.functional as F
from torch import Tensor, nn
class Transformer(nn.Module):
def __init__(
self,
d_model=512,
nhead=8,
num_encoder_layers=6,
num_decoder_layers=6,
dim_feedforward=2048,
dropout=0.1,
activation="relu",
normalize_before=False,
return_intermediate_dec=False,
):
super().__init__()
encoder_layer = TransformerEncoderLayer(
d_model, nhead, dim_feedforward, dropout, activation, normalize_before
)
encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
decoder_layer = TransformerDecoderLayer(
d_model, nhead, dim_feedforward, dropout, activation, normalize_before
)
decoder_norm = nn.LayerNorm(d_model)
self.decoder = TransformerDecoder(
decoder_layer,
num_decoder_layers,
decoder_norm,
return_intermediate=return_intermediate_dec,
)
self._reset_parameters()
self.d_model = d_model
self.nhead = nhead
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def forward(self, src, mask, query_embed, pos_embed):
# flatten NxCxHxW to HWxNxC
bs, c, h, w = src.shape
src = src.flatten(2).permute(2, 0, 1)
pos_embed = pos_embed.flatten(2).permute(2, 0, 1)
query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1)
if mask is not None:
mask = mask.flatten(1)
tgt = torch.zeros_like(query_embed)
memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed)
hs = self.decoder(
tgt, memory, memory_key_padding_mask=mask, pos=pos_embed, query_pos=query_embed
)
return hs.transpose(1, 2), memory.permute(1, 2, 0).view(bs, c, h, w)
class TransformerEncoder(nn.Module):
def __init__(self, encoder_layer, num_layers, norm=None):
super().__init__()
self.layers = _get_clones(encoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
def forward(
self,
src,
mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
):
output = src
for layer in self.layers:
output = layer(
output, src_mask=mask, src_key_padding_mask=src_key_padding_mask, pos=pos
)
if self.norm is not None:
output = self.norm(output)
return output
class TransformerDecoder(nn.Module):
def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
super().__init__()
self.layers = _get_clones(decoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
self.return_intermediate = return_intermediate
def forward(
self,
tgt,
memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
):
output = tgt
intermediate = []
for layer in self.layers:
output = layer(
output,
memory,
tgt_mask=tgt_mask,
memory_mask=memory_mask,
tgt_key_padding_mask=tgt_key_padding_mask,
memory_key_padding_mask=memory_key_padding_mask,
pos=pos,
query_pos=query_pos,
)
if self.return_intermediate:
intermediate.append(self.norm(output))
if self.norm is not None:
output = self.norm(output)
if self.return_intermediate:
intermediate.pop()
intermediate.append(output)
if self.return_intermediate:
return torch.stack(intermediate)
return output.unsqueeze(0)
class TransformerEncoderLayer(nn.Module):
def __init__(
self,
d_model,
nhead,
dim_feedforward=2048,
dropout=0.1,
activation="relu",
normalize_before=False,
):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(
self,
src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
):
q = k = self.with_pos_embed(src, pos)
src2 = self.self_attn(
q, k, value=src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask
)[0]
src = src + self.dropout1(src2)
src = self.norm1(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
def forward_pre(
self,
src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
):
src2 = self.norm1(src)
q = k = self.with_pos_embed(src2, pos)
src2 = self.self_attn(
q, k, value=src2, attn_mask=src_mask, key_padding_mask=src_key_padding_mask
)[0]
src = src + self.dropout1(src2)
src2 = self.norm2(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))
src = src + self.dropout2(src2)
return src
def forward(
self,
src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
):
if self.normalize_before:
return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
return self.forward_post(src, src_mask, src_key_padding_mask, pos)
class TransformerDecoderLayer(nn.Module):
def __init__(
self,
d_model,
nhead,
dim_feedforward=2048,
dropout=0.1,
activation="relu",
normalize_before=False,
):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.norm3 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(
self,
tgt,
memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
):
q = k = self.with_pos_embed(tgt, query_pos)
tgt2 = self.self_attn(
q, k, value=tgt, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask
)[0]
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
tgt2 = self.multihead_attn(
query=self.with_pos_embed(tgt, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory,
attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask,
)[0]
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
return tgt
def forward_pre(
self,
tgt,
memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
):
tgt2 = self.norm1(tgt)
q = k = self.with_pos_embed(tgt2, query_pos)
tgt2 = self.self_attn(
q, k, value=tgt2, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask
)[0]
tgt = tgt + self.dropout1(tgt2)
tgt2 = self.norm2(tgt)
tgt2 = self.multihead_attn(
query=self.with_pos_embed(tgt2, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory,
attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask,
)[0]
tgt = tgt + self.dropout2(tgt2)
tgt2 = self.norm3(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
tgt = tgt + self.dropout3(tgt2)
return tgt
def forward(
self,
tgt,
memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
):
if self.normalize_before:
return self.forward_pre(
tgt,
memory,
tgt_mask,
memory_mask,
tgt_key_padding_mask,
memory_key_padding_mask,
pos,
query_pos,
)
return self.forward_post(
tgt,
memory,
tgt_mask,
memory_mask,
tgt_key_padding_mask,
memory_key_padding_mask,
pos,
query_pos,
)
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(f"activation should be relu/gelu, not {activation}.")
| 3D-LLM-main | three_steps_3d_feature/first_step/mask2former/modeling/transformer_decoder/transformer.py |
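A minimal sketch (editor illustration, hypothetical sizes): driving the DETR-style Transformer above with a dummy feature map; the shapes follow the flattening performed inside forward(). The Transformer class defined in transformer.py above is assumed to be in scope.
import torch
t = Transformer(d_model=256, nhead=8, num_encoder_layers=2, num_decoder_layers=2, return_intermediate_dec=True)
src = torch.randn(1, 256, 16, 16)                # (N, C, H, W) backbone features
mask = torch.zeros(1, 16, 16, dtype=torch.bool)  # nothing is padded
query_embed = torch.randn(100, 256)              # stands in for nn.Embedding(num_queries, d_model).weight
pos_embed = torch.randn(1, 256, 16, 16)          # positional encodings, same shape as src
hs, memory = t(src, mask, query_embed, pos_embed)
print(hs.shape, memory.shape)  # (num_decoder_layers, N, num_queries, C) and (N, C, H, W)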
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import numpy as np
from typing import Callable, Dict, List, Optional, Tuple, Union
import fvcore.nn.weight_init as weight_init
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn.init import xavier_uniform_, constant_, uniform_, normal_
from torch.cuda.amp import autocast
from detectron2.config import configurable
from detectron2.layers import Conv2d, DeformConv, ShapeSpec, get_norm
from detectron2.modeling import SEM_SEG_HEADS_REGISTRY
from ..transformer_decoder.position_encoding import PositionEmbeddingSine
from ..transformer_decoder.transformer import TransformerEncoder, TransformerEncoderLayer, _get_clones, _get_activation_fn
def build_pixel_decoder(cfg, input_shape):
"""
Build a pixel decoder from `cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME`.
"""
name = cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME
model = SEM_SEG_HEADS_REGISTRY.get(name)(cfg, input_shape)
forward_features = getattr(model, "forward_features", None)
if not callable(forward_features):
raise ValueError(
"Only SEM_SEG_HEADS with forward_features method can be used as pixel decoder. "
f"Please implement forward_features for {name} to only return mask features."
)
return model
# This is a modified FPN decoder.
@SEM_SEG_HEADS_REGISTRY.register()
class BasePixelDecoder(nn.Module):
@configurable
def __init__(
self,
input_shape: Dict[str, ShapeSpec],
*,
conv_dim: int,
mask_dim: int,
norm: Optional[Union[str, Callable]] = None,
):
"""
NOTE: this interface is experimental.
Args:
input_shape: shapes (channels and stride) of the input features
conv_dim: number of output channels for the intermediate conv layers.
mask_dim: number of output channels for the final conv layer.
norm (str or callable): normalization for all conv layers
"""
super().__init__()
input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride)
self.in_features = [k for k, v in input_shape] # starting from "res2" to "res5"
feature_channels = [v.channels for k, v in input_shape]
lateral_convs = []
output_convs = []
use_bias = norm == ""
for idx, in_channels in enumerate(feature_channels):
if idx == len(self.in_features) - 1:
output_norm = get_norm(norm, conv_dim)
output_conv = Conv2d(
in_channels,
conv_dim,
kernel_size=3,
stride=1,
padding=1,
bias=use_bias,
norm=output_norm,
activation=F.relu,
)
weight_init.c2_xavier_fill(output_conv)
self.add_module("layer_{}".format(idx + 1), output_conv)
lateral_convs.append(None)
output_convs.append(output_conv)
else:
lateral_norm = get_norm(norm, conv_dim)
output_norm = get_norm(norm, conv_dim)
lateral_conv = Conv2d(
in_channels, conv_dim, kernel_size=1, bias=use_bias, norm=lateral_norm
)
output_conv = Conv2d(
conv_dim,
conv_dim,
kernel_size=3,
stride=1,
padding=1,
bias=use_bias,
norm=output_norm,
activation=F.relu,
)
weight_init.c2_xavier_fill(lateral_conv)
weight_init.c2_xavier_fill(output_conv)
self.add_module("adapter_{}".format(idx + 1), lateral_conv)
self.add_module("layer_{}".format(idx + 1), output_conv)
lateral_convs.append(lateral_conv)
output_convs.append(output_conv)
# Place convs into top-down order (from low to high resolution)
# to make the top-down computation in forward clearer.
self.lateral_convs = lateral_convs[::-1]
self.output_convs = output_convs[::-1]
self.mask_dim = mask_dim
self.mask_features = Conv2d(
conv_dim,
mask_dim,
kernel_size=3,
stride=1,
padding=1,
)
weight_init.c2_xavier_fill(self.mask_features)
self.maskformer_num_feature_levels = 3 # always use 3 scales
@classmethod
def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
ret = {}
ret["input_shape"] = {
k: v for k, v in input_shape.items() if k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES
}
ret["conv_dim"] = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM
ret["mask_dim"] = cfg.MODEL.SEM_SEG_HEAD.MASK_DIM
ret["norm"] = cfg.MODEL.SEM_SEG_HEAD.NORM
return ret
def forward_features(self, features):
multi_scale_features = []
num_cur_levels = 0
# Reverse feature maps into top-down order (from low to high resolution)
for idx, f in enumerate(self.in_features[::-1]):
x = features[f]
lateral_conv = self.lateral_convs[idx]
output_conv = self.output_convs[idx]
if lateral_conv is None:
y = output_conv(x)
else:
cur_fpn = lateral_conv(x)
# Following FPN implementation, we use nearest upsampling here
y = cur_fpn + F.interpolate(y, size=cur_fpn.shape[-2:], mode="nearest")
y = output_conv(y)
if num_cur_levels < self.maskformer_num_feature_levels:
multi_scale_features.append(y)
num_cur_levels += 1
return self.mask_features(y), None, multi_scale_features
def forward(self, features, targets=None):
logger = logging.getLogger(__name__)
logger.warning("Calling forward() may cause unpredicted behavior of PixelDecoder module.")
return self.forward_features(features)
class TransformerEncoderOnly(nn.Module):
def __init__(
self,
d_model=512,
nhead=8,
num_encoder_layers=6,
dim_feedforward=2048,
dropout=0.1,
activation="relu",
normalize_before=False,
):
super().__init__()
encoder_layer = TransformerEncoderLayer(
d_model, nhead, dim_feedforward, dropout, activation, normalize_before
)
encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
self._reset_parameters()
self.d_model = d_model
self.nhead = nhead
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def forward(self, src, mask, pos_embed):
# flatten NxCxHxW to HWxNxC
bs, c, h, w = src.shape
src = src.flatten(2).permute(2, 0, 1)
pos_embed = pos_embed.flatten(2).permute(2, 0, 1)
if mask is not None:
mask = mask.flatten(1)
memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed)
return memory.permute(1, 2, 0).view(bs, c, h, w)
# This is a modified FPN decoder with extra Transformer encoder that processes the lowest-resolution feature map.
@SEM_SEG_HEADS_REGISTRY.register()
class TransformerEncoderPixelDecoder(BasePixelDecoder):
@configurable
def __init__(
self,
input_shape: Dict[str, ShapeSpec],
*,
transformer_dropout: float,
transformer_nheads: int,
transformer_dim_feedforward: int,
transformer_enc_layers: int,
transformer_pre_norm: bool,
conv_dim: int,
mask_dim: int,
norm: Optional[Union[str, Callable]] = None,
):
"""
NOTE: this interface is experimental.
Args:
input_shape: shapes (channels and stride) of the input features
transformer_dropout: dropout probability in transformer
transformer_nheads: number of heads in transformer
transformer_dim_feedforward: dimension of feedforward network
transformer_enc_layers: number of transformer encoder layers
transformer_pre_norm: whether to use pre-layernorm or not
conv_dim: number of output channels for the intermediate conv layers.
mask_dim: number of output channels for the final conv layer.
norm (str or callable): normalization for all conv layers
"""
super().__init__(input_shape, conv_dim=conv_dim, mask_dim=mask_dim, norm=norm)
input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride)
self.in_features = [k for k, v in input_shape] # starting from "res2" to "res5"
feature_strides = [v.stride for k, v in input_shape]
feature_channels = [v.channels for k, v in input_shape]
in_channels = feature_channels[len(self.in_features) - 1]
self.input_proj = Conv2d(in_channels, conv_dim, kernel_size=1)
weight_init.c2_xavier_fill(self.input_proj)
self.transformer = TransformerEncoderOnly(
d_model=conv_dim,
dropout=transformer_dropout,
nhead=transformer_nheads,
dim_feedforward=transformer_dim_feedforward,
num_encoder_layers=transformer_enc_layers,
normalize_before=transformer_pre_norm,
)
N_steps = conv_dim // 2
self.pe_layer = PositionEmbeddingSine(N_steps, normalize=True)
# update layer
use_bias = norm == ""
output_norm = get_norm(norm, conv_dim)
output_conv = Conv2d(
conv_dim,
conv_dim,
kernel_size=3,
stride=1,
padding=1,
bias=use_bias,
norm=output_norm,
activation=F.relu,
)
weight_init.c2_xavier_fill(output_conv)
delattr(self, "layer_{}".format(len(self.in_features)))
self.add_module("layer_{}".format(len(self.in_features)), output_conv)
self.output_convs[0] = output_conv
@classmethod
def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
ret = super().from_config(cfg, input_shape)
ret["transformer_dropout"] = cfg.MODEL.MASK_FORMER.DROPOUT
ret["transformer_nheads"] = cfg.MODEL.MASK_FORMER.NHEADS
ret["transformer_dim_feedforward"] = cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD
ret[
"transformer_enc_layers"
] = cfg.MODEL.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS # a separate config
ret["transformer_pre_norm"] = cfg.MODEL.MASK_FORMER.PRE_NORM
return ret
def forward_features(self, features):
multi_scale_features = []
num_cur_levels = 0
# Reverse feature maps into top-down order (from low to high resolution)
for idx, f in enumerate(self.in_features[::-1]):
x = features[f]
lateral_conv = self.lateral_convs[idx]
output_conv = self.output_convs[idx]
if lateral_conv is None:
transformer = self.input_proj(x)
pos = self.pe_layer(x)
transformer = self.transformer(transformer, None, pos)
y = output_conv(transformer)
# save intermediate feature as input to Transformer decoder
transformer_encoder_features = transformer
else:
cur_fpn = lateral_conv(x)
# Following FPN implementation, we use nearest upsampling here
y = cur_fpn + F.interpolate(y, size=cur_fpn.shape[-2:], mode="nearest")
y = output_conv(y)
if num_cur_levels < self.maskformer_num_feature_levels:
multi_scale_features.append(y)
num_cur_levels += 1
return self.mask_features(y), transformer_encoder_features, multi_scale_features
def forward(self, features, targets=None):
logger = logging.getLogger(__name__)
logger.warning("Calling forward() may cause unpredicted behavior of PixelDecoder module.")
return self.forward_features(features)
| 3D-LLM-main | three_steps_3d_feature/first_step/mask2former/modeling/pixel_decoder/fpn.py |
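A minimal sketch (editor illustration): instantiating BasePixelDecoder above directly with detectron2 ShapeSpecs instead of going through from_config. The channel/stride values mimic a ResNet backbone and are hypothetical, and the class is assumed to be importable in the current environment.
import torch
from detectron2.layers import ShapeSpec
shapes = {
"res2": ShapeSpec(channels=256, stride=4),
"res3": ShapeSpec(channels=512, stride=8),
"res4": ShapeSpec(channels=1024, stride=16),
"res5": ShapeSpec(channels=2048, stride=32),
}
decoder = BasePixelDecoder(shapes, conv_dim=256, mask_dim=256, norm="GN")
feats = {k: torch.randn(1, v.channels, 256 // v.stride, 256 // v.stride) for k, v in shapes.items()}
# returns stride-4 mask features plus the three coarsest scales for the Transformer decoder
mask_features, _, multi_scale_features = decoder.forward_features(feats)
print(mask_features.shape, [tuple(m.shape) for m in multi_scale_features])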
# Copyright (c) Facebook, Inc. and its affiliates.
| 3D-LLM-main | three_steps_3d_feature/first_step/mask2former/modeling/pixel_decoder/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import numpy as np
from typing import Callable, Dict, List, Optional, Tuple, Union
import fvcore.nn.weight_init as weight_init
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn.init import xavier_uniform_, constant_, uniform_, normal_
from torch.cuda.amp import autocast
from detectron2.config import configurable
from detectron2.layers import Conv2d, ShapeSpec, get_norm
from detectron2.modeling import SEM_SEG_HEADS_REGISTRY
from ..transformer_decoder.position_encoding import PositionEmbeddingSine
from ..transformer_decoder.transformer import _get_clones, _get_activation_fn
from .ops.modules import MSDeformAttn
# MSDeformAttn Transformer encoder in deformable detr
class MSDeformAttnTransformerEncoderOnly(nn.Module):
def __init__(self, d_model=256, nhead=8,
num_encoder_layers=6, dim_feedforward=1024, dropout=0.1,
activation="relu",
num_feature_levels=4, enc_n_points=4,
):
super().__init__()
self.d_model = d_model
self.nhead = nhead
encoder_layer = MSDeformAttnTransformerEncoderLayer(d_model, dim_feedforward,
dropout, activation,
num_feature_levels, nhead, enc_n_points)
self.encoder = MSDeformAttnTransformerEncoder(encoder_layer, num_encoder_layers)
self.level_embed = nn.Parameter(torch.Tensor(num_feature_levels, d_model))
self._reset_parameters()
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
for m in self.modules():
if isinstance(m, MSDeformAttn):
m._reset_parameters()
normal_(self.level_embed)
def get_valid_ratio(self, mask):
_, H, W = mask.shape
valid_H = torch.sum(~mask[:, :, 0], 1)
valid_W = torch.sum(~mask[:, 0, :], 1)
valid_ratio_h = valid_H.float() / H
valid_ratio_w = valid_W.float() / W
valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1)
return valid_ratio
def forward(self, srcs, pos_embeds):
masks = [torch.zeros((x.size(0), x.size(2), x.size(3)), device=x.device, dtype=torch.bool) for x in srcs]
# prepare input for encoder
src_flatten = []
mask_flatten = []
lvl_pos_embed_flatten = []
spatial_shapes = []
for lvl, (src, mask, pos_embed) in enumerate(zip(srcs, masks, pos_embeds)):
bs, c, h, w = src.shape
spatial_shape = (h, w)
spatial_shapes.append(spatial_shape)
src = src.flatten(2).transpose(1, 2)
mask = mask.flatten(1)
pos_embed = pos_embed.flatten(2).transpose(1, 2)
lvl_pos_embed = pos_embed + self.level_embed[lvl].view(1, 1, -1)
lvl_pos_embed_flatten.append(lvl_pos_embed)
src_flatten.append(src)
mask_flatten.append(mask)
src_flatten = torch.cat(src_flatten, 1)
mask_flatten = torch.cat(mask_flatten, 1)
lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1)
spatial_shapes = torch.as_tensor(spatial_shapes, dtype=torch.long, device=src_flatten.device)
level_start_index = torch.cat((spatial_shapes.new_zeros((1, )), spatial_shapes.prod(1).cumsum(0)[:-1]))
valid_ratios = torch.stack([self.get_valid_ratio(m) for m in masks], 1)
# encoder
memory = self.encoder(src_flatten, spatial_shapes, level_start_index, valid_ratios, lvl_pos_embed_flatten, mask_flatten)
return memory, spatial_shapes, level_start_index
class MSDeformAttnTransformerEncoderLayer(nn.Module):
def __init__(self,
d_model=256, d_ffn=1024,
dropout=0.1, activation="relu",
n_levels=4, n_heads=8, n_points=4):
super().__init__()
# self attention
self.self_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)
self.dropout1 = nn.Dropout(dropout)
self.norm1 = nn.LayerNorm(d_model)
# ffn
self.linear1 = nn.Linear(d_model, d_ffn)
self.activation = _get_activation_fn(activation)
self.dropout2 = nn.Dropout(dropout)
self.linear2 = nn.Linear(d_ffn, d_model)
self.dropout3 = nn.Dropout(dropout)
self.norm2 = nn.LayerNorm(d_model)
@staticmethod
def with_pos_embed(tensor, pos):
return tensor if pos is None else tensor + pos
def forward_ffn(self, src):
src2 = self.linear2(self.dropout2(self.activation(self.linear1(src))))
src = src + self.dropout3(src2)
src = self.norm2(src)
return src
def forward(self, src, pos, reference_points, spatial_shapes, level_start_index, padding_mask=None):
# self attention
src2 = self.self_attn(self.with_pos_embed(src, pos), reference_points, src, spatial_shapes, level_start_index, padding_mask)
src = src + self.dropout1(src2)
src = self.norm1(src)
# ffn
src = self.forward_ffn(src)
return src
class MSDeformAttnTransformerEncoder(nn.Module):
def __init__(self, encoder_layer, num_layers):
super().__init__()
self.layers = _get_clones(encoder_layer, num_layers)
self.num_layers = num_layers
@staticmethod
def get_reference_points(spatial_shapes, valid_ratios, device):
reference_points_list = []
for lvl, (H_, W_) in enumerate(spatial_shapes):
ref_y, ref_x = torch.meshgrid(torch.linspace(0.5, H_ - 0.5, H_, dtype=torch.float32, device=device),
torch.linspace(0.5, W_ - 0.5, W_, dtype=torch.float32, device=device))
ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, lvl, 1] * H_)
ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, lvl, 0] * W_)
ref = torch.stack((ref_x, ref_y), -1)
reference_points_list.append(ref)
reference_points = torch.cat(reference_points_list, 1)
reference_points = reference_points[:, :, None] * valid_ratios[:, None]
return reference_points
def forward(self, src, spatial_shapes, level_start_index, valid_ratios, pos=None, padding_mask=None):
output = src
reference_points = self.get_reference_points(spatial_shapes, valid_ratios, device=src.device)
for _, layer in enumerate(self.layers):
output = layer(output, pos, reference_points, spatial_shapes, level_start_index, padding_mask)
return output
@SEM_SEG_HEADS_REGISTRY.register()
class MSDeformAttnPixelDecoder(nn.Module):
@configurable
def __init__(
self,
input_shape: Dict[str, ShapeSpec],
*,
transformer_dropout: float,
transformer_nheads: int,
transformer_dim_feedforward: int,
transformer_enc_layers: int,
conv_dim: int,
mask_dim: int,
norm: Optional[Union[str, Callable]] = None,
# deformable transformer encoder args
transformer_in_features: List[str],
common_stride: int,
):
"""
NOTE: this interface is experimental.
Args:
input_shape: shapes (channels and stride) of the input features
transformer_dropout: dropout probability in transformer
transformer_nheads: number of heads in transformer
transformer_dim_feedforward: dimension of feedforward network
transformer_enc_layers: number of transformer encoder layers
conv_dim: number of output channels for the intermediate conv layers.
mask_dim: number of output channels for the final conv layer.
norm (str or callable): normalization for all conv layers
"""
super().__init__()
transformer_input_shape = {
k: v for k, v in input_shape.items() if k in transformer_in_features
}
# this is the input shape of pixel decoder
input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride)
self.in_features = [k for k, v in input_shape] # starting from "res2" to "res5"
self.feature_strides = [v.stride for k, v in input_shape]
self.feature_channels = [v.channels for k, v in input_shape]
# this is the input shape of the transformer encoder (it may use fewer features than the pixel decoder)
transformer_input_shape = sorted(transformer_input_shape.items(), key=lambda x: x[1].stride)
self.transformer_in_features = [k for k, v in transformer_input_shape] # starting from "res2" to "res5"
transformer_in_channels = [v.channels for k, v in transformer_input_shape]
self.transformer_feature_strides = [v.stride for k, v in transformer_input_shape] # to decide extra FPN layers
self.transformer_num_feature_levels = len(self.transformer_in_features)
if self.transformer_num_feature_levels > 1:
input_proj_list = []
# from low resolution to high resolution (res5 -> res2)
for in_channels in transformer_in_channels[::-1]:
input_proj_list.append(nn.Sequential(
nn.Conv2d(in_channels, conv_dim, kernel_size=1),
nn.GroupNorm(32, conv_dim),
))
self.input_proj = nn.ModuleList(input_proj_list)
else:
self.input_proj = nn.ModuleList([
nn.Sequential(
nn.Conv2d(transformer_in_channels[-1], conv_dim, kernel_size=1),
nn.GroupNorm(32, conv_dim),
)])
for proj in self.input_proj:
nn.init.xavier_uniform_(proj[0].weight, gain=1)
nn.init.constant_(proj[0].bias, 0)
self.transformer = MSDeformAttnTransformerEncoderOnly(
d_model=conv_dim,
dropout=transformer_dropout,
nhead=transformer_nheads,
dim_feedforward=transformer_dim_feedforward,
num_encoder_layers=transformer_enc_layers,
num_feature_levels=self.transformer_num_feature_levels,
)
N_steps = conv_dim // 2
self.pe_layer = PositionEmbeddingSine(N_steps, normalize=True)
self.mask_dim = mask_dim
# use 1x1 conv instead
self.mask_features = Conv2d(
conv_dim,
mask_dim,
kernel_size=1,
stride=1,
padding=0,
)
weight_init.c2_xavier_fill(self.mask_features)
self.maskformer_num_feature_levels = 3 # always use 3 scales
self.common_stride = common_stride
# extra fpn levels
stride = min(self.transformer_feature_strides)
self.num_fpn_levels = int(np.log2(stride) - np.log2(self.common_stride))
lateral_convs = []
output_convs = []
use_bias = norm == ""
for idx, in_channels in enumerate(self.feature_channels[:self.num_fpn_levels]):
lateral_norm = get_norm(norm, conv_dim)
output_norm = get_norm(norm, conv_dim)
lateral_conv = Conv2d(
in_channels, conv_dim, kernel_size=1, bias=use_bias, norm=lateral_norm
)
output_conv = Conv2d(
conv_dim,
conv_dim,
kernel_size=3,
stride=1,
padding=1,
bias=use_bias,
norm=output_norm,
activation=F.relu,
)
weight_init.c2_xavier_fill(lateral_conv)
weight_init.c2_xavier_fill(output_conv)
self.add_module("adapter_{}".format(idx + 1), lateral_conv)
self.add_module("layer_{}".format(idx + 1), output_conv)
lateral_convs.append(lateral_conv)
output_convs.append(output_conv)
# Place convs into top-down order (from low to high resolution)
# to make the top-down computation in forward clearer.
self.lateral_convs = lateral_convs[::-1]
self.output_convs = output_convs[::-1]
@classmethod
def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
ret = {}
ret["input_shape"] = {
k: v for k, v in input_shape.items() if k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES
}
ret["conv_dim"] = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM
ret["mask_dim"] = cfg.MODEL.SEM_SEG_HEAD.MASK_DIM
ret["norm"] = cfg.MODEL.SEM_SEG_HEAD.NORM
ret["transformer_dropout"] = cfg.MODEL.MASK_FORMER.DROPOUT
ret["transformer_nheads"] = cfg.MODEL.MASK_FORMER.NHEADS
# ret["transformer_dim_feedforward"] = cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD
ret["transformer_dim_feedforward"] = 1024 # use 1024 for deformable transformer encoder
ret[
"transformer_enc_layers"
] = cfg.MODEL.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS # a separate config
ret["transformer_in_features"] = cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_IN_FEATURES
ret["common_stride"] = cfg.MODEL.SEM_SEG_HEAD.COMMON_STRIDE
return ret
@autocast(enabled=False)
def forward_features(self, features):
srcs = []
pos = []
# Reverse feature maps into top-down order (from low to high resolution)
for idx, f in enumerate(self.transformer_in_features[::-1]):
x = features[f].float() # deformable detr does not support half precision
srcs.append(self.input_proj[idx](x))
pos.append(self.pe_layer(x))
y, spatial_shapes, level_start_index = self.transformer(srcs, pos)
bs = y.shape[0]
split_size_or_sections = [None] * self.transformer_num_feature_levels
for i in range(self.transformer_num_feature_levels):
if i < self.transformer_num_feature_levels - 1:
split_size_or_sections[i] = level_start_index[i + 1] - level_start_index[i]
else:
split_size_or_sections[i] = y.shape[1] - level_start_index[i]
y = torch.split(y, split_size_or_sections, dim=1)
out = []
multi_scale_features = []
num_cur_levels = 0
for i, z in enumerate(y):
out.append(z.transpose(1, 2).view(bs, -1, spatial_shapes[i][0], spatial_shapes[i][1]))
# append `out` with extra FPN levels
# Reverse feature maps into top-down order (from low to high resolution)
for idx, f in enumerate(self.in_features[:self.num_fpn_levels][::-1]):
x = features[f].float()
lateral_conv = self.lateral_convs[idx]
output_conv = self.output_convs[idx]
cur_fpn = lateral_conv(x)
# Following FPN implementation, we use nearest upsampling here
y = cur_fpn + F.interpolate(out[-1], size=cur_fpn.shape[-2:], mode="bilinear", align_corners=False)
y = output_conv(y)
out.append(y)
for o in out:
if num_cur_levels < self.maskformer_num_feature_levels:
multi_scale_features.append(o)
num_cur_levels += 1
return self.mask_features(out[-1]), out[0], multi_scale_features
| 3D-LLM-main | three_steps_3d_feature/first_step/mask2former/modeling/pixel_decoder/msdeformattn.py |
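A small worked example (editor illustration): spatial_shapes and level_start_index, as built in forward above, describe where each feature level starts in the flattened multi-scale token sequence fed to the deformable encoder. The three levels below are hypothetical.
import torch
spatial_shapes = torch.as_tensor([(32, 32), (16, 16), (8, 8)], dtype=torch.long)
level_start_index = torch.cat((spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1]))
print(level_start_index)  # tensor([   0, 1024, 1280]) -> offsets of the 3 levels in a 1344-token sequence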
# ------------------------------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------------------------------
# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
# ------------------------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import time
import torch
import torch.nn as nn
from torch.autograd import gradcheck
from functions.ms_deform_attn_func import MSDeformAttnFunction, ms_deform_attn_core_pytorch
N, M, D = 1, 2, 2
Lq, L, P = 2, 2, 2
shapes = torch.as_tensor([(6, 4), (3, 2)], dtype=torch.long).cuda()
level_start_index = torch.cat((shapes.new_zeros((1, )), shapes.prod(1).cumsum(0)[:-1]))
S = sum([(H*W).item() for H, W in shapes])
torch.manual_seed(3)
@torch.no_grad()
def check_forward_equal_with_pytorch_double():
value = torch.rand(N, S, M, D).cuda() * 0.01
sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda()
attention_weights = torch.rand(N, Lq, M, L, P).cuda() + 1e-5
attention_weights /= attention_weights.sum(-1, keepdim=True).sum(-2, keepdim=True)
im2col_step = 2
output_pytorch = ms_deform_attn_core_pytorch(value.double(), shapes, sampling_locations.double(), attention_weights.double()).detach().cpu()
output_cuda = MSDeformAttnFunction.apply(value.double(), shapes, level_start_index, sampling_locations.double(), attention_weights.double(), im2col_step).detach().cpu()
fwdok = torch.allclose(output_cuda, output_pytorch)
max_abs_err = (output_cuda - output_pytorch).abs().max()
max_rel_err = ((output_cuda - output_pytorch).abs() / output_pytorch.abs()).max()
print(f'* {fwdok} check_forward_equal_with_pytorch_double: max_abs_err {max_abs_err:.2e} max_rel_err {max_rel_err:.2e}')
@torch.no_grad()
def check_forward_equal_with_pytorch_float():
value = torch.rand(N, S, M, D).cuda() * 0.01
sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda()
attention_weights = torch.rand(N, Lq, M, L, P).cuda() + 1e-5
attention_weights /= attention_weights.sum(-1, keepdim=True).sum(-2, keepdim=True)
im2col_step = 2
output_pytorch = ms_deform_attn_core_pytorch(value, shapes, sampling_locations, attention_weights).detach().cpu()
output_cuda = MSDeformAttnFunction.apply(value, shapes, level_start_index, sampling_locations, attention_weights, im2col_step).detach().cpu()
fwdok = torch.allclose(output_cuda, output_pytorch, rtol=1e-2, atol=1e-3)
max_abs_err = (output_cuda - output_pytorch).abs().max()
max_rel_err = ((output_cuda - output_pytorch).abs() / output_pytorch.abs()).max()
print(f'* {fwdok} check_forward_equal_with_pytorch_float: max_abs_err {max_abs_err:.2e} max_rel_err {max_rel_err:.2e}')
def check_gradient_numerical(channels=4, grad_value=True, grad_sampling_loc=True, grad_attn_weight=True):
value = torch.rand(N, S, M, channels).cuda() * 0.01
sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda()
attention_weights = torch.rand(N, Lq, M, L, P).cuda() + 1e-5
attention_weights /= attention_weights.sum(-1, keepdim=True).sum(-2, keepdim=True)
im2col_step = 2
func = MSDeformAttnFunction.apply
value.requires_grad = grad_value
sampling_locations.requires_grad = grad_sampling_loc
attention_weights.requires_grad = grad_attn_weight
gradok = gradcheck(func, (value.double(), shapes, level_start_index, sampling_locations.double(), attention_weights.double(), im2col_step))
print(f'* {gradok} check_gradient_numerical(D={channels})')
if __name__ == '__main__':
check_forward_equal_with_pytorch_double()
check_forward_equal_with_pytorch_float()
for channels in [30, 32, 64, 71, 1025, 2048, 3096]:
check_gradient_numerical(channels, True, True, True)
| 3D-LLM-main | three_steps_3d_feature/first_step/mask2former/modeling/pixel_decoder/ops/test.py |
# ------------------------------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------------------------------
# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
# ------------------------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
import os
import glob
import torch
from torch.utils.cpp_extension import CUDA_HOME
from torch.utils.cpp_extension import CppExtension
from torch.utils.cpp_extension import CUDAExtension
from setuptools import find_packages
from setuptools import setup
requirements = ["torch", "torchvision"]
def get_extensions():
this_dir = os.path.dirname(os.path.abspath(__file__))
extensions_dir = os.path.join(this_dir, "src")
main_file = glob.glob(os.path.join(extensions_dir, "*.cpp"))
source_cpu = glob.glob(os.path.join(extensions_dir, "cpu", "*.cpp"))
source_cuda = glob.glob(os.path.join(extensions_dir, "cuda", "*.cu"))
sources = main_file + source_cpu
extension = CppExtension
extra_compile_args = {"cxx": []}
define_macros = []
# Force the CUDA build when FORCE_CUDA is set: torch only needs a device at run time, not CUDA availability at build time.
if (os.environ.get('FORCE_CUDA') or torch.cuda.is_available()) and CUDA_HOME is not None:
extension = CUDAExtension
sources += source_cuda
define_macros += [("WITH_CUDA", None)]
extra_compile_args["nvcc"] = [
"-DCUDA_HAS_FP16=1",
"-D__CUDA_NO_HALF_OPERATORS__",
"-D__CUDA_NO_HALF_CONVERSIONS__",
"-D__CUDA_NO_HALF2_OPERATORS__",
]
else:
if CUDA_HOME is None:
raise NotImplementedError('CUDA_HOME is None. Please set environment variable CUDA_HOME.')
else:
raise NotImplementedError('No CUDA runtime is found. Please set FORCE_CUDA=1 or test it by running torch.cuda.is_available().')
sources = [os.path.join(extensions_dir, s) for s in sources]
include_dirs = [extensions_dir]
ext_modules = [
extension(
"MultiScaleDeformableAttention",
sources,
include_dirs=include_dirs,
define_macros=define_macros,
extra_compile_args=extra_compile_args,
)
]
return ext_modules
setup(
name="MultiScaleDeformableAttention",
version="1.0",
author="Weijie Su",
url="https://github.com/fundamentalvision/Deformable-DETR",
description="PyTorch Wrapper for CUDA Functions of Multi-Scale Deformable Attention",
packages=find_packages(exclude=("configs", "tests",)),
ext_modules=get_extensions(),
cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
)
| 3D-LLM-main | three_steps_3d_feature/first_step/mask2former/modeling/pixel_decoder/ops/setup.py |
# ------------------------------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------------------------------
# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
# ------------------------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import torch
import torch.nn.functional as F
from torch.autograd import Function
from torch.autograd.function import once_differentiable
try:
import MultiScaleDeformableAttention as MSDA
except ModuleNotFoundError as e:
info_string = (
"\n\nPlease compile MultiScaleDeformableAttention CUDA op with the following commands:\n"
"\t`cd mask2former/modeling/pixel_decoder/ops`\n"
"\t`sh make.sh`\n"
)
raise ModuleNotFoundError(info_string) from e
class MSDeformAttnFunction(Function):
@staticmethod
def forward(ctx, value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, im2col_step):
ctx.im2col_step = im2col_step
output = MSDA.ms_deform_attn_forward(
value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, ctx.im2col_step)
ctx.save_for_backward(value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights)
return output
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights = ctx.saved_tensors
grad_value, grad_sampling_loc, grad_attn_weight = \
MSDA.ms_deform_attn_backward(
value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, grad_output, ctx.im2col_step)
return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
def ms_deform_attn_core_pytorch(value, value_spatial_shapes, sampling_locations, attention_weights):
# for debugging and testing only;
# use the CUDA version in practice
N_, S_, M_, D_ = value.shape
_, Lq_, M_, L_, P_, _ = sampling_locations.shape
value_list = value.split([H_ * W_ for H_, W_ in value_spatial_shapes], dim=1)
sampling_grids = 2 * sampling_locations - 1
sampling_value_list = []
for lid_, (H_, W_) in enumerate(value_spatial_shapes):
# N_, H_*W_, M_, D_ -> N_, H_*W_, M_*D_ -> N_, M_*D_, H_*W_ -> N_*M_, D_, H_, W_
value_l_ = value_list[lid_].flatten(2).transpose(1, 2).reshape(N_*M_, D_, H_, W_)
# N_, Lq_, M_, P_, 2 -> N_, M_, Lq_, P_, 2 -> N_*M_, Lq_, P_, 2
sampling_grid_l_ = sampling_grids[:, :, :, lid_].transpose(1, 2).flatten(0, 1)
# N_*M_, D_, Lq_, P_
sampling_value_l_ = F.grid_sample(value_l_, sampling_grid_l_,
mode='bilinear', padding_mode='zeros', align_corners=False)
sampling_value_list.append(sampling_value_l_)
# (N_, Lq_, M_, L_, P_) -> (N_, M_, Lq_, L_, P_) -> (N_, M_, 1, Lq_, L_*P_)
attention_weights = attention_weights.transpose(1, 2).reshape(N_*M_, 1, Lq_, L_*P_)
output = (torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights).sum(-1).view(N_, M_*D_, Lq_)
return output.transpose(1, 2).contiguous()
| 3D-LLM-main | three_steps_3d_feature/first_step/mask2former/modeling/pixel_decoder/ops/functions/ms_deform_attn_func.py |
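A minimal CPU sketch (editor illustration, hypothetical sizes): exercising the pure-PyTorch reference kernel ms_deform_attn_core_pytorch defined above. Note that importing the module itself requires the compiled MultiScaleDeformableAttention op; the function is assumed to already be in scope here.
import torch
N, M, D, Lq, P = 1, 2, 4, 3, 2
shapes = torch.as_tensor([(6, 4), (3, 2)], dtype=torch.long)
S = int(shapes.prod(1).sum())                                  # total number of flattened value tokens
value = torch.rand(N, S, M, D)
sampling_locations = torch.rand(N, Lq, M, len(shapes), P, 2)   # normalized (x, y) sampling points
attention_weights = torch.rand(N, Lq, M, len(shapes), P) + 1e-5
attention_weights = attention_weights / attention_weights.sum((-1, -2), keepdim=True)
out = ms_deform_attn_core_pytorch(value, shapes, sampling_locations, attention_weights)
print(out.shape)  # torch.Size([1, 3, 8]) i.e. (N, Lq, M * D)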
# ------------------------------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------------------------------
# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
# ------------------------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
from .ms_deform_attn_func import MSDeformAttnFunction
| 3D-LLM-main | three_steps_3d_feature/first_step/mask2former/modeling/pixel_decoder/ops/functions/__init__.py |
# ------------------------------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------------------------------
# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
# ------------------------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import warnings
import math
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.init import xavier_uniform_, constant_
from ..functions import MSDeformAttnFunction
from ..functions.ms_deform_attn_func import ms_deform_attn_core_pytorch
def _is_power_of_2(n):
if (not isinstance(n, int)) or (n < 0):
raise ValueError("invalid input for _is_power_of_2: {} (type: {})".format(n, type(n)))
return (n & (n-1) == 0) and n != 0
class MSDeformAttn(nn.Module):
def __init__(self, d_model=256, n_levels=4, n_heads=8, n_points=4):
"""
Multi-Scale Deformable Attention Module
:param d_model hidden dimension
:param n_levels number of feature levels
:param n_heads number of attention heads
:param n_points number of sampling points per attention head per feature level
"""
super().__init__()
if d_model % n_heads != 0:
raise ValueError('d_model must be divisible by n_heads, but got {} and {}'.format(d_model, n_heads))
_d_per_head = d_model // n_heads
# _d_per_head should be a power of 2, which is more efficient in our CUDA implementation
if not _is_power_of_2(_d_per_head):
warnings.warn("You'd better set d_model in MSDeformAttn to make the dimension of each attention head a power of 2 "
"which is more efficient in our CUDA implementation.")
self.im2col_step = 128
self.d_model = d_model
self.n_levels = n_levels
self.n_heads = n_heads
self.n_points = n_points
self.sampling_offsets = nn.Linear(d_model, n_heads * n_levels * n_points * 2)
self.attention_weights = nn.Linear(d_model, n_heads * n_levels * n_points)
self.value_proj = nn.Linear(d_model, d_model)
self.output_proj = nn.Linear(d_model, d_model)
self._reset_parameters()
def _reset_parameters(self):
constant_(self.sampling_offsets.weight.data, 0.)
thetas = torch.arange(self.n_heads, dtype=torch.float32) * (2.0 * math.pi / self.n_heads)
grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
grid_init = (grid_init / grid_init.abs().max(-1, keepdim=True)[0]).view(self.n_heads, 1, 1, 2).repeat(1, self.n_levels, self.n_points, 1)
for i in range(self.n_points):
grid_init[:, :, i, :] *= i + 1
with torch.no_grad():
self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1))
constant_(self.attention_weights.weight.data, 0.)
constant_(self.attention_weights.bias.data, 0.)
xavier_uniform_(self.value_proj.weight.data)
constant_(self.value_proj.bias.data, 0.)
xavier_uniform_(self.output_proj.weight.data)
constant_(self.output_proj.bias.data, 0.)
def forward(self, query, reference_points, input_flatten, input_spatial_shapes, input_level_start_index, input_padding_mask=None):
"""
:param query (N, Length_{query}, C)
:param reference_points (N, Length_{query}, n_levels, 2), range in [0, 1], top-left (0,0), bottom-right (1, 1), including padding area
or (N, Length_{query}, n_levels, 4), add additional (w, h) to form reference boxes
:param input_flatten (N, \sum_{l=0}^{L-1} H_l \cdot W_l, C)
:param input_spatial_shapes (n_levels, 2), [(H_0, W_0), (H_1, W_1), ..., (H_{L-1}, W_{L-1})]
:param input_level_start_index (n_levels, ), [0, H_0*W_0, H_0*W_0+H_1*W_1, H_0*W_0+H_1*W_1+H_2*W_2, ..., H_0*W_0+H_1*W_1+...+H_{L-1}*W_{L-1}]
:param input_padding_mask (N, \sum_{l=0}^{L-1} H_l \cdot W_l), True for padding elements, False for non-padding elements
:return output (N, Length_{query}, C)
"""
N, Len_q, _ = query.shape
N, Len_in, _ = input_flatten.shape
assert (input_spatial_shapes[:, 0] * input_spatial_shapes[:, 1]).sum() == Len_in
value = self.value_proj(input_flatten)
if input_padding_mask is not None:
value = value.masked_fill(input_padding_mask[..., None], float(0))
value = value.view(N, Len_in, self.n_heads, self.d_model // self.n_heads)
sampling_offsets = self.sampling_offsets(query).view(N, Len_q, self.n_heads, self.n_levels, self.n_points, 2)
attention_weights = self.attention_weights(query).view(N, Len_q, self.n_heads, self.n_levels * self.n_points)
attention_weights = F.softmax(attention_weights, -1).view(N, Len_q, self.n_heads, self.n_levels, self.n_points)
# N, Len_q, n_heads, n_levels, n_points, 2
if reference_points.shape[-1] == 2:
offset_normalizer = torch.stack([input_spatial_shapes[..., 1], input_spatial_shapes[..., 0]], -1)
sampling_locations = reference_points[:, :, None, :, None, :] \
+ sampling_offsets / offset_normalizer[None, None, None, :, None, :]
elif reference_points.shape[-1] == 4:
sampling_locations = reference_points[:, :, None, :, None, :2] \
+ sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5
else:
raise ValueError(
'Last dim of reference_points must be 2 or 4, but get {} instead.'.format(reference_points.shape[-1]))
        try:
            output = MSDeformAttnFunction.apply(
                value, input_spatial_shapes, input_level_start_index, sampling_locations, attention_weights, self.im2col_step)
        except Exception:
            # fall back to the pure-PyTorch implementation (e.g. the CUDA extension is not built, or the tensors live on CPU)
            output = ms_deform_attn_core_pytorch(value, input_spatial_shapes, sampling_locations, attention_weights)
# # For FLOPs calculation only
# output = ms_deform_attn_core_pytorch(value, input_spatial_shapes, sampling_locations, attention_weights)
output = self.output_proj(output)
return output
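# --- Illustrative usage sketch (not part of the original file) ---
# A minimal, hedged example of how the inputs to MSDeformAttn are laid out,
# assuming two feature levels of size 8x8 and 4x4 that have been flattened and
# concatenated along the length dimension, as the deformable encoder does.
def _ms_deform_attn_demo():
    n, d_model = 2, 256
    spatial_shapes = torch.as_tensor([[8, 8], [4, 4]], dtype=torch.long)
    # start offset of each level inside the flattened sequence: [0, 64]
    level_start_index = torch.cat(
        (spatial_shapes.new_zeros((1,)), (spatial_shapes[:, 0] * spatial_shapes[:, 1]).cumsum(0)[:-1])
    )
    len_in = int((spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum())  # 64 + 16 = 80
    query = torch.rand(n, len_in, d_model)
    # normalized (x, y) reference points in [0, 1], one per query and per level
    reference_points = torch.rand(n, len_in, 2, 2)
    attn = MSDeformAttn(d_model=d_model, n_levels=2, n_heads=8, n_points=4)
    out = attn(query, reference_points, query, spatial_shapes, level_start_index)
    return out.shape  # torch.Size([2, 80, 256])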
| 3D-LLM-main | three_steps_3d_feature/first_step/mask2former/modeling/pixel_decoder/ops/modules/ms_deform_attn.py |
# ------------------------------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------------------------------
# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
# ------------------------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
from .ms_deform_attn import MSDeformAttn
| 3D-LLM-main | three_steps_3d_feature/first_step/mask2former/modeling/pixel_decoder/ops/modules/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
from copy import deepcopy
from typing import Callable, Dict, List, Optional, Tuple, Union
import fvcore.nn.weight_init as weight_init
from torch import nn
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.layers import Conv2d, ShapeSpec, get_norm
from detectron2.modeling import SEM_SEG_HEADS_REGISTRY
from ..transformer_decoder.maskformer_transformer_decoder import build_transformer_decoder
from ..pixel_decoder.fpn import build_pixel_decoder
@SEM_SEG_HEADS_REGISTRY.register()
class MaskFormerHead(nn.Module):
_version = 2
def _load_from_state_dict(
self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
):
version = local_metadata.get("version", None)
if version is None or version < 2:
# Do not warn if train from scratch
scratch = True
logger = logging.getLogger(__name__)
for k in list(state_dict.keys()):
newk = k
if "sem_seg_head" in k and not k.startswith(prefix + "predictor"):
newk = k.replace(prefix, prefix + "pixel_decoder.")
# logger.debug(f"{k} ==> {newk}")
if newk != k:
state_dict[newk] = state_dict[k]
del state_dict[k]
scratch = False
if not scratch:
logger.warning(
f"Weight format of {self.__class__.__name__} have changed! "
"Please upgrade your models. Applying automatic conversion now ..."
)
@configurable
def __init__(
self,
input_shape: Dict[str, ShapeSpec],
*,
num_classes: int,
pixel_decoder: nn.Module,
loss_weight: float = 1.0,
ignore_value: int = -1,
# extra parameters
transformer_predictor: nn.Module,
transformer_in_feature: str,
):
"""
NOTE: this interface is experimental.
Args:
input_shape: shapes (channels and stride) of the input features
num_classes: number of classes to predict
pixel_decoder: the pixel decoder module
loss_weight: loss weight
ignore_value: category id to be ignored during training.
transformer_predictor: the transformer decoder that makes prediction
transformer_in_feature: input feature name to the transformer_predictor
"""
super().__init__()
input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride)
self.in_features = [k for k, v in input_shape]
feature_strides = [v.stride for k, v in input_shape]
feature_channels = [v.channels for k, v in input_shape]
self.ignore_value = ignore_value
self.common_stride = 4
self.loss_weight = loss_weight
self.pixel_decoder = pixel_decoder
self.predictor = transformer_predictor
self.transformer_in_feature = transformer_in_feature
self.num_classes = num_classes
@classmethod
def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
# figure out in_channels to transformer predictor
if cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE == "transformer_encoder":
transformer_predictor_in_channels = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM
elif cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE == "pixel_embedding":
transformer_predictor_in_channels = cfg.MODEL.SEM_SEG_HEAD.MASK_DIM
elif cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE == "multi_scale_pixel_decoder": # for maskformer2
transformer_predictor_in_channels = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM
else:
transformer_predictor_in_channels = input_shape[cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE].channels
return {
"input_shape": {
k: v for k, v in input_shape.items() if k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES
},
"ignore_value": cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
"num_classes": cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
"pixel_decoder": build_pixel_decoder(cfg, input_shape),
"loss_weight": cfg.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT,
"transformer_in_feature": cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE,
"transformer_predictor": build_transformer_decoder(
cfg,
transformer_predictor_in_channels,
mask_classification=True,
),
}
def forward(self, features, mask=None):
return self.layers(features, mask)
def layers(self, features, mask=None):
mask_features, transformer_encoder_features, multi_scale_features = self.pixel_decoder.forward_features(features)
if self.transformer_in_feature == "multi_scale_pixel_decoder":
predictions = self.predictor(multi_scale_features, mask_features, mask)
else:
if self.transformer_in_feature == "transformer_encoder":
assert (
transformer_encoder_features is not None
), "Please use the TransformerEncoderPixelDecoder."
predictions = self.predictor(transformer_encoder_features, mask_features, mask)
elif self.transformer_in_feature == "pixel_embedding":
predictions = self.predictor(mask_features, mask_features, mask)
else:
predictions = self.predictor(features[self.transformer_in_feature], mask_features, mask)
return predictions
| 3D-LLM-main | three_steps_3d_feature/first_step/mask2former/modeling/meta_arch/mask_former_head.py |
# Copyright (c) Facebook, Inc. and its affiliates.
| 3D-LLM-main | three_steps_3d_feature/first_step/mask2former/modeling/meta_arch/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
from typing import Callable, Dict, List, Optional, Tuple, Union
import fvcore.nn.weight_init as weight_init
from torch import nn
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.layers import Conv2d, ShapeSpec, get_norm
from detectron2.modeling import SEM_SEG_HEADS_REGISTRY
from ..transformer_decoder.maskformer_transformer_decoder import StandardTransformerDecoder
from ..pixel_decoder.fpn import build_pixel_decoder
@SEM_SEG_HEADS_REGISTRY.register()
class PerPixelBaselineHead(nn.Module):
_version = 2
def _load_from_state_dict(
self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
):
version = local_metadata.get("version", None)
if version is None or version < 2:
            # Do not warn if train from scratch
            scratch = True
            logger = logging.getLogger(__name__)
for k in list(state_dict.keys()):
newk = k
if "sem_seg_head" in k and not k.startswith(prefix + "predictor"):
newk = k.replace(prefix, prefix + "pixel_decoder.")
# logger.warning(f"{k} ==> {newk}")
if newk != k:
state_dict[newk] = state_dict[k]
del state_dict[k]
scratch = False
if not scratch:
logger.warning(
f"Weight format of {self.__class__.__name__} have changed! "
"Please upgrade your models. Applying automatic conversion now ..."
)
@configurable
def __init__(
self,
input_shape: Dict[str, ShapeSpec],
*,
num_classes: int,
pixel_decoder: nn.Module,
loss_weight: float = 1.0,
ignore_value: int = -1,
):
"""
NOTE: this interface is experimental.
Args:
input_shape: shapes (channels and stride) of the input features
num_classes: number of classes to predict
pixel_decoder: the pixel decoder module
loss_weight: loss weight
ignore_value: category id to be ignored during training.
"""
super().__init__()
input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride)
self.in_features = [k for k, v in input_shape]
feature_strides = [v.stride for k, v in input_shape]
feature_channels = [v.channels for k, v in input_shape]
self.ignore_value = ignore_value
self.common_stride = 4
self.loss_weight = loss_weight
self.pixel_decoder = pixel_decoder
self.predictor = Conv2d(
self.pixel_decoder.mask_dim, num_classes, kernel_size=1, stride=1, padding=0
)
weight_init.c2_msra_fill(self.predictor)
@classmethod
def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
return {
"input_shape": {
k: v for k, v in input_shape.items() if k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES
},
"ignore_value": cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
"num_classes": cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
"pixel_decoder": build_pixel_decoder(cfg, input_shape),
"loss_weight": cfg.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT,
}
def forward(self, features, targets=None):
"""
Returns:
In training, returns (None, dict of losses)
In inference, returns (CxHxW logits, {})
"""
x = self.layers(features)
if self.training:
return None, self.losses(x, targets)
else:
x = F.interpolate(
x, scale_factor=self.common_stride, mode="bilinear", align_corners=False
)
return x, {}
def layers(self, features):
x, _, _ = self.pixel_decoder.forward_features(features)
x = self.predictor(x)
return x
def losses(self, predictions, targets):
predictions = predictions.float() # https://github.com/pytorch/pytorch/issues/48163
predictions = F.interpolate(
predictions, scale_factor=self.common_stride, mode="bilinear", align_corners=False
)
loss = F.cross_entropy(
predictions, targets, reduction="mean", ignore_index=self.ignore_value
)
losses = {"loss_sem_seg": loss * self.loss_weight}
return losses
@SEM_SEG_HEADS_REGISTRY.register()
class PerPixelBaselinePlusHead(PerPixelBaselineHead):
def _load_from_state_dict(
self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
):
version = local_metadata.get("version", None)
if version is None or version < 2:
# Do not warn if train from scratch
scratch = True
logger = logging.getLogger(__name__)
for k in list(state_dict.keys()):
newk = k
if "sem_seg_head" in k and not k.startswith(prefix + "predictor"):
newk = k.replace(prefix, prefix + "pixel_decoder.")
logger.debug(f"{k} ==> {newk}")
if newk != k:
state_dict[newk] = state_dict[k]
del state_dict[k]
scratch = False
if not scratch:
logger.warning(
f"Weight format of {self.__class__.__name__} have changed! "
"Please upgrade your models. Applying automatic conversion now ..."
)
@configurable
def __init__(
self,
input_shape: Dict[str, ShapeSpec],
*,
# extra parameters
transformer_predictor: nn.Module,
transformer_in_feature: str,
deep_supervision: bool,
# inherit parameters
num_classes: int,
pixel_decoder: nn.Module,
loss_weight: float = 1.0,
ignore_value: int = -1,
):
"""
NOTE: this interface is experimental.
Args:
input_shape: shapes (channels and stride) of the input features
transformer_predictor: the transformer decoder that makes prediction
transformer_in_feature: input feature name to the transformer_predictor
deep_supervision: whether or not to add supervision to the output of
every transformer decoder layer
num_classes: number of classes to predict
pixel_decoder: the pixel decoder module
loss_weight: loss weight
ignore_value: category id to be ignored during training.
"""
super().__init__(
input_shape,
num_classes=num_classes,
pixel_decoder=pixel_decoder,
loss_weight=loss_weight,
ignore_value=ignore_value,
)
del self.predictor
self.predictor = transformer_predictor
self.transformer_in_feature = transformer_in_feature
self.deep_supervision = deep_supervision
@classmethod
def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
ret = super().from_config(cfg, input_shape)
ret["transformer_in_feature"] = cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE
if cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE == "transformer_encoder":
in_channels = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM
else:
in_channels = input_shape[ret["transformer_in_feature"]].channels
ret["transformer_predictor"] = StandardTransformerDecoder(
cfg, in_channels, mask_classification=False
)
ret["deep_supervision"] = cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION
return ret
def forward(self, features, targets=None):
"""
Returns:
In training, returns (None, dict of losses)
In inference, returns (CxHxW logits, {})
"""
x, aux_outputs = self.layers(features)
if self.training:
if self.deep_supervision:
losses = self.losses(x, targets)
for i, aux_output in enumerate(aux_outputs):
losses["loss_sem_seg" + f"_{i}"] = self.losses(
aux_output["pred_masks"], targets
)["loss_sem_seg"]
return None, losses
else:
return None, self.losses(x, targets)
else:
x = F.interpolate(
x, scale_factor=self.common_stride, mode="bilinear", align_corners=False
)
return x, {}
def layers(self, features):
mask_features, transformer_encoder_features, _ = self.pixel_decoder.forward_features(features)
if self.transformer_in_feature == "transformer_encoder":
assert (
transformer_encoder_features is not None
), "Please use the TransformerEncoderPixelDecoder."
predictions = self.predictor(transformer_encoder_features, mask_features)
else:
predictions = self.predictor(features[self.transformer_in_feature], mask_features)
if self.deep_supervision:
return predictions["pred_masks"], predictions["aux_outputs"]
else:
return predictions["pred_masks"], None
| 3D-LLM-main | three_steps_3d_feature/first_step/mask2former/modeling/meta_arch/per_pixel_baseline.py |
# --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu, Yutong Lin, Yixuan Wei
# --------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/SwinTransformer/Swin-Transformer-Semantic-Segmentation/blob/main/mmseg/models/backbones/swin_transformer.py
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from detectron2.modeling import BACKBONE_REGISTRY, Backbone, ShapeSpec
class Mlp(nn.Module):
"""Multilayer perceptron."""
def __init__(
self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0
):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
def window_partition(x, window_size):
"""
Args:
x: (B, H, W, C)
window_size (int): window size
Returns:
windows: (num_windows*B, window_size, window_size, C)
"""
B, H, W, C = x.shape
x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
return windows
def window_reverse(windows, window_size, H, W):
"""
Args:
windows: (num_windows*B, window_size, window_size, C)
window_size (int): Window size
H (int): Height of image
W (int): Width of image
Returns:
x: (B, H, W, C)
"""
B = int(windows.shape[0] / (H * W / window_size / window_size))
x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
return x
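# --- Illustrative sketch (not part of the original file) ---
# window_partition and window_reverse are exact inverses of each other when H and W
# are multiples of the window size; a quick round-trip check, assuming a 4x4
# feature map, window_size=2 and 3 channels.
def _window_round_trip_demo():
    x = torch.arange(2 * 4 * 4 * 3, dtype=torch.float32).view(2, 4, 4, 3)  # (B, H, W, C)
    windows = window_partition(x, window_size=2)   # (B * 4 windows, 2, 2, 3)
    restored = window_reverse(windows, 2, 4, 4)    # back to (2, 4, 4, 3)
    assert torch.equal(x, restored)
    return windows.shape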
class WindowAttention(nn.Module):
"""Window based multi-head self attention (W-MSA) module with relative position bias.
It supports both of shifted and non-shifted window.
Args:
dim (int): Number of input channels.
window_size (tuple[int]): The height and width of the window.
num_heads (int): Number of attention heads.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
proj_drop (float, optional): Dropout ratio of output. Default: 0.0
"""
def __init__(
self,
dim,
window_size,
num_heads,
qkv_bias=True,
qk_scale=None,
attn_drop=0.0,
proj_drop=0.0,
):
super().__init__()
self.dim = dim
self.window_size = window_size # Wh, Ww
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
# define a parameter table of relative position bias
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)
) # 2*Wh-1 * 2*Ww-1, nH
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
self.register_buffer("relative_position_index", relative_position_index)
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
trunc_normal_(self.relative_position_bias_table, std=0.02)
self.softmax = nn.Softmax(dim=-1)
def forward(self, x, mask=None):
"""Forward function.
Args:
x: input features with shape of (num_windows*B, N, C)
mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
"""
B_, N, C = x.shape
qkv = (
self.qkv(x)
.reshape(B_, N, 3, self.num_heads, C // self.num_heads)
.permute(2, 0, 3, 1, 4)
)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
q = q * self.scale
attn = q @ k.transpose(-2, -1)
relative_position_bias = self.relative_position_bias_table[
self.relative_position_index.view(-1)
].view(
self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1
) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(
2, 0, 1
).contiguous() # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0)
if mask is not None:
nW = mask.shape[0]
attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, N, N)
attn = self.softmax(attn)
else:
attn = self.softmax(attn)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
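# --- Illustrative sketch (not part of the original file) ---
# For a tiny 2x2 window, the relative position index built in WindowAttention.__init__
# maps every (query token, key token) pair to one of (2*2-1)*(2*2-1) = 9 bias table
# entries; token pairs at the same relative offset share the same learned bias.
def _relative_position_index_demo(window_size=(2, 2)):
    coords = torch.stack(torch.meshgrid([torch.arange(window_size[0]), torch.arange(window_size[1])]))
    coords_flatten = torch.flatten(coords, 1)                        # (2, Wh*Ww)
    relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
    relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # (Wh*Ww, Wh*Ww, 2)
    relative_coords[:, :, 0] += window_size[0] - 1
    relative_coords[:, :, 1] += window_size[1] - 1
    relative_coords[:, :, 0] *= 2 * window_size[1] - 1
    return relative_coords.sum(-1)                                   # (Wh*Ww, Wh*Ww) index into the bias table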
class SwinTransformerBlock(nn.Module):
"""Swin Transformer Block.
Args:
dim (int): Number of input channels.
num_heads (int): Number of attention heads.
window_size (int): Window size.
shift_size (int): Shift size for SW-MSA.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float, optional): Stochastic depth rate. Default: 0.0
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(
self,
dim,
num_heads,
window_size=7,
shift_size=0,
mlp_ratio=4.0,
qkv_bias=True,
qk_scale=None,
drop=0.0,
attn_drop=0.0,
drop_path=0.0,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
):
super().__init__()
self.dim = dim
self.num_heads = num_heads
self.window_size = window_size
self.shift_size = shift_size
self.mlp_ratio = mlp_ratio
assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size"
self.norm1 = norm_layer(dim)
self.attn = WindowAttention(
dim,
window_size=to_2tuple(self.window_size),
num_heads=num_heads,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attn_drop=attn_drop,
proj_drop=drop,
)
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(
in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop
)
self.H = None
self.W = None
def forward(self, x, mask_matrix):
"""Forward function.
Args:
x: Input feature, tensor size (B, H*W, C).
H, W: Spatial resolution of the input feature.
mask_matrix: Attention mask for cyclic shift.
"""
B, L, C = x.shape
H, W = self.H, self.W
assert L == H * W, "input feature has wrong size"
shortcut = x
x = self.norm1(x)
x = x.view(B, H, W, C)
# pad feature maps to multiples of window size
pad_l = pad_t = 0
pad_r = (self.window_size - W % self.window_size) % self.window_size
pad_b = (self.window_size - H % self.window_size) % self.window_size
x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
_, Hp, Wp, _ = x.shape
# cyclic shift
if self.shift_size > 0:
shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
attn_mask = mask_matrix
else:
shifted_x = x
attn_mask = None
# partition windows
x_windows = window_partition(
shifted_x, self.window_size
) # nW*B, window_size, window_size, C
x_windows = x_windows.view(
-1, self.window_size * self.window_size, C
) # nW*B, window_size*window_size, C
# W-MSA/SW-MSA
attn_windows = self.attn(x_windows, mask=attn_mask) # nW*B, window_size*window_size, C
# merge windows
attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp) # B H' W' C
# reverse cyclic shift
if self.shift_size > 0:
x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
else:
x = shifted_x
if pad_r > 0 or pad_b > 0:
x = x[:, :H, :W, :].contiguous()
x = x.view(B, H * W, C)
# FFN
x = shortcut + self.drop_path(x)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class PatchMerging(nn.Module):
"""Patch Merging Layer
Args:
dim (int): Number of input channels.
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, dim, norm_layer=nn.LayerNorm):
super().__init__()
self.dim = dim
self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
self.norm = norm_layer(4 * dim)
def forward(self, x, H, W):
"""Forward function.
Args:
x: Input feature, tensor size (B, H*W, C).
H, W: Spatial resolution of the input feature.
"""
B, L, C = x.shape
assert L == H * W, "input feature has wrong size"
x = x.view(B, H, W, C)
# padding
pad_input = (H % 2 == 1) or (W % 2 == 1)
if pad_input:
x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2))
x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C
x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C
x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C
x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C
x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C
x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C
x = self.norm(x)
x = self.reduction(x)
return x
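# --- Illustrative sketch (not part of the original file) ---
# PatchMerging halves the spatial resolution and doubles the channel count:
# (B, H*W, C) -> (B, H/2 * W/2, 2*C). A quick shape check assuming H = W = 8 and C = 96.
def _patch_merging_demo():
    merge = PatchMerging(dim=96)
    x = torch.rand(2, 8 * 8, 96)
    out = merge(x, 8, 8)
    return out.shape  # torch.Size([2, 16, 192])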
class BasicLayer(nn.Module):
"""A basic Swin Transformer layer for one stage.
Args:
dim (int): Number of feature channels
depth (int): Depths of this stage.
num_heads (int): Number of attention head.
window_size (int): Local window size. Default: 7.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
"""
def __init__(
self,
dim,
depth,
num_heads,
window_size=7,
mlp_ratio=4.0,
qkv_bias=True,
qk_scale=None,
drop=0.0,
attn_drop=0.0,
drop_path=0.0,
norm_layer=nn.LayerNorm,
downsample=None,
use_checkpoint=False,
):
super().__init__()
self.window_size = window_size
self.shift_size = window_size // 2
self.depth = depth
self.use_checkpoint = use_checkpoint
# build blocks
self.blocks = nn.ModuleList(
[
SwinTransformerBlock(
dim=dim,
num_heads=num_heads,
window_size=window_size,
shift_size=0 if (i % 2 == 0) else window_size // 2,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop=drop,
attn_drop=attn_drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
norm_layer=norm_layer,
)
for i in range(depth)
]
)
# patch merging layer
if downsample is not None:
self.downsample = downsample(dim=dim, norm_layer=norm_layer)
else:
self.downsample = None
def forward(self, x, H, W):
"""Forward function.
Args:
x: Input feature, tensor size (B, H*W, C).
H, W: Spatial resolution of the input feature.
"""
# calculate attention mask for SW-MSA
Hp = int(np.ceil(H / self.window_size)) * self.window_size
Wp = int(np.ceil(W / self.window_size)) * self.window_size
img_mask = torch.zeros((1, Hp, Wp, 1), device=x.device) # 1 Hp Wp 1
h_slices = (
slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None),
)
w_slices = (
slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None),
)
cnt = 0
for h in h_slices:
for w in w_slices:
img_mask[:, h, w, :] = cnt
cnt += 1
mask_windows = window_partition(
img_mask, self.window_size
) # nW, window_size, window_size, 1
mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(
attn_mask == 0, float(0.0)
)
for blk in self.blocks:
blk.H, blk.W = H, W
if self.use_checkpoint:
x = checkpoint.checkpoint(blk, x, attn_mask)
else:
x = blk(x, attn_mask)
if self.downsample is not None:
x_down = self.downsample(x, H, W)
Wh, Ww = (H + 1) // 2, (W + 1) // 2
return x, H, W, x_down, Wh, Ww
else:
return x, H, W, x, H, W
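# --- Illustrative sketch (not part of the original file) ---
# The SW-MSA attention mask built in BasicLayer.forward numbers the regions of the
# (shifted) feature map; window tokens that come from different regions receive a
# -100 bias so they cannot attend to each other. A tiny example assuming a padded
# 4x4 map, window_size=2 and shift_size=1.
def _sw_msa_mask_demo(window_size=2, shift_size=1, Hp=4, Wp=4):
    img_mask = torch.zeros((1, Hp, Wp, 1))
    slices = (slice(0, -window_size), slice(-window_size, -shift_size), slice(-shift_size, None))
    cnt = 0
    for h in slices:
        for w in slices:
            img_mask[:, h, w, :] = cnt
            cnt += 1
    mask_windows = window_partition(img_mask, window_size).view(-1, window_size * window_size)
    attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
    return attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))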
class PatchEmbed(nn.Module):
"""Image to Patch Embedding
Args:
patch_size (int): Patch token size. Default: 4.
in_chans (int): Number of input image channels. Default: 3.
embed_dim (int): Number of linear projection output channels. Default: 96.
norm_layer (nn.Module, optional): Normalization layer. Default: None
"""
def __init__(self, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
super().__init__()
patch_size = to_2tuple(patch_size)
self.patch_size = patch_size
self.in_chans = in_chans
self.embed_dim = embed_dim
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
if norm_layer is not None:
self.norm = norm_layer(embed_dim)
else:
self.norm = None
def forward(self, x):
"""Forward function."""
# padding
_, _, H, W = x.size()
if W % self.patch_size[1] != 0:
x = F.pad(x, (0, self.patch_size[1] - W % self.patch_size[1]))
if H % self.patch_size[0] != 0:
x = F.pad(x, (0, 0, 0, self.patch_size[0] - H % self.patch_size[0]))
x = self.proj(x) # B C Wh Ww
if self.norm is not None:
Wh, Ww = x.size(2), x.size(3)
x = x.flatten(2).transpose(1, 2)
x = self.norm(x)
x = x.transpose(1, 2).view(-1, self.embed_dim, Wh, Ww)
return x
class SwinTransformer(nn.Module):
"""Swin Transformer backbone.
A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
https://arxiv.org/pdf/2103.14030
Args:
pretrain_img_size (int): Input image size for training the pretrained model,
used in absolute postion embedding. Default 224.
patch_size (int | tuple(int)): Patch size. Default: 4.
in_chans (int): Number of input image channels. Default: 3.
embed_dim (int): Number of linear projection output channels. Default: 96.
depths (tuple[int]): Depths of each Swin Transformer stage.
num_heads (tuple[int]): Number of attention head of each stage.
window_size (int): Window size. Default: 7.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float): Override default qk scale of head_dim ** -0.5 if set.
drop_rate (float): Dropout rate.
attn_drop_rate (float): Attention dropout rate. Default: 0.
drop_path_rate (float): Stochastic depth rate. Default: 0.2.
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
ape (bool): If True, add absolute position embedding to the patch embedding. Default: False.
patch_norm (bool): If True, add normalization after patch embedding. Default: True.
out_indices (Sequence[int]): Output from which stages.
frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
-1 means not freezing any parameters.
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
"""
def __init__(
self,
pretrain_img_size=224,
patch_size=4,
in_chans=3,
embed_dim=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=7,
mlp_ratio=4.0,
qkv_bias=True,
qk_scale=None,
drop_rate=0.0,
attn_drop_rate=0.0,
drop_path_rate=0.2,
norm_layer=nn.LayerNorm,
ape=False,
patch_norm=True,
out_indices=(0, 1, 2, 3),
frozen_stages=-1,
use_checkpoint=False,
):
super().__init__()
self.pretrain_img_size = pretrain_img_size
self.num_layers = len(depths)
self.embed_dim = embed_dim
self.ape = ape
self.patch_norm = patch_norm
self.out_indices = out_indices
self.frozen_stages = frozen_stages
# split image into non-overlapping patches
self.patch_embed = PatchEmbed(
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
norm_layer=norm_layer if self.patch_norm else None,
)
# absolute position embedding
if self.ape:
pretrain_img_size = to_2tuple(pretrain_img_size)
patch_size = to_2tuple(patch_size)
patches_resolution = [
pretrain_img_size[0] // patch_size[0],
pretrain_img_size[1] // patch_size[1],
]
self.absolute_pos_embed = nn.Parameter(
torch.zeros(1, embed_dim, patches_resolution[0], patches_resolution[1])
)
trunc_normal_(self.absolute_pos_embed, std=0.02)
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
dpr = [
x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))
] # stochastic depth decay rule
# build layers
self.layers = nn.ModuleList()
for i_layer in range(self.num_layers):
layer = BasicLayer(
dim=int(embed_dim * 2 ** i_layer),
depth=depths[i_layer],
num_heads=num_heads[i_layer],
window_size=window_size,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop=drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[sum(depths[:i_layer]) : sum(depths[: i_layer + 1])],
norm_layer=norm_layer,
downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
use_checkpoint=use_checkpoint,
)
self.layers.append(layer)
num_features = [int(embed_dim * 2 ** i) for i in range(self.num_layers)]
self.num_features = num_features
# add a norm layer for each output
for i_layer in out_indices:
layer = norm_layer(num_features[i_layer])
layer_name = f"norm{i_layer}"
self.add_module(layer_name, layer)
self._freeze_stages()
def _freeze_stages(self):
if self.frozen_stages >= 0:
self.patch_embed.eval()
for param in self.patch_embed.parameters():
param.requires_grad = False
if self.frozen_stages >= 1 and self.ape:
self.absolute_pos_embed.requires_grad = False
if self.frozen_stages >= 2:
self.pos_drop.eval()
for i in range(0, self.frozen_stages - 1):
m = self.layers[i]
m.eval()
for param in m.parameters():
param.requires_grad = False
def init_weights(self, pretrained=None):
"""Initialize the weights in backbone.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None.
"""
def _init_weights(m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=0.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def forward(self, x):
"""Forward function."""
x = self.patch_embed(x)
Wh, Ww = x.size(2), x.size(3)
if self.ape:
# interpolate the position embedding to the corresponding size
absolute_pos_embed = F.interpolate(
self.absolute_pos_embed, size=(Wh, Ww), mode="bicubic"
)
x = (x + absolute_pos_embed).flatten(2).transpose(1, 2) # B Wh*Ww C
else:
x = x.flatten(2).transpose(1, 2)
x = self.pos_drop(x)
outs = {}
for i in range(self.num_layers):
layer = self.layers[i]
x_out, H, W, x, Wh, Ww = layer(x, Wh, Ww)
if i in self.out_indices:
norm_layer = getattr(self, f"norm{i}")
x_out = norm_layer(x_out)
out = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous()
outs["res{}".format(i + 2)] = out
return outs
def train(self, mode=True):
"""Convert the model into training mode while keep layers freezed."""
super(SwinTransformer, self).train(mode)
self._freeze_stages()
@BACKBONE_REGISTRY.register()
class D2SwinTransformer(SwinTransformer, Backbone):
def __init__(self, cfg, input_shape):
pretrain_img_size = cfg.MODEL.SWIN.PRETRAIN_IMG_SIZE
patch_size = cfg.MODEL.SWIN.PATCH_SIZE
in_chans = 3
embed_dim = cfg.MODEL.SWIN.EMBED_DIM
depths = cfg.MODEL.SWIN.DEPTHS
num_heads = cfg.MODEL.SWIN.NUM_HEADS
window_size = cfg.MODEL.SWIN.WINDOW_SIZE
mlp_ratio = cfg.MODEL.SWIN.MLP_RATIO
qkv_bias = cfg.MODEL.SWIN.QKV_BIAS
qk_scale = cfg.MODEL.SWIN.QK_SCALE
drop_rate = cfg.MODEL.SWIN.DROP_RATE
attn_drop_rate = cfg.MODEL.SWIN.ATTN_DROP_RATE
drop_path_rate = cfg.MODEL.SWIN.DROP_PATH_RATE
norm_layer = nn.LayerNorm
ape = cfg.MODEL.SWIN.APE
patch_norm = cfg.MODEL.SWIN.PATCH_NORM
use_checkpoint = cfg.MODEL.SWIN.USE_CHECKPOINT
super().__init__(
pretrain_img_size,
patch_size,
in_chans,
embed_dim,
depths,
num_heads,
window_size,
mlp_ratio,
qkv_bias,
qk_scale,
drop_rate,
attn_drop_rate,
drop_path_rate,
norm_layer,
ape,
patch_norm,
use_checkpoint=use_checkpoint,
)
self._out_features = cfg.MODEL.SWIN.OUT_FEATURES
self._out_feature_strides = {
"res2": 4,
"res3": 8,
"res4": 16,
"res5": 32,
}
self._out_feature_channels = {
"res2": self.num_features[0],
"res3": self.num_features[1],
"res4": self.num_features[2],
"res5": self.num_features[3],
}
def forward(self, x):
"""
Args:
x: Tensor of shape (N,C,H,W). H, W must be a multiple of ``self.size_divisibility``.
Returns:
dict[str->Tensor]: names and the corresponding features
"""
assert (
x.dim() == 4
), f"SwinTransformer takes an input of shape (N, C, H, W). Got {x.shape} instead!"
outputs = {}
y = super().forward(x)
for k in y.keys():
if k in self._out_features:
outputs[k] = y[k]
return outputs
def output_shape(self):
return {
name: ShapeSpec(
channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
)
for name in self._out_features
}
@property
def size_divisibility(self):
return 32
| 3D-LLM-main | three_steps_3d_feature/first_step/mask2former/modeling/backbone/swin.py |
# Copyright (c) Facebook, Inc. and its affiliates.
| 3D-LLM-main | three_steps_3d_feature/first_step/mask2former/modeling/backbone/__init__.py |
| 3D-LLM-main | three_steps_3d_feature/first_step/mask2former/evaluation/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import contextlib
import copy
import io
import itertools
import json
import logging
import numpy as np
import os
import pickle
from collections import OrderedDict
import pycocotools.mask as mask_util
import torch
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from tabulate import tabulate
import detectron2.utils.comm as comm
from detectron2.config import CfgNode
from detectron2.data import MetadataCatalog
from detectron2.data.datasets.coco import convert_to_coco_json
from detectron2.evaluation.coco_evaluation import COCOEvaluator, _evaluate_predictions_on_coco
from detectron2.evaluation.fast_eval_api import COCOeval_opt
from detectron2.structures import Boxes, BoxMode, pairwise_iou
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import create_small_table
# Modified from COCOEvaluator for instance segmentation
class InstanceSegEvaluator(COCOEvaluator):
"""
Evaluate AR for object proposals, AP for instance detection/segmentation, AP
for keypoint detection outputs using COCO's metrics.
See http://cocodataset.org/#detection-eval and
http://cocodataset.org/#keypoints-eval to understand its metrics.
The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means
the metric cannot be computed (e.g. due to no predictions made).
In addition to COCO, this evaluator is able to support any bounding box detection,
instance segmentation, or keypoint detection dataset.
"""
def _eval_predictions(self, predictions, img_ids=None):
"""
Evaluate predictions. Fill self._results with the metrics of the tasks.
"""
self._logger.info("Preparing results for COCO format ...")
coco_results = list(itertools.chain(*[x["instances"] for x in predictions]))
tasks = self._tasks or self._tasks_from_predictions(coco_results)
# unmap the category ids for COCO
if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"):
dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id
# all_contiguous_ids = list(dataset_id_to_contiguous_id.values())
# num_classes = len(all_contiguous_ids)
# assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1
reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()}
for result in coco_results:
category_id = result["category_id"]
# assert category_id < num_classes, (
# f"A prediction has class={category_id}, "
# f"but the dataset only has {num_classes} classes and "
# f"predicted class id should be in [0, {num_classes - 1}]."
# )
assert category_id in reverse_id_mapping, (
f"A prediction has class={category_id}, "
f"but the dataset only has class ids in {dataset_id_to_contiguous_id}."
)
result["category_id"] = reverse_id_mapping[category_id]
if self._output_dir:
file_path = os.path.join(self._output_dir, "coco_instances_results.json")
self._logger.info("Saving results to {}".format(file_path))
with PathManager.open(file_path, "w") as f:
f.write(json.dumps(coco_results))
f.flush()
if not self._do_evaluation:
self._logger.info("Annotations are not available for evaluation.")
return
self._logger.info(
"Evaluating predictions with {} COCO API...".format(
"unofficial" if self._use_fast_impl else "official"
)
)
for task in sorted(tasks):
assert task in {"bbox", "segm", "keypoints"}, f"Got unknown task: {task}!"
coco_eval = (
_evaluate_predictions_on_coco(
self._coco_api,
coco_results,
task,
kpt_oks_sigmas=self._kpt_oks_sigmas,
use_fast_impl=self._use_fast_impl,
img_ids=img_ids,
max_dets_per_image=self._max_dets_per_image,
)
if len(coco_results) > 0
else None # cocoapi does not handle empty results very well
)
res = self._derive_coco_results(
coco_eval, task, class_names=self._metadata.get("thing_classes")
)
self._results[task] = res
| 3D-LLM-main | three_steps_3d_feature/first_step/mask2former/evaluation/instance_evaluation.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from . import datasets
| 3D-LLM-main | three_steps_3d_feature/first_step/mask2former/data/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
| 3D-LLM-main | three_steps_3d_feature/first_step/mask2former/data/dataset_mappers/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/facebookresearch/detr/blob/master/d2/detr/dataset_mapper.py
import copy
import logging
import numpy as np
import torch
from detectron2.config import configurable
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from detectron2.data.transforms import TransformGen
from detectron2.structures import BitMasks, Boxes, Instances
__all__ = ["COCOPanopticNewBaselineDatasetMapper"]
def build_transform_gen(cfg, is_train):
"""
Create a list of default :class:`Augmentation` from config.
Now it includes resizing and flipping.
Returns:
list[Augmentation]
"""
assert is_train, "Only support training augmentation"
image_size = cfg.INPUT.IMAGE_SIZE
min_scale = cfg.INPUT.MIN_SCALE
max_scale = cfg.INPUT.MAX_SCALE
augmentation = []
if cfg.INPUT.RANDOM_FLIP != "none":
augmentation.append(
T.RandomFlip(
horizontal=cfg.INPUT.RANDOM_FLIP == "horizontal",
vertical=cfg.INPUT.RANDOM_FLIP == "vertical",
)
)
augmentation.extend([
T.ResizeScale(
min_scale=min_scale, max_scale=max_scale, target_height=image_size, target_width=image_size
),
T.FixedSizeCrop(crop_size=(image_size, image_size)),
])
return augmentation
# This is specifically designed for the COCO dataset.
class COCOPanopticNewBaselineDatasetMapper:
"""
A callable which takes a dataset dict in Detectron2 Dataset format,
and map it into a format used by MaskFormer.
This dataset mapper applies the same transformation as DETR for COCO panoptic segmentation.
The callable currently does the following:
1. Read the image from "file_name"
2. Applies geometric transforms to the image and annotation
3. Find and applies suitable cropping to the image and annotation
4. Prepare image and annotation to Tensors
"""
@configurable
def __init__(
self,
is_train=True,
*,
tfm_gens,
image_format,
):
"""
NOTE: this interface is experimental.
Args:
is_train: for training or inference
augmentations: a list of augmentations or deterministic transforms to apply
crop_gen: crop augmentation
tfm_gens: data augmentation
image_format: an image format supported by :func:`detection_utils.read_image`.
"""
self.tfm_gens = tfm_gens
logging.getLogger(__name__).info(
"[COCOPanopticNewBaselineDatasetMapper] Full TransformGens used in training: {}".format(
str(self.tfm_gens)
)
)
self.img_format = image_format
self.is_train = is_train
@classmethod
def from_config(cls, cfg, is_train=True):
# Build augmentation
tfm_gens = build_transform_gen(cfg, is_train)
ret = {
"is_train": is_train,
"tfm_gens": tfm_gens,
"image_format": cfg.INPUT.FORMAT,
}
return ret
def __call__(self, dataset_dict):
"""
Args:
dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
Returns:
dict: a format that builtin models in detectron2 accept
"""
dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
image = utils.read_image(dataset_dict["file_name"], format=self.img_format)
utils.check_image_size(dataset_dict, image)
image, transforms = T.apply_transform_gens(self.tfm_gens, image)
image_shape = image.shape[:2] # h, w
# Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
# but not efficient on large generic data structures due to the use of pickle & mp.Queue.
# Therefore it's important to use torch.Tensor.
dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
if not self.is_train:
# USER: Modify this if you want to keep them for some reason.
dataset_dict.pop("annotations", None)
return dataset_dict
if "pan_seg_file_name" in dataset_dict:
pan_seg_gt = utils.read_image(dataset_dict.pop("pan_seg_file_name"), "RGB")
segments_info = dataset_dict["segments_info"]
# apply the same transformation to panoptic segmentation
pan_seg_gt = transforms.apply_segmentation(pan_seg_gt)
from panopticapi.utils import rgb2id
pan_seg_gt = rgb2id(pan_seg_gt)
instances = Instances(image_shape)
classes = []
masks = []
for segment_info in segments_info:
class_id = segment_info["category_id"]
if not segment_info["iscrowd"]:
classes.append(class_id)
masks.append(pan_seg_gt == segment_info["id"])
classes = np.array(classes)
instances.gt_classes = torch.tensor(classes, dtype=torch.int64)
if len(masks) == 0:
# Some image does not have annotation (all ignored)
instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1]))
instances.gt_boxes = Boxes(torch.zeros((0, 4)))
else:
masks = BitMasks(
torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])
)
instances.gt_masks = masks.tensor
instances.gt_boxes = masks.get_bounding_boxes()
dataset_dict["instances"] = instances
return dataset_dict
| 3D-LLM-main | three_steps_3d_feature/first_step/mask2former/data/dataset_mappers/coco_panoptic_new_baseline_dataset_mapper.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import logging
import numpy as np
import torch
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.data import MetadataCatalog
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from detectron2.projects.point_rend import ColorAugSSDTransform
from detectron2.structures import BitMasks, Instances
__all__ = ["MaskFormerSemanticDatasetMapper"]
class MaskFormerSemanticDatasetMapper:
"""
A callable which takes a dataset dict in Detectron2 Dataset format,
and map it into a format used by MaskFormer for semantic segmentation.
The callable currently does the following:
1. Read the image from "file_name"
2. Applies geometric transforms to the image and annotation
3. Find and applies suitable cropping to the image and annotation
4. Prepare image and annotation to Tensors
"""
@configurable
def __init__(
self,
is_train=True,
*,
augmentations,
image_format,
ignore_label,
size_divisibility,
):
"""
NOTE: this interface is experimental.
Args:
is_train: for training or inference
augmentations: a list of augmentations or deterministic transforms to apply
image_format: an image format supported by :func:`detection_utils.read_image`.
ignore_label: the label that is ignored to evaluation
size_divisibility: pad image size to be divisible by this value
"""
self.is_train = is_train
self.tfm_gens = augmentations
self.img_format = image_format
self.ignore_label = ignore_label
self.size_divisibility = size_divisibility
logger = logging.getLogger(__name__)
mode = "training" if is_train else "inference"
logger.info(f"[{self.__class__.__name__}] Augmentations used in {mode}: {augmentations}")
@classmethod
def from_config(cls, cfg, is_train=True):
# Build augmentation
augs = [
T.ResizeShortestEdge(
cfg.INPUT.MIN_SIZE_TRAIN,
cfg.INPUT.MAX_SIZE_TRAIN,
cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING,
)
]
if cfg.INPUT.CROP.ENABLED:
augs.append(
T.RandomCrop_CategoryAreaConstraint(
cfg.INPUT.CROP.TYPE,
cfg.INPUT.CROP.SIZE,
cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA,
cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
)
)
if cfg.INPUT.COLOR_AUG_SSD:
augs.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))
augs.append(T.RandomFlip())
# Assume always applies to the training set.
dataset_names = cfg.DATASETS.TRAIN
meta = MetadataCatalog.get(dataset_names[0])
ignore_label = meta.ignore_label
ret = {
"is_train": is_train,
"augmentations": augs,
"image_format": cfg.INPUT.FORMAT,
"ignore_label": ignore_label,
"size_divisibility": cfg.INPUT.SIZE_DIVISIBILITY,
}
return ret
def __call__(self, dataset_dict):
"""
Args:
dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
Returns:
dict: a format that builtin models in detectron2 accept
"""
assert self.is_train, "MaskFormerSemanticDatasetMapper should only be used for training!"
dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
image = utils.read_image(dataset_dict["file_name"], format=self.img_format)
utils.check_image_size(dataset_dict, image)
if "sem_seg_file_name" in dataset_dict:
# PyTorch transformation not implemented for uint16, so converting it to double first
sem_seg_gt = utils.read_image(dataset_dict.pop("sem_seg_file_name")).astype("double")
else:
sem_seg_gt = None
if sem_seg_gt is None:
raise ValueError(
"Cannot find 'sem_seg_file_name' for semantic segmentation dataset {}.".format(
dataset_dict["file_name"]
)
)
aug_input = T.AugInput(image, sem_seg=sem_seg_gt)
aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)
image = aug_input.image
sem_seg_gt = aug_input.sem_seg
# Pad image and segmentation label here!
image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
if sem_seg_gt is not None:
sem_seg_gt = torch.as_tensor(sem_seg_gt.astype("long"))
if self.size_divisibility > 0:
image_size = (image.shape[-2], image.shape[-1])
padding_size = [
0,
self.size_divisibility - image_size[1],
0,
self.size_divisibility - image_size[0],
]
image = F.pad(image, padding_size, value=128).contiguous()
if sem_seg_gt is not None:
sem_seg_gt = F.pad(sem_seg_gt, padding_size, value=self.ignore_label).contiguous()
image_shape = (image.shape[-2], image.shape[-1]) # h, w
# Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
# but not efficient on large generic data structures due to the use of pickle & mp.Queue.
# Therefore it's important to use torch.Tensor.
dataset_dict["image"] = image
if sem_seg_gt is not None:
dataset_dict["sem_seg"] = sem_seg_gt.long()
if "annotations" in dataset_dict:
raise ValueError("Semantic segmentation dataset should not have 'annotations'.")
# Prepare per-category binary masks
if sem_seg_gt is not None:
sem_seg_gt = sem_seg_gt.numpy()
instances = Instances(image_shape)
classes = np.unique(sem_seg_gt)
# remove ignored region
classes = classes[classes != self.ignore_label]
instances.gt_classes = torch.tensor(classes, dtype=torch.int64)
masks = []
for class_id in classes:
masks.append(sem_seg_gt == class_id)
if len(masks) == 0:
# Some image does not have annotation (all ignored)
instances.gt_masks = torch.zeros((0, sem_seg_gt.shape[-2], sem_seg_gt.shape[-1]))
else:
masks = BitMasks(
torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])
)
instances.gt_masks = masks.tensor
dataset_dict["instances"] = instances
return dataset_dict
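# --- Illustrative sketch (not part of the original file) ---
# How the per-category binary masks above are derived from a semantic label map,
# assuming a toy 4x4 map with ignore_label = 255.
def _per_category_mask_demo():
    sem_seg_gt = np.array(
        [[0, 0, 255, 2],
         [0, 1, 1, 2],
         [0, 1, 1, 2],
         [255, 1, 2, 2]], dtype=np.int64
    )
    classes = np.unique(sem_seg_gt)
    classes = classes[classes != 255]  # drop the ignored region -> [0, 1, 2]
    masks = np.stack([sem_seg_gt == class_id for class_id in classes])
    return torch.from_numpy(classes), torch.from_numpy(masks)  # shapes (3,) and (3, 4, 4)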
| 3D-LLM-main | three_steps_3d_feature/first_step/mask2former/data/dataset_mappers/mask_former_semantic_dataset_mapper.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import logging
import numpy as np
import torch
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from detectron2.structures import BitMasks, Instances
from .mask_former_semantic_dataset_mapper import MaskFormerSemanticDatasetMapper
__all__ = ["MaskFormerPanopticDatasetMapper"]
class MaskFormerPanopticDatasetMapper(MaskFormerSemanticDatasetMapper):
"""
A callable which takes a dataset dict in Detectron2 Dataset format,
and map it into a format used by MaskFormer for panoptic segmentation.
The callable currently does the following:
1. Read the image from "file_name"
2. Applies geometric transforms to the image and annotation
3. Find and applies suitable cropping to the image and annotation
4. Prepare image and annotation to Tensors
"""
@configurable
def __init__(
self,
is_train=True,
*,
augmentations,
image_format,
ignore_label,
size_divisibility,
):
"""
NOTE: this interface is experimental.
Args:
is_train: for training or inference
augmentations: a list of augmentations or deterministic transforms to apply
image_format: an image format supported by :func:`detection_utils.read_image`.
ignore_label: the label that is ignored to evaluation
size_divisibility: pad image size to be divisible by this value
"""
super().__init__(
is_train,
augmentations=augmentations,
image_format=image_format,
ignore_label=ignore_label,
size_divisibility=size_divisibility,
)
def __call__(self, dataset_dict):
"""
Args:
dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
Returns:
dict: a format that builtin models in detectron2 accept
"""
assert self.is_train, "MaskFormerPanopticDatasetMapper should only be used for training!"
dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
image = utils.read_image(dataset_dict["file_name"], format=self.img_format)
utils.check_image_size(dataset_dict, image)
# semantic segmentation
if "sem_seg_file_name" in dataset_dict:
# PyTorch transformation not implemented for uint16, so converting it to double first
sem_seg_gt = utils.read_image(dataset_dict.pop("sem_seg_file_name")).astype("double")
else:
sem_seg_gt = None
# panoptic segmentation
if "pan_seg_file_name" in dataset_dict:
pan_seg_gt = utils.read_image(dataset_dict.pop("pan_seg_file_name"), "RGB")
segments_info = dataset_dict["segments_info"]
else:
pan_seg_gt = None
segments_info = None
if pan_seg_gt is None:
raise ValueError(
"Cannot find 'pan_seg_file_name' for panoptic segmentation dataset {}.".format(
dataset_dict["file_name"]
)
)
aug_input = T.AugInput(image, sem_seg=sem_seg_gt)
aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)
image = aug_input.image
if sem_seg_gt is not None:
sem_seg_gt = aug_input.sem_seg
# apply the same transformation to panoptic segmentation
pan_seg_gt = transforms.apply_segmentation(pan_seg_gt)
from panopticapi.utils import rgb2id
pan_seg_gt = rgb2id(pan_seg_gt)
# Pad image and segmentation label here!
image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
if sem_seg_gt is not None:
sem_seg_gt = torch.as_tensor(sem_seg_gt.astype("long"))
pan_seg_gt = torch.as_tensor(pan_seg_gt.astype("long"))
if self.size_divisibility > 0:
image_size = (image.shape[-2], image.shape[-1])
padding_size = [
0,
self.size_divisibility - image_size[1],
0,
self.size_divisibility - image_size[0],
]
image = F.pad(image, padding_size, value=128).contiguous()
if sem_seg_gt is not None:
sem_seg_gt = F.pad(sem_seg_gt, padding_size, value=self.ignore_label).contiguous()
pan_seg_gt = F.pad(
pan_seg_gt, padding_size, value=0
).contiguous() # 0 is the VOID panoptic label
image_shape = (image.shape[-2], image.shape[-1]) # h, w
# Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
# but not efficient on large generic data structures due to the use of pickle & mp.Queue.
# Therefore it's important to use torch.Tensor.
dataset_dict["image"] = image
if sem_seg_gt is not None:
dataset_dict["sem_seg"] = sem_seg_gt.long()
if "annotations" in dataset_dict:
raise ValueError("Pemantic segmentation dataset should not have 'annotations'.")
# Prepare per-category binary masks
pan_seg_gt = pan_seg_gt.numpy()
instances = Instances(image_shape)
classes = []
masks = []
for segment_info in segments_info:
class_id = segment_info["category_id"]
if not segment_info["iscrowd"]:
classes.append(class_id)
masks.append(pan_seg_gt == segment_info["id"])
classes = np.array(classes)
instances.gt_classes = torch.tensor(classes, dtype=torch.int64)
if len(masks) == 0:
# Some image does not have annotation (all ignored)
instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1]))
else:
masks = BitMasks(
torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])
)
instances.gt_masks = masks.tensor
dataset_dict["instances"] = instances
return dataset_dict
| 3D-LLM-main | three_steps_3d_feature/first_step/mask2former/data/dataset_mappers/mask_former_panoptic_dataset_mapper.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/facebookresearch/detr/blob/master/d2/detr/dataset_mapper.py
import copy
import logging
import numpy as np
import torch
from detectron2.config import configurable
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from detectron2.data.transforms import TransformGen
from detectron2.structures import BitMasks, Instances
from pycocotools import mask as coco_mask
__all__ = ["COCOInstanceNewBaselineDatasetMapper"]
def convert_coco_poly_to_mask(segmentations, height, width):
masks = []
for polygons in segmentations:
rles = coco_mask.frPyObjects(polygons, height, width)
mask = coco_mask.decode(rles)
if len(mask.shape) < 3:
mask = mask[..., None]
mask = torch.as_tensor(mask, dtype=torch.uint8)
mask = mask.any(dim=2)
masks.append(mask)
if masks:
masks = torch.stack(masks, dim=0)
else:
masks = torch.zeros((0, height, width), dtype=torch.uint8)
return masks
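# Minimal usage sketch for the helper above (illustrative only; the polygon and image
# size are made up, and nothing in the training pipeline calls this function):
def _demo_convert_coco_poly_to_mask():
    # One instance described by a single polygon: an axis-aligned square with
    # corners (2, 2) and (12, 12) inside a hypothetical 32x32 image.
    polygons_per_instance = [[[2.0, 2.0, 12.0, 2.0, 12.0, 12.0, 2.0, 12.0]]]
    masks = convert_coco_poly_to_mask(polygons_per_instance, height=32, width=32)
    # masks is a (1, 32, 32) boolean tensor; pixels inside the square are True.
    return masks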
def build_transform_gen(cfg, is_train):
"""
Create a list of default :class:`Augmentation` from config.
Now it includes resizing and flipping.
Returns:
list[Augmentation]
"""
assert is_train, "Only training augmentation is supported"
image_size = cfg.INPUT.IMAGE_SIZE
min_scale = cfg.INPUT.MIN_SCALE
max_scale = cfg.INPUT.MAX_SCALE
augmentation = []
if cfg.INPUT.RANDOM_FLIP != "none":
augmentation.append(
T.RandomFlip(
horizontal=cfg.INPUT.RANDOM_FLIP == "horizontal",
vertical=cfg.INPUT.RANDOM_FLIP == "vertical",
)
)
augmentation.extend([
T.ResizeScale(
min_scale=min_scale, max_scale=max_scale, target_height=image_size, target_width=image_size
),
T.FixedSizeCrop(crop_size=(image_size, image_size)),
])
return augmentation
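# Usage sketch (the config values below are hypothetical stand-ins for what a real
# Mask2Former COCO config would provide via get_cfg() + add_maskformer2_config()):
def _demo_build_transform_gen():
    from detectron2.config import CfgNode as CN

    cfg = CN()
    cfg.INPUT = CN()
    cfg.INPUT.IMAGE_SIZE = 1024   # target side length of the square crop
    cfg.INPUT.MIN_SCALE = 0.1     # lower bound of the random resize scale
    cfg.INPUT.MAX_SCALE = 2.0     # upper bound of the random resize scale
    cfg.INPUT.RANDOM_FLIP = "horizontal"
    # Returns [RandomFlip, ResizeScale, FixedSizeCrop], i.e. large-scale jittering.
    return build_transform_gen(cfg, is_train=True)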
# This is specifically designed for the COCO dataset.
class COCOInstanceNewBaselineDatasetMapper:
"""
A callable which takes a dataset dict in Detectron2 Dataset format
and maps it into a format used by MaskFormer.
This dataset mapper applies the same transformation as DETR for COCO panoptic segmentation.
The callable currently does the following:
1. Reads the image from "file_name"
2. Applies geometric transforms to the image and annotation
3. Finds and applies suitable cropping to the image and annotation
4. Prepares the image and annotation as Tensors
"""
@configurable
def __init__(
self,
is_train=True,
*,
tfm_gens,
image_format,
):
"""
NOTE: this interface is experimental.
Args:
is_train: for training or inference
tfm_gens: a list of augmentations or deterministic transforms to apply
image_format: an image format supported by :func:`detection_utils.read_image`.
"""
self.tfm_gens = tfm_gens
logging.getLogger(__name__).info(
"[COCOInstanceNewBaselineDatasetMapper] Full TransformGens used in training: {}".format(str(self.tfm_gens))
)
self.img_format = image_format
self.is_train = is_train
@classmethod
def from_config(cls, cfg, is_train=True):
# Build augmentation
tfm_gens = build_transform_gen(cfg, is_train)
ret = {
"is_train": is_train,
"tfm_gens": tfm_gens,
"image_format": cfg.INPUT.FORMAT,
}
return ret
def __call__(self, dataset_dict):
"""
Args:
dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
Returns:
dict: a format that builtin models in detectron2 accept
"""
dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
image = utils.read_image(dataset_dict["file_name"], format=self.img_format)
utils.check_image_size(dataset_dict, image)
# TODO: get padding mask
# by feeding a "segmentation mask" to the same transforms
padding_mask = np.ones(image.shape[:2])
image, transforms = T.apply_transform_gens(self.tfm_gens, image)
# the crop transformation has default padding value 0 for segmentation
padding_mask = transforms.apply_segmentation(padding_mask)
padding_mask = ~ padding_mask.astype(bool)
image_shape = image.shape[:2] # h, w
# Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
# but not efficient on large generic data structures due to the use of pickle & mp.Queue.
# Therefore it's important to use torch.Tensor.
dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
dataset_dict["padding_mask"] = torch.as_tensor(np.ascontiguousarray(padding_mask))
if not self.is_train:
# USER: Modify this if you want to keep them for some reason.
dataset_dict.pop("annotations", None)
return dataset_dict
if "annotations" in dataset_dict:
# USER: Modify this if you want to keep them for some reason.
for anno in dataset_dict["annotations"]:
# Let's always keep mask
# if not self.mask_on:
# anno.pop("segmentation", None)
anno.pop("keypoints", None)
# USER: Implement additional transformations if you have other types of data
annos = [
utils.transform_instance_annotations(obj, transforms, image_shape)
for obj in dataset_dict.pop("annotations")
if obj.get("iscrowd", 0) == 0
]
# NOTE: does not support BitMask due to augmentation
# Current BitMask cannot handle empty objects
instances = utils.annotations_to_instances(annos, image_shape)
# After transforms such as cropping are applied, the bounding box may no longer
# tightly bound the object. As an example, imagine a triangle object
# [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). The tight
# bounding box of the cropped triangle should be [(1,0),(2,1)], which is not equal to
# the intersection of original bounding box and the cropping box.
instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
# Need to filter empty instances first (due to augmentation)
instances = utils.filter_empty_instances(instances)
# Generate masks from polygon
h, w = instances.image_size
# image_size_xyxy = torch.as_tensor([w, h, w, h], dtype=torch.float)
if hasattr(instances, 'gt_masks'):
gt_masks = instances.gt_masks
gt_masks = convert_coco_poly_to_mask(gt_masks.polygons, h, w)
instances.gt_masks = gt_masks
dataset_dict["instances"] = instances
return dataset_dict
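# End-to-end usage sketch (illustrative; assumes a fully populated Mask2Former COCO
# instance-segmentation cfg and registered COCO datasets, as in the Mask2Former
# training scripts):
#
#   from detectron2.data import build_detection_train_loader
#   mapper = COCOInstanceNewBaselineDatasetMapper(cfg, is_train=True)
#   data_loader = build_detection_train_loader(cfg, mapper=mapper)
#   batch = next(iter(data_loader))
#   # each record now carries "image" (CHW tensor), "padding_mask" (HxW bool)
#   # and "instances" with gt_classes, gt_boxes and binary gt_masks.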
| 3D-LLM-main | three_steps_3d_feature/first_step/mask2former/data/dataset_mappers/coco_instance_new_baseline_dataset_mapper.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import logging
import numpy as np
import pycocotools.mask as mask_util
import torch
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from detectron2.projects.point_rend import ColorAugSSDTransform
from detectron2.structures import BitMasks, Instances, polygons_to_bitmask
__all__ = ["MaskFormerInstanceDatasetMapper"]
class MaskFormerInstanceDatasetMapper:
"""
A callable which takes a dataset dict in Detectron2 Dataset format
and maps it into a format used by MaskFormer for instance segmentation.
The callable currently does the following:
1. Reads the image from "file_name"
2. Applies geometric transforms to the image and annotation
3. Finds and applies suitable cropping to the image and annotation
4. Prepares the image and annotation as Tensors
"""
@configurable
def __init__(
self,
is_train=True,
*,
augmentations,
image_format,
size_divisibility,
):
"""
NOTE: this interface is experimental.
Args:
is_train: for training or inference
augmentations: a list of augmentations or deterministic transforms to apply
image_format: an image format supported by :func:`detection_utils.read_image`.
size_divisibility: pad image size to be divisible by this value
"""
self.is_train = is_train
self.tfm_gens = augmentations
self.img_format = image_format
self.size_divisibility = size_divisibility
logger = logging.getLogger(__name__)
mode = "training" if is_train else "inference"
logger.info(f"[{self.__class__.__name__}] Augmentations used in {mode}: {augmentations}")
@classmethod
def from_config(cls, cfg, is_train=True):
# Build augmentation
augs = [
T.ResizeShortestEdge(
cfg.INPUT.MIN_SIZE_TRAIN,
cfg.INPUT.MAX_SIZE_TRAIN,
cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING,
)
]
if cfg.INPUT.CROP.ENABLED:
augs.append(
T.RandomCrop(
cfg.INPUT.CROP.TYPE,
cfg.INPUT.CROP.SIZE,
)
)
if cfg.INPUT.COLOR_AUG_SSD:
augs.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))
augs.append(T.RandomFlip())
ret = {
"is_train": is_train,
"augmentations": augs,
"image_format": cfg.INPUT.FORMAT,
"size_divisibility": cfg.INPUT.SIZE_DIVISIBILITY,
}
return ret
def __call__(self, dataset_dict):
"""
Args:
dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
Returns:
dict: a format that builtin models in detectron2 accept
"""
assert self.is_train, "MaskFormerInstanceDatasetMapper should only be used for training!"
dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
image = utils.read_image(dataset_dict["file_name"], format=self.img_format)
utils.check_image_size(dataset_dict, image)
aug_input = T.AugInput(image)
aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)
image = aug_input.image
# transform instance masks
assert "annotations" in dataset_dict
for anno in dataset_dict["annotations"]:
anno.pop("keypoints", None)
annos = [
utils.transform_instance_annotations(obj, transforms, image.shape[:2])
for obj in dataset_dict.pop("annotations")
if obj.get("iscrowd", 0) == 0
]
if len(annos):
assert "segmentation" in annos[0]
segms = [obj["segmentation"] for obj in annos]
masks = []
for segm in segms:
if isinstance(segm, list):
# polygon
masks.append(polygons_to_bitmask(segm, *image.shape[:2]))
elif isinstance(segm, dict):
# COCO RLE
masks.append(mask_util.decode(segm))
elif isinstance(segm, np.ndarray):
assert segm.ndim == 2, "Expect segmentation of 2 dimensions, got {}.".format(
segm.ndim
)
# mask array
masks.append(segm)
else:
raise ValueError(
"Cannot convert segmentation of type '{}' to BitMasks!"
"Supported types are: polygons as list[list[float] or ndarray],"
" COCO-style RLE as a dict, or a binary segmentation mask "
" in a 2D numpy array of shape HxW.".format(type(segm))
)
# Pad image and segmentation label here!
image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
masks = [torch.from_numpy(np.ascontiguousarray(x)) for x in masks]
classes = [int(obj["category_id"]) for obj in annos]
classes = torch.tensor(classes, dtype=torch.int64)
if self.size_divisibility > 0:
image_size = (image.shape[-2], image.shape[-1])
padding_size = [
0,
self.size_divisibility - image_size[1],
0,
self.size_divisibility - image_size[0],
]
# pad image
image = F.pad(image, padding_size, value=128).contiguous()
# pad mask
masks = [F.pad(x, padding_size, value=0).contiguous() for x in masks]
image_shape = (image.shape[-2], image.shape[-1]) # h, w
# Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
# but not efficient on large generic data structures due to the use of pickle & mp.Queue.
# Therefore it's important to use torch.Tensor.
dataset_dict["image"] = image
# Prepare per-category binary masks
instances = Instances(image_shape)
instances.gt_classes = classes
if len(masks) == 0:
# Some images do not have annotations (all ignored)
instances.gt_masks = torch.zeros((0, image.shape[-2], image.shape[-1]))
else:
masks = BitMasks(torch.stack(masks))
instances.gt_masks = masks.tensor
dataset_dict["instances"] = instances
return dataset_dict
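# Usage sketch (illustrative; cfg is assumed to be a Mask2Former instance-segmentation
# config such as the ADE20K or COCO ones shipped with the repo):
#
#   mapper = MaskFormerInstanceDatasetMapper(cfg, is_train=True)
#   record = mapper(dataset_dict)   # a Detectron2 dict with "file_name" + "annotations"
#   # record["image"]: CHW image tensor, padded to SIZE_DIVISIBILITY with value 128
#   # record["instances"]: Instances with gt_classes and per-instance binary gt_masks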
| 3D-LLM-main | three_steps_3d_feature/first_step/mask2former/data/dataset_mappers/mask_former_instance_dataset_mapper.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import os
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets import load_sem_seg
ADE20K_SEM_SEG_FULL_CATEGORIES = [
{"name": "wall", "id": 2978, "trainId": 0},
{"name": "building, edifice", "id": 312, "trainId": 1},
{"name": "sky", "id": 2420, "trainId": 2},
{"name": "tree", "id": 2855, "trainId": 3},
{"name": "road, route", "id": 2131, "trainId": 4},
{"name": "floor, flooring", "id": 976, "trainId": 5},
{"name": "ceiling", "id": 447, "trainId": 6},
{"name": "bed", "id": 165, "trainId": 7},
{"name": "sidewalk, pavement", "id": 2377, "trainId": 8},
{"name": "earth, ground", "id": 838, "trainId": 9},
{"name": "cabinet", "id": 350, "trainId": 10},
{"name": "person, individual, someone, somebody, mortal, soul", "id": 1831, "trainId": 11},
{"name": "grass", "id": 1125, "trainId": 12},
{"name": "windowpane, window", "id": 3055, "trainId": 13},
{"name": "car, auto, automobile, machine, motorcar", "id": 401, "trainId": 14},
{"name": "mountain, mount", "id": 1610, "trainId": 15},
{"name": "plant, flora, plant life", "id": 1910, "trainId": 16},
{"name": "table", "id": 2684, "trainId": 17},
{"name": "chair", "id": 471, "trainId": 18},
{"name": "curtain, drape, drapery, mantle, pall", "id": 687, "trainId": 19},
{"name": "door", "id": 774, "trainId": 20},
{"name": "sofa, couch, lounge", "id": 2473, "trainId": 21},
{"name": "sea", "id": 2264, "trainId": 22},
{"name": "painting, picture", "id": 1735, "trainId": 23},
{"name": "water", "id": 2994, "trainId": 24},
{"name": "mirror", "id": 1564, "trainId": 25},
{"name": "house", "id": 1276, "trainId": 26},
{"name": "rug, carpet, carpeting", "id": 2178, "trainId": 27},
{"name": "shelf", "id": 2329, "trainId": 28},
{"name": "armchair", "id": 57, "trainId": 29},
{"name": "fence, fencing", "id": 907, "trainId": 30},
{"name": "field", "id": 913, "trainId": 31},
{"name": "lamp", "id": 1395, "trainId": 32},
{"name": "rock, stone", "id": 2138, "trainId": 33},
{"name": "seat", "id": 2272, "trainId": 34},
{"name": "river", "id": 2128, "trainId": 35},
{"name": "desk", "id": 724, "trainId": 36},
{"name": "bathtub, bathing tub, bath, tub", "id": 155, "trainId": 37},
{"name": "railing, rail", "id": 2053, "trainId": 38},
{"name": "signboard, sign", "id": 2380, "trainId": 39},
{"name": "cushion", "id": 689, "trainId": 40},
{"name": "path", "id": 1788, "trainId": 41},
{"name": "work surface", "id": 3087, "trainId": 42},
{"name": "stairs, steps", "id": 2530, "trainId": 43},
{"name": "column, pillar", "id": 581, "trainId": 44},
{"name": "sink", "id": 2388, "trainId": 45},
{"name": "wardrobe, closet, press", "id": 2985, "trainId": 46},
{"name": "snow", "id": 2454, "trainId": 47},
{"name": "refrigerator, icebox", "id": 2096, "trainId": 48},
{"name": "base, pedestal, stand", "id": 137, "trainId": 49},
{"name": "bridge, span", "id": 294, "trainId": 50},
{"name": "blind, screen", "id": 212, "trainId": 51},
{"name": "runway", "id": 2185, "trainId": 52},
{"name": "cliff, drop, drop-off", "id": 524, "trainId": 53},
{"name": "sand", "id": 2212, "trainId": 54},
{"name": "fireplace, hearth, open fireplace", "id": 943, "trainId": 55},
{"name": "pillow", "id": 1869, "trainId": 56},
{"name": "screen door, screen", "id": 2251, "trainId": 57},
{"name": "toilet, can, commode, crapper, pot, potty, stool, throne", "id": 2793, "trainId": 58},
{"name": "skyscraper", "id": 2423, "trainId": 59},
{"name": "grandstand, covered stand", "id": 1121, "trainId": 60},
{"name": "box", "id": 266, "trainId": 61},
{"name": "pool table, billiard table, snooker table", "id": 1948, "trainId": 62},
{"name": "palm, palm tree", "id": 1744, "trainId": 63},
{"name": "double door", "id": 783, "trainId": 64},
{"name": "coffee table, cocktail table", "id": 571, "trainId": 65},
{"name": "counter", "id": 627, "trainId": 66},
{"name": "countertop", "id": 629, "trainId": 67},
{"name": "chest of drawers, chest, bureau, dresser", "id": 491, "trainId": 68},
{"name": "kitchen island", "id": 1374, "trainId": 69},
{"name": "boat", "id": 223, "trainId": 70},
{"name": "waterfall, falls", "id": 3016, "trainId": 71},
{
"name": "stove, kitchen stove, range, kitchen range, cooking stove",
"id": 2598,
"trainId": 72,
},
{"name": "flower", "id": 978, "trainId": 73},
{"name": "bookcase", "id": 239, "trainId": 74},
{"name": "controls", "id": 608, "trainId": 75},
{"name": "book", "id": 236, "trainId": 76},
{"name": "stairway, staircase", "id": 2531, "trainId": 77},
{"name": "streetlight, street lamp", "id": 2616, "trainId": 78},
{
"name": "computer, computing machine, computing device, data processor, electronic computer, information processing system",
"id": 591,
"trainId": 79,
},
{
"name": "bus, autobus, coach, charabanc, double-decker, jitney, motorbus, motorcoach, omnibus, passenger vehicle",
"id": 327,
"trainId": 80,
},
{"name": "swivel chair", "id": 2679, "trainId": 81},
{"name": "light, light source", "id": 1451, "trainId": 82},
{"name": "bench", "id": 181, "trainId": 83},
{"name": "case, display case, showcase, vitrine", "id": 420, "trainId": 84},
{"name": "towel", "id": 2821, "trainId": 85},
{"name": "fountain", "id": 1023, "trainId": 86},
{"name": "embankment", "id": 855, "trainId": 87},
{
"name": "television receiver, television, television set, tv, tv set, idiot box, boob tube, telly, goggle box",
"id": 2733,
"trainId": 88,
},
{"name": "van", "id": 2928, "trainId": 89},
{"name": "hill", "id": 1240, "trainId": 90},
{"name": "awning, sunshade, sunblind", "id": 77, "trainId": 91},
{"name": "poster, posting, placard, notice, bill, card", "id": 1969, "trainId": 92},
{"name": "truck, motortruck", "id": 2880, "trainId": 93},
{"name": "airplane, aeroplane, plane", "id": 14, "trainId": 94},
{"name": "pole", "id": 1936, "trainId": 95},
{"name": "tower", "id": 2828, "trainId": 96},
{"name": "court", "id": 631, "trainId": 97},
{"name": "ball", "id": 103, "trainId": 98},
{
"name": "aircraft carrier, carrier, flattop, attack aircraft carrier",
"id": 3144,
"trainId": 99,
},
{"name": "buffet, counter, sideboard", "id": 308, "trainId": 100},
{"name": "hovel, hut, hutch, shack, shanty", "id": 1282, "trainId": 101},
{"name": "apparel, wearing apparel, dress, clothes", "id": 38, "trainId": 102},
{"name": "minibike, motorbike", "id": 1563, "trainId": 103},
{"name": "animal, animate being, beast, brute, creature, fauna", "id": 29, "trainId": 104},
{"name": "chandelier, pendant, pendent", "id": 480, "trainId": 105},
{"name": "step, stair", "id": 2569, "trainId": 106},
{"name": "booth, cubicle, stall, kiosk", "id": 247, "trainId": 107},
{"name": "bicycle, bike, wheel, cycle", "id": 187, "trainId": 108},
{"name": "doorframe, doorcase", "id": 778, "trainId": 109},
{"name": "sconce", "id": 2243, "trainId": 110},
{"name": "pond", "id": 1941, "trainId": 111},
{"name": "trade name, brand name, brand, marque", "id": 2833, "trainId": 112},
{"name": "bannister, banister, balustrade, balusters, handrail", "id": 120, "trainId": 113},
{"name": "bag", "id": 95, "trainId": 114},
{"name": "traffic light, traffic signal, stoplight", "id": 2836, "trainId": 115},
{"name": "gazebo", "id": 1087, "trainId": 116},
{"name": "escalator, moving staircase, moving stairway", "id": 868, "trainId": 117},
{"name": "land, ground, soil", "id": 1401, "trainId": 118},
{"name": "board, plank", "id": 220, "trainId": 119},
{"name": "arcade machine", "id": 47, "trainId": 120},
{"name": "eiderdown, duvet, continental quilt", "id": 843, "trainId": 121},
{"name": "bar", "id": 123, "trainId": 122},
{"name": "stall, stand, sales booth", "id": 2537, "trainId": 123},
{"name": "playground", "id": 1927, "trainId": 124},
{"name": "ship", "id": 2337, "trainId": 125},
{"name": "ottoman, pouf, pouffe, puff, hassock", "id": 1702, "trainId": 126},
{
"name": "ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin",
"id": 64,
"trainId": 127,
},
{"name": "bottle", "id": 249, "trainId": 128},
{"name": "cradle", "id": 642, "trainId": 129},
{"name": "pot, flowerpot", "id": 1981, "trainId": 130},
{
"name": "conveyer belt, conveyor belt, conveyer, conveyor, transporter",
"id": 609,
"trainId": 131,
},
{"name": "train, railroad train", "id": 2840, "trainId": 132},
{"name": "stool", "id": 2586, "trainId": 133},
{"name": "lake", "id": 1393, "trainId": 134},
{"name": "tank, storage tank", "id": 2704, "trainId": 135},
{"name": "ice, water ice", "id": 1304, "trainId": 136},
{"name": "basket, handbasket", "id": 146, "trainId": 137},
{"name": "manhole", "id": 1494, "trainId": 138},
{"name": "tent, collapsible shelter", "id": 2739, "trainId": 139},
{"name": "canopy", "id": 389, "trainId": 140},
{"name": "microwave, microwave oven", "id": 1551, "trainId": 141},
{"name": "barrel, cask", "id": 131, "trainId": 142},
{"name": "dirt track", "id": 738, "trainId": 143},
{"name": "beam", "id": 161, "trainId": 144},
{"name": "dishwasher, dish washer, dishwashing machine", "id": 747, "trainId": 145},
{"name": "plate", "id": 1919, "trainId": 146},
{"name": "screen, crt screen", "id": 3109, "trainId": 147},
{"name": "ruins", "id": 2179, "trainId": 148},
{"name": "washer, automatic washer, washing machine", "id": 2989, "trainId": 149},
{"name": "blanket, cover", "id": 206, "trainId": 150},
{"name": "plaything, toy", "id": 1930, "trainId": 151},
{"name": "food, solid food", "id": 1002, "trainId": 152},
{"name": "screen, silver screen, projection screen", "id": 2254, "trainId": 153},
{"name": "oven", "id": 1708, "trainId": 154},
{"name": "stage", "id": 2526, "trainId": 155},
{"name": "beacon, lighthouse, beacon light, pharos", "id": 160, "trainId": 156},
{"name": "umbrella", "id": 2901, "trainId": 157},
{"name": "sculpture", "id": 2262, "trainId": 158},
{"name": "aqueduct", "id": 44, "trainId": 159},
{"name": "container", "id": 597, "trainId": 160},
{"name": "scaffolding, staging", "id": 2235, "trainId": 161},
{"name": "hood, exhaust hood", "id": 1260, "trainId": 162},
{"name": "curb, curbing, kerb", "id": 682, "trainId": 163},
{"name": "roller coaster", "id": 2151, "trainId": 164},
{"name": "horse, equus caballus", "id": 3107, "trainId": 165},
{"name": "catwalk", "id": 432, "trainId": 166},
{"name": "glass, drinking glass", "id": 1098, "trainId": 167},
{"name": "vase", "id": 2932, "trainId": 168},
{"name": "central reservation", "id": 461, "trainId": 169},
{"name": "carousel", "id": 410, "trainId": 170},
{"name": "radiator", "id": 2046, "trainId": 171},
{"name": "closet", "id": 533, "trainId": 172},
{"name": "machine", "id": 1481, "trainId": 173},
{"name": "pier, wharf, wharfage, dock", "id": 1858, "trainId": 174},
{"name": "fan", "id": 894, "trainId": 175},
{"name": "inflatable bounce game", "id": 1322, "trainId": 176},
{"name": "pitch", "id": 1891, "trainId": 177},
{"name": "paper", "id": 1756, "trainId": 178},
{"name": "arcade, colonnade", "id": 49, "trainId": 179},
{"name": "hot tub", "id": 1272, "trainId": 180},
{"name": "helicopter", "id": 1229, "trainId": 181},
{"name": "tray", "id": 2850, "trainId": 182},
{"name": "partition, divider", "id": 1784, "trainId": 183},
{"name": "vineyard", "id": 2962, "trainId": 184},
{"name": "bowl", "id": 259, "trainId": 185},
{"name": "bullring", "id": 319, "trainId": 186},
{"name": "flag", "id": 954, "trainId": 187},
{"name": "pot", "id": 1974, "trainId": 188},
{"name": "footbridge, overcrossing, pedestrian bridge", "id": 1013, "trainId": 189},
{"name": "shower", "id": 2356, "trainId": 190},
{"name": "bag, traveling bag, travelling bag, grip, suitcase", "id": 97, "trainId": 191},
{"name": "bulletin board, notice board", "id": 318, "trainId": 192},
{"name": "confessional booth", "id": 592, "trainId": 193},
{"name": "trunk, tree trunk, bole", "id": 2885, "trainId": 194},
{"name": "forest", "id": 1017, "trainId": 195},
{"name": "elevator door", "id": 851, "trainId": 196},
{"name": "laptop, laptop computer", "id": 1407, "trainId": 197},
{"name": "instrument panel", "id": 1332, "trainId": 198},
{"name": "bucket, pail", "id": 303, "trainId": 199},
{"name": "tapestry, tapis", "id": 2714, "trainId": 200},
{"name": "platform", "id": 1924, "trainId": 201},
{"name": "jacket", "id": 1346, "trainId": 202},
{"name": "gate", "id": 1081, "trainId": 203},
{"name": "monitor, monitoring device", "id": 1583, "trainId": 204},
{
"name": "telephone booth, phone booth, call box, telephone box, telephone kiosk",
"id": 2727,
"trainId": 205,
},
{"name": "spotlight, spot", "id": 2509, "trainId": 206},
{"name": "ring", "id": 2123, "trainId": 207},
{"name": "control panel", "id": 602, "trainId": 208},
{"name": "blackboard, chalkboard", "id": 202, "trainId": 209},
{"name": "air conditioner, air conditioning", "id": 10, "trainId": 210},
{"name": "chest", "id": 490, "trainId": 211},
{"name": "clock", "id": 530, "trainId": 212},
{"name": "sand dune", "id": 2213, "trainId": 213},
{"name": "pipe, pipage, piping", "id": 1884, "trainId": 214},
{"name": "vault", "id": 2934, "trainId": 215},
{"name": "table football", "id": 2687, "trainId": 216},
{"name": "cannon", "id": 387, "trainId": 217},
{"name": "swimming pool, swimming bath, natatorium", "id": 2668, "trainId": 218},
{"name": "fluorescent, fluorescent fixture", "id": 982, "trainId": 219},
{"name": "statue", "id": 2547, "trainId": 220},
{
"name": "loudspeaker, speaker, speaker unit, loudspeaker system, speaker system",
"id": 1474,
"trainId": 221,
},
{"name": "exhibitor", "id": 877, "trainId": 222},
{"name": "ladder", "id": 1391, "trainId": 223},
{"name": "carport", "id": 414, "trainId": 224},
{"name": "dam", "id": 698, "trainId": 225},
{"name": "pulpit", "id": 2019, "trainId": 226},
{"name": "skylight, fanlight", "id": 2422, "trainId": 227},
{"name": "water tower", "id": 3010, "trainId": 228},
{"name": "grill, grille, grillwork", "id": 1139, "trainId": 229},
{"name": "display board", "id": 753, "trainId": 230},
{"name": "pane, pane of glass, window glass", "id": 1747, "trainId": 231},
{"name": "rubbish, trash, scrap", "id": 2175, "trainId": 232},
{"name": "ice rink", "id": 1301, "trainId": 233},
{"name": "fruit", "id": 1033, "trainId": 234},
{"name": "patio", "id": 1789, "trainId": 235},
{"name": "vending machine", "id": 2939, "trainId": 236},
{"name": "telephone, phone, telephone set", "id": 2730, "trainId": 237},
{"name": "net", "id": 1652, "trainId": 238},
{
"name": "backpack, back pack, knapsack, packsack, rucksack, haversack",
"id": 90,
"trainId": 239,
},
{"name": "jar", "id": 1349, "trainId": 240},
{"name": "track", "id": 2830, "trainId": 241},
{"name": "magazine", "id": 1485, "trainId": 242},
{"name": "shutter", "id": 2370, "trainId": 243},
{"name": "roof", "id": 2155, "trainId": 244},
{"name": "banner, streamer", "id": 118, "trainId": 245},
{"name": "landfill", "id": 1402, "trainId": 246},
{"name": "post", "id": 1957, "trainId": 247},
{"name": "altarpiece, reredos", "id": 3130, "trainId": 248},
{"name": "hat, chapeau, lid", "id": 1197, "trainId": 249},
{"name": "arch, archway", "id": 52, "trainId": 250},
{"name": "table game", "id": 2688, "trainId": 251},
{"name": "bag, handbag, pocketbook, purse", "id": 96, "trainId": 252},
{"name": "document, written document, papers", "id": 762, "trainId": 253},
{"name": "dome", "id": 772, "trainId": 254},
{"name": "pier", "id": 1857, "trainId": 255},
{"name": "shanties", "id": 2315, "trainId": 256},
{"name": "forecourt", "id": 1016, "trainId": 257},
{"name": "crane", "id": 643, "trainId": 258},
{"name": "dog, domestic dog, canis familiaris", "id": 3105, "trainId": 259},
{"name": "piano, pianoforte, forte-piano", "id": 1849, "trainId": 260},
{"name": "drawing", "id": 791, "trainId": 261},
{"name": "cabin", "id": 349, "trainId": 262},
{
"name": "ad, advertisement, advertizement, advertising, advertizing, advert",
"id": 6,
"trainId": 263,
},
{"name": "amphitheater, amphitheatre, coliseum", "id": 3114, "trainId": 264},
{"name": "monument", "id": 1587, "trainId": 265},
{"name": "henhouse", "id": 1233, "trainId": 266},
{"name": "cockpit", "id": 559, "trainId": 267},
{"name": "heater, warmer", "id": 1223, "trainId": 268},
{"name": "windmill, aerogenerator, wind generator", "id": 3049, "trainId": 269},
{"name": "pool", "id": 1943, "trainId": 270},
{"name": "elevator, lift", "id": 853, "trainId": 271},
{"name": "decoration, ornament, ornamentation", "id": 709, "trainId": 272},
{"name": "labyrinth", "id": 1390, "trainId": 273},
{"name": "text, textual matter", "id": 2748, "trainId": 274},
{"name": "printer", "id": 2007, "trainId": 275},
{"name": "mezzanine, first balcony", "id": 1546, "trainId": 276},
{"name": "mattress", "id": 1513, "trainId": 277},
{"name": "straw", "id": 2600, "trainId": 278},
{"name": "stalls", "id": 2538, "trainId": 279},
{"name": "patio, terrace", "id": 1790, "trainId": 280},
{"name": "billboard, hoarding", "id": 194, "trainId": 281},
{"name": "bus stop", "id": 326, "trainId": 282},
{"name": "trouser, pant", "id": 2877, "trainId": 283},
{"name": "console table, console", "id": 594, "trainId": 284},
{"name": "rack", "id": 2036, "trainId": 285},
{"name": "notebook", "id": 1662, "trainId": 286},
{"name": "shrine", "id": 2366, "trainId": 287},
{"name": "pantry", "id": 1754, "trainId": 288},
{"name": "cart", "id": 418, "trainId": 289},
{"name": "steam shovel", "id": 2553, "trainId": 290},
{"name": "porch", "id": 1951, "trainId": 291},
{"name": "postbox, mailbox, letter box", "id": 1963, "trainId": 292},
{"name": "figurine, statuette", "id": 918, "trainId": 293},
{"name": "recycling bin", "id": 2086, "trainId": 294},
{"name": "folding screen", "id": 997, "trainId": 295},
{"name": "telescope", "id": 2731, "trainId": 296},
{"name": "deck chair, beach chair", "id": 704, "trainId": 297},
{"name": "kennel", "id": 1365, "trainId": 298},
{"name": "coffee maker", "id": 569, "trainId": 299},
{"name": "altar, communion table, lord's table", "id": 3108, "trainId": 300},
{"name": "fish", "id": 948, "trainId": 301},
{"name": "easel", "id": 839, "trainId": 302},
{"name": "artificial golf green", "id": 63, "trainId": 303},
{"name": "iceberg", "id": 1305, "trainId": 304},
{"name": "candlestick, candle holder", "id": 378, "trainId": 305},
{"name": "shower stall, shower bath", "id": 2362, "trainId": 306},
{"name": "television stand", "id": 2734, "trainId": 307},
{
"name": "wall socket, wall plug, electric outlet, electrical outlet, outlet, electric receptacle",
"id": 2982,
"trainId": 308,
},
{"name": "skeleton", "id": 2398, "trainId": 309},
{"name": "grand piano, grand", "id": 1119, "trainId": 310},
{"name": "candy, confect", "id": 382, "trainId": 311},
{"name": "grille door", "id": 1141, "trainId": 312},
{"name": "pedestal, plinth, footstall", "id": 1805, "trainId": 313},
{"name": "jersey, t-shirt, tee shirt", "id": 3102, "trainId": 314},
{"name": "shoe", "id": 2341, "trainId": 315},
{"name": "gravestone, headstone, tombstone", "id": 1131, "trainId": 316},
{"name": "shanty", "id": 2316, "trainId": 317},
{"name": "structure", "id": 2626, "trainId": 318},
{"name": "rocking chair, rocker", "id": 3104, "trainId": 319},
{"name": "bird", "id": 198, "trainId": 320},
{"name": "place mat", "id": 1896, "trainId": 321},
{"name": "tomb", "id": 2800, "trainId": 322},
{"name": "big top", "id": 190, "trainId": 323},
{"name": "gas pump, gasoline pump, petrol pump, island dispenser", "id": 3131, "trainId": 324},
{"name": "lockers", "id": 1463, "trainId": 325},
{"name": "cage", "id": 357, "trainId": 326},
{"name": "finger", "id": 929, "trainId": 327},
{"name": "bleachers", "id": 209, "trainId": 328},
{"name": "ferris wheel", "id": 912, "trainId": 329},
{"name": "hairdresser chair", "id": 1164, "trainId": 330},
{"name": "mat", "id": 1509, "trainId": 331},
{"name": "stands", "id": 2539, "trainId": 332},
{"name": "aquarium, fish tank, marine museum", "id": 3116, "trainId": 333},
{"name": "streetcar, tram, tramcar, trolley, trolley car", "id": 2615, "trainId": 334},
{"name": "napkin, table napkin, serviette", "id": 1644, "trainId": 335},
{"name": "dummy", "id": 818, "trainId": 336},
{"name": "booklet, brochure, folder, leaflet, pamphlet", "id": 242, "trainId": 337},
{"name": "sand trap", "id": 2217, "trainId": 338},
{"name": "shop, store", "id": 2347, "trainId": 339},
{"name": "table cloth", "id": 2686, "trainId": 340},
{"name": "service station", "id": 2300, "trainId": 341},
{"name": "coffin", "id": 572, "trainId": 342},
{"name": "drawer", "id": 789, "trainId": 343},
{"name": "cages", "id": 358, "trainId": 344},
{"name": "slot machine, coin machine", "id": 2443, "trainId": 345},
{"name": "balcony", "id": 101, "trainId": 346},
{"name": "volleyball court", "id": 2969, "trainId": 347},
{"name": "table tennis", "id": 2692, "trainId": 348},
{"name": "control table", "id": 606, "trainId": 349},
{"name": "shirt", "id": 2339, "trainId": 350},
{"name": "merchandise, ware, product", "id": 1533, "trainId": 351},
{"name": "railway", "id": 2060, "trainId": 352},
{"name": "parterre", "id": 1782, "trainId": 353},
{"name": "chimney", "id": 495, "trainId": 354},
{"name": "can, tin, tin can", "id": 371, "trainId": 355},
{"name": "tanks", "id": 2707, "trainId": 356},
{"name": "fabric, cloth, material, textile", "id": 889, "trainId": 357},
{"name": "alga, algae", "id": 3156, "trainId": 358},
{"name": "system", "id": 2683, "trainId": 359},
{"name": "map", "id": 1499, "trainId": 360},
{"name": "greenhouse", "id": 1135, "trainId": 361},
{"name": "mug", "id": 1619, "trainId": 362},
{"name": "barbecue", "id": 125, "trainId": 363},
{"name": "trailer", "id": 2838, "trainId": 364},
{"name": "toilet tissue, toilet paper, bathroom tissue", "id": 2792, "trainId": 365},
{"name": "organ", "id": 1695, "trainId": 366},
{"name": "dishrag, dishcloth", "id": 746, "trainId": 367},
{"name": "island", "id": 1343, "trainId": 368},
{"name": "keyboard", "id": 1370, "trainId": 369},
{"name": "trench", "id": 2858, "trainId": 370},
{"name": "basket, basketball hoop, hoop", "id": 145, "trainId": 371},
{"name": "steering wheel, wheel", "id": 2565, "trainId": 372},
{"name": "pitcher, ewer", "id": 1892, "trainId": 373},
{"name": "goal", "id": 1103, "trainId": 374},
{"name": "bread, breadstuff, staff of life", "id": 286, "trainId": 375},
{"name": "beds", "id": 170, "trainId": 376},
{"name": "wood", "id": 3073, "trainId": 377},
{"name": "file cabinet", "id": 922, "trainId": 378},
{"name": "newspaper, paper", "id": 1655, "trainId": 379},
{"name": "motorboat", "id": 1602, "trainId": 380},
{"name": "rope", "id": 2160, "trainId": 381},
{"name": "guitar", "id": 1151, "trainId": 382},
{"name": "rubble", "id": 2176, "trainId": 383},
{"name": "scarf", "id": 2239, "trainId": 384},
{"name": "barrels", "id": 132, "trainId": 385},
{"name": "cap", "id": 394, "trainId": 386},
{"name": "leaves", "id": 1424, "trainId": 387},
{"name": "control tower", "id": 607, "trainId": 388},
{"name": "dashboard", "id": 700, "trainId": 389},
{"name": "bandstand", "id": 116, "trainId": 390},
{"name": "lectern", "id": 1425, "trainId": 391},
{"name": "switch, electric switch, electrical switch", "id": 2676, "trainId": 392},
{"name": "baseboard, mopboard, skirting board", "id": 141, "trainId": 393},
{"name": "shower room", "id": 2360, "trainId": 394},
{"name": "smoke", "id": 2449, "trainId": 395},
{"name": "faucet, spigot", "id": 897, "trainId": 396},
{"name": "bulldozer", "id": 317, "trainId": 397},
{"name": "saucepan", "id": 2228, "trainId": 398},
{"name": "shops", "id": 2351, "trainId": 399},
{"name": "meter", "id": 1543, "trainId": 400},
{"name": "crevasse", "id": 656, "trainId": 401},
{"name": "gear", "id": 1088, "trainId": 402},
{"name": "candelabrum, candelabra", "id": 373, "trainId": 403},
{"name": "sofa bed", "id": 2472, "trainId": 404},
{"name": "tunnel", "id": 2892, "trainId": 405},
{"name": "pallet", "id": 1740, "trainId": 406},
{"name": "wire, conducting wire", "id": 3067, "trainId": 407},
{"name": "kettle, boiler", "id": 1367, "trainId": 408},
{"name": "bidet", "id": 188, "trainId": 409},
{
"name": "baby buggy, baby carriage, carriage, perambulator, pram, stroller, go-cart, pushchair, pusher",
"id": 79,
"trainId": 410,
},
{"name": "music stand", "id": 1633, "trainId": 411},
{"name": "pipe, tube", "id": 1885, "trainId": 412},
{"name": "cup", "id": 677, "trainId": 413},
{"name": "parking meter", "id": 1779, "trainId": 414},
{"name": "ice hockey rink", "id": 1297, "trainId": 415},
{"name": "shelter", "id": 2334, "trainId": 416},
{"name": "weeds", "id": 3027, "trainId": 417},
{"name": "temple", "id": 2735, "trainId": 418},
{"name": "patty, cake", "id": 1791, "trainId": 419},
{"name": "ski slope", "id": 2405, "trainId": 420},
{"name": "panel", "id": 1748, "trainId": 421},
{"name": "wallet", "id": 2983, "trainId": 422},
{"name": "wheel", "id": 3035, "trainId": 423},
{"name": "towel rack, towel horse", "id": 2824, "trainId": 424},
{"name": "roundabout", "id": 2168, "trainId": 425},
{"name": "canister, cannister, tin", "id": 385, "trainId": 426},
{"name": "rod", "id": 2148, "trainId": 427},
{"name": "soap dispenser", "id": 2465, "trainId": 428},
{"name": "bell", "id": 175, "trainId": 429},
{"name": "canvas", "id": 390, "trainId": 430},
{"name": "box office, ticket office, ticket booth", "id": 268, "trainId": 431},
{"name": "teacup", "id": 2722, "trainId": 432},
{"name": "trellis", "id": 2857, "trainId": 433},
{"name": "workbench", "id": 3088, "trainId": 434},
{"name": "valley, vale", "id": 2926, "trainId": 435},
{"name": "toaster", "id": 2782, "trainId": 436},
{"name": "knife", "id": 1378, "trainId": 437},
{"name": "podium", "id": 1934, "trainId": 438},
{"name": "ramp", "id": 2072, "trainId": 439},
{"name": "tumble dryer", "id": 2889, "trainId": 440},
{"name": "fireplug, fire hydrant, plug", "id": 944, "trainId": 441},
{"name": "gym shoe, sneaker, tennis shoe", "id": 1158, "trainId": 442},
{"name": "lab bench", "id": 1383, "trainId": 443},
{"name": "equipment", "id": 867, "trainId": 444},
{"name": "rocky formation", "id": 2145, "trainId": 445},
{"name": "plastic", "id": 1915, "trainId": 446},
{"name": "calendar", "id": 361, "trainId": 447},
{"name": "caravan", "id": 402, "trainId": 448},
{"name": "check-in-desk", "id": 482, "trainId": 449},
{"name": "ticket counter", "id": 2761, "trainId": 450},
{"name": "brush", "id": 300, "trainId": 451},
{"name": "mill", "id": 1554, "trainId": 452},
{"name": "covered bridge", "id": 636, "trainId": 453},
{"name": "bowling alley", "id": 260, "trainId": 454},
{"name": "hanger", "id": 1186, "trainId": 455},
{"name": "excavator", "id": 871, "trainId": 456},
{"name": "trestle", "id": 2859, "trainId": 457},
{"name": "revolving door", "id": 2103, "trainId": 458},
{"name": "blast furnace", "id": 208, "trainId": 459},
{"name": "scale, weighing machine", "id": 2236, "trainId": 460},
{"name": "projector", "id": 2012, "trainId": 461},
{"name": "soap", "id": 2462, "trainId": 462},
{"name": "locker", "id": 1462, "trainId": 463},
{"name": "tractor", "id": 2832, "trainId": 464},
{"name": "stretcher", "id": 2617, "trainId": 465},
{"name": "frame", "id": 1024, "trainId": 466},
{"name": "grating", "id": 1129, "trainId": 467},
{"name": "alembic", "id": 18, "trainId": 468},
{"name": "candle, taper, wax light", "id": 376, "trainId": 469},
{"name": "barrier", "id": 134, "trainId": 470},
{"name": "cardboard", "id": 407, "trainId": 471},
{"name": "cave", "id": 434, "trainId": 472},
{"name": "puddle", "id": 2017, "trainId": 473},
{"name": "tarp", "id": 2717, "trainId": 474},
{"name": "price tag", "id": 2005, "trainId": 475},
{"name": "watchtower", "id": 2993, "trainId": 476},
{"name": "meters", "id": 1545, "trainId": 477},
{
"name": "light bulb, lightbulb, bulb, incandescent lamp, electric light, electric-light bulb",
"id": 1445,
"trainId": 478,
},
{"name": "tracks", "id": 2831, "trainId": 479},
{"name": "hair dryer", "id": 1161, "trainId": 480},
{"name": "skirt", "id": 2411, "trainId": 481},
{"name": "viaduct", "id": 2949, "trainId": 482},
{"name": "paper towel", "id": 1769, "trainId": 483},
{"name": "coat", "id": 552, "trainId": 484},
{"name": "sheet", "id": 2327, "trainId": 485},
{"name": "fire extinguisher, extinguisher, asphyxiator", "id": 939, "trainId": 486},
{"name": "water wheel", "id": 3013, "trainId": 487},
{"name": "pottery, clayware", "id": 1986, "trainId": 488},
{"name": "magazine rack", "id": 1486, "trainId": 489},
{"name": "teapot", "id": 2723, "trainId": 490},
{"name": "microphone, mike", "id": 1549, "trainId": 491},
{"name": "support", "id": 2649, "trainId": 492},
{"name": "forklift", "id": 1020, "trainId": 493},
{"name": "canyon", "id": 392, "trainId": 494},
{"name": "cash register, register", "id": 422, "trainId": 495},
{"name": "leaf, leafage, foliage", "id": 1419, "trainId": 496},
{"name": "remote control, remote", "id": 2099, "trainId": 497},
{"name": "soap dish", "id": 2464, "trainId": 498},
{"name": "windshield, windscreen", "id": 3058, "trainId": 499},
{"name": "cat", "id": 430, "trainId": 500},
{"name": "cue, cue stick, pool cue, pool stick", "id": 675, "trainId": 501},
{"name": "vent, venthole, vent-hole, blowhole", "id": 2941, "trainId": 502},
{"name": "videos", "id": 2955, "trainId": 503},
{"name": "shovel", "id": 2355, "trainId": 504},
{"name": "eaves", "id": 840, "trainId": 505},
{"name": "antenna, aerial, transmitting aerial", "id": 32, "trainId": 506},
{"name": "shipyard", "id": 2338, "trainId": 507},
{"name": "hen, biddy", "id": 1232, "trainId": 508},
{"name": "traffic cone", "id": 2834, "trainId": 509},
{"name": "washing machines", "id": 2991, "trainId": 510},
{"name": "truck crane", "id": 2879, "trainId": 511},
{"name": "cds", "id": 444, "trainId": 512},
{"name": "niche", "id": 1657, "trainId": 513},
{"name": "scoreboard", "id": 2246, "trainId": 514},
{"name": "briefcase", "id": 296, "trainId": 515},
{"name": "boot", "id": 245, "trainId": 516},
{"name": "sweater, jumper", "id": 2661, "trainId": 517},
{"name": "hay", "id": 1202, "trainId": 518},
{"name": "pack", "id": 1714, "trainId": 519},
{"name": "bottle rack", "id": 251, "trainId": 520},
{"name": "glacier", "id": 1095, "trainId": 521},
{"name": "pergola", "id": 1828, "trainId": 522},
{"name": "building materials", "id": 311, "trainId": 523},
{"name": "television camera", "id": 2732, "trainId": 524},
{"name": "first floor", "id": 947, "trainId": 525},
{"name": "rifle", "id": 2115, "trainId": 526},
{"name": "tennis table", "id": 2738, "trainId": 527},
{"name": "stadium", "id": 2525, "trainId": 528},
{"name": "safety belt", "id": 2194, "trainId": 529},
{"name": "cover", "id": 634, "trainId": 530},
{"name": "dish rack", "id": 740, "trainId": 531},
{"name": "synthesizer", "id": 2682, "trainId": 532},
{"name": "pumpkin", "id": 2020, "trainId": 533},
{"name": "gutter", "id": 1156, "trainId": 534},
{"name": "fruit stand", "id": 1036, "trainId": 535},
{"name": "ice floe, floe", "id": 1295, "trainId": 536},
{"name": "handle, grip, handgrip, hold", "id": 1181, "trainId": 537},
{"name": "wheelchair", "id": 3037, "trainId": 538},
{"name": "mousepad, mouse mat", "id": 1614, "trainId": 539},
{"name": "diploma", "id": 736, "trainId": 540},
{"name": "fairground ride", "id": 893, "trainId": 541},
{"name": "radio", "id": 2047, "trainId": 542},
{"name": "hotplate", "id": 1274, "trainId": 543},
{"name": "junk", "id": 1361, "trainId": 544},
{"name": "wheelbarrow", "id": 3036, "trainId": 545},
{"name": "stream", "id": 2606, "trainId": 546},
{"name": "toll plaza", "id": 2797, "trainId": 547},
{"name": "punching bag", "id": 2022, "trainId": 548},
{"name": "trough", "id": 2876, "trainId": 549},
{"name": "throne", "id": 2758, "trainId": 550},
{"name": "chair desk", "id": 472, "trainId": 551},
{"name": "weighbridge", "id": 3028, "trainId": 552},
{"name": "extractor fan", "id": 882, "trainId": 553},
{"name": "hanging clothes", "id": 1189, "trainId": 554},
{"name": "dish, dish aerial, dish antenna, saucer", "id": 743, "trainId": 555},
{"name": "alarm clock, alarm", "id": 3122, "trainId": 556},
{"name": "ski lift", "id": 2401, "trainId": 557},
{"name": "chain", "id": 468, "trainId": 558},
{"name": "garage", "id": 1061, "trainId": 559},
{"name": "mechanical shovel", "id": 1523, "trainId": 560},
{"name": "wine rack", "id": 3059, "trainId": 561},
{"name": "tramway", "id": 2843, "trainId": 562},
{"name": "treadmill", "id": 2853, "trainId": 563},
{"name": "menu", "id": 1529, "trainId": 564},
{"name": "block", "id": 214, "trainId": 565},
{"name": "well", "id": 3032, "trainId": 566},
{"name": "witness stand", "id": 3071, "trainId": 567},
{"name": "branch", "id": 277, "trainId": 568},
{"name": "duck", "id": 813, "trainId": 569},
{"name": "casserole", "id": 426, "trainId": 570},
{"name": "frying pan", "id": 1039, "trainId": 571},
{"name": "desk organizer", "id": 727, "trainId": 572},
{"name": "mast", "id": 1508, "trainId": 573},
{"name": "spectacles, specs, eyeglasses, glasses", "id": 2490, "trainId": 574},
{"name": "service elevator", "id": 2299, "trainId": 575},
{"name": "dollhouse", "id": 768, "trainId": 576},
{"name": "hammock", "id": 1172, "trainId": 577},
{"name": "clothes hanging", "id": 537, "trainId": 578},
{"name": "photocopier", "id": 1847, "trainId": 579},
{"name": "notepad", "id": 1664, "trainId": 580},
{"name": "golf cart", "id": 1110, "trainId": 581},
{"name": "footpath", "id": 1014, "trainId": 582},
{"name": "cross", "id": 662, "trainId": 583},
{"name": "baptismal font", "id": 121, "trainId": 584},
{"name": "boiler", "id": 227, "trainId": 585},
{"name": "skip", "id": 2410, "trainId": 586},
{"name": "rotisserie", "id": 2165, "trainId": 587},
{"name": "tables", "id": 2696, "trainId": 588},
{"name": "water mill", "id": 3005, "trainId": 589},
{"name": "helmet", "id": 1231, "trainId": 590},
{"name": "cover curtain", "id": 635, "trainId": 591},
{"name": "brick", "id": 292, "trainId": 592},
{"name": "table runner", "id": 2690, "trainId": 593},
{"name": "ashtray", "id": 65, "trainId": 594},
{"name": "street box", "id": 2607, "trainId": 595},
{"name": "stick", "id": 2574, "trainId": 596},
{"name": "hangers", "id": 1188, "trainId": 597},
{"name": "cells", "id": 456, "trainId": 598},
{"name": "urinal", "id": 2913, "trainId": 599},
{"name": "centerpiece", "id": 459, "trainId": 600},
{"name": "portable fridge", "id": 1955, "trainId": 601},
{"name": "dvds", "id": 827, "trainId": 602},
{"name": "golf club", "id": 1111, "trainId": 603},
{"name": "skirting board", "id": 2412, "trainId": 604},
{"name": "water cooler", "id": 2997, "trainId": 605},
{"name": "clipboard", "id": 528, "trainId": 606},
{"name": "camera, photographic camera", "id": 366, "trainId": 607},
{"name": "pigeonhole", "id": 1863, "trainId": 608},
{"name": "chips", "id": 500, "trainId": 609},
{"name": "food processor", "id": 1001, "trainId": 610},
{"name": "post box", "id": 1958, "trainId": 611},
{"name": "lid", "id": 1441, "trainId": 612},
{"name": "drum", "id": 809, "trainId": 613},
{"name": "blender", "id": 210, "trainId": 614},
{"name": "cave entrance", "id": 435, "trainId": 615},
{"name": "dental chair", "id": 718, "trainId": 616},
{"name": "obelisk", "id": 1674, "trainId": 617},
{"name": "canoe", "id": 388, "trainId": 618},
{"name": "mobile", "id": 1572, "trainId": 619},
{"name": "monitors", "id": 1584, "trainId": 620},
{"name": "pool ball", "id": 1944, "trainId": 621},
{"name": "cue rack", "id": 674, "trainId": 622},
{"name": "baggage carts", "id": 99, "trainId": 623},
{"name": "shore", "id": 2352, "trainId": 624},
{"name": "fork", "id": 1019, "trainId": 625},
{"name": "paper filer", "id": 1763, "trainId": 626},
{"name": "bicycle rack", "id": 185, "trainId": 627},
{"name": "coat rack", "id": 554, "trainId": 628},
{"name": "garland", "id": 1066, "trainId": 629},
{"name": "sports bag", "id": 2508, "trainId": 630},
{"name": "fish tank", "id": 951, "trainId": 631},
{"name": "towel dispenser", "id": 2822, "trainId": 632},
{"name": "carriage", "id": 415, "trainId": 633},
{"name": "brochure", "id": 297, "trainId": 634},
{"name": "plaque", "id": 1914, "trainId": 635},
{"name": "stringer", "id": 2619, "trainId": 636},
{"name": "iron", "id": 1338, "trainId": 637},
{"name": "spoon", "id": 2505, "trainId": 638},
{"name": "flag pole", "id": 955, "trainId": 639},
{"name": "toilet brush", "id": 2786, "trainId": 640},
{"name": "book stand", "id": 238, "trainId": 641},
{"name": "water faucet, water tap, tap, hydrant", "id": 3000, "trainId": 642},
{"name": "ticket office", "id": 2763, "trainId": 643},
{"name": "broom", "id": 299, "trainId": 644},
{"name": "dvd", "id": 822, "trainId": 645},
{"name": "ice bucket", "id": 1288, "trainId": 646},
{"name": "carapace, shell, cuticle, shield", "id": 3101, "trainId": 647},
{"name": "tureen", "id": 2894, "trainId": 648},
{"name": "folders", "id": 992, "trainId": 649},
{"name": "chess", "id": 489, "trainId": 650},
{"name": "root", "id": 2157, "trainId": 651},
{"name": "sewing machine", "id": 2309, "trainId": 652},
{"name": "model", "id": 1576, "trainId": 653},
{"name": "pen", "id": 1810, "trainId": 654},
{"name": "violin", "id": 2964, "trainId": 655},
{"name": "sweatshirt", "id": 2662, "trainId": 656},
{"name": "recycling materials", "id": 2087, "trainId": 657},
{"name": "mitten", "id": 1569, "trainId": 658},
{"name": "chopping board, cutting board", "id": 503, "trainId": 659},
{"name": "mask", "id": 1505, "trainId": 660},
{"name": "log", "id": 1468, "trainId": 661},
{"name": "mouse, computer mouse", "id": 1613, "trainId": 662},
{"name": "grill", "id": 1138, "trainId": 663},
{"name": "hole", "id": 1256, "trainId": 664},
{"name": "target", "id": 2715, "trainId": 665},
{"name": "trash bag", "id": 2846, "trainId": 666},
{"name": "chalk", "id": 477, "trainId": 667},
{"name": "sticks", "id": 2576, "trainId": 668},
{"name": "balloon", "id": 108, "trainId": 669},
{"name": "score", "id": 2245, "trainId": 670},
{"name": "hair spray", "id": 1162, "trainId": 671},
{"name": "roll", "id": 2149, "trainId": 672},
{"name": "runner", "id": 2183, "trainId": 673},
{"name": "engine", "id": 858, "trainId": 674},
{"name": "inflatable glove", "id": 1324, "trainId": 675},
{"name": "games", "id": 1055, "trainId": 676},
{"name": "pallets", "id": 1741, "trainId": 677},
{"name": "baskets", "id": 149, "trainId": 678},
{"name": "coop", "id": 615, "trainId": 679},
{"name": "dvd player", "id": 825, "trainId": 680},
{"name": "rocking horse", "id": 2143, "trainId": 681},
{"name": "buckets", "id": 304, "trainId": 682},
{"name": "bread rolls", "id": 283, "trainId": 683},
{"name": "shawl", "id": 2322, "trainId": 684},
{"name": "watering can", "id": 3017, "trainId": 685},
{"name": "spotlights", "id": 2510, "trainId": 686},
{"name": "post-it", "id": 1960, "trainId": 687},
{"name": "bowls", "id": 265, "trainId": 688},
{"name": "security camera", "id": 2282, "trainId": 689},
{"name": "runner cloth", "id": 2184, "trainId": 690},
{"name": "lock", "id": 1461, "trainId": 691},
{"name": "alarm, warning device, alarm system", "id": 3113, "trainId": 692},
{"name": "side", "id": 2372, "trainId": 693},
{"name": "roulette", "id": 2166, "trainId": 694},
{"name": "bone", "id": 232, "trainId": 695},
{"name": "cutlery", "id": 693, "trainId": 696},
{"name": "pool balls", "id": 1945, "trainId": 697},
{"name": "wheels", "id": 3039, "trainId": 698},
{"name": "spice rack", "id": 2494, "trainId": 699},
{"name": "plant pots", "id": 1908, "trainId": 700},
{"name": "towel ring", "id": 2827, "trainId": 701},
{"name": "bread box", "id": 280, "trainId": 702},
{"name": "video", "id": 2950, "trainId": 703},
{"name": "funfair", "id": 1044, "trainId": 704},
{"name": "breads", "id": 288, "trainId": 705},
{"name": "tripod", "id": 2863, "trainId": 706},
{"name": "ironing board", "id": 1342, "trainId": 707},
{"name": "skimmer", "id": 2409, "trainId": 708},
{"name": "hollow", "id": 1258, "trainId": 709},
{"name": "scratching post", "id": 2249, "trainId": 710},
{"name": "tricycle", "id": 2862, "trainId": 711},
{"name": "file box", "id": 920, "trainId": 712},
{"name": "mountain pass", "id": 1607, "trainId": 713},
{"name": "tombstones", "id": 2802, "trainId": 714},
{"name": "cooker", "id": 610, "trainId": 715},
{"name": "card game, cards", "id": 3129, "trainId": 716},
{"name": "golf bag", "id": 1108, "trainId": 717},
{"name": "towel paper", "id": 2823, "trainId": 718},
{"name": "chaise lounge", "id": 476, "trainId": 719},
{"name": "sun", "id": 2641, "trainId": 720},
{"name": "toilet paper holder", "id": 2788, "trainId": 721},
{"name": "rake", "id": 2070, "trainId": 722},
{"name": "key", "id": 1368, "trainId": 723},
{"name": "umbrella stand", "id": 2903, "trainId": 724},
{"name": "dartboard", "id": 699, "trainId": 725},
{"name": "transformer", "id": 2844, "trainId": 726},
{"name": "fireplace utensils", "id": 942, "trainId": 727},
{"name": "sweatshirts", "id": 2663, "trainId": 728},
{
"name": "cellular telephone, cellular phone, cellphone, cell, mobile phone",
"id": 457,
"trainId": 729,
},
{"name": "tallboy", "id": 2701, "trainId": 730},
{"name": "stapler", "id": 2540, "trainId": 731},
{"name": "sauna", "id": 2231, "trainId": 732},
{"name": "test tube", "id": 2746, "trainId": 733},
{"name": "palette", "id": 1738, "trainId": 734},
{"name": "shopping carts", "id": 2350, "trainId": 735},
{"name": "tools", "id": 2808, "trainId": 736},
{"name": "push button, push, button", "id": 2025, "trainId": 737},
{"name": "star", "id": 2541, "trainId": 738},
{"name": "roof rack", "id": 2156, "trainId": 739},
{"name": "barbed wire", "id": 126, "trainId": 740},
{"name": "spray", "id": 2512, "trainId": 741},
{"name": "ear", "id": 831, "trainId": 742},
{"name": "sponge", "id": 2503, "trainId": 743},
{"name": "racket", "id": 2039, "trainId": 744},
{"name": "tins", "id": 2774, "trainId": 745},
{"name": "eyeglasses", "id": 886, "trainId": 746},
{"name": "file", "id": 919, "trainId": 747},
{"name": "scarfs", "id": 2240, "trainId": 748},
{"name": "sugar bowl", "id": 2636, "trainId": 749},
{"name": "flip flop", "id": 963, "trainId": 750},
{"name": "headstones", "id": 1218, "trainId": 751},
{"name": "laptop bag", "id": 1406, "trainId": 752},
{"name": "leash", "id": 1420, "trainId": 753},
{"name": "climbing frame", "id": 526, "trainId": 754},
{"name": "suit hanger", "id": 2639, "trainId": 755},
{"name": "floor spotlight", "id": 975, "trainId": 756},
{"name": "plate rack", "id": 1921, "trainId": 757},
{"name": "sewer", "id": 2305, "trainId": 758},
{"name": "hard drive", "id": 1193, "trainId": 759},
{"name": "sprinkler", "id": 2517, "trainId": 760},
{"name": "tools box", "id": 2809, "trainId": 761},
{"name": "necklace", "id": 1647, "trainId": 762},
{"name": "bulbs", "id": 314, "trainId": 763},
{"name": "steel industry", "id": 2560, "trainId": 764},
{"name": "club", "id": 545, "trainId": 765},
{"name": "jack", "id": 1345, "trainId": 766},
{"name": "door bars", "id": 775, "trainId": 767},
{
"name": "control panel, instrument panel, control board, board, panel",
"id": 603,
"trainId": 768,
},
{"name": "hairbrush", "id": 1163, "trainId": 769},
{"name": "napkin holder", "id": 1641, "trainId": 770},
{"name": "office", "id": 1678, "trainId": 771},
{"name": "smoke detector", "id": 2450, "trainId": 772},
{"name": "utensils", "id": 2915, "trainId": 773},
{"name": "apron", "id": 42, "trainId": 774},
{"name": "scissors", "id": 2242, "trainId": 775},
{"name": "terminal", "id": 2741, "trainId": 776},
{"name": "grinder", "id": 1143, "trainId": 777},
{"name": "entry phone", "id": 862, "trainId": 778},
{"name": "newspaper stand", "id": 1654, "trainId": 779},
{"name": "pepper shaker", "id": 1826, "trainId": 780},
{"name": "onions", "id": 1689, "trainId": 781},
{
"name": "central processing unit, cpu, c p u , central processor, processor, mainframe",
"id": 3124,
"trainId": 782,
},
{"name": "tape", "id": 2710, "trainId": 783},
{"name": "bat", "id": 152, "trainId": 784},
{"name": "coaster", "id": 549, "trainId": 785},
{"name": "calculator", "id": 360, "trainId": 786},
{"name": "potatoes", "id": 1982, "trainId": 787},
{"name": "luggage rack", "id": 1478, "trainId": 788},
{"name": "salt", "id": 2203, "trainId": 789},
{"name": "street number", "id": 2612, "trainId": 790},
{"name": "viewpoint", "id": 2956, "trainId": 791},
{"name": "sword", "id": 2681, "trainId": 792},
{"name": "cd", "id": 437, "trainId": 793},
{"name": "rowing machine", "id": 2171, "trainId": 794},
{"name": "plug", "id": 1933, "trainId": 795},
{"name": "andiron, firedog, dog, dog-iron", "id": 3110, "trainId": 796},
{"name": "pepper", "id": 1824, "trainId": 797},
{"name": "tongs", "id": 2803, "trainId": 798},
{"name": "bonfire", "id": 234, "trainId": 799},
{"name": "dog dish", "id": 764, "trainId": 800},
{"name": "belt", "id": 177, "trainId": 801},
{"name": "dumbbells", "id": 817, "trainId": 802},
{"name": "videocassette recorder, vcr", "id": 3145, "trainId": 803},
{"name": "hook", "id": 1262, "trainId": 804},
{"name": "envelopes", "id": 864, "trainId": 805},
{"name": "shower faucet", "id": 2359, "trainId": 806},
{"name": "watch", "id": 2992, "trainId": 807},
{"name": "padlock", "id": 1725, "trainId": 808},
{"name": "swimming pool ladder", "id": 2667, "trainId": 809},
{"name": "spanners", "id": 2484, "trainId": 810},
{"name": "gravy boat", "id": 1133, "trainId": 811},
{"name": "notice board", "id": 1667, "trainId": 812},
{"name": "trash bags", "id": 2847, "trainId": 813},
{"name": "fire alarm", "id": 932, "trainId": 814},
{"name": "ladle", "id": 1392, "trainId": 815},
{"name": "stethoscope", "id": 2573, "trainId": 816},
{"name": "rocket", "id": 2140, "trainId": 817},
{"name": "funnel", "id": 1046, "trainId": 818},
{"name": "bowling pins", "id": 264, "trainId": 819},
{"name": "valve", "id": 2927, "trainId": 820},
{"name": "thermometer", "id": 2752, "trainId": 821},
{"name": "cups", "id": 679, "trainId": 822},
{"name": "spice jar", "id": 2493, "trainId": 823},
{"name": "night light", "id": 1658, "trainId": 824},
{"name": "soaps", "id": 2466, "trainId": 825},
{"name": "games table", "id": 1057, "trainId": 826},
{"name": "slotted spoon", "id": 2444, "trainId": 827},
{"name": "reel", "id": 2093, "trainId": 828},
{"name": "scourer", "id": 2248, "trainId": 829},
{"name": "sleeping robe", "id": 2432, "trainId": 830},
{"name": "desk mat", "id": 726, "trainId": 831},
{"name": "dumbbell", "id": 816, "trainId": 832},
{"name": "hammer", "id": 1171, "trainId": 833},
{"name": "tie", "id": 2766, "trainId": 834},
{"name": "typewriter", "id": 2900, "trainId": 835},
{"name": "shaker", "id": 2313, "trainId": 836},
{"name": "cheese dish", "id": 488, "trainId": 837},
{"name": "sea star", "id": 2265, "trainId": 838},
{"name": "racquet", "id": 2043, "trainId": 839},
{"name": "butane gas cylinder", "id": 332, "trainId": 840},
{"name": "paper weight", "id": 1771, "trainId": 841},
{"name": "shaving brush", "id": 2320, "trainId": 842},
{"name": "sunglasses", "id": 2646, "trainId": 843},
{"name": "gear shift", "id": 1089, "trainId": 844},
{"name": "towel rail", "id": 2826, "trainId": 845},
{"name": "adding machine, totalizer, totaliser", "id": 3148, "trainId": 846},
]
def _get_ade20k_full_meta():
    # Id 0 is reserved for ignore_label; we change ignore_label from 0
    # to 255 in our pre-processing, so all ids are shifted by 1.
stuff_ids = [k["id"] for k in ADE20K_SEM_SEG_FULL_CATEGORIES]
assert len(stuff_ids) == 847, len(stuff_ids)
    # For semantic segmentation, this mapping maps from the (non-contiguous) dataset
    # category id to a contiguous stuff id in [0, 846] used in models.
stuff_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(stuff_ids)}
stuff_classes = [k["name"] for k in ADE20K_SEM_SEG_FULL_CATEGORIES]
ret = {
"stuff_dataset_id_to_contiguous_id": stuff_dataset_id_to_contiguous_id,
"stuff_classes": stuff_classes,
}
return ret
def register_all_ade20k_full(root):
root = os.path.join(root, "ADE20K_2021_17_01")
meta = _get_ade20k_full_meta()
for name, dirname in [("train", "training"), ("val", "validation")]:
image_dir = os.path.join(root, "images_detectron2", dirname)
gt_dir = os.path.join(root, "annotations_detectron2", dirname)
name = f"ade20k_full_sem_seg_{name}"
DatasetCatalog.register(
name, lambda x=image_dir, y=gt_dir: load_sem_seg(y, x, gt_ext="tif", image_ext="jpg")
)
MetadataCatalog.get(name).set(
stuff_classes=meta["stuff_classes"][:],
image_root=image_dir,
sem_seg_root=gt_dir,
evaluator_type="sem_seg",
ignore_label=65535, # NOTE: gt is saved in 16-bit TIFF images
)
_root = os.getenv("DETECTRON2_DATASETS", "datasets")
register_all_ade20k_full(_root)
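# Minimal sanity-check sketch (illustrative only, not part of the registration logic).
# It assumes only that detectron2 is installed: no image files are read, because the
# registration above merely records metadata plus a lazy loader.
if __name__ == "__main__":
    _meta = _get_ade20k_full_meta()
    # dataset ids are non-contiguous, so the first category maps to contiguous id 0
    assert _meta["stuff_dataset_id_to_contiguous_id"][ADE20K_SEM_SEG_FULL_CATEGORIES[0]["id"]] == 0
    assert len(MetadataCatalog.get("ade20k_full_sem_seg_val").stuff_classes) == 847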
| 3D-LLM-main | three_steps_3d_feature/first_step/mask2former/data/datasets/register_ade20k_full.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import os
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets import load_sem_seg
MAPILLARY_VISTAS_SEM_SEG_CATEGORIES = [
{
"color": [165, 42, 42],
"instances": True,
"readable": "Bird",
"name": "animal--bird",
"evaluate": True,
},
{
"color": [0, 192, 0],
"instances": True,
"readable": "Ground Animal",
"name": "animal--ground-animal",
"evaluate": True,
},
{
"color": [196, 196, 196],
"instances": False,
"readable": "Curb",
"name": "construction--barrier--curb",
"evaluate": True,
},
{
"color": [190, 153, 153],
"instances": False,
"readable": "Fence",
"name": "construction--barrier--fence",
"evaluate": True,
},
{
"color": [180, 165, 180],
"instances": False,
"readable": "Guard Rail",
"name": "construction--barrier--guard-rail",
"evaluate": True,
},
{
"color": [90, 120, 150],
"instances": False,
"readable": "Barrier",
"name": "construction--barrier--other-barrier",
"evaluate": True,
},
{
"color": [102, 102, 156],
"instances": False,
"readable": "Wall",
"name": "construction--barrier--wall",
"evaluate": True,
},
{
"color": [128, 64, 255],
"instances": False,
"readable": "Bike Lane",
"name": "construction--flat--bike-lane",
"evaluate": True,
},
{
"color": [140, 140, 200],
"instances": True,
"readable": "Crosswalk - Plain",
"name": "construction--flat--crosswalk-plain",
"evaluate": True,
},
{
"color": [170, 170, 170],
"instances": False,
"readable": "Curb Cut",
"name": "construction--flat--curb-cut",
"evaluate": True,
},
{
"color": [250, 170, 160],
"instances": False,
"readable": "Parking",
"name": "construction--flat--parking",
"evaluate": True,
},
{
"color": [96, 96, 96],
"instances": False,
"readable": "Pedestrian Area",
"name": "construction--flat--pedestrian-area",
"evaluate": True,
},
{
"color": [230, 150, 140],
"instances": False,
"readable": "Rail Track",
"name": "construction--flat--rail-track",
"evaluate": True,
},
{
"color": [128, 64, 128],
"instances": False,
"readable": "Road",
"name": "construction--flat--road",
"evaluate": True,
},
{
"color": [110, 110, 110],
"instances": False,
"readable": "Service Lane",
"name": "construction--flat--service-lane",
"evaluate": True,
},
{
"color": [244, 35, 232],
"instances": False,
"readable": "Sidewalk",
"name": "construction--flat--sidewalk",
"evaluate": True,
},
{
"color": [150, 100, 100],
"instances": False,
"readable": "Bridge",
"name": "construction--structure--bridge",
"evaluate": True,
},
{
"color": [70, 70, 70],
"instances": False,
"readable": "Building",
"name": "construction--structure--building",
"evaluate": True,
},
{
"color": [150, 120, 90],
"instances": False,
"readable": "Tunnel",
"name": "construction--structure--tunnel",
"evaluate": True,
},
{
"color": [220, 20, 60],
"instances": True,
"readable": "Person",
"name": "human--person",
"evaluate": True,
},
{
"color": [255, 0, 0],
"instances": True,
"readable": "Bicyclist",
"name": "human--rider--bicyclist",
"evaluate": True,
},
{
"color": [255, 0, 100],
"instances": True,
"readable": "Motorcyclist",
"name": "human--rider--motorcyclist",
"evaluate": True,
},
{
"color": [255, 0, 200],
"instances": True,
"readable": "Other Rider",
"name": "human--rider--other-rider",
"evaluate": True,
},
{
"color": [200, 128, 128],
"instances": True,
"readable": "Lane Marking - Crosswalk",
"name": "marking--crosswalk-zebra",
"evaluate": True,
},
{
"color": [255, 255, 255],
"instances": False,
"readable": "Lane Marking - General",
"name": "marking--general",
"evaluate": True,
},
{
"color": [64, 170, 64],
"instances": False,
"readable": "Mountain",
"name": "nature--mountain",
"evaluate": True,
},
{
"color": [230, 160, 50],
"instances": False,
"readable": "Sand",
"name": "nature--sand",
"evaluate": True,
},
{
"color": [70, 130, 180],
"instances": False,
"readable": "Sky",
"name": "nature--sky",
"evaluate": True,
},
{
"color": [190, 255, 255],
"instances": False,
"readable": "Snow",
"name": "nature--snow",
"evaluate": True,
},
{
"color": [152, 251, 152],
"instances": False,
"readable": "Terrain",
"name": "nature--terrain",
"evaluate": True,
},
{
"color": [107, 142, 35],
"instances": False,
"readable": "Vegetation",
"name": "nature--vegetation",
"evaluate": True,
},
{
"color": [0, 170, 30],
"instances": False,
"readable": "Water",
"name": "nature--water",
"evaluate": True,
},
{
"color": [255, 255, 128],
"instances": True,
"readable": "Banner",
"name": "object--banner",
"evaluate": True,
},
{
"color": [250, 0, 30],
"instances": True,
"readable": "Bench",
"name": "object--bench",
"evaluate": True,
},
{
"color": [100, 140, 180],
"instances": True,
"readable": "Bike Rack",
"name": "object--bike-rack",
"evaluate": True,
},
{
"color": [220, 220, 220],
"instances": True,
"readable": "Billboard",
"name": "object--billboard",
"evaluate": True,
},
{
"color": [220, 128, 128],
"instances": True,
"readable": "Catch Basin",
"name": "object--catch-basin",
"evaluate": True,
},
{
"color": [222, 40, 40],
"instances": True,
"readable": "CCTV Camera",
"name": "object--cctv-camera",
"evaluate": True,
},
{
"color": [100, 170, 30],
"instances": True,
"readable": "Fire Hydrant",
"name": "object--fire-hydrant",
"evaluate": True,
},
{
"color": [40, 40, 40],
"instances": True,
"readable": "Junction Box",
"name": "object--junction-box",
"evaluate": True,
},
{
"color": [33, 33, 33],
"instances": True,
"readable": "Mailbox",
"name": "object--mailbox",
"evaluate": True,
},
{
"color": [100, 128, 160],
"instances": True,
"readable": "Manhole",
"name": "object--manhole",
"evaluate": True,
},
{
"color": [142, 0, 0],
"instances": True,
"readable": "Phone Booth",
"name": "object--phone-booth",
"evaluate": True,
},
{
"color": [70, 100, 150],
"instances": False,
"readable": "Pothole",
"name": "object--pothole",
"evaluate": True,
},
{
"color": [210, 170, 100],
"instances": True,
"readable": "Street Light",
"name": "object--street-light",
"evaluate": True,
},
{
"color": [153, 153, 153],
"instances": True,
"readable": "Pole",
"name": "object--support--pole",
"evaluate": True,
},
{
"color": [128, 128, 128],
"instances": True,
"readable": "Traffic Sign Frame",
"name": "object--support--traffic-sign-frame",
"evaluate": True,
},
{
"color": [0, 0, 80],
"instances": True,
"readable": "Utility Pole",
"name": "object--support--utility-pole",
"evaluate": True,
},
{
"color": [250, 170, 30],
"instances": True,
"readable": "Traffic Light",
"name": "object--traffic-light",
"evaluate": True,
},
{
"color": [192, 192, 192],
"instances": True,
"readable": "Traffic Sign (Back)",
"name": "object--traffic-sign--back",
"evaluate": True,
},
{
"color": [220, 220, 0],
"instances": True,
"readable": "Traffic Sign (Front)",
"name": "object--traffic-sign--front",
"evaluate": True,
},
{
"color": [140, 140, 20],
"instances": True,
"readable": "Trash Can",
"name": "object--trash-can",
"evaluate": True,
},
{
"color": [119, 11, 32],
"instances": True,
"readable": "Bicycle",
"name": "object--vehicle--bicycle",
"evaluate": True,
},
{
"color": [150, 0, 255],
"instances": True,
"readable": "Boat",
"name": "object--vehicle--boat",
"evaluate": True,
},
{
"color": [0, 60, 100],
"instances": True,
"readable": "Bus",
"name": "object--vehicle--bus",
"evaluate": True,
},
{
"color": [0, 0, 142],
"instances": True,
"readable": "Car",
"name": "object--vehicle--car",
"evaluate": True,
},
{
"color": [0, 0, 90],
"instances": True,
"readable": "Caravan",
"name": "object--vehicle--caravan",
"evaluate": True,
},
{
"color": [0, 0, 230],
"instances": True,
"readable": "Motorcycle",
"name": "object--vehicle--motorcycle",
"evaluate": True,
},
{
"color": [0, 80, 100],
"instances": False,
"readable": "On Rails",
"name": "object--vehicle--on-rails",
"evaluate": True,
},
{
"color": [128, 64, 64],
"instances": True,
"readable": "Other Vehicle",
"name": "object--vehicle--other-vehicle",
"evaluate": True,
},
{
"color": [0, 0, 110],
"instances": True,
"readable": "Trailer",
"name": "object--vehicle--trailer",
"evaluate": True,
},
{
"color": [0, 0, 70],
"instances": True,
"readable": "Truck",
"name": "object--vehicle--truck",
"evaluate": True,
},
{
"color": [0, 0, 192],
"instances": True,
"readable": "Wheeled Slow",
"name": "object--vehicle--wheeled-slow",
"evaluate": True,
},
{
"color": [32, 32, 32],
"instances": False,
"readable": "Car Mount",
"name": "void--car-mount",
"evaluate": True,
},
{
"color": [120, 10, 10],
"instances": False,
"readable": "Ego Vehicle",
"name": "void--ego-vehicle",
"evaluate": True,
},
{
"color": [0, 0, 0],
"instances": False,
"readable": "Unlabeled",
"name": "void--unlabeled",
"evaluate": False,
},
]
def _get_mapillary_vistas_meta():
stuff_classes = [k["readable"] for k in MAPILLARY_VISTAS_SEM_SEG_CATEGORIES if k["evaluate"]]
assert len(stuff_classes) == 65
stuff_colors = [k["color"] for k in MAPILLARY_VISTAS_SEM_SEG_CATEGORIES if k["evaluate"]]
assert len(stuff_colors) == 65
ret = {
"stuff_classes": stuff_classes,
"stuff_colors": stuff_colors,
}
return ret
def register_all_mapillary_vistas(root):
root = os.path.join(root, "mapillary_vistas")
meta = _get_mapillary_vistas_meta()
for name, dirname in [("train", "training"), ("val", "validation")]:
image_dir = os.path.join(root, dirname, "images")
gt_dir = os.path.join(root, dirname, "labels")
name = f"mapillary_vistas_sem_seg_{name}"
DatasetCatalog.register(
name, lambda x=image_dir, y=gt_dir: load_sem_seg(y, x, gt_ext="png", image_ext="jpg")
)
MetadataCatalog.get(name).set(
image_root=image_dir,
sem_seg_root=gt_dir,
evaluator_type="sem_seg",
ignore_label=65, # different from other datasets, Mapillary Vistas sets ignore_label to 65
**meta,
)
_root = os.getenv("DETECTRON2_DATASETS", "datasets")
register_all_mapillary_vistas(_root)
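# Usage sketch (illustrative only; assumes detectron2 is installed). Querying the
# MetadataCatalog does not touch the Mapillary Vistas files on disk.
if __name__ == "__main__":
    _meta = MetadataCatalog.get("mapillary_vistas_sem_seg_val")
    assert len(_meta.stuff_classes) == 65 and _meta.ignore_label == 65
    # DatasetCatalog.get("mapillary_vistas_sem_seg_val") would invoke load_sem_seg and
    # therefore needs the images/labels under $DETECTRON2_DATASETS/mapillary_vistas.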
| 3D-LLM-main | three_steps_3d_feature/first_step/mask2former/data/datasets/register_mapillary_vistas.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from . import (
register_ade20k_full,
register_ade20k_panoptic,
register_coco_stuff_10k,
register_mapillary_vistas,
register_coco_panoptic_annos_semseg,
register_ade20k_instance,
register_mapillary_vistas_panoptic,
)
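# NOTE: importing this package is enough to register every dataset split declared in
# the modules above, since each of them calls its register_all_* helper at import time,
# rooted at the $DETECTRON2_DATASETS environment variable (default: "datasets").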
| 3D-LLM-main | three_steps_3d_feature/first_step/mask2former/data/datasets/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import json
import os
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.utils.file_io import PathManager
MAPILLARY_VISTAS_SEM_SEG_CATEGORIES = [
{'color': [165, 42, 42],
'id': 1,
'isthing': 1,
'name': 'Bird',
'supercategory': 'animal--bird'},
{'color': [0, 192, 0],
'id': 2,
'isthing': 1,
'name': 'Ground Animal',
'supercategory': 'animal--ground-animal'},
{'color': [196, 196, 196],
'id': 3,
'isthing': 0,
'name': 'Curb',
'supercategory': 'construction--barrier--curb'},
{'color': [190, 153, 153],
'id': 4,
'isthing': 0,
'name': 'Fence',
'supercategory': 'construction--barrier--fence'},
{'color': [180, 165, 180],
'id': 5,
'isthing': 0,
'name': 'Guard Rail',
'supercategory': 'construction--barrier--guard-rail'},
{'color': [90, 120, 150],
'id': 6,
'isthing': 0,
'name': 'Barrier',
'supercategory': 'construction--barrier--other-barrier'},
{'color': [102, 102, 156],
'id': 7,
'isthing': 0,
'name': 'Wall',
'supercategory': 'construction--barrier--wall'},
{'color': [128, 64, 255],
'id': 8,
'isthing': 0,
'name': 'Bike Lane',
'supercategory': 'construction--flat--bike-lane'},
{'color': [140, 140, 200],
'id': 9,
'isthing': 1,
'name': 'Crosswalk - Plain',
'supercategory': 'construction--flat--crosswalk-plain'},
{'color': [170, 170, 170],
'id': 10,
'isthing': 0,
'name': 'Curb Cut',
'supercategory': 'construction--flat--curb-cut'},
{'color': [250, 170, 160],
'id': 11,
'isthing': 0,
'name': 'Parking',
'supercategory': 'construction--flat--parking'},
{'color': [96, 96, 96],
'id': 12,
'isthing': 0,
'name': 'Pedestrian Area',
'supercategory': 'construction--flat--pedestrian-area'},
{'color': [230, 150, 140],
'id': 13,
'isthing': 0,
'name': 'Rail Track',
'supercategory': 'construction--flat--rail-track'},
{'color': [128, 64, 128],
'id': 14,
'isthing': 0,
'name': 'Road',
'supercategory': 'construction--flat--road'},
{'color': [110, 110, 110],
'id': 15,
'isthing': 0,
'name': 'Service Lane',
'supercategory': 'construction--flat--service-lane'},
{'color': [244, 35, 232],
'id': 16,
'isthing': 0,
'name': 'Sidewalk',
'supercategory': 'construction--flat--sidewalk'},
{'color': [150, 100, 100],
'id': 17,
'isthing': 0,
'name': 'Bridge',
'supercategory': 'construction--structure--bridge'},
{'color': [70, 70, 70],
'id': 18,
'isthing': 0,
'name': 'Building',
'supercategory': 'construction--structure--building'},
{'color': [150, 120, 90],
'id': 19,
'isthing': 0,
'name': 'Tunnel',
'supercategory': 'construction--structure--tunnel'},
{'color': [220, 20, 60],
'id': 20,
'isthing': 1,
'name': 'Person',
'supercategory': 'human--person'},
{'color': [255, 0, 0],
'id': 21,
'isthing': 1,
'name': 'Bicyclist',
'supercategory': 'human--rider--bicyclist'},
{'color': [255, 0, 100],
'id': 22,
'isthing': 1,
'name': 'Motorcyclist',
'supercategory': 'human--rider--motorcyclist'},
{'color': [255, 0, 200],
'id': 23,
'isthing': 1,
'name': 'Other Rider',
'supercategory': 'human--rider--other-rider'},
{'color': [200, 128, 128],
'id': 24,
'isthing': 1,
'name': 'Lane Marking - Crosswalk',
'supercategory': 'marking--crosswalk-zebra'},
{'color': [255, 255, 255],
'id': 25,
'isthing': 0,
'name': 'Lane Marking - General',
'supercategory': 'marking--general'},
{'color': [64, 170, 64],
'id': 26,
'isthing': 0,
'name': 'Mountain',
'supercategory': 'nature--mountain'},
{'color': [230, 160, 50],
'id': 27,
'isthing': 0,
'name': 'Sand',
'supercategory': 'nature--sand'},
{'color': [70, 130, 180],
'id': 28,
'isthing': 0,
'name': 'Sky',
'supercategory': 'nature--sky'},
{'color': [190, 255, 255],
'id': 29,
'isthing': 0,
'name': 'Snow',
'supercategory': 'nature--snow'},
{'color': [152, 251, 152],
'id': 30,
'isthing': 0,
'name': 'Terrain',
'supercategory': 'nature--terrain'},
{'color': [107, 142, 35],
'id': 31,
'isthing': 0,
'name': 'Vegetation',
'supercategory': 'nature--vegetation'},
{'color': [0, 170, 30],
'id': 32,
'isthing': 0,
'name': 'Water',
'supercategory': 'nature--water'},
{'color': [255, 255, 128],
'id': 33,
'isthing': 1,
'name': 'Banner',
'supercategory': 'object--banner'},
{'color': [250, 0, 30],
'id': 34,
'isthing': 1,
'name': 'Bench',
'supercategory': 'object--bench'},
{'color': [100, 140, 180],
'id': 35,
'isthing': 1,
'name': 'Bike Rack',
'supercategory': 'object--bike-rack'},
{'color': [220, 220, 220],
'id': 36,
'isthing': 1,
'name': 'Billboard',
'supercategory': 'object--billboard'},
{'color': [220, 128, 128],
'id': 37,
'isthing': 1,
'name': 'Catch Basin',
'supercategory': 'object--catch-basin'},
{'color': [222, 40, 40],
'id': 38,
'isthing': 1,
'name': 'CCTV Camera',
'supercategory': 'object--cctv-camera'},
{'color': [100, 170, 30],
'id': 39,
'isthing': 1,
'name': 'Fire Hydrant',
'supercategory': 'object--fire-hydrant'},
{'color': [40, 40, 40],
'id': 40,
'isthing': 1,
'name': 'Junction Box',
'supercategory': 'object--junction-box'},
{'color': [33, 33, 33],
'id': 41,
'isthing': 1,
'name': 'Mailbox',
'supercategory': 'object--mailbox'},
{'color': [100, 128, 160],
'id': 42,
'isthing': 1,
'name': 'Manhole',
'supercategory': 'object--manhole'},
{'color': [142, 0, 0],
'id': 43,
'isthing': 1,
'name': 'Phone Booth',
'supercategory': 'object--phone-booth'},
{'color': [70, 100, 150],
'id': 44,
'isthing': 0,
'name': 'Pothole',
'supercategory': 'object--pothole'},
{'color': [210, 170, 100],
'id': 45,
'isthing': 1,
'name': 'Street Light',
'supercategory': 'object--street-light'},
{'color': [153, 153, 153],
'id': 46,
'isthing': 1,
'name': 'Pole',
'supercategory': 'object--support--pole'},
{'color': [128, 128, 128],
'id': 47,
'isthing': 1,
'name': 'Traffic Sign Frame',
'supercategory': 'object--support--traffic-sign-frame'},
{'color': [0, 0, 80],
'id': 48,
'isthing': 1,
'name': 'Utility Pole',
'supercategory': 'object--support--utility-pole'},
{'color': [250, 170, 30],
'id': 49,
'isthing': 1,
'name': 'Traffic Light',
'supercategory': 'object--traffic-light'},
{'color': [192, 192, 192],
'id': 50,
'isthing': 1,
'name': 'Traffic Sign (Back)',
'supercategory': 'object--traffic-sign--back'},
{'color': [220, 220, 0],
'id': 51,
'isthing': 1,
'name': 'Traffic Sign (Front)',
'supercategory': 'object--traffic-sign--front'},
{'color': [140, 140, 20],
'id': 52,
'isthing': 1,
'name': 'Trash Can',
'supercategory': 'object--trash-can'},
{'color': [119, 11, 32],
'id': 53,
'isthing': 1,
'name': 'Bicycle',
'supercategory': 'object--vehicle--bicycle'},
{'color': [150, 0, 255],
'id': 54,
'isthing': 1,
'name': 'Boat',
'supercategory': 'object--vehicle--boat'},
{'color': [0, 60, 100],
'id': 55,
'isthing': 1,
'name': 'Bus',
'supercategory': 'object--vehicle--bus'},
{'color': [0, 0, 142],
'id': 56,
'isthing': 1,
'name': 'Car',
'supercategory': 'object--vehicle--car'},
{'color': [0, 0, 90],
'id': 57,
'isthing': 1,
'name': 'Caravan',
'supercategory': 'object--vehicle--caravan'},
{'color': [0, 0, 230],
'id': 58,
'isthing': 1,
'name': 'Motorcycle',
'supercategory': 'object--vehicle--motorcycle'},
{'color': [0, 80, 100],
'id': 59,
'isthing': 0,
'name': 'On Rails',
'supercategory': 'object--vehicle--on-rails'},
{'color': [128, 64, 64],
'id': 60,
'isthing': 1,
'name': 'Other Vehicle',
'supercategory': 'object--vehicle--other-vehicle'},
{'color': [0, 0, 110],
'id': 61,
'isthing': 1,
'name': 'Trailer',
'supercategory': 'object--vehicle--trailer'},
{'color': [0, 0, 70],
'id': 62,
'isthing': 1,
'name': 'Truck',
'supercategory': 'object--vehicle--truck'},
{'color': [0, 0, 192],
'id': 63,
'isthing': 1,
'name': 'Wheeled Slow',
'supercategory': 'object--vehicle--wheeled-slow'},
{'color': [32, 32, 32],
'id': 64,
'isthing': 0,
'name': 'Car Mount',
'supercategory': 'void--car-mount'},
{'color': [120, 10, 10],
'id': 65,
'isthing': 0,
'name': 'Ego Vehicle',
'supercategory': 'void--ego-vehicle'}
]
def load_mapillary_vistas_panoptic_json(json_file, image_dir, gt_dir, semseg_dir, meta):
"""
Args:
image_dir (str): path to the raw dataset. e.g., "~/coco/train2017".
gt_dir (str): path to the raw annotations. e.g., "~/coco/panoptic_train2017".
json_file (str): path to the json file. e.g., "~/coco/annotations/panoptic_train2017.json".
Returns:
list[dict]: a list of dicts in Detectron2 standard format. (See
`Using Custom Datasets </tutorials/datasets.html>`_ )
"""
def _convert_category_id(segment_info, meta):
if segment_info["category_id"] in meta["thing_dataset_id_to_contiguous_id"]:
segment_info["category_id"] = meta["thing_dataset_id_to_contiguous_id"][
segment_info["category_id"]
]
segment_info["isthing"] = True
else:
segment_info["category_id"] = meta["stuff_dataset_id_to_contiguous_id"][
segment_info["category_id"]
]
segment_info["isthing"] = False
return segment_info
with PathManager.open(json_file) as f:
json_info = json.load(f)
ret = []
for ann in json_info["annotations"]:
image_id = ann["image_id"]
        # TODO: currently we assume image and label have the same filename but
# different extension, and images have extension ".jpg" for COCO. Need
# to make image extension a user-provided argument if we extend this
# function to support other COCO-like datasets.
image_file = os.path.join(image_dir, os.path.splitext(ann["file_name"])[0] + ".jpg")
label_file = os.path.join(gt_dir, ann["file_name"])
sem_label_file = os.path.join(semseg_dir, ann["file_name"])
segments_info = [_convert_category_id(x, meta) for x in ann["segments_info"]]
ret.append(
{
"file_name": image_file,
"image_id": image_id,
"pan_seg_file_name": label_file,
"sem_seg_file_name": sem_label_file,
"segments_info": segments_info,
}
)
assert len(ret), f"No images found in {image_dir}!"
assert PathManager.isfile(ret[0]["file_name"]), ret[0]["file_name"]
assert PathManager.isfile(ret[0]["pan_seg_file_name"]), ret[0]["pan_seg_file_name"]
assert PathManager.isfile(ret[0]["sem_seg_file_name"]), ret[0]["sem_seg_file_name"]
return ret
def register_mapillary_vistas_panoptic(
name, metadata, image_root, panoptic_root, semantic_root, panoptic_json, instances_json=None
):
"""
Register a "standard" version of ADE20k panoptic segmentation dataset named `name`.
The dictionaries in this registered dataset follows detectron2's standard format.
Hence it's called "standard".
Args:
name (str): the name that identifies a dataset,
e.g. "ade20k_panoptic_train"
metadata (dict): extra metadata associated with this dataset.
image_root (str): directory which contains all the images
panoptic_root (str): directory which contains panoptic annotation images in COCO format
panoptic_json (str): path to the json panoptic annotation file in COCO format
sem_seg_root (none): not used, to be consistent with
`register_coco_panoptic_separated`.
instances_json (str): path to the json instance annotation file
"""
panoptic_name = name
DatasetCatalog.register(
panoptic_name,
lambda: load_mapillary_vistas_panoptic_json(
panoptic_json, image_root, panoptic_root, semantic_root, metadata
),
)
MetadataCatalog.get(panoptic_name).set(
panoptic_root=panoptic_root,
image_root=image_root,
panoptic_json=panoptic_json,
json_file=instances_json,
evaluator_type="mapillary_vistas_panoptic_seg",
ignore_label=65, # different from other datasets, Mapillary Vistas sets ignore_label to 65
label_divisor=1000,
**metadata,
)
_PREDEFINED_SPLITS_MAPILLARY_VISTAS_PANOPTIC = {
"mapillary_vistas_panoptic_train": (
"mapillary_vistas/training/images",
"mapillary_vistas/training/panoptic",
"mapillary_vistas/training/panoptic/panoptic_2018.json",
"mapillary_vistas/training/labels",
),
"mapillary_vistas_panoptic_val": (
"mapillary_vistas/validation/images",
"mapillary_vistas/validation/panoptic",
"mapillary_vistas/validation/panoptic/panoptic_2018.json",
"mapillary_vistas/validation/labels",
),
}
def get_metadata():
meta = {}
# The following metadata maps contiguous id from [0, #thing categories +
    # #stuff categories) to their names and colors. We have to duplicate the
    # same names and colors under "thing_*" and "stuff_*" because the current
    # visualization function in D2 handles thing and stuff classes differently
# due to some heuristic used in Panoptic FPN. We keep the same naming to
# enable reusing existing visualization functions.
thing_classes = [k["name"] for k in MAPILLARY_VISTAS_SEM_SEG_CATEGORIES]
thing_colors = [k["color"] for k in MAPILLARY_VISTAS_SEM_SEG_CATEGORIES]
stuff_classes = [k["name"] for k in MAPILLARY_VISTAS_SEM_SEG_CATEGORIES]
stuff_colors = [k["color"] for k in MAPILLARY_VISTAS_SEM_SEG_CATEGORIES]
meta["thing_classes"] = thing_classes
meta["thing_colors"] = thing_colors
meta["stuff_classes"] = stuff_classes
meta["stuff_colors"] = stuff_colors
# Convert category id for training:
# category id: like semantic segmentation, it is the class id for each
# pixel. Since there are some classes not used in evaluation, the category
    # id is not always contiguous and thus we have two sets of category ids:
# - original category id: category id in the original dataset, mainly
# used for evaluation.
# - contiguous category id: [0, #classes), in order to train the linear
# softmax classifier.
thing_dataset_id_to_contiguous_id = {}
stuff_dataset_id_to_contiguous_id = {}
for i, cat in enumerate(MAPILLARY_VISTAS_SEM_SEG_CATEGORIES):
if cat["isthing"]:
thing_dataset_id_to_contiguous_id[cat["id"]] = i
# else:
# stuff_dataset_id_to_contiguous_id[cat["id"]] = i
# in order to use sem_seg evaluator
stuff_dataset_id_to_contiguous_id[cat["id"]] = i
meta["thing_dataset_id_to_contiguous_id"] = thing_dataset_id_to_contiguous_id
meta["stuff_dataset_id_to_contiguous_id"] = stuff_dataset_id_to_contiguous_id
return meta
def register_all_mapillary_vistas_panoptic(root):
metadata = get_metadata()
for (
prefix,
(image_root, panoptic_root, panoptic_json, semantic_root),
    ) in _PREDEFINED_SPLITS_MAPILLARY_VISTAS_PANOPTIC.items():
        # The "standard" version of the Mapillary Vistas panoptic segmentation dataset,
        # e.g. used by Panoptic-DeepLab
register_mapillary_vistas_panoptic(
prefix,
metadata,
os.path.join(root, image_root),
os.path.join(root, panoptic_root),
os.path.join(root, semantic_root),
os.path.join(root, panoptic_json),
)
_root = os.getenv("DETECTRON2_DATASETS", "datasets")
register_all_mapillary_vistas_panoptic(_root)
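# Usage sketch (illustrative only; the metadata queries below do not read any annotation
# files, since DatasetCatalog registration above is lazy).
if __name__ == "__main__":
    _meta = MetadataCatalog.get("mapillary_vistas_panoptic_val")
    # 65 categories; label_divisor is the factor used to pack category and instance ids
    # into a single panoptic id in the label maps.
    assert len(_meta.stuff_classes) == 65 and _meta.label_divisor == 1000
    # DatasetCatalog.get(...) would parse panoptic_2018.json via
    # load_mapillary_vistas_panoptic_json and requires the dataset on disk.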
| 3D-LLM-main | three_steps_3d_feature/first_step/mask2former/data/datasets/register_mapillary_vistas_panoptic.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import os
from detectron2.data import MetadataCatalog
from detectron2.data.datasets.coco import register_coco_instances
ADE_CATEGORIES = [{'id': 7, 'name': 'bed'}, {'id': 8, 'name': 'windowpane'}, {'id': 10, 'name': 'cabinet'}, {'id': 12, 'name': 'person'}, {'id': 14, 'name': 'door'}, {'id': 15, 'name': 'table'}, {'id': 18, 'name': 'curtain'}, {'id': 19, 'name': 'chair'}, {'id': 20, 'name': 'car'}, {'id': 22, 'name': 'painting'}, {'id': 23, 'name': 'sofa'}, {'id': 24, 'name': 'shelf'}, {'id': 27, 'name': 'mirror'}, {'id': 30, 'name': 'armchair'}, {'id': 31, 'name': 'seat'}, {'id': 32, 'name': 'fence'}, {'id': 33, 'name': 'desk'}, {'id': 35, 'name': 'wardrobe'}, {'id': 36, 'name': 'lamp'}, {'id': 37, 'name': 'bathtub'}, {'id': 38, 'name': 'railing'}, {'id': 39, 'name': 'cushion'}, {'id': 41, 'name': 'box'}, {'id': 42, 'name': 'column'}, {'id': 43, 'name': 'signboard'}, {'id': 44, 'name': 'chest of drawers'}, {'id': 45, 'name': 'counter'}, {'id': 47, 'name': 'sink'}, {'id': 49, 'name': 'fireplace'}, {'id': 50, 'name': 'refrigerator'}, {'id': 53, 'name': 'stairs'}, {'id': 55, 'name': 'case'}, {'id': 56, 'name': 'pool table'}, {'id': 57, 'name': 'pillow'}, {'id': 58, 'name': 'screen door'}, {'id': 62, 'name': 'bookcase'}, {'id': 64, 'name': 'coffee table'}, {'id': 65, 'name': 'toilet'}, {'id': 66, 'name': 'flower'}, {'id': 67, 'name': 'book'}, {'id': 69, 'name': 'bench'}, {'id': 70, 'name': 'countertop'}, {'id': 71, 'name': 'stove'}, {'id': 72, 'name': 'palm'}, {'id': 73, 'name': 'kitchen island'}, {'id': 74, 'name': 'computer'}, {'id': 75, 'name': 'swivel chair'}, {'id': 76, 'name': 'boat'}, {'id': 78, 'name': 'arcade machine'}, {'id': 80, 'name': 'bus'}, {'id': 81, 'name': 'towel'}, {'id': 82, 'name': 'light'}, {'id': 83, 'name': 'truck'}, {'id': 85, 'name': 'chandelier'}, {'id': 86, 'name': 'awning'}, {'id': 87, 'name': 'streetlight'}, {'id': 88, 'name': 'booth'}, {'id': 89, 'name': 'television receiver'}, {'id': 90, 'name': 'airplane'}, {'id': 92, 'name': 'apparel'}, {'id': 93, 'name': 'pole'}, {'id': 95, 'name': 'bannister'}, {'id': 97, 'name': 'ottoman'}, {'id': 98, 'name': 'bottle'}, {'id': 102, 'name': 'van'}, {'id': 103, 'name': 'ship'}, {'id': 104, 'name': 'fountain'}, {'id': 107, 'name': 'washer'}, {'id': 108, 'name': 'plaything'}, {'id': 110, 'name': 'stool'}, {'id': 111, 'name': 'barrel'}, {'id': 112, 'name': 'basket'}, {'id': 115, 'name': 'bag'}, {'id': 116, 'name': 'minibike'}, {'id': 118, 'name': 'oven'}, {'id': 119, 'name': 'ball'}, {'id': 120, 'name': 'food'}, {'id': 121, 'name': 'step'}, {'id': 123, 'name': 'trade name'}, {'id': 124, 'name': 'microwave'}, {'id': 125, 'name': 'pot'}, {'id': 126, 'name': 'animal'}, {'id': 127, 'name': 'bicycle'}, {'id': 129, 'name': 'dishwasher'}, {'id': 130, 'name': 'screen'}, {'id': 132, 'name': 'sculpture'}, {'id': 133, 'name': 'hood'}, {'id': 134, 'name': 'sconce'}, {'id': 135, 'name': 'vase'}, {'id': 136, 'name': 'traffic light'}, {'id': 137, 'name': 'tray'}, {'id': 138, 'name': 'ashcan'}, {'id': 139, 'name': 'fan'}, {'id': 142, 'name': 'plate'}, {'id': 143, 'name': 'monitor'}, {'id': 144, 'name': 'bulletin board'}, {'id': 146, 'name': 'radiator'}, {'id': 147, 'name': 'glass'}, {'id': 148, 'name': 'clock'}, {'id': 149, 'name': 'flag'}]
_PREDEFINED_SPLITS = {
# point annotations without masks
"ade20k_instance_train": (
"ADEChallengeData2016/images/training",
"ADEChallengeData2016/ade20k_instance_train.json",
),
"ade20k_instance_val": (
"ADEChallengeData2016/images/validation",
"ADEChallengeData2016/ade20k_instance_val.json",
),
}
def _get_ade_instances_meta():
thing_ids = [k["id"] for k in ADE_CATEGORIES]
assert len(thing_ids) == 100, len(thing_ids)
# Mapping from the incontiguous ADE category id to an id in [0, 99]
thing_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(thing_ids)}
thing_classes = [k["name"] for k in ADE_CATEGORIES]
ret = {
"thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id,
"thing_classes": thing_classes,
}
return ret
def register_all_ade20k_instance(root):
for key, (image_root, json_file) in _PREDEFINED_SPLITS.items():
# Assume pre-defined datasets live in `./datasets`.
register_coco_instances(
key,
_get_ade_instances_meta(),
os.path.join(root, json_file) if "://" not in json_file else json_file,
os.path.join(root, image_root),
)
_root = os.getenv("DETECTRON2_DATASETS", "datasets")
register_all_ade20k_instance(_root)
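# Usage sketch (illustrative only; registration above is lazy, so no ADE20k files are
# read here). register_coco_instances also records json_file / image_root and selects
# a COCO-style evaluator for these instance splits.
if __name__ == "__main__":
    _meta = MetadataCatalog.get("ade20k_instance_val")
    assert len(_meta.thing_classes) == 100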
| 3D-LLM-main | three_steps_3d_feature/first_step/mask2former/data/datasets/register_ade20k_instance.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import json
import os
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets.builtin_meta import COCO_CATEGORIES
from detectron2.utils.file_io import PathManager
_PREDEFINED_SPLITS_COCO_PANOPTIC = {
"coco_2017_train_panoptic": (
# This is the original panoptic annotation directory
"coco/panoptic_train2017",
"coco/annotations/panoptic_train2017.json",
# This directory contains semantic annotations that are
# converted from panoptic annotations.
# It is used by PanopticFPN.
# You can use the script at detectron2/datasets/prepare_panoptic_fpn.py
# to create these directories.
"coco/panoptic_semseg_train2017",
),
"coco_2017_val_panoptic": (
"coco/panoptic_val2017",
"coco/annotations/panoptic_val2017.json",
"coco/panoptic_semseg_val2017",
),
}
def get_metadata():
meta = {}
# The following metadata maps contiguous id from [0, #thing categories +
    # #stuff categories) to their names and colors. We have to duplicate the
    # same names and colors under "thing_*" and "stuff_*" because the current
    # visualization function in D2 handles thing and stuff classes differently
# due to some heuristic used in Panoptic FPN. We keep the same naming to
# enable reusing existing visualization functions.
thing_classes = [k["name"] for k in COCO_CATEGORIES if k["isthing"] == 1]
thing_colors = [k["color"] for k in COCO_CATEGORIES if k["isthing"] == 1]
stuff_classes = [k["name"] for k in COCO_CATEGORIES]
stuff_colors = [k["color"] for k in COCO_CATEGORIES]
meta["thing_classes"] = thing_classes
meta["thing_colors"] = thing_colors
meta["stuff_classes"] = stuff_classes
meta["stuff_colors"] = stuff_colors
# Convert category id for training:
# category id: like semantic segmentation, it is the class id for each
# pixel. Since there are some classes not used in evaluation, the category
    # id is not always contiguous and thus we have two sets of category ids:
# - original category id: category id in the original dataset, mainly
# used for evaluation.
# - contiguous category id: [0, #classes), in order to train the linear
# softmax classifier.
thing_dataset_id_to_contiguous_id = {}
stuff_dataset_id_to_contiguous_id = {}
for i, cat in enumerate(COCO_CATEGORIES):
if cat["isthing"]:
thing_dataset_id_to_contiguous_id[cat["id"]] = i
# else:
# stuff_dataset_id_to_contiguous_id[cat["id"]] = i
# in order to use sem_seg evaluator
stuff_dataset_id_to_contiguous_id[cat["id"]] = i
meta["thing_dataset_id_to_contiguous_id"] = thing_dataset_id_to_contiguous_id
meta["stuff_dataset_id_to_contiguous_id"] = stuff_dataset_id_to_contiguous_id
return meta
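# Illustration (not part of the pipeline): COCO category ids are non-contiguous
# (1..90 with gaps), so get_metadata() remaps them; e.g. "person" (dataset id 1, the
# first entry of COCO_CATEGORIES) receives contiguous id 0 in both mappings above.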
def load_coco_panoptic_json(json_file, image_dir, gt_dir, semseg_dir, meta):
"""
Args:
image_dir (str): path to the raw dataset. e.g., "~/coco/train2017".
gt_dir (str): path to the raw annotations. e.g., "~/coco/panoptic_train2017".
json_file (str): path to the json file. e.g., "~/coco/annotations/panoptic_train2017.json".
Returns:
list[dict]: a list of dicts in Detectron2 standard format. (See
`Using Custom Datasets </tutorials/datasets.html>`_ )
"""
def _convert_category_id(segment_info, meta):
if segment_info["category_id"] in meta["thing_dataset_id_to_contiguous_id"]:
segment_info["category_id"] = meta["thing_dataset_id_to_contiguous_id"][
segment_info["category_id"]
]
segment_info["isthing"] = True
else:
segment_info["category_id"] = meta["stuff_dataset_id_to_contiguous_id"][
segment_info["category_id"]
]
segment_info["isthing"] = False
return segment_info
with PathManager.open(json_file) as f:
json_info = json.load(f)
ret = []
for ann in json_info["annotations"]:
image_id = int(ann["image_id"])
        # TODO: currently we assume image and label have the same filename but
# different extension, and images have extension ".jpg" for COCO. Need
# to make image extension a user-provided argument if we extend this
# function to support other COCO-like datasets.
image_file = os.path.join(image_dir, os.path.splitext(ann["file_name"])[0] + ".jpg")
label_file = os.path.join(gt_dir, ann["file_name"])
sem_label_file = os.path.join(semseg_dir, ann["file_name"])
segments_info = [_convert_category_id(x, meta) for x in ann["segments_info"]]
ret.append(
{
"file_name": image_file,
"image_id": image_id,
"pan_seg_file_name": label_file,
"sem_seg_file_name": sem_label_file,
"segments_info": segments_info,
}
)
assert len(ret), f"No images found in {image_dir}!"
assert PathManager.isfile(ret[0]["file_name"]), ret[0]["file_name"]
assert PathManager.isfile(ret[0]["pan_seg_file_name"]), ret[0]["pan_seg_file_name"]
assert PathManager.isfile(ret[0]["sem_seg_file_name"]), ret[0]["sem_seg_file_name"]
return ret
def register_coco_panoptic_annos_sem_seg(
name, metadata, image_root, panoptic_root, panoptic_json, sem_seg_root, instances_json
):
panoptic_name = name
delattr(MetadataCatalog.get(panoptic_name), "thing_classes")
delattr(MetadataCatalog.get(panoptic_name), "thing_colors")
MetadataCatalog.get(panoptic_name).set(
thing_classes=metadata["thing_classes"],
thing_colors=metadata["thing_colors"],
# thing_dataset_id_to_contiguous_id=metadata["thing_dataset_id_to_contiguous_id"],
)
# the name is "coco_2017_train_panoptic_with_sem_seg" and "coco_2017_val_panoptic_with_sem_seg"
semantic_name = name + "_with_sem_seg"
DatasetCatalog.register(
semantic_name,
lambda: load_coco_panoptic_json(panoptic_json, image_root, panoptic_root, sem_seg_root, metadata),
)
MetadataCatalog.get(semantic_name).set(
sem_seg_root=sem_seg_root,
panoptic_root=panoptic_root,
image_root=image_root,
panoptic_json=panoptic_json,
json_file=instances_json,
evaluator_type="coco_panoptic_seg",
ignore_label=255,
label_divisor=1000,
**metadata,
)
def register_all_coco_panoptic_annos_sem_seg(root):
for (
prefix,
(panoptic_root, panoptic_json, semantic_root),
) in _PREDEFINED_SPLITS_COCO_PANOPTIC.items():
prefix_instances = prefix[: -len("_panoptic")]
instances_meta = MetadataCatalog.get(prefix_instances)
image_root, instances_json = instances_meta.image_root, instances_meta.json_file
register_coco_panoptic_annos_sem_seg(
prefix,
get_metadata(),
image_root,
os.path.join(root, panoptic_root),
os.path.join(root, panoptic_json),
os.path.join(root, semantic_root),
instances_json,
)
_root = os.getenv("DETECTRON2_DATASETS", "datasets")
register_all_coco_panoptic_annos_sem_seg(_root)
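# Usage sketch (illustrative only; it assumes detectron2's builtin COCO splits were
# registered, which happens when detectron2.data.datasets is imported above).
if __name__ == "__main__":
    _meta = MetadataCatalog.get("coco_2017_val_panoptic_with_sem_seg")
    # 80 "thing" + 53 "stuff" categories share a single split that carries panoptic,
    # semantic and instance annotation paths.
    assert len(_meta.stuff_classes) == 133 and _meta.label_divisor == 1000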
| 3D-LLM-main | three_steps_3d_feature/first_step/mask2former/data/datasets/register_coco_panoptic_annos_semseg.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import json
import os
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.utils.file_io import PathManager
ADE20K_150_CATEGORIES = [
{"color": [120, 120, 120], "id": 0, "isthing": 0, "name": "wall"},
{"color": [180, 120, 120], "id": 1, "isthing": 0, "name": "building"},
{"color": [6, 230, 230], "id": 2, "isthing": 0, "name": "sky"},
{"color": [80, 50, 50], "id": 3, "isthing": 0, "name": "floor"},
{"color": [4, 200, 3], "id": 4, "isthing": 0, "name": "tree"},
{"color": [120, 120, 80], "id": 5, "isthing": 0, "name": "ceiling"},
{"color": [140, 140, 140], "id": 6, "isthing": 0, "name": "road, route"},
{"color": [204, 5, 255], "id": 7, "isthing": 1, "name": "bed"},
{"color": [230, 230, 230], "id": 8, "isthing": 1, "name": "window "},
{"color": [4, 250, 7], "id": 9, "isthing": 0, "name": "grass"},
{"color": [224, 5, 255], "id": 10, "isthing": 1, "name": "cabinet"},
{"color": [235, 255, 7], "id": 11, "isthing": 0, "name": "sidewalk, pavement"},
{"color": [150, 5, 61], "id": 12, "isthing": 1, "name": "person"},
{"color": [120, 120, 70], "id": 13, "isthing": 0, "name": "earth, ground"},
{"color": [8, 255, 51], "id": 14, "isthing": 1, "name": "door"},
{"color": [255, 6, 82], "id": 15, "isthing": 1, "name": "table"},
{"color": [143, 255, 140], "id": 16, "isthing": 0, "name": "mountain, mount"},
{"color": [204, 255, 4], "id": 17, "isthing": 0, "name": "plant"},
{"color": [255, 51, 7], "id": 18, "isthing": 1, "name": "curtain"},
{"color": [204, 70, 3], "id": 19, "isthing": 1, "name": "chair"},
{"color": [0, 102, 200], "id": 20, "isthing": 1, "name": "car"},
{"color": [61, 230, 250], "id": 21, "isthing": 0, "name": "water"},
{"color": [255, 6, 51], "id": 22, "isthing": 1, "name": "painting, picture"},
{"color": [11, 102, 255], "id": 23, "isthing": 1, "name": "sofa"},
{"color": [255, 7, 71], "id": 24, "isthing": 1, "name": "shelf"},
{"color": [255, 9, 224], "id": 25, "isthing": 0, "name": "house"},
{"color": [9, 7, 230], "id": 26, "isthing": 0, "name": "sea"},
{"color": [220, 220, 220], "id": 27, "isthing": 1, "name": "mirror"},
{"color": [255, 9, 92], "id": 28, "isthing": 0, "name": "rug"},
{"color": [112, 9, 255], "id": 29, "isthing": 0, "name": "field"},
{"color": [8, 255, 214], "id": 30, "isthing": 1, "name": "armchair"},
{"color": [7, 255, 224], "id": 31, "isthing": 1, "name": "seat"},
{"color": [255, 184, 6], "id": 32, "isthing": 1, "name": "fence"},
{"color": [10, 255, 71], "id": 33, "isthing": 1, "name": "desk"},
{"color": [255, 41, 10], "id": 34, "isthing": 0, "name": "rock, stone"},
{"color": [7, 255, 255], "id": 35, "isthing": 1, "name": "wardrobe, closet, press"},
{"color": [224, 255, 8], "id": 36, "isthing": 1, "name": "lamp"},
{"color": [102, 8, 255], "id": 37, "isthing": 1, "name": "tub"},
{"color": [255, 61, 6], "id": 38, "isthing": 1, "name": "rail"},
{"color": [255, 194, 7], "id": 39, "isthing": 1, "name": "cushion"},
{"color": [255, 122, 8], "id": 40, "isthing": 0, "name": "base, pedestal, stand"},
{"color": [0, 255, 20], "id": 41, "isthing": 1, "name": "box"},
{"color": [255, 8, 41], "id": 42, "isthing": 1, "name": "column, pillar"},
{"color": [255, 5, 153], "id": 43, "isthing": 1, "name": "signboard, sign"},
{
"color": [6, 51, 255],
"id": 44,
"isthing": 1,
"name": "chest of drawers, chest, bureau, dresser",
},
{"color": [235, 12, 255], "id": 45, "isthing": 1, "name": "counter"},
{"color": [160, 150, 20], "id": 46, "isthing": 0, "name": "sand"},
{"color": [0, 163, 255], "id": 47, "isthing": 1, "name": "sink"},
{"color": [140, 140, 140], "id": 48, "isthing": 0, "name": "skyscraper"},
{"color": [250, 10, 15], "id": 49, "isthing": 1, "name": "fireplace"},
{"color": [20, 255, 0], "id": 50, "isthing": 1, "name": "refrigerator, icebox"},
{"color": [31, 255, 0], "id": 51, "isthing": 0, "name": "grandstand, covered stand"},
{"color": [255, 31, 0], "id": 52, "isthing": 0, "name": "path"},
{"color": [255, 224, 0], "id": 53, "isthing": 1, "name": "stairs"},
{"color": [153, 255, 0], "id": 54, "isthing": 0, "name": "runway"},
{"color": [0, 0, 255], "id": 55, "isthing": 1, "name": "case, display case, showcase, vitrine"},
{
"color": [255, 71, 0],
"id": 56,
"isthing": 1,
"name": "pool table, billiard table, snooker table",
},
{"color": [0, 235, 255], "id": 57, "isthing": 1, "name": "pillow"},
{"color": [0, 173, 255], "id": 58, "isthing": 1, "name": "screen door, screen"},
{"color": [31, 0, 255], "id": 59, "isthing": 0, "name": "stairway, staircase"},
{"color": [11, 200, 200], "id": 60, "isthing": 0, "name": "river"},
{"color": [255, 82, 0], "id": 61, "isthing": 0, "name": "bridge, span"},
{"color": [0, 255, 245], "id": 62, "isthing": 1, "name": "bookcase"},
{"color": [0, 61, 255], "id": 63, "isthing": 0, "name": "blind, screen"},
{"color": [0, 255, 112], "id": 64, "isthing": 1, "name": "coffee table"},
{
"color": [0, 255, 133],
"id": 65,
"isthing": 1,
"name": "toilet, can, commode, crapper, pot, potty, stool, throne",
},
{"color": [255, 0, 0], "id": 66, "isthing": 1, "name": "flower"},
{"color": [255, 163, 0], "id": 67, "isthing": 1, "name": "book"},
{"color": [255, 102, 0], "id": 68, "isthing": 0, "name": "hill"},
{"color": [194, 255, 0], "id": 69, "isthing": 1, "name": "bench"},
{"color": [0, 143, 255], "id": 70, "isthing": 1, "name": "countertop"},
{"color": [51, 255, 0], "id": 71, "isthing": 1, "name": "stove"},
{"color": [0, 82, 255], "id": 72, "isthing": 1, "name": "palm, palm tree"},
{"color": [0, 255, 41], "id": 73, "isthing": 1, "name": "kitchen island"},
{"color": [0, 255, 173], "id": 74, "isthing": 1, "name": "computer"},
{"color": [10, 0, 255], "id": 75, "isthing": 1, "name": "swivel chair"},
{"color": [173, 255, 0], "id": 76, "isthing": 1, "name": "boat"},
{"color": [0, 255, 153], "id": 77, "isthing": 0, "name": "bar"},
{"color": [255, 92, 0], "id": 78, "isthing": 1, "name": "arcade machine"},
{"color": [255, 0, 255], "id": 79, "isthing": 0, "name": "hovel, hut, hutch, shack, shanty"},
{"color": [255, 0, 245], "id": 80, "isthing": 1, "name": "bus"},
{"color": [255, 0, 102], "id": 81, "isthing": 1, "name": "towel"},
{"color": [255, 173, 0], "id": 82, "isthing": 1, "name": "light"},
{"color": [255, 0, 20], "id": 83, "isthing": 1, "name": "truck"},
{"color": [255, 184, 184], "id": 84, "isthing": 0, "name": "tower"},
{"color": [0, 31, 255], "id": 85, "isthing": 1, "name": "chandelier"},
{"color": [0, 255, 61], "id": 86, "isthing": 1, "name": "awning, sunshade, sunblind"},
{"color": [0, 71, 255], "id": 87, "isthing": 1, "name": "street lamp"},
{"color": [255, 0, 204], "id": 88, "isthing": 1, "name": "booth"},
{"color": [0, 255, 194], "id": 89, "isthing": 1, "name": "tv"},
{"color": [0, 255, 82], "id": 90, "isthing": 1, "name": "plane"},
{"color": [0, 10, 255], "id": 91, "isthing": 0, "name": "dirt track"},
{"color": [0, 112, 255], "id": 92, "isthing": 1, "name": "clothes"},
{"color": [51, 0, 255], "id": 93, "isthing": 1, "name": "pole"},
{"color": [0, 194, 255], "id": 94, "isthing": 0, "name": "land, ground, soil"},
{
"color": [0, 122, 255],
"id": 95,
"isthing": 1,
"name": "bannister, banister, balustrade, balusters, handrail",
},
{
"color": [0, 255, 163],
"id": 96,
"isthing": 0,
"name": "escalator, moving staircase, moving stairway",
},
{
"color": [255, 153, 0],
"id": 97,
"isthing": 1,
"name": "ottoman, pouf, pouffe, puff, hassock",
},
{"color": [0, 255, 10], "id": 98, "isthing": 1, "name": "bottle"},
{"color": [255, 112, 0], "id": 99, "isthing": 0, "name": "buffet, counter, sideboard"},
{
"color": [143, 255, 0],
"id": 100,
"isthing": 0,
"name": "poster, posting, placard, notice, bill, card",
},
{"color": [82, 0, 255], "id": 101, "isthing": 0, "name": "stage"},
{"color": [163, 255, 0], "id": 102, "isthing": 1, "name": "van"},
{"color": [255, 235, 0], "id": 103, "isthing": 1, "name": "ship"},
{"color": [8, 184, 170], "id": 104, "isthing": 1, "name": "fountain"},
{
"color": [133, 0, 255],
"id": 105,
"isthing": 0,
"name": "conveyer belt, conveyor belt, conveyer, conveyor, transporter",
},
{"color": [0, 255, 92], "id": 106, "isthing": 0, "name": "canopy"},
{
"color": [184, 0, 255],
"id": 107,
"isthing": 1,
"name": "washer, automatic washer, washing machine",
},
{"color": [255, 0, 31], "id": 108, "isthing": 1, "name": "plaything, toy"},
{"color": [0, 184, 255], "id": 109, "isthing": 0, "name": "pool"},
{"color": [0, 214, 255], "id": 110, "isthing": 1, "name": "stool"},
{"color": [255, 0, 112], "id": 111, "isthing": 1, "name": "barrel, cask"},
{"color": [92, 255, 0], "id": 112, "isthing": 1, "name": "basket, handbasket"},
{"color": [0, 224, 255], "id": 113, "isthing": 0, "name": "falls"},
{"color": [112, 224, 255], "id": 114, "isthing": 0, "name": "tent"},
{"color": [70, 184, 160], "id": 115, "isthing": 1, "name": "bag"},
{"color": [163, 0, 255], "id": 116, "isthing": 1, "name": "minibike, motorbike"},
{"color": [153, 0, 255], "id": 117, "isthing": 0, "name": "cradle"},
{"color": [71, 255, 0], "id": 118, "isthing": 1, "name": "oven"},
{"color": [255, 0, 163], "id": 119, "isthing": 1, "name": "ball"},
{"color": [255, 204, 0], "id": 120, "isthing": 1, "name": "food, solid food"},
{"color": [255, 0, 143], "id": 121, "isthing": 1, "name": "step, stair"},
{"color": [0, 255, 235], "id": 122, "isthing": 0, "name": "tank, storage tank"},
{"color": [133, 255, 0], "id": 123, "isthing": 1, "name": "trade name"},
{"color": [255, 0, 235], "id": 124, "isthing": 1, "name": "microwave"},
{"color": [245, 0, 255], "id": 125, "isthing": 1, "name": "pot"},
{"color": [255, 0, 122], "id": 126, "isthing": 1, "name": "animal"},
{"color": [255, 245, 0], "id": 127, "isthing": 1, "name": "bicycle"},
{"color": [10, 190, 212], "id": 128, "isthing": 0, "name": "lake"},
{"color": [214, 255, 0], "id": 129, "isthing": 1, "name": "dishwasher"},
{"color": [0, 204, 255], "id": 130, "isthing": 1, "name": "screen"},
{"color": [20, 0, 255], "id": 131, "isthing": 0, "name": "blanket, cover"},
{"color": [255, 255, 0], "id": 132, "isthing": 1, "name": "sculpture"},
{"color": [0, 153, 255], "id": 133, "isthing": 1, "name": "hood, exhaust hood"},
{"color": [0, 41, 255], "id": 134, "isthing": 1, "name": "sconce"},
{"color": [0, 255, 204], "id": 135, "isthing": 1, "name": "vase"},
{"color": [41, 0, 255], "id": 136, "isthing": 1, "name": "traffic light"},
{"color": [41, 255, 0], "id": 137, "isthing": 1, "name": "tray"},
{"color": [173, 0, 255], "id": 138, "isthing": 1, "name": "trash can"},
{"color": [0, 245, 255], "id": 139, "isthing": 1, "name": "fan"},
{"color": [71, 0, 255], "id": 140, "isthing": 0, "name": "pier"},
{"color": [122, 0, 255], "id": 141, "isthing": 0, "name": "crt screen"},
{"color": [0, 255, 184], "id": 142, "isthing": 1, "name": "plate"},
{"color": [0, 92, 255], "id": 143, "isthing": 1, "name": "monitor"},
{"color": [184, 255, 0], "id": 144, "isthing": 1, "name": "bulletin board"},
{"color": [0, 133, 255], "id": 145, "isthing": 0, "name": "shower"},
{"color": [255, 214, 0], "id": 146, "isthing": 1, "name": "radiator"},
{"color": [25, 194, 194], "id": 147, "isthing": 1, "name": "glass, drinking glass"},
{"color": [102, 255, 0], "id": 148, "isthing": 1, "name": "clock"},
{"color": [92, 0, 255], "id": 149, "isthing": 1, "name": "flag"},
]
ADE20k_COLORS = [k["color"] for k in ADE20K_150_CATEGORIES]
MetadataCatalog.get("ade20k_sem_seg_train").set(
stuff_colors=ADE20k_COLORS[:],
)
MetadataCatalog.get("ade20k_sem_seg_val").set(
stuff_colors=ADE20k_COLORS[:],
)
def load_ade20k_panoptic_json(json_file, image_dir, gt_dir, semseg_dir, meta):
"""
Args:
image_dir (str): path to the raw dataset. e.g., "~/coco/train2017".
gt_dir (str): path to the raw annotations. e.g., "~/coco/panoptic_train2017".
json_file (str): path to the json file. e.g., "~/coco/annotations/panoptic_train2017.json".
Returns:
list[dict]: a list of dicts in Detectron2 standard format. (See
`Using Custom Datasets </tutorials/datasets.html>`_ )
"""
def _convert_category_id(segment_info, meta):
if segment_info["category_id"] in meta["thing_dataset_id_to_contiguous_id"]:
segment_info["category_id"] = meta["thing_dataset_id_to_contiguous_id"][
segment_info["category_id"]
]
segment_info["isthing"] = True
else:
segment_info["category_id"] = meta["stuff_dataset_id_to_contiguous_id"][
segment_info["category_id"]
]
segment_info["isthing"] = False
return segment_info
with PathManager.open(json_file) as f:
json_info = json.load(f)
ret = []
for ann in json_info["annotations"]:
image_id = ann["image_id"]
        # TODO: currently we assume image and label have the same filename but
# different extension, and images have extension ".jpg" for COCO. Need
# to make image extension a user-provided argument if we extend this
# function to support other COCO-like datasets.
image_file = os.path.join(image_dir, os.path.splitext(ann["file_name"])[0] + ".jpg")
label_file = os.path.join(gt_dir, ann["file_name"])
sem_label_file = os.path.join(semseg_dir, ann["file_name"])
segments_info = [_convert_category_id(x, meta) for x in ann["segments_info"]]
ret.append(
{
"file_name": image_file,
"image_id": image_id,
"pan_seg_file_name": label_file,
"sem_seg_file_name": sem_label_file,
"segments_info": segments_info,
}
)
assert len(ret), f"No images found in {image_dir}!"
assert PathManager.isfile(ret[0]["file_name"]), ret[0]["file_name"]
assert PathManager.isfile(ret[0]["pan_seg_file_name"]), ret[0]["pan_seg_file_name"]
assert PathManager.isfile(ret[0]["sem_seg_file_name"]), ret[0]["sem_seg_file_name"]
return ret
def register_ade20k_panoptic(
name, metadata, image_root, panoptic_root, semantic_root, panoptic_json, instances_json=None
):
"""
Register a "standard" version of ADE20k panoptic segmentation dataset named `name`.
    The dictionaries in this registered dataset follow detectron2's standard format.
Hence it's called "standard".
Args:
name (str): the name that identifies a dataset,
e.g. "ade20k_panoptic_train"
metadata (dict): extra metadata associated with this dataset.
image_root (str): directory which contains all the images
panoptic_root (str): directory which contains panoptic annotation images in COCO format
panoptic_json (str): path to the json panoptic annotation file in COCO format
        semantic_root (str): directory which contains the per-pixel semantic annotations
            (converted from the panoptic annotations).
instances_json (str): path to the json instance annotation file
"""
panoptic_name = name
DatasetCatalog.register(
panoptic_name,
lambda: load_ade20k_panoptic_json(
panoptic_json, image_root, panoptic_root, semantic_root, metadata
),
)
MetadataCatalog.get(panoptic_name).set(
panoptic_root=panoptic_root,
image_root=image_root,
panoptic_json=panoptic_json,
json_file=instances_json,
evaluator_type="ade20k_panoptic_seg",
ignore_label=255,
label_divisor=1000,
**metadata,
)
_PREDEFINED_SPLITS_ADE20K_PANOPTIC = {
"ade20k_panoptic_train": (
"ADEChallengeData2016/images/training",
"ADEChallengeData2016/ade20k_panoptic_train",
"ADEChallengeData2016/ade20k_panoptic_train.json",
"ADEChallengeData2016/annotations_detectron2/training",
"ADEChallengeData2016/ade20k_instance_train.json",
),
"ade20k_panoptic_val": (
"ADEChallengeData2016/images/validation",
"ADEChallengeData2016/ade20k_panoptic_val",
"ADEChallengeData2016/ade20k_panoptic_val.json",
"ADEChallengeData2016/annotations_detectron2/validation",
"ADEChallengeData2016/ade20k_instance_val.json",
),
}
def get_metadata():
meta = {}
# The following metadata maps contiguous id from [0, #thing categories +
    # #stuff categories) to their names and colors. We have to duplicate the
    # same names and colors under "thing_*" and "stuff_*" because the current
    # visualization function in D2 handles thing and stuff classes differently
# due to some heuristic used in Panoptic FPN. We keep the same naming to
# enable reusing existing visualization functions.
thing_classes = [k["name"] for k in ADE20K_150_CATEGORIES if k["isthing"] == 1]
thing_colors = [k["color"] for k in ADE20K_150_CATEGORIES if k["isthing"] == 1]
stuff_classes = [k["name"] for k in ADE20K_150_CATEGORIES]
stuff_colors = [k["color"] for k in ADE20K_150_CATEGORIES]
meta["thing_classes"] = thing_classes
meta["thing_colors"] = thing_colors
meta["stuff_classes"] = stuff_classes
meta["stuff_colors"] = stuff_colors
# Convert category id for training:
# category id: like semantic segmentation, it is the class id for each
# pixel. Since there are some classes not used in evaluation, the category
    # id is not always contiguous and thus we have two sets of category ids:
# - original category id: category id in the original dataset, mainly
# used for evaluation.
# - contiguous category id: [0, #classes), in order to train the linear
# softmax classifier.
thing_dataset_id_to_contiguous_id = {}
stuff_dataset_id_to_contiguous_id = {}
for i, cat in enumerate(ADE20K_150_CATEGORIES):
if cat["isthing"]:
thing_dataset_id_to_contiguous_id[cat["id"]] = i
# else:
# stuff_dataset_id_to_contiguous_id[cat["id"]] = i
# in order to use sem_seg evaluator
stuff_dataset_id_to_contiguous_id[cat["id"]] = i
meta["thing_dataset_id_to_contiguous_id"] = thing_dataset_id_to_contiguous_id
meta["stuff_dataset_id_to_contiguous_id"] = stuff_dataset_id_to_contiguous_id
return meta
def register_all_ade20k_panoptic(root):
metadata = get_metadata()
for (
prefix,
(image_root, panoptic_root, panoptic_json, semantic_root, instance_json),
) in _PREDEFINED_SPLITS_ADE20K_PANOPTIC.items():
# The "standard" version of COCO panoptic segmentation dataset,
# e.g. used by Panoptic-DeepLab
register_ade20k_panoptic(
prefix,
metadata,
os.path.join(root, image_root),
os.path.join(root, panoptic_root),
os.path.join(root, semantic_root),
os.path.join(root, panoptic_json),
os.path.join(root, instance_json),
)
_root = os.getenv("DETECTRON2_DATASETS", "datasets")
register_all_ade20k_panoptic(_root)
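# Usage sketch (illustrative only; the metadata lookups below do not require the
# ADE20k files to be present).
if __name__ == "__main__":
    _meta = MetadataCatalog.get("ade20k_panoptic_val")
    # 150 semantic categories overall, of which 100 are "thing" (instance) classes
    assert len(_meta.stuff_classes) == 150 and len(_meta.thing_classes) == 100
    assert _meta.ignore_label == 255 and _meta.label_divisor == 1000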
| 3D-LLM-main | three_steps_3d_feature/first_step/mask2former/data/datasets/register_ade20k_panoptic.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import os
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets import load_sem_seg
COCO_CATEGORIES = [
{"color": [220, 20, 60], "isthing": 1, "id": 1, "name": "person"},
{"color": [119, 11, 32], "isthing": 1, "id": 2, "name": "bicycle"},
{"color": [0, 0, 142], "isthing": 1, "id": 3, "name": "car"},
{"color": [0, 0, 230], "isthing": 1, "id": 4, "name": "motorcycle"},
{"color": [106, 0, 228], "isthing": 1, "id": 5, "name": "airplane"},
{"color": [0, 60, 100], "isthing": 1, "id": 6, "name": "bus"},
{"color": [0, 80, 100], "isthing": 1, "id": 7, "name": "train"},
{"color": [0, 0, 70], "isthing": 1, "id": 8, "name": "truck"},
{"color": [0, 0, 192], "isthing": 1, "id": 9, "name": "boat"},
{"color": [250, 170, 30], "isthing": 1, "id": 10, "name": "traffic light"},
{"color": [100, 170, 30], "isthing": 1, "id": 11, "name": "fire hydrant"},
{"color": [220, 220, 0], "isthing": 1, "id": 13, "name": "stop sign"},
{"color": [175, 116, 175], "isthing": 1, "id": 14, "name": "parking meter"},
{"color": [250, 0, 30], "isthing": 1, "id": 15, "name": "bench"},
{"color": [165, 42, 42], "isthing": 1, "id": 16, "name": "bird"},
{"color": [255, 77, 255], "isthing": 1, "id": 17, "name": "cat"},
{"color": [0, 226, 252], "isthing": 1, "id": 18, "name": "dog"},
{"color": [182, 182, 255], "isthing": 1, "id": 19, "name": "horse"},
{"color": [0, 82, 0], "isthing": 1, "id": 20, "name": "sheep"},
{"color": [120, 166, 157], "isthing": 1, "id": 21, "name": "cow"},
{"color": [110, 76, 0], "isthing": 1, "id": 22, "name": "elephant"},
{"color": [174, 57, 255], "isthing": 1, "id": 23, "name": "bear"},
{"color": [199, 100, 0], "isthing": 1, "id": 24, "name": "zebra"},
{"color": [72, 0, 118], "isthing": 1, "id": 25, "name": "giraffe"},
{"color": [255, 179, 240], "isthing": 1, "id": 27, "name": "backpack"},
{"color": [0, 125, 92], "isthing": 1, "id": 28, "name": "umbrella"},
{"color": [209, 0, 151], "isthing": 1, "id": 31, "name": "handbag"},
{"color": [188, 208, 182], "isthing": 1, "id": 32, "name": "tie"},
{"color": [0, 220, 176], "isthing": 1, "id": 33, "name": "suitcase"},
{"color": [255, 99, 164], "isthing": 1, "id": 34, "name": "frisbee"},
{"color": [92, 0, 73], "isthing": 1, "id": 35, "name": "skis"},
{"color": [133, 129, 255], "isthing": 1, "id": 36, "name": "snowboard"},
{"color": [78, 180, 255], "isthing": 1, "id": 37, "name": "sports ball"},
{"color": [0, 228, 0], "isthing": 1, "id": 38, "name": "kite"},
{"color": [174, 255, 243], "isthing": 1, "id": 39, "name": "baseball bat"},
{"color": [45, 89, 255], "isthing": 1, "id": 40, "name": "baseball glove"},
{"color": [134, 134, 103], "isthing": 1, "id": 41, "name": "skateboard"},
{"color": [145, 148, 174], "isthing": 1, "id": 42, "name": "surfboard"},
{"color": [255, 208, 186], "isthing": 1, "id": 43, "name": "tennis racket"},
{"color": [197, 226, 255], "isthing": 1, "id": 44, "name": "bottle"},
{"color": [171, 134, 1], "isthing": 1, "id": 46, "name": "wine glass"},
{"color": [109, 63, 54], "isthing": 1, "id": 47, "name": "cup"},
{"color": [207, 138, 255], "isthing": 1, "id": 48, "name": "fork"},
{"color": [151, 0, 95], "isthing": 1, "id": 49, "name": "knife"},
{"color": [9, 80, 61], "isthing": 1, "id": 50, "name": "spoon"},
{"color": [84, 105, 51], "isthing": 1, "id": 51, "name": "bowl"},
{"color": [74, 65, 105], "isthing": 1, "id": 52, "name": "banana"},
{"color": [166, 196, 102], "isthing": 1, "id": 53, "name": "apple"},
{"color": [208, 195, 210], "isthing": 1, "id": 54, "name": "sandwich"},
{"color": [255, 109, 65], "isthing": 1, "id": 55, "name": "orange"},
{"color": [0, 143, 149], "isthing": 1, "id": 56, "name": "broccoli"},
{"color": [179, 0, 194], "isthing": 1, "id": 57, "name": "carrot"},
{"color": [209, 99, 106], "isthing": 1, "id": 58, "name": "hot dog"},
{"color": [5, 121, 0], "isthing": 1, "id": 59, "name": "pizza"},
{"color": [227, 255, 205], "isthing": 1, "id": 60, "name": "donut"},
{"color": [147, 186, 208], "isthing": 1, "id": 61, "name": "cake"},
{"color": [153, 69, 1], "isthing": 1, "id": 62, "name": "chair"},
{"color": [3, 95, 161], "isthing": 1, "id": 63, "name": "couch"},
{"color": [163, 255, 0], "isthing": 1, "id": 64, "name": "potted plant"},
{"color": [119, 0, 170], "isthing": 1, "id": 65, "name": "bed"},
{"color": [0, 182, 199], "isthing": 1, "id": 67, "name": "dining table"},
{"color": [0, 165, 120], "isthing": 1, "id": 70, "name": "toilet"},
{"color": [183, 130, 88], "isthing": 1, "id": 72, "name": "tv"},
{"color": [95, 32, 0], "isthing": 1, "id": 73, "name": "laptop"},
{"color": [130, 114, 135], "isthing": 1, "id": 74, "name": "mouse"},
{"color": [110, 129, 133], "isthing": 1, "id": 75, "name": "remote"},
{"color": [166, 74, 118], "isthing": 1, "id": 76, "name": "keyboard"},
{"color": [219, 142, 185], "isthing": 1, "id": 77, "name": "cell phone"},
{"color": [79, 210, 114], "isthing": 1, "id": 78, "name": "microwave"},
{"color": [178, 90, 62], "isthing": 1, "id": 79, "name": "oven"},
{"color": [65, 70, 15], "isthing": 1, "id": 80, "name": "toaster"},
{"color": [127, 167, 115], "isthing": 1, "id": 81, "name": "sink"},
{"color": [59, 105, 106], "isthing": 1, "id": 82, "name": "refrigerator"},
{"color": [142, 108, 45], "isthing": 1, "id": 84, "name": "book"},
{"color": [196, 172, 0], "isthing": 1, "id": 85, "name": "clock"},
{"color": [95, 54, 80], "isthing": 1, "id": 86, "name": "vase"},
{"color": [128, 76, 255], "isthing": 1, "id": 87, "name": "scissors"},
{"color": [201, 57, 1], "isthing": 1, "id": 88, "name": "teddy bear"},
{"color": [246, 0, 122], "isthing": 1, "id": 89, "name": "hair drier"},
{"color": [191, 162, 208], "isthing": 1, "id": 90, "name": "toothbrush"},
{"id": 92, "name": "banner", "supercategory": "textile"},
{"id": 93, "name": "blanket", "supercategory": "textile"},
{"id": 94, "name": "branch", "supercategory": "plant"},
{"id": 95, "name": "bridge", "supercategory": "building"},
{"id": 96, "name": "building-other", "supercategory": "building"},
{"id": 97, "name": "bush", "supercategory": "plant"},
{"id": 98, "name": "cabinet", "supercategory": "furniture-stuff"},
{"id": 99, "name": "cage", "supercategory": "structural"},
{"id": 100, "name": "cardboard", "supercategory": "raw-material"},
{"id": 101, "name": "carpet", "supercategory": "floor"},
{"id": 102, "name": "ceiling-other", "supercategory": "ceiling"},
{"id": 103, "name": "ceiling-tile", "supercategory": "ceiling"},
{"id": 104, "name": "cloth", "supercategory": "textile"},
{"id": 105, "name": "clothes", "supercategory": "textile"},
{"id": 106, "name": "clouds", "supercategory": "sky"},
{"id": 107, "name": "counter", "supercategory": "furniture-stuff"},
{"id": 108, "name": "cupboard", "supercategory": "furniture-stuff"},
{"id": 109, "name": "curtain", "supercategory": "textile"},
{"id": 110, "name": "desk-stuff", "supercategory": "furniture-stuff"},
{"id": 111, "name": "dirt", "supercategory": "ground"},
{"id": 112, "name": "door-stuff", "supercategory": "furniture-stuff"},
{"id": 113, "name": "fence", "supercategory": "structural"},
{"id": 114, "name": "floor-marble", "supercategory": "floor"},
{"id": 115, "name": "floor-other", "supercategory": "floor"},
{"id": 116, "name": "floor-stone", "supercategory": "floor"},
{"id": 117, "name": "floor-tile", "supercategory": "floor"},
{"id": 118, "name": "floor-wood", "supercategory": "floor"},
{"id": 119, "name": "flower", "supercategory": "plant"},
{"id": 120, "name": "fog", "supercategory": "water"},
{"id": 121, "name": "food-other", "supercategory": "food-stuff"},
{"id": 122, "name": "fruit", "supercategory": "food-stuff"},
{"id": 123, "name": "furniture-other", "supercategory": "furniture-stuff"},
{"id": 124, "name": "grass", "supercategory": "plant"},
{"id": 125, "name": "gravel", "supercategory": "ground"},
{"id": 126, "name": "ground-other", "supercategory": "ground"},
{"id": 127, "name": "hill", "supercategory": "solid"},
{"id": 128, "name": "house", "supercategory": "building"},
{"id": 129, "name": "leaves", "supercategory": "plant"},
{"id": 130, "name": "light", "supercategory": "furniture-stuff"},
{"id": 131, "name": "mat", "supercategory": "textile"},
{"id": 132, "name": "metal", "supercategory": "raw-material"},
{"id": 133, "name": "mirror-stuff", "supercategory": "furniture-stuff"},
{"id": 134, "name": "moss", "supercategory": "plant"},
{"id": 135, "name": "mountain", "supercategory": "solid"},
{"id": 136, "name": "mud", "supercategory": "ground"},
{"id": 137, "name": "napkin", "supercategory": "textile"},
{"id": 138, "name": "net", "supercategory": "structural"},
{"id": 139, "name": "paper", "supercategory": "raw-material"},
{"id": 140, "name": "pavement", "supercategory": "ground"},
{"id": 141, "name": "pillow", "supercategory": "textile"},
{"id": 142, "name": "plant-other", "supercategory": "plant"},
{"id": 143, "name": "plastic", "supercategory": "raw-material"},
{"id": 144, "name": "platform", "supercategory": "ground"},
{"id": 145, "name": "playingfield", "supercategory": "ground"},
{"id": 146, "name": "railing", "supercategory": "structural"},
{"id": 147, "name": "railroad", "supercategory": "ground"},
{"id": 148, "name": "river", "supercategory": "water"},
{"id": 149, "name": "road", "supercategory": "ground"},
{"id": 150, "name": "rock", "supercategory": "solid"},
{"id": 151, "name": "roof", "supercategory": "building"},
{"id": 152, "name": "rug", "supercategory": "textile"},
{"id": 153, "name": "salad", "supercategory": "food-stuff"},
{"id": 154, "name": "sand", "supercategory": "ground"},
{"id": 155, "name": "sea", "supercategory": "water"},
{"id": 156, "name": "shelf", "supercategory": "furniture-stuff"},
{"id": 157, "name": "sky-other", "supercategory": "sky"},
{"id": 158, "name": "skyscraper", "supercategory": "building"},
{"id": 159, "name": "snow", "supercategory": "ground"},
{"id": 160, "name": "solid-other", "supercategory": "solid"},
{"id": 161, "name": "stairs", "supercategory": "furniture-stuff"},
{"id": 162, "name": "stone", "supercategory": "solid"},
{"id": 163, "name": "straw", "supercategory": "plant"},
{"id": 164, "name": "structural-other", "supercategory": "structural"},
{"id": 165, "name": "table", "supercategory": "furniture-stuff"},
{"id": 166, "name": "tent", "supercategory": "building"},
{"id": 167, "name": "textile-other", "supercategory": "textile"},
{"id": 168, "name": "towel", "supercategory": "textile"},
{"id": 169, "name": "tree", "supercategory": "plant"},
{"id": 170, "name": "vegetable", "supercategory": "food-stuff"},
{"id": 171, "name": "wall-brick", "supercategory": "wall"},
{"id": 172, "name": "wall-concrete", "supercategory": "wall"},
{"id": 173, "name": "wall-other", "supercategory": "wall"},
{"id": 174, "name": "wall-panel", "supercategory": "wall"},
{"id": 175, "name": "wall-stone", "supercategory": "wall"},
{"id": 176, "name": "wall-tile", "supercategory": "wall"},
{"id": 177, "name": "wall-wood", "supercategory": "wall"},
{"id": 178, "name": "water-other", "supercategory": "water"},
{"id": 179, "name": "waterdrops", "supercategory": "water"},
{"id": 180, "name": "window-blind", "supercategory": "window"},
{"id": 181, "name": "window-other", "supercategory": "window"},
{"id": 182, "name": "wood", "supercategory": "solid"},
]
def _get_coco_stuff_meta():
    # Id 0 is reserved for ignore_label; we change ignore_label from 0
    # to 255 in our pre-processing.
stuff_ids = [k["id"] for k in COCO_CATEGORIES]
assert len(stuff_ids) == 171, len(stuff_ids)
    # For semantic segmentation, this mapping maps from contiguous stuff id
    # (in [0, 170], used in models) to ids in the dataset (used for processing results)
stuff_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(stuff_ids)}
stuff_classes = [k["name"] for k in COCO_CATEGORIES]
ret = {
"stuff_dataset_id_to_contiguous_id": stuff_dataset_id_to_contiguous_id,
"stuff_classes": stuff_classes,
}
return ret
def register_all_coco_stuff_10k(root):
root = os.path.join(root, "coco", "coco_stuff_10k")
meta = _get_coco_stuff_meta()
for name, image_dirname, sem_seg_dirname in [
("train", "images_detectron2/train", "annotations_detectron2/train"),
("test", "images_detectron2/test", "annotations_detectron2/test"),
]:
image_dir = os.path.join(root, image_dirname)
gt_dir = os.path.join(root, sem_seg_dirname)
name = f"coco_2017_{name}_stuff_10k_sem_seg"
DatasetCatalog.register(
name, lambda x=image_dir, y=gt_dir: load_sem_seg(y, x, gt_ext="png", image_ext="jpg")
)
MetadataCatalog.get(name).set(
image_root=image_dir,
sem_seg_root=gt_dir,
evaluator_type="sem_seg",
ignore_label=255,
**meta,
)
_root = os.getenv("DETECTRON2_DATASETS", "datasets")
register_all_coco_stuff_10k(_root)
| 3D-LLM-main | three_steps_3d_feature/first_step/mask2former/data/datasets/register_coco_stuff_10k.py |
import torch
import torchvision
import cv2
import numpy as np
from tqdm import tqdm
import os
from torch import nn
import argparse
import clip
import open_clip
LOAD_IMG_HEIGHT = 512
LOAD_IMG_WIDTH = 512
from PIL import Image
def get_bbox_around_mask(mask):
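    # mask: (img_height, img_width)
    # compute bbox around mask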
bbox = None
nonzero_inds = torch.nonzero(mask) # (num_nonzero, 2)
if nonzero_inds.numel() == 0:
topleft = [0, 0]
botright = [mask.shape[0], mask.shape[1]]
bbox = (topleft[0], topleft[1], botright[0], botright[1]) # (x0, y0, x1, y1)
else:
topleft = nonzero_inds.min(0)[0] # (2,)
botright = nonzero_inds.max(0)[0] # (2,)
bbox = (topleft[0].item(), topleft[1].item(), botright[0].item(), botright[1].item()) # (x0, y0, x1, y1)
# x0, y0, x1, y1
return bbox, nonzero_inds
if __name__ == "__main__":
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("Device:", device)
OPENCLIP_MODEL = "ViT-L-14" # "ViT-bigG-14"
OPENCLIP_DATA = "laion2b_s32b_b82k" # "laion2b_s39b_b160k"
print("Initializing model...")
model, _, preprocess = open_clip.create_model_and_transforms(OPENCLIP_MODEL, OPENCLIP_DATA)
model.visual.output_tokens = True
model.cuda()
model.eval()
tokenizer = open_clip.get_tokenizer(OPENCLIP_MODEL)
parser = argparse.ArgumentParser(description="Specify dirs")
parser.add_argument("--scene_dir_path", default="./masked_rdp_data/", type=str)
parser.add_argument("--mask_dir_path", default="./maskformer_masks/", type=str)
parser.add_argument("--save_dir_path", default="./nps_maskformer_clip/", type=str)
args = parser.parse_args()
scene_lists = sorted(os.listdir(args.scene_dir_path))
for scene in tqdm(scene_lists):
os.makedirs(os.path.join(args.save_dir_path, scene), exist_ok=True)
for file in os.listdir(os.path.join(args.mask_dir_path, scene)):
try:
INPUT_IMAGE_PATH = os.path.join(args.scene_dir_path, scene, file.replace(".pt", ".png"))
SEMIGLOBAL_FEAT_SAVE_FILE = os.path.join(args.save_dir_path, scene, file)
if os.path.isfile(SEMIGLOBAL_FEAT_SAVE_FILE):
continue
raw_image = cv2.imread(INPUT_IMAGE_PATH)
raw_image = cv2.resize(raw_image, (512, 512))
image = torch.tensor(raw_image).to(device)
"""
Extract and save global feat vec
"""
global_feat = None
with torch.cuda.amp.autocast():
_img = preprocess(Image.open(INPUT_IMAGE_PATH)).unsqueeze(0) # [1, 3, 224, 224]
imgfeat = model.visual(_img.cuda())[1] # All image token feat [1, 256, 1024]
imgfeat = torch.mean(imgfeat, dim=1)
global_feat = imgfeat.half().cuda()
global_feat = torch.nn.functional.normalize(global_feat, dim=-1) # --> (1, 1024)
FEAT_DIM = global_feat.shape[-1]
cosine_similarity = torch.nn.CosineSimilarity(dim=-1)
MASK_LOAD_FILE = os.path.join(args.mask_dir_path, scene, file)
outfeat = torch.zeros(LOAD_IMG_HEIGHT, LOAD_IMG_WIDTH, FEAT_DIM, dtype=torch.half)
mask = torch.load(MASK_LOAD_FILE).unsqueeze(0) # 1, num_masks, H, W
num_masks = mask.shape[-3]
rois = []
roi_similarities_with_global_vec = []
roi_sim_per_unit_area = []
feat_per_roi = []
roi_nonzero_inds = []
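                # For each MaskFormer instance mask: crop its bounding box from the image,
                # encode the crop with OpenCLIP, and record its cosine similarity to the
                # global image feature (boxes covering less than 0.5% of the image are skipped).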
for _i in range(num_masks):
curmask = mask[0, _i]
bbox, nonzero_inds = get_bbox_around_mask(curmask)
x0, y0, x1, y1 = bbox
bbox_area = (x1 - x0 + 1) * (y1 - y0 + 1)
img_area = LOAD_IMG_WIDTH * LOAD_IMG_HEIGHT
iou = bbox_area / img_area
if iou < 0.005:
continue
with torch.no_grad():
img_roi = image[x0:x1, y0:y1]
img_roi = Image.fromarray(img_roi.detach().cpu().numpy())
img_roi = preprocess(img_roi).unsqueeze(0).cuda()
roifeat = model.visual(img_roi)[1] # All image token feat [1, 256, 1024]
roifeat = torch.mean(roifeat, dim=1)
feat_per_roi.append(roifeat)
roi_nonzero_inds.append(nonzero_inds)
_sim = cosine_similarity(global_feat, roifeat)
rois.append(torch.tensor(list(bbox)))
roi_similarities_with_global_vec.append(_sim)
roi_sim_per_unit_area.append(_sim)
rois = torch.stack(rois)
scores = torch.cat(roi_sim_per_unit_area).to(rois.device)
retained = torchvision.ops.nms(rois.float().cpu(), scores.float().cpu(), iou_threshold=1.0)
feat_per_roi = torch.cat(feat_per_roi, dim=0) # N, 1024
retained_rois = rois[retained]
retained_scores = scores[retained]
retained_feat = feat_per_roi[retained]
retained_nonzero_inds = []
for _roiidx in range(retained.shape[0]):
retained_nonzero_inds.append(roi_nonzero_inds[retained[_roiidx].item()])
mask_sim_mat = torch.nn.functional.cosine_similarity(
retained_feat[:, :, None], retained_feat.t()[None, :, :]
)
mask_sim_mat.fill_diagonal_(0.0)
mask_sim_mat = mask_sim_mat.mean(1) # avg sim of each mask with each other mask
softmax_scores = retained_scores.cuda() - mask_sim_mat
softmax_scores = torch.nn.functional.softmax(softmax_scores, dim=0)
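                # Blend each retained region feature with the global feature using its softmax weight,
                # accumulate the result onto that mask's pixels in outfeat, and re-normalize those pixels.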
for _roiidx in range(retained.shape[0]):
_weighted_feat = (
softmax_scores[_roiidx] * global_feat + (1 - softmax_scores[_roiidx]) * retained_feat[_roiidx]
)
_weighted_feat = torch.nn.functional.normalize(_weighted_feat, dim=-1)
outfeat[retained_nonzero_inds[_roiidx][:, 0], retained_nonzero_inds[_roiidx][:, 1]] += (
_weighted_feat[0].detach().cpu().half()
)
outfeat[
retained_nonzero_inds[_roiidx][:, 0], retained_nonzero_inds[_roiidx][:, 1]
] = torch.nn.functional.normalize(
outfeat[retained_nonzero_inds[_roiidx][:, 0], retained_nonzero_inds[_roiidx][:, 1]].float(),
dim=-1,
).half()
                outfeat = outfeat.unsqueeze(0).float()  # interpolate has no half-precision CPU kernel, so cast to float
outfeat = outfeat.permute(0, 3, 1, 2) # 1, H, W, feat_dim -> 1, feat_dim, H, W
outfeat = torch.nn.functional.interpolate(outfeat, [512, 512], mode="nearest")
outfeat = outfeat.permute(0, 2, 3, 1) # 1, feat_dim, H, W --> 1, H, W, feat_dim
outfeat = torch.nn.functional.normalize(outfeat, dim=-1)
outfeat = outfeat[0].half() # --> H, W, feat_dim
torch.save(outfeat, SEMIGLOBAL_FEAT_SAVE_FILE)
            except Exception as e:
                print(SEMIGLOBAL_FEAT_SAVE_FILE, "fail:", e)
| 3D-LLM-main | three_steps_3d_feature/second_step/clip_maskformer.py |
import torch
import torchvision
import cv2
import numpy as np
from tqdm import tqdm
import os
from torch import nn
from lavis.models.eva_vit import create_eva_vit_g
import argparse
LOAD_IMG_HEIGHT = 512
LOAD_IMG_WIDTH = 512
def get_bbox_around_mask(mask):
# mask: (img_height, img_width)
# compute bbox around mask
bbox = None
nonzero_inds = torch.nonzero(mask) # (num_nonzero, 2)
if nonzero_inds.numel() == 0:
topleft = [0, 0]
botright = [mask.shape[0], mask.shape[1]]
bbox = (topleft[0], topleft[1], botright[0], botright[1]) # (x0, y0, x1, y1)
else:
topleft = nonzero_inds.min(0)[0] # (2,)
botright = nonzero_inds.max(0)[0] # (2,)
bbox = (topleft[0].item(), topleft[1].item(), botright[0].item(), botright[1].item()) # (x0, y0, x1, y1)
# x0, y0, x1, y1
return bbox, nonzero_inds
if __name__ == "__main__":
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("Device:", device)
parser = argparse.ArgumentParser(description="Specify dirs")
parser.add_argument("--scene_dir_path", default="./masked_rdp_data/", type=str)
parser.add_argument("--mask_dir_path", default="./sam_masks/", type=str)
parser.add_argument("--save_dir_path", default="./nps_sam_blip/", type=str)
args = parser.parse_args()
for scene in tqdm(sorted(os.listdir(args.scene_dir_path))):
try:
os.makedirs(os.path.join(args.save_dir_path, scene), exist_ok=True)
for file in os.listdir(os.path.join(args.mask_dir_path, scene)):
INPUT_IMAGE_PATH = os.path.join(args.scene_dir_path, scene, file.replace(".pt", ".png"))
SEMIGLOBAL_FEAT_SAVE_FILE = os.path.join(args.save_dir_path, scene, file)
if os.path.isfile(SEMIGLOBAL_FEAT_SAVE_FILE):
continue
raw_image = cv2.imread(INPUT_IMAGE_PATH)
raw_image = cv2.resize(raw_image, (512, 512))
image = torch.tensor(raw_image[:512, :512]).permute(2, 0, 1).unsqueeze(0).to(device)
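                # Encode the full frame with EVA-ViT-g, drop the last token, reshape the remaining
                # tokens into a 36x36x1408 patch grid, and average-pool them into one global feature.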
visual_encoder = create_eva_vit_g(512).to(device)
output = visual_encoder(image)
global_feat = torch.tensor(output)
global_feat = global_feat.half().cuda()
                global_feat = global_feat[:, :-1, :].reshape(1, 36, 36, 1408).permute((0, 3, 1, 2))
m = nn.AdaptiveAvgPool2d((1, 1))
global_feat = m(global_feat)
global_feat = global_feat.squeeze(-1).squeeze(-1)
global_feat = torch.nn.functional.normalize(global_feat, dim=-1)
FEAT_DIM = global_feat.shape[-1]
cosine_similarity = torch.nn.CosineSimilarity(dim=-1)
MASK_LOAD_FILE = os.path.join(args.mask_dir_path, scene, file)
outfeat = torch.zeros(512, 512, FEAT_DIM, dtype=torch.half)
print(f"Loading instance masks {MASK_LOAD_FILE}...")
mask = torch.load(MASK_LOAD_FILE).unsqueeze(0) # 1, num_masks, H, W
mask = mask[:, :, :512, :512]
num_masks = mask.shape[-3]
rois = []
roi_similarities_with_global_vec = []
roi_sim_per_unit_area = []
feat_per_roi = []
roi_nonzero_inds = []
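                # For each SAM mask: paste its bounding-box crop onto a 512x512 canvas of ones,
                # encode the canvas with the same ViT, and record its similarity to the global
                # image feature (boxes covering less than 0.5% of the image are skipped).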
for _i in range(num_masks):
curmask = mask[0, _i].long()
bbox, nonzero_inds = get_bbox_around_mask(curmask)
x0, y0, x1, y1 = bbox
bbox_area = (x1 - x0 + 1) * (y1 - y0 + 1)
img_area = LOAD_IMG_WIDTH * LOAD_IMG_HEIGHT
iou = bbox_area / img_area
if iou < 0.005:
continue
roi = torch.ones((512, 512, 3))
img_roi = torch.tensor(raw_image[:512, :512])[x0:x1, y0:y1]
roi[x0:x1, y0:y1] = img_roi
img_roi = roi.permute(2, 0, 1).unsqueeze(0).to(device)
roifeat = visual_encoder(img_roi)
roifeat = torch.tensor(roifeat)
roifeat = roifeat.half().cuda()
                    roifeat = roifeat[:, :-1, :].reshape(1, 36, 36, 1408).permute((0, 3, 1, 2))
m = nn.AdaptiveAvgPool2d((1, 1))
roifeat = m(roifeat)
roifeat = roifeat.squeeze(-1).squeeze(-1)
roifeat = torch.nn.functional.normalize(roifeat, dim=-1)
feat_per_roi.append(roifeat)
roi_nonzero_inds.append(nonzero_inds)
_sim = cosine_similarity(global_feat, roifeat)
rois.append(torch.tensor(list(bbox)))
roi_similarities_with_global_vec.append(_sim)
roi_sim_per_unit_area.append(_sim)
rois = torch.stack(rois)
scores = torch.cat(roi_sim_per_unit_area).to(rois.device)
retained = torchvision.ops.nms(rois.float().cpu(), scores.float().cpu(), iou_threshold=1.0)
feat_per_roi = torch.cat(feat_per_roi, dim=0)
print(f"retained {len(retained)} masks of {rois.shape[0]} total")
retained_rois = rois[retained]
retained_scores = scores[retained]
retained_feat = feat_per_roi[retained]
retained_nonzero_inds = []
for _roiidx in range(retained.shape[0]):
retained_nonzero_inds.append(roi_nonzero_inds[retained[_roiidx].item()])
mask_sim_mat = torch.nn.functional.cosine_similarity(
retained_feat[:, :, None], retained_feat.t()[None, :, :]
)
mask_sim_mat.fill_diagonal_(0.0)
mask_sim_mat = mask_sim_mat.mean(1) # avg sim of each mask with each other mask
softmax_scores = retained_scores.cuda() - mask_sim_mat
softmax_scores = torch.nn.functional.softmax(softmax_scores, dim=0)
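                # Blend each retained region feature with the global feature using its softmax weight,
                # accumulate the result onto that mask's pixels in outfeat, and re-normalize those pixels.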
for _roiidx in range(retained.shape[0]):
_weighted_feat = (
softmax_scores[_roiidx] * global_feat + (1 - softmax_scores[_roiidx]) * retained_feat[_roiidx]
)
_weighted_feat = torch.nn.functional.normalize(_weighted_feat, dim=-1)
outfeat[retained_nonzero_inds[_roiidx][:, 0], retained_nonzero_inds[_roiidx][:, 1]] += (
_weighted_feat[0].detach().cpu().half()
)
outfeat[
retained_nonzero_inds[_roiidx][:, 0], retained_nonzero_inds[_roiidx][:, 1]
] = torch.nn.functional.normalize(
outfeat[retained_nonzero_inds[_roiidx][:, 0], retained_nonzero_inds[_roiidx][:, 1]].float(),
dim=-1,
).half()
                outfeat = outfeat.unsqueeze(0).float()  # interpolate has no half-precision CPU kernel, so cast to float
outfeat = outfeat.permute(0, 3, 1, 2) # 1, H, W, feat_dim -> 1, feat_dim, H, W
outfeat = torch.nn.functional.interpolate(outfeat, [512, 512], mode="nearest")
outfeat = outfeat.permute(0, 2, 3, 1) # 1, feat_dim, H, W --> 1, H, W, feat_dim
outfeat = torch.nn.functional.normalize(outfeat, dim=-1)
outfeat = outfeat[0].half() # --> H, W, feat_dim
print(outfeat.shape)
torch.save(outfeat, SEMIGLOBAL_FEAT_SAVE_FILE)
        except Exception as e:
            print(scene, "failed:", e)
| 3D-LLM-main | three_steps_3d_feature/second_step/blip_sam.py |
import torch
import torchvision
import cv2
import numpy as np
from tqdm import tqdm
import os
from torch import nn
from lavis.models.eva_vit import create_eva_vit_g
import argparse
LOAD_IMG_HEIGHT = 512
LOAD_IMG_WIDTH = 512
def get_bbox_around_mask(mask):
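    # mask: (img_height, img_width)
    # compute bbox around mask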
bbox = None
nonzero_inds = torch.nonzero(mask) # (num_nonzero, 2)
if nonzero_inds.numel() == 0:
topleft = [0, 0]
botright = [mask.shape[0], mask.shape[1]]
bbox = (topleft[0], topleft[1], botright[0], botright[1]) # (x0, y0, x1, y1)
else:
topleft = nonzero_inds.min(0)[0] # (2,)
botright = nonzero_inds.max(0)[0] # (2,)
bbox = (topleft[0].item(), topleft[1].item(), botright[0].item(), botright[1].item()) # (x0, y0, x1, y1)
# x0, y0, x1, y1
return bbox, nonzero_inds
if __name__ == "__main__":
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("Device:", device)
parser = argparse.ArgumentParser(description="Specify dirs")
parser.add_argument("--scene_dir_path", default="./masked_rdp_data/", type=str)
parser.add_argument("--mask_dir_path", default="./maskformer_masks/", type=str)
parser.add_argument("--save_dir_path", default="./nps_maskformer_blip/", type=str)
args = parser.parse_args()
for scene in tqdm(os.listdir(args.scene_dir_path)):
try:
os.makedirs(os.path.join(args.save_dir_path, scene), exist_ok=True)
for file in os.listdir(os.path.join(args.mask_dir_path, scene)):
INPUT_IMAGE_PATH = os.path.join(args.scene_dir_path, scene, file.replace(".pt", ".png"))
SEMIGLOBAL_FEAT_SAVE_FILE = os.path.join(args.save_dir_path, scene, file)
if os.path.isfile(SEMIGLOBAL_FEAT_SAVE_FILE):
continue
raw_image = cv2.imread(INPUT_IMAGE_PATH)
raw_image = cv2.resize(raw_image, (512, 512))
image = torch.tensor(raw_image[:512, :512]).permute(2, 0, 1).unsqueeze(0).to(device)
visual_encoder = create_eva_vit_g(512).to(device)
output = visual_encoder(image)
global_feat = torch.tensor(output)
global_feat = global_feat.half().cuda()
                global_feat = global_feat[:, :-1, :].reshape(1, 36, 36, 1408).permute((0, 3, 1, 2))
m = nn.AdaptiveAvgPool2d((1, 1))
global_feat = m(global_feat)
global_feat = global_feat.squeeze(-1).squeeze(-1)
global_feat = torch.nn.functional.normalize(global_feat, dim=-1) # --> (1, 1024)
FEAT_DIM = global_feat.shape[-1]
cosine_similarity = torch.nn.CosineSimilarity(dim=-1)
MASK_LOAD_FILE = os.path.join(args.mask_dir_path, scene, file)
outfeat = torch.zeros(512, 512, FEAT_DIM, dtype=torch.half)
print(f"Loading instance masks {MASK_LOAD_FILE}...")
mask = torch.load(MASK_LOAD_FILE).unsqueeze(0) # 1, num_masks, H, W
mask = mask[:, :, :512, :512]
num_masks = mask.shape[-3]
rois = []
roi_similarities_with_global_vec = []
roi_sim_per_unit_area = []
feat_per_roi = []
roi_nonzero_inds = []
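                # For each MaskFormer mask: paste its bounding-box crop onto a 512x512 canvas of ones,
                # encode the canvas with EVA-ViT-g, and record its similarity to the global image
                # feature (boxes covering less than 0.5% of the image are skipped).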
for _i in range(num_masks):
curmask = mask[0, _i]
bbox, nonzero_inds = get_bbox_around_mask(curmask)
x0, y0, x1, y1 = bbox
bbox_area = (x1 - x0 + 1) * (y1 - y0 + 1)
img_area = LOAD_IMG_WIDTH * LOAD_IMG_HEIGHT
iou = bbox_area / img_area
if iou < 0.005:
continue
roi = torch.ones((512, 512, 3))
img_roi = torch.tensor(raw_image[:512, :512])[x0:x1, y0:y1]
roi[x0:x1, y0:y1] = img_roi
img_roi = roi.permute(2, 0, 1).unsqueeze(0).to(device)
roifeat = visual_encoder(img_roi)
roifeat = torch.tensor(roifeat)
roifeat = roifeat.half().cuda()
                    roifeat = roifeat[:, :-1, :].reshape(1, 36, 36, 1408).permute((0, 3, 1, 2))
m = nn.AdaptiveAvgPool2d((1, 1))
roifeat = m(roifeat)
roifeat = roifeat.squeeze(-1).squeeze(-1)
roifeat = torch.nn.functional.normalize(roifeat, dim=-1)
feat_per_roi.append(roifeat)
roi_nonzero_inds.append(nonzero_inds)
_sim = cosine_similarity(global_feat, roifeat)
rois.append(torch.tensor(list(bbox)))
roi_similarities_with_global_vec.append(_sim)
roi_sim_per_unit_area.append(_sim) # / iou)
rois = torch.stack(rois)
scores = torch.cat(roi_sim_per_unit_area).to(rois.device)
retained = torchvision.ops.nms(rois.float().cpu(), scores.float().cpu(), iou_threshold=1.0)
feat_per_roi = torch.cat(feat_per_roi, dim=0) # N, 1024
print(f"retained {len(retained)} masks of {rois.shape[0]} total")
retained_rois = rois[retained]
retained_scores = scores[retained]
retained_feat = feat_per_roi[retained]
retained_nonzero_inds = []
for _roiidx in range(retained.shape[0]):
retained_nonzero_inds.append(roi_nonzero_inds[retained[_roiidx].item()])
mask_sim_mat = torch.nn.functional.cosine_similarity(
retained_feat[:, :, None], retained_feat.t()[None, :, :]
)
mask_sim_mat.fill_diagonal_(0.0)
mask_sim_mat = mask_sim_mat.mean(1) # avg sim of each mask with each other mask
softmax_scores = retained_scores.cuda() - mask_sim_mat
softmax_scores = torch.nn.functional.softmax(softmax_scores, dim=0)
for _roiidx in range(retained.shape[0]):
_weighted_feat = (
softmax_scores[_roiidx] * global_feat + (1 - softmax_scores[_roiidx]) * retained_feat[_roiidx]
)
_weighted_feat = torch.nn.functional.normalize(_weighted_feat, dim=-1)
outfeat[retained_nonzero_inds[_roiidx][:, 0], retained_nonzero_inds[_roiidx][:, 1]] += (
_weighted_feat[0].detach().cpu().half()
)
outfeat[
retained_nonzero_inds[_roiidx][:, 0], retained_nonzero_inds[_roiidx][:, 1]
] = torch.nn.functional.normalize(
outfeat[retained_nonzero_inds[_roiidx][:, 0], retained_nonzero_inds[_roiidx][:, 1]].float(),
dim=-1,
).half()
                outfeat = outfeat.unsqueeze(0).float()  # interpolate has no half-precision CPU kernel, so cast to float
outfeat = outfeat.permute(0, 3, 1, 2) # 1, H, W, feat_dim -> 1, feat_dim, H, W
outfeat = torch.nn.functional.interpolate(outfeat, [512, 512], mode="nearest")
outfeat = outfeat.permute(0, 2, 3, 1) # 1, feat_dim, H, W --> 1, H, W, feat_dim
outfeat = torch.nn.functional.normalize(outfeat, dim=-1)
outfeat = outfeat[0].half() # --> H, W, feat_dim
print(outfeat.shape)
torch.save(outfeat, SEMIGLOBAL_FEAT_SAVE_FILE)
        except Exception as e:
            print(scene, "failed:", e)
| 3D-LLM-main | three_steps_3d_feature/second_step/blip_maskformer.py |
import torch
import torchvision
import cv2
import numpy as np
from tqdm import tqdm
import os
from torch import nn
import argparse
import clip
import open_clip
LOAD_IMG_HEIGHT = 512
LOAD_IMG_WIDTH = 512
from PIL import Image
def get_bbox_around_mask(mask):
# mask: (img_height, img_width)
# compute bbox around mask
bbox = None
nonzero_inds = torch.nonzero(mask) # (num_nonzero, 2)
if nonzero_inds.numel() == 0:
topleft = [0, 0]
botright = [mask.shape[0], mask.shape[1]]
bbox = (topleft[0], topleft[1], botright[0], botright[1]) # (x0, y0, x1, y1)
else:
topleft = nonzero_inds.min(0)[0] # (2,)
botright = nonzero_inds.max(0)[0] # (2,)
bbox = (topleft[0].item(), topleft[1].item(), botright[0].item(), botright[1].item()) # (x0, y0, x1, y1)
# x0, y0, x1, y1
return bbox, nonzero_inds
if __name__ == "__main__":
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("Device:", device)
OPENCLIP_MODEL = "ViT-L-14" # "ViT-bigG-14"
OPENCLIP_DATA = "laion2b_s32b_b82k" # "laion2b_s39b_b160k"
print("Initializing model...")
model, _, preprocess = open_clip.create_model_and_transforms(OPENCLIP_MODEL, OPENCLIP_DATA)
model.visual.output_tokens = True
model.cuda()
model.eval()
tokenizer = open_clip.get_tokenizer(OPENCLIP_MODEL)
parser = argparse.ArgumentParser(description="Specify dirs")
parser.add_argument("--scene_dir_path", default="./masked_rdp_data/", type=str)
parser.add_argument("--mask_dir_path", default="./maskformer_masks/", type=str)
parser.add_argument("--save_dir_path", default="./nps_sam_clip/", type=str)
args = parser.parse_args()
scene_lists = sorted(os.listdir(args.scene_dir_path))
for scene in tqdm(scene_lists):
os.makedirs(os.path.join(args.save_dir_path, scene), exist_ok=True)
for file in os.listdir(os.path.join(args.mask_dir_path, scene)):
try:
INPUT_IMAGE_PATH = os.path.join(args.scene_dir_path, scene, file.replace(".pt", ".png"))
SEMIGLOBAL_FEAT_SAVE_FILE = os.path.join(args.save_dir_path, scene, file)
if os.path.isfile(SEMIGLOBAL_FEAT_SAVE_FILE):
continue
raw_image = cv2.imread(INPUT_IMAGE_PATH)
raw_image = cv2.resize(raw_image, (512, 512))
image = torch.tensor(raw_image).to(device)
"""
Extract and save global feat vec
"""
global_feat = None
with torch.cuda.amp.autocast():
_img = preprocess(Image.open(INPUT_IMAGE_PATH)).unsqueeze(0) # [1, 3, 224, 224]
imgfeat = model.visual(_img.cuda())[1] # All image token feat [1, 256, 1024]
imgfeat = torch.mean(imgfeat, dim=1)
global_feat = imgfeat.half().cuda()
global_feat = torch.nn.functional.normalize(global_feat, dim=-1) # --> (1, 1024)
FEAT_DIM = global_feat.shape[-1]
cosine_similarity = torch.nn.CosineSimilarity(dim=-1)
MASK_LOAD_FILE = os.path.join(args.mask_dir_path, scene, file)
outfeat = torch.zeros(LOAD_IMG_HEIGHT, LOAD_IMG_WIDTH, FEAT_DIM, dtype=torch.half)
mask = torch.load(MASK_LOAD_FILE).unsqueeze(0) # 1, num_masks, H, W
num_masks = mask.shape[-3]
rois = []
roi_similarities_with_global_vec = []
roi_sim_per_unit_area = []
feat_per_roi = []
roi_nonzero_inds = []
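                # For each loaded mask: crop its bounding box from the image, encode the crop with
                # OpenCLIP, and record its cosine similarity to the global image feature
                # (boxes covering less than 0.5% of the image are skipped).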
for _i in range(num_masks):
curmask = mask[0, _i]
bbox, nonzero_inds = get_bbox_around_mask(curmask)
x0, y0, x1, y1 = bbox
bbox_area = (x1 - x0 + 1) * (y1 - y0 + 1)
img_area = LOAD_IMG_WIDTH * LOAD_IMG_HEIGHT
iou = bbox_area / img_area
if iou < 0.005:
continue
with torch.no_grad():
img_roi = image[x0:x1, y0:y1]
img_roi = Image.fromarray(img_roi.detach().cpu().numpy())
img_roi = preprocess(img_roi).unsqueeze(0).cuda()
roifeat = model.visual(img_roi)[1] # All image token feat [1, 256, 1024]
roifeat = torch.mean(roifeat, dim=1)
feat_per_roi.append(roifeat)
roi_nonzero_inds.append(nonzero_inds)
_sim = cosine_similarity(global_feat, roifeat)
rois.append(torch.tensor(list(bbox)))
roi_similarities_with_global_vec.append(_sim)
roi_sim_per_unit_area.append(_sim)
rois = torch.stack(rois)
scores = torch.cat(roi_sim_per_unit_area).to(rois.device)
retained = torchvision.ops.nms(rois.float().cpu(), scores.float().cpu(), iou_threshold=1.0)
feat_per_roi = torch.cat(feat_per_roi, dim=0) # N, 1024
retained_rois = rois[retained]
retained_scores = scores[retained]
retained_feat = feat_per_roi[retained]
retained_nonzero_inds = []
for _roiidx in range(retained.shape[0]):
retained_nonzero_inds.append(roi_nonzero_inds[retained[_roiidx].item()])
mask_sim_mat = torch.nn.functional.cosine_similarity(
retained_feat[:, :, None], retained_feat.t()[None, :, :]
)
mask_sim_mat.fill_diagonal_(0.0)
mask_sim_mat = mask_sim_mat.mean(1) # avg sim of each mask with each other mask
softmax_scores = retained_scores.cuda() - mask_sim_mat
softmax_scores = torch.nn.functional.softmax(softmax_scores, dim=0)
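                # Blend each retained region feature with the global feature using its softmax weight,
                # accumulate the result onto that mask's pixels in outfeat, and re-normalize those pixels.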
for _roiidx in range(retained.shape[0]):
_weighted_feat = (
softmax_scores[_roiidx] * global_feat + (1 - softmax_scores[_roiidx]) * retained_feat[_roiidx]
)
_weighted_feat = torch.nn.functional.normalize(_weighted_feat, dim=-1)
outfeat[retained_nonzero_inds[_roiidx][:, 0], retained_nonzero_inds[_roiidx][:, 1]] += (
_weighted_feat[0].detach().cpu().half()
)
outfeat[
retained_nonzero_inds[_roiidx][:, 0], retained_nonzero_inds[_roiidx][:, 1]
] = torch.nn.functional.normalize(
outfeat[retained_nonzero_inds[_roiidx][:, 0], retained_nonzero_inds[_roiidx][:, 1]].float(),
dim=-1,
).half()
                outfeat = outfeat.unsqueeze(0).float()  # interpolate has no half-precision CPU kernel, so cast to float
outfeat = outfeat.permute(0, 3, 1, 2) # 1, H, W, feat_dim -> 1, feat_dim, H, W
outfeat = torch.nn.functional.interpolate(outfeat, [512, 512], mode="nearest")
outfeat = outfeat.permute(0, 2, 3, 1) # 1, feat_dim, H, W --> 1, H, W, feat_dim
outfeat = torch.nn.functional.normalize(outfeat, dim=-1)
outfeat = outfeat[0].half() # --> H, W, feat_dim
torch.save(outfeat, SEMIGLOBAL_FEAT_SAVE_FILE)
            except Exception as e:
                print(SEMIGLOBAL_FEAT_SAVE_FILE, "fail:", e)
| 3D-LLM-main | three_steps_3d_feature/second_step/clip_sam.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import os
import sys
from omegaconf import OmegaConf
from lavis.common.registry import registry
from lavis.datasets.builders import *
from lavis.models import *
from lavis.processors import *
from lavis.tasks import *
root_dir = os.path.dirname(os.path.abspath(__file__))
default_cfg = OmegaConf.load(os.path.join(root_dir, "configs/default.yaml"))
registry.register_path("library_root", root_dir)
repo_root = os.path.join(root_dir, "..")
registry.register_path("repo_root", repo_root)
cache_root = os.path.join(repo_root, default_cfg.env.cache_root)
registry.register_path("cache_root", cache_root)
registry.register("MAX_INT", sys.maxsize)
registry.register("SPLIT_NAMES", ["train", "val", "test"])
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/__init__.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import logging
import os
import torch
import torch.distributed as dist
from lavis.common.dist_utils import get_rank, get_world_size, is_main_process, is_dist_avail_and_initialized
from lavis.common.logger import MetricLogger, SmoothedValue
from lavis.common.registry import registry
from lavis.datasets.data_utils import prepare_sample
class BaseTask:
def __init__(self, **kwargs):
super().__init__()
self.inst_id_key = "instance_id"
@classmethod
def setup_task(cls, **kwargs):
return cls()
def build_model(self, cfg):
model_config = cfg.model_cfg
model_cls = registry.get_model_class(model_config.arch)
return model_cls.from_config(model_config)
def build_datasets(self, cfg):
"""
Build a dictionary of datasets, keyed by split 'train', 'valid', 'test'.
        Download dataset and annotations automatically if they do not exist.
        Args:
            cfg (common.config.Config): configuration whose datasets_cfg specifies the datasets to build.
Returns:
dict: Dictionary of torch.utils.data.Dataset objects by split.
"""
datasets = dict()
datasets_config = cfg.datasets_cfg
assert len(datasets_config) > 0, "At least one dataset has to be specified."
for name in datasets_config:
dataset_config = datasets_config[name]
builder = registry.get_builder_class(name)(dataset_config)
dataset = builder.build_datasets()
datasets[name] = dataset
return datasets
def train_step(self, model, samples):
loss = model(samples)["loss"]
return loss
def valid_step(self, model, samples):
raise NotImplementedError
def before_evaluation(self, model, dataset, **kwargs):
model.before_evaluation(dataset=dataset, task_type=type(self))
def after_evaluation(self, **kwargs):
pass
def inference_step(self):
raise NotImplementedError
def evaluation(self, model, data_loader, cuda_enabled=True):
metric_logger = MetricLogger(delimiter=" ")
header = "Evaluation"
# TODO make it configurable
print_freq = 10
results = []
for samples in metric_logger.log_every(data_loader, print_freq, header):
samples = prepare_sample(samples, cuda_enabled=cuda_enabled)
eval_output = self.valid_step(model=model, samples=samples)
results.extend(eval_output)
if is_dist_avail_and_initialized():
dist.barrier()
return results
def train_epoch(
self,
epoch,
model,
data_loader,
optimizer,
lr_scheduler,
scaler=None,
cuda_enabled=False,
log_freq=50,
accum_grad_iters=1,
):
return self._train_inner_loop(
epoch=epoch,
iters_per_epoch=len(data_loader),
model=model,
data_loader=data_loader,
optimizer=optimizer,
scaler=scaler,
lr_scheduler=lr_scheduler,
log_freq=log_freq,
cuda_enabled=cuda_enabled,
accum_grad_iters=accum_grad_iters,
)
def train_iters(
self,
epoch,
start_iters,
iters_per_inner_epoch,
model,
data_loader,
optimizer,
lr_scheduler,
scaler=None,
cuda_enabled=False,
log_freq=50,
accum_grad_iters=1,
):
return self._train_inner_loop(
epoch=epoch,
start_iters=start_iters,
iters_per_epoch=iters_per_inner_epoch,
model=model,
data_loader=data_loader,
optimizer=optimizer,
scaler=scaler,
lr_scheduler=lr_scheduler,
log_freq=log_freq,
cuda_enabled=cuda_enabled,
accum_grad_iters=accum_grad_iters,
)
def _train_inner_loop(
self,
epoch,
iters_per_epoch,
model,
data_loader,
optimizer,
lr_scheduler,
scaler=None,
start_iters=None,
log_freq=50,
cuda_enabled=False,
accum_grad_iters=1,
):
"""
An inner training loop compatible with both epoch-based and iter-based training.
When using epoch-based, training stops after one epoch; when using iter-based,
training stops after #iters_per_epoch iterations.
"""
use_amp = scaler is not None
if not hasattr(data_loader, "__next__"):
# convert to iterator if not already
data_loader = iter(data_loader)
metric_logger = MetricLogger(delimiter=" ")
metric_logger.add_meter("lr", SmoothedValue(window_size=1, fmt="{value:.6f}"))
metric_logger.add_meter("loss", SmoothedValue(window_size=1, fmt="{value:.4f}"))
# if iter-based runner, schedule lr based on inner epoch.
logging.info(
"Start training epoch {}, {} iters per inner epoch.".format(
epoch, iters_per_epoch
)
)
header = "Train: data epoch: [{}]".format(epoch)
if start_iters is None:
# epoch-based runner
inner_epoch = epoch
else:
# In iter-based runner, we schedule the learning rate based on iterations.
inner_epoch = start_iters // iters_per_epoch
header = header + "; inner epoch [{}]".format(inner_epoch)
for i in metric_logger.log_every(range(iters_per_epoch), log_freq, header):
# if using iter-based runner, we stop after iters_per_epoch iterations.
if i >= iters_per_epoch:
break
samples = next(data_loader)
samples = prepare_sample(samples, cuda_enabled=cuda_enabled)
samples.update(
{
"epoch": inner_epoch,
"num_iters_per_epoch": iters_per_epoch,
"iters": i,
}
)
lr_scheduler.step(cur_epoch=inner_epoch, cur_step=i)
with torch.cuda.amp.autocast(enabled=use_amp):
loss = self.train_step(model=model, samples=samples)
# after_train_step()
if use_amp:
scaler.scale(loss).backward()
else:
loss.backward()
# update gradients every accum_grad_iters iterations
if (i + 1) % accum_grad_iters == 0:
optimizer.step()
optimizer.zero_grad()
metric_logger.update(loss=loss.item())
metric_logger.update(lr=optimizer.param_groups[0]["lr"])
# after train_epoch()
# gather the stats from all processes
metric_logger.synchronize_between_processes()
logging.info("Averaged stats: " + str(metric_logger.global_avg()))
return {
k: "{:.3f}".format(meter.global_avg)
for k, meter in metric_logger.meters.items()
}
@staticmethod
def save_result(result, result_dir, filename, remove_duplicate=""):
import json
result_file = os.path.join(
result_dir, "%s_rank%d.json" % (filename, get_rank())
)
final_result_file = os.path.join(result_dir, "%s.json" % filename)
json.dump(result, open(result_file, "w"))
if is_dist_avail_and_initialized():
dist.barrier()
if is_main_process():
logging.warning("rank %d starts merging results." % get_rank())
# combine results from all processes
result = []
for rank in range(get_world_size()):
result_file = os.path.join(
result_dir, "%s_rank%d.json" % (filename, rank)
)
res = json.load(open(result_file, "r"))
result += res
if remove_duplicate:
result_new = []
id_list = []
for res in result:
if res[remove_duplicate] not in id_list:
id_list.append(res[remove_duplicate])
result_new.append(res)
result = result_new
json.dump(result, open(final_result_file, "w"))
print("result file saved to %s" % final_result_file)
return final_result_file
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/tasks/base_task.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
from lavis.common.registry import registry
from lavis.tasks.base_task import BaseTask
from lavis.tasks.captioning import CaptionTask
from lavis.tasks.image_text_pretrain import ImageTextPretrainTask
from lavis.tasks.multimodal_classification import (
MultimodalClassificationTask,
)
from lavis.tasks.retrieval import RetrievalTask
from lavis.tasks.vqa import VQATask, AOKVQATask
from lavis.tasks.vqa_reading_comprehension import VQARCTask, GQARCTask
from lavis.tasks.dialogue import DialogueTask
def setup_task(cfg):
assert "task" in cfg.run_cfg, "Task name must be provided."
task_name = cfg.run_cfg.task
task = registry.get_task_class(task_name).setup_task(cfg=cfg)
assert task is not None, "Task {} not properly registered.".format(task_name)
return task
__all__ = [
"BaseTask",
"AOKVQATask",
"RetrievalTask",
"CaptionTask",
"VQATask",
"VQARCTask",
"GQARCTask",
"MultimodalClassificationTask",
# "VideoQATask",
# "VisualEntailmentTask",
"ImageTextPretrainTask",
"DialogueTask",
]
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/tasks/__init__.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import json
import logging
import os
import numpy as np
import torch
from lavis.common.dist_utils import is_main_process
from lavis.common.registry import registry
from lavis.tasks.base_task import BaseTask
@registry.register_task("retrieval")
class RetrievalTask(BaseTask):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
@classmethod
def setup_task(cls, cfg):
run_cfg = cfg.run_cfg
return cls(cfg=run_cfg)
def evaluation(self, model, data_loader, **kwargs):
# score_i2t, score_t2i = model.compute_sim_matrix(model, data_loader)
score_i2t, score_t2i = model.compute_sim_matrix(data_loader, task_cfg=self.cfg)
if is_main_process():
eval_result = self._report_metrics(
score_i2t,
score_t2i,
data_loader.dataset.txt2img,
data_loader.dataset.img2txt,
)
logging.info(eval_result)
else:
eval_result = None
return eval_result
def after_evaluation(self, val_result, **kwargs):
return val_result
@staticmethod
@torch.no_grad()
def _report_metrics(scores_i2t, scores_t2i, txt2img, img2txt):
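        # scores_i2t: (num_images, num_texts) similarity matrix; scores_t2i: (num_texts, num_images).
        # For each query, rank candidates by score and report Recall@{1, 5, 10} in both directions.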
# Images->Text
ranks = np.zeros(scores_i2t.shape[0])
for index, score in enumerate(scores_i2t):
inds = np.argsort(score)[::-1]
# Score
rank = 1e20
for i in img2txt[index]:
tmp = np.where(inds == i)[0][0]
if tmp < rank:
rank = tmp
ranks[index] = rank
# Compute metrics
tr1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
tr5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
tr10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
# Text->Images
ranks = np.zeros(scores_t2i.shape[0])
for index, score in enumerate(scores_t2i):
inds = np.argsort(score)[::-1]
ranks[index] = np.where(inds == txt2img[index])[0][0]
# Compute metrics
ir1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
ir5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
ir10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
tr_mean = (tr1 + tr5 + tr10) / 3
ir_mean = (ir1 + ir5 + ir10) / 3
r_mean = (tr_mean + ir_mean) / 2
agg_metrics = (tr1 + tr5 + tr10) / 3
eval_result = {
"txt_r1": tr1,
"txt_r5": tr5,
"txt_r10": tr10,
"txt_r_mean": tr_mean,
"img_r1": ir1,
"img_r5": ir5,
"img_r10": ir10,
"img_r_mean": ir_mean,
"r_mean": r_mean,
"agg_metrics": agg_metrics,
}
with open(
os.path.join(registry.get_path("output_dir"), "evaluate.txt"), "a"
) as f:
f.write(json.dumps(eval_result) + "\n")
return eval_result
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/tasks/retrieval.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
from lavis.common.registry import registry
from lavis.tasks.base_task import BaseTask
@registry.register_task("image_text_pretrain")
class ImageTextPretrainTask(BaseTask):
def __init__(self):
super().__init__()
def evaluation(self, model, data_loader, cuda_enabled=True):
pass
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/tasks/image_text_pretrain.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import json
import os
from lavis.common.dist_utils import main_process
from lavis.common.logger import MetricLogger
from lavis.common.registry import registry
from lavis.tasks.base_task import BaseTask
from lavis.datasets.data_utils import prepare_sample
import numpy as np
@registry.register_task("dialogue")
class DialogueTask(BaseTask):
def __init__(self, num_beams, max_len, min_len, evaluate, report_metric=True):
super().__init__()
self.num_beams = num_beams
self.max_len = max_len
self.min_len = min_len
self.evaluate = evaluate
self.report_metric = report_metric
@classmethod
def setup_task(cls, cfg):
run_cfg = cfg.run_cfg
num_beams = run_cfg.num_beams
max_len = run_cfg.max_len
min_len = run_cfg.min_len
evaluate = run_cfg.evaluate
report_metric = run_cfg.get("report_metric", True)
return cls(
num_beams=num_beams,
max_len=max_len,
min_len=min_len,
evaluate=evaluate,
report_metric=report_metric,
)
def valid_step(self, model, samples):
results = []
loss = model(samples)["loss"].item()
return [loss]
def after_evaluation(self, val_result, split_name, epoch, **kwargs):
if self.report_metric:
avg_loss = np.mean(val_result)
metrics = {"agg_metrics": avg_loss}
else:
metrics = {"agg_metrics": 0.0}
return metrics
@main_process
def _report_metrics(self, eval_result_file, split_name):
# TODO better way to define this
coco_gt_root = os.path.join(registry.get_path("cache_root"), "coco_gt")
coco_val = coco_dialogue_eval(coco_gt_root, eval_result_file, split_name)
agg_metrics = coco_val.eval["CIDEr"] + coco_val.eval["Bleu_4"]
log_stats = {split_name: {k: v for k, v in coco_val.eval.items()}}
with open(
os.path.join(registry.get_path("output_dir"), "evaluate.txt"), "a"
) as f:
f.write(json.dumps(log_stats) + "\n")
coco_res = {k: v for k, v in coco_val.eval.items()}
coco_res["agg_metrics"] = agg_metrics
return coco_res
# TODO better structure for this.
from pycocoevalcap.eval import COCOEvalCap
from pycocotools.coco import COCO
from torchvision.datasets.utils import download_url
def coco_dialogue_eval(coco_gt_root, results_file, split):
urls = {
"val": "https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_val_gt.json",
"test": "https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_test_gt.json",
}
filenames = {
"val": "coco_karpathy_val_gt.json",
"test": "coco_karpathy_test_gt.json",
}
download_url(urls[split], coco_gt_root)
annotation_file = os.path.join(coco_gt_root, filenames[split])
# create coco object and coco_result object
coco = COCO(annotation_file)
coco_result = coco.loadRes(results_file)
# create coco_eval object by taking coco and coco_result
coco_eval = COCOEvalCap(coco, coco_result)
# evaluate on a subset of images by setting
# coco_eval.params['image_id'] = coco_result.getImgIds()
# please remove this line when evaluating the full validation set
# coco_eval.params['image_id'] = coco_result.getImgIds()
# evaluate results
# SPICE will take a few minutes the first time, but speeds up due to caching
coco_eval.evaluate()
# print output evaluation scores
for metric, score in coco_eval.eval.items():
print(f"{metric}: {score:.3f}")
return coco_eval
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/tasks/dialogue.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import json
import os
from lavis.common.dist_utils import main_process
from lavis.common.registry import registry
from lavis.tasks.base_task import BaseTask
@registry.register_task("captioning")
class CaptionTask(BaseTask):
def __init__(self, num_beams, max_len, min_len, evaluate, report_metric=True):
super().__init__()
self.num_beams = num_beams
self.max_len = max_len
self.min_len = min_len
self.evaluate = evaluate
self.report_metric = report_metric
@classmethod
def setup_task(cls, cfg):
run_cfg = cfg.run_cfg
num_beams = run_cfg.num_beams
max_len = run_cfg.max_len
min_len = run_cfg.min_len
evaluate = run_cfg.evaluate
report_metric = run_cfg.get("report_metric", True)
return cls(
num_beams=num_beams,
max_len=max_len,
min_len=min_len,
evaluate=evaluate,
report_metric=report_metric,
)
def valid_step(self, model, samples):
results = []
        # run_cfg = self.cfg.run_cfg
captions = model.generate(
samples,
use_nucleus_sampling=False,
num_beams=self.num_beams,
max_length=self.max_len,
min_length=self.min_len,
)
img_ids = samples["image_id"]
for caption, img_id in zip(captions, img_ids):
results.append({"caption": caption, "image_id": int(img_id)})
return results
def after_evaluation(self, val_result, split_name, epoch, **kwargs):
eval_result_file = self.save_result(
result=val_result,
result_dir=registry.get_path("result_dir"),
filename="{}_epoch{}".format(split_name, epoch),
remove_duplicate="image_id",
)
if self.report_metric:
metrics = self._report_metrics(
eval_result_file=eval_result_file, split_name=split_name
)
else:
metrics = {"agg_metrics": 0.0}
return metrics
@main_process
def _report_metrics(self, eval_result_file, split_name):
# TODO better way to define this
coco_gt_root = os.path.join(registry.get_path("cache_root"), "coco_gt")
coco_val = coco_caption_eval(coco_gt_root, eval_result_file, split_name)
agg_metrics = coco_val.eval["CIDEr"] + coco_val.eval["Bleu_4"]
log_stats = {split_name: {k: v for k, v in coco_val.eval.items()}}
with open(
os.path.join(registry.get_path("output_dir"), "evaluate.txt"), "a"
) as f:
f.write(json.dumps(log_stats) + "\n")
coco_res = {k: v for k, v in coco_val.eval.items()}
coco_res["agg_metrics"] = agg_metrics
return coco_res
# TODO better structure for this.
from pycocoevalcap.eval import COCOEvalCap
from pycocotools.coco import COCO
from torchvision.datasets.utils import download_url
def coco_caption_eval(coco_gt_root, results_file, split):
urls = {
"val": "https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_val_gt.json",
"test": "https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_test_gt.json",
}
filenames = {
"val": "coco_karpathy_val_gt.json",
"test": "coco_karpathy_test_gt.json",
}
download_url(urls[split], coco_gt_root)
annotation_file = os.path.join(coco_gt_root, filenames[split])
# create coco object and coco_result object
coco = COCO(annotation_file)
coco_result = coco.loadRes(results_file)
# create coco_eval object by taking coco and coco_result
coco_eval = COCOEvalCap(coco, coco_result)
# evaluate on a subset of images by setting
# coco_eval.params['image_id'] = coco_result.getImgIds()
# please remove this line when evaluating the full validation set
# coco_eval.params['image_id'] = coco_result.getImgIds()
# evaluate results
# SPICE will take a few minutes the first time, but speeds up due to caching
coco_eval.evaluate()
# print output evaluation scores
for metric, score in coco_eval.eval.items():
print(f"{metric}: {score:.3f}")
return coco_eval
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/tasks/captioning.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import logging
import json
import os
import lavis.common.dist_utils as dist_utils
from lavis.common.registry import registry
from lavis.common.vqa_tools.vqa import VQA
from lavis.common.vqa_tools.vqa_eval import VQAEval
from lavis.tasks.base_task import BaseTask
@registry.register_task("vqa")
class VQATask(BaseTask):
def __init__(
self,
num_beams,
max_len,
min_len,
evaluate,
num_ans_candidates,
inference_method="rank",
):
super().__init__()
self.num_beams = num_beams
self.max_len = max_len
self.min_len = min_len
self.evaluate = evaluate
self.inference_method = inference_method
self.num_ans_candidates = num_ans_candidates
self.answer_list = None
self.ques_files = dict()
self.anno_files = dict()
@classmethod
def setup_task(cls, cfg):
run_cfg = cfg.run_cfg
num_beams = run_cfg.get("num_beams", 3)
max_len = run_cfg.get("max_len", 10)
min_len = run_cfg.get("min_len", 1)
evaluate = run_cfg.get("evaluate", False)
inference_method = run_cfg.get("inference_method", "rank")
num_ans_candidates = run_cfg.get("num_ans_candidates", 128)
return cls(
num_beams=num_beams,
max_len=max_len,
min_len=min_len,
evaluate=evaluate,
num_ans_candidates=num_ans_candidates,
inference_method=inference_method,
)
def build_datasets(self, cfg):
datasets = super().build_datasets(cfg)
# get question file, annotation file and anwser list in COCO format
for dataset in datasets.values():
for split in dataset:
if (
hasattr(dataset[split], "coco_fmt_qust_file")
and dataset[split].coco_fmt_qust_file is not None
):
self.ques_files[split] = dataset[split].coco_fmt_qust_file
self.anno_files[split] = dataset[split].coco_fmt_anno_file
try:
self.answer_list = dataset[split].answer_list
except AttributeError:
                    # if answer_list is not provided, leave it as None
pass
if len(self.ques_files) > 0:
assert len(self.ques_files) == len(
self.anno_files
), "Only support one split for evaluation."
return datasets
def valid_step(self, model, samples):
answers = model.predict_answers(
samples=samples,
answer_list=self.answer_list,
inference_method=self.inference_method,
num_beams=self.num_beams,
max_len=self.max_len,
min_len=self.min_len,
num_ans_candidates=self.num_ans_candidates,
)
pred_qa_pairs = []
question_id = samples["question_id"]
for answer, ques_id in zip(answers, question_id):
ques_id = int(ques_id.item())
pred_qa_pairs.append({"question_id": ques_id, "answer": answer})
return pred_qa_pairs
def after_evaluation(self, val_result, split_name, **kwargs):
result_file = self.save_result(
val_result,
result_dir=registry.get_path("result_dir"),
filename=f"{split_name}_vqa_result",
remove_duplicate="question_id",
)
metrics = self._report_metrics(result_file=result_file, split=split_name)
return metrics
@dist_utils.main_process
def _report_metrics(self, result_file, split):
"""
Use official VQA evaluation script to report metrics.
"""
metrics = {}
if split in self.ques_files and split in self.anno_files:
vqa = VQA(self.anno_files[split], self.ques_files[split])
vqa_result = vqa.loadRes(
resFile=result_file, quesFile=self.ques_files[split]
)
# create vqaEval object by taking vqa and vqaRes
# n is precision of accuracy (number of places after decimal), default is 2
vqa_scorer = VQAEval(vqa, vqa_result, n=2)
logging.info("Start VQA evaluation.")
vqa_scorer.evaluate()
# print accuracies
overall_acc = vqa_scorer.accuracy["overall"]
metrics["agg_metrics"] = overall_acc
logging.info("Overall Accuracy is: %.02f\n" % overall_acc)
logging.info("Per Answer Type Accuracy is the following:")
for ans_type in vqa_scorer.accuracy["perAnswerType"]:
logging.info(
"%s : %.02f"
% (ans_type, vqa_scorer.accuracy["perAnswerType"][ans_type])
)
metrics[ans_type] = vqa_scorer.accuracy["perAnswerType"][ans_type]
with open(
os.path.join(registry.get_path("output_dir"), "evaluate.txt"), "a"
) as f:
f.write(json.dumps(metrics) + "\n")
return metrics
@registry.register_task("aok_vqa")
class AOKVQATask(VQATask):
def valid_step(self, model, samples):
answers = model.predict_answers(
samples=samples,
answer_list=self.answer_list,
inference_method=self.inference_method,
num_beams=self.num_beams,
max_len=self.max_len,
min_len=self.min_len,
num_ans_candidates=self.num_ans_candidates,
)
pred_qa_pairs = []
question_id = samples["question_id"]
gt_answers = samples["direct_answers"]
for pred_answer, ques_id, gt_answer in zip(answers, question_id, gt_answers):
pred_qa_pairs.append(
{"question_id": ques_id, "pred_ans": pred_answer, "gt_ans": gt_answer}
)
return pred_qa_pairs
@dist_utils.main_process
def _report_metrics(self, result_file, split):
"""
Implementing accuracy computation for AOKVQA, see
https://github.com/allenai/aokvqa/blob/main/evaluation/eval_predictions.py#L45 for details.
"""
# TODO add evaluation for multi-choice
results = json.load(open(result_file, "r"))
acc = []
for res in results:
if res["gt_ans"] is None:
# prepare test results for leaderboard evaluation
self._save_result_leaderboard(results)
return
pred = res["pred_ans"]
gt_ans = res["gt_ans"]
num_match = sum([pred == gt for gt in gt_ans])
vqa_acc = min(1.0, num_match / 3.0)
acc.append(vqa_acc)
accuracy = sum(acc) / len(acc) * 100
metrics = {"agg_metrics": accuracy, "acc": accuracy}
with open(
os.path.join(registry.get_path("output_dir"), "evaluate.txt"), "a"
) as f:
f.write(json.dumps(metrics) + "\n")
logging.info(metrics)
return metrics
@dist_utils.main_process
def _save_result_leaderboard(self, results):
"""
Saving the results in the format required for leaderboard evaluation.
[TODO] add support for multi-choice.
"""
result_leaderboard = dict()
for res in results:
result_leaderboard[res["question_id"]] = {
"direct_answer": res["pred_ans"],
"multiple_choice": "",
}
result_file = registry.get_path("result_dir") + "_leaderboard.json"
with open(result_file, "w") as f:
json.dump(result_leaderboard, f)
logging.info(f"Saved results for leaderboard evaluation at {result_file}")
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/tasks/vqa.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import logging
import json
import os
import torch
import torch.distributed as dist
from itertools import chain
import lavis.common.dist_utils as dist_utils
from lavis.common.dist_utils import get_rank, get_world_size, is_main_process
from lavis.common.registry import registry
from lavis.common.vqa_tools.vqa_eval import VQAEval as VQATool
from lavis.tasks.vqa import VQATask
@registry.register_task("vqa_reading_comprehension")
class VQARCTask(VQATask):
def __init__(
self,
num_beams,
max_len,
min_len,
evaluate,
num_ans_candidates,
inference_method="rank",
**kwargs,
):
super().__init__(num_beams, max_len, min_len, evaluate, num_ans_candidates, inference_method)
self.config = kwargs.get('config')
@classmethod
def setup_task(cls, cfg):
run_cfg = cfg.run_cfg
num_beams = run_cfg.get("num_beams", 3)
max_len = run_cfg.get("max_len", 10)
min_len = run_cfg.get("min_len", 1)
evaluate = run_cfg.get("evaluate", False)
inference_method = run_cfg.get("inference_method", "rank")
num_ans_candidates = run_cfg.get("num_ans_candidates", 128)
return cls(
num_beams=num_beams,
max_len=max_len,
min_len=min_len,
evaluate=evaluate,
num_ans_candidates=num_ans_candidates,
inference_method=inference_method,
config=run_cfg,
)
def valid_step(self, model, samples):
answers, captions, gradcams = model.predict_answers(
samples=samples,
inference_method=self.inference_method,
num_beams=self.num_beams,
max_len=self.max_len,
min_len=self.min_len,
internal_bsz_fid=self.config['internal_bsz_fid'],
num_captions=self.config['num_captions'],
num_captions_fid=self.config['num_captions_fid'],
cap_max_length=self.config['cap_max_length'],
cap_min_length=self.config['cap_min_length'],
top_k=self.config['top_k'],
top_p=self.config['top_p'],
repetition_penalty=self.config['repetition_penalty'],
num_patches=self.config['num_patches'],
block_num=self.config['block_num'],
)
pred_qa_pairs = []
sample_captions = []
sample_gradcams = []
question_id = samples["question_id"]
for answer, caption, gradcam, ques_id in zip(answers, captions, gradcams, question_id):
ques_id = int(ques_id.item())
pred_qa_pairs.append({"question_id": ques_id, "answer": answer})
sample_captions.append({"question_id": ques_id, "caption": caption})
sample_gradcams.append({"question_id": ques_id, "gradcam": gradcam})
return [sample_gradcams, sample_captions, pred_qa_pairs]
def after_evaluation(self, val_result, split_name, **kwargs):
result_ = list(chain(*val_result[0::3]))
result_file = self.save_gradcam(
result_,
result_dir=registry.get_path("result_dir"),
filename=f"{split_name}_gradcam_result",
remove_duplicate="question_id",
)
result_ = list(chain(*val_result[1::3]))
result_file = self.save_result(
result_,
result_dir=registry.get_path("result_dir"),
filename=f"{split_name}_caption_result",
remove_duplicate="question_id",
)
result_ = list(chain(*val_result[2::3]))
result_file = self.save_result(
result_,
result_dir=registry.get_path("result_dir"),
filename=f"{split_name}_vqa_result",
remove_duplicate="question_id",
)
metrics = self._report_metrics(result_file=result_file, split=split_name)
return metrics
def save_gradcam(self, result, result_dir, filename, remove_duplicate=""):
result_file = os.path.join(result_dir, '%s_rank%d.pth' % (filename, get_rank()))
final_result_file = os.path.join(result_dir, '%s.pth' % filename)
torch.save({'result': result}, result_file)
dist.barrier()
if is_main_process():
logging.warning("rank %d starts merging results." % get_rank())
# combine results from all processes
result = []
for rank in range(get_world_size()):
result_file = os.path.join(result_dir, '%s_rank%d.pth' % (filename, rank))
res_ckpt = torch.load(result_file, map_location='cpu')
res = res_ckpt['result']
result += res
if remove_duplicate:
result_new = []
id_list = []
for res in result:
if res[remove_duplicate] not in id_list:
id_list.append(res[remove_duplicate])
result_new.append(res)
result = result_new
torch.save({'result': result}, final_result_file)
print("result file saved to %s" % final_result_file)
return final_result_file
@registry.register_task("gqa_reading_comprehension")
class GQARCTask(VQARCTask):
def valid_step(self, model, samples):
answers, captions, gradcams = model.predict_answers(
samples=samples,
inference_method=self.inference_method,
num_beams=self.num_beams,
max_len=self.max_len,
min_len=self.min_len,
internal_bsz_fid=self.config['internal_bsz_fid'],
num_captions=self.config['num_captions'],
num_captions_fid=self.config['num_captions_fid'],
cap_max_length=self.config['cap_max_length'],
cap_min_length=self.config['cap_min_length'],
top_k=self.config['top_k'],
top_p=self.config['top_p'],
repetition_penalty=self.config['repetition_penalty'],
num_patches=self.config['num_patches'],
block_num=self.config['block_num'],
)
pred_qa_pairs = []
sample_captions = []
sample_gradcams = []
question_id = samples["question_id"]
gt_answers = samples["answer"]
for pred_answer, caption, gradcam, ques_id, gt_answer in zip(answers, captions, gradcams, question_id, gt_answers):
ques_id = int(ques_id.item())
pred_qa_pairs.append({"question_id": ques_id, "pred_ans": pred_answer, "gt_ans": gt_answer})
sample_captions.append({"question_id": ques_id, "caption": caption})
sample_gradcams.append({"question_id": ques_id, "gradcam": gradcam})
return [sample_gradcams, sample_captions, pred_qa_pairs]
@dist_utils.main_process
def _report_metrics(self, result_file, split):
"""
TODO: add other evaluation metrics for GQA
"""
results = json.load(open(result_file, "r"))
acc = []
vqa_tool = VQATool()
for res in results:
if res["gt_ans"] is None:
# prepare test results for leaderboard evaluation
self._save_result_leaderboard(results)
return
gt_ans = res["gt_ans"]
pred = res["pred_ans"]
if self.inference_method == "generate":
pred = vqa_tool.processPunctuation(pred)
pred = vqa_tool.processDigitArticle(pred)
vqa_acc = 1 if pred == gt_ans else 0
acc.append(vqa_acc)
accuracy = sum(acc) / len(acc) * 100
metrics = {"agg_metrics": accuracy, "acc": accuracy}
with open(
os.path.join(registry.get_path("output_dir"), "evaluate.txt"), "a"
) as f:
f.write(json.dumps(metrics) + "\n")
logging.info(metrics)
return metrics
@dist_utils.main_process
def _save_result_leaderboard(self, results):
"""
Saving the results in the format required for leaderboard evaluation.
"""
result_leaderboard = []
for res in results:
result_leaderboard.append({
"questionId": str(res['question_id']),
"prediction": str(res["pred_ans"]),
})
result_file = registry.get_path("result_dir") + "_leaderboard.json"
with open(result_file, "w") as f:
json.dump(result_leaderboard, f)
logging.info(f"Saved results for leaderboard evaluation at {result_file}")
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/tasks/vqa_reading_comprehension.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import json
import os
import logging
import numpy as np
import torch
from lavis.common.dist_utils import main_process
from lavis.common.registry import registry
from lavis.tasks.base_task import BaseTask
@registry.register_task("multimodal_classification")
class MultimodalClassificationTask(BaseTask):
def __init__(self):
super().__init__()
def valid_step(self, model, samples):
results = []
outputs = model.predict(samples)
predictions = outputs["predictions"]
targets = outputs["targets"]
predictions = predictions.max(1)[1].cpu().numpy()
targets = targets.cpu().numpy()
indices = samples[self.inst_id_key]
for pred, tgt, index in zip(predictions, targets, indices):
if isinstance(index, torch.Tensor):
index = index.item()
results.append(
{
self.inst_id_key: index,
"prediction": pred.item(),
"target": tgt.item(),
}
)
return results
def after_evaluation(self, val_result, split_name, epoch, **kwargs):
eval_result_file = self.save_result(
result=val_result,
result_dir=registry.get_path("result_dir"),
filename="{}_epoch{}".format(split_name, epoch),
remove_duplicate=self.inst_id_key,
)
metrics = self._report_metrics(
eval_result_file=eval_result_file, split_name=split_name
)
return metrics
@main_process
def _report_metrics(self, eval_result_file, split_name):
results = json.load(open(eval_result_file))
predictions = np.array([res["prediction"] for res in results])
targets = np.array([res["target"] for res in results])
accuracy = (targets == predictions).sum() / targets.shape[0]
metrics = {"agg_metrics": accuracy, "acc": accuracy}
log_stats = {split_name: {k: v for k, v in metrics.items()}}
with open(
os.path.join(registry.get_path("output_dir"), "evaluate.txt"), "a"
) as f:
f.write(json.dumps(log_stats) + "\n")
logging.info(metrics)
return metrics
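# Toy sketch (made-up numbers, not part of the original file) of the accuracy
# computed in _report_metrics above.
if __name__ == "__main__":
    demo_preds = np.array([1, 0, 2, 2])
    demo_tgts = np.array([1, 0, 1, 2])
    print((demo_tgts == demo_preds).sum() / demo_tgts.shape[0])  # -> 0.75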
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/tasks/multimodal_classification.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import gzip
import logging
import os
import random as rnd
import tarfile
import zipfile
import decord
import webdataset as wds
import numpy as np
import torch
from torch.utils.data.dataset import IterableDataset, ChainDataset
from decord import VideoReader
from lavis.common.registry import registry
from lavis.datasets.datasets.base_dataset import ConcatDataset
from tqdm import tqdm
decord.bridge.set_bridge("torch")
MAX_INT = registry.get("MAX_INT")
def load_video(video_path, n_frms=MAX_INT, height=-1, width=-1, sampling="uniform"):
vr = VideoReader(uri=video_path, height=height, width=width)
vlen = len(vr)
start, end = 0, vlen
n_frms = min(n_frms, vlen)
if sampling == "uniform":
indices = np.arange(start, end, vlen / n_frms).astype(int)
elif sampling == "headtail":
indices_h = sorted(rnd.sample(range(vlen // 2), n_frms // 2))
indices_t = sorted(rnd.sample(range(vlen // 2, vlen), n_frms // 2))
indices = indices_h + indices_t
else:
raise NotImplementedError
# get_batch -> T, H, W, C
frms = vr.get_batch(indices).permute(3, 0, 1, 2).float() # (C, T, H, W)
return frms
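# Illustrative note (toy numbers, not part of the original file): with vlen=100 and
# n_frms=4, "uniform" sampling above picks indices [0, 25, 50, 75]; "headtail" instead
# draws n_frms // 2 random indices from each half of the video.
if __name__ == "__main__":
    print(np.arange(0, 100, 100 / 4).astype(int))  # -> [ 0 25 50 75]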
def apply_to_sample(f, sample):
if len(sample) == 0:
return {}
def _apply(x):
if torch.is_tensor(x):
return f(x)
elif isinstance(x, dict):
return {key: _apply(value) for key, value in x.items()}
elif isinstance(x, list):
return [_apply(item) for item in x]
else:
return x
return _apply(sample)
def move_to_cuda(sample):
def _move_to_cuda(tensor):
return tensor.cuda()
return apply_to_sample(_move_to_cuda, sample)
def prepare_sample(samples, cuda_enabled=True):
if cuda_enabled:
samples = move_to_cuda(samples)
# TODO fp16 support
return samples
def reorg_datasets_by_split(datasets):
"""
Organizes datasets by split.
Args:
datasets: dict of torch.utils.data.Dataset objects by name.
Returns:
Dict of datasets by split {split_name: List[Datasets]}.
"""
# if len(datasets) == 1:
# return datasets[list(datasets.keys())[0]]
# else:
reorg_datasets = dict()
# reorganize by split
for _, dataset in datasets.items():
for split_name, dataset_split in dataset.items():
if split_name not in reorg_datasets:
reorg_datasets[split_name] = [dataset_split]
else:
reorg_datasets[split_name].append(dataset_split)
return reorg_datasets
def concat_datasets(datasets):
"""
Concatenates multiple datasets into a single dataset.
It supports map-style datasets and DataPipeline from WebDataset. Currently, it does not support
generic IterableDataset because it requires creating separate samplers.
Now only supports concatenating training datasets and assumes validation and testing
have only a single dataset. This is because metrics should not be computed on the concatenated
datasets.
Args:
datasets: dict of torch.utils.data.Dataset objects by split.
Returns:
Dict of concatenated datasets by split, "train" is the concatenation of multiple datasets,
"val" and "test" remain the same.
If the input training datasets contain both map-style and DataPipeline datasets, returns
a tuple, where the first element is a concatenated map-style dataset and the second
element is a chained DataPipeline dataset.
"""
# concatenate datasets in the same split
for split_name in datasets:
if split_name != "train":
assert (
len(datasets[split_name]) == 1
), "Do not support multiple {} datasets.".format(split_name)
datasets[split_name] = datasets[split_name][0]
else:
iterable_datasets, map_datasets = [], []
for dataset in datasets[split_name]:
if isinstance(dataset, wds.DataPipeline):
logging.info(
"Dataset {} is IterableDataset, can't be concatenated.".format(
dataset
)
)
iterable_datasets.append(dataset)
elif isinstance(dataset, IterableDataset):
raise NotImplementedError(
"Do not support concatenation of generic IterableDataset."
)
else:
map_datasets.append(dataset)
# if len(iterable_datasets) > 0:
# concatenate map-style datasets and iterable-style datasets separately
chained_datasets = (
ChainDataset(iterable_datasets) if len(iterable_datasets) > 0 else None
)
concat_datasets = (
ConcatDataset(map_datasets) if len(map_datasets) > 0 else None
)
train_datasets = concat_datasets, chained_datasets
train_datasets = tuple([x for x in train_datasets if x is not None])
train_datasets = (
train_datasets[0] if len(train_datasets) == 1 else train_datasets
)
datasets[split_name] = train_datasets
return datasets
def extract_archive(from_path, to_path=None, overwrite=False):
"""Extract archive.
Args:
from_path: the path of the archive.
to_path: the root path of the extracted files (directory of from_path)
overwrite: overwrite existing files (False)
Returns:
List of paths to extracted files even if not overwritten.
Examples:
>>> url = 'http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/validation.tar.gz'
>>> from_path = './validation.tar.gz'
>>> to_path = './'
>>> torchtext.utils.download_from_url(url, from_path)
>>> torchtext.utils.extract_archive(from_path, to_path)
>>> ['.data/val.de', '.data/val.en']
>>> torchtext.utils.download_from_url(url, from_path)
>>> torchtext.utils.extract_archive(from_path, to_path)
>>> ['.data/val.de', '.data/val.en']
"""
if to_path is None:
to_path = os.path.dirname(from_path)
if from_path.endswith((".tar.gz", ".tgz")):
logging.info("Opening tar file {} to {}.".format(from_path, to_path))
with tarfile.open(from_path, "r") as tar:
files = []
for file_ in tqdm(tar):
file_path = os.path.join(to_path, file_.name)
if file_.isfile():
files.append(file_path)
if os.path.exists(file_path):
logging.info("{} already extracted.".format(file_path))
if not overwrite:
continue
tar.extract(file_, to_path)
logging.info("Finished extracting tar file {}.".format(from_path))
return files
elif from_path.endswith(".zip"):
assert zipfile.is_zipfile(from_path), from_path
logging.info("Opening zip file {} to {}.".format(from_path, to_path))
with zipfile.ZipFile(from_path, "r") as zfile:
files = []
for file_ in tqdm(zfile.namelist()):
file_path = os.path.join(to_path, file_)
files.append(file_path)
if os.path.exists(file_path):
logging.info("{} already extracted.".format(file_path))
if not overwrite:
continue
zfile.extract(file_, to_path)
files = [f for f in files if os.path.isfile(f)]
logging.info("Finished extracting zip file {}.".format(from_path))
return files
elif from_path.endswith(".gz"):
logging.info("Opening gz file {} to {}.".format(from_path, to_path))
default_block_size = 65536
filename = from_path[:-3]
files = [filename]
with gzip.open(from_path, "rb") as gzfile, open(filename, "wb") as d_file:
while True:
block = gzfile.read(default_block_size)
if not block:
break
else:
d_file.write(block)
logging.info("Finished extracting gz file {}.".format(from_path))
return files
else:
raise NotImplementedError(
"We currently only support tar.gz, .tgz, .gz and zip achives."
)
def save_frames_grid(img_array, out_path):
import torch
from PIL import Image
from torchvision.utils import make_grid
if len(img_array.shape) == 3:
img_array = img_array.unsqueeze(0)
elif len(img_array.shape) == 5:
b, t, c, h, w = img_array.shape
img_array = img_array.view(-1, c, h, w)
elif len(img_array.shape) == 4:
pass
else:
raise NotImplementedError(
"Supports only (b,t,c,h,w)-shaped inputs. First two dimensions can be ignored."
)
assert img_array.shape[1] == 3, "Expecting 3-channel (RGB) input, i.e. shape (B, 3, H, W)."
grid = make_grid(img_array)
ndarr = grid.permute(1, 2, 0).to("cpu", torch.uint8).numpy()
img = Image.fromarray(ndarr)
img.save(out_path)
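# Hedged usage sketch (not part of the original file): prepare_sample / move_to_cuda
# recursively walk nested dicts and lists and move every tensor to the GPU; the batch
# below is a stand-in.
if __name__ == "__main__":
    demo_batch = {"image": torch.zeros(2, 3, 224, 224), "text_input": ["a photo", "a scene"]}
    if torch.cuda.is_available():
        demo_batch = prepare_sample(demo_batch, cuda_enabled=True)
        print(demo_batch["image"].device)  # -> cuda:0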
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/datasets/data_utils.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import os
from lavis.common.registry import registry
from lavis.datasets.builders.base_dataset_builder import BaseDatasetBuilder
from lavis.datasets.datasets.imagefolder_dataset import ImageFolderDataset
@registry.register_builder("imagenet")
class ImageNetBuilder(BaseDatasetBuilder):
train_dataset_cls = ImageFolderDataset
eval_dataset_cls = ImageFolderDataset
DATASET_CONFIG_DICT = {"default": "configs/datasets/imagenet/defaults.yaml"}
def _download_ann(self):
pass
def build(self):
self.build_processors()
build_info = self.config.build_info
vis_info = build_info.get(self.data_type)
datasets = dict()
for split in build_info.splits:
assert split in [
"train",
"val",
], "Invalid split name {}, must be one of 'train', 'val' and 'test'."
is_train = split == "train"
vis_processor = (
self.vis_processors["train"]
if is_train
else self.vis_processors["eval"]
)
vis_path = os.path.join(vis_info.storage, split)
# create datasets
dataset_cls = self.train_dataset_cls if is_train else self.eval_dataset_cls
datasets[split] = dataset_cls(
vis_processor=vis_processor,
vis_root=vis_path,
classnames=imagenet_classnames,
)
return datasets
imagenet_classnames = [
"tench",
"goldfish",
"great white shark",
"tiger shark",
"hammerhead shark",
"electric ray",
"stingray",
"rooster",
"hen",
"ostrich",
"brambling",
"goldfinch",
"house finch",
"junco",
"indigo bunting",
"American robin",
"bulbul",
"jay",
"magpie",
"chickadee",
"American dipper",
"kite (bird of prey)",
"bald eagle",
"vulture",
"great grey owl",
"fire salamander",
"smooth newt",
"newt",
"spotted salamander",
"axolotl",
"American bullfrog",
"tree frog",
"tailed frog",
"loggerhead sea turtle",
"leatherback sea turtle",
"mud turtle",
"terrapin",
"box turtle",
"banded gecko",
"green iguana",
"Carolina anole",
"desert grassland whiptail lizard",
"agama",
"frilled-necked lizard",
"alligator lizard",
"Gila monster",
"European green lizard",
"chameleon",
"Komodo dragon",
"Nile crocodile",
"American alligator",
"triceratops",
"worm snake",
"ring-necked snake",
"eastern hog-nosed snake",
"smooth green snake",
"kingsnake",
"garter snake",
"water snake",
"vine snake",
"night snake",
"boa constrictor",
"African rock python",
"Indian cobra",
"green mamba",
"sea snake",
"Saharan horned viper",
"eastern diamondback rattlesnake",
"sidewinder rattlesnake",
"trilobite",
"harvestman",
"scorpion",
"yellow garden spider",
"barn spider",
"European garden spider",
"southern black widow",
"tarantula",
"wolf spider",
"tick",
"centipede",
"black grouse",
"ptarmigan",
"ruffed grouse",
"prairie grouse",
"peafowl",
"quail",
"partridge",
"african grey parrot",
"macaw",
"sulphur-crested cockatoo",
"lorikeet",
"coucal",
"bee eater",
"hornbill",
"hummingbird",
"jacamar",
"toucan",
"duck",
"red-breasted merganser",
"goose",
"black swan",
"tusker",
"echidna",
"platypus",
"wallaby",
"koala",
"wombat",
"jellyfish",
"sea anemone",
"brain coral",
"flatworm",
"nematode",
"conch",
"snail",
"slug",
"sea slug",
"chiton",
"chambered nautilus",
"Dungeness crab",
"rock crab",
"fiddler crab",
"red king crab",
"American lobster",
"spiny lobster",
"crayfish",
"hermit crab",
"isopod",
"white stork",
"black stork",
"spoonbill",
"flamingo",
"little blue heron",
"great egret",
"bittern bird",
"crane bird",
"limpkin",
"common gallinule",
"American coot",
"bustard",
"ruddy turnstone",
"dunlin",
"common redshank",
"dowitcher",
"oystercatcher",
"pelican",
"king penguin",
"albatross",
"grey whale",
"killer whale",
"dugong",
"sea lion",
"Chihuahua",
"Japanese Chin",
"Maltese",
"Pekingese",
"Shih Tzu",
"King Charles Spaniel",
"Papillon",
"toy terrier",
"Rhodesian Ridgeback",
"Afghan Hound",
"Basset Hound",
"Beagle",
"Bloodhound",
"Bluetick Coonhound",
"Black and Tan Coonhound",
"Treeing Walker Coonhound",
"English foxhound",
"Redbone Coonhound",
"borzoi",
"Irish Wolfhound",
"Italian Greyhound",
"Whippet",
"Ibizan Hound",
"Norwegian Elkhound",
"Otterhound",
"Saluki",
"Scottish Deerhound",
"Weimaraner",
"Staffordshire Bull Terrier",
"American Staffordshire Terrier",
"Bedlington Terrier",
"Border Terrier",
"Kerry Blue Terrier",
"Irish Terrier",
"Norfolk Terrier",
"Norwich Terrier",
"Yorkshire Terrier",
"Wire Fox Terrier",
"Lakeland Terrier",
"Sealyham Terrier",
"Airedale Terrier",
"Cairn Terrier",
"Australian Terrier",
"Dandie Dinmont Terrier",
"Boston Terrier",
"Miniature Schnauzer",
"Giant Schnauzer",
"Standard Schnauzer",
"Scottish Terrier",
"Tibetan Terrier",
"Australian Silky Terrier",
"Soft-coated Wheaten Terrier",
"West Highland White Terrier",
"Lhasa Apso",
"Flat-Coated Retriever",
"Curly-coated Retriever",
"Golden Retriever",
"Labrador Retriever",
"Chesapeake Bay Retriever",
"German Shorthaired Pointer",
"Vizsla",
"English Setter",
"Irish Setter",
"Gordon Setter",
"Brittany dog",
"Clumber Spaniel",
"English Springer Spaniel",
"Welsh Springer Spaniel",
"Cocker Spaniel",
"Sussex Spaniel",
"Irish Water Spaniel",
"Kuvasz",
"Schipperke",
"Groenendael dog",
"Malinois",
"Briard",
"Australian Kelpie",
"Komondor",
"Old English Sheepdog",
"Shetland Sheepdog",
"collie",
"Border Collie",
"Bouvier des Flandres dog",
"Rottweiler",
"German Shepherd Dog",
"Dobermann",
"Miniature Pinscher",
"Greater Swiss Mountain Dog",
"Bernese Mountain Dog",
"Appenzeller Sennenhund",
"Entlebucher Sennenhund",
"Boxer",
"Bullmastiff",
"Tibetan Mastiff",
"French Bulldog",
"Great Dane",
"St. Bernard",
"husky",
"Alaskan Malamute",
"Siberian Husky",
"Dalmatian",
"Affenpinscher",
"Basenji",
"pug",
"Leonberger",
"Newfoundland dog",
"Great Pyrenees dog",
"Samoyed",
"Pomeranian",
"Chow Chow",
"Keeshond",
"brussels griffon",
"Pembroke Welsh Corgi",
"Cardigan Welsh Corgi",
"Toy Poodle",
"Miniature Poodle",
"Standard Poodle",
"Mexican hairless dog (xoloitzcuintli)",
"grey wolf",
"Alaskan tundra wolf",
"red wolf or maned wolf",
"coyote",
"dingo",
"dhole",
"African wild dog",
"hyena",
"red fox",
"kit fox",
"Arctic fox",
"grey fox",
"tabby cat",
"tiger cat",
"Persian cat",
"Siamese cat",
"Egyptian Mau",
"cougar",
"lynx",
"leopard",
"snow leopard",
"jaguar",
"lion",
"tiger",
"cheetah",
"brown bear",
"American black bear",
"polar bear",
"sloth bear",
"mongoose",
"meerkat",
"tiger beetle",
"ladybug",
"ground beetle",
"longhorn beetle",
"leaf beetle",
"dung beetle",
"rhinoceros beetle",
"weevil",
"fly",
"bee",
"ant",
"grasshopper",
"cricket insect",
"stick insect",
"cockroach",
"praying mantis",
"cicada",
"leafhopper",
"lacewing",
"dragonfly",
"damselfly",
"red admiral butterfly",
"ringlet butterfly",
"monarch butterfly",
"small white butterfly",
"sulphur butterfly",
"gossamer-winged butterfly",
"starfish",
"sea urchin",
"sea cucumber",
"cottontail rabbit",
"hare",
"Angora rabbit",
"hamster",
"porcupine",
"fox squirrel",
"marmot",
"beaver",
"guinea pig",
"common sorrel horse",
"zebra",
"pig",
"wild boar",
"warthog",
"hippopotamus",
"ox",
"water buffalo",
"bison",
"ram (adult male sheep)",
"bighorn sheep",
"Alpine ibex",
"hartebeest",
"impala (antelope)",
"gazelle",
"arabian camel",
"llama",
"weasel",
"mink",
"European polecat",
"black-footed ferret",
"otter",
"skunk",
"badger",
"armadillo",
"three-toed sloth",
"orangutan",
"gorilla",
"chimpanzee",
"gibbon",
"siamang",
"guenon",
"patas monkey",
"baboon",
"macaque",
"langur",
"black-and-white colobus",
"proboscis monkey",
"marmoset",
"white-headed capuchin",
"howler monkey",
"titi monkey",
"Geoffroy's spider monkey",
"common squirrel monkey",
"ring-tailed lemur",
"indri",
"Asian elephant",
"African bush elephant",
"red panda",
"giant panda",
"snoek fish",
"eel",
"silver salmon",
"rock beauty fish",
"clownfish",
"sturgeon",
"gar fish",
"lionfish",
"pufferfish",
"abacus",
"abaya",
"academic gown",
"accordion",
"acoustic guitar",
"aircraft carrier",
"airliner",
"airship",
"altar",
"ambulance",
"amphibious vehicle",
"analog clock",
"apiary",
"apron",
"trash can",
"assault rifle",
"backpack",
"bakery",
"balance beam",
"balloon",
"ballpoint pen",
"Band-Aid",
"banjo",
"baluster / handrail",
"barbell",
"barber chair",
"barbershop",
"barn",
"barometer",
"barrel",
"wheelbarrow",
"baseball",
"basketball",
"bassinet",
"bassoon",
"swimming cap",
"bath towel",
"bathtub",
"station wagon",
"lighthouse",
"beaker",
"military hat (bearskin or shako)",
"beer bottle",
"beer glass",
"bell tower",
"baby bib",
"tandem bicycle",
"bikini",
"ring binder",
"binoculars",
"birdhouse",
"boathouse",
"bobsleigh",
"bolo tie",
"poke bonnet",
"bookcase",
"bookstore",
"bottle cap",
"hunting bow",
"bow tie",
"brass memorial plaque",
"bra",
"breakwater",
"breastplate",
"broom",
"bucket",
"buckle",
"bulletproof vest",
"high-speed train",
"butcher shop",
"taxicab",
"cauldron",
"candle",
"cannon",
"canoe",
"can opener",
"cardigan",
"car mirror",
"carousel",
"tool kit",
"cardboard box / carton",
"car wheel",
"automated teller machine",
"cassette",
"cassette player",
"castle",
"catamaran",
"CD player",
"cello",
"mobile phone",
"chain",
"chain-link fence",
"chain mail",
"chainsaw",
"storage chest",
"chiffonier",
"bell or wind chime",
"china cabinet",
"Christmas stocking",
"church",
"movie theater",
"cleaver",
"cliff dwelling",
"cloak",
"clogs",
"cocktail shaker",
"coffee mug",
"coffeemaker",
"spiral or coil",
"combination lock",
"computer keyboard",
"candy store",
"container ship",
"convertible",
"corkscrew",
"cornet",
"cowboy boot",
"cowboy hat",
"cradle",
"construction crane",
"crash helmet",
"crate",
"infant bed",
"Crock Pot",
"croquet ball",
"crutch",
"cuirass",
"dam",
"desk",
"desktop computer",
"rotary dial telephone",
"diaper",
"digital clock",
"digital watch",
"dining table",
"dishcloth",
"dishwasher",
"disc brake",
"dock",
"dog sled",
"dome",
"doormat",
"drilling rig",
"drum",
"drumstick",
"dumbbell",
"Dutch oven",
"electric fan",
"electric guitar",
"electric locomotive",
"entertainment center",
"envelope",
"espresso machine",
"face powder",
"feather boa",
"filing cabinet",
"fireboat",
"fire truck",
"fire screen",
"flagpole",
"flute",
"folding chair",
"football helmet",
"forklift",
"fountain",
"fountain pen",
"four-poster bed",
"freight car",
"French horn",
"frying pan",
"fur coat",
"garbage truck",
"gas mask or respirator",
"gas pump",
"goblet",
"go-kart",
"golf ball",
"golf cart",
"gondola",
"gong",
"gown",
"grand piano",
"greenhouse",
"radiator grille",
"grocery store",
"guillotine",
"hair clip",
"hair spray",
"half-track",
"hammer",
"hamper",
"hair dryer",
"hand-held computer",
"handkerchief",
"hard disk drive",
"harmonica",
"harp",
"combine harvester",
"hatchet",
"holster",
"home theater",
"honeycomb",
"hook",
"hoop skirt",
"gymnastic horizontal bar",
"horse-drawn vehicle",
"hourglass",
"iPod",
"clothes iron",
"carved pumpkin",
"jeans",
"jeep",
"T-shirt",
"jigsaw puzzle",
"rickshaw",
"joystick",
"kimono",
"knee pad",
"knot",
"lab coat",
"ladle",
"lampshade",
"laptop computer",
"lawn mower",
"lens cap",
"letter opener",
"library",
"lifeboat",
"lighter",
"limousine",
"ocean liner",
"lipstick",
"slip-on shoe",
"lotion",
"music speaker",
"loupe magnifying glass",
"sawmill",
"magnetic compass",
"messenger bag",
"mailbox",
"tights",
"one-piece bathing suit",
"manhole cover",
"maraca",
"marimba",
"mask",
"matchstick",
"maypole",
"maze",
"measuring cup",
"medicine cabinet",
"megalith",
"microphone",
"microwave oven",
"military uniform",
"milk can",
"minibus",
"miniskirt",
"minivan",
"missile",
"mitten",
"mixing bowl",
"mobile home",
"ford model t",
"modem",
"monastery",
"monitor",
"moped",
"mortar and pestle",
"graduation cap",
"mosque",
"mosquito net",
"vespa",
"mountain bike",
"tent",
"computer mouse",
"mousetrap",
"moving van",
"muzzle",
"metal nail",
"neck brace",
"necklace",
"baby pacifier",
"notebook computer",
"obelisk",
"oboe",
"ocarina",
"odometer",
"oil filter",
"pipe organ",
"oscilloscope",
"overskirt",
"bullock cart",
"oxygen mask",
"product packet / packaging",
"paddle",
"paddle wheel",
"padlock",
"paintbrush",
"pajamas",
"palace",
"pan flute",
"paper towel",
"parachute",
"parallel bars",
"park bench",
"parking meter",
"railroad car",
"patio",
"payphone",
"pedestal",
"pencil case",
"pencil sharpener",
"perfume",
"Petri dish",
"photocopier",
"plectrum",
"Pickelhaube",
"picket fence",
"pickup truck",
"pier",
"piggy bank",
"pill bottle",
"pillow",
"ping-pong ball",
"pinwheel",
"pirate ship",
"drink pitcher",
"block plane",
"planetarium",
"plastic bag",
"plate rack",
"farm plow",
"plunger",
"Polaroid camera",
"pole",
"police van",
"poncho",
"pool table",
"soda bottle",
"plant pot",
"potter's wheel",
"power drill",
"prayer rug",
"printer",
"prison",
"missile",
"projector",
"hockey puck",
"punching bag",
"purse",
"quill",
"quilt",
"race car",
"racket",
"radiator",
"radio",
"radio telescope",
"rain barrel",
"recreational vehicle",
"fishing casting reel",
"reflex camera",
"refrigerator",
"remote control",
"restaurant",
"revolver",
"rifle",
"rocking chair",
"rotisserie",
"eraser",
"rugby ball",
"ruler measuring stick",
"sneaker",
"safe",
"safety pin",
"salt shaker",
"sandal",
"sarong",
"saxophone",
"scabbard",
"weighing scale",
"school bus",
"schooner",
"scoreboard",
"CRT monitor",
"screw",
"screwdriver",
"seat belt",
"sewing machine",
"shield",
"shoe store",
"shoji screen / room divider",
"shopping basket",
"shopping cart",
"shovel",
"shower cap",
"shower curtain",
"ski",
"balaclava ski mask",
"sleeping bag",
"slide rule",
"sliding door",
"slot machine",
"snorkel",
"snowmobile",
"snowplow",
"soap dispenser",
"soccer ball",
"sock",
"solar thermal collector",
"sombrero",
"soup bowl",
"keyboard space bar",
"space heater",
"space shuttle",
"spatula",
"motorboat",
"spider web",
"spindle",
"sports car",
"spotlight",
"stage",
"steam locomotive",
"through arch bridge",
"steel drum",
"stethoscope",
"scarf",
"stone wall",
"stopwatch",
"stove",
"strainer",
"tram",
"stretcher",
"couch",
"stupa",
"submarine",
"suit",
"sundial",
"sunglasses",
"sunglasses",
"sunscreen",
"suspension bridge",
"mop",
"sweatshirt",
"swim trunks / shorts",
"swing",
"electrical switch",
"syringe",
"table lamp",
"tank",
"tape player",
"teapot",
"teddy bear",
"television",
"tennis ball",
"thatched roof",
"front curtain",
"thimble",
"threshing machine",
"throne",
"tile roof",
"toaster",
"tobacco shop",
"toilet seat",
"torch",
"totem pole",
"tow truck",
"toy store",
"tractor",
"semi-trailer truck",
"tray",
"trench coat",
"tricycle",
"trimaran",
"tripod",
"triumphal arch",
"trolleybus",
"trombone",
"hot tub",
"turnstile",
"typewriter keyboard",
"umbrella",
"unicycle",
"upright piano",
"vacuum cleaner",
"vase",
"vaulted or arched ceiling",
"velvet fabric",
"vending machine",
"vestment",
"viaduct",
"violin",
"volleyball",
"waffle iron",
"wall clock",
"wallet",
"wardrobe",
"military aircraft",
"sink",
"washing machine",
"water bottle",
"water jug",
"water tower",
"whiskey jug",
"whistle",
"hair wig",
"window screen",
"window shade",
"Windsor tie",
"wine bottle",
"airplane wing",
"wok",
"wooden spoon",
"wool",
"split-rail fence",
"shipwreck",
"sailboat",
"yurt",
"website",
"comic book",
"crossword",
"traffic or street sign",
"traffic light",
"dust jacket",
"menu",
"plate",
"guacamole",
"consomme",
"hot pot",
"trifle",
"ice cream",
"popsicle",
"baguette",
"bagel",
"pretzel",
"cheeseburger",
"hot dog",
"mashed potatoes",
"cabbage",
"broccoli",
"cauliflower",
"zucchini",
"spaghetti squash",
"acorn squash",
"butternut squash",
"cucumber",
"artichoke",
"bell pepper",
"cardoon",
"mushroom",
"Granny Smith apple",
"strawberry",
"orange",
"lemon",
"fig",
"pineapple",
"banana",
"jackfruit",
"cherimoya (custard apple)",
"pomegranate",
"hay",
"carbonara",
"chocolate syrup",
"dough",
"meatloaf",
"pizza",
"pot pie",
"burrito",
"red wine",
"espresso",
"tea cup",
"eggnog",
"mountain",
"bubble",
"cliff",
"coral reef",
"geyser",
"lakeshore",
"promontory",
"sandbar",
"beach",
"valley",
"volcano",
"baseball player",
"bridegroom",
"scuba diver",
"rapeseed",
"daisy",
"yellow lady's slipper",
"corn",
"acorn",
"rose hip",
"horse chestnut seed",
"coral fungus",
"agaric",
"gyromitra",
"stinkhorn mushroom",
"earth star fungus",
"hen of the woods mushroom",
"bolete",
"corn cob",
"toilet paper",
]
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/datasets/builders/imagefolder_builder.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import logging
import os
import shutil
import warnings
import lavis.common.utils as utils
import torch.distributed as dist
from lavis.common.dist_utils import is_dist_avail_and_initialized, is_main_process
from lavis.common.registry import registry
from lavis.datasets.data_utils import extract_archive
from lavis.processors.base_processor import BaseProcessor
from omegaconf import OmegaConf
from torchvision.datasets.utils import download_url
class BaseDatasetBuilder:
train_dataset_cls, eval_dataset_cls = None, None
def __init__(self, cfg=None):
super().__init__()
if cfg is None:
# help to create datasets from default config.
self.config = load_dataset_config(self.default_config_path())
elif isinstance(cfg, str):
self.config = load_dataset_config(cfg)
else:
# when called from task.build_dataset()
self.config = cfg
self.data_type = self.config.data_type
self.vis_processors = {"train": BaseProcessor(), "eval": BaseProcessor()}
self.text_processors = {"train": BaseProcessor(), "eval": BaseProcessor()}
def build_datasets(self):
# download, split, etc...
# only called on 1 GPU/TPU in distributed
if is_main_process():
self._download_data()
if is_dist_avail_and_initialized():
dist.barrier()
# at this point, all the annotations and image/videos should be all downloaded to the specified locations.
logging.info("Building datasets...")
datasets = self.build() # dataset['train'/'val'/'test']
return datasets
def build_processors(self):
vis_proc_cfg = self.config.get("vis_processor")
txt_proc_cfg = self.config.get("text_processor")
if vis_proc_cfg is not None:
vis_train_cfg = vis_proc_cfg.get("train")
vis_eval_cfg = vis_proc_cfg.get("eval")
self.vis_processors["train"] = self._build_proc_from_cfg(vis_train_cfg)
self.vis_processors["eval"] = self._build_proc_from_cfg(vis_eval_cfg)
if txt_proc_cfg is not None:
txt_train_cfg = txt_proc_cfg.get("train")
txt_eval_cfg = txt_proc_cfg.get("eval")
self.text_processors["train"] = self._build_proc_from_cfg(txt_train_cfg)
self.text_processors["eval"] = self._build_proc_from_cfg(txt_eval_cfg)
@staticmethod
def _build_proc_from_cfg(cfg):
return (
registry.get_processor_class(cfg.name).from_config(cfg)
if cfg is not None
else None
)
@classmethod
def default_config_path(cls, type="default"):
return utils.get_abs_path(cls.DATASET_CONFIG_DICT[type])
def _download_data(self):
self._download_ann()
self._download_vis()
def _download_ann(self):
"""
Download annotation files if necessary.
All the vision-language datasets should have annotations of unified format.
storage_path can be:
(1) relative/absolute: will be prefixed with env.cache_root to make full path if relative.
(2) basename/dirname: will be suffixed with base name of URL if dirname is provided.
Local annotation paths should be relative.
"""
anns = self.config.build_info.annotations
splits = anns.keys()
cache_root = registry.get_path("cache_root")
for split in splits:
info = anns[split]
urls, storage_paths = info.get("url", None), info.storage
if isinstance(urls, str):
urls = [urls]
if isinstance(storage_paths, str):
storage_paths = [storage_paths]
assert len(urls) == len(storage_paths)
for url_or_filename, storage_path in zip(urls, storage_paths):
# if storage_path is relative, make it full by prefixing with cache_root.
if not os.path.isabs(storage_path):
storage_path = os.path.join(cache_root, storage_path)
dirname = os.path.dirname(storage_path)
if not os.path.exists(dirname):
os.makedirs(dirname)
if os.path.isfile(url_or_filename):
src, dst = url_or_filename, storage_path
if not os.path.exists(dst):
shutil.copyfile(src=src, dst=dst)
else:
logging.info("Using existing file {}.".format(dst))
else:
if os.path.isdir(storage_path):
# if only dirname is provided, suffix with basename of URL.
raise ValueError(
"Expecting storage_path to be a file path, got directory {}".format(
storage_path
)
)
else:
filename = os.path.basename(storage_path)
download_url(url=url_or_filename, root=dirname, filename=filename)
def _download_vis(self):
storage_path = self.config.build_info.get(self.data_type).storage
storage_path = utils.get_cache_path(storage_path)
if not os.path.exists(storage_path):
warnings.warn(
f"""
The specified path {storage_path} for visual inputs does not exist.
Please provide a correct path to the visual inputs or
refer to datasets/download_scripts/README.md for downloading instructions.
"""
)
def build(self):
"""
Create datasets by split, each inheriting torch.utils.data.Dataset.
# build() can be dataset-specific. Overwrite to customize.
"""
self.build_processors()
build_info = self.config.build_info
ann_info = build_info.annotations
vis_info = build_info.get(self.data_type)
datasets = dict()
for split in ann_info.keys():
if split not in ["train", "val", "test"]:
continue
is_train = split == "train"
# processors
vis_processor = (
self.vis_processors["train"]
if is_train
else self.vis_processors["eval"]
)
text_processor = (
self.text_processors["train"]
if is_train
else self.text_processors["eval"]
)
# annotation path
ann_paths = ann_info.get(split).storage
if isinstance(ann_paths, str):
ann_paths = [ann_paths]
abs_ann_paths = []
for ann_path in ann_paths:
if not os.path.isabs(ann_path):
ann_path = utils.get_cache_path(ann_path)
abs_ann_paths.append(ann_path)
ann_paths = abs_ann_paths
# visual data storage path
vis_path = vis_info.storage
if not os.path.isabs(vis_path):
# vis_path = os.path.join(utils.get_cache_path(), vis_path)
vis_path = utils.get_cache_path(vis_path)
if not os.path.exists(vis_path):
warnings.warn("storage path {} does not exist.".format(vis_path))
# create datasets
dataset_cls = self.train_dataset_cls if is_train else self.eval_dataset_cls
datasets[split] = dataset_cls(
vis_processor=vis_processor,
text_processor=text_processor,
ann_paths=ann_paths,
vis_root=vis_path,
)
return datasets
def load_dataset_config(cfg_path):
cfg = OmegaConf.load(cfg_path).datasets
cfg = cfg[list(cfg.keys())[0]]
return cfg
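# Hedged sketch (not part of the original file) of the config shape that build() and
# _download_ann() above expect; the dataset name, URL and storage paths are placeholders,
# not real LAVIS defaults.
if __name__ == "__main__":
    example_cfg = OmegaConf.create(
        {
            "datasets": {
                "my_caption_dataset": {
                    "data_type": "images",
                    "build_info": {
                        "annotations": {
                            "train": {
                                "url": "https://example.com/train.json",
                                "storage": "my_dataset/annotations/train.json",
                            }
                        },
                        "images": {"storage": "my_dataset/images/"},
                    },
                }
            }
        }
    )
    cfg = example_cfg.datasets[list(example_cfg.datasets.keys())[0]]
    print(cfg.data_type, list(cfg.build_info.annotations.keys()))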
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/datasets/builders/base_dataset_builder.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
from lavis.common.registry import registry
from lavis.common.utils import get_cache_path
from lavis.datasets.builders.base_dataset_builder import BaseDatasetBuilder
from lavis.datasets.datasets.video_vqa_datasets import VideoQADataset
class VideoQABuilder(BaseDatasetBuilder):
train_dataset_cls = VideoQADataset
eval_dataset_cls = VideoQADataset
def build(self):
datasets = super().build()
ans2label = self.config.build_info.annotations.get("ans2label")
if ans2label is None:
raise ValueError("ans2label is not specified in build_info.")
ans2label = get_cache_path(ans2label.storage)
for split in datasets:
datasets[split]._build_class_labels(ans2label)
return datasets
@registry.register_builder("msrvtt_qa")
class MSRVTTQABuilder(VideoQABuilder):
DATASET_CONFIG_DICT = {
"default": "configs/datasets/msrvtt/defaults_qa.yaml",
}
@registry.register_builder("msvd_qa")
class MSVDQABuilder(VideoQABuilder):
DATASET_CONFIG_DICT = {
"default": "configs/datasets/msvd/defaults_qa.yaml",
}
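# Hedged note (not part of the original file): build_info.annotations.ans2label is
# expected to point at a JSON file mapping answer strings to integer class ids,
# e.g. {"yes": 0, "no": 1, ...}; the dataset's _build_class_labels consumes it.
# The tiny file written below is only a stand-in for demonstration.
if __name__ == "__main__":
    import json
    import os
    import tempfile

    demo_path = os.path.join(tempfile.gettempdir(), "ans2label_demo.json")
    with open(demo_path, "w") as f:
        json.dump({"yes": 0, "no": 1, "blue": 2}, f)
    print("wrote demo ans2label to", demo_path)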
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/datasets/builders/video_qa_builder.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
from lavis.datasets.builders.base_dataset_builder import load_dataset_config
from lavis.datasets.builders.caption_builder import (
COCOCapBuilder,
MSRVTTCapBuilder,
MSVDCapBuilder,
VATEXCapBuilder,
)
from lavis.datasets.builders.image_text_pair_builder import (
ConceptualCaption12MBuilder,
ConceptualCaption3MBuilder,
VGCaptionBuilder,
SBUCaptionBuilder,
)
from lavis.datasets.builders.classification_builder import (
NLVRBuilder,
SNLIVisualEntailmentBuilder,
)
from lavis.datasets.builders.imagefolder_builder import ImageNetBuilder
from lavis.datasets.builders.video_qa_builder import MSRVTTQABuilder, MSVDQABuilder
from lavis.datasets.builders.vqa_builder import (
COCOVQABuilder,
OKVQABuilder,
VGVQABuilder,
GQABuilder,
)
from lavis.datasets.builders.retrieval_builder import (
MSRVTTRetrievalBuilder,
DiDeMoRetrievalBuilder,
COCORetrievalBuilder,
Flickr30kBuilder,
)
from lavis.datasets.builders.dialogue_builder import AVSDDialBuilder
from lavis.common.registry import registry
__all__ = [
"COCOCapBuilder",
"COCORetrievalBuilder",
"COCOVQABuilder",
"ConceptualCaption12MBuilder",
"ConceptualCaption3MBuilder",
"DiDeMoRetrievalBuilder",
"Flickr30kBuilder",
"GQABuilder",
"ImageNetBuilder",
"MSRVTTCapBuilder",
"MSRVTTQABuilder",
"MSRVTTRetrievalBuilder",
"MSVDCapBuilder",
"MSVDQABuilder",
"NLVRBuilder",
"OKVQABuilder",
"SBUCaptionBuilder",
"SNLIVisualEntailmentBuilder",
"VATEXCapBuilder",
"VGCaptionBuilder",
"VGVQABuilder",
"AVSDDialBuilder",
]
def load_dataset(name, cfg_path=None, vis_path=None, data_type=None):
"""
Example
>>> dataset = load_dataset("coco_caption", cfg_path=None)
>>> splits = dataset.keys()
>>> print([len(dataset[split]) for split in splits])
"""
if cfg_path is None:
cfg = None
else:
cfg = load_dataset_config(cfg_path)
try:
builder = registry.get_builder_class(name)(cfg)
except TypeError:
print(
f"Dataset {name} not found. Available datasets:\n"
+ ", ".join([str(k) for k in dataset_zoo.get_names()])
)
exit(1)
if vis_path is not None:
if data_type is None:
# use default data type in the config
data_type = builder.config.data_type
assert (
data_type in builder.config.build_info
), f"Invalid data_type {data_type} for {name}."
builder.config.build_info.get(data_type).storage = vis_path
dataset = builder.build_datasets()
return dataset
class DatasetZoo:
def __init__(self) -> None:
self.dataset_zoo = {
k: list(v.DATASET_CONFIG_DICT.keys())
for k, v in sorted(registry.mapping["builder_name_mapping"].items())
}
def get_names(self):
return list(self.dataset_zoo.keys())
dataset_zoo = DatasetZoo()
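# Hedged usage sketch (not part of the original file): dataset_zoo lists the registered
# builder names, and the registry resolves a name to its builder class without touching
# any data on disk.
if __name__ == "__main__":
    print(dataset_zoo.get_names()[:5])
    builder_cls = registry.get_builder_class("coco_caption")
    print(builder_cls.default_config_path())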
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/datasets/builders/__init__.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
from lavis.datasets.builders.base_dataset_builder import BaseDatasetBuilder
from lavis.datasets.datasets.retrieval_datasets import (
RetrievalDataset,
RetrievalEvalDataset,
VideoRetrievalDataset,
VideoRetrievalEvalDataset,
)
from lavis.common.registry import registry
@registry.register_builder("msrvtt_retrieval")
class MSRVTTRetrievalBuilder(BaseDatasetBuilder):
train_dataset_cls = VideoRetrievalDataset
eval_dataset_cls = VideoRetrievalEvalDataset
DATASET_CONFIG_DICT = {"default": "configs/datasets/msrvtt/defaults_ret.yaml"}
@registry.register_builder("didemo_retrieval")
class DiDeMoRetrievalBuilder(BaseDatasetBuilder):
train_dataset_cls = VideoRetrievalDataset
eval_dataset_cls = VideoRetrievalEvalDataset
DATASET_CONFIG_DICT = {"default": "configs/datasets/didemo/defaults_ret.yaml"}
@registry.register_builder("coco_retrieval")
class COCORetrievalBuilder(BaseDatasetBuilder):
train_dataset_cls = RetrievalDataset
eval_dataset_cls = RetrievalEvalDataset
DATASET_CONFIG_DICT = {"default": "configs/datasets/coco/defaults_ret.yaml"}
@registry.register_builder("flickr30k")
class Flickr30kBuilder(BaseDatasetBuilder):
train_dataset_cls = RetrievalDataset
eval_dataset_cls = RetrievalEvalDataset
DATASET_CONFIG_DICT = {"default": "configs/datasets/flickr30k/defaults.yaml"}
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/datasets/builders/retrieval_builder.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
from lavis.datasets.builders.base_dataset_builder import BaseDatasetBuilder
from lavis.common.registry import registry
from lavis.datasets.datasets.aok_vqa_datasets import AOKVQADataset, AOKVQAEvalDataset
from lavis.datasets.datasets.coco_vqa_datasets import COCOVQADataset, COCOVQAEvalDataset
from lavis.datasets.datasets.vg_vqa_datasets import VGVQADataset
from lavis.datasets.datasets.gqa_datasets import GQADataset, GQAEvalDataset
@registry.register_builder("coco_vqa")
class COCOVQABuilder(BaseDatasetBuilder):
train_dataset_cls = COCOVQADataset
eval_dataset_cls = COCOVQAEvalDataset
DATASET_CONFIG_DICT = {
"default": "configs/datasets/coco/defaults_vqa.yaml",
"eval": "configs/datasets/coco/eval_vqa.yaml",
}
@registry.register_builder("vg_vqa")
class VGVQABuilder(BaseDatasetBuilder):
train_dataset_cls = VGVQADataset
DATASET_CONFIG_DICT = {"default": "configs/datasets/vg/defaults_vqa.yaml"}
@registry.register_builder("ok_vqa")
class OKVQABuilder(COCOVQABuilder):
DATASET_CONFIG_DICT = {
"default": "configs/datasets/okvqa/defaults.yaml",
}
@registry.register_builder("aok_vqa")
class AOKVQABuilder(BaseDatasetBuilder):
train_dataset_cls = AOKVQADataset
eval_dataset_cls = AOKVQAEvalDataset
DATASET_CONFIG_DICT = {"default": "configs/datasets/aokvqa/defaults.yaml"}
@registry.register_builder("gqa")
class GQABuilder(BaseDatasetBuilder):
train_dataset_cls = GQADataset
eval_dataset_cls = GQAEvalDataset
DATASET_CONFIG_DICT = {
"default": "configs/datasets/gqa/defaults.yaml",
"balanced_val": "configs/datasets/gqa/balanced_val.yaml",
"balanced_testdev": "configs/datasets/gqa/balanced_testdev.yaml",
}
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/datasets/builders/vqa_builder.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
from lavis.common.registry import registry
from lavis.datasets.builders.base_dataset_builder import BaseDatasetBuilder
from lavis.datasets.datasets.avsd_dialogue_datasets import (
AVSDDialDataset,
AVSDDialEvalDataset,
)
@registry.register_builder("avsd_dialogue")
class AVSDDialBuilder(BaseDatasetBuilder):
train_dataset_cls = AVSDDialDataset
eval_dataset_cls = AVSDDialEvalDataset
DATASET_CONFIG_DICT = {"default": "configs/datasets/avsd/defaults_dial.yaml"}
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/datasets/builders/dialogue_builder.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import os
from lavis.common.registry import registry
from lavis.datasets.builders.base_dataset_builder import BaseDatasetBuilder
from lavis.datasets.datasets.image_text_pair_datasets import ImageTextPairDataset
from lavis.datasets.datasets.laion_dataset import LaionDataset
@registry.register_builder("conceptual_caption_3m")
class ConceptualCaption3MBuilder(BaseDatasetBuilder):
train_dataset_cls = ImageTextPairDataset
DATASET_CONFIG_DICT = {
"default": "configs/datasets/conceptual_caption/defaults_3m.yaml"
}
@registry.register_builder("conceptual_caption_12m")
class ConceptualCaption12MBuilder(BaseDatasetBuilder):
train_dataset_cls = ImageTextPairDataset
DATASET_CONFIG_DICT = {
"default": "configs/datasets/conceptual_caption/defaults_12m.yaml"
}
@registry.register_builder("sbu_caption")
class SBUCaptionBuilder(BaseDatasetBuilder):
train_dataset_cls = ImageTextPairDataset
DATASET_CONFIG_DICT = {"default": "configs/datasets/sbu_caption/defaults.yaml"}
@registry.register_builder("vg_caption")
class VGCaptionBuilder(BaseDatasetBuilder):
train_dataset_cls = ImageTextPairDataset
DATASET_CONFIG_DICT = {"default": "configs/datasets/vg/defaults_caption.yaml"}
@registry.register_builder("laion2B_multi")
class Laion2BMultiBuilder(BaseDatasetBuilder):
train_dataset_cls = LaionDataset
DATASET_CONFIG_DICT = {"default": "configs/datasets/laion/defaults_2B_multi.yaml"}
def _download_ann(self):
pass
def _download_vis(self):
pass
def build(self):
self.build_processors()
build_info = self.config.build_info
datasets = dict()
split = "train" # laion dataset only has train split
# create datasets
# [NOTE] return inner_datasets (wds.DataPipeline)
dataset_cls = self.train_dataset_cls
datasets[split] = dataset_cls(
vis_processor=self.vis_processors[split],
text_processor=self.text_processors[split],
location=build_info.storage,
).inner_dataset
return datasets
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/datasets/builders/image_text_pair_builder.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
from lavis.datasets.builders.base_dataset_builder import BaseDatasetBuilder
from lavis.datasets.datasets.coco_caption_datasets import (
COCOCapDataset,
COCOCapEvalDataset,
NoCapsEvalDataset,
)
from lavis.common.registry import registry
from lavis.datasets.datasets.video_caption_datasets import (
VideoCaptionDataset,
VideoCaptionEvalDataset,
)
@registry.register_builder("coco_caption")
class COCOCapBuilder(BaseDatasetBuilder):
train_dataset_cls = COCOCapDataset
eval_dataset_cls = COCOCapEvalDataset
DATASET_CONFIG_DICT = {
"default": "configs/datasets/coco/defaults_cap.yaml",
}
@registry.register_builder("nocaps")
class NoCapsBuilder(BaseDatasetBuilder):
eval_dataset_cls = NoCapsEvalDataset
DATASET_CONFIG_DICT = {
"default": "configs/datasets/nocaps/defaults.yaml",
}
@registry.register_builder("msrvtt_caption")
class MSRVTTCapBuilder(BaseDatasetBuilder):
train_dataset_cls = VideoCaptionDataset
eval_dataset_cls = VideoCaptionEvalDataset
DATASET_CONFIG_DICT = {
"default": "configs/datasets/msrvtt/defaults_cap.yaml",
}
@registry.register_builder("msvd_caption")
class MSVDCapBuilder(BaseDatasetBuilder):
train_dataset_cls = VideoCaptionDataset
eval_dataset_cls = VideoCaptionEvalDataset
DATASET_CONFIG_DICT = {
"default": "configs/datasets/msvd/defaults_cap.yaml",
}
@registry.register_builder("vatex_caption")
class VATEXCapBuilder(BaseDatasetBuilder):
train_dataset_cls = VideoCaptionDataset
eval_dataset_cls = VideoCaptionEvalDataset
DATASET_CONFIG_DICT = {
"default": "configs/datasets/vatex/defaults_cap.yaml",
}
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/datasets/builders/caption_builder.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
from lavis.common.registry import registry
from lavis.datasets.builders.base_dataset_builder import BaseDatasetBuilder
from lavis.datasets.datasets.nlvr_datasets import NLVRDataset, NLVREvalDataset
from lavis.datasets.datasets.snli_ve_datasets import SNLIVisualEntialmentDataset
@registry.register_builder("nlvr")
class NLVRBuilder(BaseDatasetBuilder):
train_dataset_cls = NLVRDataset
eval_dataset_cls = NLVREvalDataset
DATASET_CONFIG_DICT = {"default": "configs/datasets/nlvr/defaults.yaml"}
@registry.register_builder("snli_ve")
class SNLIVisualEntailmentBuilder(BaseDatasetBuilder):
train_dataset_cls = SNLIVisualEntialmentDataset
eval_dataset_cls = SNLIVisualEntialmentDataset
DATASET_CONFIG_DICT = {"default": "configs/datasets/snli_ve/defaults.yaml"}
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/datasets/builders/classification_builder.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import os
from collections import OrderedDict
from lavis.datasets.datasets.base_dataset import BaseDataset
from PIL import Image
from torchvision import datasets
class ImageFolderDataset(BaseDataset):
def __init__(self, vis_processor, vis_root, classnames=[], **kwargs):
super().__init__(vis_processor=vis_processor, vis_root=vis_root)
self.inner_dataset = datasets.ImageFolder(vis_root)
self.annotation = [
{"image": elem[0], "label": elem[1], "image_id": elem[0]}
for elem in self.inner_dataset.imgs
]
self.classnames = classnames
self._add_instance_ids()
def __len__(self):
return len(self.inner_dataset)
def __getitem__(self, index):
ann = self.annotation[index]
img_fn = ann["image"]
image_path = os.path.join(self.vis_root, img_fn)
image = Image.open(image_path).convert("RGB")
image = self.vis_processor(image)
return {
"image": image,
"label": ann["label"],
"image_id": ann["image_id"],
"instance_id": ann["instance_id"],
}
def displ_item(self, index):
sample, ann = self.__getitem__(index), self.annotation[index]
return OrderedDict(
{
"file": ann["image"],
"label": self.classnames[ann["label"]],
"image": sample["image"],
}
)
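# Minimal usage sketch (hypothetical path, not part of the original file): wrap a
# torchvision ImageFolder-style directory (one sub-folder per class) and use a simple
# transform pipeline as the visual processor.
if __name__ == "__main__":
    from torchvision import transforms

    demo_transform = transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor()])
    demo_dataset = ImageFolderDataset(
        vis_processor=demo_transform,
        vis_root="/path/to/imagefolder",  # hypothetical directory with one sub-folder per class
        classnames=["cat", "dog"],
    )
    print(len(demo_dataset), demo_dataset[0]["label"])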
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/datasets/datasets/imagefolder_dataset.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import webdataset as wds
from lavis.datasets.datasets.base_dataset import BaseDataset
class LaionDataset(BaseDataset):
def __init__(self, vis_processor, text_processor, location):
super().__init__(vis_processor=vis_processor, text_processor=text_processor)
self.inner_dataset = wds.DataPipeline(
wds.ResampledShards(location),
wds.tarfile_to_samples(handler=wds.warn_and_continue),
wds.shuffle(1000, handler=wds.warn_and_continue),
wds.decode("pilrgb", handler=wds.warn_and_continue),
wds.to_tuple("jpg", "json", handler=wds.warn_and_continue),
wds.map_tuple(self.vis_processor, handler=wds.warn_and_continue),
wds.map(self.to_dict, handler=wds.warn_and_continue),
)
def to_dict(self, sample):
return {
"image": sample[0],
"text_input": self.text_processor(sample[1]["caption"]),
}
if __name__ == "__main__":
from torchvision import transforms
def to_image_text_pair(sample):
return sample[0], sample[1]["caption"]
normalize = transforms.Normalize(
(0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)
)
transform_train = transforms.Compose(
[
transforms.RandomResizedCrop(256, scale=(0.2, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]
)
dataset = LaionDataset(
vis_processor=transform_train,
text_processor=lambda x: x,
location="/export/laion/laion2B-multi/part-00000/{00000..01743}.tar",
)
import torch
loader = torch.utils.data.DataLoader(dataset.inner_dataset, batch_size=2)
print(next(iter(loader))["text_input"])
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/datasets/datasets/laion_dataset.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import json
from typing import Iterable
from torch.utils.data import Dataset, ConcatDataset
from torch.utils.data.dataloader import default_collate
class BaseDataset(Dataset):
def __init__(
self, vis_processor=None, text_processor=None, vis_root=None, ann_paths=[]
):
"""
vis_root (string): Root directory of images (e.g. coco/images/)
ann_root (string): directory to store the annotation file
"""
self.vis_root = vis_root
self.annotation = []
for ann_path in ann_paths:
self.annotation.extend(json.load(open(ann_path, "r")))
self.vis_processor = vis_processor
self.text_processor = text_processor
self._add_instance_ids()
def __len__(self):
return len(self.annotation)
def collater(self, samples):
return default_collate(samples)
def set_processors(self, vis_processor, text_processor):
self.vis_processor = vis_processor
self.text_processor = text_processor
def _add_instance_ids(self, key="instance_id"):
for idx, ann in enumerate(self.annotation):
ann[key] = str(idx)
class ConcatDataset(ConcatDataset):
def __init__(self, datasets: Iterable[Dataset]) -> None:
super().__init__(datasets)
def collater(self, samples):
        # TODO: for now this only supports datasets that share the same underlying collater implementation
all_keys = set()
for s in samples:
all_keys.update(s)
shared_keys = all_keys
for s in samples:
shared_keys = shared_keys & set(s.keys())
samples_shared_keys = []
for s in samples:
samples_shared_keys.append({k: s[k] for k in s.keys() if k in shared_keys})
return self.datasets[0].collater(samples_shared_keys)
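# Minimal usage sketch (toy samples, not part of the original file): the ConcatDataset
# collater keeps only the keys shared by every sample and then delegates to the first
# dataset's collater, so datasets with slightly different sample dicts can be mixed.
if __name__ == "__main__":
    base = BaseDataset()  # empty dataset; only its default collater is used here
    concat = ConcatDataset([base, base])
    toy_samples = [
        {"text_input": "a cat", "image_id": 0, "extra_key": 1},
        {"text_input": "a dog", "image_id": 1},
    ]
    # "extra_key" is dropped because it is not shared by every sample
    print(concat.collater(toy_samples))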
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/datasets/datasets/base_dataset.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import os
import random
from collections import OrderedDict
from lavis.datasets.datasets.multimodal_classification_datasets import (
MultimodalClassificationDataset,
)
from PIL import Image
class __DisplMixin:
def displ_item(self, index):
sample, ann = self.__getitem__(index), self.annotation[index]
return OrderedDict(
{
"file_L": ann["images"][0],
"file_R": ann["images"][1],
"sentence": ann["sentence"],
"label": ann["label"],
"image": [sample["image0"], sample["image1"]],
}
)
class NLVRDataset(MultimodalClassificationDataset, __DisplMixin):
def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
super().__init__(vis_processor, text_processor, vis_root, ann_paths)
self.class_labels = self._build_class_labels()
def _build_class_labels(self):
return {"False": 0, "True": 1}
@staticmethod
def _flip(samples):
sentence = samples["text_input"]
image0, image1 = samples["image0"], samples["image1"]
if "left" not in sentence and "right" not in sentence:
if random.random() < 0.5:
image0, image1 = image1, image0
else:
if random.random() < 0.5:
sentence = sentence.replace("left", "[TEMP_TOKEN]")
sentence = sentence.replace("right", "left")
sentence = sentence.replace("[TEMP_TOKEN]", "right")
image0, image1 = image1, image0
samples["text_input"] = sentence
samples["image0"] = image0
samples["image1"] = image1
return samples
def __getitem__(self, index):
ann = self.annotation[index]
image0_path = os.path.join(self.vis_root, ann["images"][0])
image0 = Image.open(image0_path).convert("RGB")
image0 = self.vis_processor(image0)
image1_path = os.path.join(self.vis_root, ann["images"][1])
image1 = Image.open(image1_path).convert("RGB")
image1 = self.vis_processor(image1)
sentence = self.text_processor(ann["sentence"])
label = self.class_labels[ann["label"]]
return self._flip(
{
"image0": image0,
"image1": image1,
"text_input": sentence,
"label": label,
# "image_id": ann["image_id"],
"instance_id": ann["instance_id"],
}
)
class NLVREvalDataset(NLVRDataset):
@staticmethod
def _flip(samples):
return samples
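# Minimal usage sketch (placeholder inputs, not part of the original file): _flip
# randomly swaps the two images and, when the sentence mentions "left"/"right",
# swaps those words as well so the label stays consistent after the swap.
if __name__ == "__main__":
    random.seed(0)
    toy_sample = {
        "text_input": "the dog is to the left of the cat",
        "image0": "IMG_A",  # stand-ins for processed image tensors
        "image1": "IMG_B",
        "label": 1,
    }
    print(NLVRDataset._flip(dict(toy_sample)))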
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/datasets/datasets/nlvr_datasets.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import time
import random
import torch
from lavis.datasets.data_utils import move_to_cuda
from torch.utils.data import DataLoader
class MultiIterLoader:
"""
A simple wrapper for iterating over multiple iterators.
Args:
loaders (List[Loader]): List of Iterator loaders.
ratios (List[float]): List of ratios to sample from each loader. If None, all loaders are sampled uniformly.
"""
def __init__(self, loaders, ratios=None):
# assert all loaders has __next__ method
for loader in loaders:
assert hasattr(
loader, "__next__"
), "Loader {} has no __next__ method.".format(loader)
if ratios is None:
ratios = [1.0] * len(loaders)
else:
assert len(ratios) == len(loaders)
ratios = [float(ratio) / sum(ratios) for ratio in ratios]
self.loaders = loaders
self.ratios = ratios
def __next__(self):
# random sample from each loader by ratio
loader_idx = random.choices(range(len(self.loaders)), self.ratios, k=1)[0]
return next(self.loaders[loader_idx])
class PrefetchLoader(object):
"""
Modified from https://github.com/ChenRocks/UNITER.
overlap compute and cuda data transfer
(copied and then modified from nvidia apex)
"""
def __init__(self, loader):
self.loader = loader
self.stream = torch.cuda.Stream()
def __iter__(self):
loader_it = iter(self.loader)
self.preload(loader_it)
batch = self.next(loader_it)
while batch is not None:
is_tuple = isinstance(batch, tuple)
if is_tuple:
task, batch = batch
if is_tuple:
yield task, batch
else:
yield batch
batch = self.next(loader_it)
def __len__(self):
return len(self.loader)
def preload(self, it):
try:
self.batch = next(it)
except StopIteration:
self.batch = None
return
# if record_stream() doesn't work, another option is to make sure
# device inputs are created on the main stream.
# self.next_input_gpu = torch.empty_like(self.next_input,
# device='cuda')
# self.next_target_gpu = torch.empty_like(self.next_target,
# device='cuda')
# Need to make sure the memory allocated for next_* is not still in use
# by the main stream at the time we start copying to next_*:
# self.stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(self.stream):
self.batch = move_to_cuda(self.batch)
# more code for the alternative if record_stream() doesn't work:
# copy_ will record the use of the pinned source tensor in this
# side stream.
# self.next_input_gpu.copy_(self.next_input, non_blocking=True)
# self.next_target_gpu.copy_(self.next_target, non_blocking=True)
# self.next_input = self.next_input_gpu
# self.next_target = self.next_target_gpu
def next(self, it):
torch.cuda.current_stream().wait_stream(self.stream)
batch = self.batch
if batch is not None:
record_cuda_stream(batch)
self.preload(it)
return batch
def __getattr__(self, name):
method = self.loader.__getattribute__(name)
return method
def record_cuda_stream(batch):
if isinstance(batch, torch.Tensor):
batch.record_stream(torch.cuda.current_stream())
elif isinstance(batch, list) or isinstance(batch, tuple):
for t in batch:
record_cuda_stream(t)
elif isinstance(batch, dict):
for t in batch.values():
record_cuda_stream(t)
else:
pass
class IterLoader:
"""
A wrapper to convert DataLoader as an infinite iterator.
Modified from:
https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/iter_based_runner.py
"""
def __init__(self, dataloader: DataLoader, use_distributed: bool = False):
self._dataloader = dataloader
self.iter_loader = iter(self._dataloader)
self._use_distributed = use_distributed
self._epoch = 0
@property
def epoch(self) -> int:
return self._epoch
def __next__(self):
try:
data = next(self.iter_loader)
except StopIteration:
self._epoch += 1
if hasattr(self._dataloader.sampler, "set_epoch") and self._use_distributed:
self._dataloader.sampler.set_epoch(self._epoch)
time.sleep(2) # Prevent possible deadlock during epoch transition
self.iter_loader = iter(self._dataloader)
data = next(self.iter_loader)
return data
def __iter__(self):
return self
def __len__(self):
return len(self._dataloader)
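# Minimal usage sketch (toy loaders, not part of the original file): IterLoader turns a
# finite DataLoader into an endless stream, and MultiIterLoader draws each next batch
# from one of several such streams with probability proportional to the given ratios.
if __name__ == "__main__":
    loader_a = IterLoader(DataLoader(list(range(4)), batch_size=2))
    loader_b = IterLoader(DataLoader(list(range(100, 104)), batch_size=2))
    multi_loader = MultiIterLoader([loader_a, loader_b], ratios=[3, 1])
    for _ in range(4):
        print(next(multi_loader))  # batches drawn roughly 3x more often from loader_a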
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/datasets/datasets/dataloader_utils.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import os
from PIL import Image
from lavis.datasets.datasets.vqa_datasets import VQADataset
class VGVQADataset(VQADataset):
def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
super().__init__(vis_processor, text_processor, vis_root, ann_paths)
def __getitem__(self, index):
ann = self.annotation[index]
image_path = os.path.join(self.vis_root, ann["image"])
image = Image.open(image_path).convert("RGB")
image = self.vis_processor(image)
question = self.text_processor(ann["question"])
answers = [ann["answer"]]
# TODO this should be configured better
weights = [0.2]
return {
"image": image,
"text_input": question,
"answers": answers,
"weights": weights,
}
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/datasets/datasets/vg_vqa_datasets.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
from abc import abstractmethod
from lavis.datasets.datasets.base_dataset import BaseDataset
class MultimodalClassificationDataset(BaseDataset):
def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
super().__init__(vis_processor, text_processor, vis_root, ann_paths)
self.class_labels = None
@abstractmethod
def _build_class_labels(self):
pass
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/datasets/datasets/multimodal_classification_datasets.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import os
from collections import OrderedDict
from lavis.datasets.datasets.base_dataset import BaseDataset
from PIL import Image
class __DisplMixin:
def displ_item(self, index):
sample, ann = self.__getitem__(index), self.annotation[index]
return OrderedDict(
{
"file": os.path.basename(ann["image"]),
"caption": ann["caption"],
"image": sample["image"],
}
)
class ImageTextPairDataset(BaseDataset, __DisplMixin):
def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
"""
vis_root (string): Root directory of images (e.g. coco/images/)
ann_root (string): directory to store the annotation file
"""
super().__init__(vis_processor, text_processor, vis_root, ann_paths)
def __getitem__(self, index):
# TODO this assumes image input, not general enough
ann = self.annotation[index]
image_path = os.path.join(self.vis_root, ann["image"])
image = Image.open(image_path).convert("RGB")
image = self.vis_processor(image)
caption = self.text_processor(ann["caption"])
return {"image": image, "text_input": caption}
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/datasets/datasets/image_text_pair_datasets.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import os
from lavis.datasets.datasets.base_dataset import BaseDataset
from lavis.datasets.datasets.caption_datasets import CaptionDataset
class VideoCaptionDataset(CaptionDataset):
def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
"""
vis_root (string): Root directory of images (e.g. coco/images/)
ann_root (string): directory to store the annotation file
split (string): val or test
"""
super().__init__(vis_processor, text_processor, vis_root, ann_paths)
def __getitem__(self, index):
ann = self.annotation[index]
vname = ann["video"]
video_path = os.path.join(self.vis_root, vname)
video = self.vis_processor(video_path)
caption = self.text_processor(ann["caption"])
# "image_id" is kept to stay compatible with the COCO evaluation format
return {
"video": video,
"text_input": caption,
"image_id": self.img_ids[ann["image_id"]],
}
class VideoCaptionEvalDataset(BaseDataset):
def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
"""
vis_root (string): Root directory of images (e.g. coco/images/)
ann_root (string): directory to store the annotation file
split (string): val or test
"""
super().__init__(vis_processor, text_processor, vis_root, ann_paths)
def __getitem__(self, index):
ann = self.annotation[index]
vname = ann["video"]
video_path = os.path.join(self.vis_root, vname)
video = self.vis_processor(video_path)
return {
"video": video,
"image_id": ann["image_id"],
"instance_id": ann["instance_id"],
}
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/datasets/datasets/video_caption_datasets.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import json
import os
from collections import OrderedDict
from lavis.datasets.datasets.multimodal_classification_datasets import (
MultimodalClassificationDataset,
)
class __DisplMixin:
def displ_item(self, index):
ann = self.annotation[index]
vname = ann["video"]
vpath = os.path.join(self.vis_root, vname)
return OrderedDict(
{"file": vpath, "question": ann["question"], "answer": ann["answer"]}
)
class VideoQADataset(MultimodalClassificationDataset, __DisplMixin):
def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
super().__init__(vis_processor, text_processor, vis_root, ann_paths)
def _build_class_labels(self, ans_path):
ans2label = json.load(open(ans_path))
self.class_labels = ans2label
def _get_answer_label(self, answer):
if answer in self.class_labels:
return self.class_labels[answer]
else:
return len(self.class_labels)
def __getitem__(self, index):
assert (
self.class_labels
), f"class_labels of {__class__.__name__} is not built yet."
ann = self.annotation[index]
vname = ann["video"]
vpath = os.path.join(self.vis_root, vname)
frms = self.vis_processor(vpath)
question = self.text_processor(ann["question"])
return {
"video": frms,
"text_input": question,
"answers": self._get_answer_label(ann["answer"]),
"question_id": ann["question_id"],
"instance_id": ann["instance_id"],
}
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/datasets/datasets/video_vqa_datasets.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import os
from collections import OrderedDict
from PIL import Image
from lavis.datasets.datasets.base_dataset import BaseDataset
import json
import copy
class __DisplMixin:
def displ_item(self, index):
sample, ann = self.__getitem__(index), self.annotation[index]
return OrderedDict(
{
"file": ann["image"],
"dialogue": ann["dialogue"],
"image": sample["image"],
}
)
class DialogueDataset(BaseDataset, __DisplMixin):
def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
"""
vis_root (string): Root directory of images (e.g. coco/images/)
ann_root (string): directory to store the annotation file
"""
self.vis_root = vis_root
self.annotation = []
for ann_path in ann_paths:
dialogs = json.load(open(ann_path, "r"))["dialogs"]
for dialog in dialogs:
all_turns = dialog["dialog"]
dialogue_context = []
for turn in all_turns:
dialog_instance = copy.deepcopy(dialog)
question = turn["question"]
answer = turn["answer"]
dialog_instance["dialog"] = copy.deepcopy(dialogue_context)
dialog_instance["question"] = question
dialog_instance["answer"] = answer
self.annotation.append(dialog_instance)
dialogue_context.append(turn)
self.vis_processor = vis_processor
self.text_processor = text_processor
self._add_instance_ids()
self.img_ids = {}
n = 0
for ann in self.annotation:
img_id = ann["image_id"]
if img_id not in self.img_ids.keys():
self.img_ids[img_id] = n
n += 1
def __getitem__(self, index):
ann = self.annotation[index]
image_path = os.path.join(self.vis_root, ann["image"])
image = Image.open(image_path).convert("RGB")
image = self.vis_processor(image)
caption = self.text_processor(ann["caption"])
return {
"image": image,
"text_input": caption,
"image_id": self.img_ids[ann["image_id"]],
}
class DialogueEvalDataset(BaseDataset, __DisplMixin):
def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
"""
vis_root (string): Root directory of images (e.g. coco/images/)
ann_root (string): directory to store the annotation file
split (string): val or test
"""
self.vis_root = vis_root
self.annotation = []
for ann_path in ann_paths:
dialogs = json.load(open(ann_path, "r"))["dialogs"]
for dialog in dialogs:
all_turns = dialog["dialog"]
dialogue_context = all_turns[:-1]
last_turn = all_turns[-1]
question = last_turn["question"]
answer = last_turn["answer"]
dialog["dialog"] = dialogue_context
dialog["question"] = question
dialog["answer"] = answer
self.annotation.append(dialog)
self.vis_processor = vis_processor
self.text_processor = text_processor
self._add_instance_ids()
self.img_ids = {}
n = 0
for ann in self.annotation:
img_id = ann["image_id"]
if img_id not in self.img_ids.keys():
self.img_ids[img_id] = n
n += 1
def __getitem__(self, index):
ann = self.annotation[index]
image_path = os.path.join(self.vis_root, ann["image"])
image = Image.open(image_path).convert("RGB")
image = self.vis_processor(image)
return {
"image": image,
"image_id": ann["image_id"],
"instance_id": ann["instance_id"],
}
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/datasets/datasets/dialogue_datasets.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import os
import json
from PIL import Image
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
from lavis.datasets.datasets.caption_datasets import CaptionDataset, CaptionEvalDataset
COCOCapDataset = CaptionDataset
class COCOCapEvalDataset(CaptionEvalDataset):
def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
"""
vis_root (string): Root directory of images (e.g. coco/images/)
ann_root (string): directory to store the annotation file
split (string): val or test
"""
super().__init__(vis_processor, text_processor, vis_root, ann_paths)
def __getitem__(self, index):
ann = self.annotation[index]
image_path = os.path.join(self.vis_root, ann["image"])
image = Image.open(image_path).convert("RGB")
image = self.vis_processor(image)
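        # e.g. "val2014/COCO_val2014_000000391895.jpg" -> "000000391895". Note that
        # str.strip(".jpg") removes the characters {".", "j", "p", "g"} from both ends,
        # which only works here because COCO file stems end in digits.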
img_id = ann["image"].split("/")[-1].strip(".jpg").split("_")[-1]
return {
"image": image,
"image_id": img_id,
"instance_id": ann["instance_id"],
}
class NoCapsEvalDataset(CaptionEvalDataset):
def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
"""
vis_root (string): Root directory of images (e.g. coco/images/)
ann_root (string): directory to store the annotation file
split (string): val or test
"""
super().__init__(vis_processor, text_processor, vis_root, ann_paths)
def __getitem__(self, index):
ann = self.annotation[index]
image_path = os.path.join(self.vis_root, ann["image"])
image = Image.open(image_path).convert("RGB")
image = self.vis_processor(image)
img_id = ann["img_id"]
return {
"image": image,
"image_id": img_id,
"instance_id": ann["instance_id"],
}
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/datasets/datasets/coco_caption_datasets.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
from collections import OrderedDict
import json
import os
import torch
from PIL import Image
from lavis.datasets.datasets.vqa_datasets import VQADataset, VQAEvalDataset
class __DisplMixin:
def displ_item(self, index):
sample, ann = self.__getitem__(index), self.annotation[index]
return OrderedDict(
{
"file": ann["image"],
"question": ann["question"],
"question_id": ann["question_id"],
"direct_answers": "; ".join(ann["direct_answers"]),
"choices": "; ".join(ann["choices"]),
"correct_choice": ann["choices"][ann["correct_choice_idx"]],
"image": sample["image"],
}
)
class AOKVQADataset(VQADataset, __DisplMixin):
def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
super().__init__(vis_processor, text_processor, vis_root, ann_paths)
def __getitem__(self, index):
ann = self.annotation[index]
image_path = os.path.join(self.vis_root, ann["image"])
image = Image.open(image_path).convert("RGB")
image = self.vis_processor(image)
question = self.text_processor(ann["question"])
answer_key = "direct_answers"
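        # Each unique answer is weighted by its relative frequency among the annotated
        # answers, so the weights for a question sum to 1.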
answer_weight = {}
for answer in ann[answer_key]:
if answer in answer_weight.keys():
answer_weight[answer] += 1 / len(ann[answer_key])
else:
answer_weight[answer] = 1 / len(ann[answer_key])
answers = list(answer_weight.keys())
weights = list(answer_weight.values())
return {
"image": image,
"text_input": question,
"answers": answers,
"weights": weights,
}
class AOKVQAEvalDataset(VQAEvalDataset, __DisplMixin):
def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
"""
vis_root (string): Root directory of images (e.g. coco/images/)
ann_root (string): directory to store the annotation file
"""
self.vis_root = vis_root
self.annotation = json.load(open(ann_paths[0]))
answer_list_path = ann_paths[1]
if os.path.exists(answer_list_path):
self.answer_list = json.load(open(answer_list_path))
else:
self.answer_list = None
try:
self.coco_fmt_qust_file = ann_paths[2]
self.coco_fmt_anno_file = ann_paths[3]
except IndexError:
self.coco_fmt_qust_file = None
self.coco_fmt_anno_file = None
self.vis_processor = vis_processor
self.text_processor = text_processor
self._add_instance_ids()
def collater(self, samples):
(
image_list,
question_list,
question_id_list,
instance_id_list,
choices_list,
correct_choice_idx_list,
direct_answers_list,
) = ([], [], [], [], [], [], [])
for sample in samples:
image_list.append(sample["image"])
question_list.append(sample["text_input"])
question_id_list.append(sample["question_id"])
instance_id_list.append(sample["instance_id"])
choices_list.append(sample["choices"])
correct_choice_idx_list.append(sample["correct_choice_idx"])
direct_answers_list.append(sample["direct_answers"])
return {
"image": torch.stack(image_list, dim=0),
"text_input": question_list,
"question_id": question_id_list,
"instance_id": instance_id_list,
"choices": choices_list,
"correct_choice_idx": correct_choice_idx_list,
"direct_answers": direct_answers_list,
}
def __getitem__(self, index):
ann = self.annotation[index]
image_path = os.path.join(self.vis_root, ann["image"])
image = Image.open(image_path).convert("RGB")
image = self.vis_processor(image)
question = self.text_processor(ann["question"])
choices = ann["choices"]
if "correct_choice_idx" in ann:
correct_choice_idx = ann["correct_choice_idx"]
else:
correct_choice_idx = None
if "direct_answers" in ann:
direct_answers = ann["direct_answers"]
else:
direct_answers = None
return {
"image": image,
"text_input": question,
"question_id": ann["question_id"],
"instance_id": ann["instance_id"],
"choices": choices,
"correct_choice_idx": correct_choice_idx,
"direct_answers": direct_answers,
}
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/datasets/datasets/aok_vqa_datasets.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import os
from collections import OrderedDict
from lavis.datasets.datasets.multimodal_classification_datasets import (
MultimodalClassificationDataset,
)
from PIL import Image
class __DisplMixin:
def displ_item(self, index):
sample, ann = self.__getitem__(index), self.annotation[index]
return OrderedDict(
{
"file": os.path.basename(ann["image"]),
"sentence": ann["sentence"],
"label": ann["label"],
"image": sample["image"],
}
)
class SNLIVisualEntialmentDataset(MultimodalClassificationDataset, __DisplMixin):
def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
super().__init__(vis_processor, text_processor, vis_root, ann_paths)
self.class_labels = self._build_class_labels()
def _build_class_labels(self):
return {"contradiction": 0, "neutral": 1, "entailment": 2}
def __getitem__(self, index):
ann = self.annotation[index]
image_id = ann["image"]
image_path = os.path.join(self.vis_root, "%s.jpg" % image_id)
image = Image.open(image_path).convert("RGB")
image = self.vis_processor(image)
sentence = self.text_processor(ann["sentence"])
return {
"image": image,
"text_input": sentence,
"label": self.class_labels[ann["label"]],
"image_id": image_id,
"instance_id": ann["instance_id"],
}
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/datasets/datasets/snli_ve_datasets.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import torch
from lavis.datasets.datasets.base_dataset import BaseDataset
class VQADataset(BaseDataset):
def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
super().__init__(vis_processor, text_processor, vis_root, ann_paths)
def collater(self, samples):
image_list, question_list, answer_list, weight_list = [], [], [], []
num_answers = []
for sample in samples:
image_list.append(sample["image"])
question_list.append(sample["text_input"])
weight_list.extend(sample["weights"])
answers = sample["answers"]
answer_list.extend(answers)
num_answers.append(len(answers))
return {
"image": torch.stack(image_list, dim=0),
"text_input": question_list,
"answer": answer_list,
"weight": torch.Tensor(weight_list),
"n_answers": torch.LongTensor(num_answers),
}
class VQAEvalDataset(BaseDataset):
def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
super().__init__(vis_processor, text_processor, vis_root, ann_paths)
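# Minimal usage sketch (dummy tensors, not part of the original file): the VQA collater
# flattens every sample's answer list into one long list and records how many answers
# each sample contributed in "n_answers", so the per-sample grouping can be recovered.
if __name__ == "__main__":
    dataset = VQADataset(None, None, None, [])  # empty dataset; only the collater is exercised
    dummy_samples = [
        {
            "image": torch.zeros(3, 4, 4),
            "text_input": "what color is it?",
            "answers": ["red", "blue"],
            "weights": [0.5, 0.5],
        },
        {
            "image": torch.zeros(3, 4, 4),
            "text_input": "how many dogs?",
            "answers": ["two"],
            "weights": [1.0],
        },
    ]
    batch = dataset.collater(dummy_samples)
    print(batch["answer"], batch["n_answers"])  # ['red', 'blue', 'two'] tensor([2, 1])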
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/datasets/datasets/vqa_datasets.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import os
import json
from PIL import Image
from lavis.datasets.datasets.vqa_datasets import VQADataset, VQAEvalDataset
from collections import OrderedDict
class __DisplMixin:
def displ_item(self, index):
sample, ann = self.__getitem__(index), self.annotation[index]
return OrderedDict(
{
"file": ann["image"],
"question": ann["question"],
"question_id": ann["question_id"],
"answers": "; ".join(ann["answer"]),
"image": sample["image"],
}
)
class COCOVQADataset(VQADataset, __DisplMixin):
def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
super().__init__(vis_processor, text_processor, vis_root, ann_paths)
def __getitem__(self, index):
ann = self.annotation[index]
image_path = os.path.join(self.vis_root, ann["image"])
image = Image.open(image_path).convert("RGB")
image = self.vis_processor(image)
question = self.text_processor(ann["question"])
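        # Each unique answer is weighted by its relative frequency among the annotated
        # answers, so the weights for a question sum to 1.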
answer_weight = {}
for answer in ann["answer"]:
if answer in answer_weight.keys():
answer_weight[answer] += 1 / len(ann["answer"])
else:
answer_weight[answer] = 1 / len(ann["answer"])
answers = list(answer_weight.keys())
weights = list(answer_weight.values())
return {
"image": image,
"text_input": question,
"answers": answers,
"weights": weights,
}
class COCOVQAEvalDataset(VQAEvalDataset, __DisplMixin):
def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
"""
vis_root (string): Root directory of images (e.g. coco/images/)
ann_root (string): directory to store the annotation file
"""
self.vis_root = vis_root
self.annotation = json.load(open(ann_paths[0]))
answer_list_path = ann_paths[1]
if os.path.exists(answer_list_path):
self.answer_list = json.load(open(answer_list_path))
else:
self.answer_list = None
try:
self.coco_fmt_qust_file = ann_paths[2]
self.coco_fmt_anno_file = ann_paths[3]
except IndexError:
self.coco_fmt_qust_file = None
self.coco_fmt_anno_file = None
self.vis_processor = vis_processor
self.text_processor = text_processor
self._add_instance_ids()
def __getitem__(self, index):
ann = self.annotation[index]
image_path = os.path.join(self.vis_root, ann["image"])
image = Image.open(image_path).convert("RGB")
image = self.vis_processor(image)
question = self.text_processor(ann["question"])
return {
"image": image,
"text_input": question,
"question_id": ann["question_id"],
"instance_id": ann["instance_id"],
}
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/datasets/datasets/coco_vqa_datasets.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import os
import json
from PIL import Image
from lavis.datasets.datasets.vqa_datasets import VQADataset, VQAEvalDataset
from collections import OrderedDict
class __DisplMixin:
def displ_item(self, index):
sample, ann = self.__getitem__(index), self.annotation[index]
return OrderedDict(
{
"file": ann["image"],
"question": ann["question"],
"question_id": ann["question_id"],
"answers": "; ".join(ann["answer"]),
"image": sample["image"],
}
)
class GQADataset(VQADataset, __DisplMixin):
def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
super().__init__(vis_processor, text_processor, vis_root, ann_paths)
def __getitem__(self, index):
ann = self.annotation[index]
image_path = os.path.join(self.vis_root, ann["image"])
image = Image.open(image_path).convert("RGB")
image = self.vis_processor(image)
question = self.text_processor(ann["question"])
answers = [ann["answer"]]
weights = [1]
return {
"image": image,
"text_input": question,
"answers": answers,
"weights": weights,
}
class GQAEvalDataset(VQAEvalDataset, __DisplMixin):
def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
"""
vis_root (string): Root directory of images (e.g. gqa/images/)
ann_root (string): directory to store the annotation file
"""
self.vis_root = vis_root
self.annotation = json.load(open(ann_paths[0]))
## TODO: support inference method == 'ranking'
        answer_list_path = ann_paths[1] if len(ann_paths) > 1 else ""
if os.path.exists(answer_list_path):
self.answer_list = json.load(open(answer_list_path))
else:
self.answer_list = None
self.vis_processor = vis_processor
self.text_processor = text_processor
self._add_instance_ids()
def __getitem__(self, index):
ann = self.annotation[index]
image_path = os.path.join(self.vis_root, ann["image"])
image = Image.open(image_path).convert("RGB")
image = self.vis_processor(image)
question = self.text_processor(ann["question"])
if "answer" in ann:
# answer is a string
answer = ann["answer"]
else:
answer = None
return {
"image": image,
"text_input": question,
"answer": answer,
"question_id": ann["question_id"],
"instance_id": ann["instance_id"],
}
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/datasets/datasets/gqa_datasets.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import torch
from lavis.datasets.datasets.dialogue_datasets import (
DialogueDataset,
DialogueEvalDataset,
)
class AVSDDialDataset(DialogueDataset):
def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
"""
vis_root (string): Root directory of images (e.g. coco/images/)
ann_root (string): directory to store the annotation file
split (string): val or test
"""
super().__init__(vis_processor, text_processor, vis_root, ann_paths)
def __getitem__(self, index):
ann = self.annotation[index]
vname = ann["image_id"]
video = self.vis_processor(self.vis_root, vname)
dialogue = self.text_processor(ann)
# "image_id" is kept to stay compatible with the COCO evaluation format
return {
"video_fts": video["video_fts"],
"video_token_type_ids": video["token_type_ids"],
"input_ids": dialogue["input_ids"],
"token_type_ids": dialogue["token_type_ids"],
"labels": dialogue["labels"],
"image_id": ann["image_id"],
"instance_id": ann["instance_id"],
}
def collater(self, samples):
input_ids, token_type_ids, labels, video_fts, video_token_type_ids = (
[],
[],
[],
[],
[],
)
for i in samples:
input_ids.append(i["input_ids"])
token_type_ids.append(i["token_type_ids"])
labels.append(i["labels"])
video_fts.append(i["video_fts"])
video_token_type_ids.append(i["video_token_type_ids"])
input_ids = self.text_processor.padding(input_ids)
labels = self.text_processor.padding(
labels, -1
        )  # ignore token index -1 by default
video_fts = self.vis_processor.padding(video_fts)
token_type_ids = self.text_processor.padding(token_type_ids)
video_token_type_ids = self.text_processor.padding(video_token_type_ids)
token_type_ids = torch.cat([video_token_type_ids, token_type_ids], dim=1)
attn_mask = self.text_processor.get_attention_mask(input_ids)
video_mask = self.vis_processor.get_attention_mask(video_fts)
attn_mask = torch.cat([video_mask, attn_mask], dim=1)
video_labels = (
torch.ones((video_fts.size(0), video_fts.size(1))).long() * -1
        )  # ignore token index -1 by default
labels = torch.cat([video_labels, labels], dim=1)
samples = {}
samples["input_ids"] = input_ids
samples["token_type_ids"] = token_type_ids
samples["labels"] = labels
samples["video_fts"] = video_fts
samples["attn_mask"] = attn_mask
return samples
class AVSDDialEvalDataset(DialogueEvalDataset):
def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
"""
vis_root (string): Root directory of images (e.g. coco/images/)
ann_root (string): directory to store the annotation file
split (string): val or test
"""
super().__init__(vis_processor, text_processor, vis_root, ann_paths)
def __getitem__(self, index):
ann = self.annotation[index]
vname = ann["image_id"]
video = self.vis_processor(self.vis_root, vname)
dialogue = self.text_processor(ann)
# "image_id" is kept to stay compatible with the COCO evaluation format
return {
"video_fts": video["video_fts"],
"video_token_type_ids": video["token_type_ids"],
"input_ids": dialogue["input_ids"],
"token_type_ids": dialogue["token_type_ids"],
"labels": dialogue["labels"],
"image_id": ann["image_id"],
"instance_id": ann["instance_id"],
}
def collater(self, samples):
input_ids, token_type_ids, labels, video_fts, video_token_type_ids = (
[],
[],
[],
[],
[],
)
for i in samples:
input_ids.append(i["input_ids"])
token_type_ids.append(i["token_type_ids"])
labels.append(i["labels"])
video_fts.append(i["video_fts"])
video_token_type_ids.append(i["video_token_type_ids"])
input_ids = self.text_processor.padding(input_ids)
labels = self.text_processor.padding(
labels, -1
        )  # ignore token index -1 by default
video_fts = self.vis_processor.padding(video_fts)
token_type_ids = self.text_processor.padding(token_type_ids)
video_token_type_ids = self.text_processor.padding(video_token_type_ids)
token_type_ids = torch.cat([video_token_type_ids, token_type_ids], dim=1)
attn_mask = self.text_processor.get_attention_mask(input_ids)
video_mask = self.vis_processor.get_attention_mask(video_fts)
attn_mask = torch.cat([video_mask, attn_mask], dim=1)
video_labels = (
torch.ones((video_fts.size(0), video_fts.size(1))).long() * -1
        )  # ignore token index -1 by default
labels = torch.cat([video_labels, labels], dim=1)
samples = {}
samples["input_ids"] = input_ids
samples["token_type_ids"] = token_type_ids
samples["labels"] = labels
samples["video_fts"] = video_fts
samples["attn_mask"] = attn_mask
return samples
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/datasets/datasets/avsd_dialogue_datasets.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import os
from collections import OrderedDict
from lavis.datasets.datasets.base_dataset import BaseDataset
from PIL import Image
class __DisplMixin:
def displ_item(self, index):
sample, ann = self.__getitem__(index), self.annotation[index]
return OrderedDict(
{
"file": ann["image"],
"caption": ann["caption"],
"image": sample["image"],
}
)
class CaptionDataset(BaseDataset, __DisplMixin):
def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
"""
vis_root (string): Root directory of images (e.g. coco/images/)
ann_root (string): directory to store the annotation file
"""
super().__init__(vis_processor, text_processor, vis_root, ann_paths)
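        # Map the (possibly non-contiguous) annotation image_ids to dense indices; the
        # dense index is what __getitem__ returns as "image_id".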
self.img_ids = {}
n = 0
for ann in self.annotation:
img_id = ann["image_id"]
if img_id not in self.img_ids.keys():
self.img_ids[img_id] = n
n += 1
def __getitem__(self, index):
# TODO this assumes image input, not general enough
ann = self.annotation[index]
image_path = os.path.join(self.vis_root, ann["image"])
image = Image.open(image_path).convert("RGB")
image = self.vis_processor(image)
caption = self.text_processor(ann["caption"])
return {
"image": image,
"text_input": caption,
"image_id": self.img_ids[ann["image_id"]],
}
class CaptionEvalDataset(BaseDataset, __DisplMixin):
def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
"""
vis_root (string): Root directory of images (e.g. coco/images/)
ann_root (string): directory to store the annotation file
split (string): val or test
"""
super().__init__(vis_processor, text_processor, vis_root, ann_paths)
def __getitem__(self, index):
ann = self.annotation[index]
image_path = os.path.join(self.vis_root, ann["image"])
image = Image.open(image_path).convert("RGB")
image = self.vis_processor(image)
return {
"image": image,
"image_id": ann["image_id"],
"instance_id": ann["instance_id"],
}
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/datasets/datasets/caption_datasets.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import os
from collections import OrderedDict
from lavis.datasets.datasets.base_dataset import BaseDataset
from PIL import Image
class __DisplMixin:
def displ_item(self, index):
sample, ann = self.__getitem__(index), self.annotation[index]
visual_key = "image" if "image" in ann else "video"
return OrderedDict(
{
"file": ann[visual_key],
"caption": ann["caption"],
visual_key: sample[visual_key],
}
)
class RetrievalDataset(BaseDataset, __DisplMixin):
def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
"""
vis_root (string): Root directory of images (e.g. coco/images/)
ann_root (string): directory to store the annotation file
"""
super().__init__(vis_processor, text_processor, vis_root, ann_paths)
self.img_ids = {}
n = 0
for ann in self.annotation:
img_id = ann["image_id"]
if img_id not in self.img_ids.keys():
self.img_ids[img_id] = n
n += 1
def __getitem__(self, index):
ann = self.annotation[index]
image_path = os.path.join(self.vis_root, ann["image"])
image = Image.open(image_path).convert("RGB")
image = self.vis_processor(image)
caption = self.text_processor(ann["caption"])
return {
"image": image,
"text_input": caption,
"image_id": self.img_ids[ann["image_id"]],
"instance_id": ann["instance_id"],
}
class RetrievalEvalDataset(BaseDataset, __DisplMixin):
def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
"""
vis_root (string): Root directory of images (e.g. coco/images/)
ann_root (string): directory to store the annotation file
split (string): val or test
"""
super().__init__(vis_processor, text_processor, vis_root, ann_paths)
self.text = []
self.image = []
self.txt2img = {}
self.img2txt = {}
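        # Flatten every caption into self.text and keep bidirectional lookups:
        # img2txt[i] lists the caption indices of image i, and txt2img[t] gives the
        # image index of caption t (used when scoring retrieval metrics).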
txt_id = 0
for img_id, ann in enumerate(self.annotation):
self.image.append(ann["image"])
self.img2txt[img_id] = []
for i, caption in enumerate(ann["caption"]):
self.text.append(self.text_processor(caption))
self.img2txt[img_id].append(txt_id)
self.txt2img[txt_id] = img_id
txt_id += 1
def __getitem__(self, index):
image_path = os.path.join(self.vis_root, self.annotation[index]["image"])
image = Image.open(image_path).convert("RGB")
image = self.vis_processor(image)
return {"image": image, "index": index}
class VideoRetrievalDataset(BaseDataset, __DisplMixin):
def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
"""
vis_root (string): Root directory of videos.
ann_root (string): directory to store the annotation file
"""
super().__init__(vis_processor, text_processor, vis_root, ann_paths)
self.img_ids = {}
n = 0
for ann in self.annotation:
img_id = ann["video"]
if img_id not in self.img_ids.keys():
self.img_ids[img_id] = n
n += 1
def __getitem__(self, index):
ann = self.annotation[index]
vpath = os.path.join(self.vis_root, ann["video"])
video = self.vis_processor(vpath)
caption = self.text_processor(ann["caption"])
# return image, caption, self.img_ids[ann['image_id']]
return {
"video": video,
"text_input": caption,
"image_id": self.img_ids[ann["video"]],
}
class VideoRetrievalEvalDataset(BaseDataset, __DisplMixin):
def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
"""
vis_root (string): Root directory of videos.
ann_root (string): directory to store the annotation file
split (string): val or test
"""
super().__init__(vis_processor, text_processor, vis_root, ann_paths)
self.text = []
self.image = []
self.txt2img = {}
self.img2txt = {}
txt_id = 0
for img_id, ann in enumerate(self.annotation):
self.image.append(ann["video"])
self.img2txt[img_id] = []
for i, caption in enumerate(ann["caption"]):
self.text.append(self.text_processor(caption))
self.img2txt[img_id].append(txt_id)
self.txt2img[txt_id] = img_id
txt_id += 1
def __getitem__(self, index):
ann = self.annotation[index]
vpath = os.path.join(self.vis_root, ann["video"])
video = self.vis_processor(vpath)
return {"video": video, "index": index}
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/datasets/datasets/retrieval_datasets.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
Based on huggingface code base
https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert
"""
import math
import os
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import Tensor, device
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss
import torch.nn.functional as F
from transformers import BatchEncoding, PreTrainedTokenizer
from transformers.activations import ACT2FN
from transformers.file_utils import (
ModelOutput,
)
from transformers.modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
NextSentencePredictorOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from transformers.modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from transformers.utils import logging
from transformers.models.bert.configuration_bert import BertConfig
from lavis.common.utils import get_abs_path
from lavis.models.base_model import BaseEncoder
logging.set_verbosity_error()
logger = logging.get_logger(__name__)
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word and position embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(
config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id
)
self.position_embeddings = nn.Embedding(
config.max_position_embeddings, config.hidden_size
)
if config.add_type_embeddings:
self.token_type_embeddings = nn.Embedding(
config.type_vocab_size, config.hidden_size
)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer(
"position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))
)
self.position_embedding_type = getattr(
config, "position_embedding_type", "absolute"
)
self.config = config
def forward(
self,
input_ids=None,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
past_key_values_length=0,
):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[
:, past_key_values_length : seq_length + past_key_values_length
]
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
if token_type_ids is not None:
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
else:
embeddings = inputs_embeds
if self.position_embedding_type == "absolute":
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(nn.Module):
def __init__(self, config, is_cross_attention):
super().__init__()
self.config = config
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(
config, "embedding_size"
):
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads)
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
if is_cross_attention:
self.key = nn.Linear(config.encoder_width, self.all_head_size)
self.value = nn.Linear(config.encoder_width, self.all_head_size)
else:
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = getattr(
config, "position_embedding_type", "absolute"
)
if (
self.position_embedding_type == "relative_key"
or self.position_embedding_type == "relative_key_query"
):
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(
2 * config.max_position_embeddings - 1, self.attention_head_size
)
self.save_attention = False
def save_attn_gradients(self, attn_gradients):
self.attn_gradients = attn_gradients
def get_attn_gradients(self):
return self.attn_gradients
def save_attention_map(self, attention_map):
self.attention_map = attention_map
def get_attention_map(self):
return self.attention_map
def transpose_for_scores(self, x):
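        # (batch, seq_len, all_head_size) -> (batch, num_heads, seq_len, head_size)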
new_x_shape = x.size()[:-1] + (
self.num_attention_heads,
self.attention_head_size,
)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
mixed_query_layer = self.query(hidden_states)
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
is_cross_attention = encoder_hidden_states is not None
if is_cross_attention:
key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
attention_mask = encoder_attention_mask
elif past_key_value is not None:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
else:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
query_layer = self.transpose_for_scores(mixed_query_layer)
past_key_value = (key_layer, value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
if (
self.position_embedding_type == "relative_key"
or self.position_embedding_type == "relative_key_query"
):
seq_length = hidden_states.size()[1]
position_ids_l = torch.arange(
seq_length, dtype=torch.long, device=hidden_states.device
).view(-1, 1)
position_ids_r = torch.arange(
seq_length, dtype=torch.long, device=hidden_states.device
).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(
distance + self.max_position_embeddings - 1
)
positional_embedding = positional_embedding.to(
dtype=query_layer.dtype
) # fp16 compatibility
if self.position_embedding_type == "relative_key":
relative_position_scores = torch.einsum(
"bhld,lrd->bhlr", query_layer, positional_embedding
)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == "relative_key_query":
relative_position_scores_query = torch.einsum(
"bhld,lrd->bhlr", query_layer, positional_embedding
)
relative_position_scores_key = torch.einsum(
"bhrd,lrd->bhlr", key_layer, positional_embedding
)
attention_scores = (
attention_scores
+ relative_position_scores_query
+ relative_position_scores_key
)
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
if is_cross_attention and self.save_attention:
self.save_attention_map(attention_probs)
attention_probs.register_hook(self.save_attn_gradients)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs_dropped = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs_dropped = attention_probs_dropped * head_mask
context_layer = torch.matmul(attention_probs_dropped, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (
(context_layer, attention_probs) if output_attentions else (context_layer,)
)
outputs = outputs + (past_key_value,)
return outputs
class BertSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config, is_cross_attention=False):
super().__init__()
self.self = BertSelfAttention(config, is_cross_attention)
self.output = BertSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads,
self.self.num_attention_heads,
self.self.attention_head_size,
self.pruned_heads,
)
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = (
self.self.attention_head_size * self.self.num_attention_heads
)
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
self_outputs = self.self(
hidden_states,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[
1:
] # add attentions if we output them
return outputs
class BertIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config, layer_num):
super().__init__()
self.config = config
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = BertAttention(config)
self.layer_num = layer_num
# compatibility for ALBEF and BLIP
try:
# ALBEF & ALPRO
fusion_layer = self.config.fusion_layer
add_cross_attention = (
fusion_layer <= layer_num and self.config.add_cross_attention
)
self.fusion_layer = fusion_layer
except AttributeError:
# BLIP
self.fusion_layer = self.config.num_hidden_layers
add_cross_attention = self.config.add_cross_attention
# if self.config.add_cross_attention:
if add_cross_attention:
self.crossattention = BertAttention(
config, is_cross_attention=self.config.add_cross_attention
)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
mode=None,
):
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = (
past_key_value[:2] if past_key_value is not None else None
)
self_attention_outputs = self.attention(
hidden_states,
attention_mask,
head_mask,
output_attentions=output_attentions,
past_key_value=self_attn_past_key_value,
)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:-1]
present_key_value = self_attention_outputs[-1]
# TODO line 482 in albef/models/xbert.py
# compatibility for ALBEF and BLIP
if mode in ["multimodal", "fusion"] and hasattr(self, "crossattention"):
assert (
encoder_hidden_states is not None
), "encoder_hidden_states must be given for cross-attention layers"
if isinstance(encoder_hidden_states, list):
cross_attention_outputs = self.crossattention(
attention_output,
attention_mask,
head_mask,
encoder_hidden_states[
(self.layer_num - self.fusion_layer)
% len(encoder_hidden_states)
],
encoder_attention_mask[
(self.layer_num - self.fusion_layer)
% len(encoder_hidden_states)
],
output_attentions=output_attentions,
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:-1]
else:
cross_attention_outputs = self.crossattention(
attention_output,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
output_attentions=output_attentions,
)
attention_output = cross_attention_outputs[0]
outputs = (
outputs + cross_attention_outputs[1:-1]
) # add cross attentions if we output attention weights
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk,
self.chunk_size_feed_forward,
self.seq_len_dim,
attention_output,
)
outputs = (layer_output,) + outputs
outputs = outputs + (present_key_value,)
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
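# Illustrative note (added): with an ALBEF-style config where, say, fusion_layer=6,
# num_hidden_layers=12 and add_cross_attention=True, only layers 6..11 build a
# `crossattention` module, so the lower half of the stack stays text-only:
#   >>> [hasattr(BertLayer(cfg, i), "crossattention") for i in range(12)]
#   # -> [False] * 6 + [True] * 6
# A BLIP-style config has no fusion_layer attribute, so every layer simply follows
# config.add_cross_attention. The feed-forward call is wrapped in apply_chunking_to_forward,
# which, for chunk_size_feed_forward > 0, splits the sequence dimension into chunks to bound
# peak activation memory; a chunk size of 0 runs the MLP on the whole sequence at once.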
class BertEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList(
[BertLayer(config, i) for i in range(config.num_hidden_layers)]
)
self.gradient_checkpointing = False
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
mode="multimodal",
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = (
() if output_attentions and self.config.add_cross_attention else None
)
next_decoder_cache = () if use_cache else None
try:
# ALBEF
fusion_layer = self.config.fusion_layer
except AttributeError:
# BLIP
fusion_layer = self.config.num_hidden_layers
if mode == "text":
start_layer = 0
# output_layer = self.config.fusion_layer
output_layer = fusion_layer
elif mode == "fusion":
# start_layer = self.config.fusion_layer
start_layer = fusion_layer
output_layer = self.config.num_hidden_layers
elif mode == "multimodal":
start_layer = 0
output_layer = self.config.num_hidden_layers
# compatibility for ALBEF and BLIP
# for i in range(self.config.num_hidden_layers):
for i in range(start_layer, output_layer):
layer_module = self.layer[i]
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
past_key_value = past_key_values[i] if past_key_values is not None else None
# TODO pay attention to this.
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warn(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, past_key_value, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
mode=mode,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
mode=mode,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[-1],)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
next_decoder_cache,
all_hidden_states,
all_self_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_decoder_cache,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
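# Illustrative note (added): `mode` selects which slice of the layer stack runs. With an
# ALBEF-style config where fusion_layer=6 and num_hidden_layers=12 (numbers are examples):
#   mode="text"       -> layers 0..5   (unimodal text encoding)
#   mode="fusion"     -> layers 6..11  (expects hidden_states produced by the text pass)
#   mode="multimodal" -> layers 0..11  (one pass over the full stack, BLIP-style)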
class BertPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
class BertOnlyMLMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = BertLMPredictionHead(config)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class BertPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = BertConfig
base_model_prefix = "bert"
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
class BertModel(BertPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in `Attention is
all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
    To behave as a decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration
    set to :obj:`True`. To be used in a Seq2Seq model, the model needs to be initialized with both the :obj:`is_decoder`
    argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
    input to the forward pass.
"""
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config) if add_pooling_layer else None
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
def get_extended_attention_mask(
self,
attention_mask: Tensor,
input_shape: Tuple[int],
device: device,
is_decoder: bool,
) -> Tensor:
"""
Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
Arguments:
attention_mask (:obj:`torch.Tensor`):
Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
input_shape (:obj:`Tuple[int]`):
The shape of the input to the model.
device: (:obj:`torch.device`):
The device of the input to the model.
Returns:
            :obj:`torch.Tensor` The extended attention mask, with the same dtype as :obj:`attention_mask.dtype`.
"""
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
elif attention_mask.dim() == 2:
# Provided a padding mask of dimensions [batch_size, seq_length]
# - if the model is a decoder, apply a causal mask in addition to the padding mask
# - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
if is_decoder:
batch_size, seq_length = input_shape
seq_ids = torch.arange(seq_length, device=device)
causal_mask = (
seq_ids[None, None, :].repeat(batch_size, seq_length, 1)
<= seq_ids[None, :, None]
)
# in case past_key_values are used we need to add a prefix ones mask to the causal mask
# causal and attention masks must have same type with pytorch version < 1.3
causal_mask = causal_mask.to(attention_mask.dtype)
if causal_mask.shape[1] < attention_mask.shape[1]:
prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
causal_mask = torch.cat(
[
torch.ones(
(batch_size, seq_length, prefix_seq_len),
device=device,
dtype=causal_mask.dtype,
),
causal_mask,
],
axis=-1,
)
extended_attention_mask = (
causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
)
else:
extended_attention_mask = attention_mask[:, None, None, :]
else:
raise ValueError(
"Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
input_shape, attention_mask.shape
)
)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(
dtype=self.dtype
) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
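    # Worked example (added; shapes are illustrative): a padding mask [[1, 1, 1, 0]] of shape
    # (1, 4) becomes, for an encoder, a (1, 1, 1, 4) tensor [[[[0., 0., 0., -10000.]]]] that is
    # simply added to the raw attention scores before the softmax. For a decoder, the padding
    # mask is first multiplied with a lower-triangular causal mask of shape (1, 4, 4), so
    # position i can only attend to non-padded positions j <= i.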
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
is_decoder=False,
mode="multimodal",
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
"""
output_attentions = (
output_attentions
if output_attentions is not None
else self.config.output_attentions
)
output_hidden_states = (
output_hidden_states
if output_hidden_states is not None
else self.config.output_hidden_states
)
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
if is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError(
"You cannot specify both input_ids and inputs_embeds at the same time"
)
elif input_ids is not None:
input_shape = input_ids.size()
batch_size, seq_length = input_shape
device = input_ids.device
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size, seq_length = input_shape
device = inputs_embeds.device
elif encoder_embeds is not None:
input_shape = encoder_embeds.size()[:-1]
batch_size, seq_length = input_shape
device = encoder_embeds.device
else:
raise ValueError(
"You have to specify either input_ids or inputs_embeds or encoder_embeds"
)
# past_key_values_length
past_key_values_length = (
past_key_values[0][0].shape[2] if past_key_values is not None else 0
)
if attention_mask is None:
attention_mask = torch.ones(
((batch_size, seq_length + past_key_values_length)), device=device
)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
attention_mask, input_shape, device, is_decoder
)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_hidden_states is not None:
if type(encoder_hidden_states) == list:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[
0
].size()
else:
(
encoder_batch_size,
encoder_sequence_length,
_,
) = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if type(encoder_attention_mask) == list:
encoder_extended_attention_mask = [
self.invert_attention_mask(mask) for mask in encoder_attention_mask
]
elif encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(
encoder_attention_mask
)
else:
encoder_extended_attention_mask = self.invert_attention_mask(
encoder_attention_mask
)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
if encoder_embeds is None:
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
else:
embedding_output = encoder_embeds
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
mode=mode,
)
sequence_output = encoder_outputs[0]
pooled_output = (
self.pooler(sequence_output) if self.pooler is not None else None
)
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
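# Usage sketch (added; tensor names and sizes are illustrative, not taken from this file):
#   >>> tokens = tokenizer(["a room with a sofa"], return_tensors="pt")
#   >>> image_embeds = torch.randn(1, 257, encoder_width)       # e.g. frozen ViT patch features
#   >>> image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long)
#   >>> out = bert_model(tokens.input_ids,
#   ...                  attention_mask=tokens.attention_mask,
#   ...                  encoder_hidden_states=image_embeds,
#   ...                  encoder_attention_mask=image_atts,
#   ...                  return_dict=True, mode="multimodal")
#   >>> out.last_hidden_state.shape                              # (1, text_len, hidden_size)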
class BertForMaskedLM(BertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config, add_pooling_layer=False)
self.cls = BertOnlyMLMHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
def forward(
self,
input_ids=None,
attention_mask=None,
# token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
is_decoder=False,
mode="multimodal",
soft_labels=None,
alpha=0,
return_logits=False,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
"""
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
# token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_embeds=encoder_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
is_decoder=is_decoder,
mode=mode,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
if return_logits:
return prediction_scores
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss() # -100 index = padding token
masked_lm_loss = loss_fct(
prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)
)
if soft_labels is not None:
loss_distill = -torch.sum(
F.log_softmax(prediction_scores, dim=-1) * soft_labels, dim=-1
)
loss_distill = loss_distill[labels != -100].mean()
masked_lm_loss = (1 - alpha) * masked_lm_loss + alpha * loss_distill
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return (
((masked_lm_loss,) + output) if masked_lm_loss is not None else output
)
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def prepare_inputs_for_generation(
self, input_ids, attention_mask=None, **model_kwargs
):
input_shape = input_ids.shape
effective_batch_size = input_shape[0]
# add a dummy token
assert (
self.config.pad_token_id is not None
), "The PAD token should be defined for generation"
attention_mask = torch.cat(
[attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))],
dim=-1,
)
dummy_token = torch.full(
(effective_batch_size, 1),
self.config.pad_token_id,
dtype=torch.long,
device=input_ids.device,
)
input_ids = torch.cat([input_ids, dummy_token], dim=1)
return {"input_ids": input_ids, "attention_mask": attention_mask}
class BertLMHeadModel(BertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config, add_pooling_layer=False)
self.cls = BertOnlyMLMHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
def forward(
self,
input_ids=None,
attention_mask=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
return_logits=False,
is_decoder=True,
reduction="mean",
mode="multimodal",
soft_labels=None,
alpha=0,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are
            ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
Returns:
Example::
>>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
>>> config = BertConfig.from_pretrained("bert-base-cased")
>>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.logits
"""
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
if labels is not None:
use_cache = False
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
is_decoder=is_decoder,
mode=mode,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
if return_logits:
return prediction_scores[:, :-1, :].contiguous()
lm_loss = None
if labels is not None:
# we are doing next-token prediction; shift prediction scores and input ids by one
shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
labels = labels[:, 1:].contiguous()
loss_fct = CrossEntropyLoss(reduction=reduction, label_smoothing=0.1)
lm_loss = loss_fct(
shifted_prediction_scores.view(-1, self.config.vocab_size),
labels.view(-1),
)
if reduction == "none":
lm_loss = lm_loss.view(prediction_scores.size(0), -1).sum(1)
if soft_labels is not None:
loss_distill = -torch.sum(
F.log_softmax(shifted_prediction_scores, dim=-1) * soft_labels, dim=-1
)
loss_distill = (loss_distill * (labels != -100)).sum(1)
lm_loss = (1 - alpha) * lm_loss + alpha * loss_distill
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((lm_loss,) + output) if lm_loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=lm_loss,
logits=prediction_scores,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
def prepare_inputs_for_generation(
self, input_ids, past=None, attention_mask=None, **model_kwargs
):
input_shape = input_ids.shape
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
if attention_mask is None:
attention_mask = input_ids.new_ones(input_shape)
# cut decoder_input_ids if past is used
if past is not None:
input_ids = input_ids[:, -1:]
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"past_key_values": past,
"encoder_hidden_states": model_kwargs.get("encoder_hidden_states", None),
"encoder_attention_mask": model_kwargs.get("encoder_attention_mask", None),
"is_decoder": True,
}
def _reorder_cache(self, past, beam_idx):
reordered_past = ()
for layer_past in past:
reordered_past += (
tuple(
past_state.index_select(0, beam_idx) for past_state in layer_past
),
)
return reordered_past
class XBertLMHeadDecoder(BertLMHeadModel):
"""
This class decouples the decoder forward logic from the VL model.
In this way, different VL models can share this decoder as long as
they feed encoder_embeds as required.
"""
@classmethod
def from_config(cls, cfg, from_pretrained=False):
med_config_path = get_abs_path(cfg.get("med_config_path"))
med_config = BertConfig.from_json_file(med_config_path)
if from_pretrained:
return cls.from_pretrained("bert-base-uncased", config=med_config)
else:
return cls(config=med_config)
def generate_from_encoder(
self,
tokenized_prompt,
visual_embeds,
sep_token_id,
pad_token_id,
use_nucleus_sampling=False,
num_beams=3,
max_length=30,
min_length=10,
top_p=0.9,
repetition_penalty=1.0,
**kwargs
):
if not use_nucleus_sampling:
num_beams = num_beams
visual_embeds = visual_embeds.repeat_interleave(num_beams, dim=0)
image_atts = torch.ones(visual_embeds.size()[:-1], dtype=torch.long).to(
self.device
)
model_kwargs = {
"encoder_hidden_states": visual_embeds,
"encoder_attention_mask": image_atts,
}
if use_nucleus_sampling:
# nucleus sampling
outputs = self.generate(
input_ids=tokenized_prompt.input_ids,
max_length=max_length,
min_length=min_length,
do_sample=True,
top_p=top_p,
num_return_sequences=1,
eos_token_id=sep_token_id,
pad_token_id=pad_token_id,
repetition_penalty=1.1,
**model_kwargs
)
else:
# beam search
outputs = self.generate(
input_ids=tokenized_prompt.input_ids,
max_length=max_length,
min_length=min_length,
num_beams=num_beams,
eos_token_id=sep_token_id,
pad_token_id=pad_token_id,
repetition_penalty=repetition_penalty,
**model_kwargs
)
return outputs
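    # Usage sketch (added; names are illustrative): given `decoder = XBertLMHeadDecoder.from_config(cfg)`,
    # a tokenized prompt such as "a photo of " and visual features of shape (batch, n_patches, width),
    # captions are generated with beam search roughly as follows:
    #   >>> out_ids = decoder.generate_from_encoder(tokenized_prompt, visual_embeds,
    #   ...                                         sep_token_id=tokenizer.sep_token_id,
    #   ...                                         pad_token_id=tokenizer.pad_token_id,
    #   ...                                         num_beams=3, max_length=30, min_length=10)
    #   >>> captions = tokenizer.batch_decode(out_ids, skip_special_tokens=True)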
class XBertEncoder(BertModel, BaseEncoder):
@classmethod
def from_config(cls, cfg, from_pretrained=False):
med_config_path = get_abs_path(cfg.get("med_config_path"))
med_config = BertConfig.from_json_file(med_config_path)
if from_pretrained:
return cls.from_pretrained(
"bert-base-uncased", config=med_config, add_pooling_layer=False
)
else:
return cls(config=med_config, add_pooling_layer=False)
def forward_automask(self, tokenized_text, visual_embeds, **kwargs):
image_atts = torch.ones(visual_embeds.size()[:-1], dtype=torch.long).to(
self.device
)
text = tokenized_text
text_output = super().forward(
text.input_ids,
attention_mask=text.attention_mask,
encoder_hidden_states=visual_embeds,
encoder_attention_mask=image_atts,
return_dict=True,
)
return text_output
def forward_text(self, tokenized_text, **kwargs):
text = tokenized_text
token_type_ids = kwargs.get("token_type_ids", None)
text_output = super().forward(
text.input_ids,
attention_mask=text.attention_mask,
token_type_ids=token_type_ids,
return_dict=True,
mode="text",
)
return text_output
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/med.py |
# Based on EVA, BEIT, timm and DeiT code bases
# https://github.com/baaivision/EVA
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/microsoft/unilm/tree/master/beit
# https://github.com/facebookresearch/deit/
# https://github.com/facebookresearch/dino
# --------------------------------------------------------'
import math
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import drop_path, to_2tuple, trunc_normal_
from timm.models.registry import register_model
from lavis.common.dist_utils import download_cached_file
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic',
'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5),
**kwargs
}
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
def extra_repr(self) -> str:
return 'p={}'.format(self.drop_prob)
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
        x = self.fc1(x.half())  # explicit fp16 cast so the input matches the half-precision weights
x = self.act(x)
# x = self.drop(x)
        # dropout after fc1 is left commented out, matching the original BERT implementation
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(
self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.,
proj_drop=0., window_size=None, attn_head_dim=None):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
if attn_head_dim is not None:
head_dim = attn_head_dim
all_head_dim = head_dim * self.num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False)
if qkv_bias:
self.q_bias = nn.Parameter(torch.zeros(all_head_dim))
self.v_bias = nn.Parameter(torch.zeros(all_head_dim))
else:
self.q_bias = None
self.v_bias = None
if window_size:
self.window_size = window_size
self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
self.relative_position_bias_table = nn.Parameter(
torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH
# cls to token & token 2 cls & cls to cls
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(window_size[0])
coords_w = torch.arange(window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += window_size[1] - 1
relative_coords[:, :, 0] *= 2 * window_size[1] - 1
relative_position_index = \
torch.zeros(size=(window_size[0] * window_size[1] + 1, ) * 2, dtype=relative_coords.dtype)
relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
relative_position_index[0, 0:] = self.num_relative_distance - 3
relative_position_index[0:, 0] = self.num_relative_distance - 2
relative_position_index[0, 0] = self.num_relative_distance - 1
self.register_buffer("relative_position_index", relative_position_index)
else:
self.window_size = None
self.relative_position_bias_table = None
self.relative_position_index = None
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(all_head_dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x, rel_pos_bias=None):
B, N, C = x.shape
qkv_bias = None
if self.q_bias is not None:
qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias))
# qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
qkv = F.linear(input=x.half(), weight=self.qkv.weight, bias=qkv_bias)
qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
if self.relative_position_bias_table is not None:
relative_position_bias = \
self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1] + 1,
self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0)
if rel_pos_bias is not None:
attn = attn + rel_pos_bias
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., init_values=None, act_layer=nn.GELU, norm_layer=nn.LayerNorm,
window_size=None, attn_head_dim=None):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
attn_drop=attn_drop, proj_drop=drop, window_size=window_size, attn_head_dim=attn_head_dim)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
if init_values is not None and init_values > 0:
self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)),requires_grad=True)
self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)),requires_grad=True)
else:
self.gamma_1, self.gamma_2 = None, None
def forward(self, x, rel_pos_bias=None):
if self.gamma_1 is None:
x = x + self.drop_path(self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias))
x = x + self.drop_path(self.mlp(self.norm2(x)))
else:
x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias))
x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))
return x
class PatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x, **kwargs):
B, C, H, W = x.shape
# FIXME look at relaxing size constraints
assert H == self.img_size[0] and W == self.img_size[1], \
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
        x = self.proj(x.half()).flatten(2).transpose(1, 2)  # fp16 cast keeps the input consistent with the converted conv weights
return x
class RelativePositionBias(nn.Module):
def __init__(self, window_size, num_heads):
super().__init__()
self.window_size = window_size
self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
self.relative_position_bias_table = nn.Parameter(
torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH
# cls to token & token 2 cls & cls to cls
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(window_size[0])
coords_w = torch.arange(window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += window_size[1] - 1
relative_coords[:, :, 0] *= 2 * window_size[1] - 1
relative_position_index = \
torch.zeros(size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype)
relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
relative_position_index[0, 0:] = self.num_relative_distance - 3
relative_position_index[0:, 0] = self.num_relative_distance - 2
relative_position_index[0, 0] = self.num_relative_distance - 1
self.register_buffer("relative_position_index", relative_position_index)
# trunc_normal_(self.relative_position_bias_table, std=.02)
def forward(self):
relative_position_bias = \
self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1] + 1,
self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH
return relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
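# Worked example (added): for a 16x16 patch grid, window_size=(16, 16) gives
# num_relative_distance = (2 * 16 - 1) ** 2 + 3 = 961 + 3 = 964 learnable rows (the extra
# three cover cls->token, token->cls and cls->cls), the bias table has shape (964, num_heads),
# and relative_position_index has shape (16 * 16 + 1, 16 * 16 + 1) = (257, 257).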
class VisionTransformer(nn.Module):
""" Vision Transformer with support for patch or hybrid CNN input stage
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
drop_path_rate=0., norm_layer=nn.LayerNorm, init_values=None,
use_abs_pos_emb=True, use_rel_pos_bias=False, use_shared_rel_pos_bias=False,
use_mean_pooling=True, init_scale=0.001, use_checkpoint=False):
super().__init__()
self.image_size = img_size
self.num_classes = num_classes
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
if use_abs_pos_emb:
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
else:
self.pos_embed = None
self.pos_drop = nn.Dropout(p=drop_rate)
if use_shared_rel_pos_bias:
self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.patch_shape, num_heads=num_heads)
else:
self.rel_pos_bias = None
self.use_checkpoint = use_checkpoint
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.use_rel_pos_bias = use_rel_pos_bias
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
init_values=init_values, window_size=self.patch_embed.patch_shape if use_rel_pos_bias else None)
for i in range(depth)])
# self.norm = nn.Identity() if use_mean_pooling else norm_layer(embed_dim)
# self.fc_norm = norm_layer(embed_dim) if use_mean_pooling else None
# self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
if self.pos_embed is not None:
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
# trunc_normal_(self.mask_token, std=.02)
# if isinstance(self.head, nn.Linear):
# trunc_normal_(self.head.weight, std=.02)
self.apply(self._init_weights)
self.fix_init_weight()
# if isinstance(self.head, nn.Linear):
# self.head.weight.data.mul_(init_scale)
# self.head.bias.data.mul_(init_scale)
def fix_init_weight(self):
def rescale(param, layer_id):
param.div_(math.sqrt(2.0 * layer_id))
for layer_id, layer in enumerate(self.blocks):
rescale(layer.attn.proj.weight.data, layer_id + 1)
rescale(layer.mlp.fc2.weight.data, layer_id + 1)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=''):
self.num_classes = num_classes
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
x = self.patch_embed(x)
batch_size, seq_len, _ = x.size()
cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks
x = torch.cat((cls_tokens, x), dim=1)
if self.pos_embed is not None:
x = x + self.pos_embed
x = self.pos_drop(x)
rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None
for blk in self.blocks:
if self.use_checkpoint:
x = checkpoint.checkpoint(blk, x, rel_pos_bias)
else:
x = blk(x, rel_pos_bias)
return x
# x = self.norm(x)
# if self.fc_norm is not None:
# t = x[:, 1:, :]
# return self.fc_norm(t.mean(1))
# else:
# return x[:, 0]
def forward(self, x):
x = self.forward_features(x)
# x = self.head(x)
return x
def get_intermediate_layers(self, x):
x = self.patch_embed(x)
batch_size, seq_len, _ = x.size()
cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks
x = torch.cat((cls_tokens, x), dim=1)
if self.pos_embed is not None:
x = x + self.pos_embed
x = self.pos_drop(x)
features = []
rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None
for blk in self.blocks:
x = blk(x, rel_pos_bias)
features.append(x)
return features
def interpolate_pos_embed(model, checkpoint_model):
if 'pos_embed' in checkpoint_model:
pos_embed_checkpoint = checkpoint_model['pos_embed'].float()
embedding_size = pos_embed_checkpoint.shape[-1]
num_patches = model.patch_embed.num_patches
num_extra_tokens = model.pos_embed.shape[-2] - num_patches
# height (== width) for the checkpoint position embedding
orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
# height (== width) for the new position embedding
new_size = int(num_patches ** 0.5)
# class_token and dist_token are kept unchanged
if orig_size != new_size:
print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size))
extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
# only the position tokens are interpolated
pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
pos_tokens = torch.nn.functional.interpolate(
pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
checkpoint_model['pos_embed'] = new_pos_embed
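# Worked example (added): the EVA-g checkpoint used below is trained at 224x224 with patch
# size 14, i.e. a 16x16 grid (256 position tokens plus one extra cls token). Loading it into
# a model built with img_size=364 gives 364 // 14 = 26 patches per side, so the 16x16 grid is
# reshaped to (1, embed_dim, 16, 16), bicubically resized to 26x26 = 676 tokens, and then
# concatenated back with the extra tokens.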
def convert_weights_to_fp16(model: nn.Module):
"""Convert applicable model parameters to fp16"""
def _convert_weights_to_fp16(l):
if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
l.weight.data = l.weight.data.half()
if l.bias is not None:
l.bias.data = l.bias.data.half()
# if isinstance(l, (nn.MultiheadAttention, Attention)):
# for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]:
# tensor = getattr(l, attr)
# if tensor is not None:
# tensor.data = tensor.data.half()
model.apply(_convert_weights_to_fp16)
def create_eva_vit_g(img_size=224,drop_path_rate=0.4,use_checkpoint=False,precision="fp16"):
model = VisionTransformer(
img_size=img_size,
patch_size=14,
use_mean_pooling=False,
embed_dim=1408,
depth=39,
num_heads=1408//88,
mlp_ratio=4.3637,
qkv_bias=True,
drop_path_rate=drop_path_rate,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
use_checkpoint=use_checkpoint,
)
url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/eva_vit_g.pth"
cached_file = download_cached_file(
url, check_hash=False, progress=True
)
state_dict = torch.load(cached_file, map_location="cpu")
interpolate_pos_embed(model,state_dict)
incompatible_keys = model.load_state_dict(state_dict, strict=False)
# print(incompatible_keys)
if precision == "fp16":
# model.to("cuda")
convert_weights_to_fp16(model)
return model
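# Usage sketch (added; the input tensor is random and purely illustrative):
#   >>> vit = create_eva_vit_g(img_size=224, precision="fp16").to("cuda").eval()
#   >>> images = torch.randn(1, 3, 224, 224, dtype=torch.float16, device="cuda")
#   >>> feats = vit(images)   # (1, 1 + (224 // 14) ** 2, 1408) = (1, 257, 1408)
# The output keeps the cls token and all patch tokens; no classification head is applied,
# since this encoder is used as a frozen visual backbone for feature extraction.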
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/eva_vit.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import logging
from omegaconf import OmegaConf
from lavis.common.registry import registry
from lavis.models.base_model import BaseModel
from lavis.models.albef_models.albef_classification import AlbefClassification
from lavis.models.albef_models.albef_feature_extractor import AlbefFeatureExtractor
from lavis.models.albef_models.albef_nlvr import AlbefNLVR
from lavis.models.albef_models.albef_pretrain import AlbefPretrain
from lavis.models.albef_models.albef_retrieval import AlbefRetrieval
from lavis.models.albef_models.albef_vqa import AlbefVQA
from lavis.models.alpro_models.alpro_qa import AlproQA
from lavis.models.alpro_models.alpro_retrieval import AlproRetrieval
from lavis.models.blip_models.blip import BlipBase
from lavis.models.blip_models.blip_caption import BlipCaption
from lavis.models.blip_models.blip_classification import BlipClassification
from lavis.models.blip_models.blip_feature_extractor import BlipFeatureExtractor
from lavis.models.blip_models.blip_image_text_matching import BlipITM
from lavis.models.blip_models.blip_nlvr import BlipNLVR
from lavis.models.blip_models.blip_pretrain import BlipPretrain
from lavis.models.blip_models.blip_retrieval import BlipRetrieval
from lavis.models.blip_models.blip_vqa import BlipVQA
from lavis.models.blip2_models.blip2 import Blip2Base
from lavis.models.blip2_models.blip2_opt import Blip2OPT
from lavis.models.blip2_models.blip2_t5 import Blip2T5
from lavis.models.blip2_models.blip2_qformer import Blip2Qformer
from lavis.models.blip2_models.blip2_image_text_matching import Blip2ITM
from lavis.models.pnp_vqa_models.pnp_vqa import PNPVQA
from lavis.models.pnp_vqa_models.pnp_unifiedqav2_fid import PNPUnifiedQAv2FiD
from lavis.models.img2prompt_models.img2prompt_vqa import Img2PromptVQA
from lavis.models.med import XBertLMHeadDecoder
from lavis.models.vit import VisionTransformerEncoder
from lavis.models.clip_models.model import CLIP
from lavis.models.gpt_models.gpt_dialogue import GPTDialogue
from lavis.processors.base_processor import BaseProcessor
__all__ = [
"load_model",
"AlbefClassification",
"AlbefFeatureExtractor",
"AlbefNLVR",
"AlbefVQA",
"AlbefPretrain",
"AlbefRetrieval",
"AlproQA",
"AlproRetrieval",
"BaseModel",
"BlipBase",
"BlipFeatureExtractor",
"BlipCaption",
"BlipClassification",
"BlipITM",
"BlipNLVR",
"BlipPretrain",
"BlipRetrieval",
"BlipVQA",
"Blip2Qformer",
"Blip2Base",
"Blip2ITM",
"Blip2OPT",
"Blip2T5",
"PNPVQA",
"Img2PromptVQA",
"PNPUnifiedQAv2FiD",
"CLIP",
"VisionTransformerEncoder",
"XBertLMHeadDecoder",
"GPTDialogue",
]
def load_model(name, model_type, is_eval=False, device="cpu", checkpoint=None):
"""
Load supported models.
To list all available models and types in registry:
>>> from lavis.models import model_zoo
>>> print(model_zoo)
Args:
name (str): name of the model.
model_type (str): type of the model.
is_eval (bool): whether the model is in eval mode. Default: False.
device (str): device to use. Default: "cpu".
        checkpoint (str): path to a checkpoint. Default: None.
            Note that the checkpoint is expected to have the same keys in its state_dict as the model.
Returns:
model (torch.nn.Module): model.
"""
model = registry.get_model_class(name).from_pretrained(model_type=model_type)
if checkpoint is not None:
model.load_checkpoint(checkpoint)
if is_eval:
model.eval()
if device == "cpu":
model = model.float()
return model.to(device)
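# Usage sketch (added; the architecture/type names are examples from the registry and may
# differ depending on which model configs are available in your installation):
#   >>> from lavis.models import load_model
#   >>> model = load_model("blip_caption", "base_coco", is_eval=True, device="cuda")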
def load_preprocess(config):
"""
Load preprocessor configs and construct preprocessors.
If no preprocessor is specified, return BaseProcessor, which does not do any preprocessing.
Args:
config (dict): preprocessor configs.
Returns:
vis_processors (dict): preprocessors for visual inputs.
txt_processors (dict): preprocessors for text inputs.
Key is "train" or "eval" for processors used in training and evaluation respectively.
"""
def _build_proc_from_cfg(cfg):
return (
registry.get_processor_class(cfg.name).from_config(cfg)
if cfg is not None
else BaseProcessor()
)
vis_processors = dict()
txt_processors = dict()
vis_proc_cfg = config.get("vis_processor")
txt_proc_cfg = config.get("text_processor")
if vis_proc_cfg is not None:
vis_train_cfg = vis_proc_cfg.get("train")
vis_eval_cfg = vis_proc_cfg.get("eval")
else:
vis_train_cfg = None
vis_eval_cfg = None
vis_processors["train"] = _build_proc_from_cfg(vis_train_cfg)
vis_processors["eval"] = _build_proc_from_cfg(vis_eval_cfg)
if txt_proc_cfg is not None:
txt_train_cfg = txt_proc_cfg.get("train")
txt_eval_cfg = txt_proc_cfg.get("eval")
else:
txt_train_cfg = None
txt_eval_cfg = None
txt_processors["train"] = _build_proc_from_cfg(txt_train_cfg)
txt_processors["eval"] = _build_proc_from_cfg(txt_eval_cfg)
return vis_processors, txt_processors
def load_model_and_preprocess(name, model_type, is_eval=False, device="cpu"):
"""
Load model and its related preprocessors.
List all available models and types in registry:
>>> from lavis.models import model_zoo
>>> print(model_zoo)
Args:
name (str): name of the model.
model_type (str): type of the model.
is_eval (bool): whether the model is in eval mode. Default: False.
device (str): device to use. Default: "cpu".
Returns:
model (torch.nn.Module): model.
vis_processors (dict): preprocessors for visual inputs.
txt_processors (dict): preprocessors for text inputs.
"""
model_cls = registry.get_model_class(name)
# load model
model = model_cls.from_pretrained(model_type=model_type)
if is_eval:
model.eval()
# load preprocess
cfg = OmegaConf.load(model_cls.default_config_path(model_type))
if cfg is not None:
preprocess_cfg = cfg.preprocess
vis_processors, txt_processors = load_preprocess(preprocess_cfg)
else:
vis_processors, txt_processors = None, None
logging.info(
f"""No default preprocess for model {name} ({model_type}).
This can happen if the model is not finetuned on downstream datasets,
or it is not intended for direct use without finetuning.
"""
)
if device == "cpu":
model = model.float()
return model.to(device), vis_processors, txt_processors
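# Usage sketch (added; `raw_image` is an illustrative PIL image, not defined in this file):
#   >>> model, vis_processors, txt_processors = load_model_and_preprocess(
#   ...     "blip_caption", "base_coco", is_eval=True, device="cuda")
#   >>> image = vis_processors["eval"](raw_image).unsqueeze(0).to("cuda")
#   >>> model.generate({"image": image})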
class ModelZoo:
"""
A utility class to create string representation of available model architectures and types.
>>> from lavis.models import model_zoo
>>> # list all available models
>>> print(model_zoo)
>>> # show total number of models
>>> print(len(model_zoo))
"""
def __init__(self) -> None:
self.model_zoo = {
k: list(v.PRETRAINED_MODEL_CONFIG_DICT.keys())
for k, v in registry.mapping["model_name_mapping"].items()
}
def __str__(self) -> str:
return (
"=" * 50
+ "\n"
+ f"{'Architectures':<30} {'Types'}\n"
+ "=" * 50
+ "\n"
+ "\n".join(
[
f"{name:<30} {', '.join(types)}"
for name, types in self.model_zoo.items()
]
)
)
def __iter__(self):
return iter(self.model_zoo.items())
def __len__(self):
return sum([len(v) for v in self.model_zoo.values()])
model_zoo = ModelZoo()
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/__init__.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import logging
import os
import numpy as np
import torch
import torch.nn as nn
from lavis.common.dist_utils import download_cached_file, is_dist_avail_and_initialized
from lavis.common.utils import get_abs_path, is_url
from omegaconf import OmegaConf
class BaseModel(nn.Module):
"""Base class for models."""
def __init__(self):
super().__init__()
@property
def device(self):
return list(self.parameters())[0].device
def load_checkpoint(self, url_or_filename):
"""
Load from a finetuned checkpoint.
        No mismatch between the checkpoint keys and the model keys is expected.
"""
if is_url(url_or_filename):
cached_file = download_cached_file(
url_or_filename, check_hash=False, progress=True
)
checkpoint = torch.load(cached_file, map_location="cpu")
elif os.path.isfile(url_or_filename):
checkpoint = torch.load(url_or_filename, map_location="cpu")
else:
raise RuntimeError("checkpoint url or path is invalid")
if "model" in checkpoint.keys():
state_dict = checkpoint["model"]
else:
state_dict = checkpoint
msg = self.load_state_dict(state_dict, strict=False)
logging.info("Missing keys {}".format(msg.missing_keys))
logging.info("load checkpoint from %s" % url_or_filename)
return msg
@classmethod
def from_pretrained(cls, model_type):
"""
Build a pretrained model from default configuration file, specified by model_type.
Args:
- model_type (str): model type, specifying architecture and checkpoints.
Returns:
- model (nn.Module): pretrained or finetuned model, depending on the configuration.
"""
model_cfg = OmegaConf.load(cls.default_config_path(model_type)).model
model = cls.from_config(model_cfg)
return model
@classmethod
def default_config_path(cls, model_type):
assert (
model_type in cls.PRETRAINED_MODEL_CONFIG_DICT
), "Unknown model type {}".format(model_type)
return get_abs_path(cls.PRETRAINED_MODEL_CONFIG_DICT[model_type])
def load_checkpoint_from_config(self, cfg, **kwargs):
"""
Load checkpoint as specified in the config file.
If load_finetuned is True, load the finetuned model; otherwise, load the pretrained model.
When loading the pretrained model, each task-specific architecture may define their
own load_from_pretrained() method.
"""
load_finetuned = cfg.get("load_finetuned", True)
if load_finetuned:
finetune_path = cfg.get("finetuned", None)
assert (
finetune_path is not None
), "Found load_finetuned is True, but finetune_path is None."
self.load_checkpoint(url_or_filename=finetune_path)
else:
# load pre-trained weights
pretrain_path = cfg.get("pretrained", None)
            assert (
                pretrain_path is not None
            ), "Found load_finetuned is False, but pretrain_path is None."
self.load_from_pretrained(url_or_filename=pretrain_path, **kwargs)
def before_evaluation(self, **kwargs):
pass
def show_n_params(self, return_str=True):
tot = 0
for p in self.parameters():
w = 1
for x in p.shape:
w *= x
tot += w
if return_str:
if tot >= 1e6:
return "{:.1f}M".format(tot / 1e6)
else:
return "{:.1f}K".format(tot / 1e3)
else:
return tot
class BaseEncoder(nn.Module):
"""
Base class for primitive encoders, such as ViT, TimeSformer, etc.
"""
def __init__(self):
super().__init__()
def forward_features(self, samples, **kwargs):
raise NotImplementedError
@property
def device(self):
return list(self.parameters())[0].device
class SharedQueueMixin:
@torch.no_grad()
def _dequeue_and_enqueue(self, image_feat, text_feat, idxs=None):
# gather keys before updating queue
image_feats = concat_all_gather(image_feat)
text_feats = concat_all_gather(text_feat)
batch_size = image_feats.shape[0]
ptr = int(self.queue_ptr)
assert self.queue_size % batch_size == 0 # for simplicity
# replace the keys at ptr (dequeue and enqueue)
self.image_queue[:, ptr : ptr + batch_size] = image_feats.T
self.text_queue[:, ptr : ptr + batch_size] = text_feats.T
if idxs is not None:
idxs = concat_all_gather(idxs)
self.idx_queue[:, ptr : ptr + batch_size] = idxs.T
ptr = (ptr + batch_size) % self.queue_size # move pointer
self.queue_ptr[0] = ptr
class MomentumDistilationMixin:
@torch.no_grad()
def copy_params(self):
for model_pair in self.model_pairs:
for param, param_m in zip(
model_pair[0].parameters(), model_pair[1].parameters()
):
param_m.data.copy_(param.data) # initialize
param_m.requires_grad = False # not update by gradient
@torch.no_grad()
def _momentum_update(self):
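        # Exponential moving average update: param_m <- m * param_m + (1 - m) * param,
        # so the momentum copy tracks the online parameters slowly.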
for model_pair in self.model_pairs:
for param, param_m in zip(
model_pair[0].parameters(), model_pair[1].parameters()
):
param_m.data = param_m.data * self.momentum + param.data * (
1.0 - self.momentum
)
class GatherLayer(torch.autograd.Function):
"""
Gather tensors from all workers with support for backward propagation:
This implementation does not cut the gradients as torch.distributed.all_gather does.
"""
@staticmethod
def forward(ctx, x):
output = [
torch.zeros_like(x) for _ in range(torch.distributed.get_world_size())
]
torch.distributed.all_gather(output, x)
return tuple(output)
@staticmethod
def backward(ctx, *grads):
all_gradients = torch.stack(grads)
torch.distributed.all_reduce(all_gradients)
return all_gradients[torch.distributed.get_rank()]
def all_gather_with_grad(tensors):
"""
Performs all_gather operation on the provided tensors.
Graph remains connected for backward grad computation.
"""
    # Gather from all ranks while keeping the autograd graph connected
world_size = torch.distributed.get_world_size()
# There is no need for reduction in the single-proc case
if world_size == 1:
return tensors
# tensor_all = GatherLayer.apply(tensors)
tensor_all = GatherLayer.apply(tensors)
return torch.cat(tensor_all, dim=0)
@torch.no_grad()
def concat_all_gather(tensor):
"""
Performs all_gather operation on the provided tensors.
*** Warning ***: torch.distributed.all_gather has no gradient.
"""
# if use distributed training
if not is_dist_avail_and_initialized():
return tensor
tensors_gather = [
torch.ones_like(tensor) for _ in range(torch.distributed.get_world_size())
]
torch.distributed.all_gather(tensors_gather, tensor, async_op=False)
output = torch.cat(tensors_gather, dim=0)
return output
def tile(x, dim, n_tile):
init_dim = x.size(dim)
repeat_idx = [1] * x.dim()
repeat_idx[dim] = n_tile
x = x.repeat(*(repeat_idx))
order_index = torch.LongTensor(
np.concatenate([init_dim * np.arange(n_tile) + i for i in range(init_dim)])
)
return torch.index_select(x, dim, order_index.to(x.device))
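# Minimal usage sketch for `tile`: it repeats each slice of `x` along `dim`
# n_tile times back-to-back, similar to torch.repeat_interleave.
if __name__ == "__main__":
    _x = torch.tensor([1, 2])
    print(tile(_x, 0, 3))  # tensor([1, 1, 1, 2, 2, 2])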
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/base_model.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
Based on timm code base
https://github.com/rwightman/pytorch-image-models/tree/master/timm
"""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from timm.models.vision_transformer import _cfg, PatchEmbed
from timm.models.registry import register_model
from timm.models.layers import trunc_normal_, DropPath
from timm.models.helpers import named_apply, adapt_input_conv
from fairscale.nn.checkpoint.checkpoint_activations import checkpoint_wrapper
from lavis.models.base_model import BaseEncoder
class Mlp(nn.Module):
"""MLP as used in Vision Transformer, MLP-Mixer and related networks"""
def __init__(
self,
in_features,
hidden_features=None,
out_features=None,
act_layer=nn.GELU,
drop=0.0,
):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(
self,
dim,
num_heads=8,
qkv_bias=False,
qk_scale=None,
attn_drop=0.0,
proj_drop=0.0,
):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
# NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
self.scale = qk_scale or head_dim**-0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.attn_gradients = None
self.attention_map = None
def save_attn_gradients(self, attn_gradients):
self.attn_gradients = attn_gradients
def get_attn_gradients(self):
return self.attn_gradients
def save_attention_map(self, attention_map):
self.attention_map = attention_map
def get_attention_map(self):
return self.attention_map
def forward(self, x, register_hook=False):
B, N, C = x.shape
qkv = (
self.qkv(x)
.reshape(B, N, 3, self.num_heads, C // self.num_heads)
.permute(2, 0, 3, 1, 4)
)
q, k, v = (
qkv[0],
qkv[1],
qkv[2],
) # make torchscript happy (cannot use tensor as tuple)
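        # Scaled dot-product attention: scores have shape (B, num_heads, N, N)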
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
if register_hook:
self.save_attention_map(attn)
attn.register_hook(self.save_attn_gradients)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Block(nn.Module):
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.0,
qkv_bias=False,
qk_scale=None,
drop=0.0,
attn_drop=0.0,
drop_path=0.0,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
use_grad_checkpointing=False,
):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attn_drop=attn_drop,
proj_drop=drop,
)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(
in_features=dim,
hidden_features=mlp_hidden_dim,
act_layer=act_layer,
drop=drop,
)
if use_grad_checkpointing:
self.attn = checkpoint_wrapper(self.attn)
self.mlp = checkpoint_wrapper(self.mlp)
def forward(self, x, register_hook=False):
x = x + self.drop_path(self.attn(self.norm1(x), register_hook=register_hook))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class VisionTransformer(nn.Module):
"""Vision Transformer
A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` -
https://arxiv.org/abs/2010.11929
"""
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
num_classes=1000,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4.0,
qkv_bias=True,
qk_scale=None,
representation_size=None,
drop_rate=0.0,
attn_drop_rate=0.0,
drop_path_rate=0.0,
norm_layer=None,
use_grad_checkpointing=False,
ckpt_layer=0,
):
"""
Args:
img_size (int, tuple): input image size
patch_size (int, tuple): patch size
in_chans (int): number of input channels
num_classes (int): number of classes for classification head
embed_dim (int): embedding dimension
depth (int): depth of transformer
num_heads (int): number of attention heads
mlp_ratio (int): ratio of mlp hidden dim to embedding dim
qkv_bias (bool): enable bias for qkv if True
qk_scale (float): override default qk scale of head_dim ** -0.5 if set
representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set
drop_rate (float): dropout rate
attn_drop_rate (float): attention dropout rate
drop_path_rate (float): stochastic depth rate
norm_layer: (nn.Module): normalization layer
"""
super().__init__()
self.num_features = (
self.embed_dim
) = embed_dim # num_features for consistency with other models
norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
self.patch_embed = PatchEmbed(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [
x.item() for x in torch.linspace(0, drop_path_rate, depth)
] # stochastic depth decay rule
self.blocks = nn.ModuleList(
[
Block(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop=drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
use_grad_checkpointing=(
use_grad_checkpointing and i >= depth - ckpt_layer
),
)
for i in range(depth)
]
)
self.norm = norm_layer(embed_dim)
trunc_normal_(self.pos_embed, std=0.02)
trunc_normal_(self.cls_token, std=0.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=0.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {"pos_embed", "cls_token"}
def forward(self, x, register_blk=-1):
B = x.shape[0]
x = self.patch_embed(x)
cls_tokens = self.cls_token.expand(
B, -1, -1
) # stole cls_tokens impl from Phil Wang, thanks
x = torch.cat((cls_tokens, x), dim=1)
x = x + self.pos_embed[:, : x.size(1), :]
x = self.pos_drop(x)
for i, blk in enumerate(self.blocks):
x = blk(x, register_blk == i)
x = self.norm(x)
return x
@torch.jit.ignore()
def load_pretrained(self, checkpoint_path, prefix=""):
_load_weights(self, checkpoint_path, prefix)
@torch.no_grad()
def _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = ""):
"""Load weights from .npz checkpoints for official Google Brain Flax implementation"""
import numpy as np
def _n2p(w, t=True):
if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1:
w = w.flatten()
if t:
if w.ndim == 4:
w = w.transpose([3, 2, 0, 1])
elif w.ndim == 3:
w = w.transpose([2, 0, 1])
elif w.ndim == 2:
w = w.transpose([1, 0])
return torch.from_numpy(w)
w = np.load(checkpoint_path)
if not prefix and "opt/target/embedding/kernel" in w:
prefix = "opt/target/"
if hasattr(model.patch_embed, "backbone"):
# hybrid
backbone = model.patch_embed.backbone
stem_only = not hasattr(backbone, "stem")
stem = backbone if stem_only else backbone.stem
stem.conv.weight.copy_(
adapt_input_conv(
stem.conv.weight.shape[1], _n2p(w[f"{prefix}conv_root/kernel"])
)
)
stem.norm.weight.copy_(_n2p(w[f"{prefix}gn_root/scale"]))
stem.norm.bias.copy_(_n2p(w[f"{prefix}gn_root/bias"]))
if not stem_only:
for i, stage in enumerate(backbone.stages):
for j, block in enumerate(stage.blocks):
bp = f"{prefix}block{i + 1}/unit{j + 1}/"
for r in range(3):
getattr(block, f"conv{r + 1}").weight.copy_(
_n2p(w[f"{bp}conv{r + 1}/kernel"])
)
getattr(block, f"norm{r + 1}").weight.copy_(
_n2p(w[f"{bp}gn{r + 1}/scale"])
)
getattr(block, f"norm{r + 1}").bias.copy_(
_n2p(w[f"{bp}gn{r + 1}/bias"])
)
if block.downsample is not None:
block.downsample.conv.weight.copy_(
_n2p(w[f"{bp}conv_proj/kernel"])
)
block.downsample.norm.weight.copy_(
_n2p(w[f"{bp}gn_proj/scale"])
)
block.downsample.norm.bias.copy_(_n2p(w[f"{bp}gn_proj/bias"]))
embed_conv_w = _n2p(w[f"{prefix}embedding/kernel"])
else:
embed_conv_w = adapt_input_conv(
model.patch_embed.proj.weight.shape[1], _n2p(w[f"{prefix}embedding/kernel"])
)
model.patch_embed.proj.weight.copy_(embed_conv_w)
model.patch_embed.proj.bias.copy_(_n2p(w[f"{prefix}embedding/bias"]))
model.cls_token.copy_(_n2p(w[f"{prefix}cls"], t=False))
pos_embed_w = _n2p(w[f"{prefix}Transformer/posembed_input/pos_embedding"], t=False)
if pos_embed_w.shape != model.pos_embed.shape:
pos_embed_w = resize_pos_embed( # resize pos embedding when different size from pretrained weights
pos_embed_w,
model.pos_embed,
getattr(model, "num_tokens", 1),
model.patch_embed.grid_size,
)
model.pos_embed.copy_(pos_embed_w)
model.norm.weight.copy_(_n2p(w[f"{prefix}Transformer/encoder_norm/scale"]))
model.norm.bias.copy_(_n2p(w[f"{prefix}Transformer/encoder_norm/bias"]))
# if isinstance(model.head, nn.Linear) and model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]:
# model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel']))
# model.head.bias.copy_(_n2p(w[f'{prefix}head/bias']))
# if isinstance(getattr(model.pre_logits, 'fc', None), nn.Linear) and f'{prefix}pre_logits/bias' in w:
# model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel']))
# model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias']))
for i, block in enumerate(model.blocks.children()):
block_prefix = f"{prefix}Transformer/encoderblock_{i}/"
mha_prefix = block_prefix + "MultiHeadDotProductAttention_1/"
block.norm1.weight.copy_(_n2p(w[f"{block_prefix}LayerNorm_0/scale"]))
block.norm1.bias.copy_(_n2p(w[f"{block_prefix}LayerNorm_0/bias"]))
block.attn.qkv.weight.copy_(
torch.cat(
[
_n2p(w[f"{mha_prefix}{n}/kernel"], t=False).flatten(1).T
for n in ("query", "key", "value")
]
)
)
block.attn.qkv.bias.copy_(
torch.cat(
[
_n2p(w[f"{mha_prefix}{n}/bias"], t=False).reshape(-1)
for n in ("query", "key", "value")
]
)
)
block.attn.proj.weight.copy_(_n2p(w[f"{mha_prefix}out/kernel"]).flatten(1))
block.attn.proj.bias.copy_(_n2p(w[f"{mha_prefix}out/bias"]))
for r in range(2):
getattr(block.mlp, f"fc{r + 1}").weight.copy_(
_n2p(w[f"{block_prefix}MlpBlock_3/Dense_{r}/kernel"])
)
getattr(block.mlp, f"fc{r + 1}").bias.copy_(
_n2p(w[f"{block_prefix}MlpBlock_3/Dense_{r}/bias"])
)
block.norm2.weight.copy_(_n2p(w[f"{block_prefix}LayerNorm_2/scale"]))
block.norm2.bias.copy_(_n2p(w[f"{block_prefix}LayerNorm_2/bias"]))
def resize_pos_embed(posemb, posemb_new, num_tokens=1, gs_new=()):
# Rescale the grid of position embeddings when loading from state_dict. Adapted from
# https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224
    print("Resized position embedding: %s to %s" % (posemb.shape, posemb_new.shape))
ntok_new = posemb_new.shape[1]
if num_tokens:
posemb_tok, posemb_grid = posemb[:, :num_tokens], posemb[0, num_tokens:]
ntok_new -= num_tokens
else:
posemb_tok, posemb_grid = posemb[:, :0], posemb[0]
gs_old = int(math.sqrt(len(posemb_grid)))
if not len(gs_new): # backwards compatibility
gs_new = [int(math.sqrt(ntok_new))] * 2
assert len(gs_new) >= 2
    print("Position embedding grid-size from %s to %s" % ([gs_old, gs_old], gs_new))
posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
posemb_grid = F.interpolate(
posemb_grid, size=gs_new, mode="bicubic", align_corners=False
)
posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new[0] * gs_new[1], -1)
posemb = torch.cat([posemb_tok, posemb_grid], dim=1)
    return posemb
def interpolate_pos_embed(pos_embed_checkpoint, visual_encoder):
# interpolate position embedding
embedding_size = pos_embed_checkpoint.shape[-1]
num_patches = visual_encoder.patch_embed.num_patches
num_extra_tokens = visual_encoder.pos_embed.shape[-2] - num_patches
# height (== width) for the checkpoint position embedding
orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
# height (== width) for the new position embedding
new_size = int(num_patches**0.5)
if orig_size != new_size:
# class_token and dist_token are kept unchanged
extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
# only the position tokens are interpolated
pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
pos_tokens = pos_tokens.reshape(
-1, orig_size, orig_size, embedding_size
).permute(0, 3, 1, 2)
pos_tokens = torch.nn.functional.interpolate(
pos_tokens, size=(new_size, new_size), mode="bicubic", align_corners=False
)
pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
print(
"reshape position embedding from %d to %d" % (orig_size**2, new_size**2)
)
return new_pos_embed
else:
return pos_embed_checkpoint
class VisionTransformerEncoder(VisionTransformer, BaseEncoder):
@classmethod
def from_config(cls, cfg, from_pretrained=False):
vit_type = cfg.get("vit_type", "base")
image_size = cfg.get("image_size", 384)
ckpt_layer = cfg.get("vit_ckpt_layer", 0)
drop_path_rate = cfg.get("vit_drop_path_rate", 0)
norm_layer_eps = cfg.get("vit_layer_norm_epsilon", -1)
use_grad_checkpointing = cfg.get("vit_grad_ckpt", False)
if norm_layer_eps == -1:
norm_layer = None
else:
norm_layer = partial(nn.LayerNorm, eps=norm_layer_eps)
# norm_layer=partial(nn.LayerNorm, eps=1e-6),
assert vit_type in ["base", "large"], "vit parameter must be base or large"
if vit_type == "base":
vision_width = 768
visual_encoder = cls(
img_size=image_size,
patch_size=16,
embed_dim=vision_width,
depth=12,
num_heads=12,
use_grad_checkpointing=use_grad_checkpointing,
ckpt_layer=ckpt_layer,
drop_path_rate=0 or drop_path_rate,
norm_layer=norm_layer,
)
if from_pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth",
map_location="cpu",
check_hash=True,
)
state_dict = checkpoint["model"]
state_dict["pos_embed"] = interpolate_pos_embed(
state_dict["pos_embed"], visual_encoder
)
msg = visual_encoder.load_state_dict(state_dict, strict=False)
elif vit_type == "large":
vision_width = 1024
visual_encoder = cls(
img_size=image_size,
patch_size=16,
embed_dim=vision_width,
depth=24,
num_heads=16,
use_grad_checkpointing=use_grad_checkpointing,
ckpt_layer=ckpt_layer,
drop_path_rate=0.1 or drop_path_rate,
norm_layer=norm_layer,
)
if from_pretrained:
from timm.models.helpers import load_custom_pretrained
from timm.models.vision_transformer import default_cfgs
load_custom_pretrained(
visual_encoder, default_cfgs["vit_large_patch16_224_in21k"]
)
visual_encoder.vision_width = vision_width
return visual_encoder
def forward_features(self, x, register_blk=-1):
return super().forward(x, register_blk)
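# Minimal sketch of checkpoint position-embedding resizing, assuming a 224px
# checkpoint loaded into a 384px encoder with patch_size=16 (14x14 -> 24x24 grid).
if __name__ == "__main__":
    _enc = VisionTransformer(img_size=384, patch_size=16, embed_dim=768, depth=1, num_heads=12)
    _ckpt_pos_embed = torch.zeros(1, 1 + 14 * 14, 768)  # [CLS] token + 196 patch tokens
    _resized = interpolate_pos_embed(_ckpt_pos_embed, _enc)
    print(_resized.shape)  # torch.Size([1, 577, 768]) = [CLS] + 24*24 patch tokens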
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/vit.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import torch
import torch.nn as nn
from itertools import chain
from lavis.common.registry import registry
from lavis.models.base_model import BaseModel
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import T5ForConditionalGeneration
from lavis.models.pnp_vqa_models import prepare_qa_input
from lavis.models.blip_models.blip_image_text_matching import compute_gradcam
from transformers.modeling_outputs import CausalLMOutputWithCrossAttentions
@registry.register_model("pnp_vqa")
class PNPVQA(BaseModel):
"""
PNPVQA model consists of three submodels for zero-shot VQA:
        1. Image-question matching model
2. Image captioning model
3. Question answering model
Supported model types:
- base: BLIPITM, BLIPCaption, PNPUnifiedQAv2FiD (t5-base)
- large: BLIPITM, BLIPCaption, PNPUnifiedQAv2FiD (t5-large)
- 3b: BLIPITM, BLIPCaption, PNPUnifiedQAv2FiD (t5-3b)
Usage:
>>> from lavis.models import load_model
>>> model = load_model("pnp_vqa", "base", is_eval=True)
>>> model = load_model("pnp_vqa", "large", is_eval=True)
>>> model = load_model("pnp_vqa", "3b", is_eval=True)
"""
PRETRAINED_MODEL_CONFIG_DICT = {"base": "configs/models/pnp-vqa/pnp_vqa_base.yaml",
"large": "configs/models/pnp-vqa/pnp_vqa_large.yaml",
"3b": "configs/models/pnp-vqa/pnp_vqa_3b.yaml",
}
def __init__(self, image_question_matching_model, image_captioning_model,
question_answering_model, offload_model=False):
super().__init__()
self.image_question_matching_model = image_question_matching_model
self.image_captioning_model = image_captioning_model
self.question_answering_model = question_answering_model
self.offload_model = offload_model
def forward_itm(self, samples, block_num=7):
"""
Args:
samples (dict): A dictionary containing the following keys:
- image (torch.Tensor): A tensor of shape (batch_size, 3, H, W)
- text_input (list): A list of strings of length batch_size
block_num (int): The index of cross-attention block for gradcam computation.
Returns:
samples (dict): A dictionary containing the following keys:
- image (torch.Tensor): A tensor of shape (batch_size, 3, H, W)
- text_input (list): A list of strings of length batch_size
- gradcams (torch.Tensor): A tensor of shape (batch_size, H*W)
"""
image = samples['image']
question = [text.strip('?') for text in samples['text_input']]
tokenized_text = self.image_question_matching_model.tokenizer(question, padding='longest', truncation=True,
return_tensors="pt").to(self.image_question_matching_model.device)
with torch.set_grad_enabled(True):
gradcams, _ = compute_gradcam(model=self.image_question_matching_model,
visual_input=image,
text_input=question,
tokenized_text=tokenized_text,
block_num=block_num)
gradcams = [gradcam_[1] for gradcam_ in gradcams]
samples['gradcams'] = torch.stack(gradcams).reshape(samples['image'].size(0), -1)
return samples
def forward_cap(
self,
samples,
cap_max_length=20,
cap_min_length=0,
top_p=1,
top_k=50,
repetition_penalty=1.0,
num_captions=100,
num_patches=20,
):
"""
Args:
samples (dict): A dictionary containing the following keys:
- image (torch.Tensor): A tensor of shape (batch_size, 3, H, W)
- text_input (list): A list of strings of length batch_size
- gradcams (torch.Tensor): A tensor of shape (batch_size, H*W)
cap_max_length (int): The maximum length of the caption to be generated.
cap_min_length (int): The minimum length of the caption to be generated.
top_p (float): The cumulative probability for nucleus sampling.
top_k (float): The number of the highest probability tokens for top-k sampling.
repetition_penalty (float): The parameter for repetition penalty. 1.0 means no penalty.
num_captions (int): Number of captions generated for each image.
num_patches (int): Number of patches sampled for each image.
Returns:
samples (dict): A dictionary containing the following keys:
- image (torch.Tensor): A tensor of shape (batch_size, 3, H, W)
- text_input (list): A list of strings of length batch_size
- gradcams (torch.Tensor): A tensor of shape (batch_size, H*W)
- captions (nested list): A nested list of strings of total length batch_size * num_captions
"""
encoder_out = self.image_captioning_model.forward_encoder(samples)
captions = [[] for _ in range(encoder_out.size(0))]
min_num_captions = 0
while min_num_captions < num_captions:
encoder_out_samples = []
for i in range(num_captions):
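                # Sample num_patches patch indices per image from the question-conditioned
                # GradCAM distribution (+1 skips the [CLS] position), so each caption is
                # generated from question-relevant regions of the feature map.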
patch_id = torch.multinomial(samples['gradcams'].to(self.image_captioning_model.device),
num_patches).reshape(encoder_out.size(0), -1) + 1
patch_id = patch_id.sort(dim=1).values.unsqueeze(-1).expand(-1, -1, encoder_out.size(2))
encoder_out_sample = torch.gather(encoder_out, 1, patch_id)
encoder_out_samples.append(encoder_out_sample)
stacked = torch.stack(encoder_out_samples, dim=1)
image_embeds = torch.flatten(stacked, start_dim=0, end_dim=1) #(bsz*num_seq, num_patch, dim)
image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(self.image_captioning_model.device)
model_kwargs = {
"encoder_hidden_states": image_embeds,
"encoder_attention_mask": image_atts,
}
prompt = [self.image_captioning_model.prompt] * image_embeds.size(0)
prompt = self.image_captioning_model.tokenizer(prompt,
return_tensors="pt").to(self.image_captioning_model.device)
prompt.input_ids[:, 0] = self.image_captioning_model.tokenizer.bos_token_id
prompt.input_ids = prompt.input_ids[:, :-1]
decoder_out = self.image_captioning_model.text_decoder.generate(
input_ids=prompt.input_ids,
max_length=cap_max_length,
min_length=cap_min_length,
do_sample=True,
top_p=top_p,
top_k=top_k,
num_return_sequences=1,
eos_token_id=self.image_captioning_model.tokenizer.sep_token_id,
pad_token_id=self.image_captioning_model.tokenizer.pad_token_id,
repetition_penalty=repetition_penalty,
**model_kwargs)
outputs = self.image_captioning_model.tokenizer.batch_decode(decoder_out, skip_special_tokens=True)
for counter, output in enumerate(outputs):
ind = counter//num_captions
if len(captions[ind]) < num_captions:
caption = output[len(self.image_captioning_model.prompt):]
overlap_caption = [1 for caps in captions[ind] if caption in caps]
if len(overlap_caption) == 0:
captions[ind].append(caption)
min_num_captions = min([len(i) for i in captions])
samples['captions'] = captions
return samples
def forward_qa(
self,
samples,
num_beams=1,
max_len=20,
min_len=0,
internal_bsz_fid=1,
num_captions=100,
num_captions_fid=1,
):
"""
Args:
samples (dict): A dictionary containing the following keys:
- image (torch.Tensor): A tensor of shape (batch_size, 3, H, W)
- text_input (list): A list of strings of length batch_size
- gradcams (torch.Tensor): A tensor of shape (batch_size, H*W)
- captions (nested list): A nested list of strings of total length batch_size * num_captions
- question_captions (nested list): A nested list of concatenated strings of questions and captions
num_beams (int): Number of beams for beam search. 1 means no beam search.
max_len (int): Maximum length of generated answers.
min_len (int): Minimum length of generated answers.
internal_bsz_fid (int): Internal batch size when using FiD decoding.
num_captions (int): Number of captions generated for each image.
num_captions_fid (int): Number of captions concatenated with a question during FiD decoding.
Returns:
List: A list of strings, each string is an answer.
"""
prepare_qa_input(samples, num_captions=num_captions, num_captions_fid=num_captions_fid)
pred_answers = []
question_captions = samples['question_captions']
question_captions_chunk = [question_captions[i:i + internal_bsz_fid]
for i in range(0, len(question_captions), internal_bsz_fid)]
question_captions_chunk = list(chain(*question_captions_chunk))
for question_caption in question_captions_chunk:
question_caption_input = self.question_answering_model.tokenizer(question_caption, padding='longest',
truncation=True, return_tensors="pt").to(self.question_answering_model.device)
question_caption_input.input_ids = question_caption_input.input_ids.reshape(
internal_bsz_fid, -1, question_caption_input.input_ids.size(1))
question_caption_input.attention_mask = question_caption_input.attention_mask.reshape(
internal_bsz_fid, -1, question_caption_input.attention_mask.size(1))
outputs = self.question_answering_model.generate(input_ids=question_caption_input.input_ids,
attention_mask=question_caption_input.attention_mask,
num_beams=num_beams,
min_length=min_len,
max_length=max_len,
)
for output in outputs:
pred_answer = self.question_answering_model.tokenizer.decode(output, skip_special_tokens=True)
pred_answers.append(pred_answer)
return pred_answers
def predict_answers(
self,
samples,
num_beams=1,
inference_method="generate",
max_len=20,
min_len=0,
internal_bsz_fid=1,
num_captions=50,
num_captions_fid=1,
cap_max_length=20,
cap_min_length=10,
top_k=50,
top_p=1,
repetition_penalty=1,
num_patches=50,
block_num=7,
):
"""
Args:
samples (dict): A dictionary containing the following keys:
- image (torch.Tensor): A tensor of shape (batch_size, 3, H, W). Default H=480, W=480.
- text_input (str or [str]): String or a list of strings, each string is a question.
                  The number of questions must equal the batch size. A single string is first converted to a list of length 1.
num_beams (int): Number of beams for beam search. 1 means no beam search.
inference_method (str): Inference method. Must be "generate". The model will generate answers.
max_len (int): Maximum length of generated answers.
min_len (int): Minimum length of generated answers.
internal_bsz_fid (int): Internal batch size when using FiD decoding.
num_captions (int): Number of captions generated for each image.
num_captions_fid (int): Number of captions concatenated with a question during FiD decoding.
cap_max_length (int): The maximum length of the caption to be generated.
cap_min_length (int): The minimum length of the caption to be generated.
top_k (float): The number of the highest probability tokens for top-k sampling.
top_p (float): The cumulative probability for nucleus sampling.
repetition_penalty (float): The parameter for repetition penalty. 1.0 means no penalty.
num_patches (int): Number of patches sampled for each image.
block_num (int): The index of cross-attention block for gradcam computation.
Returns:
List: A list of strings, each string is an answer.
gradcams (torch.Tensor): A tensor of shape (batch_size, H*W)
captions (nested list): A nested list of strings of total length batch_size * num_captions
"""
assert inference_method in [
"generate",
], "Inference method must be 'generate', got {}.".format(
inference_method
)
if isinstance(samples["text_input"], str):
samples["text_input"] = [samples["text_input"]]
assert len(samples["text_input"]) == samples["image"].size(
0
), "The number of questions must be equal to the batch size."
samples = self.forward_itm(samples, block_num=block_num)
samples = self.forward_cap(samples,
cap_max_length=cap_max_length,
cap_min_length=cap_min_length,
top_k=top_k,
top_p=top_p,
repetition_penalty=repetition_penalty,
num_captions=num_captions,
num_patches=num_patches)
if self.offload_model:
samples['image'] = samples['image'].to('cpu')
self.image_question_matching_model.to('cpu')
self.image_captioning_model.to('cpu')
torch.cuda.empty_cache()
pred_answers = self.forward_qa(samples,
num_beams=num_beams,
max_len=max_len,
min_len=min_len,
internal_bsz_fid=internal_bsz_fid,
num_captions=num_captions,
num_captions_fid=num_captions_fid)
if self.offload_model:
self.image_question_matching_model.to(self.question_answering_model.device)
self.image_captioning_model.to(self.question_answering_model.device)
return pred_answers, samples['captions'], samples['gradcams']
@classmethod
def from_config(cls, model_config):
itm_config = model_config.image_question_matching_model
cap_config = model_config.image_captioning_model
qa_config = model_config.question_answering_model
itm_cls = registry.get_model_class(itm_config.arch)
cap_cls = registry.get_model_class(cap_config.arch)
qa_cls = registry.get_model_class(qa_config.arch)
image_question_matching_model = itm_cls.from_config(itm_config)
image_captioning_model = cap_cls.from_config(cap_config)
question_answering_model = qa_cls.from_config(qa_config)
model = cls(image_question_matching_model=image_question_matching_model,
image_captioning_model=image_captioning_model,
question_answering_model=question_answering_model,
offload_model= True if model_config.model_type == '3b' else False,
)
        return model
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/pnp_vqa_models/pnp_vqa.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import torch
def prepare_qa_input(sample, num_captions, num_captions_fid):
sample_question_captions = []
for question, captions in zip(sample['text_input'], sample['captions']):
assert isinstance(captions, list)
question_captions = []
question_caption = ''
for cap_id, cap_ in enumerate(captions[0:num_captions]):
question_caption += (cap_.strip() + '. ')
if (cap_id + 1) != num_captions and ((cap_id + 1) % num_captions_fid == 0):
question_caption = question.lower().strip() + " \\n " + question_caption.lower().strip()
question_captions.append(question_caption)
question_caption = ''
if (cap_id + 1) == num_captions:
question_caption = question.lower().strip() + " \\n " + question_caption.lower().strip()
question_captions.append(question_caption)
sample_question_captions.append(question_captions)
sample['question_captions'] = sample_question_captions
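# Minimal usage sketch for prepare_qa_input with hypothetical captions: using
# num_captions=4 and num_captions_fid=2 groups two captions per FiD context,
# producing two question-caption contexts for the single sample below.
if __name__ == "__main__":
    _sample = {
        "text_input": ["What color is the sofa?"],
        "captions": [["a living room", "a grey sofa", "a wooden table", "a lamp"]],
    }
    prepare_qa_input(_sample, num_captions=4, num_captions_fid=2)
    print(len(_sample["question_captions"][0]))  # 2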
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/pnp_vqa_models/__init__.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
Based on facebookresearch code base
https://github.com/facebookresearch/FiD
"""
import torch
import torch.nn as nn
from lavis.common.registry import registry
from lavis.models.base_model import BaseModel
from lavis.common.utils import get_abs_path
from transformers import T5Config, T5Tokenizer, T5ForConditionalGeneration
@registry.register_model("pnp_unifiedqav2_fid")
class PNPUnifiedQAv2FiD(T5ForConditionalGeneration, BaseModel):
PRETRAINED_MODEL_CONFIG_DICT = {}
def __init__(self, config, model_path):
super().__init__(config)
self.tokenizer = T5Tokenizer.from_pretrained(model_path)
def forward(self, input_ids=None, attention_mask=None, **kwargs):
        if input_ids is not None:
if input_ids.dim() == 3:
self.encoder.num_contexts = input_ids.size(1)
input_ids = input_ids.view(input_ids.size(0), -1)
        if attention_mask is not None:
attention_mask = attention_mask.view(attention_mask.size(0), -1)
return super().forward(
input_ids=input_ids,
attention_mask=attention_mask,
**kwargs
)
def generate(self, input_ids, attention_mask, num_beams=1, min_length=0, max_length=20):
self.encoder.num_contexts = input_ids.size(1)
return super().generate(
input_ids=input_ids.view(input_ids.size(0), -1),
attention_mask=attention_mask.view(attention_mask.size(0), -1),
num_beams=num_beams,
min_length=min_length,
max_length=max_length
)
def load_unifiedqa(self, state_dict):
self.load_state_dict(state_dict)
self.encoder = T5EncoderWrapper(self.encoder)
@classmethod
def from_config(cls, cfg):
model_path = cfg.get('pretrained')
t5_config_path = get_abs_path(cfg.get("t5_config_path"))
t5_config = T5Config.from_json_file(t5_config_path)
model = cls(t5_config, model_path)
model.load_unifiedqa(T5ForConditionalGeneration.from_pretrained(model_path).state_dict())
return model
class T5EncoderWrapper(torch.nn.Module):
def __init__(self, encoder):
super().__init__()
self.encoder = encoder
self.block = self.encoder.block
self.parallelize = self.encoder.parallelize
self.main_input_name = encoder.main_input_name
def forward(self, input_ids=None, attention_mask=None, **kwargs):
bsz, total_length = input_ids.shape
context_length = total_length // self.num_contexts
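        # Fusion-in-Decoder: encode each (question + captions) context independently by
        # folding the context dimension into the batch, then concatenate the encoder
        # outputs along the sequence axis so the decoder attends over all contexts jointly.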
input_ids = input_ids.view(bsz*self.num_contexts, context_length)
attention_mask = attention_mask.view(bsz*self.num_contexts, context_length)
outputs = self.encoder(input_ids, attention_mask, **kwargs)
outputs = (outputs[0].view(bsz, self.num_contexts*context_length, -1), ) + outputs[1:]
        return outputs
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/pnp_vqa_models/pnp_unifiedqav2_fid.py |