python_code | repo_name | file_path
---|---|---|
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by XuDong Wang from https://github.com/facebookresearch/Mask2Former/tree/main/mask2former/data/dataset_mappers/dataset_mapper.py
import copy
import logging
import numpy as np
import torch
from detectron2.config import configurable
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from detectron2.data.transforms import TransformGen
from detectron2.structures import BitMasks, Instances
from pycocotools import mask as coco_mask
__all__ = ["COCOInstanceNewBaselineDatasetMapper"]
def convert_coco_poly_to_mask(segmentations, height, width):
    masks = []
    for polygons in segmentations:
        rles = coco_mask.frPyObjects(polygons, height, width)
        mask = coco_mask.decode(rles)
        if len(mask.shape) < 3:
            mask = mask[..., None]
        mask = torch.as_tensor(mask, dtype=torch.uint8)
        mask = mask.any(dim=2)
        masks.append(mask)
    if masks:
        masks = torch.stack(masks, dim=0)
    else:
        masks = torch.zeros((0, height, width), dtype=torch.uint8)
    return masks
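
# Hedged example (not part of the original file): a minimal sanity check for
# convert_coco_poly_to_mask, assuming a single instance described by one square
# polygon in COCO format (x0, y0, x1, y1, ...). The helper name is hypothetical
# and only illustrates the expected input/output shapes.
def _example_convert_coco_poly_to_mask():
    polygons = [[[2.0, 2.0, 8.0, 2.0, 8.0, 8.0, 2.0, 8.0]]]  # one instance, one polygon
    masks = convert_coco_poly_to_mask(polygons, height=10, width=10)
    assert masks.shape == (1, 10, 10)  # (num_instances, H, W)
    return masks
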
def build_transform_gen(cfg, is_train):
    """
    Create a list of default :class:`Augmentation` from config.
    Now it includes resizing and flipping.
    Returns:
        list[Augmentation]
    """
    assert is_train, "Only support training augmentation"
    image_size = cfg.INPUT.IMAGE_SIZE
    min_scale = cfg.INPUT.MIN_SCALE
    max_scale = cfg.INPUT.MAX_SCALE

    augmentation = []

    if cfg.INPUT.RANDOM_FLIP != "none":
        augmentation.append(
            T.RandomFlip(
                horizontal=cfg.INPUT.RANDOM_FLIP == "horizontal",
                vertical=cfg.INPUT.RANDOM_FLIP == "vertical",
            )
        )

    augmentation.extend([
        T.ResizeScale(
            min_scale=min_scale, max_scale=max_scale, target_height=image_size, target_width=image_size
        ),
        T.FixedSizeCrop(crop_size=(image_size, image_size)),
    ])

    return augmentation
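
# Hedged sketch (not part of the original file): how the generated augmentation
# list is applied to an HWC image, mirroring what __call__ does below. The
# function name is hypothetical; T.apply_transform_gens is the real detectron2
# helper and returns the transformed image plus a TransformList that can also
# be applied to segmentation masks.
def _example_apply_transform_gens(cfg, image):
    tfm_gens = build_transform_gen(cfg, is_train=True)
    image, transforms = T.apply_transform_gens(tfm_gens, image)
    return image, transforms
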
# This is specifically designed for the COCO dataset.
class COCOInstanceNewBaselineDatasetMapper:
    """
    A callable which takes a dataset dict in Detectron2 Dataset format,
    and maps it into a format used by MaskFormer.

    This dataset mapper applies the same transformation as DETR for COCO panoptic segmentation.

    The callable currently does the following:

    1. Reads the image from "file_name"
    2. Applies geometric transforms to the image and annotations
    3. Finds and applies suitable cropping to the image and annotations
    4. Prepares the image and annotations as Tensors
    """

    @configurable
    def __init__(
        self,
        is_train=True,
        *,
        tfm_gens,
        image_format,
    ):
        """
        NOTE: this interface is experimental.
        Args:
            is_train: for training or inference
            tfm_gens: a list of augmentations or deterministic transforms to apply
            image_format: an image format supported by :func:`detection_utils.read_image`.
        """
        self.tfm_gens = tfm_gens
        logging.getLogger(__name__).info(
            "[COCOInstanceNewBaselineDatasetMapper] Full TransformGens used in training: {}".format(str(self.tfm_gens))
        )

        self.img_format = image_format
        self.is_train = is_train
    @classmethod
    def from_config(cls, cfg, is_train=True):
        # Build augmentation
        tfm_gens = build_transform_gen(cfg, is_train)

        ret = {
            "is_train": is_train,
            "tfm_gens": tfm_gens,
            "image_format": cfg.INPUT.FORMAT,
        }
        return ret
    def __call__(self, dataset_dict):
        """
        Args:
            dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.

        Returns:
            dict: a format that builtin models in detectron2 accept
        """
        dataset_dict = copy.deepcopy(dataset_dict)  # it will be modified by code below
        image = utils.read_image(dataset_dict["file_name"], format=self.img_format)
        utils.check_image_size(dataset_dict, image)

        # TODO: get padding mask
        # by feeding a "segmentation mask" to the same transforms
        padding_mask = np.ones(image.shape[:2])

        image, transforms = T.apply_transform_gens(self.tfm_gens, image)
        # the crop transformation has default padding value 0 for segmentation
        padding_mask = transforms.apply_segmentation(padding_mask)
        padding_mask = ~ padding_mask.astype(bool)

        image_shape = image.shape[:2]  # h, w

        # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
        # but not efficient on large generic data structures due to the use of pickle & mp.Queue.
        # Therefore it's important to use torch.Tensor.
        dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
        dataset_dict["padding_mask"] = torch.as_tensor(np.ascontiguousarray(padding_mask))

        if not self.is_train:
            # USER: Modify this if you want to keep them for some reason.
            dataset_dict.pop("annotations", None)
            return dataset_dict

        if "annotations" in dataset_dict:
            # USER: Modify this if you want to keep them for some reason.
            for anno in dataset_dict["annotations"]:
                # Let's always keep mask
                # if not self.mask_on:
                #     anno.pop("segmentation", None)
                anno.pop("keypoints", None)

            # USER: Implement additional transformations if you have other types of data
            # annos = [
            #     utils.transform_instance_annotations(obj, transforms, image_shape)
            #     for obj in dataset_dict.pop("annotations")
            # ]
            annos = [
                utils.transform_instance_annotations(obj, transforms, image_shape)
                for obj in dataset_dict.pop("annotations")
                if obj.get("iscrowd", 0) == 0
            ]
            # NOTE: does not support BitMask due to augmentation
            # Current BitMask cannot handle empty objects
            instances = utils.annotations_to_instances(annos, image_shape)
            # After transforms such as cropping are applied, the bounding box may no longer
            # tightly bound the object. As an example, imagine a triangle object
            # [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). The tight
            # bounding box of the cropped triangle should be [(1,0),(2,1)], which is not equal to
            # the intersection of original bounding box and the cropping box.
            instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
            # Need to filter empty instances first (due to augmentation)
            instances = utils.filter_empty_instances(instances)
            # Generate masks from polygon
            h, w = instances.image_size
            # image_size_xyxy = torch.as_tensor([w, h, w, h], dtype=torch.float)
            if hasattr(instances, 'gt_masks'):
                gt_masks = instances.gt_masks
                gt_masks = convert_coco_poly_to_mask(gt_masks.polygons, h, w)
                instances.gt_masks = gt_masks
            dataset_dict["instances"] = instances

        return dataset_dict
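
# Hedged usage sketch (not part of the original file): wiring the mapper into
# detectron2's training loader. It assumes `cfg` carries the INPUT.* keys read
# by build_transform_gen above; the function name is hypothetical.
def _example_build_train_loader(cfg):
    from detectron2.data import build_detection_train_loader

    mapper = COCOInstanceNewBaselineDatasetMapper(cfg, is_train=True)
    # Each batch yielded by the loader is a list of dicts carrying the "image",
    # "padding_mask" and "instances" keys produced by __call__ above.
    return build_detection_train_loader(cfg, mapper=mapper)
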
| CutLER-main | videocutler/mask2former/data/dataset_mappers/coco_instance_new_baseline_dataset_mapper.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by XuDong from https://github.com/facebookresearch/Mask2Former/tree/main/mask2former/data/dataset_mappers/
import copy
import logging
import numpy as np
import pycocotools.mask as mask_util
import torch
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from detectron2.projects.point_rend import ColorAugSSDTransform
from detectron2.structures import BitMasks, Instances, polygons_to_bitmask
__all__ = ["MaskFormerInstanceDatasetMapper"]
class MaskFormerInstanceDatasetMapper:
    """
    A callable which takes a dataset dict in Detectron2 Dataset format,
    and maps it into a format used by MaskFormer for instance segmentation.

    The callable currently does the following:

    1. Reads the image from "file_name"
    2. Applies geometric transforms to the image and annotations
    3. Finds and applies suitable cropping to the image and annotations
    4. Prepares the image and annotations as Tensors
    """

    @configurable
    def __init__(
        self,
        is_train=True,
        *,
        augmentations,
        image_format,
        size_divisibility,
    ):
        """
        NOTE: this interface is experimental.
        Args:
            is_train: for training or inference
            augmentations: a list of augmentations or deterministic transforms to apply
            image_format: an image format supported by :func:`detection_utils.read_image`.
            size_divisibility: pad image size to be divisible by this value
        """
        self.is_train = is_train
        self.tfm_gens = augmentations
        self.img_format = image_format
        self.size_divisibility = size_divisibility

        logger = logging.getLogger(__name__)
        mode = "training" if is_train else "inference"
        logger.info(f"[{self.__class__.__name__}] Augmentations used in {mode}: {augmentations}")
    @classmethod
    def from_config(cls, cfg, is_train=True):
        # Build augmentation
        augs = [
            T.ResizeShortestEdge(
                cfg.INPUT.MIN_SIZE_TRAIN,
                cfg.INPUT.MAX_SIZE_TRAIN,
                cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING,
            )
        ]
        if cfg.INPUT.CROP.ENABLED:
            augs.append(
                T.RandomCrop(
                    cfg.INPUT.CROP.TYPE,
                    cfg.INPUT.CROP.SIZE,
                )
            )
        if cfg.INPUT.COLOR_AUG_SSD:
            augs.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))
        augs.append(T.RandomFlip())

        ret = {
            "is_train": is_train,
            "augmentations": augs,
            "image_format": cfg.INPUT.FORMAT,
            "size_divisibility": cfg.INPUT.SIZE_DIVISIBILITY,
        }
        return ret
    def __call__(self, dataset_dict):
        """
        Args:
            dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.

        Returns:
            dict: a format that builtin models in detectron2 accept
        """
        assert self.is_train, "MaskFormerInstanceDatasetMapper should only be used for training!"

        dataset_dict = copy.deepcopy(dataset_dict)  # it will be modified by code below
        image = utils.read_image(dataset_dict["file_name"], format=self.img_format)
        utils.check_image_size(dataset_dict, image)

        aug_input = T.AugInput(image)
        aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)
        image = aug_input.image

        # transform instance masks
        assert "annotations" in dataset_dict
        for anno in dataset_dict["annotations"]:
            anno.pop("keypoints", None)

        annos = [
            utils.transform_instance_annotations(obj, transforms, image.shape[:2])
            for obj in dataset_dict.pop("annotations")
            if obj.get("iscrowd", 0) == 0
        ]

        if len(annos):
            assert "segmentation" in annos[0]
        segms = [obj["segmentation"] for obj in annos]
        masks = []
        for segm in segms:
            if isinstance(segm, list):
                # polygon
                masks.append(polygons_to_bitmask(segm, *image.shape[:2]))
            elif isinstance(segm, dict):
                # COCO RLE
                masks.append(mask_util.decode(segm))
            elif isinstance(segm, np.ndarray):
                assert segm.ndim == 2, "Expect segmentation of 2 dimensions, got {}.".format(
                    segm.ndim
                )
                # mask array
                masks.append(segm)
            else:
                raise ValueError(
                    "Cannot convert segmentation of type '{}' to BitMasks!"
                    "Supported types are: polygons as list[list[float] or ndarray],"
                    " COCO-style RLE as a dict, or a binary segmentation mask "
                    " in a 2D numpy array of shape HxW.".format(type(segm))
                )

        # Pad image and segmentation label here!
        image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
        masks = [torch.from_numpy(np.ascontiguousarray(x)) for x in masks]

        classes = [int(obj["category_id"]) for obj in annos]
        classes = torch.tensor(classes, dtype=torch.int64)

        if self.size_divisibility > 0:
            image_size = (image.shape[-2], image.shape[-1])
            padding_size = [
                0,
                self.size_divisibility - image_size[1],
                0,
                self.size_divisibility - image_size[0],
            ]
            # pad image
            image = F.pad(image, padding_size, value=128).contiguous()
            # pad mask
            masks = [F.pad(x, padding_size, value=0).contiguous() for x in masks]

        image_shape = (image.shape[-2], image.shape[-1])  # h, w

        # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
        # but not efficient on large generic data structures due to the use of pickle & mp.Queue.
        # Therefore it's important to use torch.Tensor.
        dataset_dict["image"] = image

        # Prepare per-category binary masks
        instances = Instances(image_shape)
        instances.gt_classes = classes
        if len(masks) == 0:
            # Some image does not have annotation (all ignored)
            instances.gt_masks = torch.zeros((0, image.shape[-2], image.shape[-1]))
        else:
            masks = BitMasks(torch.stack(masks))
            instances.gt_masks = masks.tensor

        dataset_dict["instances"] = instances

        return dataset_dict
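
# Hedged sketch (not part of the original file): how the padding_size list used
# above maps onto F.pad. F.pad pads the last dimensions first, so for a (C, H, W)
# tensor the list reads [w_left, w_right, h_top, h_bottom]. This variant adds a
# modulo so it also handles images already larger than the divisor; the helper
# name is hypothetical and it is not the file's exact logic.
def _example_pad_to_divisibility(image, size_divisibility=32, pad_value=128):
    h, w = image.shape[-2], image.shape[-1]
    pad_w = (size_divisibility - w % size_divisibility) % size_divisibility
    pad_h = (size_divisibility - h % size_divisibility) % size_divisibility
    return F.pad(image, [0, pad_w, 0, pad_h], value=pad_value)
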
| CutLER-main | videocutler/mask2former/data/dataset_mappers/mask_former_instance_dataset_mapper.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import os
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets import load_sem_seg
ADE20K_SEM_SEG_FULL_CATEGORIES = [
{"name": "wall", "id": 2978, "trainId": 0},
{"name": "building, edifice", "id": 312, "trainId": 1},
{"name": "sky", "id": 2420, "trainId": 2},
{"name": "tree", "id": 2855, "trainId": 3},
{"name": "road, route", "id": 2131, "trainId": 4},
{"name": "floor, flooring", "id": 976, "trainId": 5},
{"name": "ceiling", "id": 447, "trainId": 6},
{"name": "bed", "id": 165, "trainId": 7},
{"name": "sidewalk, pavement", "id": 2377, "trainId": 8},
{"name": "earth, ground", "id": 838, "trainId": 9},
{"name": "cabinet", "id": 350, "trainId": 10},
{"name": "person, individual, someone, somebody, mortal, soul", "id": 1831, "trainId": 11},
{"name": "grass", "id": 1125, "trainId": 12},
{"name": "windowpane, window", "id": 3055, "trainId": 13},
{"name": "car, auto, automobile, machine, motorcar", "id": 401, "trainId": 14},
{"name": "mountain, mount", "id": 1610, "trainId": 15},
{"name": "plant, flora, plant life", "id": 1910, "trainId": 16},
{"name": "table", "id": 2684, "trainId": 17},
{"name": "chair", "id": 471, "trainId": 18},
{"name": "curtain, drape, drapery, mantle, pall", "id": 687, "trainId": 19},
{"name": "door", "id": 774, "trainId": 20},
{"name": "sofa, couch, lounge", "id": 2473, "trainId": 21},
{"name": "sea", "id": 2264, "trainId": 22},
{"name": "painting, picture", "id": 1735, "trainId": 23},
{"name": "water", "id": 2994, "trainId": 24},
{"name": "mirror", "id": 1564, "trainId": 25},
{"name": "house", "id": 1276, "trainId": 26},
{"name": "rug, carpet, carpeting", "id": 2178, "trainId": 27},
{"name": "shelf", "id": 2329, "trainId": 28},
{"name": "armchair", "id": 57, "trainId": 29},
{"name": "fence, fencing", "id": 907, "trainId": 30},
{"name": "field", "id": 913, "trainId": 31},
{"name": "lamp", "id": 1395, "trainId": 32},
{"name": "rock, stone", "id": 2138, "trainId": 33},
{"name": "seat", "id": 2272, "trainId": 34},
{"name": "river", "id": 2128, "trainId": 35},
{"name": "desk", "id": 724, "trainId": 36},
{"name": "bathtub, bathing tub, bath, tub", "id": 155, "trainId": 37},
{"name": "railing, rail", "id": 2053, "trainId": 38},
{"name": "signboard, sign", "id": 2380, "trainId": 39},
{"name": "cushion", "id": 689, "trainId": 40},
{"name": "path", "id": 1788, "trainId": 41},
{"name": "work surface", "id": 3087, "trainId": 42},
{"name": "stairs, steps", "id": 2530, "trainId": 43},
{"name": "column, pillar", "id": 581, "trainId": 44},
{"name": "sink", "id": 2388, "trainId": 45},
{"name": "wardrobe, closet, press", "id": 2985, "trainId": 46},
{"name": "snow", "id": 2454, "trainId": 47},
{"name": "refrigerator, icebox", "id": 2096, "trainId": 48},
{"name": "base, pedestal, stand", "id": 137, "trainId": 49},
{"name": "bridge, span", "id": 294, "trainId": 50},
{"name": "blind, screen", "id": 212, "trainId": 51},
{"name": "runway", "id": 2185, "trainId": 52},
{"name": "cliff, drop, drop-off", "id": 524, "trainId": 53},
{"name": "sand", "id": 2212, "trainId": 54},
{"name": "fireplace, hearth, open fireplace", "id": 943, "trainId": 55},
{"name": "pillow", "id": 1869, "trainId": 56},
{"name": "screen door, screen", "id": 2251, "trainId": 57},
{"name": "toilet, can, commode, crapper, pot, potty, stool, throne", "id": 2793, "trainId": 58},
{"name": "skyscraper", "id": 2423, "trainId": 59},
{"name": "grandstand, covered stand", "id": 1121, "trainId": 60},
{"name": "box", "id": 266, "trainId": 61},
{"name": "pool table, billiard table, snooker table", "id": 1948, "trainId": 62},
{"name": "palm, palm tree", "id": 1744, "trainId": 63},
{"name": "double door", "id": 783, "trainId": 64},
{"name": "coffee table, cocktail table", "id": 571, "trainId": 65},
{"name": "counter", "id": 627, "trainId": 66},
{"name": "countertop", "id": 629, "trainId": 67},
{"name": "chest of drawers, chest, bureau, dresser", "id": 491, "trainId": 68},
{"name": "kitchen island", "id": 1374, "trainId": 69},
{"name": "boat", "id": 223, "trainId": 70},
{"name": "waterfall, falls", "id": 3016, "trainId": 71},
{
"name": "stove, kitchen stove, range, kitchen range, cooking stove",
"id": 2598,
"trainId": 72,
},
{"name": "flower", "id": 978, "trainId": 73},
{"name": "bookcase", "id": 239, "trainId": 74},
{"name": "controls", "id": 608, "trainId": 75},
{"name": "book", "id": 236, "trainId": 76},
{"name": "stairway, staircase", "id": 2531, "trainId": 77},
{"name": "streetlight, street lamp", "id": 2616, "trainId": 78},
{
"name": "computer, computing machine, computing device, data processor, electronic computer, information processing system",
"id": 591,
"trainId": 79,
},
{
"name": "bus, autobus, coach, charabanc, double-decker, jitney, motorbus, motorcoach, omnibus, passenger vehicle",
"id": 327,
"trainId": 80,
},
{"name": "swivel chair", "id": 2679, "trainId": 81},
{"name": "light, light source", "id": 1451, "trainId": 82},
{"name": "bench", "id": 181, "trainId": 83},
{"name": "case, display case, showcase, vitrine", "id": 420, "trainId": 84},
{"name": "towel", "id": 2821, "trainId": 85},
{"name": "fountain", "id": 1023, "trainId": 86},
{"name": "embankment", "id": 855, "trainId": 87},
{
"name": "television receiver, television, television set, tv, tv set, idiot box, boob tube, telly, goggle box",
"id": 2733,
"trainId": 88,
},
{"name": "van", "id": 2928, "trainId": 89},
{"name": "hill", "id": 1240, "trainId": 90},
{"name": "awning, sunshade, sunblind", "id": 77, "trainId": 91},
{"name": "poster, posting, placard, notice, bill, card", "id": 1969, "trainId": 92},
{"name": "truck, motortruck", "id": 2880, "trainId": 93},
{"name": "airplane, aeroplane, plane", "id": 14, "trainId": 94},
{"name": "pole", "id": 1936, "trainId": 95},
{"name": "tower", "id": 2828, "trainId": 96},
{"name": "court", "id": 631, "trainId": 97},
{"name": "ball", "id": 103, "trainId": 98},
{
"name": "aircraft carrier, carrier, flattop, attack aircraft carrier",
"id": 3144,
"trainId": 99,
},
{"name": "buffet, counter, sideboard", "id": 308, "trainId": 100},
{"name": "hovel, hut, hutch, shack, shanty", "id": 1282, "trainId": 101},
{"name": "apparel, wearing apparel, dress, clothes", "id": 38, "trainId": 102},
{"name": "minibike, motorbike", "id": 1563, "trainId": 103},
{"name": "animal, animate being, beast, brute, creature, fauna", "id": 29, "trainId": 104},
{"name": "chandelier, pendant, pendent", "id": 480, "trainId": 105},
{"name": "step, stair", "id": 2569, "trainId": 106},
{"name": "booth, cubicle, stall, kiosk", "id": 247, "trainId": 107},
{"name": "bicycle, bike, wheel, cycle", "id": 187, "trainId": 108},
{"name": "doorframe, doorcase", "id": 778, "trainId": 109},
{"name": "sconce", "id": 2243, "trainId": 110},
{"name": "pond", "id": 1941, "trainId": 111},
{"name": "trade name, brand name, brand, marque", "id": 2833, "trainId": 112},
{"name": "bannister, banister, balustrade, balusters, handrail", "id": 120, "trainId": 113},
{"name": "bag", "id": 95, "trainId": 114},
{"name": "traffic light, traffic signal, stoplight", "id": 2836, "trainId": 115},
{"name": "gazebo", "id": 1087, "trainId": 116},
{"name": "escalator, moving staircase, moving stairway", "id": 868, "trainId": 117},
{"name": "land, ground, soil", "id": 1401, "trainId": 118},
{"name": "board, plank", "id": 220, "trainId": 119},
{"name": "arcade machine", "id": 47, "trainId": 120},
{"name": "eiderdown, duvet, continental quilt", "id": 843, "trainId": 121},
{"name": "bar", "id": 123, "trainId": 122},
{"name": "stall, stand, sales booth", "id": 2537, "trainId": 123},
{"name": "playground", "id": 1927, "trainId": 124},
{"name": "ship", "id": 2337, "trainId": 125},
{"name": "ottoman, pouf, pouffe, puff, hassock", "id": 1702, "trainId": 126},
{
"name": "ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin",
"id": 64,
"trainId": 127,
},
{"name": "bottle", "id": 249, "trainId": 128},
{"name": "cradle", "id": 642, "trainId": 129},
{"name": "pot, flowerpot", "id": 1981, "trainId": 130},
{
"name": "conveyer belt, conveyor belt, conveyer, conveyor, transporter",
"id": 609,
"trainId": 131,
},
{"name": "train, railroad train", "id": 2840, "trainId": 132},
{"name": "stool", "id": 2586, "trainId": 133},
{"name": "lake", "id": 1393, "trainId": 134},
{"name": "tank, storage tank", "id": 2704, "trainId": 135},
{"name": "ice, water ice", "id": 1304, "trainId": 136},
{"name": "basket, handbasket", "id": 146, "trainId": 137},
{"name": "manhole", "id": 1494, "trainId": 138},
{"name": "tent, collapsible shelter", "id": 2739, "trainId": 139},
{"name": "canopy", "id": 389, "trainId": 140},
{"name": "microwave, microwave oven", "id": 1551, "trainId": 141},
{"name": "barrel, cask", "id": 131, "trainId": 142},
{"name": "dirt track", "id": 738, "trainId": 143},
{"name": "beam", "id": 161, "trainId": 144},
{"name": "dishwasher, dish washer, dishwashing machine", "id": 747, "trainId": 145},
{"name": "plate", "id": 1919, "trainId": 146},
{"name": "screen, crt screen", "id": 3109, "trainId": 147},
{"name": "ruins", "id": 2179, "trainId": 148},
{"name": "washer, automatic washer, washing machine", "id": 2989, "trainId": 149},
{"name": "blanket, cover", "id": 206, "trainId": 150},
{"name": "plaything, toy", "id": 1930, "trainId": 151},
{"name": "food, solid food", "id": 1002, "trainId": 152},
{"name": "screen, silver screen, projection screen", "id": 2254, "trainId": 153},
{"name": "oven", "id": 1708, "trainId": 154},
{"name": "stage", "id": 2526, "trainId": 155},
{"name": "beacon, lighthouse, beacon light, pharos", "id": 160, "trainId": 156},
{"name": "umbrella", "id": 2901, "trainId": 157},
{"name": "sculpture", "id": 2262, "trainId": 158},
{"name": "aqueduct", "id": 44, "trainId": 159},
{"name": "container", "id": 597, "trainId": 160},
{"name": "scaffolding, staging", "id": 2235, "trainId": 161},
{"name": "hood, exhaust hood", "id": 1260, "trainId": 162},
{"name": "curb, curbing, kerb", "id": 682, "trainId": 163},
{"name": "roller coaster", "id": 2151, "trainId": 164},
{"name": "horse, equus caballus", "id": 3107, "trainId": 165},
{"name": "catwalk", "id": 432, "trainId": 166},
{"name": "glass, drinking glass", "id": 1098, "trainId": 167},
{"name": "vase", "id": 2932, "trainId": 168},
{"name": "central reservation", "id": 461, "trainId": 169},
{"name": "carousel", "id": 410, "trainId": 170},
{"name": "radiator", "id": 2046, "trainId": 171},
{"name": "closet", "id": 533, "trainId": 172},
{"name": "machine", "id": 1481, "trainId": 173},
{"name": "pier, wharf, wharfage, dock", "id": 1858, "trainId": 174},
{"name": "fan", "id": 894, "trainId": 175},
{"name": "inflatable bounce game", "id": 1322, "trainId": 176},
{"name": "pitch", "id": 1891, "trainId": 177},
{"name": "paper", "id": 1756, "trainId": 178},
{"name": "arcade, colonnade", "id": 49, "trainId": 179},
{"name": "hot tub", "id": 1272, "trainId": 180},
{"name": "helicopter", "id": 1229, "trainId": 181},
{"name": "tray", "id": 2850, "trainId": 182},
{"name": "partition, divider", "id": 1784, "trainId": 183},
{"name": "vineyard", "id": 2962, "trainId": 184},
{"name": "bowl", "id": 259, "trainId": 185},
{"name": "bullring", "id": 319, "trainId": 186},
{"name": "flag", "id": 954, "trainId": 187},
{"name": "pot", "id": 1974, "trainId": 188},
{"name": "footbridge, overcrossing, pedestrian bridge", "id": 1013, "trainId": 189},
{"name": "shower", "id": 2356, "trainId": 190},
{"name": "bag, traveling bag, travelling bag, grip, suitcase", "id": 97, "trainId": 191},
{"name": "bulletin board, notice board", "id": 318, "trainId": 192},
{"name": "confessional booth", "id": 592, "trainId": 193},
{"name": "trunk, tree trunk, bole", "id": 2885, "trainId": 194},
{"name": "forest", "id": 1017, "trainId": 195},
{"name": "elevator door", "id": 851, "trainId": 196},
{"name": "laptop, laptop computer", "id": 1407, "trainId": 197},
{"name": "instrument panel", "id": 1332, "trainId": 198},
{"name": "bucket, pail", "id": 303, "trainId": 199},
{"name": "tapestry, tapis", "id": 2714, "trainId": 200},
{"name": "platform", "id": 1924, "trainId": 201},
{"name": "jacket", "id": 1346, "trainId": 202},
{"name": "gate", "id": 1081, "trainId": 203},
{"name": "monitor, monitoring device", "id": 1583, "trainId": 204},
{
"name": "telephone booth, phone booth, call box, telephone box, telephone kiosk",
"id": 2727,
"trainId": 205,
},
{"name": "spotlight, spot", "id": 2509, "trainId": 206},
{"name": "ring", "id": 2123, "trainId": 207},
{"name": "control panel", "id": 602, "trainId": 208},
{"name": "blackboard, chalkboard", "id": 202, "trainId": 209},
{"name": "air conditioner, air conditioning", "id": 10, "trainId": 210},
{"name": "chest", "id": 490, "trainId": 211},
{"name": "clock", "id": 530, "trainId": 212},
{"name": "sand dune", "id": 2213, "trainId": 213},
{"name": "pipe, pipage, piping", "id": 1884, "trainId": 214},
{"name": "vault", "id": 2934, "trainId": 215},
{"name": "table football", "id": 2687, "trainId": 216},
{"name": "cannon", "id": 387, "trainId": 217},
{"name": "swimming pool, swimming bath, natatorium", "id": 2668, "trainId": 218},
{"name": "fluorescent, fluorescent fixture", "id": 982, "trainId": 219},
{"name": "statue", "id": 2547, "trainId": 220},
{
"name": "loudspeaker, speaker, speaker unit, loudspeaker system, speaker system",
"id": 1474,
"trainId": 221,
},
{"name": "exhibitor", "id": 877, "trainId": 222},
{"name": "ladder", "id": 1391, "trainId": 223},
{"name": "carport", "id": 414, "trainId": 224},
{"name": "dam", "id": 698, "trainId": 225},
{"name": "pulpit", "id": 2019, "trainId": 226},
{"name": "skylight, fanlight", "id": 2422, "trainId": 227},
{"name": "water tower", "id": 3010, "trainId": 228},
{"name": "grill, grille, grillwork", "id": 1139, "trainId": 229},
{"name": "display board", "id": 753, "trainId": 230},
{"name": "pane, pane of glass, window glass", "id": 1747, "trainId": 231},
{"name": "rubbish, trash, scrap", "id": 2175, "trainId": 232},
{"name": "ice rink", "id": 1301, "trainId": 233},
{"name": "fruit", "id": 1033, "trainId": 234},
{"name": "patio", "id": 1789, "trainId": 235},
{"name": "vending machine", "id": 2939, "trainId": 236},
{"name": "telephone, phone, telephone set", "id": 2730, "trainId": 237},
{"name": "net", "id": 1652, "trainId": 238},
{
"name": "backpack, back pack, knapsack, packsack, rucksack, haversack",
"id": 90,
"trainId": 239,
},
{"name": "jar", "id": 1349, "trainId": 240},
{"name": "track", "id": 2830, "trainId": 241},
{"name": "magazine", "id": 1485, "trainId": 242},
{"name": "shutter", "id": 2370, "trainId": 243},
{"name": "roof", "id": 2155, "trainId": 244},
{"name": "banner, streamer", "id": 118, "trainId": 245},
{"name": "landfill", "id": 1402, "trainId": 246},
{"name": "post", "id": 1957, "trainId": 247},
{"name": "altarpiece, reredos", "id": 3130, "trainId": 248},
{"name": "hat, chapeau, lid", "id": 1197, "trainId": 249},
{"name": "arch, archway", "id": 52, "trainId": 250},
{"name": "table game", "id": 2688, "trainId": 251},
{"name": "bag, handbag, pocketbook, purse", "id": 96, "trainId": 252},
{"name": "document, written document, papers", "id": 762, "trainId": 253},
{"name": "dome", "id": 772, "trainId": 254},
{"name": "pier", "id": 1857, "trainId": 255},
{"name": "shanties", "id": 2315, "trainId": 256},
{"name": "forecourt", "id": 1016, "trainId": 257},
{"name": "crane", "id": 643, "trainId": 258},
{"name": "dog, domestic dog, canis familiaris", "id": 3105, "trainId": 259},
{"name": "piano, pianoforte, forte-piano", "id": 1849, "trainId": 260},
{"name": "drawing", "id": 791, "trainId": 261},
{"name": "cabin", "id": 349, "trainId": 262},
{
"name": "ad, advertisement, advertizement, advertising, advertizing, advert",
"id": 6,
"trainId": 263,
},
{"name": "amphitheater, amphitheatre, coliseum", "id": 3114, "trainId": 264},
{"name": "monument", "id": 1587, "trainId": 265},
{"name": "henhouse", "id": 1233, "trainId": 266},
{"name": "cockpit", "id": 559, "trainId": 267},
{"name": "heater, warmer", "id": 1223, "trainId": 268},
{"name": "windmill, aerogenerator, wind generator", "id": 3049, "trainId": 269},
{"name": "pool", "id": 1943, "trainId": 270},
{"name": "elevator, lift", "id": 853, "trainId": 271},
{"name": "decoration, ornament, ornamentation", "id": 709, "trainId": 272},
{"name": "labyrinth", "id": 1390, "trainId": 273},
{"name": "text, textual matter", "id": 2748, "trainId": 274},
{"name": "printer", "id": 2007, "trainId": 275},
{"name": "mezzanine, first balcony", "id": 1546, "trainId": 276},
{"name": "mattress", "id": 1513, "trainId": 277},
{"name": "straw", "id": 2600, "trainId": 278},
{"name": "stalls", "id": 2538, "trainId": 279},
{"name": "patio, terrace", "id": 1790, "trainId": 280},
{"name": "billboard, hoarding", "id": 194, "trainId": 281},
{"name": "bus stop", "id": 326, "trainId": 282},
{"name": "trouser, pant", "id": 2877, "trainId": 283},
{"name": "console table, console", "id": 594, "trainId": 284},
{"name": "rack", "id": 2036, "trainId": 285},
{"name": "notebook", "id": 1662, "trainId": 286},
{"name": "shrine", "id": 2366, "trainId": 287},
{"name": "pantry", "id": 1754, "trainId": 288},
{"name": "cart", "id": 418, "trainId": 289},
{"name": "steam shovel", "id": 2553, "trainId": 290},
{"name": "porch", "id": 1951, "trainId": 291},
{"name": "postbox, mailbox, letter box", "id": 1963, "trainId": 292},
{"name": "figurine, statuette", "id": 918, "trainId": 293},
{"name": "recycling bin", "id": 2086, "trainId": 294},
{"name": "folding screen", "id": 997, "trainId": 295},
{"name": "telescope", "id": 2731, "trainId": 296},
{"name": "deck chair, beach chair", "id": 704, "trainId": 297},
{"name": "kennel", "id": 1365, "trainId": 298},
{"name": "coffee maker", "id": 569, "trainId": 299},
{"name": "altar, communion table, lord's table", "id": 3108, "trainId": 300},
{"name": "fish", "id": 948, "trainId": 301},
{"name": "easel", "id": 839, "trainId": 302},
{"name": "artificial golf green", "id": 63, "trainId": 303},
{"name": "iceberg", "id": 1305, "trainId": 304},
{"name": "candlestick, candle holder", "id": 378, "trainId": 305},
{"name": "shower stall, shower bath", "id": 2362, "trainId": 306},
{"name": "television stand", "id": 2734, "trainId": 307},
{
"name": "wall socket, wall plug, electric outlet, electrical outlet, outlet, electric receptacle",
"id": 2982,
"trainId": 308,
},
{"name": "skeleton", "id": 2398, "trainId": 309},
{"name": "grand piano, grand", "id": 1119, "trainId": 310},
{"name": "candy, confect", "id": 382, "trainId": 311},
{"name": "grille door", "id": 1141, "trainId": 312},
{"name": "pedestal, plinth, footstall", "id": 1805, "trainId": 313},
{"name": "jersey, t-shirt, tee shirt", "id": 3102, "trainId": 314},
{"name": "shoe", "id": 2341, "trainId": 315},
{"name": "gravestone, headstone, tombstone", "id": 1131, "trainId": 316},
{"name": "shanty", "id": 2316, "trainId": 317},
{"name": "structure", "id": 2626, "trainId": 318},
{"name": "rocking chair, rocker", "id": 3104, "trainId": 319},
{"name": "bird", "id": 198, "trainId": 320},
{"name": "place mat", "id": 1896, "trainId": 321},
{"name": "tomb", "id": 2800, "trainId": 322},
{"name": "big top", "id": 190, "trainId": 323},
{"name": "gas pump, gasoline pump, petrol pump, island dispenser", "id": 3131, "trainId": 324},
{"name": "lockers", "id": 1463, "trainId": 325},
{"name": "cage", "id": 357, "trainId": 326},
{"name": "finger", "id": 929, "trainId": 327},
{"name": "bleachers", "id": 209, "trainId": 328},
{"name": "ferris wheel", "id": 912, "trainId": 329},
{"name": "hairdresser chair", "id": 1164, "trainId": 330},
{"name": "mat", "id": 1509, "trainId": 331},
{"name": "stands", "id": 2539, "trainId": 332},
{"name": "aquarium, fish tank, marine museum", "id": 3116, "trainId": 333},
{"name": "streetcar, tram, tramcar, trolley, trolley car", "id": 2615, "trainId": 334},
{"name": "napkin, table napkin, serviette", "id": 1644, "trainId": 335},
{"name": "dummy", "id": 818, "trainId": 336},
{"name": "booklet, brochure, folder, leaflet, pamphlet", "id": 242, "trainId": 337},
{"name": "sand trap", "id": 2217, "trainId": 338},
{"name": "shop, store", "id": 2347, "trainId": 339},
{"name": "table cloth", "id": 2686, "trainId": 340},
{"name": "service station", "id": 2300, "trainId": 341},
{"name": "coffin", "id": 572, "trainId": 342},
{"name": "drawer", "id": 789, "trainId": 343},
{"name": "cages", "id": 358, "trainId": 344},
{"name": "slot machine, coin machine", "id": 2443, "trainId": 345},
{"name": "balcony", "id": 101, "trainId": 346},
{"name": "volleyball court", "id": 2969, "trainId": 347},
{"name": "table tennis", "id": 2692, "trainId": 348},
{"name": "control table", "id": 606, "trainId": 349},
{"name": "shirt", "id": 2339, "trainId": 350},
{"name": "merchandise, ware, product", "id": 1533, "trainId": 351},
{"name": "railway", "id": 2060, "trainId": 352},
{"name": "parterre", "id": 1782, "trainId": 353},
{"name": "chimney", "id": 495, "trainId": 354},
{"name": "can, tin, tin can", "id": 371, "trainId": 355},
{"name": "tanks", "id": 2707, "trainId": 356},
{"name": "fabric, cloth, material, textile", "id": 889, "trainId": 357},
{"name": "alga, algae", "id": 3156, "trainId": 358},
{"name": "system", "id": 2683, "trainId": 359},
{"name": "map", "id": 1499, "trainId": 360},
{"name": "greenhouse", "id": 1135, "trainId": 361},
{"name": "mug", "id": 1619, "trainId": 362},
{"name": "barbecue", "id": 125, "trainId": 363},
{"name": "trailer", "id": 2838, "trainId": 364},
{"name": "toilet tissue, toilet paper, bathroom tissue", "id": 2792, "trainId": 365},
{"name": "organ", "id": 1695, "trainId": 366},
{"name": "dishrag, dishcloth", "id": 746, "trainId": 367},
{"name": "island", "id": 1343, "trainId": 368},
{"name": "keyboard", "id": 1370, "trainId": 369},
{"name": "trench", "id": 2858, "trainId": 370},
{"name": "basket, basketball hoop, hoop", "id": 145, "trainId": 371},
{"name": "steering wheel, wheel", "id": 2565, "trainId": 372},
{"name": "pitcher, ewer", "id": 1892, "trainId": 373},
{"name": "goal", "id": 1103, "trainId": 374},
{"name": "bread, breadstuff, staff of life", "id": 286, "trainId": 375},
{"name": "beds", "id": 170, "trainId": 376},
{"name": "wood", "id": 3073, "trainId": 377},
{"name": "file cabinet", "id": 922, "trainId": 378},
{"name": "newspaper, paper", "id": 1655, "trainId": 379},
{"name": "motorboat", "id": 1602, "trainId": 380},
{"name": "rope", "id": 2160, "trainId": 381},
{"name": "guitar", "id": 1151, "trainId": 382},
{"name": "rubble", "id": 2176, "trainId": 383},
{"name": "scarf", "id": 2239, "trainId": 384},
{"name": "barrels", "id": 132, "trainId": 385},
{"name": "cap", "id": 394, "trainId": 386},
{"name": "leaves", "id": 1424, "trainId": 387},
{"name": "control tower", "id": 607, "trainId": 388},
{"name": "dashboard", "id": 700, "trainId": 389},
{"name": "bandstand", "id": 116, "trainId": 390},
{"name": "lectern", "id": 1425, "trainId": 391},
{"name": "switch, electric switch, electrical switch", "id": 2676, "trainId": 392},
{"name": "baseboard, mopboard, skirting board", "id": 141, "trainId": 393},
{"name": "shower room", "id": 2360, "trainId": 394},
{"name": "smoke", "id": 2449, "trainId": 395},
{"name": "faucet, spigot", "id": 897, "trainId": 396},
{"name": "bulldozer", "id": 317, "trainId": 397},
{"name": "saucepan", "id": 2228, "trainId": 398},
{"name": "shops", "id": 2351, "trainId": 399},
{"name": "meter", "id": 1543, "trainId": 400},
{"name": "crevasse", "id": 656, "trainId": 401},
{"name": "gear", "id": 1088, "trainId": 402},
{"name": "candelabrum, candelabra", "id": 373, "trainId": 403},
{"name": "sofa bed", "id": 2472, "trainId": 404},
{"name": "tunnel", "id": 2892, "trainId": 405},
{"name": "pallet", "id": 1740, "trainId": 406},
{"name": "wire, conducting wire", "id": 3067, "trainId": 407},
{"name": "kettle, boiler", "id": 1367, "trainId": 408},
{"name": "bidet", "id": 188, "trainId": 409},
{
"name": "baby buggy, baby carriage, carriage, perambulator, pram, stroller, go-cart, pushchair, pusher",
"id": 79,
"trainId": 410,
},
{"name": "music stand", "id": 1633, "trainId": 411},
{"name": "pipe, tube", "id": 1885, "trainId": 412},
{"name": "cup", "id": 677, "trainId": 413},
{"name": "parking meter", "id": 1779, "trainId": 414},
{"name": "ice hockey rink", "id": 1297, "trainId": 415},
{"name": "shelter", "id": 2334, "trainId": 416},
{"name": "weeds", "id": 3027, "trainId": 417},
{"name": "temple", "id": 2735, "trainId": 418},
{"name": "patty, cake", "id": 1791, "trainId": 419},
{"name": "ski slope", "id": 2405, "trainId": 420},
{"name": "panel", "id": 1748, "trainId": 421},
{"name": "wallet", "id": 2983, "trainId": 422},
{"name": "wheel", "id": 3035, "trainId": 423},
{"name": "towel rack, towel horse", "id": 2824, "trainId": 424},
{"name": "roundabout", "id": 2168, "trainId": 425},
{"name": "canister, cannister, tin", "id": 385, "trainId": 426},
{"name": "rod", "id": 2148, "trainId": 427},
{"name": "soap dispenser", "id": 2465, "trainId": 428},
{"name": "bell", "id": 175, "trainId": 429},
{"name": "canvas", "id": 390, "trainId": 430},
{"name": "box office, ticket office, ticket booth", "id": 268, "trainId": 431},
{"name": "teacup", "id": 2722, "trainId": 432},
{"name": "trellis", "id": 2857, "trainId": 433},
{"name": "workbench", "id": 3088, "trainId": 434},
{"name": "valley, vale", "id": 2926, "trainId": 435},
{"name": "toaster", "id": 2782, "trainId": 436},
{"name": "knife", "id": 1378, "trainId": 437},
{"name": "podium", "id": 1934, "trainId": 438},
{"name": "ramp", "id": 2072, "trainId": 439},
{"name": "tumble dryer", "id": 2889, "trainId": 440},
{"name": "fireplug, fire hydrant, plug", "id": 944, "trainId": 441},
{"name": "gym shoe, sneaker, tennis shoe", "id": 1158, "trainId": 442},
{"name": "lab bench", "id": 1383, "trainId": 443},
{"name": "equipment", "id": 867, "trainId": 444},
{"name": "rocky formation", "id": 2145, "trainId": 445},
{"name": "plastic", "id": 1915, "trainId": 446},
{"name": "calendar", "id": 361, "trainId": 447},
{"name": "caravan", "id": 402, "trainId": 448},
{"name": "check-in-desk", "id": 482, "trainId": 449},
{"name": "ticket counter", "id": 2761, "trainId": 450},
{"name": "brush", "id": 300, "trainId": 451},
{"name": "mill", "id": 1554, "trainId": 452},
{"name": "covered bridge", "id": 636, "trainId": 453},
{"name": "bowling alley", "id": 260, "trainId": 454},
{"name": "hanger", "id": 1186, "trainId": 455},
{"name": "excavator", "id": 871, "trainId": 456},
{"name": "trestle", "id": 2859, "trainId": 457},
{"name": "revolving door", "id": 2103, "trainId": 458},
{"name": "blast furnace", "id": 208, "trainId": 459},
{"name": "scale, weighing machine", "id": 2236, "trainId": 460},
{"name": "projector", "id": 2012, "trainId": 461},
{"name": "soap", "id": 2462, "trainId": 462},
{"name": "locker", "id": 1462, "trainId": 463},
{"name": "tractor", "id": 2832, "trainId": 464},
{"name": "stretcher", "id": 2617, "trainId": 465},
{"name": "frame", "id": 1024, "trainId": 466},
{"name": "grating", "id": 1129, "trainId": 467},
{"name": "alembic", "id": 18, "trainId": 468},
{"name": "candle, taper, wax light", "id": 376, "trainId": 469},
{"name": "barrier", "id": 134, "trainId": 470},
{"name": "cardboard", "id": 407, "trainId": 471},
{"name": "cave", "id": 434, "trainId": 472},
{"name": "puddle", "id": 2017, "trainId": 473},
{"name": "tarp", "id": 2717, "trainId": 474},
{"name": "price tag", "id": 2005, "trainId": 475},
{"name": "watchtower", "id": 2993, "trainId": 476},
{"name": "meters", "id": 1545, "trainId": 477},
{
"name": "light bulb, lightbulb, bulb, incandescent lamp, electric light, electric-light bulb",
"id": 1445,
"trainId": 478,
},
{"name": "tracks", "id": 2831, "trainId": 479},
{"name": "hair dryer", "id": 1161, "trainId": 480},
{"name": "skirt", "id": 2411, "trainId": 481},
{"name": "viaduct", "id": 2949, "trainId": 482},
{"name": "paper towel", "id": 1769, "trainId": 483},
{"name": "coat", "id": 552, "trainId": 484},
{"name": "sheet", "id": 2327, "trainId": 485},
{"name": "fire extinguisher, extinguisher, asphyxiator", "id": 939, "trainId": 486},
{"name": "water wheel", "id": 3013, "trainId": 487},
{"name": "pottery, clayware", "id": 1986, "trainId": 488},
{"name": "magazine rack", "id": 1486, "trainId": 489},
{"name": "teapot", "id": 2723, "trainId": 490},
{"name": "microphone, mike", "id": 1549, "trainId": 491},
{"name": "support", "id": 2649, "trainId": 492},
{"name": "forklift", "id": 1020, "trainId": 493},
{"name": "canyon", "id": 392, "trainId": 494},
{"name": "cash register, register", "id": 422, "trainId": 495},
{"name": "leaf, leafage, foliage", "id": 1419, "trainId": 496},
{"name": "remote control, remote", "id": 2099, "trainId": 497},
{"name": "soap dish", "id": 2464, "trainId": 498},
{"name": "windshield, windscreen", "id": 3058, "trainId": 499},
{"name": "cat", "id": 430, "trainId": 500},
{"name": "cue, cue stick, pool cue, pool stick", "id": 675, "trainId": 501},
{"name": "vent, venthole, vent-hole, blowhole", "id": 2941, "trainId": 502},
{"name": "videos", "id": 2955, "trainId": 503},
{"name": "shovel", "id": 2355, "trainId": 504},
{"name": "eaves", "id": 840, "trainId": 505},
{"name": "antenna, aerial, transmitting aerial", "id": 32, "trainId": 506},
{"name": "shipyard", "id": 2338, "trainId": 507},
{"name": "hen, biddy", "id": 1232, "trainId": 508},
{"name": "traffic cone", "id": 2834, "trainId": 509},
{"name": "washing machines", "id": 2991, "trainId": 510},
{"name": "truck crane", "id": 2879, "trainId": 511},
{"name": "cds", "id": 444, "trainId": 512},
{"name": "niche", "id": 1657, "trainId": 513},
{"name": "scoreboard", "id": 2246, "trainId": 514},
{"name": "briefcase", "id": 296, "trainId": 515},
{"name": "boot", "id": 245, "trainId": 516},
{"name": "sweater, jumper", "id": 2661, "trainId": 517},
{"name": "hay", "id": 1202, "trainId": 518},
{"name": "pack", "id": 1714, "trainId": 519},
{"name": "bottle rack", "id": 251, "trainId": 520},
{"name": "glacier", "id": 1095, "trainId": 521},
{"name": "pergola", "id": 1828, "trainId": 522},
{"name": "building materials", "id": 311, "trainId": 523},
{"name": "television camera", "id": 2732, "trainId": 524},
{"name": "first floor", "id": 947, "trainId": 525},
{"name": "rifle", "id": 2115, "trainId": 526},
{"name": "tennis table", "id": 2738, "trainId": 527},
{"name": "stadium", "id": 2525, "trainId": 528},
{"name": "safety belt", "id": 2194, "trainId": 529},
{"name": "cover", "id": 634, "trainId": 530},
{"name": "dish rack", "id": 740, "trainId": 531},
{"name": "synthesizer", "id": 2682, "trainId": 532},
{"name": "pumpkin", "id": 2020, "trainId": 533},
{"name": "gutter", "id": 1156, "trainId": 534},
{"name": "fruit stand", "id": 1036, "trainId": 535},
{"name": "ice floe, floe", "id": 1295, "trainId": 536},
{"name": "handle, grip, handgrip, hold", "id": 1181, "trainId": 537},
{"name": "wheelchair", "id": 3037, "trainId": 538},
{"name": "mousepad, mouse mat", "id": 1614, "trainId": 539},
{"name": "diploma", "id": 736, "trainId": 540},
{"name": "fairground ride", "id": 893, "trainId": 541},
{"name": "radio", "id": 2047, "trainId": 542},
{"name": "hotplate", "id": 1274, "trainId": 543},
{"name": "junk", "id": 1361, "trainId": 544},
{"name": "wheelbarrow", "id": 3036, "trainId": 545},
{"name": "stream", "id": 2606, "trainId": 546},
{"name": "toll plaza", "id": 2797, "trainId": 547},
{"name": "punching bag", "id": 2022, "trainId": 548},
{"name": "trough", "id": 2876, "trainId": 549},
{"name": "throne", "id": 2758, "trainId": 550},
{"name": "chair desk", "id": 472, "trainId": 551},
{"name": "weighbridge", "id": 3028, "trainId": 552},
{"name": "extractor fan", "id": 882, "trainId": 553},
{"name": "hanging clothes", "id": 1189, "trainId": 554},
{"name": "dish, dish aerial, dish antenna, saucer", "id": 743, "trainId": 555},
{"name": "alarm clock, alarm", "id": 3122, "trainId": 556},
{"name": "ski lift", "id": 2401, "trainId": 557},
{"name": "chain", "id": 468, "trainId": 558},
{"name": "garage", "id": 1061, "trainId": 559},
{"name": "mechanical shovel", "id": 1523, "trainId": 560},
{"name": "wine rack", "id": 3059, "trainId": 561},
{"name": "tramway", "id": 2843, "trainId": 562},
{"name": "treadmill", "id": 2853, "trainId": 563},
{"name": "menu", "id": 1529, "trainId": 564},
{"name": "block", "id": 214, "trainId": 565},
{"name": "well", "id": 3032, "trainId": 566},
{"name": "witness stand", "id": 3071, "trainId": 567},
{"name": "branch", "id": 277, "trainId": 568},
{"name": "duck", "id": 813, "trainId": 569},
{"name": "casserole", "id": 426, "trainId": 570},
{"name": "frying pan", "id": 1039, "trainId": 571},
{"name": "desk organizer", "id": 727, "trainId": 572},
{"name": "mast", "id": 1508, "trainId": 573},
{"name": "spectacles, specs, eyeglasses, glasses", "id": 2490, "trainId": 574},
{"name": "service elevator", "id": 2299, "trainId": 575},
{"name": "dollhouse", "id": 768, "trainId": 576},
{"name": "hammock", "id": 1172, "trainId": 577},
{"name": "clothes hanging", "id": 537, "trainId": 578},
{"name": "photocopier", "id": 1847, "trainId": 579},
{"name": "notepad", "id": 1664, "trainId": 580},
{"name": "golf cart", "id": 1110, "trainId": 581},
{"name": "footpath", "id": 1014, "trainId": 582},
{"name": "cross", "id": 662, "trainId": 583},
{"name": "baptismal font", "id": 121, "trainId": 584},
{"name": "boiler", "id": 227, "trainId": 585},
{"name": "skip", "id": 2410, "trainId": 586},
{"name": "rotisserie", "id": 2165, "trainId": 587},
{"name": "tables", "id": 2696, "trainId": 588},
{"name": "water mill", "id": 3005, "trainId": 589},
{"name": "helmet", "id": 1231, "trainId": 590},
{"name": "cover curtain", "id": 635, "trainId": 591},
{"name": "brick", "id": 292, "trainId": 592},
{"name": "table runner", "id": 2690, "trainId": 593},
{"name": "ashtray", "id": 65, "trainId": 594},
{"name": "street box", "id": 2607, "trainId": 595},
{"name": "stick", "id": 2574, "trainId": 596},
{"name": "hangers", "id": 1188, "trainId": 597},
{"name": "cells", "id": 456, "trainId": 598},
{"name": "urinal", "id": 2913, "trainId": 599},
{"name": "centerpiece", "id": 459, "trainId": 600},
{"name": "portable fridge", "id": 1955, "trainId": 601},
{"name": "dvds", "id": 827, "trainId": 602},
{"name": "golf club", "id": 1111, "trainId": 603},
{"name": "skirting board", "id": 2412, "trainId": 604},
{"name": "water cooler", "id": 2997, "trainId": 605},
{"name": "clipboard", "id": 528, "trainId": 606},
{"name": "camera, photographic camera", "id": 366, "trainId": 607},
{"name": "pigeonhole", "id": 1863, "trainId": 608},
{"name": "chips", "id": 500, "trainId": 609},
{"name": "food processor", "id": 1001, "trainId": 610},
{"name": "post box", "id": 1958, "trainId": 611},
{"name": "lid", "id": 1441, "trainId": 612},
{"name": "drum", "id": 809, "trainId": 613},
{"name": "blender", "id": 210, "trainId": 614},
{"name": "cave entrance", "id": 435, "trainId": 615},
{"name": "dental chair", "id": 718, "trainId": 616},
{"name": "obelisk", "id": 1674, "trainId": 617},
{"name": "canoe", "id": 388, "trainId": 618},
{"name": "mobile", "id": 1572, "trainId": 619},
{"name": "monitors", "id": 1584, "trainId": 620},
{"name": "pool ball", "id": 1944, "trainId": 621},
{"name": "cue rack", "id": 674, "trainId": 622},
{"name": "baggage carts", "id": 99, "trainId": 623},
{"name": "shore", "id": 2352, "trainId": 624},
{"name": "fork", "id": 1019, "trainId": 625},
{"name": "paper filer", "id": 1763, "trainId": 626},
{"name": "bicycle rack", "id": 185, "trainId": 627},
{"name": "coat rack", "id": 554, "trainId": 628},
{"name": "garland", "id": 1066, "trainId": 629},
{"name": "sports bag", "id": 2508, "trainId": 630},
{"name": "fish tank", "id": 951, "trainId": 631},
{"name": "towel dispenser", "id": 2822, "trainId": 632},
{"name": "carriage", "id": 415, "trainId": 633},
{"name": "brochure", "id": 297, "trainId": 634},
{"name": "plaque", "id": 1914, "trainId": 635},
{"name": "stringer", "id": 2619, "trainId": 636},
{"name": "iron", "id": 1338, "trainId": 637},
{"name": "spoon", "id": 2505, "trainId": 638},
{"name": "flag pole", "id": 955, "trainId": 639},
{"name": "toilet brush", "id": 2786, "trainId": 640},
{"name": "book stand", "id": 238, "trainId": 641},
{"name": "water faucet, water tap, tap, hydrant", "id": 3000, "trainId": 642},
{"name": "ticket office", "id": 2763, "trainId": 643},
{"name": "broom", "id": 299, "trainId": 644},
{"name": "dvd", "id": 822, "trainId": 645},
{"name": "ice bucket", "id": 1288, "trainId": 646},
{"name": "carapace, shell, cuticle, shield", "id": 3101, "trainId": 647},
{"name": "tureen", "id": 2894, "trainId": 648},
{"name": "folders", "id": 992, "trainId": 649},
{"name": "chess", "id": 489, "trainId": 650},
{"name": "root", "id": 2157, "trainId": 651},
{"name": "sewing machine", "id": 2309, "trainId": 652},
{"name": "model", "id": 1576, "trainId": 653},
{"name": "pen", "id": 1810, "trainId": 654},
{"name": "violin", "id": 2964, "trainId": 655},
{"name": "sweatshirt", "id": 2662, "trainId": 656},
{"name": "recycling materials", "id": 2087, "trainId": 657},
{"name": "mitten", "id": 1569, "trainId": 658},
{"name": "chopping board, cutting board", "id": 503, "trainId": 659},
{"name": "mask", "id": 1505, "trainId": 660},
{"name": "log", "id": 1468, "trainId": 661},
{"name": "mouse, computer mouse", "id": 1613, "trainId": 662},
{"name": "grill", "id": 1138, "trainId": 663},
{"name": "hole", "id": 1256, "trainId": 664},
{"name": "target", "id": 2715, "trainId": 665},
{"name": "trash bag", "id": 2846, "trainId": 666},
{"name": "chalk", "id": 477, "trainId": 667},
{"name": "sticks", "id": 2576, "trainId": 668},
{"name": "balloon", "id": 108, "trainId": 669},
{"name": "score", "id": 2245, "trainId": 670},
{"name": "hair spray", "id": 1162, "trainId": 671},
{"name": "roll", "id": 2149, "trainId": 672},
{"name": "runner", "id": 2183, "trainId": 673},
{"name": "engine", "id": 858, "trainId": 674},
{"name": "inflatable glove", "id": 1324, "trainId": 675},
{"name": "games", "id": 1055, "trainId": 676},
{"name": "pallets", "id": 1741, "trainId": 677},
{"name": "baskets", "id": 149, "trainId": 678},
{"name": "coop", "id": 615, "trainId": 679},
{"name": "dvd player", "id": 825, "trainId": 680},
{"name": "rocking horse", "id": 2143, "trainId": 681},
{"name": "buckets", "id": 304, "trainId": 682},
{"name": "bread rolls", "id": 283, "trainId": 683},
{"name": "shawl", "id": 2322, "trainId": 684},
{"name": "watering can", "id": 3017, "trainId": 685},
{"name": "spotlights", "id": 2510, "trainId": 686},
{"name": "post-it", "id": 1960, "trainId": 687},
{"name": "bowls", "id": 265, "trainId": 688},
{"name": "security camera", "id": 2282, "trainId": 689},
{"name": "runner cloth", "id": 2184, "trainId": 690},
{"name": "lock", "id": 1461, "trainId": 691},
{"name": "alarm, warning device, alarm system", "id": 3113, "trainId": 692},
{"name": "side", "id": 2372, "trainId": 693},
{"name": "roulette", "id": 2166, "trainId": 694},
{"name": "bone", "id": 232, "trainId": 695},
{"name": "cutlery", "id": 693, "trainId": 696},
{"name": "pool balls", "id": 1945, "trainId": 697},
{"name": "wheels", "id": 3039, "trainId": 698},
{"name": "spice rack", "id": 2494, "trainId": 699},
{"name": "plant pots", "id": 1908, "trainId": 700},
{"name": "towel ring", "id": 2827, "trainId": 701},
{"name": "bread box", "id": 280, "trainId": 702},
{"name": "video", "id": 2950, "trainId": 703},
{"name": "funfair", "id": 1044, "trainId": 704},
{"name": "breads", "id": 288, "trainId": 705},
{"name": "tripod", "id": 2863, "trainId": 706},
{"name": "ironing board", "id": 1342, "trainId": 707},
{"name": "skimmer", "id": 2409, "trainId": 708},
{"name": "hollow", "id": 1258, "trainId": 709},
{"name": "scratching post", "id": 2249, "trainId": 710},
{"name": "tricycle", "id": 2862, "trainId": 711},
{"name": "file box", "id": 920, "trainId": 712},
{"name": "mountain pass", "id": 1607, "trainId": 713},
{"name": "tombstones", "id": 2802, "trainId": 714},
{"name": "cooker", "id": 610, "trainId": 715},
{"name": "card game, cards", "id": 3129, "trainId": 716},
{"name": "golf bag", "id": 1108, "trainId": 717},
{"name": "towel paper", "id": 2823, "trainId": 718},
{"name": "chaise lounge", "id": 476, "trainId": 719},
{"name": "sun", "id": 2641, "trainId": 720},
{"name": "toilet paper holder", "id": 2788, "trainId": 721},
{"name": "rake", "id": 2070, "trainId": 722},
{"name": "key", "id": 1368, "trainId": 723},
{"name": "umbrella stand", "id": 2903, "trainId": 724},
{"name": "dartboard", "id": 699, "trainId": 725},
{"name": "transformer", "id": 2844, "trainId": 726},
{"name": "fireplace utensils", "id": 942, "trainId": 727},
{"name": "sweatshirts", "id": 2663, "trainId": 728},
{
"name": "cellular telephone, cellular phone, cellphone, cell, mobile phone",
"id": 457,
"trainId": 729,
},
{"name": "tallboy", "id": 2701, "trainId": 730},
{"name": "stapler", "id": 2540, "trainId": 731},
{"name": "sauna", "id": 2231, "trainId": 732},
{"name": "test tube", "id": 2746, "trainId": 733},
{"name": "palette", "id": 1738, "trainId": 734},
{"name": "shopping carts", "id": 2350, "trainId": 735},
{"name": "tools", "id": 2808, "trainId": 736},
{"name": "push button, push, button", "id": 2025, "trainId": 737},
{"name": "star", "id": 2541, "trainId": 738},
{"name": "roof rack", "id": 2156, "trainId": 739},
{"name": "barbed wire", "id": 126, "trainId": 740},
{"name": "spray", "id": 2512, "trainId": 741},
{"name": "ear", "id": 831, "trainId": 742},
{"name": "sponge", "id": 2503, "trainId": 743},
{"name": "racket", "id": 2039, "trainId": 744},
{"name": "tins", "id": 2774, "trainId": 745},
{"name": "eyeglasses", "id": 886, "trainId": 746},
{"name": "file", "id": 919, "trainId": 747},
{"name": "scarfs", "id": 2240, "trainId": 748},
{"name": "sugar bowl", "id": 2636, "trainId": 749},
{"name": "flip flop", "id": 963, "trainId": 750},
{"name": "headstones", "id": 1218, "trainId": 751},
{"name": "laptop bag", "id": 1406, "trainId": 752},
{"name": "leash", "id": 1420, "trainId": 753},
{"name": "climbing frame", "id": 526, "trainId": 754},
{"name": "suit hanger", "id": 2639, "trainId": 755},
{"name": "floor spotlight", "id": 975, "trainId": 756},
{"name": "plate rack", "id": 1921, "trainId": 757},
{"name": "sewer", "id": 2305, "trainId": 758},
{"name": "hard drive", "id": 1193, "trainId": 759},
{"name": "sprinkler", "id": 2517, "trainId": 760},
{"name": "tools box", "id": 2809, "trainId": 761},
{"name": "necklace", "id": 1647, "trainId": 762},
{"name": "bulbs", "id": 314, "trainId": 763},
{"name": "steel industry", "id": 2560, "trainId": 764},
{"name": "club", "id": 545, "trainId": 765},
{"name": "jack", "id": 1345, "trainId": 766},
{"name": "door bars", "id": 775, "trainId": 767},
{
"name": "control panel, instrument panel, control board, board, panel",
"id": 603,
"trainId": 768,
},
{"name": "hairbrush", "id": 1163, "trainId": 769},
{"name": "napkin holder", "id": 1641, "trainId": 770},
{"name": "office", "id": 1678, "trainId": 771},
{"name": "smoke detector", "id": 2450, "trainId": 772},
{"name": "utensils", "id": 2915, "trainId": 773},
{"name": "apron", "id": 42, "trainId": 774},
{"name": "scissors", "id": 2242, "trainId": 775},
{"name": "terminal", "id": 2741, "trainId": 776},
{"name": "grinder", "id": 1143, "trainId": 777},
{"name": "entry phone", "id": 862, "trainId": 778},
{"name": "newspaper stand", "id": 1654, "trainId": 779},
{"name": "pepper shaker", "id": 1826, "trainId": 780},
{"name": "onions", "id": 1689, "trainId": 781},
{
"name": "central processing unit, cpu, c p u , central processor, processor, mainframe",
"id": 3124,
"trainId": 782,
},
{"name": "tape", "id": 2710, "trainId": 783},
{"name": "bat", "id": 152, "trainId": 784},
{"name": "coaster", "id": 549, "trainId": 785},
{"name": "calculator", "id": 360, "trainId": 786},
{"name": "potatoes", "id": 1982, "trainId": 787},
{"name": "luggage rack", "id": 1478, "trainId": 788},
{"name": "salt", "id": 2203, "trainId": 789},
{"name": "street number", "id": 2612, "trainId": 790},
{"name": "viewpoint", "id": 2956, "trainId": 791},
{"name": "sword", "id": 2681, "trainId": 792},
{"name": "cd", "id": 437, "trainId": 793},
{"name": "rowing machine", "id": 2171, "trainId": 794},
{"name": "plug", "id": 1933, "trainId": 795},
{"name": "andiron, firedog, dog, dog-iron", "id": 3110, "trainId": 796},
{"name": "pepper", "id": 1824, "trainId": 797},
{"name": "tongs", "id": 2803, "trainId": 798},
{"name": "bonfire", "id": 234, "trainId": 799},
{"name": "dog dish", "id": 764, "trainId": 800},
{"name": "belt", "id": 177, "trainId": 801},
{"name": "dumbbells", "id": 817, "trainId": 802},
{"name": "videocassette recorder, vcr", "id": 3145, "trainId": 803},
{"name": "hook", "id": 1262, "trainId": 804},
{"name": "envelopes", "id": 864, "trainId": 805},
{"name": "shower faucet", "id": 2359, "trainId": 806},
{"name": "watch", "id": 2992, "trainId": 807},
{"name": "padlock", "id": 1725, "trainId": 808},
{"name": "swimming pool ladder", "id": 2667, "trainId": 809},
{"name": "spanners", "id": 2484, "trainId": 810},
{"name": "gravy boat", "id": 1133, "trainId": 811},
{"name": "notice board", "id": 1667, "trainId": 812},
{"name": "trash bags", "id": 2847, "trainId": 813},
{"name": "fire alarm", "id": 932, "trainId": 814},
{"name": "ladle", "id": 1392, "trainId": 815},
{"name": "stethoscope", "id": 2573, "trainId": 816},
{"name": "rocket", "id": 2140, "trainId": 817},
{"name": "funnel", "id": 1046, "trainId": 818},
{"name": "bowling pins", "id": 264, "trainId": 819},
{"name": "valve", "id": 2927, "trainId": 820},
{"name": "thermometer", "id": 2752, "trainId": 821},
{"name": "cups", "id": 679, "trainId": 822},
{"name": "spice jar", "id": 2493, "trainId": 823},
{"name": "night light", "id": 1658, "trainId": 824},
{"name": "soaps", "id": 2466, "trainId": 825},
{"name": "games table", "id": 1057, "trainId": 826},
{"name": "slotted spoon", "id": 2444, "trainId": 827},
{"name": "reel", "id": 2093, "trainId": 828},
{"name": "scourer", "id": 2248, "trainId": 829},
{"name": "sleeping robe", "id": 2432, "trainId": 830},
{"name": "desk mat", "id": 726, "trainId": 831},
{"name": "dumbbell", "id": 816, "trainId": 832},
{"name": "hammer", "id": 1171, "trainId": 833},
{"name": "tie", "id": 2766, "trainId": 834},
{"name": "typewriter", "id": 2900, "trainId": 835},
{"name": "shaker", "id": 2313, "trainId": 836},
{"name": "cheese dish", "id": 488, "trainId": 837},
{"name": "sea star", "id": 2265, "trainId": 838},
{"name": "racquet", "id": 2043, "trainId": 839},
{"name": "butane gas cylinder", "id": 332, "trainId": 840},
{"name": "paper weight", "id": 1771, "trainId": 841},
{"name": "shaving brush", "id": 2320, "trainId": 842},
{"name": "sunglasses", "id": 2646, "trainId": 843},
{"name": "gear shift", "id": 1089, "trainId": 844},
{"name": "towel rail", "id": 2826, "trainId": 845},
{"name": "adding machine, totalizer, totaliser", "id": 3148, "trainId": 846},
]
def _get_ade20k_full_meta():
    # Id 0 is reserved for ignore_label; we change ignore_label from 0
    # to 255 in our pre-processing, so all ids are shifted by 1.
stuff_ids = [k["id"] for k in ADE20K_SEM_SEG_FULL_CATEGORIES]
assert len(stuff_ids) == 847, len(stuff_ids)
    # For semantic segmentation, this mapping maps from the (non-contiguous) dataset
    # category id (used for processing results) to a contiguous id in [0, 846] (used in models)
stuff_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(stuff_ids)}
stuff_classes = [k["name"] for k in ADE20K_SEM_SEG_FULL_CATEGORIES]
ret = {
"stuff_dataset_id_to_contiguous_id": stuff_dataset_id_to_contiguous_id,
"stuff_classes": stuff_classes,
}
return ret
def register_all_ade20k_full(root):
root = os.path.join(root, "ADE20K_2021_17_01")
meta = _get_ade20k_full_meta()
for name, dirname in [("train", "training"), ("val", "validation")]:
image_dir = os.path.join(root, "images_detectron2", dirname)
gt_dir = os.path.join(root, "annotations_detectron2", dirname)
name = f"ade20k_full_sem_seg_{name}"
DatasetCatalog.register(
name, lambda x=image_dir, y=gt_dir: load_sem_seg(y, x, gt_ext="tif", image_ext="jpg")
)
MetadataCatalog.get(name).set(
stuff_classes=meta["stuff_classes"][:],
image_root=image_dir,
sem_seg_root=gt_dir,
evaluator_type="sem_seg",
ignore_label=65535, # NOTE: gt is saved in 16-bit TIFF images
)
_root = os.getenv("DETECTRON2_DATASETS", "datasets")
register_all_ade20k_full(_root)
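# Usage sketch (illustrative addition, not part of the upstream registration
# logic): once this module is imported, the splits registered above can be
# inspected through detectron2's MetadataCatalog. Querying metadata reads no
# files, so this works even if ADE20K_2021_17_01 is not downloaded;
# DatasetCatalog.get("ade20k_full_sem_seg_val") would additionally require the
# images and TIFF annotations to exist under $DETECTRON2_DATASETS.
if __name__ == "__main__":
    _meta = MetadataCatalog.get("ade20k_full_sem_seg_val")
    print(len(_meta.stuff_classes))  # expected: 847 category names
    print(_meta.ignore_label)        # expected: 65535 (16-bit TIFF ground truth)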
| CutLER-main | videocutler/mask2former/data/datasets/register_ade20k_full.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import os
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets import load_sem_seg
MAPILLARY_VISTAS_SEM_SEG_CATEGORIES = [
{
"color": [165, 42, 42],
"instances": True,
"readable": "Bird",
"name": "animal--bird",
"evaluate": True,
},
{
"color": [0, 192, 0],
"instances": True,
"readable": "Ground Animal",
"name": "animal--ground-animal",
"evaluate": True,
},
{
"color": [196, 196, 196],
"instances": False,
"readable": "Curb",
"name": "construction--barrier--curb",
"evaluate": True,
},
{
"color": [190, 153, 153],
"instances": False,
"readable": "Fence",
"name": "construction--barrier--fence",
"evaluate": True,
},
{
"color": [180, 165, 180],
"instances": False,
"readable": "Guard Rail",
"name": "construction--barrier--guard-rail",
"evaluate": True,
},
{
"color": [90, 120, 150],
"instances": False,
"readable": "Barrier",
"name": "construction--barrier--other-barrier",
"evaluate": True,
},
{
"color": [102, 102, 156],
"instances": False,
"readable": "Wall",
"name": "construction--barrier--wall",
"evaluate": True,
},
{
"color": [128, 64, 255],
"instances": False,
"readable": "Bike Lane",
"name": "construction--flat--bike-lane",
"evaluate": True,
},
{
"color": [140, 140, 200],
"instances": True,
"readable": "Crosswalk - Plain",
"name": "construction--flat--crosswalk-plain",
"evaluate": True,
},
{
"color": [170, 170, 170],
"instances": False,
"readable": "Curb Cut",
"name": "construction--flat--curb-cut",
"evaluate": True,
},
{
"color": [250, 170, 160],
"instances": False,
"readable": "Parking",
"name": "construction--flat--parking",
"evaluate": True,
},
{
"color": [96, 96, 96],
"instances": False,
"readable": "Pedestrian Area",
"name": "construction--flat--pedestrian-area",
"evaluate": True,
},
{
"color": [230, 150, 140],
"instances": False,
"readable": "Rail Track",
"name": "construction--flat--rail-track",
"evaluate": True,
},
{
"color": [128, 64, 128],
"instances": False,
"readable": "Road",
"name": "construction--flat--road",
"evaluate": True,
},
{
"color": [110, 110, 110],
"instances": False,
"readable": "Service Lane",
"name": "construction--flat--service-lane",
"evaluate": True,
},
{
"color": [244, 35, 232],
"instances": False,
"readable": "Sidewalk",
"name": "construction--flat--sidewalk",
"evaluate": True,
},
{
"color": [150, 100, 100],
"instances": False,
"readable": "Bridge",
"name": "construction--structure--bridge",
"evaluate": True,
},
{
"color": [70, 70, 70],
"instances": False,
"readable": "Building",
"name": "construction--structure--building",
"evaluate": True,
},
{
"color": [150, 120, 90],
"instances": False,
"readable": "Tunnel",
"name": "construction--structure--tunnel",
"evaluate": True,
},
{
"color": [220, 20, 60],
"instances": True,
"readable": "Person",
"name": "human--person",
"evaluate": True,
},
{
"color": [255, 0, 0],
"instances": True,
"readable": "Bicyclist",
"name": "human--rider--bicyclist",
"evaluate": True,
},
{
"color": [255, 0, 100],
"instances": True,
"readable": "Motorcyclist",
"name": "human--rider--motorcyclist",
"evaluate": True,
},
{
"color": [255, 0, 200],
"instances": True,
"readable": "Other Rider",
"name": "human--rider--other-rider",
"evaluate": True,
},
{
"color": [200, 128, 128],
"instances": True,
"readable": "Lane Marking - Crosswalk",
"name": "marking--crosswalk-zebra",
"evaluate": True,
},
{
"color": [255, 255, 255],
"instances": False,
"readable": "Lane Marking - General",
"name": "marking--general",
"evaluate": True,
},
{
"color": [64, 170, 64],
"instances": False,
"readable": "Mountain",
"name": "nature--mountain",
"evaluate": True,
},
{
"color": [230, 160, 50],
"instances": False,
"readable": "Sand",
"name": "nature--sand",
"evaluate": True,
},
{
"color": [70, 130, 180],
"instances": False,
"readable": "Sky",
"name": "nature--sky",
"evaluate": True,
},
{
"color": [190, 255, 255],
"instances": False,
"readable": "Snow",
"name": "nature--snow",
"evaluate": True,
},
{
"color": [152, 251, 152],
"instances": False,
"readable": "Terrain",
"name": "nature--terrain",
"evaluate": True,
},
{
"color": [107, 142, 35],
"instances": False,
"readable": "Vegetation",
"name": "nature--vegetation",
"evaluate": True,
},
{
"color": [0, 170, 30],
"instances": False,
"readable": "Water",
"name": "nature--water",
"evaluate": True,
},
{
"color": [255, 255, 128],
"instances": True,
"readable": "Banner",
"name": "object--banner",
"evaluate": True,
},
{
"color": [250, 0, 30],
"instances": True,
"readable": "Bench",
"name": "object--bench",
"evaluate": True,
},
{
"color": [100, 140, 180],
"instances": True,
"readable": "Bike Rack",
"name": "object--bike-rack",
"evaluate": True,
},
{
"color": [220, 220, 220],
"instances": True,
"readable": "Billboard",
"name": "object--billboard",
"evaluate": True,
},
{
"color": [220, 128, 128],
"instances": True,
"readable": "Catch Basin",
"name": "object--catch-basin",
"evaluate": True,
},
{
"color": [222, 40, 40],
"instances": True,
"readable": "CCTV Camera",
"name": "object--cctv-camera",
"evaluate": True,
},
{
"color": [100, 170, 30],
"instances": True,
"readable": "Fire Hydrant",
"name": "object--fire-hydrant",
"evaluate": True,
},
{
"color": [40, 40, 40],
"instances": True,
"readable": "Junction Box",
"name": "object--junction-box",
"evaluate": True,
},
{
"color": [33, 33, 33],
"instances": True,
"readable": "Mailbox",
"name": "object--mailbox",
"evaluate": True,
},
{
"color": [100, 128, 160],
"instances": True,
"readable": "Manhole",
"name": "object--manhole",
"evaluate": True,
},
{
"color": [142, 0, 0],
"instances": True,
"readable": "Phone Booth",
"name": "object--phone-booth",
"evaluate": True,
},
{
"color": [70, 100, 150],
"instances": False,
"readable": "Pothole",
"name": "object--pothole",
"evaluate": True,
},
{
"color": [210, 170, 100],
"instances": True,
"readable": "Street Light",
"name": "object--street-light",
"evaluate": True,
},
{
"color": [153, 153, 153],
"instances": True,
"readable": "Pole",
"name": "object--support--pole",
"evaluate": True,
},
{
"color": [128, 128, 128],
"instances": True,
"readable": "Traffic Sign Frame",
"name": "object--support--traffic-sign-frame",
"evaluate": True,
},
{
"color": [0, 0, 80],
"instances": True,
"readable": "Utility Pole",
"name": "object--support--utility-pole",
"evaluate": True,
},
{
"color": [250, 170, 30],
"instances": True,
"readable": "Traffic Light",
"name": "object--traffic-light",
"evaluate": True,
},
{
"color": [192, 192, 192],
"instances": True,
"readable": "Traffic Sign (Back)",
"name": "object--traffic-sign--back",
"evaluate": True,
},
{
"color": [220, 220, 0],
"instances": True,
"readable": "Traffic Sign (Front)",
"name": "object--traffic-sign--front",
"evaluate": True,
},
{
"color": [140, 140, 20],
"instances": True,
"readable": "Trash Can",
"name": "object--trash-can",
"evaluate": True,
},
{
"color": [119, 11, 32],
"instances": True,
"readable": "Bicycle",
"name": "object--vehicle--bicycle",
"evaluate": True,
},
{
"color": [150, 0, 255],
"instances": True,
"readable": "Boat",
"name": "object--vehicle--boat",
"evaluate": True,
},
{
"color": [0, 60, 100],
"instances": True,
"readable": "Bus",
"name": "object--vehicle--bus",
"evaluate": True,
},
{
"color": [0, 0, 142],
"instances": True,
"readable": "Car",
"name": "object--vehicle--car",
"evaluate": True,
},
{
"color": [0, 0, 90],
"instances": True,
"readable": "Caravan",
"name": "object--vehicle--caravan",
"evaluate": True,
},
{
"color": [0, 0, 230],
"instances": True,
"readable": "Motorcycle",
"name": "object--vehicle--motorcycle",
"evaluate": True,
},
{
"color": [0, 80, 100],
"instances": False,
"readable": "On Rails",
"name": "object--vehicle--on-rails",
"evaluate": True,
},
{
"color": [128, 64, 64],
"instances": True,
"readable": "Other Vehicle",
"name": "object--vehicle--other-vehicle",
"evaluate": True,
},
{
"color": [0, 0, 110],
"instances": True,
"readable": "Trailer",
"name": "object--vehicle--trailer",
"evaluate": True,
},
{
"color": [0, 0, 70],
"instances": True,
"readable": "Truck",
"name": "object--vehicle--truck",
"evaluate": True,
},
{
"color": [0, 0, 192],
"instances": True,
"readable": "Wheeled Slow",
"name": "object--vehicle--wheeled-slow",
"evaluate": True,
},
{
"color": [32, 32, 32],
"instances": False,
"readable": "Car Mount",
"name": "void--car-mount",
"evaluate": True,
},
{
"color": [120, 10, 10],
"instances": False,
"readable": "Ego Vehicle",
"name": "void--ego-vehicle",
"evaluate": True,
},
{
"color": [0, 0, 0],
"instances": False,
"readable": "Unlabeled",
"name": "void--unlabeled",
"evaluate": False,
},
]
def _get_mapillary_vistas_meta():
stuff_classes = [k["readable"] for k in MAPILLARY_VISTAS_SEM_SEG_CATEGORIES if k["evaluate"]]
assert len(stuff_classes) == 65
stuff_colors = [k["color"] for k in MAPILLARY_VISTAS_SEM_SEG_CATEGORIES if k["evaluate"]]
assert len(stuff_colors) == 65
ret = {
"stuff_classes": stuff_classes,
"stuff_colors": stuff_colors,
}
return ret
def register_all_mapillary_vistas(root):
root = os.path.join(root, "mapillary_vistas")
meta = _get_mapillary_vistas_meta()
for name, dirname in [("train", "training"), ("val", "validation")]:
image_dir = os.path.join(root, dirname, "images")
gt_dir = os.path.join(root, dirname, "labels")
name = f"mapillary_vistas_sem_seg_{name}"
DatasetCatalog.register(
name, lambda x=image_dir, y=gt_dir: load_sem_seg(y, x, gt_ext="png", image_ext="jpg")
)
MetadataCatalog.get(name).set(
image_root=image_dir,
sem_seg_root=gt_dir,
evaluator_type="sem_seg",
ignore_label=65, # different from other datasets, Mapillary Vistas sets ignore_label to 65
**meta,
)
_root = os.getenv("DETECTRON2_DATASETS", "datasets")
register_all_mapillary_vistas(_root)
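# Usage sketch (illustrative addition): the semantic-segmentation splits
# registered above can be inspected via MetadataCatalog without touching the
# Mapillary Vistas files on disk.
if __name__ == "__main__":
    _meta = MetadataCatalog.get("mapillary_vistas_sem_seg_val")
    print(len(_meta.stuff_classes))  # expected: 65 evaluated classes
    print(_meta.ignore_label)        # expected: 65, as set above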
| CutLER-main | videocutler/mask2former/data/datasets/register_mapillary_vistas.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from . import (
register_ade20k_full,
register_ade20k_panoptic,
register_coco_stuff_10k,
register_mapillary_vistas,
register_coco_panoptic_annos_semseg,
register_ade20k_instance,
register_mapillary_vistas_panoptic,
)
| CutLER-main | videocutler/mask2former/data/datasets/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import json
import os
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.utils.file_io import PathManager
MAPILLARY_VISTAS_SEM_SEG_CATEGORIES = [
{'color': [165, 42, 42],
'id': 1,
'isthing': 1,
'name': 'Bird',
'supercategory': 'animal--bird'},
{'color': [0, 192, 0],
'id': 2,
'isthing': 1,
'name': 'Ground Animal',
'supercategory': 'animal--ground-animal'},
{'color': [196, 196, 196],
'id': 3,
'isthing': 0,
'name': 'Curb',
'supercategory': 'construction--barrier--curb'},
{'color': [190, 153, 153],
'id': 4,
'isthing': 0,
'name': 'Fence',
'supercategory': 'construction--barrier--fence'},
{'color': [180, 165, 180],
'id': 5,
'isthing': 0,
'name': 'Guard Rail',
'supercategory': 'construction--barrier--guard-rail'},
{'color': [90, 120, 150],
'id': 6,
'isthing': 0,
'name': 'Barrier',
'supercategory': 'construction--barrier--other-barrier'},
{'color': [102, 102, 156],
'id': 7,
'isthing': 0,
'name': 'Wall',
'supercategory': 'construction--barrier--wall'},
{'color': [128, 64, 255],
'id': 8,
'isthing': 0,
'name': 'Bike Lane',
'supercategory': 'construction--flat--bike-lane'},
{'color': [140, 140, 200],
'id': 9,
'isthing': 1,
'name': 'Crosswalk - Plain',
'supercategory': 'construction--flat--crosswalk-plain'},
{'color': [170, 170, 170],
'id': 10,
'isthing': 0,
'name': 'Curb Cut',
'supercategory': 'construction--flat--curb-cut'},
{'color': [250, 170, 160],
'id': 11,
'isthing': 0,
'name': 'Parking',
'supercategory': 'construction--flat--parking'},
{'color': [96, 96, 96],
'id': 12,
'isthing': 0,
'name': 'Pedestrian Area',
'supercategory': 'construction--flat--pedestrian-area'},
{'color': [230, 150, 140],
'id': 13,
'isthing': 0,
'name': 'Rail Track',
'supercategory': 'construction--flat--rail-track'},
{'color': [128, 64, 128],
'id': 14,
'isthing': 0,
'name': 'Road',
'supercategory': 'construction--flat--road'},
{'color': [110, 110, 110],
'id': 15,
'isthing': 0,
'name': 'Service Lane',
'supercategory': 'construction--flat--service-lane'},
{'color': [244, 35, 232],
'id': 16,
'isthing': 0,
'name': 'Sidewalk',
'supercategory': 'construction--flat--sidewalk'},
{'color': [150, 100, 100],
'id': 17,
'isthing': 0,
'name': 'Bridge',
'supercategory': 'construction--structure--bridge'},
{'color': [70, 70, 70],
'id': 18,
'isthing': 0,
'name': 'Building',
'supercategory': 'construction--structure--building'},
{'color': [150, 120, 90],
'id': 19,
'isthing': 0,
'name': 'Tunnel',
'supercategory': 'construction--structure--tunnel'},
{'color': [220, 20, 60],
'id': 20,
'isthing': 1,
'name': 'Person',
'supercategory': 'human--person'},
{'color': [255, 0, 0],
'id': 21,
'isthing': 1,
'name': 'Bicyclist',
'supercategory': 'human--rider--bicyclist'},
{'color': [255, 0, 100],
'id': 22,
'isthing': 1,
'name': 'Motorcyclist',
'supercategory': 'human--rider--motorcyclist'},
{'color': [255, 0, 200],
'id': 23,
'isthing': 1,
'name': 'Other Rider',
'supercategory': 'human--rider--other-rider'},
{'color': [200, 128, 128],
'id': 24,
'isthing': 1,
'name': 'Lane Marking - Crosswalk',
'supercategory': 'marking--crosswalk-zebra'},
{'color': [255, 255, 255],
'id': 25,
'isthing': 0,
'name': 'Lane Marking - General',
'supercategory': 'marking--general'},
{'color': [64, 170, 64],
'id': 26,
'isthing': 0,
'name': 'Mountain',
'supercategory': 'nature--mountain'},
{'color': [230, 160, 50],
'id': 27,
'isthing': 0,
'name': 'Sand',
'supercategory': 'nature--sand'},
{'color': [70, 130, 180],
'id': 28,
'isthing': 0,
'name': 'Sky',
'supercategory': 'nature--sky'},
{'color': [190, 255, 255],
'id': 29,
'isthing': 0,
'name': 'Snow',
'supercategory': 'nature--snow'},
{'color': [152, 251, 152],
'id': 30,
'isthing': 0,
'name': 'Terrain',
'supercategory': 'nature--terrain'},
{'color': [107, 142, 35],
'id': 31,
'isthing': 0,
'name': 'Vegetation',
'supercategory': 'nature--vegetation'},
{'color': [0, 170, 30],
'id': 32,
'isthing': 0,
'name': 'Water',
'supercategory': 'nature--water'},
{'color': [255, 255, 128],
'id': 33,
'isthing': 1,
'name': 'Banner',
'supercategory': 'object--banner'},
{'color': [250, 0, 30],
'id': 34,
'isthing': 1,
'name': 'Bench',
'supercategory': 'object--bench'},
{'color': [100, 140, 180],
'id': 35,
'isthing': 1,
'name': 'Bike Rack',
'supercategory': 'object--bike-rack'},
{'color': [220, 220, 220],
'id': 36,
'isthing': 1,
'name': 'Billboard',
'supercategory': 'object--billboard'},
{'color': [220, 128, 128],
'id': 37,
'isthing': 1,
'name': 'Catch Basin',
'supercategory': 'object--catch-basin'},
{'color': [222, 40, 40],
'id': 38,
'isthing': 1,
'name': 'CCTV Camera',
'supercategory': 'object--cctv-camera'},
{'color': [100, 170, 30],
'id': 39,
'isthing': 1,
'name': 'Fire Hydrant',
'supercategory': 'object--fire-hydrant'},
{'color': [40, 40, 40],
'id': 40,
'isthing': 1,
'name': 'Junction Box',
'supercategory': 'object--junction-box'},
{'color': [33, 33, 33],
'id': 41,
'isthing': 1,
'name': 'Mailbox',
'supercategory': 'object--mailbox'},
{'color': [100, 128, 160],
'id': 42,
'isthing': 1,
'name': 'Manhole',
'supercategory': 'object--manhole'},
{'color': [142, 0, 0],
'id': 43,
'isthing': 1,
'name': 'Phone Booth',
'supercategory': 'object--phone-booth'},
{'color': [70, 100, 150],
'id': 44,
'isthing': 0,
'name': 'Pothole',
'supercategory': 'object--pothole'},
{'color': [210, 170, 100],
'id': 45,
'isthing': 1,
'name': 'Street Light',
'supercategory': 'object--street-light'},
{'color': [153, 153, 153],
'id': 46,
'isthing': 1,
'name': 'Pole',
'supercategory': 'object--support--pole'},
{'color': [128, 128, 128],
'id': 47,
'isthing': 1,
'name': 'Traffic Sign Frame',
'supercategory': 'object--support--traffic-sign-frame'},
{'color': [0, 0, 80],
'id': 48,
'isthing': 1,
'name': 'Utility Pole',
'supercategory': 'object--support--utility-pole'},
{'color': [250, 170, 30],
'id': 49,
'isthing': 1,
'name': 'Traffic Light',
'supercategory': 'object--traffic-light'},
{'color': [192, 192, 192],
'id': 50,
'isthing': 1,
'name': 'Traffic Sign (Back)',
'supercategory': 'object--traffic-sign--back'},
{'color': [220, 220, 0],
'id': 51,
'isthing': 1,
'name': 'Traffic Sign (Front)',
'supercategory': 'object--traffic-sign--front'},
{'color': [140, 140, 20],
'id': 52,
'isthing': 1,
'name': 'Trash Can',
'supercategory': 'object--trash-can'},
{'color': [119, 11, 32],
'id': 53,
'isthing': 1,
'name': 'Bicycle',
'supercategory': 'object--vehicle--bicycle'},
{'color': [150, 0, 255],
'id': 54,
'isthing': 1,
'name': 'Boat',
'supercategory': 'object--vehicle--boat'},
{'color': [0, 60, 100],
'id': 55,
'isthing': 1,
'name': 'Bus',
'supercategory': 'object--vehicle--bus'},
{'color': [0, 0, 142],
'id': 56,
'isthing': 1,
'name': 'Car',
'supercategory': 'object--vehicle--car'},
{'color': [0, 0, 90],
'id': 57,
'isthing': 1,
'name': 'Caravan',
'supercategory': 'object--vehicle--caravan'},
{'color': [0, 0, 230],
'id': 58,
'isthing': 1,
'name': 'Motorcycle',
'supercategory': 'object--vehicle--motorcycle'},
{'color': [0, 80, 100],
'id': 59,
'isthing': 0,
'name': 'On Rails',
'supercategory': 'object--vehicle--on-rails'},
{'color': [128, 64, 64],
'id': 60,
'isthing': 1,
'name': 'Other Vehicle',
'supercategory': 'object--vehicle--other-vehicle'},
{'color': [0, 0, 110],
'id': 61,
'isthing': 1,
'name': 'Trailer',
'supercategory': 'object--vehicle--trailer'},
{'color': [0, 0, 70],
'id': 62,
'isthing': 1,
'name': 'Truck',
'supercategory': 'object--vehicle--truck'},
{'color': [0, 0, 192],
'id': 63,
'isthing': 1,
'name': 'Wheeled Slow',
'supercategory': 'object--vehicle--wheeled-slow'},
{'color': [32, 32, 32],
'id': 64,
'isthing': 0,
'name': 'Car Mount',
'supercategory': 'void--car-mount'},
{'color': [120, 10, 10],
'id': 65,
'isthing': 0,
'name': 'Ego Vehicle',
'supercategory': 'void--ego-vehicle'}
]
def load_mapillary_vistas_panoptic_json(json_file, image_dir, gt_dir, semseg_dir, meta):
"""
Args:
image_dir (str): path to the raw dataset. e.g., "~/coco/train2017".
gt_dir (str): path to the raw annotations. e.g., "~/coco/panoptic_train2017".
json_file (str): path to the json file. e.g., "~/coco/annotations/panoptic_train2017.json".
Returns:
list[dict]: a list of dicts in Detectron2 standard format. (See
`Using Custom Datasets </tutorials/datasets.html>`_ )
"""
def _convert_category_id(segment_info, meta):
if segment_info["category_id"] in meta["thing_dataset_id_to_contiguous_id"]:
segment_info["category_id"] = meta["thing_dataset_id_to_contiguous_id"][
segment_info["category_id"]
]
segment_info["isthing"] = True
else:
segment_info["category_id"] = meta["stuff_dataset_id_to_contiguous_id"][
segment_info["category_id"]
]
segment_info["isthing"] = False
return segment_info
with PathManager.open(json_file) as f:
json_info = json.load(f)
ret = []
for ann in json_info["annotations"]:
image_id = ann["image_id"]
# TODO: currently we assume image and label has the same filename but
# different extension, and images have extension ".jpg" for COCO. Need
# to make image extension a user-provided argument if we extend this
# function to support other COCO-like datasets.
image_file = os.path.join(image_dir, os.path.splitext(ann["file_name"])[0] + ".jpg")
label_file = os.path.join(gt_dir, ann["file_name"])
sem_label_file = os.path.join(semseg_dir, ann["file_name"])
segments_info = [_convert_category_id(x, meta) for x in ann["segments_info"]]
ret.append(
{
"file_name": image_file,
"image_id": image_id,
"pan_seg_file_name": label_file,
"sem_seg_file_name": sem_label_file,
"segments_info": segments_info,
}
)
assert len(ret), f"No images found in {image_dir}!"
assert PathManager.isfile(ret[0]["file_name"]), ret[0]["file_name"]
assert PathManager.isfile(ret[0]["pan_seg_file_name"]), ret[0]["pan_seg_file_name"]
assert PathManager.isfile(ret[0]["sem_seg_file_name"]), ret[0]["sem_seg_file_name"]
return ret
def register_mapillary_vistas_panoptic(
name, metadata, image_root, panoptic_root, semantic_root, panoptic_json, instances_json=None
):
"""
Register a "standard" version of ADE20k panoptic segmentation dataset named `name`.
The dictionaries in this registered dataset follows detectron2's standard format.
Hence it's called "standard".
Args:
name (str): the name that identifies a dataset,
e.g. "ade20k_panoptic_train"
metadata (dict): extra metadata associated with this dataset.
image_root (str): directory which contains all the images
panoptic_root (str): directory which contains panoptic annotation images in COCO format
panoptic_json (str): path to the json panoptic annotation file in COCO format
sem_seg_root (none): not used, to be consistent with
`register_coco_panoptic_separated`.
instances_json (str): path to the json instance annotation file
"""
panoptic_name = name
DatasetCatalog.register(
panoptic_name,
lambda: load_mapillary_vistas_panoptic_json(
panoptic_json, image_root, panoptic_root, semantic_root, metadata
),
)
MetadataCatalog.get(panoptic_name).set(
panoptic_root=panoptic_root,
image_root=image_root,
panoptic_json=panoptic_json,
json_file=instances_json,
evaluator_type="mapillary_vistas_panoptic_seg",
ignore_label=65, # different from other datasets, Mapillary Vistas sets ignore_label to 65
label_divisor=1000,
**metadata,
)
_PREDEFINED_SPLITS_MAPILLARY_VISTAS_PANOPTIC = {
"mapillary_vistas_panoptic_train": (
"mapillary_vistas/training/images",
"mapillary_vistas/training/panoptic",
"mapillary_vistas/training/panoptic/panoptic_2018.json",
"mapillary_vistas/training/labels",
),
"mapillary_vistas_panoptic_val": (
"mapillary_vistas/validation/images",
"mapillary_vistas/validation/panoptic",
"mapillary_vistas/validation/panoptic/panoptic_2018.json",
"mapillary_vistas/validation/labels",
),
}
def get_metadata():
meta = {}
# The following metadata maps contiguous id from [0, #thing categories +
    # #stuff categories) to their names and colors. We keep two copies of the
    # same name and color under "thing_*" and "stuff_*" because the current
    # visualization function in D2 handles thing and stuff classes differently
# due to some heuristic used in Panoptic FPN. We keep the same naming to
# enable reusing existing visualization functions.
thing_classes = [k["name"] for k in MAPILLARY_VISTAS_SEM_SEG_CATEGORIES]
thing_colors = [k["color"] for k in MAPILLARY_VISTAS_SEM_SEG_CATEGORIES]
stuff_classes = [k["name"] for k in MAPILLARY_VISTAS_SEM_SEG_CATEGORIES]
stuff_colors = [k["color"] for k in MAPILLARY_VISTAS_SEM_SEG_CATEGORIES]
meta["thing_classes"] = thing_classes
meta["thing_colors"] = thing_colors
meta["stuff_classes"] = stuff_classes
meta["stuff_colors"] = stuff_colors
# Convert category id for training:
# category id: like semantic segmentation, it is the class id for each
# pixel. Since there are some classes not used in evaluation, the category
    # id is not always contiguous and thus we have two sets of category ids:
# - original category id: category id in the original dataset, mainly
# used for evaluation.
# - contiguous category id: [0, #classes), in order to train the linear
# softmax classifier.
thing_dataset_id_to_contiguous_id = {}
stuff_dataset_id_to_contiguous_id = {}
for i, cat in enumerate(MAPILLARY_VISTAS_SEM_SEG_CATEGORIES):
if cat["isthing"]:
thing_dataset_id_to_contiguous_id[cat["id"]] = i
# else:
# stuff_dataset_id_to_contiguous_id[cat["id"]] = i
# in order to use sem_seg evaluator
stuff_dataset_id_to_contiguous_id[cat["id"]] = i
meta["thing_dataset_id_to_contiguous_id"] = thing_dataset_id_to_contiguous_id
meta["stuff_dataset_id_to_contiguous_id"] = stuff_dataset_id_to_contiguous_id
return meta
def register_all_mapillary_vistas_panoptic(root):
metadata = get_metadata()
for (
prefix,
(image_root, panoptic_root, panoptic_json, semantic_root),
    ) in _PREDEFINED_SPLITS_MAPILLARY_VISTAS_PANOPTIC.items():
# The "standard" version of COCO panoptic segmentation dataset,
# e.g. used by Panoptic-DeepLab
register_mapillary_vistas_panoptic(
prefix,
metadata,
os.path.join(root, image_root),
os.path.join(root, panoptic_root),
os.path.join(root, semantic_root),
os.path.join(root, panoptic_json),
)
_root = os.getenv("DETECTRON2_DATASETS", "datasets")
register_all_mapillary_vistas_panoptic(_root)
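# Usage sketch (illustrative addition): the panoptic metadata registered above
# can be inspected without loading the json. get_metadata() enumerates the 65
# categories in order, so dataset id 1 ("Bird", a thing) maps to contiguous id 0
# and dataset id 65 ("Ego Vehicle", stuff) maps to contiguous id 64; the stuff
# mapping covers every id so the sem_seg evaluator can be reused.
if __name__ == "__main__":
    _meta = MetadataCatalog.get("mapillary_vistas_panoptic_val")
    print(_meta.thing_dataset_id_to_contiguous_id[1])      # expected: 0
    print(_meta.stuff_dataset_id_to_contiguous_id[65])     # expected: 64
    print(len(_meta.stuff_classes), _meta.label_divisor)   # expected: 65 1000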
| CutLER-main | videocutler/mask2former/data/datasets/register_mapillary_vistas_panoptic.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import json
import logging
import numpy as np
import os
from PIL import Image
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets.coco import load_coco_json, register_coco_instances
from detectron2.utils.file_io import PathManager
ADE_CATEGORIES = [{'id': 7, 'name': 'bed'}, {'id': 8, 'name': 'windowpane'}, {'id': 10, 'name': 'cabinet'}, {'id': 12, 'name': 'person'}, {'id': 14, 'name': 'door'}, {'id': 15, 'name': 'table'}, {'id': 18, 'name': 'curtain'}, {'id': 19, 'name': 'chair'}, {'id': 20, 'name': 'car'}, {'id': 22, 'name': 'painting'}, {'id': 23, 'name': 'sofa'}, {'id': 24, 'name': 'shelf'}, {'id': 27, 'name': 'mirror'}, {'id': 30, 'name': 'armchair'}, {'id': 31, 'name': 'seat'}, {'id': 32, 'name': 'fence'}, {'id': 33, 'name': 'desk'}, {'id': 35, 'name': 'wardrobe'}, {'id': 36, 'name': 'lamp'}, {'id': 37, 'name': 'bathtub'}, {'id': 38, 'name': 'railing'}, {'id': 39, 'name': 'cushion'}, {'id': 41, 'name': 'box'}, {'id': 42, 'name': 'column'}, {'id': 43, 'name': 'signboard'}, {'id': 44, 'name': 'chest of drawers'}, {'id': 45, 'name': 'counter'}, {'id': 47, 'name': 'sink'}, {'id': 49, 'name': 'fireplace'}, {'id': 50, 'name': 'refrigerator'}, {'id': 53, 'name': 'stairs'}, {'id': 55, 'name': 'case'}, {'id': 56, 'name': 'pool table'}, {'id': 57, 'name': 'pillow'}, {'id': 58, 'name': 'screen door'}, {'id': 62, 'name': 'bookcase'}, {'id': 64, 'name': 'coffee table'}, {'id': 65, 'name': 'toilet'}, {'id': 66, 'name': 'flower'}, {'id': 67, 'name': 'book'}, {'id': 69, 'name': 'bench'}, {'id': 70, 'name': 'countertop'}, {'id': 71, 'name': 'stove'}, {'id': 72, 'name': 'palm'}, {'id': 73, 'name': 'kitchen island'}, {'id': 74, 'name': 'computer'}, {'id': 75, 'name': 'swivel chair'}, {'id': 76, 'name': 'boat'}, {'id': 78, 'name': 'arcade machine'}, {'id': 80, 'name': 'bus'}, {'id': 81, 'name': 'towel'}, {'id': 82, 'name': 'light'}, {'id': 83, 'name': 'truck'}, {'id': 85, 'name': 'chandelier'}, {'id': 86, 'name': 'awning'}, {'id': 87, 'name': 'streetlight'}, {'id': 88, 'name': 'booth'}, {'id': 89, 'name': 'television receiver'}, {'id': 90, 'name': 'airplane'}, {'id': 92, 'name': 'apparel'}, {'id': 93, 'name': 'pole'}, {'id': 95, 'name': 'bannister'}, {'id': 97, 'name': 'ottoman'}, {'id': 98, 'name': 'bottle'}, {'id': 102, 'name': 'van'}, {'id': 103, 'name': 'ship'}, {'id': 104, 'name': 'fountain'}, {'id': 107, 'name': 'washer'}, {'id': 108, 'name': 'plaything'}, {'id': 110, 'name': 'stool'}, {'id': 111, 'name': 'barrel'}, {'id': 112, 'name': 'basket'}, {'id': 115, 'name': 'bag'}, {'id': 116, 'name': 'minibike'}, {'id': 118, 'name': 'oven'}, {'id': 119, 'name': 'ball'}, {'id': 120, 'name': 'food'}, {'id': 121, 'name': 'step'}, {'id': 123, 'name': 'trade name'}, {'id': 124, 'name': 'microwave'}, {'id': 125, 'name': 'pot'}, {'id': 126, 'name': 'animal'}, {'id': 127, 'name': 'bicycle'}, {'id': 129, 'name': 'dishwasher'}, {'id': 130, 'name': 'screen'}, {'id': 132, 'name': 'sculpture'}, {'id': 133, 'name': 'hood'}, {'id': 134, 'name': 'sconce'}, {'id': 135, 'name': 'vase'}, {'id': 136, 'name': 'traffic light'}, {'id': 137, 'name': 'tray'}, {'id': 138, 'name': 'ashcan'}, {'id': 139, 'name': 'fan'}, {'id': 142, 'name': 'plate'}, {'id': 143, 'name': 'monitor'}, {'id': 144, 'name': 'bulletin board'}, {'id': 146, 'name': 'radiator'}, {'id': 147, 'name': 'glass'}, {'id': 148, 'name': 'clock'}, {'id': 149, 'name': 'flag'}]
_PREDEFINED_SPLITS = {
# point annotations without masks
"ade20k_instance_train": (
"ADEChallengeData2016/images/training",
"ADEChallengeData2016/ade20k_instance_train.json",
),
"ade20k_instance_val": (
"ADEChallengeData2016/images/validation",
"ADEChallengeData2016/ade20k_instance_val.json",
),
}
def _get_ade_instances_meta():
thing_ids = [k["id"] for k in ADE_CATEGORIES]
assert len(thing_ids) == 100, len(thing_ids)
# Mapping from the incontiguous ADE category id to an id in [0, 99]
thing_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(thing_ids)}
thing_classes = [k["name"] for k in ADE_CATEGORIES]
ret = {
"thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id,
"thing_classes": thing_classes,
}
return ret
def register_all_ade20k_instance(root):
for key, (image_root, json_file) in _PREDEFINED_SPLITS.items():
# Assume pre-defined datasets live in `./datasets`.
register_coco_instances(
key,
_get_ade_instances_meta(),
os.path.join(root, json_file) if "://" not in json_file else json_file,
os.path.join(root, image_root),
)
_root = os.getenv("DETECTRON2_DATASETS", "datasets")
register_all_ade20k_instance(_root)
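# Usage sketch (illustrative addition): register_coco_instances() above attaches
# the instance metadata to both splits; it can be inspected without the ADE20k
# images or json files being present.
if __name__ == "__main__":
    _meta = MetadataCatalog.get("ade20k_instance_val")
    print(len(_meta.thing_classes))                     # expected: 100
    print(_meta.thing_dataset_id_to_contiguous_id[7])   # expected: 0 ("bed")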
| CutLER-main | videocutler/mask2former/data/datasets/register_ade20k_instance.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import json
import os
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets import load_sem_seg
from detectron2.data.datasets.builtin_meta import COCO_CATEGORIES
from detectron2.utils.file_io import PathManager
_PREDEFINED_SPLITS_COCO_PANOPTIC = {
"coco_2017_train_panoptic": (
# This is the original panoptic annotation directory
"coco/panoptic_train2017",
"coco/annotations/panoptic_train2017.json",
# This directory contains semantic annotations that are
# converted from panoptic annotations.
# It is used by PanopticFPN.
# You can use the script at detectron2/datasets/prepare_panoptic_fpn.py
# to create these directories.
"coco/panoptic_semseg_train2017",
),
"coco_2017_val_panoptic": (
"coco/panoptic_val2017",
"coco/annotations/panoptic_val2017.json",
"coco/panoptic_semseg_val2017",
),
}
def get_metadata():
meta = {}
# The following metadata maps contiguous id from [0, #thing categories +
    # #stuff categories) to their names and colors. We keep two copies of the
    # same name and color under "thing_*" and "stuff_*" because the current
    # visualization function in D2 handles thing and stuff classes differently
# due to some heuristic used in Panoptic FPN. We keep the same naming to
# enable reusing existing visualization functions.
thing_classes = [k["name"] for k in COCO_CATEGORIES if k["isthing"] == 1]
thing_colors = [k["color"] for k in COCO_CATEGORIES if k["isthing"] == 1]
stuff_classes = [k["name"] for k in COCO_CATEGORIES]
stuff_colors = [k["color"] for k in COCO_CATEGORIES]
meta["thing_classes"] = thing_classes
meta["thing_colors"] = thing_colors
meta["stuff_classes"] = stuff_classes
meta["stuff_colors"] = stuff_colors
# Convert category id for training:
# category id: like semantic segmentation, it is the class id for each
# pixel. Since there are some classes not used in evaluation, the category
    # id is not always contiguous and thus we have two sets of category ids:
# - original category id: category id in the original dataset, mainly
# used for evaluation.
# - contiguous category id: [0, #classes), in order to train the linear
# softmax classifier.
thing_dataset_id_to_contiguous_id = {}
stuff_dataset_id_to_contiguous_id = {}
for i, cat in enumerate(COCO_CATEGORIES):
if cat["isthing"]:
thing_dataset_id_to_contiguous_id[cat["id"]] = i
# else:
# stuff_dataset_id_to_contiguous_id[cat["id"]] = i
# in order to use sem_seg evaluator
stuff_dataset_id_to_contiguous_id[cat["id"]] = i
meta["thing_dataset_id_to_contiguous_id"] = thing_dataset_id_to_contiguous_id
meta["stuff_dataset_id_to_contiguous_id"] = stuff_dataset_id_to_contiguous_id
return meta
def load_coco_panoptic_json(json_file, image_dir, gt_dir, semseg_dir, meta):
"""
Args:
image_dir (str): path to the raw dataset. e.g., "~/coco/train2017".
gt_dir (str): path to the raw annotations. e.g., "~/coco/panoptic_train2017".
json_file (str): path to the json file. e.g., "~/coco/annotations/panoptic_train2017.json".
Returns:
list[dict]: a list of dicts in Detectron2 standard format. (See
`Using Custom Datasets </tutorials/datasets.html>`_ )
"""
def _convert_category_id(segment_info, meta):
if segment_info["category_id"] in meta["thing_dataset_id_to_contiguous_id"]:
segment_info["category_id"] = meta["thing_dataset_id_to_contiguous_id"][
segment_info["category_id"]
]
segment_info["isthing"] = True
else:
segment_info["category_id"] = meta["stuff_dataset_id_to_contiguous_id"][
segment_info["category_id"]
]
segment_info["isthing"] = False
return segment_info
with PathManager.open(json_file) as f:
json_info = json.load(f)
ret = []
for ann in json_info["annotations"]:
image_id = int(ann["image_id"])
# TODO: currently we assume image and label has the same filename but
# different extension, and images have extension ".jpg" for COCO. Need
# to make image extension a user-provided argument if we extend this
# function to support other COCO-like datasets.
image_file = os.path.join(image_dir, os.path.splitext(ann["file_name"])[0] + ".jpg")
label_file = os.path.join(gt_dir, ann["file_name"])
sem_label_file = os.path.join(semseg_dir, ann["file_name"])
segments_info = [_convert_category_id(x, meta) for x in ann["segments_info"]]
ret.append(
{
"file_name": image_file,
"image_id": image_id,
"pan_seg_file_name": label_file,
"sem_seg_file_name": sem_label_file,
"segments_info": segments_info,
}
)
assert len(ret), f"No images found in {image_dir}!"
assert PathManager.isfile(ret[0]["file_name"]), ret[0]["file_name"]
assert PathManager.isfile(ret[0]["pan_seg_file_name"]), ret[0]["pan_seg_file_name"]
assert PathManager.isfile(ret[0]["sem_seg_file_name"]), ret[0]["sem_seg_file_name"]
return ret
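# Illustrative sketch (not from the original file) of one record returned by
# load_coco_panoptic_json(); the paths below are hypothetical and only show the
# expected shape, with "category_id" already remapped to a contiguous id:
#   {
#       "file_name": ".../coco/train2017/000000000009.jpg",
#       "image_id": 9,
#       "pan_seg_file_name": ".../coco/panoptic_train2017/000000000009.png",
#       "sem_seg_file_name": ".../coco/panoptic_semseg_train2017/000000000009.png",
#       "segments_info": [{"id": ..., "category_id": <contiguous id>, "isthing": ...}, ...],
#   }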
def register_coco_panoptic_annos_sem_seg(
name, metadata, image_root, panoptic_root, panoptic_json, sem_seg_root, instances_json
):
panoptic_name = name
delattr(MetadataCatalog.get(panoptic_name), "thing_classes")
delattr(MetadataCatalog.get(panoptic_name), "thing_colors")
MetadataCatalog.get(panoptic_name).set(
thing_classes=metadata["thing_classes"],
thing_colors=metadata["thing_colors"],
# thing_dataset_id_to_contiguous_id=metadata["thing_dataset_id_to_contiguous_id"],
)
# the name is "coco_2017_train_panoptic_with_sem_seg" and "coco_2017_val_panoptic_with_sem_seg"
semantic_name = name + "_with_sem_seg"
DatasetCatalog.register(
semantic_name,
lambda: load_coco_panoptic_json(panoptic_json, image_root, panoptic_root, sem_seg_root, metadata),
)
MetadataCatalog.get(semantic_name).set(
sem_seg_root=sem_seg_root,
panoptic_root=panoptic_root,
image_root=image_root,
panoptic_json=panoptic_json,
json_file=instances_json,
evaluator_type="coco_panoptic_seg",
ignore_label=255,
label_divisor=1000,
**metadata,
)
def register_all_coco_panoptic_annos_sem_seg(root):
for (
prefix,
(panoptic_root, panoptic_json, semantic_root),
) in _PREDEFINED_SPLITS_COCO_PANOPTIC.items():
prefix_instances = prefix[: -len("_panoptic")]
instances_meta = MetadataCatalog.get(prefix_instances)
image_root, instances_json = instances_meta.image_root, instances_meta.json_file
register_coco_panoptic_annos_sem_seg(
prefix,
get_metadata(),
image_root,
os.path.join(root, panoptic_root),
os.path.join(root, panoptic_json),
os.path.join(root, semantic_root),
instances_json,
)
_root = os.getenv("DETECTRON2_DATASETS", "datasets")
register_all_coco_panoptic_annos_sem_seg(_root)
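# Usage sketch (illustrative addition): the "*_panoptic_with_sem_seg" datasets
# registered above reuse the builtin COCO panoptic metadata plus the converted
# semantic-segmentation root; the metadata can be queried without the COCO
# files on disk.
if __name__ == "__main__":
    _meta = MetadataCatalog.get("coco_2017_val_panoptic_with_sem_seg")
    print(len(_meta.thing_classes), len(_meta.stuff_classes))  # expected: 80 133
    print(_meta.evaluator_type)                                # expected: coco_panoptic_seg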
| CutLER-main | videocutler/mask2former/data/datasets/register_coco_panoptic_annos_semseg.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import json
import os
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.utils.file_io import PathManager
ADE20K_150_CATEGORIES = [
{"color": [120, 120, 120], "id": 0, "isthing": 0, "name": "wall"},
{"color": [180, 120, 120], "id": 1, "isthing": 0, "name": "building"},
{"color": [6, 230, 230], "id": 2, "isthing": 0, "name": "sky"},
{"color": [80, 50, 50], "id": 3, "isthing": 0, "name": "floor"},
{"color": [4, 200, 3], "id": 4, "isthing": 0, "name": "tree"},
{"color": [120, 120, 80], "id": 5, "isthing": 0, "name": "ceiling"},
{"color": [140, 140, 140], "id": 6, "isthing": 0, "name": "road, route"},
{"color": [204, 5, 255], "id": 7, "isthing": 1, "name": "bed"},
{"color": [230, 230, 230], "id": 8, "isthing": 1, "name": "window "},
{"color": [4, 250, 7], "id": 9, "isthing": 0, "name": "grass"},
{"color": [224, 5, 255], "id": 10, "isthing": 1, "name": "cabinet"},
{"color": [235, 255, 7], "id": 11, "isthing": 0, "name": "sidewalk, pavement"},
{"color": [150, 5, 61], "id": 12, "isthing": 1, "name": "person"},
{"color": [120, 120, 70], "id": 13, "isthing": 0, "name": "earth, ground"},
{"color": [8, 255, 51], "id": 14, "isthing": 1, "name": "door"},
{"color": [255, 6, 82], "id": 15, "isthing": 1, "name": "table"},
{"color": [143, 255, 140], "id": 16, "isthing": 0, "name": "mountain, mount"},
{"color": [204, 255, 4], "id": 17, "isthing": 0, "name": "plant"},
{"color": [255, 51, 7], "id": 18, "isthing": 1, "name": "curtain"},
{"color": [204, 70, 3], "id": 19, "isthing": 1, "name": "chair"},
{"color": [0, 102, 200], "id": 20, "isthing": 1, "name": "car"},
{"color": [61, 230, 250], "id": 21, "isthing": 0, "name": "water"},
{"color": [255, 6, 51], "id": 22, "isthing": 1, "name": "painting, picture"},
{"color": [11, 102, 255], "id": 23, "isthing": 1, "name": "sofa"},
{"color": [255, 7, 71], "id": 24, "isthing": 1, "name": "shelf"},
{"color": [255, 9, 224], "id": 25, "isthing": 0, "name": "house"},
{"color": [9, 7, 230], "id": 26, "isthing": 0, "name": "sea"},
{"color": [220, 220, 220], "id": 27, "isthing": 1, "name": "mirror"},
{"color": [255, 9, 92], "id": 28, "isthing": 0, "name": "rug"},
{"color": [112, 9, 255], "id": 29, "isthing": 0, "name": "field"},
{"color": [8, 255, 214], "id": 30, "isthing": 1, "name": "armchair"},
{"color": [7, 255, 224], "id": 31, "isthing": 1, "name": "seat"},
{"color": [255, 184, 6], "id": 32, "isthing": 1, "name": "fence"},
{"color": [10, 255, 71], "id": 33, "isthing": 1, "name": "desk"},
{"color": [255, 41, 10], "id": 34, "isthing": 0, "name": "rock, stone"},
{"color": [7, 255, 255], "id": 35, "isthing": 1, "name": "wardrobe, closet, press"},
{"color": [224, 255, 8], "id": 36, "isthing": 1, "name": "lamp"},
{"color": [102, 8, 255], "id": 37, "isthing": 1, "name": "tub"},
{"color": [255, 61, 6], "id": 38, "isthing": 1, "name": "rail"},
{"color": [255, 194, 7], "id": 39, "isthing": 1, "name": "cushion"},
{"color": [255, 122, 8], "id": 40, "isthing": 0, "name": "base, pedestal, stand"},
{"color": [0, 255, 20], "id": 41, "isthing": 1, "name": "box"},
{"color": [255, 8, 41], "id": 42, "isthing": 1, "name": "column, pillar"},
{"color": [255, 5, 153], "id": 43, "isthing": 1, "name": "signboard, sign"},
{
"color": [6, 51, 255],
"id": 44,
"isthing": 1,
"name": "chest of drawers, chest, bureau, dresser",
},
{"color": [235, 12, 255], "id": 45, "isthing": 1, "name": "counter"},
{"color": [160, 150, 20], "id": 46, "isthing": 0, "name": "sand"},
{"color": [0, 163, 255], "id": 47, "isthing": 1, "name": "sink"},
{"color": [140, 140, 140], "id": 48, "isthing": 0, "name": "skyscraper"},
{"color": [250, 10, 15], "id": 49, "isthing": 1, "name": "fireplace"},
{"color": [20, 255, 0], "id": 50, "isthing": 1, "name": "refrigerator, icebox"},
{"color": [31, 255, 0], "id": 51, "isthing": 0, "name": "grandstand, covered stand"},
{"color": [255, 31, 0], "id": 52, "isthing": 0, "name": "path"},
{"color": [255, 224, 0], "id": 53, "isthing": 1, "name": "stairs"},
{"color": [153, 255, 0], "id": 54, "isthing": 0, "name": "runway"},
{"color": [0, 0, 255], "id": 55, "isthing": 1, "name": "case, display case, showcase, vitrine"},
{
"color": [255, 71, 0],
"id": 56,
"isthing": 1,
"name": "pool table, billiard table, snooker table",
},
{"color": [0, 235, 255], "id": 57, "isthing": 1, "name": "pillow"},
{"color": [0, 173, 255], "id": 58, "isthing": 1, "name": "screen door, screen"},
{"color": [31, 0, 255], "id": 59, "isthing": 0, "name": "stairway, staircase"},
{"color": [11, 200, 200], "id": 60, "isthing": 0, "name": "river"},
{"color": [255, 82, 0], "id": 61, "isthing": 0, "name": "bridge, span"},
{"color": [0, 255, 245], "id": 62, "isthing": 1, "name": "bookcase"},
{"color": [0, 61, 255], "id": 63, "isthing": 0, "name": "blind, screen"},
{"color": [0, 255, 112], "id": 64, "isthing": 1, "name": "coffee table"},
{
"color": [0, 255, 133],
"id": 65,
"isthing": 1,
"name": "toilet, can, commode, crapper, pot, potty, stool, throne",
},
{"color": [255, 0, 0], "id": 66, "isthing": 1, "name": "flower"},
{"color": [255, 163, 0], "id": 67, "isthing": 1, "name": "book"},
{"color": [255, 102, 0], "id": 68, "isthing": 0, "name": "hill"},
{"color": [194, 255, 0], "id": 69, "isthing": 1, "name": "bench"},
{"color": [0, 143, 255], "id": 70, "isthing": 1, "name": "countertop"},
{"color": [51, 255, 0], "id": 71, "isthing": 1, "name": "stove"},
{"color": [0, 82, 255], "id": 72, "isthing": 1, "name": "palm, palm tree"},
{"color": [0, 255, 41], "id": 73, "isthing": 1, "name": "kitchen island"},
{"color": [0, 255, 173], "id": 74, "isthing": 1, "name": "computer"},
{"color": [10, 0, 255], "id": 75, "isthing": 1, "name": "swivel chair"},
{"color": [173, 255, 0], "id": 76, "isthing": 1, "name": "boat"},
{"color": [0, 255, 153], "id": 77, "isthing": 0, "name": "bar"},
{"color": [255, 92, 0], "id": 78, "isthing": 1, "name": "arcade machine"},
{"color": [255, 0, 255], "id": 79, "isthing": 0, "name": "hovel, hut, hutch, shack, shanty"},
{"color": [255, 0, 245], "id": 80, "isthing": 1, "name": "bus"},
{"color": [255, 0, 102], "id": 81, "isthing": 1, "name": "towel"},
{"color": [255, 173, 0], "id": 82, "isthing": 1, "name": "light"},
{"color": [255, 0, 20], "id": 83, "isthing": 1, "name": "truck"},
{"color": [255, 184, 184], "id": 84, "isthing": 0, "name": "tower"},
{"color": [0, 31, 255], "id": 85, "isthing": 1, "name": "chandelier"},
{"color": [0, 255, 61], "id": 86, "isthing": 1, "name": "awning, sunshade, sunblind"},
{"color": [0, 71, 255], "id": 87, "isthing": 1, "name": "street lamp"},
{"color": [255, 0, 204], "id": 88, "isthing": 1, "name": "booth"},
{"color": [0, 255, 194], "id": 89, "isthing": 1, "name": "tv"},
{"color": [0, 255, 82], "id": 90, "isthing": 1, "name": "plane"},
{"color": [0, 10, 255], "id": 91, "isthing": 0, "name": "dirt track"},
{"color": [0, 112, 255], "id": 92, "isthing": 1, "name": "clothes"},
{"color": [51, 0, 255], "id": 93, "isthing": 1, "name": "pole"},
{"color": [0, 194, 255], "id": 94, "isthing": 0, "name": "land, ground, soil"},
{
"color": [0, 122, 255],
"id": 95,
"isthing": 1,
"name": "bannister, banister, balustrade, balusters, handrail",
},
{
"color": [0, 255, 163],
"id": 96,
"isthing": 0,
"name": "escalator, moving staircase, moving stairway",
},
{
"color": [255, 153, 0],
"id": 97,
"isthing": 1,
"name": "ottoman, pouf, pouffe, puff, hassock",
},
{"color": [0, 255, 10], "id": 98, "isthing": 1, "name": "bottle"},
{"color": [255, 112, 0], "id": 99, "isthing": 0, "name": "buffet, counter, sideboard"},
{
"color": [143, 255, 0],
"id": 100,
"isthing": 0,
"name": "poster, posting, placard, notice, bill, card",
},
{"color": [82, 0, 255], "id": 101, "isthing": 0, "name": "stage"},
{"color": [163, 255, 0], "id": 102, "isthing": 1, "name": "van"},
{"color": [255, 235, 0], "id": 103, "isthing": 1, "name": "ship"},
{"color": [8, 184, 170], "id": 104, "isthing": 1, "name": "fountain"},
{
"color": [133, 0, 255],
"id": 105,
"isthing": 0,
"name": "conveyer belt, conveyor belt, conveyer, conveyor, transporter",
},
{"color": [0, 255, 92], "id": 106, "isthing": 0, "name": "canopy"},
{
"color": [184, 0, 255],
"id": 107,
"isthing": 1,
"name": "washer, automatic washer, washing machine",
},
{"color": [255, 0, 31], "id": 108, "isthing": 1, "name": "plaything, toy"},
{"color": [0, 184, 255], "id": 109, "isthing": 0, "name": "pool"},
{"color": [0, 214, 255], "id": 110, "isthing": 1, "name": "stool"},
{"color": [255, 0, 112], "id": 111, "isthing": 1, "name": "barrel, cask"},
{"color": [92, 255, 0], "id": 112, "isthing": 1, "name": "basket, handbasket"},
{"color": [0, 224, 255], "id": 113, "isthing": 0, "name": "falls"},
{"color": [112, 224, 255], "id": 114, "isthing": 0, "name": "tent"},
{"color": [70, 184, 160], "id": 115, "isthing": 1, "name": "bag"},
{"color": [163, 0, 255], "id": 116, "isthing": 1, "name": "minibike, motorbike"},
{"color": [153, 0, 255], "id": 117, "isthing": 0, "name": "cradle"},
{"color": [71, 255, 0], "id": 118, "isthing": 1, "name": "oven"},
{"color": [255, 0, 163], "id": 119, "isthing": 1, "name": "ball"},
{"color": [255, 204, 0], "id": 120, "isthing": 1, "name": "food, solid food"},
{"color": [255, 0, 143], "id": 121, "isthing": 1, "name": "step, stair"},
{"color": [0, 255, 235], "id": 122, "isthing": 0, "name": "tank, storage tank"},
{"color": [133, 255, 0], "id": 123, "isthing": 1, "name": "trade name"},
{"color": [255, 0, 235], "id": 124, "isthing": 1, "name": "microwave"},
{"color": [245, 0, 255], "id": 125, "isthing": 1, "name": "pot"},
{"color": [255, 0, 122], "id": 126, "isthing": 1, "name": "animal"},
{"color": [255, 245, 0], "id": 127, "isthing": 1, "name": "bicycle"},
{"color": [10, 190, 212], "id": 128, "isthing": 0, "name": "lake"},
{"color": [214, 255, 0], "id": 129, "isthing": 1, "name": "dishwasher"},
{"color": [0, 204, 255], "id": 130, "isthing": 1, "name": "screen"},
{"color": [20, 0, 255], "id": 131, "isthing": 0, "name": "blanket, cover"},
{"color": [255, 255, 0], "id": 132, "isthing": 1, "name": "sculpture"},
{"color": [0, 153, 255], "id": 133, "isthing": 1, "name": "hood, exhaust hood"},
{"color": [0, 41, 255], "id": 134, "isthing": 1, "name": "sconce"},
{"color": [0, 255, 204], "id": 135, "isthing": 1, "name": "vase"},
{"color": [41, 0, 255], "id": 136, "isthing": 1, "name": "traffic light"},
{"color": [41, 255, 0], "id": 137, "isthing": 1, "name": "tray"},
{"color": [173, 0, 255], "id": 138, "isthing": 1, "name": "trash can"},
{"color": [0, 245, 255], "id": 139, "isthing": 1, "name": "fan"},
{"color": [71, 0, 255], "id": 140, "isthing": 0, "name": "pier"},
{"color": [122, 0, 255], "id": 141, "isthing": 0, "name": "crt screen"},
{"color": [0, 255, 184], "id": 142, "isthing": 1, "name": "plate"},
{"color": [0, 92, 255], "id": 143, "isthing": 1, "name": "monitor"},
{"color": [184, 255, 0], "id": 144, "isthing": 1, "name": "bulletin board"},
{"color": [0, 133, 255], "id": 145, "isthing": 0, "name": "shower"},
{"color": [255, 214, 0], "id": 146, "isthing": 1, "name": "radiator"},
{"color": [25, 194, 194], "id": 147, "isthing": 1, "name": "glass, drinking glass"},
{"color": [102, 255, 0], "id": 148, "isthing": 1, "name": "clock"},
{"color": [92, 0, 255], "id": 149, "isthing": 1, "name": "flag"},
]
ADE20k_COLORS = [k["color"] for k in ADE20K_150_CATEGORIES]
MetadataCatalog.get("ade20k_sem_seg_train").set(
stuff_colors=ADE20k_COLORS[:],
)
MetadataCatalog.get("ade20k_sem_seg_val").set(
stuff_colors=ADE20k_COLORS[:],
)
def load_ade20k_panoptic_json(json_file, image_dir, gt_dir, semseg_dir, meta):
"""
Args:
image_dir (str): path to the raw dataset. e.g., "~/coco/train2017".
gt_dir (str): path to the raw annotations. e.g., "~/coco/panoptic_train2017".
json_file (str): path to the json file. e.g., "~/coco/annotations/panoptic_train2017.json".
Returns:
list[dict]: a list of dicts in Detectron2 standard format. (See
`Using Custom Datasets </tutorials/datasets.html>`_ )
"""
def _convert_category_id(segment_info, meta):
if segment_info["category_id"] in meta["thing_dataset_id_to_contiguous_id"]:
segment_info["category_id"] = meta["thing_dataset_id_to_contiguous_id"][
segment_info["category_id"]
]
segment_info["isthing"] = True
else:
segment_info["category_id"] = meta["stuff_dataset_id_to_contiguous_id"][
segment_info["category_id"]
]
segment_info["isthing"] = False
return segment_info
with PathManager.open(json_file) as f:
json_info = json.load(f)
ret = []
for ann in json_info["annotations"]:
image_id = ann["image_id"]
# TODO: currently we assume image and label has the same filename but
# different extension, and images have extension ".jpg" for COCO. Need
# to make image extension a user-provided argument if we extend this
# function to support other COCO-like datasets.
image_file = os.path.join(image_dir, os.path.splitext(ann["file_name"])[0] + ".jpg")
label_file = os.path.join(gt_dir, ann["file_name"])
sem_label_file = os.path.join(semseg_dir, ann["file_name"])
segments_info = [_convert_category_id(x, meta) for x in ann["segments_info"]]
ret.append(
{
"file_name": image_file,
"image_id": image_id,
"pan_seg_file_name": label_file,
"sem_seg_file_name": sem_label_file,
"segments_info": segments_info,
}
)
assert len(ret), f"No images found in {image_dir}!"
assert PathManager.isfile(ret[0]["file_name"]), ret[0]["file_name"]
assert PathManager.isfile(ret[0]["pan_seg_file_name"]), ret[0]["pan_seg_file_name"]
assert PathManager.isfile(ret[0]["sem_seg_file_name"]), ret[0]["sem_seg_file_name"]
return ret
def register_ade20k_panoptic(
name, metadata, image_root, panoptic_root, semantic_root, panoptic_json, instances_json=None
):
"""
Register a "standard" version of ADE20k panoptic segmentation dataset named `name`.
    The dictionaries in this registered dataset follow detectron2's standard format.
Hence it's called "standard".
Args:
name (str): the name that identifies a dataset,
e.g. "ade20k_panoptic_train"
metadata (dict): extra metadata associated with this dataset.
image_root (str): directory which contains all the images
panoptic_root (str): directory which contains panoptic annotation images in COCO format
panoptic_json (str): path to the json panoptic annotation file in COCO format
sem_seg_root (none): not used, to be consistent with
`register_coco_panoptic_separated`.
instances_json (str): path to the json instance annotation file
"""
panoptic_name = name
DatasetCatalog.register(
panoptic_name,
lambda: load_ade20k_panoptic_json(
panoptic_json, image_root, panoptic_root, semantic_root, metadata
),
)
MetadataCatalog.get(panoptic_name).set(
panoptic_root=panoptic_root,
image_root=image_root,
panoptic_json=panoptic_json,
json_file=instances_json,
evaluator_type="ade20k_panoptic_seg",
ignore_label=255,
label_divisor=1000,
**metadata,
)
_PREDEFINED_SPLITS_ADE20K_PANOPTIC = {
"ade20k_panoptic_train": (
"ADEChallengeData2016/images/training",
"ADEChallengeData2016/ade20k_panoptic_train",
"ADEChallengeData2016/ade20k_panoptic_train.json",
"ADEChallengeData2016/annotations_detectron2/training",
"ADEChallengeData2016/ade20k_instance_train.json",
),
"ade20k_panoptic_val": (
"ADEChallengeData2016/images/validation",
"ADEChallengeData2016/ade20k_panoptic_val",
"ADEChallengeData2016/ade20k_panoptic_val.json",
"ADEChallengeData2016/annotations_detectron2/validation",
"ADEChallengeData2016/ade20k_instance_val.json",
),
}
def get_metadata():
meta = {}
# The following metadata maps contiguous id from [0, #thing categories +
    # #stuff categories) to their names and colors. We keep two copies of the
    # same name and color under "thing_*" and "stuff_*" because the current
    # visualization function in D2 handles thing and stuff classes differently
# due to some heuristic used in Panoptic FPN. We keep the same naming to
# enable reusing existing visualization functions.
thing_classes = [k["name"] for k in ADE20K_150_CATEGORIES if k["isthing"] == 1]
thing_colors = [k["color"] for k in ADE20K_150_CATEGORIES if k["isthing"] == 1]
stuff_classes = [k["name"] for k in ADE20K_150_CATEGORIES]
stuff_colors = [k["color"] for k in ADE20K_150_CATEGORIES]
meta["thing_classes"] = thing_classes
meta["thing_colors"] = thing_colors
meta["stuff_classes"] = stuff_classes
meta["stuff_colors"] = stuff_colors
# Convert category id for training:
# category id: like semantic segmentation, it is the class id for each
# pixel. Since there are some classes not used in evaluation, the category
    # id is not always contiguous and thus we have two sets of category ids:
# - original category id: category id in the original dataset, mainly
# used for evaluation.
# - contiguous category id: [0, #classes), in order to train the linear
# softmax classifier.
thing_dataset_id_to_contiguous_id = {}
stuff_dataset_id_to_contiguous_id = {}
for i, cat in enumerate(ADE20K_150_CATEGORIES):
if cat["isthing"]:
thing_dataset_id_to_contiguous_id[cat["id"]] = i
# else:
# stuff_dataset_id_to_contiguous_id[cat["id"]] = i
# in order to use sem_seg evaluator
stuff_dataset_id_to_contiguous_id[cat["id"]] = i
meta["thing_dataset_id_to_contiguous_id"] = thing_dataset_id_to_contiguous_id
meta["stuff_dataset_id_to_contiguous_id"] = stuff_dataset_id_to_contiguous_id
return meta
def register_all_ade20k_panoptic(root):
metadata = get_metadata()
for (
prefix,
(image_root, panoptic_root, panoptic_json, semantic_root, instance_json),
) in _PREDEFINED_SPLITS_ADE20K_PANOPTIC.items():
# The "standard" version of COCO panoptic segmentation dataset,
# e.g. used by Panoptic-DeepLab
register_ade20k_panoptic(
prefix,
metadata,
os.path.join(root, image_root),
os.path.join(root, panoptic_root),
os.path.join(root, semantic_root),
os.path.join(root, panoptic_json),
os.path.join(root, instance_json),
)
_root = os.getenv("DETECTRON2_DATASETS", "datasets")
register_all_ade20k_panoptic(_root)
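# Usage sketch (illustrative addition): the panoptic metadata registered above
# can be inspected without the ADEChallengeData2016 files on disk;
# DatasetCatalog.get("ade20k_panoptic_val") would additionally require the
# panoptic json and images to exist.
if __name__ == "__main__":
    _meta = MetadataCatalog.get("ade20k_panoptic_val")
    print(len(_meta.thing_classes), len(_meta.stuff_classes))  # expected: 100 150
    print(_meta.label_divisor, _meta.ignore_label)             # expected: 1000 255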
| CutLER-main | videocutler/mask2former/data/datasets/register_ade20k_panoptic.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import os
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets import load_sem_seg
COCO_CATEGORIES = [
{"color": [220, 20, 60], "isthing": 1, "id": 1, "name": "person"},
{"color": [119, 11, 32], "isthing": 1, "id": 2, "name": "bicycle"},
{"color": [0, 0, 142], "isthing": 1, "id": 3, "name": "car"},
{"color": [0, 0, 230], "isthing": 1, "id": 4, "name": "motorcycle"},
{"color": [106, 0, 228], "isthing": 1, "id": 5, "name": "airplane"},
{"color": [0, 60, 100], "isthing": 1, "id": 6, "name": "bus"},
{"color": [0, 80, 100], "isthing": 1, "id": 7, "name": "train"},
{"color": [0, 0, 70], "isthing": 1, "id": 8, "name": "truck"},
{"color": [0, 0, 192], "isthing": 1, "id": 9, "name": "boat"},
{"color": [250, 170, 30], "isthing": 1, "id": 10, "name": "traffic light"},
{"color": [100, 170, 30], "isthing": 1, "id": 11, "name": "fire hydrant"},
{"color": [220, 220, 0], "isthing": 1, "id": 13, "name": "stop sign"},
{"color": [175, 116, 175], "isthing": 1, "id": 14, "name": "parking meter"},
{"color": [250, 0, 30], "isthing": 1, "id": 15, "name": "bench"},
{"color": [165, 42, 42], "isthing": 1, "id": 16, "name": "bird"},
{"color": [255, 77, 255], "isthing": 1, "id": 17, "name": "cat"},
{"color": [0, 226, 252], "isthing": 1, "id": 18, "name": "dog"},
{"color": [182, 182, 255], "isthing": 1, "id": 19, "name": "horse"},
{"color": [0, 82, 0], "isthing": 1, "id": 20, "name": "sheep"},
{"color": [120, 166, 157], "isthing": 1, "id": 21, "name": "cow"},
{"color": [110, 76, 0], "isthing": 1, "id": 22, "name": "elephant"},
{"color": [174, 57, 255], "isthing": 1, "id": 23, "name": "bear"},
{"color": [199, 100, 0], "isthing": 1, "id": 24, "name": "zebra"},
{"color": [72, 0, 118], "isthing": 1, "id": 25, "name": "giraffe"},
{"color": [255, 179, 240], "isthing": 1, "id": 27, "name": "backpack"},
{"color": [0, 125, 92], "isthing": 1, "id": 28, "name": "umbrella"},
{"color": [209, 0, 151], "isthing": 1, "id": 31, "name": "handbag"},
{"color": [188, 208, 182], "isthing": 1, "id": 32, "name": "tie"},
{"color": [0, 220, 176], "isthing": 1, "id": 33, "name": "suitcase"},
{"color": [255, 99, 164], "isthing": 1, "id": 34, "name": "frisbee"},
{"color": [92, 0, 73], "isthing": 1, "id": 35, "name": "skis"},
{"color": [133, 129, 255], "isthing": 1, "id": 36, "name": "snowboard"},
{"color": [78, 180, 255], "isthing": 1, "id": 37, "name": "sports ball"},
{"color": [0, 228, 0], "isthing": 1, "id": 38, "name": "kite"},
{"color": [174, 255, 243], "isthing": 1, "id": 39, "name": "baseball bat"},
{"color": [45, 89, 255], "isthing": 1, "id": 40, "name": "baseball glove"},
{"color": [134, 134, 103], "isthing": 1, "id": 41, "name": "skateboard"},
{"color": [145, 148, 174], "isthing": 1, "id": 42, "name": "surfboard"},
{"color": [255, 208, 186], "isthing": 1, "id": 43, "name": "tennis racket"},
{"color": [197, 226, 255], "isthing": 1, "id": 44, "name": "bottle"},
{"color": [171, 134, 1], "isthing": 1, "id": 46, "name": "wine glass"},
{"color": [109, 63, 54], "isthing": 1, "id": 47, "name": "cup"},
{"color": [207, 138, 255], "isthing": 1, "id": 48, "name": "fork"},
{"color": [151, 0, 95], "isthing": 1, "id": 49, "name": "knife"},
{"color": [9, 80, 61], "isthing": 1, "id": 50, "name": "spoon"},
{"color": [84, 105, 51], "isthing": 1, "id": 51, "name": "bowl"},
{"color": [74, 65, 105], "isthing": 1, "id": 52, "name": "banana"},
{"color": [166, 196, 102], "isthing": 1, "id": 53, "name": "apple"},
{"color": [208, 195, 210], "isthing": 1, "id": 54, "name": "sandwich"},
{"color": [255, 109, 65], "isthing": 1, "id": 55, "name": "orange"},
{"color": [0, 143, 149], "isthing": 1, "id": 56, "name": "broccoli"},
{"color": [179, 0, 194], "isthing": 1, "id": 57, "name": "carrot"},
{"color": [209, 99, 106], "isthing": 1, "id": 58, "name": "hot dog"},
{"color": [5, 121, 0], "isthing": 1, "id": 59, "name": "pizza"},
{"color": [227, 255, 205], "isthing": 1, "id": 60, "name": "donut"},
{"color": [147, 186, 208], "isthing": 1, "id": 61, "name": "cake"},
{"color": [153, 69, 1], "isthing": 1, "id": 62, "name": "chair"},
{"color": [3, 95, 161], "isthing": 1, "id": 63, "name": "couch"},
{"color": [163, 255, 0], "isthing": 1, "id": 64, "name": "potted plant"},
{"color": [119, 0, 170], "isthing": 1, "id": 65, "name": "bed"},
{"color": [0, 182, 199], "isthing": 1, "id": 67, "name": "dining table"},
{"color": [0, 165, 120], "isthing": 1, "id": 70, "name": "toilet"},
{"color": [183, 130, 88], "isthing": 1, "id": 72, "name": "tv"},
{"color": [95, 32, 0], "isthing": 1, "id": 73, "name": "laptop"},
{"color": [130, 114, 135], "isthing": 1, "id": 74, "name": "mouse"},
{"color": [110, 129, 133], "isthing": 1, "id": 75, "name": "remote"},
{"color": [166, 74, 118], "isthing": 1, "id": 76, "name": "keyboard"},
{"color": [219, 142, 185], "isthing": 1, "id": 77, "name": "cell phone"},
{"color": [79, 210, 114], "isthing": 1, "id": 78, "name": "microwave"},
{"color": [178, 90, 62], "isthing": 1, "id": 79, "name": "oven"},
{"color": [65, 70, 15], "isthing": 1, "id": 80, "name": "toaster"},
{"color": [127, 167, 115], "isthing": 1, "id": 81, "name": "sink"},
{"color": [59, 105, 106], "isthing": 1, "id": 82, "name": "refrigerator"},
{"color": [142, 108, 45], "isthing": 1, "id": 84, "name": "book"},
{"color": [196, 172, 0], "isthing": 1, "id": 85, "name": "clock"},
{"color": [95, 54, 80], "isthing": 1, "id": 86, "name": "vase"},
{"color": [128, 76, 255], "isthing": 1, "id": 87, "name": "scissors"},
{"color": [201, 57, 1], "isthing": 1, "id": 88, "name": "teddy bear"},
{"color": [246, 0, 122], "isthing": 1, "id": 89, "name": "hair drier"},
{"color": [191, 162, 208], "isthing": 1, "id": 90, "name": "toothbrush"},
{"id": 92, "name": "banner", "supercategory": "textile"},
{"id": 93, "name": "blanket", "supercategory": "textile"},
{"id": 94, "name": "branch", "supercategory": "plant"},
{"id": 95, "name": "bridge", "supercategory": "building"},
{"id": 96, "name": "building-other", "supercategory": "building"},
{"id": 97, "name": "bush", "supercategory": "plant"},
{"id": 98, "name": "cabinet", "supercategory": "furniture-stuff"},
{"id": 99, "name": "cage", "supercategory": "structural"},
{"id": 100, "name": "cardboard", "supercategory": "raw-material"},
{"id": 101, "name": "carpet", "supercategory": "floor"},
{"id": 102, "name": "ceiling-other", "supercategory": "ceiling"},
{"id": 103, "name": "ceiling-tile", "supercategory": "ceiling"},
{"id": 104, "name": "cloth", "supercategory": "textile"},
{"id": 105, "name": "clothes", "supercategory": "textile"},
{"id": 106, "name": "clouds", "supercategory": "sky"},
{"id": 107, "name": "counter", "supercategory": "furniture-stuff"},
{"id": 108, "name": "cupboard", "supercategory": "furniture-stuff"},
{"id": 109, "name": "curtain", "supercategory": "textile"},
{"id": 110, "name": "desk-stuff", "supercategory": "furniture-stuff"},
{"id": 111, "name": "dirt", "supercategory": "ground"},
{"id": 112, "name": "door-stuff", "supercategory": "furniture-stuff"},
{"id": 113, "name": "fence", "supercategory": "structural"},
{"id": 114, "name": "floor-marble", "supercategory": "floor"},
{"id": 115, "name": "floor-other", "supercategory": "floor"},
{"id": 116, "name": "floor-stone", "supercategory": "floor"},
{"id": 117, "name": "floor-tile", "supercategory": "floor"},
{"id": 118, "name": "floor-wood", "supercategory": "floor"},
{"id": 119, "name": "flower", "supercategory": "plant"},
{"id": 120, "name": "fog", "supercategory": "water"},
{"id": 121, "name": "food-other", "supercategory": "food-stuff"},
{"id": 122, "name": "fruit", "supercategory": "food-stuff"},
{"id": 123, "name": "furniture-other", "supercategory": "furniture-stuff"},
{"id": 124, "name": "grass", "supercategory": "plant"},
{"id": 125, "name": "gravel", "supercategory": "ground"},
{"id": 126, "name": "ground-other", "supercategory": "ground"},
{"id": 127, "name": "hill", "supercategory": "solid"},
{"id": 128, "name": "house", "supercategory": "building"},
{"id": 129, "name": "leaves", "supercategory": "plant"},
{"id": 130, "name": "light", "supercategory": "furniture-stuff"},
{"id": 131, "name": "mat", "supercategory": "textile"},
{"id": 132, "name": "metal", "supercategory": "raw-material"},
{"id": 133, "name": "mirror-stuff", "supercategory": "furniture-stuff"},
{"id": 134, "name": "moss", "supercategory": "plant"},
{"id": 135, "name": "mountain", "supercategory": "solid"},
{"id": 136, "name": "mud", "supercategory": "ground"},
{"id": 137, "name": "napkin", "supercategory": "textile"},
{"id": 138, "name": "net", "supercategory": "structural"},
{"id": 139, "name": "paper", "supercategory": "raw-material"},
{"id": 140, "name": "pavement", "supercategory": "ground"},
{"id": 141, "name": "pillow", "supercategory": "textile"},
{"id": 142, "name": "plant-other", "supercategory": "plant"},
{"id": 143, "name": "plastic", "supercategory": "raw-material"},
{"id": 144, "name": "platform", "supercategory": "ground"},
{"id": 145, "name": "playingfield", "supercategory": "ground"},
{"id": 146, "name": "railing", "supercategory": "structural"},
{"id": 147, "name": "railroad", "supercategory": "ground"},
{"id": 148, "name": "river", "supercategory": "water"},
{"id": 149, "name": "road", "supercategory": "ground"},
{"id": 150, "name": "rock", "supercategory": "solid"},
{"id": 151, "name": "roof", "supercategory": "building"},
{"id": 152, "name": "rug", "supercategory": "textile"},
{"id": 153, "name": "salad", "supercategory": "food-stuff"},
{"id": 154, "name": "sand", "supercategory": "ground"},
{"id": 155, "name": "sea", "supercategory": "water"},
{"id": 156, "name": "shelf", "supercategory": "furniture-stuff"},
{"id": 157, "name": "sky-other", "supercategory": "sky"},
{"id": 158, "name": "skyscraper", "supercategory": "building"},
{"id": 159, "name": "snow", "supercategory": "ground"},
{"id": 160, "name": "solid-other", "supercategory": "solid"},
{"id": 161, "name": "stairs", "supercategory": "furniture-stuff"},
{"id": 162, "name": "stone", "supercategory": "solid"},
{"id": 163, "name": "straw", "supercategory": "plant"},
{"id": 164, "name": "structural-other", "supercategory": "structural"},
{"id": 165, "name": "table", "supercategory": "furniture-stuff"},
{"id": 166, "name": "tent", "supercategory": "building"},
{"id": 167, "name": "textile-other", "supercategory": "textile"},
{"id": 168, "name": "towel", "supercategory": "textile"},
{"id": 169, "name": "tree", "supercategory": "plant"},
{"id": 170, "name": "vegetable", "supercategory": "food-stuff"},
{"id": 171, "name": "wall-brick", "supercategory": "wall"},
{"id": 172, "name": "wall-concrete", "supercategory": "wall"},
{"id": 173, "name": "wall-other", "supercategory": "wall"},
{"id": 174, "name": "wall-panel", "supercategory": "wall"},
{"id": 175, "name": "wall-stone", "supercategory": "wall"},
{"id": 176, "name": "wall-tile", "supercategory": "wall"},
{"id": 177, "name": "wall-wood", "supercategory": "wall"},
{"id": 178, "name": "water-other", "supercategory": "water"},
{"id": 179, "name": "waterdrops", "supercategory": "water"},
{"id": 180, "name": "window-blind", "supercategory": "window"},
{"id": 181, "name": "window-other", "supercategory": "window"},
{"id": 182, "name": "wood", "supercategory": "solid"},
]
def _get_coco_stuff_meta():
    # Id 0 is reserved for ignore_label; we change ignore_label from 0
    # to 255 in our pre-processing.
stuff_ids = [k["id"] for k in COCO_CATEGORIES]
assert len(stuff_ids) == 171, len(stuff_ids)
# For semantic segmentation, this mapping maps from contiguous stuff id
    # (in [0, 170], used in models) to ids in the dataset (used for processing results)
stuff_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(stuff_ids)}
stuff_classes = [k["name"] for k in COCO_CATEGORIES]
ret = {
"stuff_dataset_id_to_contiguous_id": stuff_dataset_id_to_contiguous_id,
"stuff_classes": stuff_classes,
}
return ret
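# Illustrative sketch (not part of the original file; the helper name is
# hypothetical): the mapping above can be inverted to turn a model's contiguous
# prediction id back into the original COCO-Stuff category id and class name.
def _contiguous_id_to_dataset_id_and_name(contiguous_id):
    """Return (dataset_id, class_name) for a contiguous id in [0, 170]."""
    meta = _get_coco_stuff_meta()
    inverse = {v: k for k, v in meta["stuff_dataset_id_to_contiguous_id"].items()}
    return inverse[contiguous_id], meta["stuff_classes"][contiguous_id]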
def register_all_coco_stuff_10k(root):
root = os.path.join(root, "coco", "coco_stuff_10k")
meta = _get_coco_stuff_meta()
for name, image_dirname, sem_seg_dirname in [
("train", "images_detectron2/train", "annotations_detectron2/train"),
("test", "images_detectron2/test", "annotations_detectron2/test"),
]:
image_dir = os.path.join(root, image_dirname)
gt_dir = os.path.join(root, sem_seg_dirname)
name = f"coco_2017_{name}_stuff_10k_sem_seg"
DatasetCatalog.register(
name, lambda x=image_dir, y=gt_dir: load_sem_seg(y, x, gt_ext="png", image_ext="jpg")
)
MetadataCatalog.get(name).set(
image_root=image_dir,
sem_seg_root=gt_dir,
evaluator_type="sem_seg",
ignore_label=255,
**meta,
)
_root = os.getenv("DETECTRON2_DATASETS", "datasets")
register_all_coco_stuff_10k(_root)
| CutLER-main | videocutler/mask2former/data/datasets/register_coco_stuff_10k.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import os
from pathlib import Path
import numpy as np
import tqdm
from PIL import Image
def convert(input, output):
img = np.asarray(Image.open(input))
assert img.dtype == np.uint8
img = img - 1 # 0 (ignore) becomes 255. others are shifted by 1
Image.fromarray(img).save(output)
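# Illustrative sketch (not part of the original script; the helper name is
# hypothetical): because the annotation is uint8, the subtraction in convert()
# wraps the ignore label 0 around to 255 while shifting all other labels down by one.
def _example_label_shift():
    """Show the uint8 wrap-around that convert() relies on."""
    labels = np.asarray([0, 1, 150], dtype=np.uint8)
    shifted = labels - 1  # 0 (ignore) -> 255; 1..150 -> 0..149
    assert shifted.tolist() == [255, 0, 149]
    return shifted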
if __name__ == "__main__":
dataset_dir = Path(os.getenv("DETECTRON2_DATASETS", "datasets")) / "ADEChallengeData2016"
for name in ["training", "validation"]:
annotation_dir = dataset_dir / "annotations" / name
output_dir = dataset_dir / "annotations_detectron2" / name
output_dir.mkdir(parents=True, exist_ok=True)
for file in tqdm.tqdm(list(annotation_dir.iterdir())):
output_file = output_dir / file.name
convert(file, output_file)
| CutLER-main | videocutler/datasets/prepare_ade20k_sem_seg.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import glob
import json
import os
from collections import Counter
import numpy as np
import tqdm
from panopticapi.utils import IdGenerator, save_json
from PIL import Image
import pycocotools.mask as mask_util
if __name__ == "__main__":
dataset_dir = os.getenv("DETECTRON2_DATASETS", "datasets")
for name, dirname in [("train", "training"), ("val", "validation")]:
image_dir = os.path.join(dataset_dir, f"ADEChallengeData2016/images/{dirname}/")
instance_dir = os.path.join(
dataset_dir, f"ADEChallengeData2016/annotations_instance/{dirname}/"
)
# img_id = 0
ann_id = 1
# json
out_file = os.path.join(dataset_dir, f"ADEChallengeData2016/ade20k_instance_{name}.json")
# json config
instance_config_file = "datasets/ade20k_instance_imgCatIds.json"
with open(instance_config_file) as f:
category_dict = json.load(f)["categories"]
# load catid mapping
# it is important to share category id for both instance and panoptic annotations
mapping_file = "datasets/ade20k_instance_catid_mapping.txt"
with open(mapping_file) as f:
map_id = {}
for i, line in enumerate(f.readlines()):
if i == 0:
continue
ins_id, sem_id, _ = line.strip().split()
# shift id by 1 because we want it to start from 0!
# ignore_label becomes 255
map_id[int(ins_id)] = int(sem_id) - 1
for cat in category_dict:
cat["id"] = map_id[cat["id"]]
filenames = sorted(glob.glob(os.path.join(image_dir, "*.jpg")))
ann_dict = {}
images = []
annotations = []
for idx, filename in enumerate(tqdm.tqdm(filenames)):
image = {}
image_id = os.path.basename(filename).split(".")[0]
image["id"] = image_id
image["file_name"] = os.path.basename(filename)
original_format = np.array(Image.open(filename))
image["width"] = original_format.shape[1]
image["height"] = original_format.shape[0]
images.append(image)
filename_instance = os.path.join(instance_dir, image_id + ".png")
ins_seg = np.asarray(Image.open(filename_instance))
assert ins_seg.dtype == np.uint8
instance_cat_ids = ins_seg[..., 0]
# instance id starts from 1!
# because 0 is reserved as VOID label
instance_ins_ids = ins_seg[..., 1]
# process things
for thing_id in np.unique(instance_ins_ids):
if thing_id == 0:
continue
mask = instance_ins_ids == thing_id
instance_cat_id = np.unique(instance_cat_ids[mask])
assert len(instance_cat_id) == 1
anno = {}
anno['id'] = ann_id
ann_id += 1
anno['image_id'] = image['id']
anno["iscrowd"] = int(0)
anno["category_id"] = int(map_id[instance_cat_id[0]])
inds = np.nonzero(mask)
ymin, ymax = inds[0].min(), inds[0].max()
xmin, xmax = inds[1].min(), inds[1].max()
anno["bbox"] = [int(xmin), int(ymin), int(xmax - xmin + 1), int(ymax - ymin + 1)]
# if xmax <= xmin or ymax <= ymin:
# continue
rle = mask_util.encode(np.array(mask[:, :, None], order="F", dtype="uint8"))[0]
rle["counts"] = rle["counts"].decode("utf-8")
anno["segmentation"] = rle
anno["area"] = int(mask_util.area(rle))
annotations.append(anno)
# save this
ann_dict['images'] = images
ann_dict['categories'] = category_dict
ann_dict['annotations'] = annotations
save_json(ann_dict, out_file)
| CutLER-main | videocutler/datasets/prepare_ade20k_ins_seg.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import glob
import json
import os
from collections import Counter
import numpy as np
import tqdm
from panopticapi.utils import IdGenerator, save_json
from PIL import Image
ADE20K_SEM_SEG_CATEGORIES = [
"wall",
"building",
"sky",
"floor",
"tree",
"ceiling",
"road, route",
"bed",
"window ",
"grass",
"cabinet",
"sidewalk, pavement",
"person",
"earth, ground",
"door",
"table",
"mountain, mount",
"plant",
"curtain",
"chair",
"car",
"water",
"painting, picture",
"sofa",
"shelf",
"house",
"sea",
"mirror",
"rug",
"field",
"armchair",
"seat",
"fence",
"desk",
"rock, stone",
"wardrobe, closet, press",
"lamp",
"tub",
"rail",
"cushion",
"base, pedestal, stand",
"box",
"column, pillar",
"signboard, sign",
"chest of drawers, chest, bureau, dresser",
"counter",
"sand",
"sink",
"skyscraper",
"fireplace",
"refrigerator, icebox",
"grandstand, covered stand",
"path",
"stairs",
"runway",
"case, display case, showcase, vitrine",
"pool table, billiard table, snooker table",
"pillow",
"screen door, screen",
"stairway, staircase",
"river",
"bridge, span",
"bookcase",
"blind, screen",
"coffee table",
"toilet, can, commode, crapper, pot, potty, stool, throne",
"flower",
"book",
"hill",
"bench",
"countertop",
"stove",
"palm, palm tree",
"kitchen island",
"computer",
"swivel chair",
"boat",
"bar",
"arcade machine",
"hovel, hut, hutch, shack, shanty",
"bus",
"towel",
"light",
"truck",
"tower",
"chandelier",
"awning, sunshade, sunblind",
"street lamp",
"booth",
"tv",
"plane",
"dirt track",
"clothes",
"pole",
"land, ground, soil",
"bannister, banister, balustrade, balusters, handrail",
"escalator, moving staircase, moving stairway",
"ottoman, pouf, pouffe, puff, hassock",
"bottle",
"buffet, counter, sideboard",
"poster, posting, placard, notice, bill, card",
"stage",
"van",
"ship",
"fountain",
"conveyer belt, conveyor belt, conveyer, conveyor, transporter",
"canopy",
"washer, automatic washer, washing machine",
"plaything, toy",
"pool",
"stool",
"barrel, cask",
"basket, handbasket",
"falls",
"tent",
"bag",
"minibike, motorbike",
"cradle",
"oven",
"ball",
"food, solid food",
"step, stair",
"tank, storage tank",
"trade name",
"microwave",
"pot",
"animal",
"bicycle",
"lake",
"dishwasher",
"screen",
"blanket, cover",
"sculpture",
"hood, exhaust hood",
"sconce",
"vase",
"traffic light",
"tray",
"trash can",
"fan",
"pier",
"crt screen",
"plate",
"monitor",
"bulletin board",
"shower",
"radiator",
"glass, drinking glass",
"clock",
"flag", # noqa
]
PALETTE = [
[120, 120, 120],
[180, 120, 120],
[6, 230, 230],
[80, 50, 50],
[4, 200, 3],
[120, 120, 80],
[140, 140, 140],
[204, 5, 255],
[230, 230, 230],
[4, 250, 7],
[224, 5, 255],
[235, 255, 7],
[150, 5, 61],
[120, 120, 70],
[8, 255, 51],
[255, 6, 82],
[143, 255, 140],
[204, 255, 4],
[255, 51, 7],
[204, 70, 3],
[0, 102, 200],
[61, 230, 250],
[255, 6, 51],
[11, 102, 255],
[255, 7, 71],
[255, 9, 224],
[9, 7, 230],
[220, 220, 220],
[255, 9, 92],
[112, 9, 255],
[8, 255, 214],
[7, 255, 224],
[255, 184, 6],
[10, 255, 71],
[255, 41, 10],
[7, 255, 255],
[224, 255, 8],
[102, 8, 255],
[255, 61, 6],
[255, 194, 7],
[255, 122, 8],
[0, 255, 20],
[255, 8, 41],
[255, 5, 153],
[6, 51, 255],
[235, 12, 255],
[160, 150, 20],
[0, 163, 255],
[140, 140, 200],
[250, 10, 15],
[20, 255, 0],
[31, 255, 0],
[255, 31, 0],
[255, 224, 0],
[153, 255, 0],
[0, 0, 255],
[255, 71, 0],
[0, 235, 255],
[0, 173, 255],
[31, 0, 255],
[11, 200, 200],
[255, 82, 0],
[0, 255, 245],
[0, 61, 255],
[0, 255, 112],
[0, 255, 133],
[255, 0, 0],
[255, 163, 0],
[255, 102, 0],
[194, 255, 0],
[0, 143, 255],
[51, 255, 0],
[0, 82, 255],
[0, 255, 41],
[0, 255, 173],
[10, 0, 255],
[173, 255, 0],
[0, 255, 153],
[255, 92, 0],
[255, 0, 255],
[255, 0, 245],
[255, 0, 102],
[255, 173, 0],
[255, 0, 20],
[255, 184, 184],
[0, 31, 255],
[0, 255, 61],
[0, 71, 255],
[255, 0, 204],
[0, 255, 194],
[0, 255, 82],
[0, 10, 255],
[0, 112, 255],
[51, 0, 255],
[0, 194, 255],
[0, 122, 255],
[0, 255, 163],
[255, 153, 0],
[0, 255, 10],
[255, 112, 0],
[143, 255, 0],
[82, 0, 255],
[163, 255, 0],
[255, 235, 0],
[8, 184, 170],
[133, 0, 255],
[0, 255, 92],
[184, 0, 255],
[255, 0, 31],
[0, 184, 255],
[0, 214, 255],
[255, 0, 112],
[92, 255, 0],
[0, 224, 255],
[112, 224, 255],
[70, 184, 160],
[163, 0, 255],
[153, 0, 255],
[71, 255, 0],
[255, 0, 163],
[255, 204, 0],
[255, 0, 143],
[0, 255, 235],
[133, 255, 0],
[255, 0, 235],
[245, 0, 255],
[255, 0, 122],
[255, 245, 0],
[10, 190, 212],
[214, 255, 0],
[0, 204, 255],
[20, 0, 255],
[255, 255, 0],
[0, 153, 255],
[0, 41, 255],
[0, 255, 204],
[41, 0, 255],
[41, 255, 0],
[173, 0, 255],
[0, 245, 255],
[71, 0, 255],
[122, 0, 255],
[0, 255, 184],
[0, 92, 255],
[184, 255, 0],
[0, 133, 255],
[255, 214, 0],
[25, 194, 194],
[102, 255, 0],
[92, 0, 255],
]
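# Illustrative helper (not part of the original script; the name is hypothetical):
# the per-segment bbox computation repeated in the main loop below follows the
# COCO [x, y, width, height] convention and can be written compactly as:
def _mask_to_xywh(mask):
    """Return the tight [x, y, width, height] box around a boolean segment mask."""
    hor_idx = np.nonzero(np.sum(mask, axis=0))[0]
    vert_idx = np.nonzero(np.sum(mask, axis=1))[0]
    x, y = int(hor_idx[0]), int(vert_idx[0])
    return [x, y, int(hor_idx[-1]) - x + 1, int(vert_idx[-1]) - y + 1]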
if __name__ == "__main__":
dataset_dir = os.getenv("DETECTRON2_DATASETS", "datasets")
for name, dirname in [("train", "training"), ("val", "validation")]:
image_dir = os.path.join(dataset_dir, f"ADEChallengeData2016/images/{dirname}/")
semantic_dir = os.path.join(dataset_dir, f"ADEChallengeData2016/annotations/{dirname}/")
instance_dir = os.path.join(
dataset_dir, f"ADEChallengeData2016/annotations_instance/{dirname}/"
)
# folder to store panoptic PNGs
out_folder = os.path.join(dataset_dir, f"ADEChallengeData2016/ade20k_panoptic_{name}/")
# json with segmentations information
out_file = os.path.join(dataset_dir, f"ADEChallengeData2016/ade20k_panoptic_{name}.json")
if not os.path.isdir(out_folder):
print("Creating folder {} for panoptic segmentation PNGs".format(out_folder))
os.mkdir(out_folder)
# json config
config_file = "datasets/ade20k_instance_imgCatIds.json"
with open(config_file) as f:
config = json.load(f)
# load catid mapping
mapping_file = "datasets/ade20k_instance_catid_mapping.txt"
with open(mapping_file) as f:
map_id = {}
for i, line in enumerate(f.readlines()):
if i == 0:
continue
ins_id, sem_id, _ = line.strip().split()
# shift id by 1 because we want it to start from 0!
# ignore_label becomes 255
map_id[int(ins_id) - 1] = int(sem_id) - 1
ADE20K_150_CATEGORIES = []
for cat_id, cat_name in enumerate(ADE20K_SEM_SEG_CATEGORIES):
ADE20K_150_CATEGORIES.append(
{
"name": cat_name,
"id": cat_id,
"isthing": int(cat_id in map_id.values()),
"color": PALETTE[cat_id],
}
)
categories_dict = {cat["id"]: cat for cat in ADE20K_150_CATEGORIES}
panoptic_json_categories = ADE20K_150_CATEGORIES[:]
panoptic_json_images = []
panoptic_json_annotations = []
filenames = sorted(glob.glob(os.path.join(image_dir, "*.jpg")))
for idx, filename in enumerate(tqdm.tqdm(filenames)):
panoptic_json_image = {}
panoptic_json_annotation = {}
image_id = os.path.basename(filename).split(".")[0]
panoptic_json_image["id"] = image_id
panoptic_json_image["file_name"] = os.path.basename(filename)
original_format = np.array(Image.open(filename))
panoptic_json_image["width"] = original_format.shape[1]
panoptic_json_image["height"] = original_format.shape[0]
pan_seg = np.zeros(
(original_format.shape[0], original_format.shape[1], 3), dtype=np.uint8
)
id_generator = IdGenerator(categories_dict)
filename_semantic = os.path.join(semantic_dir, image_id + ".png")
filename_instance = os.path.join(instance_dir, image_id + ".png")
sem_seg = np.asarray(Image.open(filename_semantic))
ins_seg = np.asarray(Image.open(filename_instance))
assert sem_seg.dtype == np.uint8
assert ins_seg.dtype == np.uint8
semantic_cat_ids = sem_seg - 1
instance_cat_ids = ins_seg[..., 0] - 1
# instance id starts from 1!
# because 0 is reserved as VOID label
instance_ins_ids = ins_seg[..., 1]
segm_info = []
# NOTE: there is some overlap between semantic and instance annotation
# thus we paste stuffs first
# process stuffs
for semantic_cat_id in np.unique(semantic_cat_ids):
if semantic_cat_id == 255:
continue
if categories_dict[semantic_cat_id]["isthing"]:
continue
mask = semantic_cat_ids == semantic_cat_id
# should not have any overlap
assert pan_seg[mask].sum() == 0
segment_id, color = id_generator.get_id_and_color(semantic_cat_id)
pan_seg[mask] = color
area = np.sum(mask) # segment area computation
# bbox computation for a segment
hor = np.sum(mask, axis=0)
hor_idx = np.nonzero(hor)[0]
x = hor_idx[0]
width = hor_idx[-1] - x + 1
vert = np.sum(mask, axis=1)
vert_idx = np.nonzero(vert)[0]
y = vert_idx[0]
height = vert_idx[-1] - y + 1
bbox = [int(x), int(y), int(width), int(height)]
segm_info.append(
{
"id": int(segment_id),
"category_id": int(semantic_cat_id),
"area": int(area),
"bbox": bbox,
"iscrowd": 0,
}
)
# process things
for thing_id in np.unique(instance_ins_ids):
if thing_id == 0:
continue
mask = instance_ins_ids == thing_id
instance_cat_id = np.unique(instance_cat_ids[mask])
assert len(instance_cat_id) == 1
semantic_cat_id = map_id[instance_cat_id[0]]
segment_id, color = id_generator.get_id_and_color(semantic_cat_id)
pan_seg[mask] = color
area = np.sum(mask) # segment area computation
# bbox computation for a segment
hor = np.sum(mask, axis=0)
hor_idx = np.nonzero(hor)[0]
x = hor_idx[0]
width = hor_idx[-1] - x + 1
vert = np.sum(mask, axis=1)
vert_idx = np.nonzero(vert)[0]
y = vert_idx[0]
height = vert_idx[-1] - y + 1
bbox = [int(x), int(y), int(width), int(height)]
segm_info.append(
{
"id": int(segment_id),
"category_id": int(semantic_cat_id),
"area": int(area),
"bbox": bbox,
"iscrowd": 0,
}
)
panoptic_json_annotation = {
"image_id": image_id,
"file_name": image_id + ".png",
"segments_info": segm_info,
}
Image.fromarray(pan_seg).save(os.path.join(out_folder, image_id + ".png"))
panoptic_json_images.append(panoptic_json_image)
panoptic_json_annotations.append(panoptic_json_annotation)
# save this
d = {
"images": panoptic_json_images,
"annotations": panoptic_json_annotations,
"categories": panoptic_json_categories,
}
save_json(d, out_file)
| CutLER-main | videocutler/datasets/prepare_ade20k_pan_seg.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import functools
import json
import multiprocessing as mp
import numpy as np
import os
import time
from fvcore.common.download import download
from panopticapi.utils import rgb2id
from PIL import Image
from detectron2.data.datasets.builtin_meta import COCO_CATEGORIES
def _process_panoptic_to_semantic(input_panoptic, output_semantic, segments, id_map):
panoptic = np.asarray(Image.open(input_panoptic), dtype=np.uint32)
panoptic = rgb2id(panoptic)
output = np.zeros_like(panoptic, dtype=np.uint8) + 255
for seg in segments:
cat_id = seg["category_id"]
new_cat_id = id_map[cat_id]
output[panoptic == seg["id"]] = new_cat_id
Image.fromarray(output).save(output_semantic)
def separate_coco_semantic_from_panoptic(panoptic_json, panoptic_root, sem_seg_root, categories):
"""
Create semantic segmentation annotations from panoptic segmentation
annotations, to be used by PanopticFPN.
    It maps all unlabeled pixels to class 255 and maps every category in ``categories``
    (things and stuff alike) to a contiguous id starting from 0.
Args:
panoptic_json (str): path to the panoptic json file, in COCO's format.
panoptic_root (str): a directory with panoptic annotation files, in COCO's format.
sem_seg_root (str): a directory to output semantic annotation files
categories (list[dict]): category metadata. Each dict needs to have:
"id": corresponds to the "category_id" in the json annotations
"isthing": 0 or 1
"""
os.makedirs(sem_seg_root, exist_ok=True)
id_map = {} # map from category id to id in the output semantic annotation
assert len(categories) <= 254
for i, k in enumerate(categories):
id_map[k["id"]] = i
# what is id = 0?
# id_map[0] = 255
print(id_map)
with open(panoptic_json) as f:
obj = json.load(f)
pool = mp.Pool(processes=max(mp.cpu_count() // 2, 4))
def iter_annotations():
for anno in obj["annotations"]:
file_name = anno["file_name"]
segments = anno["segments_info"]
input = os.path.join(panoptic_root, file_name)
output = os.path.join(sem_seg_root, file_name)
yield input, output, segments
print("Start writing to {} ...".format(sem_seg_root))
start = time.time()
pool.starmap(
functools.partial(_process_panoptic_to_semantic, id_map=id_map),
iter_annotations(),
chunksize=100,
)
print("Finished. time: {:.2f}s".format(time.time() - start))
if __name__ == "__main__":
dataset_dir = os.path.join(os.getenv("DETECTRON2_DATASETS", "datasets"), "coco")
for s in ["val2017", "train2017"]:
separate_coco_semantic_from_panoptic(
os.path.join(dataset_dir, "annotations/panoptic_{}.json".format(s)),
os.path.join(dataset_dir, "panoptic_{}".format(s)),
os.path.join(dataset_dir, "panoptic_semseg_{}".format(s)),
COCO_CATEGORIES,
)
| CutLER-main | videocutler/datasets/prepare_coco_semantic_annos_from_panoptic_annos.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Modified by XuDong Wang from: https://github.com/sukjunhwang/IFC/blob/master/projects/IFC/demo/predictor.py
import atexit
import bisect
import multiprocessing as mp
from collections import deque
import cv2
import torch
from visualizer import TrackVisualizer
from detectron2.data import MetadataCatalog
from detectron2.engine.defaults import DefaultPredictor
from detectron2.structures import Instances
from detectron2.utils.video_visualizer import VideoVisualizer
from detectron2.utils.visualizer import ColorMode
class VisualizationDemo(object):
def __init__(self, cfg, instance_mode=ColorMode.IMAGE, parallel=False):
"""
Args:
cfg (CfgNode):
instance_mode (ColorMode):
parallel (bool): whether to run the model in different processes from visualization.
Useful since the visualization logic can be slow.
"""
self.metadata = MetadataCatalog.get(
cfg.DATASETS.TRAIN[0] if len(cfg.DATASETS.TRAIN) else "__unused"
)
self.cpu_device = torch.device("cpu")
self.instance_mode = instance_mode
self.parallel = parallel
if parallel:
num_gpu = torch.cuda.device_count()
self.predictor = AsyncPredictor(cfg, num_gpus=num_gpu)
else:
self.predictor = VideoPredictor(cfg)
def run_on_video(self, frames, confidence_threshold=0, inst_id=0):
"""
Args:
frames (List[np.ndarray]): a list of images of shape (H, W, C) (in BGR order).
This is the format used by OpenCV.
Returns:
predictions (dict): the output of the model.
            vis_output (list[VisImage]): the visualized output for each input frame.
"""
vis_output = None
predictions = self.predictor(frames)
image_size = predictions["image_size"]
pred_scores = predictions["pred_scores"]
pred_labels = predictions["pred_labels"]
pred_masks = predictions["pred_masks"]
selected_idx = [idx for idx, score in enumerate(pred_scores) if score >= confidence_threshold]
pred_scores = [pred_scores[idx] for idx in selected_idx]
pred_labels = [pred_labels[idx] for idx in selected_idx]
pred_masks = [pred_masks[idx] for idx in selected_idx]
frame_masks = list(zip(*pred_masks))
total_vis_output = []
for frame_idx in range(len(frames)):
frame = frames[frame_idx][:, :, ::-1]
visualizer = TrackVisualizer(frame, self.metadata, instance_mode=self.instance_mode, inst_id=inst_id)
ins = Instances(image_size)
if len(pred_scores) > 0:
ins.scores = pred_scores
ins.pred_classes = pred_labels
ins.pred_masks = torch.stack(frame_masks[frame_idx], dim=0)
vis_output = visualizer.draw_instance_predictions(predictions=ins)
total_vis_output.append(vis_output)
return predictions, total_vis_output
class VideoPredictor(DefaultPredictor):
"""
Create a simple end-to-end predictor with the given config that runs on
single device for a single input image.
Compared to using the model directly, this class does the following additions:
1. Load checkpoint from `cfg.MODEL.WEIGHTS`.
2. Always take BGR image as the input and apply conversion defined by `cfg.INPUT.FORMAT`.
3. Apply resizing defined by `cfg.INPUT.{MIN,MAX}_SIZE_TEST`.
    4. Take one clip (a list of frames) and produce a single output, instead of a batch.
If you'd like to do anything more fancy, please refer to its source code
as examples to build and use the model manually.
Attributes:
metadata (Metadata): the metadata of the underlying dataset, obtained from
cfg.DATASETS.TEST.
Examples:
::
        pred = VideoPredictor(cfg)
        frames = [cv2.imread("frame1.jpg"), cv2.imread("frame2.jpg")]
        outputs = pred(frames)
"""
def __call__(self, frames):
"""
Args:
            frames (list[np.ndarray]): a list of images of shape (H, W, C) (in BGR order).
        Returns:
            predictions (dict):
                the output of the model for one video clip.
See :doc:`/tutorials/models` for details about the format.
"""
with torch.no_grad(): # https://github.com/sphinx-doc/sphinx/issues/4258
input_frames = []
for original_image in frames:
# Apply pre-processing to image.
if self.input_format == "RGB":
# whether the model expects BGR inputs or RGB
original_image = original_image[:, :, ::-1]
height, width = original_image.shape[:2]
image = self.aug.get_transform(original_image).apply_image(original_image)
image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
input_frames.append(image)
inputs = {"image": input_frames, "height": height, "width": width}
predictions = self.model([inputs])
return predictions
class AsyncPredictor:
"""
A predictor that runs the model asynchronously, possibly on >1 GPUs.
    Because rendering the visualization takes a considerable amount of time,
this helps improve throughput when rendering videos.
"""
class _StopToken:
pass
class _PredictWorker(mp.Process):
def __init__(self, cfg, task_queue, result_queue):
self.cfg = cfg
self.task_queue = task_queue
self.result_queue = result_queue
super().__init__()
def run(self):
predictor = VideoPredictor(self.cfg)
while True:
task = self.task_queue.get()
if isinstance(task, AsyncPredictor._StopToken):
break
idx, data = task
result = predictor(data)
self.result_queue.put((idx, result))
def __init__(self, cfg, num_gpus: int = 1):
"""
Args:
cfg (CfgNode):
num_gpus (int): if 0, will run on CPU
"""
num_workers = max(num_gpus, 1)
self.task_queue = mp.Queue(maxsize=num_workers * 3)
self.result_queue = mp.Queue(maxsize=num_workers * 3)
self.procs = []
for gpuid in range(max(num_gpus, 1)):
cfg = cfg.clone()
cfg.defrost()
cfg.MODEL.DEVICE = "cuda:{}".format(gpuid) if num_gpus > 0 else "cpu"
self.procs.append(
AsyncPredictor._PredictWorker(cfg, self.task_queue, self.result_queue)
)
self.put_idx = 0
self.get_idx = 0
self.result_rank = []
self.result_data = []
for p in self.procs:
p.start()
atexit.register(self.shutdown)
def put(self, image):
self.put_idx += 1
self.task_queue.put((self.put_idx, image))
def get(self):
self.get_idx += 1 # the index needed for this request
if len(self.result_rank) and self.result_rank[0] == self.get_idx:
res = self.result_data[0]
del self.result_data[0], self.result_rank[0]
return res
while True:
# make sure the results are returned in the correct order
idx, res = self.result_queue.get()
if idx == self.get_idx:
return res
insert = bisect.bisect(self.result_rank, idx)
self.result_rank.insert(insert, idx)
self.result_data.insert(insert, res)
def __len__(self):
return self.put_idx - self.get_idx
def __call__(self, image):
self.put(image)
return self.get()
def shutdown(self):
for _ in self.procs:
self.task_queue.put(AsyncPredictor._StopToken())
@property
def default_buffer_size(self):
return len(self.procs) * 5
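# Illustrative usage sketch (not part of the original file; the helper name is
# hypothetical): AsyncPredictor keeps the VideoPredictor call interface --
# __call__ enqueues one clip and blocks until its in-order result is returned.
def _run_async_demo(cfg, frames, num_gpus=1):
    """Run one clip through AsyncPredictor; interchangeable with VideoPredictor(cfg)(frames)."""
    predictor = AsyncPredictor(cfg, num_gpus=num_gpus)
    return predictor(frames)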
| CutLER-main | videocutler/demo_video/predictor.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# copied from https://github.com/facebookresearch/detectron2/blob/main/detectron2/utils/colormap.py
"""
An awesome colormap for really neat visualizations.
Copied from Detectron, and removed gray colors.
"""
import numpy as np
import random
__all__ = ["colormap", "random_color", "random_colors"]
# fmt: off
# RGB:
_COLORS = np.array(
[
0.000, 0.447, 0.741,
0.850, 0.325, 0.098,
0.929, 0.694, 0.125,
0.494, 0.184, 0.556,
0.466, 0.674, 0.188,
0.301, 0.745, 0.933,
0.635, 0.078, 0.184,
0.300, 0.300, 0.300,
0.600, 0.600, 0.600,
1.000, 0.000, 0.000,
1.000, 0.500, 0.000,
0.749, 0.749, 0.000,
0.000, 1.000, 0.000,
0.000, 0.000, 1.000,
0.667, 0.000, 1.000,
0.333, 0.333, 0.000,
0.333, 0.667, 0.000,
0.333, 1.000, 0.000,
0.667, 0.333, 0.000,
0.667, 0.667, 0.000,
0.667, 1.000, 0.000,
1.000, 0.333, 0.000,
1.000, 0.667, 0.000,
1.000, 1.000, 0.000,
0.000, 0.333, 0.500,
0.000, 0.667, 0.500,
0.000, 1.000, 0.500,
0.333, 0.000, 0.500,
0.333, 0.333, 0.500,
0.333, 0.667, 0.500,
0.333, 1.000, 0.500,
0.667, 0.000, 0.500,
0.667, 0.333, 0.500,
0.667, 0.667, 0.500,
0.667, 1.000, 0.500,
1.000, 0.000, 0.500,
1.000, 0.333, 0.500,
1.000, 0.667, 0.500,
1.000, 1.000, 0.500,
0.000, 0.333, 1.000,
0.000, 0.667, 1.000,
0.000, 1.000, 1.000,
0.333, 0.000, 1.000,
0.333, 0.333, 1.000,
0.333, 0.667, 1.000,
0.333, 1.000, 1.000,
0.667, 0.000, 1.000,
0.667, 0.333, 1.000,
0.667, 0.667, 1.000,
0.667, 1.000, 1.000,
1.000, 0.000, 1.000,
1.000, 0.333, 1.000,
1.000, 0.667, 1.000,
0.333, 0.000, 0.000,
0.500, 0.000, 0.000,
0.667, 0.000, 0.000,
0.833, 0.000, 0.000,
1.000, 0.000, 0.000,
0.000, 0.167, 0.000,
0.000, 0.333, 0.000,
0.000, 0.500, 0.000,
0.000, 0.667, 0.000,
0.000, 0.833, 0.000,
0.000, 1.000, 0.000,
0.000, 0.000, 0.167,
0.000, 0.000, 0.333,
0.000, 0.000, 0.500,
0.000, 0.000, 0.667,
0.000, 0.000, 0.833,
0.000, 0.000, 1.000,
0.000, 0.000, 0.000,
0.143, 0.143, 0.143,
0.857, 0.857, 0.857,
1.000, 1.000, 1.000
]
).astype(np.float32).reshape(-1, 3)
# fmt: on
def colormap(rgb=False, maximum=255):
"""
Args:
rgb (bool): whether to return RGB colors or BGR colors.
maximum (int): either 255 or 1
Returns:
ndarray: a float32 array of Nx3 colors, in range [0, 255] or [0, 1]
"""
assert maximum in [255, 1], maximum
c = _COLORS * maximum
if not rgb:
c = c[:, ::-1]
return c
def random_color(rgb=False, maximum=255):
"""
Args:
rgb (bool): whether to return RGB colors or BGR colors.
maximum (int): either 255 or 1
Returns:
ndarray: a vector of 3 numbers
"""
idx = np.random.randint(0, len(_COLORS))
ret = _COLORS[idx] * maximum
if not rgb:
ret = ret[::-1]
return ret
def random_colors(N, rgb=False, maximum=255):
"""
Args:
N (int): number of unique colors needed
rgb (bool): whether to return RGB colors or BGR colors.
maximum (int): either 255 or 1
Returns:
ndarray: a list of random_color
"""
indices = random.sample(range(len(_COLORS)), N)
ret = [_COLORS[i] * maximum for i in indices]
if not rgb:
ret = [x[::-1] for x in ret]
return ret
def select_colors(rgb=False, maximum=255, indices=[0]):
"""
Args:
        indices (list[int]): indices into ``_COLORS`` of the colors to return
rgb (bool): whether to return RGB colors or BGR colors.
maximum (int): either 255 or 1
Returns:
ndarray: a list of random_color
"""
# indices = random.sample(range(len(_COLORS)), N)
ret = [_COLORS[i] * maximum for i in indices]
if not rgb:
ret = [x[::-1] for x in ret]
return ret
if __name__ == "__main__":
import cv2
size = 100
H, W = 10, 10
canvas = np.random.rand(H * size, W * size, 3).astype("float32")
for h in range(H):
for w in range(W):
idx = h * W + w
if idx >= len(_COLORS):
break
canvas[h * size : (h + 1) * size, w * size : (w + 1) * size] = _COLORS[idx]
cv2.imshow("a", canvas)
cv2.waitKey(0) | CutLER-main | videocutler/demo_video/colormap.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Modified by XuDong Wang
import argparse
import glob
import multiprocessing as mp
import os
# fmt: off
import sys
sys.path.insert(1, os.path.join(sys.path[0], '..'))
# fmt: on
import tempfile
import time
import cv2
import numpy as np
from torch.cuda.amp import autocast
from detectron2.config import get_cfg
from detectron2.data.detection_utils import read_image
from detectron2.projects.deeplab import add_deeplab_config
from detectron2.utils.logger import setup_logger
from mask2former import add_maskformer2_config
from mask2former_video import add_maskformer2_video_config
from predictor import VisualizationDemo
from PIL import Image
# constants
WINDOW_NAME = "VideoCutLER video demo"
def setup_cfg(args):
# load config from file and command-line arguments
cfg = get_cfg()
add_deeplab_config(cfg)
add_maskformer2_config(cfg)
add_maskformer2_video_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
return cfg
def get_parser():
parser = argparse.ArgumentParser(description="maskformer2 demo for builtin configs")
parser.add_argument(
"--config-file",
default="configs/youtubevis_2019/video_maskformer2_R50_bs16_8ep.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument("--video-input", help="Path to video file.")
parser.add_argument(
"--input",
nargs="+",
help="A list of space separated input images; "
"or a single glob pattern such as 'directory/*.jpg'"
"this will be treated as frames of a video",
)
parser.add_argument(
"--output",
help="A file or directory to save output visualizations. "
"If not given, will show output in an OpenCV window.",
)
parser.add_argument(
"--save-frames",
default=False,
help="Save frame level image outputs.",
)
parser.add_argument(
"--save-masks",
default=False,
help="Save frame level image masks.",
)
parser.add_argument(
"--confidence-threshold",
type=float,
default=0.5,
help="Minimum score for instance predictions to be shown",
)
parser.add_argument(
"--opts",
help="Modify config options using the command-line 'KEY VALUE' pairs",
default=[],
nargs=argparse.REMAINDER,
)
return parser
def test_opencv_video_format(codec, file_ext):
with tempfile.TemporaryDirectory(prefix="video_format_test") as dir:
filename = os.path.join(dir, "test_file" + file_ext)
writer = cv2.VideoWriter(
filename=filename,
fourcc=cv2.VideoWriter_fourcc(*codec),
fps=float(30),
frameSize=(10, 10),
isColor=True,
)
[writer.write(np.zeros((10, 10, 3), np.uint8)) for _ in range(30)]
writer.release()
if os.path.isfile(filename):
return True
return False
PALETTE = [0, 0, 0, 128, 0, 0, 0, 128, 0, 128, 128, 0, 0, 0, 128, 128, 0, 128, 0, 128, 128, 128, 128, 128, 64, 0, 0, 191, 0, 0, 64, 128, 0, 191, 128, 0, 64, 0, 128]
def save_masks(masks, file_path):
# n_masks = len(masks)
# width, height = masks[0].shape
mask_image = np.zeros(masks[0].shape, dtype=np.uint8)
for i, mask in enumerate(masks):
mask_image[mask!=0] = i + 1
mask_image = Image.fromarray(mask_image, mode="P")
mask_image.putpalette(PALETTE)
mask_image.save(file_path)
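# Illustrative sketch (not part of the original script; the helper name is
# hypothetical): masks written by save_masks() can be recovered by reading the
# palettized PNG back and splitting on the stored instance indices (0 = background).
def _load_masks(file_path):
    """Inverse of save_masks(): return a list of boolean instance masks."""
    mask_image = np.array(Image.open(file_path))  # palette indices, uint8
    return [mask_image == i for i in range(1, int(mask_image.max()) + 1)]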
if __name__ == "__main__":
mp.set_start_method("spawn", force=True)
args = get_parser().parse_args()
setup_logger(name="fvcore")
logger = setup_logger()
logger.info("Arguments: " + str(args))
cfg = setup_cfg(args)
demo = VisualizationDemo(cfg)
if args.output:
os.makedirs(args.output, exist_ok=True)
if args.input:
folder = args.input
video_name = folder[0].split("/")[-2]
if len(folder) == 1:
args.input = sorted(glob.glob(os.path.expanduser(folder[0])))
assert args.input, "The input path(s) was not found"
vid_frames = []
for path in args.input:
img = read_image(path, format="BGR")
vid_frames.append(img)
start_time = time.time()
with autocast():
predictions, visualized_output = demo.run_on_video(vid_frames, confidence_threshold=args.confidence_threshold)
selected_idx = [idx for idx, score in enumerate(predictions["pred_scores"]) if score >= args.confidence_threshold]
predictions["pred_scores"] = [predictions["pred_scores"][idx] for idx in selected_idx]
predictions["pred_labels"] = [predictions["pred_labels"][idx] for idx in selected_idx]
predictions["pred_masks"] = [predictions["pred_masks"][idx] for idx in selected_idx]
logger.info(
"detected {} instances per frame in {:.2f}s".format(
len(predictions["pred_scores"]), time.time() - start_time
)
)
if args.output:
# save image-level predictions
if args.save_frames:
frame_index = 0
for path, _vis_output in zip(args.input, visualized_output):
video_folder_path = os.path.join(args.output, video_name)
os.makedirs(video_folder_path, exist_ok=True)
out_filename = os.path.join(video_folder_path, os.path.basename(path))
_vis_output.save(out_filename)
# get masks for frame frame_index and save them
if args.save_masks:
pseudo_masks = [predictions["pred_masks"][inst_id][frame_index] for inst_id in range(len(predictions["pred_masks"]))]
save_masks(pseudo_masks, os.path.join(video_folder_path, "mask_" + os.path.basename(path)).replace(".jpg", ".png"))
frame_index += 1
# save mp4
H, W = visualized_output[0].height, visualized_output[0].width
cap = cv2.VideoCapture(-1)
fourcc = cv2.VideoWriter_fourcc(*"mp4v")
out = cv2.VideoWriter(os.path.join(args.output, video_name + "_visualization.mp4"), fourcc, 10.0, (W, H), True)
for _vis_output in visualized_output:
frame = _vis_output.get_image()[:, :, ::-1]
out.write(frame)
cap.release()
out.release()
elif args.video_input:
video = cv2.VideoCapture(args.video_input)
vid_frames = []
while video.isOpened():
success, frame = video.read()
if success:
vid_frames.append(frame)
else:
break
start_time = time.time()
with autocast():
predictions, visualized_output = demo.run_on_video(vid_frames)
logger.info(
"detected {} instances per frame in {:.2f}s".format(
len(predictions["pred_scores"]), time.time() - start_time
)
)
if args.output:
if args.save_frames:
for idx, _vis_output in enumerate(visualized_output):
out_filename = os.path.join(args.output, f"{idx}.jpg")
_vis_output.save(out_filename)
H, W = visualized_output[0].height, visualized_output[0].width
cap = cv2.VideoCapture(-1)
fourcc = cv2.VideoWriter_fourcc(*"mp4v")
out = cv2.VideoWriter(os.path.join(args.output, "visualization.mp4"), fourcc, 10.0, (W, H), True)
for _vis_output in visualized_output:
frame = _vis_output.get_image()[:, :, ::-1]
out.write(frame)
cap.release()
out.release()
| CutLER-main | videocutler/demo_video/demo.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Modified by XuDong Wang from : https://github.com/sukjunhwang/IFC/blob/master/projects/IFC/demo/visualizer.py
import torch
import numpy as np
import matplotlib.colors as mplc
from detectron2.utils.visualizer import ColorMode, GenericMask, Visualizer, _create_text_labels
from colormap import random_colors, select_colors, colormap
import random
_ID_JITTERS = [[0.9047944201469568, 0.3241718265806123, 0.33443746665210006], [0.4590171386127151, 0.9095038146383864, 0.3143840671974788], [0.4769356899795538, 0.5044406738441948, 0.5354530846360839], [0.00820945625670777, 0.24099210193126785, 0.15471834055332978], [0.6195684374237388, 0.4020380013509799, 0.26100266066404676], [0.08281237756545068, 0.05900744492710419, 0.06106221202154216], [0.2264886829978755, 0.04925271007292076, 0.10214429345996079], [0.1888247470009874, 0.11275000298612425, 0.46112894830685514], [0.37415767691880975, 0.844284596118331, 0.950471611180866], [0.3817344218157631, 0.3483259270707101, 0.6572989333690541], [0.2403115731054466, 0.03078280287279167, 0.5385975692534737], [0.7035076951650824, 0.12352084932325424, 0.12873080308790197], [0.12607434914489934, 0.111244793010015, 0.09333334699716023], [0.6551607300342269, 0.7003064103554443, 0.4131794512286162], [0.13592107365596595, 0.5390702818232149, 0.004540643174930525], [0.38286244894454347, 0.709142545393449, 0.529074791609835], [0.4279376583651734, 0.5634708596431771, 0.8505569717104301], [0.3460488523902999, 0.464769595519293, 0.6676839675477276], [0.8544063246675081, 0.5041190233407755, 0.9081217697141578], [0.9207009090747208, 0.2403865944739051, 0.05375410999863772], [0.6515786136947107, 0.6299918449948327, 0.45292029442034387], [0.986174217295693, 0.2424849846977214, 0.3981993323108266], [0.22101915872994693, 0.3408589198278038, 0.006381420347677524], [0.3159785813515982, 0.1145748921741011, 0.595754317197274], [0.10263421488052715, 0.5864139253490858, 0.23908000741142432], [0.8272999391532938, 0.6123527260897751, 0.3365197327803193], [0.5269583712937912, 0.25668929554516506, 0.7888411215078127], [0.2433880265410031, 0.7240751234287827, 0.8483215810528648], [0.7254601709704898, 0.8316525547295984, 0.9325253855921963], [0.5574483824856672, 0.2935331727879944, 0.6594839453793155], [0.6209642371433579, 0.054030693198821256, 0.5080873988178534], [0.9055507077365624, 0.12865888619203514, 0.9309191861440005], [0.9914469722960537, 0.3074114506206205, 0.8762107657323488], [0.4812682518247371, 0.15055826298548158, 0.9656340505308308], [0.6459219454316445, 0.9144794010251625, 0.751338812155106], [0.860840174209798, 0.8844626353077639, 0.3604624506769899], [0.8194991672032272, 0.926399617787601, 0.8059222327343247], [0.6540413175393658, 0.04579445254618297, 0.26891917826531275], [0.37778835833987046, 0.36247927666109536, 0.7989799305827889], [0.22738304978177726, 0.9038018263773739, 0.6970838854138303], [0.6362015495896184, 0.527680794236961, 0.5570915425178721], [0.6436401915860954, 0.6316925317144524, 0.9137151236993912], [0.04161828388587163, 0.3832413349082706, 0.6880829921949752], [0.7768167825719299, 0.8933821497682587, 0.7221278391266809], [0.8632760876301346, 0.3278628094906323, 0.8421587587114462], [0.8556499133262127, 0.6497385872901932, 0.5436895688477963], [0.9861940318610894, 0.03562313777386272, 0.9183454677106616], [0.8042586091176366, 0.6167222703170994, 0.24181981557207644], [0.9504247117633057, 0.3454233714011461, 0.6883727005547743], [0.9611909135491202, 0.46384154263898114, 0.32700443315058914], [0.523542176970206, 0.446222414615845, 0.9067402987747814], [0.7536954008682911, 0.6675512338797588, 0.22538238957839196], [0.1554052265688285, 0.05746097492966129, 0.8580358872587424], [0.8540838640971405, 0.9165504335482566, 0.6806982829158964], [0.7065090319405029, 0.8683059983962002, 0.05167128320624026], [0.39134812961899124, 0.8910075505622979, 0.7639815712623922], [0.1578117311479783, 
0.20047326898284668, 0.9220177338840568], [0.2017488993096358, 0.6949259970936679, 0.8729196864798128], [0.5591089340651949, 0.15576770423813258, 0.1469857469387812], [0.14510398622626974, 0.24451497734532168, 0.46574271993578786], [0.13286397822351492, 0.4178244533944635, 0.03728728952131943], [0.556463206310225, 0.14027595183361663, 0.2731537988657907], [0.4093837966398032, 0.8015225687789814, 0.8033567296903834], [0.527442563956637, 0.902232617214431, 0.7066626674362227], [0.9058355503297827, 0.34983989180213004, 0.8353262183839384], [0.7108382186953104, 0.08591307895133471, 0.21434688012521974], [0.22757345065207668, 0.7943075496583976, 0.2992305547627421], [0.20454109788173636, 0.8251670332103687, 0.012981987094547232], [0.7672562637297392, 0.005429019973062554, 0.022163616037108702], [0.37487345910117564, 0.5086240194440863, 0.9061216063654387], [0.9878004014101087, 0.006345852772772331, 0.17499753379350858], [0.030061528704491303, 0.1409704315546606, 0.3337131835834506], [0.5022506782611504, 0.5448435505388706, 0.40584238936140726], [0.39560774627423445, 0.8905943695833262, 0.5850815030921116], [0.058615671926786406, 0.5365713844300387, 0.1620457551256279], [0.41843842882069693, 0.1536005983609976, 0.3127878501592438], [0.05947621790155899, 0.5412421167331932, 0.2611322146455659], [0.5196159938235607, 0.7066461551682705, 0.970261497412556], [0.30443031606149007, 0.45158581060034975, 0.4331841153149706], [0.8848298403933996, 0.7241791700943656, 0.8917110054596072], [0.5720260591898779, 0.3072801598203052, 0.8891066705989902], [0.13964015336177327, 0.2531778096760302, 0.5703756837403124], [0.2156307542329836, 0.4139947500641685, 0.87051676884144], [0.10800455881891169, 0.05554646035458266, 0.2947027428551443], [0.35198009410633857, 0.365849666213808, 0.06525787683513773], [0.5223264108118847, 0.9032195574351178, 0.28579084943315025], [0.7607724246546966, 0.3087194381828555, 0.6253235528354899], [0.5060485442077824, 0.19173600467625274, 0.9931175692203702], [0.5131805830323746, 0.07719515392040577, 0.923212006754969], [0.3629762141280106, 0.02429179642710888, 0.6963754952399983], [0.7542592485456767, 0.6478893299494212, 0.3424965345400731], [0.49944574453364454, 0.6775665366832825, 0.33758796076989583], [0.010621818120767679, 0.8221571611173205, 0.5186257457566332], [0.5857910304290109, 0.7178133992025467, 0.9729243483606071], [0.16987399482717613, 0.9942570210657463, 0.18120758122552927], [0.016362572521240848, 0.17582788603087263, 0.7255176922640298], [0.10981764283706419, 0.9078582203470377, 0.7638063718334003], [0.9252097840441119, 0.3330197086990039, 0.27888705301420136], [0.12769972651171546, 0.11121470804891687, 0.12710743734391716], [0.5753520518360334, 0.2763862879599456, 0.6115636613363361]]
class TrackVisualizer(Visualizer):
def __init__(self, img_rgb, metadata=None, scale=1.0, instance_mode=ColorMode.IMAGE, inst_id=0):
super().__init__(
img_rgb, metadata=metadata, scale=scale, instance_mode=instance_mode
)
self.cpu_device = torch.device("cpu")
self.inst_id = inst_id
def _jitter(self, color, id):
"""
        Jitters the given color with the precomputed vector for ``id``, producing a
        slightly different but deterministic color per instance id.
        Args:
            color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color
                picked. The values in the list are in the [0.0, 1.0] range.
            id (int): instance index used to look up the jitter vector in ``_ID_JITTERS``.
Returns:
jittered_color (tuple[double]): a tuple of 3 elements, containing the RGB values of the
color after being jittered. The values in the list are in the [0.0, 1.0] range.
"""
color = mplc.to_rgb(color)
vec = _ID_JITTERS[id]
# better to do it in another color space
vec = vec / np.linalg.norm(vec) * 0.5
res = np.clip(vec + color, 0, 1)
return tuple(res)
def draw_instance_predictions(self, predictions):
"""
Draw instance-level prediction results on an image.
Args:
predictions (Instances): the output of an instance detection/segmentation
model. Following fields will be used to draw:
"pred_boxes", "pred_classes", "scores", "pred_masks" (or "pred_masks_rle").
Returns:
output (VisImage): image object with visualizations.
"""
preds = predictions.to(self.cpu_device)
boxes = preds.pred_boxes if preds.has("pred_boxes") else None
scores = preds.scores if preds.has("scores") else None
classes = preds.pred_classes if preds.has("pred_classes") else None
labels = _create_text_labels(classes, scores, self.metadata.get("thing_classes", None))
if labels is not None:
# labels = ["[{}] ".format(_id) + l for _id, l in enumerate(labels)]
labels = ["[{}] ".format(_id) for _id, l in enumerate(labels)]
if preds.has("pred_masks"):
masks = np.asarray(preds.pred_masks)
masks = [GenericMask(x, self.output.height, self.output.width) for x in masks]
else:
masks = None
if classes is None:
return self.output
colors = colormap(rgb=True, maximum=1)
index_start = self.inst_id % len(colors)
# select len(classes) colors from the list of colors
if index_start + len(classes) >= len(colors):
colors = colors[:len(classes)]
else:
colors = colors[index_start:index_start+len(classes)]
alpha = 0.5 # DEFAULT 0.5
if self._instance_mode == ColorMode.IMAGE_BW:
self.output.img = self._create_grayscale_image(
(preds.pred_masks.any(dim=0) > 0).numpy()
if preds.has("pred_masks")
else None
)
alpha = 0.5 # DEFAULT 0.3
self.overlay_instances(
masks=masks,
boxes=boxes,
labels=labels,
assigned_colors=colors,
alpha=alpha,
)
return self.output
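# Illustrative usage sketch (not part of the original file; variable names are
# hypothetical): demo_video/predictor.py builds one TrackVisualizer per frame and
# passes a detectron2 Instances object, roughly:
#
#   vis = TrackVisualizer(frame_rgb, metadata, instance_mode=ColorMode.IMAGE, inst_id=0)
#   vis_image = vis.draw_instance_predictions(predictions=instances)
#   vis_image.save("frame_vis.png")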
| CutLER-main | videocutler/demo_video/visualizer.py |
# -*- coding: utf-8 -*-
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Modified by XuDong Wang from https://github.com/facebookresearch/Mask2Former/tree/main/mask2former_video
from detectron2.config import CfgNode as CN
def add_maskformer2_video_config(cfg):
# video data
# DataLoader
cfg.INPUT.SAMPLING_FRAME_NUM = 2
cfg.INPUT.SAMPLING_FRAME_RANGE = 20
cfg.INPUT.SAMPLING_FRAME_SHUFFLE = False
cfg.INPUT.AUGMENTATIONS = [] # "brightness", "contrast", "saturation", "rotation"
cfg.DATALOADER.COPY_PASTE = False
cfg.DATALOADER.COPY_PASTE_RATE = 0.0
cfg.DATALOADER.COPY_PASTE_MIN_RATIO = 0.5
cfg.DATALOADER.COPY_PASTE_MAX_RATIO = 1.0
cfg.DATALOADER.COPY_PASTE_RANDOM_NUM = True # random select number of instances
cfg.DATALOADER.VISUALIZE_COPY_PASTE = False
cfg.SOLVER.BASE_LR_MULTIPLIER = 1
cfg.SOLVER.BASE_LR_MULTIPLIER_NAMES = []
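# Illustrative usage sketch (not part of the original file): these keys are added
# on top of the base Mask2Former config before merging a yaml file, mirroring
# demo_video/demo.py::setup_cfg, roughly:
#
#   cfg = get_cfg()
#   add_deeplab_config(cfg); add_maskformer2_config(cfg); add_maskformer2_video_config(cfg)
#   cfg.merge_from_file("configs/youtubevis_2019/video_maskformer2_R50_bs16_8ep.yaml")
#   cfg.freeze()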
| CutLER-main | videocutler/mask2former_video/config.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from . import modeling
# config
from .config import add_maskformer2_video_config
# models
from .video_maskformer_model import VideoMaskFormer
# video
from .data_video import (
YTVISDatasetMapper,
YTVISEvaluator,
build_detection_train_loader,
build_detection_test_loader,
get_detection_dataset_dicts,
)
# copy-paste
from .engine import * | CutLER-main | videocutler/mask2former_video/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import math
from typing import Tuple
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.data import MetadataCatalog
from detectron2.modeling import META_ARCH_REGISTRY, build_backbone, build_sem_seg_head
from detectron2.modeling.backbone import Backbone
from detectron2.modeling.postprocessing import sem_seg_postprocess
from detectron2.structures import Boxes, ImageList, Instances, BitMasks
from .modeling.criterion import VideoSetCriterion
from .modeling.matcher import VideoHungarianMatcher
from .utils.memory import retry_if_cuda_oom
logger = logging.getLogger(__name__)
@META_ARCH_REGISTRY.register()
class VideoMaskFormer(nn.Module):
"""
Main class for mask classification semantic segmentation architectures.
"""
@configurable
def __init__(
self,
*,
backbone: Backbone,
sem_seg_head: nn.Module,
criterion: nn.Module,
num_queries: int,
object_mask_threshold: float,
overlap_threshold: float,
metadata,
size_divisibility: int,
sem_seg_postprocess_before_inference: bool,
pixel_mean: Tuple[float],
pixel_std: Tuple[float],
# video
num_frames,
):
"""
Args:
backbone: a backbone module, must follow detectron2's backbone interface
sem_seg_head: a module that predicts semantic segmentation from backbone features
criterion: a module that defines the loss
num_queries: int, number of queries
object_mask_threshold: float, threshold to filter query based on classification score
for panoptic segmentation inference
overlap_threshold: overlap threshold used in general inference for panoptic segmentation
metadata: dataset meta, get `thing` and `stuff` category names for panoptic
segmentation inference
size_divisibility: Some backbones require the input height and width to be divisible by a
specific integer. We can use this to override such requirement.
sem_seg_postprocess_before_inference: whether to resize the prediction back
to original input size before semantic segmentation inference or after.
For high-resolution dataset like Mapillary, resizing predictions before
inference will cause OOM error.
pixel_mean, pixel_std: list or tuple with #channels element, representing
the per-channel mean and std to be used to normalize the input image
            num_frames: int, number of frames sampled per video clip
                (set from cfg.INPUT.SAMPLING_FRAME_NUM)
"""
super().__init__()
self.backbone = backbone
self.sem_seg_head = sem_seg_head
self.criterion = criterion
self.num_queries = num_queries
self.overlap_threshold = overlap_threshold
self.object_mask_threshold = object_mask_threshold
self.metadata = metadata
if size_divisibility < 0:
# use backbone size_divisibility if not set
size_divisibility = self.backbone.size_divisibility
self.size_divisibility = size_divisibility
self.sem_seg_postprocess_before_inference = sem_seg_postprocess_before_inference
self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False)
self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False)
self.num_frames = num_frames
@classmethod
def from_config(cls, cfg):
backbone = build_backbone(cfg)
sem_seg_head = build_sem_seg_head(cfg, backbone.output_shape())
# Loss parameters:
deep_supervision = cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION
no_object_weight = cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT
# loss weights
class_weight = cfg.MODEL.MASK_FORMER.CLASS_WEIGHT
dice_weight = cfg.MODEL.MASK_FORMER.DICE_WEIGHT
mask_weight = cfg.MODEL.MASK_FORMER.MASK_WEIGHT
# building criterion
matcher = VideoHungarianMatcher(
cost_class=class_weight,
cost_mask=mask_weight,
cost_dice=dice_weight,
num_points=cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS,
)
weight_dict = {"loss_ce": class_weight, "loss_mask": mask_weight, "loss_dice": dice_weight}
if deep_supervision:
dec_layers = cfg.MODEL.MASK_FORMER.DEC_LAYERS
aux_weight_dict = {}
for i in range(dec_layers - 1):
aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()})
weight_dict.update(aux_weight_dict)
losses = ["labels", "masks"]
criterion = VideoSetCriterion(
sem_seg_head.num_classes,
matcher=matcher,
weight_dict=weight_dict,
eos_coef=no_object_weight,
losses=losses,
num_points=cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS,
oversample_ratio=cfg.MODEL.MASK_FORMER.OVERSAMPLE_RATIO,
importance_sample_ratio=cfg.MODEL.MASK_FORMER.IMPORTANCE_SAMPLE_RATIO,
)
return {
"backbone": backbone,
"sem_seg_head": sem_seg_head,
"criterion": criterion,
"num_queries": cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES,
"object_mask_threshold": cfg.MODEL.MASK_FORMER.TEST.OBJECT_MASK_THRESHOLD,
"overlap_threshold": cfg.MODEL.MASK_FORMER.TEST.OVERLAP_THRESHOLD,
"metadata": MetadataCatalog.get(cfg.DATASETS.TRAIN[0]),
"size_divisibility": cfg.MODEL.MASK_FORMER.SIZE_DIVISIBILITY,
"sem_seg_postprocess_before_inference": True,
"pixel_mean": cfg.MODEL.PIXEL_MEAN,
"pixel_std": cfg.MODEL.PIXEL_STD,
# video
"num_frames": cfg.INPUT.SAMPLING_FRAME_NUM,
}
@property
def device(self):
return self.pixel_mean.device
def forward(self, batched_inputs):
"""
Args:
batched_inputs: a list, batched outputs of :class:`DatasetMapper`.
Each item in the list contains the inputs for one image.
For now, each item in the list is a dict that contains:
* "image": Tensor, image in (C, H, W) format.
* "instances": per-region ground truth
* Other information that's included in the original dicts, such as:
"height", "width" (int): the output resolution of the model (may be different
from input resolution), used in inference.
Returns:
list[dict]:
each dict has the results for one image. The dict contains the following keys:
* "sem_seg":
A Tensor that represents the
                    per-pixel segmentation predicted by the head.
The prediction has shape KxHxW that represents the logits of
each class for each pixel.
* "panoptic_seg":
                    A tuple that represents the panoptic output
panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment.
segments_info (list[dict]): Describe each segment in `panoptic_seg`.
Each dict contains keys "id", "category_id", "isthing".
"""
images = []
for video in batched_inputs:
for frame in video["image"]:
images.append(frame.to(self.device))
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, self.size_divisibility)
features = self.backbone(images.tensor)
outputs = self.sem_seg_head(features)
if self.training:
# mask classification target
targets = self.prepare_targets(batched_inputs, images)
# bipartite matching-based loss
losses = self.criterion(outputs, targets)
for k in list(losses.keys()):
if k in self.criterion.weight_dict:
losses[k] *= self.criterion.weight_dict[k]
else:
# remove this loss if not specified in `weight_dict`
losses.pop(k)
return losses
else:
mask_cls_results = outputs["pred_logits"]
mask_pred_results = outputs["pred_masks"]
mask_cls_result = mask_cls_results[0]
# upsample masks
mask_pred_result = retry_if_cuda_oom(F.interpolate)(
mask_pred_results[0],
size=(images.tensor.shape[-2], images.tensor.shape[-1]),
mode="bilinear",
align_corners=False,
)
del outputs
input_per_image = batched_inputs[0]
image_size = images.image_sizes[0] # image size without padding after data augmentation
height = input_per_image.get("height", image_size[0]) # raw image size before data augmentation
width = input_per_image.get("width", image_size[1])
return retry_if_cuda_oom(self.inference_video)(mask_cls_result, mask_pred_result, image_size, height, width)
def prepare_targets(self, targets, images):
h_pad, w_pad = images.tensor.shape[-2:]
gt_instances = []
for targets_per_video in targets:
_num_instance = len(targets_per_video["instances"][0])
mask_shape = [_num_instance, self.num_frames, h_pad, w_pad]
gt_masks_per_video = torch.zeros(mask_shape, dtype=torch.bool, device=self.device)
gt_ids_per_video = []
for f_i, targets_per_frame in enumerate(targets_per_video["instances"]):
targets_per_frame = targets_per_frame.to(self.device)
h, w = targets_per_frame.image_size
gt_ids_per_video.append(targets_per_frame.gt_ids[:, None])
gt_masks_per_video[:, f_i, :h, :w] = targets_per_frame.gt_masks.tensor
gt_ids_per_video = torch.cat(gt_ids_per_video, dim=1)
valid_idx = (gt_ids_per_video != -1).any(dim=-1)
gt_classes_per_video = targets_per_frame.gt_classes[valid_idx] # N,
gt_ids_per_video = gt_ids_per_video[valid_idx] # N, num_frames
gt_instances.append({"labels": gt_classes_per_video, "ids": gt_ids_per_video})
gt_masks_per_video = gt_masks_per_video[valid_idx].float() # N, num_frames, H, W
gt_instances[-1].update({"masks": gt_masks_per_video})
return gt_instances
def inference_video(self, pred_cls, pred_masks, img_size, output_height, output_width):
if len(pred_cls) > 0:
scores = F.softmax(pred_cls, dim=-1)[:, :-1]
labels = torch.arange(self.sem_seg_head.num_classes, device=self.device).unsqueeze(0).repeat(self.num_queries, 1).flatten(0, 1)
# keep top-10 predictions
# TODO: make it configurable
scores_per_image, topk_indices = scores.flatten(0, 1).topk(10, sorted=False)
labels_per_image = labels[topk_indices]
topk_indices = topk_indices // self.sem_seg_head.num_classes
pred_masks = pred_masks[topk_indices]
pred_masks = pred_masks[:, :, : img_size[0], : img_size[1]]
pred_masks = F.interpolate(
pred_masks, size=(output_height, output_width), mode="bilinear", align_corners=False
)
masks = pred_masks > 0.
out_scores = scores_per_image.tolist()
out_labels = labels_per_image.tolist()
out_masks = [m for m in masks.cpu()]
else:
out_scores = []
out_labels = []
out_masks = []
video_output = {
"image_size": (output_height, output_width),
"pred_scores": out_scores,
"pred_labels": out_labels,
"pred_masks": out_masks,
}
return video_output
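# A small self-contained sketch of the top-k selection used in `inference_video`
# above: scores for all (query, class) pairs are flattened, the top-k entries are
# kept, and modulo / floor division by the number of classes recover the class and
# the query each kept prediction came from. The sizes below are made up.
def _topk_query_recovery_demo(num_queries=5, num_classes=3, k=4):
    scores = torch.rand(num_queries, num_classes)
    topk_scores, topk_indices = scores.flatten(0, 1).topk(k, sorted=False)
    class_ids = topk_indices % num_classes  # same as indexing the repeated arange of labels
    query_ids = torch.div(topk_indices, num_classes, rounding_mode="floor")  # as `//` above
    return topk_scores, class_ids, query_ids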
| CutLER-main | videocutler/mask2former_video/video_maskformer_model.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/sukjunhwang/IFC
import itertools
import logging
import torch.utils.data
from detectron2.config import CfgNode, configurable
from detectron2.data.build import (
build_batch_data_loader,
load_proposals_into_dataset,
trivial_batch_collator,
)
from detectron2.data.catalog import DatasetCatalog
from detectron2.data.common import DatasetFromList, MapDataset
from detectron2.data.dataset_mapper import DatasetMapper
from detectron2.data.samplers import InferenceSampler, TrainingSampler
from detectron2.utils.comm import get_world_size
def _compute_num_images_per_worker(cfg: CfgNode):
num_workers = get_world_size()
images_per_batch = cfg.SOLVER.IMS_PER_BATCH
assert (
images_per_batch % num_workers == 0
), "SOLVER.IMS_PER_BATCH ({}) must be divisible by the number of workers ({}).".format(
images_per_batch, num_workers
)
assert (
images_per_batch >= num_workers
), "SOLVER.IMS_PER_BATCH ({}) must be larger than the number of workers ({}).".format(
images_per_batch, num_workers
)
images_per_worker = images_per_batch // num_workers
return images_per_worker
def filter_images_with_only_crowd_annotations(dataset_dicts, dataset_names):
"""
Filter out images with none annotations or only crowd annotations
(i.e., images without non-crowd annotations).
A common training-time preprocessing on COCO dataset.
Args:
dataset_dicts (list[dict]): annotations in Detectron2 Dataset format.
Returns:
list[dict]: the same format, but filtered.
"""
num_before = len(dataset_dicts)
def valid(anns):
for ann in anns:
if isinstance(ann, list):
for instance in ann:
if instance.get("iscrowd", 0) == 0:
return True
else:
if ann.get("iscrowd", 0) == 0:
return True
return False
dataset_dicts = [x for x in dataset_dicts if valid(x["annotations"])]
num_after = len(dataset_dicts)
logger = logging.getLogger(__name__)
logger.info(
"Removed {} images with no usable annotations. {} images left.".format(
num_before - num_after, num_after
)
)
return dataset_dicts
def get_detection_dataset_dicts(
dataset_names, filter_empty=True, proposal_files=None
):
"""
Load and prepare dataset dicts for instance detection/segmentation and semantic segmentation.
Args:
dataset_names (str or list[str]): a dataset name or a list of dataset names
filter_empty (bool): whether to filter out images without instance annotations
proposal_files (list[str]): if given, a list of object proposal files
that match each dataset in `dataset_names`.
Returns:
list[dict]: a list of dicts following the standard dataset dict format.
"""
if isinstance(dataset_names, str):
dataset_names = [dataset_names]
assert len(dataset_names)
dataset_dicts = [DatasetCatalog.get(dataset_name) for dataset_name in dataset_names]
for dataset_name, dicts in zip(dataset_names, dataset_dicts):
assert len(dicts), "Dataset '{}' is empty!".format(dataset_name)
if proposal_files is not None:
assert len(dataset_names) == len(proposal_files)
# load precomputed proposals from proposal files
dataset_dicts = [
load_proposals_into_dataset(dataset_i_dicts, proposal_file)
for dataset_i_dicts, proposal_file in zip(dataset_dicts, proposal_files)
]
dataset_dicts = list(itertools.chain.from_iterable(dataset_dicts))
has_instances = "annotations" in dataset_dicts[0]
if filter_empty and has_instances:
dataset_dicts = filter_images_with_only_crowd_annotations(dataset_dicts, dataset_names)
assert len(dataset_dicts), "No valid data found in {}.".format(",".join(dataset_names))
return dataset_dicts
def _train_loader_from_config(cfg, mapper, *, dataset=None, sampler=None):
if dataset is None:
dataset = get_detection_dataset_dicts(
cfg.DATASETS.TRAIN,
filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None,
)
if mapper is None:
mapper = DatasetMapper(cfg, True)
if sampler is None:
sampler_name = cfg.DATALOADER.SAMPLER_TRAIN
logger = logging.getLogger(__name__)
logger.info("Using training sampler {}".format(sampler_name))
sampler = TrainingSampler(len(dataset))
return {
"dataset": dataset,
"sampler": sampler,
"mapper": mapper,
"total_batch_size": cfg.SOLVER.IMS_PER_BATCH,
"aspect_ratio_grouping": cfg.DATALOADER.ASPECT_RATIO_GROUPING,
"num_workers": cfg.DATALOADER.NUM_WORKERS,
}
# TODO can allow dataset as an iterable or IterableDataset to make this function more general
@configurable(from_config=_train_loader_from_config)
def build_detection_train_loader(
dataset, *, mapper, sampler=None, total_batch_size, aspect_ratio_grouping=True, num_workers=0
):
"""
Build a dataloader for object detection with some default features.
This interface is experimental.
Args:
dataset (list or torch.utils.data.Dataset): a list of dataset dicts,
or a map-style pytorch dataset. They can be obtained by using
:func:`DatasetCatalog.get` or :func:`get_detection_dataset_dicts`.
mapper (callable): a callable which takes a sample (dict) from dataset and
returns the format to be consumed by the model.
When using cfg, the default choice is ``DatasetMapper(cfg, is_train=True)``.
sampler (torch.utils.data.sampler.Sampler or None): a sampler that
produces indices to be applied on ``dataset``.
Default to :class:`TrainingSampler`, which coordinates a random shuffle
sequence across all workers.
total_batch_size (int): total batch size across all workers. Batching
simply puts data into a list.
aspect_ratio_grouping (bool): whether to group images with similar
aspect ratio for efficiency. When enabled, it requires each
element in dataset be a dict with keys "width" and "height".
num_workers (int): number of parallel data loading workers
Returns:
torch.utils.data.DataLoader: a dataloader. Each output from it is a
``list[mapped_element]`` of length ``total_batch_size / num_workers``,
where ``mapped_element`` is produced by the ``mapper``.
"""
if isinstance(dataset, list):
dataset = DatasetFromList(dataset, copy=False)
if mapper is not None:
dataset = MapDataset(dataset, mapper)
if sampler is None:
sampler = TrainingSampler(len(dataset))
assert isinstance(sampler, torch.utils.data.sampler.Sampler)
return build_batch_data_loader(
dataset,
sampler,
total_batch_size,
aspect_ratio_grouping=aspect_ratio_grouping,
num_workers=num_workers,
)
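# A minimal sketch of the config-driven call path for the loader above; the
# clip-level mapper is an assumption based on this package (see dataset_mapper.py).
# With a cfg as first argument, `@configurable` routes the call through
# `_train_loader_from_config` to fill in the sampler, batch size and workers.
def _example_build_clip_train_loader(cfg):
    from .dataset_mapper import YTVISDatasetMapper
    mapper = YTVISDatasetMapper(cfg, is_train=True)
    return build_detection_train_loader(cfg, mapper=mapper)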
def _test_loader_from_config(cfg, dataset_name, mapper=None):
"""
Uses the given `dataset_name` argument (instead of the names in cfg), because the
standard practice is to evaluate each test set individually (not combining them).
"""
dataset = get_detection_dataset_dicts(
[dataset_name],
filter_empty=False,
proposal_files=[
cfg.DATASETS.PROPOSAL_FILES_TEST[list(cfg.DATASETS.TEST).index(dataset_name)]
]
if cfg.MODEL.LOAD_PROPOSALS
else None,
)
if mapper is None:
mapper = DatasetMapper(cfg, False)
return {"dataset": dataset, "mapper": mapper, "num_workers": cfg.DATALOADER.NUM_WORKERS}
@configurable(from_config=_test_loader_from_config)
def build_detection_test_loader(dataset, *, mapper, num_workers=0):
"""
Similar to `build_detection_train_loader`, but uses a batch size of 1.
This interface is experimental.
Args:
dataset (list or torch.utils.data.Dataset): a list of dataset dicts,
or a map-style pytorch dataset. They can be obtained by using
:func:`DatasetCatalog.get` or :func:`get_detection_dataset_dicts`.
mapper (callable): a callable which takes a sample (dict) from dataset
and returns the format to be consumed by the model.
When using cfg, the default choice is ``DatasetMapper(cfg, is_train=False)``.
num_workers (int): number of parallel data loading workers
Returns:
DataLoader: a torch DataLoader, that loads the given detection
dataset, with test-time transformation and batching.
Examples:
::
data_loader = build_detection_test_loader(
DatasetRegistry.get("my_test"),
mapper=DatasetMapper(...))
# or, instantiate with a CfgNode:
data_loader = build_detection_test_loader(cfg, "my_test")
"""
if isinstance(dataset, list):
dataset = DatasetFromList(dataset, copy=False)
if mapper is not None:
dataset = MapDataset(dataset, mapper)
sampler = InferenceSampler(len(dataset))
# Always use 1 image per worker during inference since this is the
# standard when reporting inference time in papers.
batch_sampler = torch.utils.data.sampler.BatchSampler(sampler, 1, drop_last=False)
data_loader = torch.utils.data.DataLoader(
dataset,
num_workers=num_workers,
batch_sampler=batch_sampler,
collate_fn=trivial_batch_collator,
)
return data_loader
| CutLER-main | videocutler/mask2former_video/data_video/build.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/sukjunhwang/IFC
import contextlib
import copy
import io
import itertools
import json
import logging
import numpy as np
import os
from collections import OrderedDict
import pycocotools.mask as mask_util
import torch
from .datasets.ytvis_api.ytvos import YTVOS
from .datasets.ytvis_api.ytvoseval import YTVOSeval
from tabulate import tabulate
import detectron2.utils.comm as comm
from detectron2.config import CfgNode
from detectron2.data import MetadataCatalog
from detectron2.evaluation import DatasetEvaluator
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import create_small_table
class YTVISEvaluator(DatasetEvaluator):
"""
    Evaluate video instance segmentation predictions in YTVIS format, reporting
    COCO-style AP/AR metrics computed by the YTVOS evaluation API.
    See http://cocodataset.org/#detection-eval for the definitions of the
    underlying metrics.
"""
def __init__(
self,
dataset_name,
tasks=None,
distributed=True,
output_dir=None,
*,
use_fast_impl=True,
):
"""
Args:
dataset_name (str): name of the dataset to be evaluated.
It must have either the following corresponding metadata:
"json_file": the path to the COCO format annotation
Or it must be in detectron2's standard dataset format
so it can be converted to COCO format automatically.
tasks (tuple[str]): tasks that can be evaluated under the given
configuration. A task is one of "bbox", "segm", "keypoints".
By default, will infer this automatically from predictions.
distributed (True): if True, will collect results from all ranks and run evaluation
in the main process.
Otherwise, will only evaluate the results in the current process.
output_dir (str): optional, an output directory to dump all
results predicted on the dataset. The dump contains two files:
1. "instances_predictions.pth" a file in torch serialization
format that contains all the raw original predictions.
2. "coco_instances_results.json" a json file in COCO's result
format.
use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP.
Although the results should be very close to the official implementation in COCO
API, it is still recommended to compute results with the official API for use in
papers. The faster implementation also uses more RAM.
"""
self._logger = logging.getLogger(__name__)
self._distributed = distributed
self._output_dir = output_dir
self._use_fast_impl = use_fast_impl
if tasks is not None and isinstance(tasks, CfgNode):
self._logger.warning(
"COCO Evaluator instantiated using config, this is deprecated behavior."
" Please pass in explicit arguments instead."
)
            self._tasks = None  # Inferring it from predictions should be better
else:
self._tasks = tasks
self._cpu_device = torch.device("cpu")
self._metadata = MetadataCatalog.get(dataset_name)
json_file = PathManager.get_local_path(self._metadata.json_file)
with contextlib.redirect_stdout(io.StringIO()):
self._ytvis_api = YTVOS(json_file)
# Test set json files do not contain annotations (evaluation must be
# performed using the COCO evaluation server).
self._do_evaluation = "annotations" in self._ytvis_api.dataset
def reset(self):
self._predictions = []
def process(self, inputs, outputs):
"""
Args:
inputs: the inputs to a COCO model (e.g., GeneralizedRCNN).
It is a list of dict. Each dict corresponds to an image and
contains keys like "height", "width", "file_name", "image_id".
outputs: the outputs of a COCO model. It is a list of dicts with key
"instances" that contains :class:`Instances`.
"""
prediction = instances_to_coco_json_video(inputs, outputs)
self._predictions.extend(prediction)
def evaluate(self):
"""
        Gather predictions from all ranks (if distributed), run YTVIS evaluation,
        and return the results dict.
"""
if self._distributed:
comm.synchronize()
predictions = comm.gather(self._predictions, dst=0)
predictions = list(itertools.chain(*predictions))
if not comm.is_main_process():
return {}
else:
predictions = self._predictions
if len(predictions) == 0:
self._logger.warning("[COCOEvaluator] Did not receive valid predictions.")
return {}
if self._output_dir:
PathManager.mkdirs(self._output_dir)
file_path = os.path.join(self._output_dir, "instances_predictions.pth")
with PathManager.open(file_path, "wb") as f:
torch.save(predictions, f)
self._results = OrderedDict()
self._eval_predictions(predictions)
# Copy so the caller can do whatever with results
return copy.deepcopy(self._results)
def _eval_predictions(self, predictions):
"""
Evaluate predictions. Fill self._results with the metrics of the tasks.
"""
self._logger.info("Preparing results for YTVIS format ...")
# unmap the category ids for COCO
if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"):
dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id
all_contiguous_ids = list(dataset_id_to_contiguous_id.values())
num_classes = len(all_contiguous_ids)
assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1
reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()}
for result in predictions:
category_id = result["category_id"]
assert category_id < num_classes, (
f"A prediction has class={category_id}, "
f"but the dataset only has {num_classes} classes and "
f"predicted class id should be in [0, {num_classes - 1}]."
)
result["category_id"] = reverse_id_mapping[category_id]
if self._output_dir:
file_path = os.path.join(self._output_dir, "results.json")
self._logger.info("Saving results to {}".format(file_path))
with PathManager.open(file_path, "w") as f:
f.write(json.dumps(predictions))
f.flush()
if not self._do_evaluation:
self._logger.info("Annotations are not available for evaluation.")
return
coco_eval = (
_evaluate_predictions_on_coco(
self._ytvis_api,
predictions,
)
if len(predictions) > 0
else None # cocoapi does not handle empty results very well
)
res = self._derive_coco_results(
coco_eval, class_names=self._metadata.get("thing_classes")
)
self._results["segm"] = res
def _derive_coco_results(self, coco_eval, class_names=None):
"""
Derive the desired score numbers from summarized COCOeval.
Args:
coco_eval (None or COCOEval): None represents no predictions from model.
            class_names (None or list[str]): if provided, will also report
                per-category AP.
Returns:
a dict of {metric name: score}
"""
metrics = ["AP", "AP50", "AP75", "APs", "APm", "APl", "AR1", "AR10"]
if coco_eval is None:
            self._logger.warning("No predictions from the model!")
return {metric: float("nan") for metric in metrics}
# the standard metrics
results = {
metric: float(coco_eval.stats[idx] * 100 if coco_eval.stats[idx] >= 0 else "nan")
for idx, metric in enumerate(metrics)
}
self._logger.info(
"Evaluation results for {}: \n".format("segm") + create_small_table(results)
)
if not np.isfinite(sum(results.values())):
self._logger.info("Some metrics cannot be computed and is shown as NaN.")
if class_names is None or len(class_names) <= 1:
return results
# Compute per-category AP
# from https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L222-L252 # noqa
precisions = coco_eval.eval["precision"]
# precision has dims (iou, recall, cls, area range, max dets)
assert len(class_names) == precisions.shape[2]
results_per_category = []
for idx, name in enumerate(class_names):
# area range index 0: all area ranges
# max dets index -1: typically 100 per image
precision = precisions[:, :, idx, 0, -1]
precision = precision[precision > -1]
ap = np.mean(precision) if precision.size else float("nan")
results_per_category.append(("{}".format(name), float(ap * 100)))
# tabulate it
N_COLS = min(6, len(results_per_category) * 2)
results_flatten = list(itertools.chain(*results_per_category))
results_2d = itertools.zip_longest(*[results_flatten[i::N_COLS] for i in range(N_COLS)])
table = tabulate(
results_2d,
tablefmt="pipe",
floatfmt=".3f",
headers=["category", "AP"] * (N_COLS // 2),
numalign="left",
)
self._logger.info("Per-category {} AP: \n".format("segm") + table)
results.update({"AP-" + name: ap for name, ap in results_per_category})
return results
def instances_to_coco_json_video(inputs, outputs):
"""
Dump an "Instances" object to a COCO-format json that's used for evaluation.
Args:
instances (Instances):
video_id (int): the image id
Returns:
list[dict]: list of json annotations in COCO format.
"""
    assert len(inputs) == 1, "More than one input is loaded for inference!"
video_id = inputs[0]["video_id"]
video_length = inputs[0]["length"]
scores = outputs["pred_scores"]
labels = outputs["pred_labels"]
masks = outputs["pred_masks"]
ytvis_results = []
for instance_id, (s, l, m) in enumerate(zip(scores, labels, masks)):
segms = [
mask_util.encode(np.array(_mask[:, :, None], order="F", dtype="uint8"))[0]
for _mask in m
]
for rle in segms:
rle["counts"] = rle["counts"].decode("utf-8")
res = {
"video_id": video_id,
"score": s,
"category_id": l,
"segmentations": segms,
}
ytvis_results.append(res)
return ytvis_results
def _evaluate_predictions_on_coco(
coco_gt,
coco_results,
img_ids=None,
):
"""
Evaluate the coco results using COCOEval API.
"""
assert len(coco_results) > 0
coco_results = copy.deepcopy(coco_results)
# When evaluating mask AP, if the results contain bbox, cocoapi will
# use the box area as the area of the instance, instead of the mask area.
# This leads to a different definition of small/medium/large.
# We remove the bbox field to let mask AP use mask area.
for c in coco_results:
c.pop("bbox", None)
coco_dt = coco_gt.loadRes(coco_results)
coco_eval = YTVOSeval(coco_gt, coco_dt)
# For COCO, the default max_dets_per_image is [1, 10, 100].
max_dets_per_image = [1, 10, 100] # Default from COCOEval
coco_eval.params.maxDets = max_dets_per_image
if img_ids is not None:
coco_eval.params.imgIds = img_ids
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
return coco_eval
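# A small self-contained sketch (toy mask, illustration only) of the per-frame RLE
# encoding performed in `instances_to_coco_json_video` above: each binary mask is
# compressed with pycocotools, and its "counts" bytes are decoded to str so that
# the result dicts are JSON-serializable.
def _rle_roundtrip_demo():
    mask = np.zeros((4, 6), dtype=np.uint8)
    mask[1:3, 2:5] = 1
    rle = mask_util.encode(np.array(mask[:, :, None], order="F", dtype="uint8"))[0]
    assert (mask_util.decode(rle) == mask).all()
    rle["counts"] = rle["counts"].decode("utf-8")  # JSON-serializable, as in the code above
    return rle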
| CutLER-main | videocutler/mask2former_video/data_video/ytvis_eval.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/sukjunhwang/IFC
import numpy as np
import logging
import sys
from fvcore.transforms.transform import (
HFlipTransform,
NoOpTransform,
VFlipTransform,
)
from PIL import Image
from detectron2.data import transforms as T
class ResizeShortestEdge(T.Augmentation):
"""
Scale the shorter edge to the given size, with a limit of `max_size` on the longer edge.
If `max_size` is reached, then downscale so that the longer edge does not exceed max_size.
"""
def __init__(
self, short_edge_length, max_size=sys.maxsize, sample_style="range", interp=Image.BILINEAR, clip_frame_cnt=1
):
"""
Args:
short_edge_length (list[int]): If ``sample_style=="range"``,
a [min, max] interval from which to sample the shortest edge length.
If ``sample_style=="choice"``, a list of shortest edge lengths to sample from.
max_size (int): maximum allowed longest edge length.
            sample_style (str): "range", "choice", or their "_by_clip" variants, which
                keep the sampled size fixed across `clip_frame_cnt` consecutive frames.
"""
super().__init__()
assert sample_style in ["range", "choice", "range_by_clip", "choice_by_clip"], sample_style
self.is_range = ("range" in sample_style)
if isinstance(short_edge_length, int):
short_edge_length = (short_edge_length, short_edge_length)
if self.is_range:
assert len(short_edge_length) == 2, (
"short_edge_length must be two values using 'range' sample style."
f" Got {short_edge_length}!"
)
self._cnt = 0
self._init(locals())
def get_transform(self, image):
if self._cnt % self.clip_frame_cnt == 0:
if self.is_range:
self.size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
else:
self.size = np.random.choice(self.short_edge_length)
if self.size == 0:
return NoOpTransform()
self._cnt = 0 # avoiding overflow
self._cnt += 1
h, w = image.shape[:2]
scale = self.size * 1.0 / min(h, w)
if h < w:
newh, neww = self.size, scale * w
else:
newh, neww = scale * h, self.size
if max(newh, neww) > self.max_size:
scale = self.max_size * 1.0 / max(newh, neww)
newh = newh * scale
neww = neww * scale
neww = int(neww + 0.5)
newh = int(newh + 0.5)
return T.ResizeTransform(h, w, newh, neww, self.interp)
class RandomFlip(T.Augmentation):
"""
Flip the image horizontally or vertically with the given probability.
"""
def __init__(self, prob=0.5, *, horizontal=True, vertical=False, clip_frame_cnt=1):
"""
Args:
prob (float): probability of flip.
horizontal (boolean): whether to apply horizontal flipping
vertical (boolean): whether to apply vertical flipping
"""
super().__init__()
if horizontal and vertical:
raise ValueError("Cannot do both horiz and vert. Please use two Flip instead.")
if not horizontal and not vertical:
raise ValueError("At least one of horiz or vert has to be True!")
self._cnt = 0
self._init(locals())
def get_transform(self, image):
if self._cnt % self.clip_frame_cnt == 0:
self.do = self._rand_range() < self.prob
self._cnt = 0 # avoiding overflow
self._cnt += 1
h, w = image.shape[:2]
if self.do:
if self.horizontal:
return HFlipTransform(w)
elif self.vertical:
return VFlipTransform(h)
else:
return NoOpTransform()
def build_augmentation(cfg, is_train):
logger = logging.getLogger(__name__)
aug_list = []
if is_train:
# Crop
if cfg.INPUT.CROP.ENABLED:
aug_list.append(T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE))
# Resize
min_size = cfg.INPUT.MIN_SIZE_TRAIN
max_size = cfg.INPUT.MAX_SIZE_TRAIN
sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
ms_clip_frame_cnt = cfg.INPUT.SAMPLING_FRAME_NUM if "by_clip" in cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING else 1
aug_list.append(ResizeShortestEdge(min_size, max_size, sample_style, clip_frame_cnt=ms_clip_frame_cnt))
# Flip
if cfg.INPUT.RANDOM_FLIP != "none":
if cfg.INPUT.RANDOM_FLIP == "flip_by_clip":
flip_clip_frame_cnt = cfg.INPUT.SAMPLING_FRAME_NUM
else:
flip_clip_frame_cnt = 1
aug_list.append(
# NOTE using RandomFlip modified for the support of flip maintenance
RandomFlip(
horizontal=(cfg.INPUT.RANDOM_FLIP == "horizontal") or (cfg.INPUT.RANDOM_FLIP == "flip_by_clip"),
vertical=cfg.INPUT.RANDOM_FLIP == "vertical",
clip_frame_cnt=flip_clip_frame_cnt,
)
)
# Additional augmentations : brightness, contrast, saturation, rotation
augmentations = cfg.INPUT.AUGMENTATIONS
if "brightness" in augmentations:
aug_list.append(T.RandomBrightness(0.9, 1.1))
if "contrast" in augmentations:
aug_list.append(T.RandomContrast(0.9, 1.1))
if "saturation" in augmentations:
aug_list.append(T.RandomSaturation(0.9, 1.1))
if "rotation" in augmentations:
aug_list.append(
T.RandomRotation(
[-15, 15], expand=False, center=[(0.4, 0.4), (0.6, 0.6)], sample_style="range"
)
)
else:
# Resize
min_size = cfg.INPUT.MIN_SIZE_TEST
max_size = cfg.INPUT.MAX_SIZE_TEST
sample_style = "choice"
aug_list.append(T.ResizeShortestEdge(min_size, max_size, sample_style))
return aug_list
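# A small self-contained sketch (toy frames, illustration only) of the clip-level
# consistency added above: with clip_frame_cnt equal to the number of sampled
# frames, the modified RandomFlip reuses one flip decision for the whole clip, so
# either every frame is flipped or none is.
def _clip_consistent_flip_demo(num_frames=2):
    aug = RandomFlip(prob=0.5, horizontal=True, vertical=False, clip_frame_cnt=num_frames)
    frames = [np.zeros((8, 12, 3), dtype=np.uint8) for _ in range(num_frames)]
    transforms = [aug.get_transform(frame) for frame in frames]
    assert len({type(t) for t in transforms}) == 1  # all HFlipTransform or all NoOpTransform
    return transforms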
| CutLER-main | videocutler/mask2former_video/data_video/augmentation.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/sukjunhwang/IFC
from .dataset_mapper import YTVISDatasetMapper, CocoClipDatasetMapper
from .build import *
from .datasets import *
from .ytvis_eval import YTVISEvaluator
| CutLER-main | videocutler/mask2former_video/data_video/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/sukjunhwang/IFC
import copy
import logging
import random
import numpy as np
from typing import List, Union
import torch
from detectron2.config import configurable
from detectron2.structures import (
BitMasks,
Boxes,
BoxMode,
Instances,
)
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from .augmentation import build_augmentation
__all__ = ["YTVISDatasetMapper", "CocoClipDatasetMapper"]
def filter_empty_instances(instances, by_box=True, by_mask=True, box_threshold=1e-5):
"""
Filter out empty instances in an `Instances` object.
Args:
instances (Instances):
by_box (bool): whether to filter out instances with empty boxes
by_mask (bool): whether to filter out instances with empty masks
box_threshold (float): minimum width and height to be considered non-empty
Returns:
Instances: the filtered instances.
"""
assert by_box or by_mask
r = []
if by_box:
r.append(instances.gt_boxes.nonempty(threshold=box_threshold))
if instances.has("gt_masks") and by_mask:
r.append(instances.gt_masks.nonempty())
if not r:
return instances
m = r[0]
for x in r[1:]:
m = m & x
instances.gt_ids[~m] = -1
return instances
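# A small self-contained sketch (toy boxes, illustration only) of the convention
# used by `filter_empty_instances` above: empty instances are kept but their
# gt_ids are set to -1, which keeps per-frame instance ordering aligned across a
# clip; the model's `prepare_targets` later drops ids that are -1 in every frame.
def _empty_instance_marking_demo():
    inst = Instances((10, 10))
    inst.gt_boxes = Boxes(torch.tensor([[0.0, 0.0, 5.0, 5.0], [3.0, 3.0, 3.0, 3.0]]))
    inst.gt_ids = torch.tensor([7, 8])
    inst = filter_empty_instances(inst, by_mask=False)
    return inst.gt_ids.tolist()  # -> [7, -1]: the degenerate box is flagged, not removed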
def _get_dummy_anno(num_classes):
return {
"iscrowd": 0,
"category_id": num_classes,
"id": -1,
"bbox": np.array([0, 0, 0, 0]),
"bbox_mode": BoxMode.XYXY_ABS,
"segmentation": [np.array([0.0] * 6)]
}
def ytvis_annotations_to_instances(annos, image_size):
"""
Create an :class:`Instances` object used by the models,
from instance annotations in the dataset dict.
Args:
annos (list[dict]): a list of instance annotations in one image, each
element for one instance.
image_size (tuple): height, width
Returns:
Instances:
It will contain fields "gt_boxes", "gt_classes", "gt_ids",
"gt_masks", if they can be obtained from `annos`.
This is the format that builtin models expect.
"""
boxes = [BoxMode.convert(obj["bbox"], obj["bbox_mode"], BoxMode.XYXY_ABS) for obj in annos]
target = Instances(image_size)
target.gt_boxes = Boxes(boxes)
classes = [int(obj["category_id"]) for obj in annos]
classes = torch.tensor(classes, dtype=torch.int64)
target.gt_classes = classes
ids = [int(obj["id"]) for obj in annos]
ids = torch.tensor(ids, dtype=torch.int64)
target.gt_ids = ids
if len(annos) and "segmentation" in annos[0]:
segms = [obj["segmentation"] for obj in annos]
masks = []
for segm in segms:
assert segm.ndim == 2, "Expect segmentation of 2 dimensions, got {}.".format(
segm.ndim
)
# mask array
masks.append(segm)
# torch.from_numpy does not support array with negative stride.
masks = BitMasks(
torch.stack([torch.from_numpy(np.ascontiguousarray(x)) for x in masks])
)
target.gt_masks = masks
return target
class YTVISDatasetMapper:
"""
A callable which takes a dataset dict in YouTube-VIS Dataset format,
and map it into a format used by the model.
"""
@configurable
def __init__(
self,
is_train: bool,
*,
augmentations: List[Union[T.Augmentation, T.Transform]],
image_format: str,
use_instance_mask: bool = False,
sampling_frame_num: int = 2,
sampling_frame_range: int = 5,
sampling_frame_shuffle: bool = False,
num_classes: int = 40,
):
"""
NOTE: this interface is experimental.
Args:
is_train: whether it's used in training or inference
augmentations: a list of augmentations or deterministic transforms to apply
image_format: an image format supported by :func:`detection_utils.read_image`.
use_instance_mask: whether to process instance segmentation annotations, if available
"""
# fmt: off
self.is_train = is_train
self.augmentations = T.AugmentationList(augmentations)
self.image_format = image_format
self.use_instance_mask = use_instance_mask
self.sampling_frame_num = sampling_frame_num
self.sampling_frame_range = sampling_frame_range
self.sampling_frame_shuffle = sampling_frame_shuffle
self.num_classes = num_classes
# fmt: on
logger = logging.getLogger(__name__)
mode = "training" if is_train else "inference"
logger.info(f"[DatasetMapper] Augmentations used in {mode}: {augmentations}")
@classmethod
def from_config(cls, cfg, is_train: bool = True):
augs = build_augmentation(cfg, is_train)
sampling_frame_num = cfg.INPUT.SAMPLING_FRAME_NUM
sampling_frame_range = cfg.INPUT.SAMPLING_FRAME_RANGE
sampling_frame_shuffle = cfg.INPUT.SAMPLING_FRAME_SHUFFLE
ret = {
"is_train": is_train,
"augmentations": augs,
"image_format": cfg.INPUT.FORMAT,
"use_instance_mask": cfg.MODEL.MASK_ON,
"sampling_frame_num": sampling_frame_num,
"sampling_frame_range": sampling_frame_range,
"sampling_frame_shuffle": sampling_frame_shuffle,
"num_classes": cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
}
return ret
def __call__(self, dataset_dict):
"""
Args:
dataset_dict (dict): Metadata of one video, in YTVIS Dataset format.
Returns:
dict: a format that builtin models in detectron2 accept
"""
        # TODO consider examining the deepcopy below, as it costs a huge amount of computation.
dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
video_length = dataset_dict["length"]
if self.is_train:
ref_frame = random.randrange(video_length)
start_idx = max(0, ref_frame-self.sampling_frame_range)
end_idx = min(video_length, ref_frame+self.sampling_frame_range + 1)
selected_idx = np.random.choice(
np.array(list(range(start_idx, ref_frame)) + list(range(ref_frame+1, end_idx))),
self.sampling_frame_num - 1,
)
selected_idx = selected_idx.tolist() + [ref_frame]
selected_idx = sorted(selected_idx)
if self.sampling_frame_shuffle:
random.shuffle(selected_idx)
else:
selected_idx = range(video_length)
video_annos = dataset_dict.pop("annotations", None)
file_names = dataset_dict.pop("file_names", None)
if self.is_train:
_ids = set()
for frame_idx in selected_idx:
_ids.update([anno["id"] for anno in video_annos[frame_idx]])
ids = dict()
for i, _id in enumerate(_ids):
ids[_id] = i
dataset_dict["image"] = []
dataset_dict["instances"] = []
dataset_dict["file_names"] = []
for frame_idx in selected_idx:
dataset_dict["file_names"].append(file_names[frame_idx])
# Read image
image = utils.read_image(file_names[frame_idx], format=self.image_format)
utils.check_image_size(dataset_dict, image)
aug_input = T.AugInput(image)
transforms = self.augmentations(aug_input)
image = aug_input.image
image_shape = image.shape[:2] # h, w
# Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
# but not efficient on large generic data structures due to the use of pickle & mp.Queue.
# Therefore it's important to use torch.Tensor.
dataset_dict["image"].append(torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1))))
if (video_annos is None) or (not self.is_train):
continue
# NOTE copy() is to prevent annotations getting changed from applying augmentations
_frame_annos = []
for anno in video_annos[frame_idx]:
_anno = {}
for k, v in anno.items():
_anno[k] = copy.deepcopy(v)
_frame_annos.append(_anno)
# USER: Implement additional transformations if you have other types of data
annos = [
utils.transform_instance_annotations(obj, transforms, image_shape)
for obj in _frame_annos
if obj.get("iscrowd", 0) == 0
]
sorted_annos = [_get_dummy_anno(self.num_classes) for _ in range(len(ids))]
for _anno in annos:
idx = ids[_anno["id"]]
sorted_annos[idx] = _anno
_gt_ids = [_anno["id"] for _anno in sorted_annos]
instances = utils.annotations_to_instances(sorted_annos, image_shape, mask_format="bitmask")
instances.gt_ids = torch.tensor(_gt_ids)
if instances.has("gt_masks"):
instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
instances = filter_empty_instances(instances)
else:
instances.gt_masks = BitMasks(torch.empty((0, *image_shape)))
dataset_dict["instances"].append(instances)
return dataset_dict
class CocoClipDatasetMapper:
"""
    A callable which takes a COCO image, converts it into a clip of multiple
    (independently augmented) frames, and maps it into a format used by the model.
"""
@configurable
def __init__(
self,
is_train: bool,
*,
augmentations: List[Union[T.Augmentation, T.Transform]],
image_format: str,
use_instance_mask: bool = False,
sampling_frame_num: int = 2,
):
"""
NOTE: this interface is experimental.
Args:
is_train: whether it's used in training or inference
augmentations: a list of augmentations or deterministic transforms to apply
image_format: an image format supported by :func:`detection_utils.read_image`.
use_instance_mask: whether to process instance segmentation annotations, if available
"""
# fmt: off
self.is_train = is_train
self.augmentations = T.AugmentationList(augmentations)
self.image_format = image_format
self.use_instance_mask = use_instance_mask
self.sampling_frame_num = sampling_frame_num
# fmt: on
logger = logging.getLogger(__name__)
mode = "training" if is_train else "inference"
logger.info(f"[DatasetMapper] Augmentations used in {mode}: {augmentations}")
@classmethod
def from_config(cls, cfg, is_train: bool = True):
augs = build_augmentation(cfg, is_train)
sampling_frame_num = cfg.INPUT.SAMPLING_FRAME_NUM
ret = {
"is_train": is_train,
"augmentations": augs,
"image_format": cfg.INPUT.FORMAT,
"use_instance_mask": cfg.MODEL.MASK_ON,
"sampling_frame_num": sampling_frame_num,
}
return ret
def __call__(self, dataset_dict):
"""
Args:
dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
Returns:
dict: a format that builtin models in detectron2 accept
"""
dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
img_annos = dataset_dict.pop("annotations", None)
file_name = dataset_dict.pop("file_name", None)
original_image = utils.read_image(file_name, format=self.image_format)
dataset_dict["image"] = []
dataset_dict["instances"] = []
dataset_dict["file_names"] = [file_name] * self.sampling_frame_num
for _ in range(self.sampling_frame_num):
utils.check_image_size(dataset_dict, original_image)
aug_input = T.AugInput(original_image)
transforms = self.augmentations(aug_input)
image = aug_input.image
image_shape = image.shape[:2] # h, w
# Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
# but not efficient on large generic data structures due to the use of pickle & mp.Queue.
# Therefore it's important to use torch.Tensor.
dataset_dict["image"].append(torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1))))
if (img_annos is None) or (not self.is_train):
continue
_img_annos = []
for anno in img_annos:
_anno = {}
for k, v in anno.items():
_anno[k] = copy.deepcopy(v)
_img_annos.append(_anno)
# USER: Implement additional transformations if you have other types of data
annos = [
utils.transform_instance_annotations(obj, transforms, image_shape)
for obj in _img_annos
if obj.get("iscrowd", 0) == 0
]
_gt_ids = list(range(len(annos)))
for idx in range(len(annos)):
if len(annos[idx]["segmentation"]) == 0:
annos[idx]["segmentation"] = [np.array([0.0] * 6)]
instances = utils.annotations_to_instances(annos, image_shape, mask_format="bitmask")
instances.gt_ids = torch.tensor(_gt_ids)
if instances.has("gt_masks"):
instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
instances = filter_empty_instances(instances)
else:
instances.gt_masks = BitMasks(torch.empty((0, *image_shape)))
dataset_dict["instances"].append(instances)
return dataset_dict
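# A small sketch (illustration only) of the clip-level format produced by the two
# mappers above: "image" is a per-frame list of CHW tensors, and during training
# "instances" is a per-frame list of the same length.
def _check_mapped_clip(dataset_dict, expected_num_frames):
    assert len(dataset_dict["image"]) == expected_num_frames
    assert all(frame.dim() == 3 for frame in dataset_dict["image"])
    if len(dataset_dict.get("instances", [])) > 0:  # empty at inference time
        assert len(dataset_dict["instances"]) == len(dataset_dict["image"])
    return True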
| CutLER-main | videocutler/mask2former_video/data_video/dataset_mapper.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Modified by XuDong Wang from https://github.com/facebookresearch/Mask2Former/tree/main/mask2former_video
from . import builtin # ensure the builtin datasets are registered
__all__ = [k for k in globals().keys() if "builtin" not in k and not k.startswith("_")]
| CutLER-main | videocutler/mask2former_video/data_video/datasets/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Modified by XuDong Wang from https://github.com/facebookresearch/Mask2Former/tree/main/mask2former_video
import os
from .ytvis import (
register_ytvis_instances,
_get_ytvis_2019_instances_meta,
_get_ytvis_2021_instances_meta,
_get_imagenet_cls_agnostic_instances_meta,
)
# ==== Predefined splits for YTVIS 2019 ===========
_PREDEFINED_SPLITS_YTVIS_2019 = {
"ytvis_2019_train": ("ytvis_2019/train/JPEGImages",
"ytvis_2019/train.json"),
"ytvis_2019_val": ("ytvis_2019/valid/JPEGImages",
"ytvis_2019/valid.json"),
"ytvis_2019_test": ("ytvis_2019/test/JPEGImages",
"ytvis_2019/test.json"),
"ytvis_2019_train_5perc": ("ytvis_2019/train/JPEGImages",
"ytvis_2019/train_5percent.json"),
"ytvis_2019_train_10perc": ("ytvis_2019/train/JPEGImages",
"ytvis_2019/train_10percent.json"),
"ytvis_2019_train_20perc": ("ytvis_2019/train/JPEGImages",
"ytvis_2019/train_20percent.json"),
"ytvis_2019_train_30perc": ("ytvis_2019/train/JPEGImages",
"ytvis_2019/train_30percent.json"),
"ytvis_2019_train_40perc": ("ytvis_2019/train/JPEGImages",
"ytvis_2019/train_40percent.json"),
"ytvis_2019_train_50perc": ("ytvis_2019/train/JPEGImages",
"ytvis_2019/train_50percent.json"),
}
# ==== Predefined splits for YTVIS 2021 ===========
_PREDEFINED_SPLITS_YTVIS_2021 = {
"ytvis_2021_train": ("ytvis_2021/train/JPEGImages",
"ytvis_2021/train.json"),
"ytvis_2021_val": ("ytvis_2021/valid/JPEGImages",
"ytvis_2021/valid.json"),
"ytvis_2021_test": ("ytvis_2021/test/JPEGImages",
"ytvis_2021/test.json"),
"ytvis_2021_minus_2019_train": ("ytvis_2021/train/JPEGImages",
"ytvis_2021/instances_val_sub.json"),
}
_PREDEFINED_SPLITS_ImageNet_CLS_AGNOSTIC = {
"imagenet_video_train_cls_agnostic": ("imagenet/train",
"imagenet/annotations/video_imagenet_train_fixsize480_tau0.15_N3.json"),
}
def register_all_ytvis_2019(root):
for key, (image_root, json_file) in _PREDEFINED_SPLITS_YTVIS_2019.items():
# Assume pre-defined datasets live in `./datasets`.
register_ytvis_instances(
key,
_get_ytvis_2019_instances_meta(),
os.path.join(root, json_file) if "://" not in json_file else json_file,
os.path.join(root, image_root),
)
def register_all_ytvis_2021(root):
for key, (image_root, json_file) in _PREDEFINED_SPLITS_YTVIS_2021.items():
# Assume pre-defined datasets live in `./datasets`.
register_ytvis_instances(
key,
_get_ytvis_2021_instances_meta(),
os.path.join(root, json_file) if "://" not in json_file else json_file,
os.path.join(root, image_root),
)
def register_all_imagenet_cls_agnostic(root):
for key, (image_root, json_file) in _PREDEFINED_SPLITS_ImageNet_CLS_AGNOSTIC.items():
# Assume pre-defined datasets live in `./datasets`.
register_ytvis_instances(
key,
_get_imagenet_cls_agnostic_instances_meta(),
os.path.join(root, json_file) if "://" not in json_file else json_file,
os.path.join(root, image_root),
)
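# A minimal sketch of registering one extra split with the helpers above; the
# split name and file paths below are made-up placeholders, not files shipped
# with the repo.
def _register_extra_ytvis_split_example(root):
    register_ytvis_instances(
        "ytvis_2019_train_custom",
        _get_ytvis_2019_instances_meta(),
        os.path.join(root, "ytvis_2019/train_custom.json"),
        os.path.join(root, "ytvis_2019/train/JPEGImages"),
    )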
if __name__.endswith(".builtin"):
# Assume pre-defined datasets live in `./datasets`.
_root = os.getenv("DETECTRON2_DATASETS", "datasets")
register_all_ytvis_2019(_root)
register_all_ytvis_2021(_root)
    register_all_imagenet_cls_agnostic(_root)
| CutLER-main | videocutler/mask2former_video/data_video/datasets/builtin.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Modified by XuDong Wang from https://github.com/facebookresearch/Mask2Former/tree/main/mask2former_video
import contextlib
import io
import json
import logging
import numpy as np
import os
import pycocotools.mask as mask_util
from fvcore.common.file_io import PathManager
from fvcore.common.timer import Timer
from detectron2.structures import Boxes, BoxMode, PolygonMasks
from detectron2.data import DatasetCatalog, MetadataCatalog
"""
This file contains functions to parse YTVIS dataset of
COCO-format annotations into dicts in "Detectron2 format".
"""
logger = logging.getLogger(__name__)
__all__ = ["load_ytvis_json", "register_ytvis_instances"]
YTVIS_CATEGORIES_2019 = [
{"color": [220, 20, 60], "isthing": 1, "id": 1, "name": "person"},
{"color": [0, 82, 0], "isthing": 1, "id": 2, "name": "giant_panda"},
{"color": [119, 11, 32], "isthing": 1, "id": 3, "name": "lizard"},
{"color": [165, 42, 42], "isthing": 1, "id": 4, "name": "parrot"},
{"color": [134, 134, 103], "isthing": 1, "id": 5, "name": "skateboard"},
{"color": [0, 0, 142], "isthing": 1, "id": 6, "name": "sedan"},
{"color": [255, 109, 65], "isthing": 1, "id": 7, "name": "ape"},
{"color": [0, 226, 252], "isthing": 1, "id": 8, "name": "dog"},
{"color": [5, 121, 0], "isthing": 1, "id": 9, "name": "snake"},
{"color": [0, 60, 100], "isthing": 1, "id": 10, "name": "monkey"},
{"color": [250, 170, 30], "isthing": 1, "id": 11, "name": "hand"},
{"color": [100, 170, 30], "isthing": 1, "id": 12, "name": "rabbit"},
{"color": [179, 0, 194], "isthing": 1, "id": 13, "name": "duck"},
{"color": [255, 77, 255], "isthing": 1, "id": 14, "name": "cat"},
{"color": [120, 166, 157], "isthing": 1, "id": 15, "name": "cow"},
{"color": [73, 77, 174], "isthing": 1, "id": 16, "name": "fish"},
{"color": [0, 80, 100], "isthing": 1, "id": 17, "name": "train"},
{"color": [182, 182, 255], "isthing": 1, "id": 18, "name": "horse"},
{"color": [0, 143, 149], "isthing": 1, "id": 19, "name": "turtle"},
{"color": [174, 57, 255], "isthing": 1, "id": 20, "name": "bear"},
{"color": [0, 0, 230], "isthing": 1, "id": 21, "name": "motorbike"},
{"color": [72, 0, 118], "isthing": 1, "id": 22, "name": "giraffe"},
{"color": [255, 179, 240], "isthing": 1, "id": 23, "name": "leopard"},
{"color": [0, 125, 92], "isthing": 1, "id": 24, "name": "fox"},
{"color": [209, 0, 151], "isthing": 1, "id": 25, "name": "deer"},
{"color": [188, 208, 182], "isthing": 1, "id": 26, "name": "owl"},
{"color": [145, 148, 174], "isthing": 1, "id": 27, "name": "surfboard"},
{"color": [106, 0, 228], "isthing": 1, "id": 28, "name": "airplane"},
{"color": [0, 0, 70], "isthing": 1, "id": 29, "name": "truck"},
{"color": [199, 100, 0], "isthing": 1, "id": 30, "name": "zebra"},
{"color": [166, 196, 102], "isthing": 1, "id": 31, "name": "tiger"},
{"color": [110, 76, 0], "isthing": 1, "id": 32, "name": "elephant"},
{"color": [133, 129, 255], "isthing": 1, "id": 33, "name": "snowboard"},
{"color": [0, 0, 192], "isthing": 1, "id": 34, "name": "boat"},
{"color": [183, 130, 88], "isthing": 1, "id": 35, "name": "shark"},
{"color": [130, 114, 135], "isthing": 1, "id": 36, "name": "mouse"},
{"color": [107, 142, 35], "isthing": 1, "id": 37, "name": "frog"},
{"color": [0, 228, 0], "isthing": 1, "id": 38, "name": "eagle"},
{"color": [174, 255, 243], "isthing": 1, "id": 39, "name": "earless_seal"},
{"color": [255, 208, 186], "isthing": 1, "id": 40, "name": "tennis_racket"},
]
IMAGENET_CATEGORIES_cls_agnostic = [
# {"color": [220, 20, 60], "isthing": 1, "id": 1, "name": "fg"},
# {"color": [120, 166, 157], "isthing": 1, "id": 1, "name": "fg"},
{"color": [73, 77, 174], "isthing": 1, "id": 1, "name": "fg"},
# {"color": [199, 100, 0], "isthing": 1, "id": 2, "name": "fg"},
]
YTVIS_CATEGORIES_2021 = [
{"color": [106, 0, 228], "isthing": 1, "id": 1, "name": "airplane"},
{"color": [174, 57, 255], "isthing": 1, "id": 2, "name": "bear"},
{"color": [255, 109, 65], "isthing": 1, "id": 3, "name": "bird"},
{"color": [0, 0, 192], "isthing": 1, "id": 4, "name": "boat"},
{"color": [0, 0, 142], "isthing": 1, "id": 5, "name": "car"},
{"color": [255, 77, 255], "isthing": 1, "id": 6, "name": "cat"},
{"color": [120, 166, 157], "isthing": 1, "id": 7, "name": "cow"},
{"color": [209, 0, 151], "isthing": 1, "id": 8, "name": "deer"},
{"color": [0, 226, 252], "isthing": 1, "id": 9, "name": "dog"},
{"color": [179, 0, 194], "isthing": 1, "id": 10, "name": "duck"},
{"color": [174, 255, 243], "isthing": 1, "id": 11, "name": "earless_seal"},
{"color": [110, 76, 0], "isthing": 1, "id": 12, "name": "elephant"},
{"color": [73, 77, 174], "isthing": 1, "id": 13, "name": "fish"},
{"color": [250, 170, 30], "isthing": 1, "id": 14, "name": "flying_disc"},
{"color": [0, 125, 92], "isthing": 1, "id": 15, "name": "fox"},
{"color": [107, 142, 35], "isthing": 1, "id": 16, "name": "frog"},
{"color": [0, 82, 0], "isthing": 1, "id": 17, "name": "giant_panda"},
{"color": [72, 0, 118], "isthing": 1, "id": 18, "name": "giraffe"},
{"color": [182, 182, 255], "isthing": 1, "id": 19, "name": "horse"},
{"color": [255, 179, 240], "isthing": 1, "id": 20, "name": "leopard"},
{"color": [119, 11, 32], "isthing": 1, "id": 21, "name": "lizard"},
{"color": [0, 60, 100], "isthing": 1, "id": 22, "name": "monkey"},
{"color": [0, 0, 230], "isthing": 1, "id": 23, "name": "motorbike"},
{"color": [130, 114, 135], "isthing": 1, "id": 24, "name": "mouse"},
{"color": [165, 42, 42], "isthing": 1, "id": 25, "name": "parrot"},
{"color": [220, 20, 60], "isthing": 1, "id": 26, "name": "person"},
{"color": [100, 170, 30], "isthing": 1, "id": 27, "name": "rabbit"},
{"color": [183, 130, 88], "isthing": 1, "id": 28, "name": "shark"},
{"color": [134, 134, 103], "isthing": 1, "id": 29, "name": "skateboard"},
{"color": [5, 121, 0], "isthing": 1, "id": 30, "name": "snake"},
{"color": [133, 129, 255], "isthing": 1, "id": 31, "name": "snowboard"},
{"color": [188, 208, 182], "isthing": 1, "id": 32, "name": "squirrel"},
{"color": [145, 148, 174], "isthing": 1, "id": 33, "name": "surfboard"},
{"color": [255, 208, 186], "isthing": 1, "id": 34, "name": "tennis_racket"},
{"color": [166, 196, 102], "isthing": 1, "id": 35, "name": "tiger"},
{"color": [0, 80, 100], "isthing": 1, "id": 36, "name": "train"},
{"color": [0, 0, 70], "isthing": 1, "id": 37, "name": "truck"},
{"color": [0, 143, 149], "isthing": 1, "id": 38, "name": "turtle"},
{"color": [0, 228, 0], "isthing": 1, "id": 39, "name": "whale"},
{"color": [199, 100, 0], "isthing": 1, "id": 40, "name": "zebra"},
]
def _get_ytvis_2019_instances_meta():
thing_ids = [k["id"] for k in YTVIS_CATEGORIES_2019 if k["isthing"] == 1]
thing_colors = [k["color"] for k in YTVIS_CATEGORIES_2019 if k["isthing"] == 1]
assert len(thing_ids) == 40, len(thing_ids)
# Mapping from the incontiguous YTVIS category id to an id in [0, 39]
thing_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(thing_ids)}
thing_classes = [k["name"] for k in YTVIS_CATEGORIES_2019 if k["isthing"] == 1]
ret = {
"thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id,
"thing_classes": thing_classes,
"thing_colors": thing_colors,
}
return ret
def _get_ytvis_2021_instances_meta():
thing_ids = [k["id"] for k in YTVIS_CATEGORIES_2021 if k["isthing"] == 1]
thing_colors = [k["color"] for k in YTVIS_CATEGORIES_2021 if k["isthing"] == 1]
assert len(thing_ids) == 40, len(thing_ids)
# Mapping from the incontiguous YTVIS category id to an id in [0, 39]
thing_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(thing_ids)}
thing_classes = [k["name"] for k in YTVIS_CATEGORIES_2021 if k["isthing"] == 1]
ret = {
"thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id,
"thing_classes": thing_classes,
"thing_colors": thing_colors,
}
return ret
def _get_imagenet_cls_agnostic_instances_meta():
thing_ids = [k["id"] for k in IMAGENET_CATEGORIES_cls_agnostic if k["isthing"] == 1]
thing_colors = [k["color"] for k in IMAGENET_CATEGORIES_cls_agnostic if k["isthing"] == 1]
assert len(thing_ids) == 1, len(thing_ids)
    # Mapping from the dataset category id to a contiguous id starting at 0 (a single "fg" class here)
thing_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(thing_ids)}
thing_classes = [k["name"] for k in IMAGENET_CATEGORIES_cls_agnostic if k["isthing"] == 1]
ret = {
"thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id,
"thing_classes": thing_classes,
"thing_colors": thing_colors,
}
return ret
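# A small self-contained sketch (illustration only) of what the metadata above
# provides: the raw YTVIS 2019 category ids (1..40) are mapped to the contiguous
# [0, 39] ids that the model and evaluator work with.
def _contiguous_id_mapping_demo():
    meta = _get_ytvis_2019_instances_meta()
    id_map = meta["thing_dataset_id_to_contiguous_id"]
    assert id_map[1] == 0 and id_map[40] == 39
    return id_map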
def load_ytvis_json(json_file, image_root, dataset_name=None, extra_annotation_keys=None):
from .ytvis_api.ytvos import YTVOS
timer = Timer()
json_file = PathManager.get_local_path(json_file)
with contextlib.redirect_stdout(io.StringIO()):
ytvis_api = YTVOS(json_file)
if timer.seconds() > 1:
logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()))
id_map = None
if dataset_name is not None:
meta = MetadataCatalog.get(dataset_name)
cat_ids = sorted(ytvis_api.getCatIds())
cats = ytvis_api.loadCats(cat_ids)
# The categories in a custom json file may not be sorted.
thing_classes = [c["name"] for c in sorted(cats, key=lambda x: x["id"])]
meta.thing_classes = thing_classes
# In COCO, certain category ids are artificially removed,
# and by convention they are always ignored.
        # We deal with this id issue and translate
        # the category ids to contiguous ids in [0, #categories).
# It works by looking at the "categories" field in the json, therefore
# if users' own json also have incontiguous ids, we'll
# apply this mapping as well but print a warning.
if not (min(cat_ids) == 1 and max(cat_ids) == len(cat_ids)):
if "coco" not in dataset_name:
logger.warning(
"""
Category ids in annotations are not in [1, #categories]! We'll apply a mapping for you.
"""
)
id_map = {v: i for i, v in enumerate(cat_ids)}
meta.thing_dataset_id_to_contiguous_id = id_map
# sort indices for reproducible results
vid_ids = sorted(ytvis_api.vids.keys())
# vids is a list of dicts, each looks something like:
# {'license': 1,
# 'flickr_url': ' ',
# 'file_names': ['ff25f55852/00000.jpg', 'ff25f55852/00005.jpg', ..., 'ff25f55852/00175.jpg'],
# 'height': 720,
# 'width': 1280,
# 'length': 36,
# 'date_captured': '2019-04-11 00:55:41.903902',
# 'id': 2232}
vids = ytvis_api.loadVids(vid_ids)
anns = [ytvis_api.vidToAnns[vid_id] for vid_id in vid_ids]
total_num_valid_anns = sum([len(x) for x in anns])
total_num_anns = len(ytvis_api.anns)
if total_num_valid_anns < total_num_anns:
logger.warning(
f"{json_file} contains {total_num_anns} annotations, but only "
f"{total_num_valid_anns} of them match to images in the file."
)
vids_anns = list(zip(vids, anns))
logger.info("Loaded {} videos in YTVIS format from {}".format(len(vids_anns), json_file))
dataset_dicts = []
ann_keys = ["iscrowd", "category_id", "id"] + (extra_annotation_keys or [])
num_instances_without_valid_segmentation = 0
for (vid_dict, anno_dict_list) in vids_anns:
record = {}
record["file_names"] = [os.path.join(image_root, vid_dict["file_names"][i]) for i in range(vid_dict["length"])]
record["height"] = vid_dict["height"]
record["width"] = vid_dict["width"]
record["length"] = vid_dict["length"]
video_id = record["video_id"] = vid_dict["id"]
video_objs = []
for frame_idx in range(record["length"]):
frame_objs = []
for anno in anno_dict_list:
assert anno["video_id"] == video_id
obj = {key: anno[key] for key in ann_keys if key in anno}
_bboxes = anno.get("bboxes", None)
_segm = anno.get("segmentations", None)
if not (_bboxes and _segm and _bboxes[frame_idx] and _segm[frame_idx]):
continue
bbox = _bboxes[frame_idx]
segm = _segm[frame_idx]
obj["bbox"] = bbox
obj["bbox_mode"] = BoxMode.XYWH_ABS
if isinstance(segm, dict):
if isinstance(segm["counts"], list):
# convert to compressed RLE
segm = mask_util.frPyObjects(segm, *segm["size"])
elif segm:
# filter out invalid polygons (< 3 points)
segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6]
if len(segm) == 0:
num_instances_without_valid_segmentation += 1
continue # ignore this instance
obj["segmentation"] = segm
if id_map:
obj["category_id"] = id_map[obj["category_id"]]
frame_objs.append(obj)
video_objs.append(frame_objs)
record["annotations"] = video_objs
dataset_dicts.append(record)
if num_instances_without_valid_segmentation > 0:
logger.warning(
"Filtered out {} instances without valid segmentation. ".format(
num_instances_without_valid_segmentation
)
+ "There might be issues in your dataset generation process. "
"A valid polygon should be a list[float] with even length >= 6."
)
return dataset_dicts
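# Illustrative sketch of one record returned by load_ytvis_json above. The field names
# follow the code; the concrete values (paths, sizes, ids) are placeholders only.
def _example_ytvis_record():
    return {
        "file_names": ["train/JPEGImages/abc123/00000.jpg", "train/JPEGImages/abc123/00005.jpg"],
        "height": 720,
        "width": 1280,
        "length": 2,
        "video_id": 1,
        # one list of instance dicts per frame; frames without valid boxes/masks stay empty
        "annotations": [
            [{"id": 1, "category_id": 0, "bbox": [10.0, 20.0, 30.0, 40.0],
              "bbox_mode": BoxMode.XYWH_ABS,
              "segmentation": [[10.0, 20.0, 40.0, 20.0, 40.0, 60.0]]}],
            [],
        ],
    }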
def register_ytvis_instances(name, metadata, json_file, image_root):
"""
Register a dataset in YTVIS's json annotation format for
instance tracking.
Args:
name (str): the name that identifies a dataset, e.g. "ytvis_train".
metadata (dict): extra metadata associated with this dataset. You can
leave it as an empty dict.
json_file (str): path to the json instance annotation file.
image_root (str or path-like): directory which contains all the images.
"""
assert isinstance(name, str), name
assert isinstance(json_file, (str, os.PathLike)), json_file
assert isinstance(image_root, (str, os.PathLike)), image_root
# 1. register a function which returns dicts
DatasetCatalog.register(name, lambda: load_ytvis_json(json_file, image_root, name))
# 2. Optionally, add metadata about this dataset,
# since they might be useful in evaluation, visualization or logging
MetadataCatalog.get(name).set(
json_file=json_file, image_root=image_root, evaluator_type="ytvis", **metadata
)
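# Illustrative sketch of registering a custom split with register_ytvis_instances. The
# dataset name and both paths are assumptions, not files shipped with the repository.
def _example_register_custom_split():
    register_ytvis_instances(
        "ytvis_2019_train_custom",
        _get_ytvis_2019_instances_meta(),
        "datasets/ytvis_2019/annotations/instances_train_sub.json",
        "datasets/ytvis_2019/train/JPEGImages",
    )
    # DatasetCatalog.get("ytvis_2019_train_custom") would now invoke load_ytvis_json lazily.
    return MetadataCatalog.get("ytvis_2019_train_custom")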
if __name__ == "__main__":
"""
Test the YTVIS json dataset loader.
"""
from detectron2.utils.logger import setup_logger
from detectron2.utils.visualizer import Visualizer
import detectron2.data.datasets # noqa # add pre-defined metadata
import sys
from PIL import Image
logger = setup_logger(name=__name__)
#assert sys.argv[3] in DatasetCatalog.list()
meta = MetadataCatalog.get("ytvis_2019_train")
json_file = "./datasets/ytvis/instances_train_sub.json"
image_root = "./datasets/ytvis/train/JPEGImages"
dicts = load_ytvis_json(json_file, image_root, dataset_name="ytvis_2019_train")
logger.info("Done loading {} samples.".format(len(dicts)))
dirname = "ytvis-data-vis"
os.makedirs(dirname, exist_ok=True)
def extract_frame_dic(dic, frame_idx):
import copy
frame_dic = copy.deepcopy(dic)
annos = frame_dic.get("annotations", None)
if annos:
frame_dic["annotations"] = annos[frame_idx]
return frame_dic
for d in dicts:
vid_name = d["file_names"][0].split('/')[-2]
os.makedirs(os.path.join(dirname, vid_name), exist_ok=True)
for idx, file_name in enumerate(d["file_names"]):
img = np.array(Image.open(file_name))
visualizer = Visualizer(img, metadata=meta)
vis = visualizer.draw_dataset_dict(extract_frame_dic(d, idx))
fpath = os.path.join(dirname, vid_name, file_name.split('/')[-1])
vis.save(fpath)
| CutLER-main | videocutler/mask2former_video/data_video/datasets/ytvis.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/youtubevos/cocoapi
__author__ = 'ychfan'
import numpy as np
import datetime
import time
from collections import defaultdict
from pycocotools import mask as maskUtils
import copy
class YTVOSeval:
# Interface for evaluating video instance segmentation on the YouTubeVIS dataset.
#
# The usage for YTVOSeval is as follows:
# cocoGt=..., cocoDt=... # load dataset and results
# E = YTVOSeval(cocoGt,cocoDt); # initialize YTVOSeval object
# E.params.recThrs = ...; # set parameters as desired
# E.evaluate(); # run per image evaluation
# E.accumulate(); # accumulate per image results
# E.summarize(); # display summary metrics of results
# For example usage see evalDemo.m and http://mscoco.org/.
#
# The evaluation parameters are as follows (defaults in brackets):
# imgIds - [all] N img ids to use for evaluation
# catIds - [all] K cat ids to use for evaluation
# iouThrs - [.5:.05:.95] T=10 IoU thresholds for evaluation
# recThrs - [0:.01:1] R=101 recall thresholds for evaluation
# areaRng - [...] A=4 object area ranges for evaluation
# maxDets - [1 10 100] M=3 thresholds on max detections per image
# iouType - ['segm'] set iouType to 'segm', 'bbox' or 'keypoints'
# iouType replaced the now DEPRECATED useSegm parameter.
# useCats - [1] if true use category labels for evaluation
# Note: if useCats=0 category labels are ignored as in proposal scoring.
# Note: multiple areaRngs [Ax2] and maxDets [Mx1] can be specified.
#
# evaluate(): evaluates detections on every image and every category and
# concats the results into the "evalImgs" with fields:
# dtIds - [1xD] id for each of the D detections (dt)
# gtIds - [1xG] id for each of the G ground truths (gt)
# dtMatches - [TxD] matching gt id at each IoU or 0
# gtMatches - [TxG] matching dt id at each IoU or 0
# dtScores - [1xD] confidence of each dt
# gtIgnore - [1xG] ignore flag for each gt
# dtIgnore - [TxD] ignore flag for each dt at each IoU
#
# accumulate(): accumulates the per-image, per-category evaluation
# results in "evalImgs" into the dictionary "eval" with fields:
# params - parameters used for evaluation
# date - date evaluation was performed
# counts - [T,R,K,A,M] parameter dimensions (see above)
# precision - [TxRxKxAxM] precision for every evaluation setting
# recall - [TxKxAxM] max recall for every evaluation setting
# Note: precision and recall==-1 for settings with no gt objects.
#
# See also coco, mask, pycocoDemo, pycocoEvalDemo
#
# Microsoft COCO Toolbox. version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2015.
# Licensed under the Simplified BSD License [see coco/license.txt]
def __init__(self, cocoGt=None, cocoDt=None, iouType='segm'):
'''
Initialize CocoEval using coco APIs for gt and dt
:param cocoGt: coco object with ground truth annotations
:param cocoDt: coco object with detection results
:return: None
'''
if not iouType:
print('iouType not specified. use default iouType segm')
self.cocoGt = cocoGt # ground truth COCO API
self.cocoDt = cocoDt # detections COCO API
self.params = {} # evaluation parameters
self.evalVids = defaultdict(list) # per-image per-category evaluation results [KxAxI] elements
self.eval = {} # accumulated evaluation results
self._gts = defaultdict(list) # gt for evaluation
self._dts = defaultdict(list) # dt for evaluation
self.params = Params(iouType=iouType) # parameters
self._paramsEval = {} # parameters for evaluation
self.stats = [] # result summarization
self.ious = {} # ious between all gts and dts
if cocoGt is not None:
self.params.vidIds = sorted(cocoGt.getVidIds())
self.params.catIds = sorted(cocoGt.getCatIds())
def _prepare(self):
'''
Prepare ._gts and ._dts for evaluation based on params
:return: None
'''
def _toMask(anns, coco):
# modify ann['segmentation'] by reference
for ann in anns:
for i, a in enumerate(ann['segmentations']):
if a:
rle = coco.annToRLE(ann, i)
ann['segmentations'][i] = rle
l = [a for a in ann['areas'] if a]
if len(l)==0:
ann['avg_area'] = 0
else:
ann['avg_area'] = np.array(l).mean()
p = self.params
if p.useCats:
gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(vidIds=p.vidIds, catIds=p.catIds))
dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(vidIds=p.vidIds, catIds=p.catIds))
else:
gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(vidIds=p.vidIds))
dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(vidIds=p.vidIds))
# convert ground truth to mask if iouType == 'segm'
if p.iouType == 'segm':
_toMask(gts, self.cocoGt)
_toMask(dts, self.cocoDt)
# set ignore flag
for gt in gts:
gt['ignore'] = gt['ignore'] if 'ignore' in gt else 0
gt['ignore'] = 'iscrowd' in gt and gt['iscrowd']
if p.iouType == 'keypoints':
gt['ignore'] = (gt['num_keypoints'] == 0) or gt['ignore']
self._gts = defaultdict(list) # gt for evaluation
self._dts = defaultdict(list) # dt for evaluation
for gt in gts:
self._gts[gt['video_id'], gt['category_id']].append(gt)
for dt in dts:
self._dts[dt['video_id'], dt['category_id']].append(dt)
self.evalVids = defaultdict(list) # per-image per-category evaluation results
self.eval = {} # accumulated evaluation results
def evaluate(self):
'''
Run per-video evaluation on the given videos and store results (a list of dict) in self.evalImgs
:return: None
'''
tic = time.time()
print('Running per image evaluation...')
p = self.params
# add backward compatibility if useSegm is specified in params
if p.useSegm is not None:
p.iouType = 'segm' if p.useSegm == 1 else 'bbox'
print('useSegm (deprecated) is not None. Running {} evaluation'.format(p.iouType))
print('Evaluate annotation type *{}*'.format(p.iouType))
p.vidIds = list(np.unique(p.vidIds))
if p.useCats:
p.catIds = list(np.unique(p.catIds))
p.maxDets = sorted(p.maxDets)
self.params=p
self._prepare()
# loop through images, area range, max detection number
catIds = p.catIds if p.useCats else [-1]
if p.iouType == 'segm' or p.iouType == 'bbox':
computeIoU = self.computeIoU
elif p.iouType == 'keypoints':
computeIoU = self.computeOks
self.ious = {(vidId, catId): computeIoU(vidId, catId) \
for vidId in p.vidIds
for catId in catIds}
evaluateVid = self.evaluateVid
maxDet = p.maxDets[-1]
self.evalImgs = [evaluateVid(vidId, catId, areaRng, maxDet)
for catId in catIds
for areaRng in p.areaRng
for vidId in p.vidIds
]
self._paramsEval = copy.deepcopy(self.params)
toc = time.time()
print('DONE (t={:0.2f}s).'.format(toc-tic))
def computeIoU(self, vidId, catId):
p = self.params
if p.useCats:
gt = self._gts[vidId,catId]
dt = self._dts[vidId,catId]
else:
gt = [_ for cId in p.catIds for _ in self._gts[vidId,cId]]
dt = [_ for cId in p.catIds for _ in self._dts[vidId,cId]]
if len(gt) == 0 and len(dt) ==0:
return []
inds = np.argsort([-d['score'] for d in dt], kind='mergesort')
dt = [dt[i] for i in inds]
if len(dt) > p.maxDets[-1]:
dt=dt[0:p.maxDets[-1]]
if p.iouType == 'segm':
g = [g['segmentations'] for g in gt]
d = [d['segmentations'] for d in dt]
elif p.iouType == 'bbox':
g = [g['bboxes'] for g in gt]
d = [d['bboxes'] for d in dt]
else:
raise Exception('unknown iouType for iou computation')
# compute iou between each dt and gt region
iscrowd = [int(o['iscrowd']) for o in gt]
#ious = maskUtils.iou(d,g,iscrowd)
def iou_seq(d_seq, g_seq):
i = .0
u = .0
for d, g in zip(d_seq, g_seq):
if d and g:
i += maskUtils.area(maskUtils.merge([d, g], True))
u += maskUtils.area(maskUtils.merge([d, g], False))
elif not d and g:
u += maskUtils.area(g)
elif d and not g:
u += maskUtils.area(d)
if not u > .0:
print("Mask sizes in video {} and category {} may not match!".format(vidId, catId))
iou = i / u if u > .0 else .0
return iou
ious = np.zeros([len(d), len(g)])
for i, j in np.ndindex(ious.shape):
ious[i, j] = iou_seq(d[i], g[j])
#print(vidId, catId, ious.shape, ious)
return ious
def computeOks(self, imgId, catId):
p = self.params
# dimension here should be Nxm
gts = self._gts[imgId, catId]
dts = self._dts[imgId, catId]
inds = np.argsort([-d['score'] for d in dts], kind='mergesort')
dts = [dts[i] for i in inds]
if len(dts) > p.maxDets[-1]:
dts = dts[0:p.maxDets[-1]]
# if len(gts) == 0 and len(dts) == 0:
if len(gts) == 0 or len(dts) == 0:
return []
ious = np.zeros((len(dts), len(gts)))
sigmas = np.array([.26, .25, .25, .35, .35, .79, .79, .72, .72, .62,.62, 1.07, 1.07, .87, .87, .89, .89])/10.0
vars = (sigmas * 2)**2
k = len(sigmas)
# compute oks between each detection and ground truth object
for j, gt in enumerate(gts):
# create bounds for ignore regions(double the gt bbox)
g = np.array(gt['keypoints'])
xg = g[0::3]; yg = g[1::3]; vg = g[2::3]
k1 = np.count_nonzero(vg > 0)
bb = gt['bbox']
x0 = bb[0] - bb[2]; x1 = bb[0] + bb[2] * 2
y0 = bb[1] - bb[3]; y1 = bb[1] + bb[3] * 2
for i, dt in enumerate(dts):
d = np.array(dt['keypoints'])
xd = d[0::3]; yd = d[1::3]
if k1>0:
# measure the per-keypoint distance if keypoints visible
dx = xd - xg
dy = yd - yg
else:
# measure minimum distance to keypoints in (x0,y0) & (x1,y1)
z = np.zeros((k))
dx = np.max((z, x0-xd),axis=0)+np.max((z, xd-x1),axis=0)
dy = np.max((z, y0-yd),axis=0)+np.max((z, yd-y1),axis=0)
e = (dx**2 + dy**2) / vars / (gt['avg_area']+np.spacing(1)) / 2
if k1 > 0:
e=e[vg > 0]
ious[i, j] = np.sum(np.exp(-e)) / e.shape[0]
return ious
def evaluateVid(self, vidId, catId, aRng, maxDet):
'''
perform evaluation for a single category and video
:return: dict (single video results)
'''
p = self.params
if p.useCats:
gt = self._gts[vidId,catId]
dt = self._dts[vidId,catId]
else:
gt = [_ for cId in p.catIds for _ in self._gts[vidId,cId]]
dt = [_ for cId in p.catIds for _ in self._dts[vidId,cId]]
if len(gt) == 0 and len(dt) ==0:
return None
for g in gt:
if g['ignore'] or (g['avg_area']<aRng[0] or g['avg_area']>aRng[1]):
g['_ignore'] = 1
else:
g['_ignore'] = 0
# sort dt highest score first, sort gt ignore last
gtind = np.argsort([g['_ignore'] for g in gt], kind='mergesort')
gt = [gt[i] for i in gtind]
dtind = np.argsort([-d['score'] for d in dt], kind='mergesort')
dt = [dt[i] for i in dtind[0:maxDet]]
iscrowd = [int(o['iscrowd']) for o in gt]
# load computed ious
ious = self.ious[vidId, catId][:, gtind] if len(self.ious[vidId, catId]) > 0 else self.ious[vidId, catId]
T = len(p.iouThrs)
G = len(gt)
D = len(dt)
gtm = np.zeros((T,G))
dtm = np.zeros((T,D))
gtIg = np.array([g['_ignore'] for g in gt])
dtIg = np.zeros((T,D))
if not len(ious)==0:
for tind, t in enumerate(p.iouThrs):
for dind, d in enumerate(dt):
# information about best match so far (m=-1 -> unmatched)
iou = min([t,1-1e-10])
m = -1
for gind, g in enumerate(gt):
# if this gt already matched, and not a crowd, continue
if gtm[tind,gind]>0 and not iscrowd[gind]:
continue
# if dt matched to reg gt, and on ignore gt, stop
if m>-1 and gtIg[m]==0 and gtIg[gind]==1:
break
# continue to next gt unless better match made
if ious[dind,gind] < iou:
continue
# if match successful and best so far, store appropriately
iou=ious[dind,gind]
m=gind
# if match made store id of match for both dt and gt
if m ==-1:
continue
dtIg[tind,dind] = gtIg[m]
dtm[tind,dind] = gt[m]['id']
gtm[tind,m] = d['id']
# set unmatched detections outside of area range to ignore
a = np.array([d['avg_area']<aRng[0] or d['avg_area']>aRng[1] for d in dt]).reshape((1, len(dt)))
dtIg = np.logical_or(dtIg, np.logical_and(dtm==0, np.repeat(a,T,0)))
# store results for given image and category
return {
'video_id': vidId,
'category_id': catId,
'aRng': aRng,
'maxDet': maxDet,
'dtIds': [d['id'] for d in dt],
'gtIds': [g['id'] for g in gt],
'dtMatches': dtm,
'gtMatches': gtm,
'dtScores': [d['score'] for d in dt],
'gtIgnore': gtIg,
'dtIgnore': dtIg,
}
def accumulate(self, p = None):
'''
Accumulate per image evaluation results and store the result in self.eval
:param p: input params for evaluation
:return: None
'''
print('Accumulating evaluation results...')
tic = time.time()
if not self.evalImgs:
print('Please run evaluate() first')
# allows input customized parameters
if p is None:
p = self.params
p.catIds = p.catIds if p.useCats == 1 else [-1]
T = len(p.iouThrs)
R = len(p.recThrs)
K = len(p.catIds) if p.useCats else 1
A = len(p.areaRng)
M = len(p.maxDets)
precision = -np.ones((T,R,K,A,M)) # -1 for the precision of absent categories
recall = -np.ones((T,K,A,M))
scores = -np.ones((T,R,K,A,M))
# create dictionary for future indexing
_pe = self._paramsEval
catIds = _pe.catIds if _pe.useCats else [-1]
setK = set(catIds)
setA = set(map(tuple, _pe.areaRng))
setM = set(_pe.maxDets)
setI = set(_pe.vidIds)
# get inds to evaluate
k_list = [n for n, k in enumerate(p.catIds) if k in setK]
m_list = [m for n, m in enumerate(p.maxDets) if m in setM]
a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA]
i_list = [n for n, i in enumerate(p.vidIds) if i in setI]
I0 = len(_pe.vidIds)
A0 = len(_pe.areaRng)
# retrieve E at each category, area range, and max number of detections
for k, k0 in enumerate(k_list):
Nk = k0*A0*I0
for a, a0 in enumerate(a_list):
Na = a0*I0
for m, maxDet in enumerate(m_list):
E = [self.evalImgs[Nk + Na + i] for i in i_list]
E = [e for e in E if not e is None]
if len(E) == 0:
continue
dtScores = np.concatenate([e['dtScores'][0:maxDet] for e in E])
# different sorting methods generate slightly different results.
# mergesort is used to be consistent with the Matlab implementation.
inds = np.argsort(-dtScores, kind='mergesort')
dtScoresSorted = dtScores[inds]
dtm = np.concatenate([e['dtMatches'][:,0:maxDet] for e in E], axis=1)[:,inds]
dtIg = np.concatenate([e['dtIgnore'][:,0:maxDet] for e in E], axis=1)[:,inds]
gtIg = np.concatenate([e['gtIgnore'] for e in E])
npig = np.count_nonzero(gtIg==0 )
if npig == 0:
continue
tps = np.logical_and( dtm, np.logical_not(dtIg) )
fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg) )
tp_sum = np.cumsum(tps, axis=1).astype(dtype=float)
fp_sum = np.cumsum(fps, axis=1).astype(dtype=float)
for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):
tp = np.array(tp)
fp = np.array(fp)
nd = len(tp)
rc = tp / npig
pr = tp / (fp+tp+np.spacing(1))
q = np.zeros((R,))
ss = np.zeros((R,))
if nd:
recall[t,k,a,m] = rc[-1]
else:
recall[t,k,a,m] = 0
# numpy is slow without cython optimization for accessing elements
# using python lists instead gives a significant speed improvement
pr = pr.tolist(); q = q.tolist()
for i in range(nd-1, 0, -1):
if pr[i] > pr[i-1]:
pr[i-1] = pr[i]
inds = np.searchsorted(rc, p.recThrs, side='left')
try:
for ri, pi in enumerate(inds):
q[ri] = pr[pi]
ss[ri] = dtScoresSorted[pi]
except:
pass
precision[t,:,k,a,m] = np.array(q)
scores[t,:,k,a,m] = np.array(ss)
self.eval = {
'params': p,
'counts': [T, R, K, A, M],
'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
'precision': precision,
'recall': recall,
'scores': scores,
}
toc = time.time()
print('DONE (t={:0.2f}s).'.format( toc-tic))
def summarize(self):
'''
Compute and display summary metrics for evaluation results.
Note this function can *only* be applied on the default parameter setting
'''
def _summarize( ap=1, iouThr=None, areaRng='all', maxDets=100 ):
p = self.params
iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'
titleStr = 'Average Precision' if ap == 1 else 'Average Recall'
typeStr = '(AP)' if ap==1 else '(AR)'
iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) \
if iouThr is None else '{:0.2f}'.format(iouThr)
aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]
mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]
if ap == 1:
# dimension of precision: [TxRxKxAxM]
s = self.eval['precision']
# IoU
if iouThr is not None:
t = np.where(iouThr == p.iouThrs)[0]
s = s[t]
s = s[:,:,:,aind,mind]
else:
# dimension of recall: [TxKxAxM]
s = self.eval['recall']
if iouThr is not None:
t = np.where(iouThr == p.iouThrs)[0]
s = s[t]
s = s[:,:,aind,mind]
if len(s[s>-1])==0:
mean_s = -1
else:
mean_s = np.mean(s[s>-1])
print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s))
return mean_s
def _summarizeDets():
stats = np.zeros((12,))
stats[0] = _summarize(1)
stats[1] = _summarize(1, iouThr=.5, maxDets=self.params.maxDets[2])
stats[2] = _summarize(1, iouThr=.75, maxDets=self.params.maxDets[2])
stats[3] = _summarize(1, areaRng='small', maxDets=self.params.maxDets[2])
stats[4] = _summarize(1, areaRng='medium', maxDets=self.params.maxDets[2])
stats[5] = _summarize(1, areaRng='large', maxDets=self.params.maxDets[2])
stats[6] = _summarize(0, maxDets=self.params.maxDets[0])
stats[7] = _summarize(0, maxDets=self.params.maxDets[1])
stats[8] = _summarize(0, maxDets=self.params.maxDets[2])
stats[9] = _summarize(0, areaRng='small', maxDets=self.params.maxDets[2])
stats[10] = _summarize(0, areaRng='medium', maxDets=self.params.maxDets[2])
stats[11] = _summarize(0, areaRng='large', maxDets=self.params.maxDets[2])
return stats
def _summarizeKps():
stats = np.zeros((10,))
stats[0] = _summarize(1, maxDets=20)
stats[1] = _summarize(1, maxDets=20, iouThr=.5)
stats[2] = _summarize(1, maxDets=20, iouThr=.75)
stats[3] = _summarize(1, maxDets=20, areaRng='medium')
stats[4] = _summarize(1, maxDets=20, areaRng='large')
stats[5] = _summarize(0, maxDets=20)
stats[6] = _summarize(0, maxDets=20, iouThr=.5)
stats[7] = _summarize(0, maxDets=20, iouThr=.75)
stats[8] = _summarize(0, maxDets=20, areaRng='medium')
stats[9] = _summarize(0, maxDets=20, areaRng='large')
return stats
if not self.eval:
raise Exception('Please run accumulate() first')
iouType = self.params.iouType
if iouType == 'segm' or iouType == 'bbox':
summarize = _summarizeDets
elif iouType == 'keypoints':
summarize = _summarizeKps
self.stats = summarize()
def __str__(self):
self.summarize()
class Params:
'''
Params for coco evaluation api
'''
def setDetParams(self):
self.vidIds = []
self.catIds = []
# np.arange causes trouble. the data point on arange is slightly larger than the true value
#self.iouThrs = np.linspace(.5, 0.95, np.round((0.95 - .5) / .05) + 1, endpoint=True)
#self.recThrs = np.linspace(.0, 1.00, np.round((1.00 - .0) / .01) + 1, endpoint=True)
self.iouThrs = np.linspace(.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
self.recThrs = np.linspace(.0, 1.00, int(np.round((1.00 - .0) / .01)) + 1, endpoint=True)
self.maxDets = [1, 10, 100]
self.areaRng = [[0 ** 2, 1e5 ** 2], [0 ** 2, 128 ** 2], [ 128 ** 2, 256 ** 2], [256 ** 2, 1e5 ** 2]]
self.areaRngLbl = ['all', 'small', 'medium', 'large']
self.useCats = 1
def setKpParams(self):
self.vidIds = []
self.catIds = []
# np.arange causes trouble. the data point on arange is slightly larger than the true value
self.iouThrs = np.linspace(.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
self.recThrs = np.linspace(.0, 1.00, int(np.round((1.00 - .0) / .01)) + 1, endpoint=True)
self.maxDets = [20]
self.areaRng = [[0 ** 2, 1e5 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]]
self.areaRngLbl = ['all', 'medium', 'large']
self.useCats = 1
def __init__(self, iouType='segm'):
if iouType == 'segm' or iouType == 'bbox':
self.setDetParams()
elif iouType == 'keypoints':
self.setKpParams()
else:
raise Exception('iouType not supported')
self.iouType = iouType
# useSegm is deprecated
self.useSegm = None
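# Illustrative sketch of the evaluation flow described in the usage comment at the top
# of YTVOSeval. The two json paths are placeholders, and YTVOS is assumed to be
# importable from the sibling ytvos module of this package.
def _example_ytvos_evaluation(gt_json="valid.json", results_json="results.json"):
    from .ytvos import YTVOS
    ytvos_gt = YTVOS(gt_json)
    ytvos_dt = ytvos_gt.loadRes(results_json)
    evaluator = YTVOSeval(ytvos_gt, ytvos_dt, iouType='segm')
    evaluator.evaluate()    # per-video matching of detections to ground truth
    evaluator.accumulate()  # fills evaluator.eval['precision'] with shape [T, R, K, A, M]
    evaluator.summarize()   # prints the AP/AR table and fills evaluator.stats
    return evaluator.stats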
| CutLER-main | videocutler/mask2former_video/data_video/datasets/ytvis_api/ytvoseval.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/youtubevos/cocoapi
| CutLER-main | videocutler/mask2former_video/data_video/datasets/ytvis_api/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/youtubevos/cocoapi
__author__ = 'ychfan'
# Interface for accessing the YouTubeVIS dataset.
# The following API functions are defined:
# YTVOS - YTVOS api class that loads YouTubeVIS annotation file and prepare data structures.
# decodeMask - Decode binary mask M encoded via run-length encoding.
# encodeMask - Encode binary mask M using run-length encoding.
# getAnnIds - Get ann ids that satisfy given filter conditions.
# getCatIds - Get cat ids that satisfy given filter conditions.
# getImgIds - Get img ids that satisfy given filter conditions.
# loadAnns - Load anns with the specified ids.
# loadCats - Load cats with the specified ids.
# loadImgs - Load imgs with the specified ids.
# annToMask - Convert segmentation in an annotation to binary mask.
# loadRes - Load algorithm results and create API for accessing them.
# Microsoft COCO Toolbox. version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2014.
# Licensed under the Simplified BSD License [see bsd.txt]
import json
import time
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
import numpy as np
import copy
import itertools
from pycocotools import mask as maskUtils
import os
from collections import defaultdict
import sys
PYTHON_VERSION = sys.version_info[0]
if PYTHON_VERSION == 2:
from urllib import urlretrieve
elif PYTHON_VERSION == 3:
from urllib.request import urlretrieve
def _isArrayLike(obj):
return hasattr(obj, '__iter__') and hasattr(obj, '__len__')
class YTVOS:
def __init__(self, annotation_file=None):
"""
Constructor of Microsoft COCO helper class for reading and visualizing annotations.
:param annotation_file (str): location of annotation file
:param image_folder (str): location to the folder that hosts images.
:return:
"""
# load dataset
self.dataset,self.anns,self.cats,self.vids = dict(),dict(),dict(),dict()
self.vidToAnns, self.catToVids = defaultdict(list), defaultdict(list)
if annotation_file is not None:
print('loading annotations into memory...')
tic = time.time()
dataset = json.load(open(annotation_file, 'r'))
assert type(dataset)==dict, 'annotation file format {} not supported'.format(type(dataset))
print('Done (t={:0.2f}s)'.format(time.time()- tic))
self.dataset = dataset
self.createIndex()
def createIndex(self):
# create index
print('creating index...')
anns, cats, vids = {}, {}, {}
vidToAnns,catToVids = defaultdict(list),defaultdict(list)
if 'annotations' in self.dataset:
for ann in self.dataset['annotations']:
vidToAnns[ann['video_id']].append(ann)
anns[ann['id']] = ann
if 'videos' in self.dataset:
for vid in self.dataset['videos']:
vids[vid['id']] = vid
if 'categories' in self.dataset:
for cat in self.dataset['categories']:
cats[cat['id']] = cat
if 'annotations' in self.dataset and 'categories' in self.dataset:
for ann in self.dataset['annotations']:
catToVids[ann['category_id']].append(ann['video_id'])
print('index created!')
# create class members
self.anns = anns
self.vidToAnns = vidToAnns
self.catToVids = catToVids
self.vids = vids
self.cats = cats
def info(self):
"""
Print information about the annotation file.
:return:
"""
for key, value in self.dataset['info'].items():
print('{}: {}'.format(key, value))
def getAnnIds(self, vidIds=[], catIds=[], areaRng=[], iscrowd=None):
"""
Get ann ids that satisfy given filter conditions. default skips that filter
:param vidIds (int array) : get anns for given vids
catIds (int array) : get anns for given cats
areaRng (float array) : get anns for given area range (e.g. [0 inf])
iscrowd (boolean) : get anns for given crowd label (False or True)
:return: ids (int array) : integer array of ann ids
"""
vidIds = vidIds if _isArrayLike(vidIds) else [vidIds]
catIds = catIds if _isArrayLike(catIds) else [catIds]
if len(vidIds) == len(catIds) == len(areaRng) == 0:
anns = self.dataset['annotations']
else:
if not len(vidIds) == 0:
lists = [self.vidToAnns[vidId] for vidId in vidIds if vidId in self.vidToAnns]
anns = list(itertools.chain.from_iterable(lists))
else:
anns = self.dataset['annotations']
anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]
anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['avg_area'] > areaRng[0] and ann['avg_area'] < areaRng[1]]
if iscrowd is not None:
ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]
else:
ids = [ann['id'] for ann in anns]
return ids
def getCatIds(self, catNms=[], supNms=[], catIds=[]):
"""
Get cat ids that satisfy given filter conditions. default skips that filter.
:param catNms (str array) : get cats for given cat names
:param supNms (str array) : get cats for given supercategory names
:param catIds (int array) : get cats for given cat ids
:return: ids (int array) : integer array of cat ids
"""
catNms = catNms if _isArrayLike(catNms) else [catNms]
supNms = supNms if _isArrayLike(supNms) else [supNms]
catIds = catIds if _isArrayLike(catIds) else [catIds]
if len(catNms) == len(supNms) == len(catIds) == 0:
cats = self.dataset['categories']
else:
cats = self.dataset['categories']
cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]
cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]
cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]
ids = [cat['id'] for cat in cats]
return ids
def getVidIds(self, vidIds=[], catIds=[]):
'''
Get vid ids that satisfy given filter conditions.
:param vidIds (int array) : get vids for given ids
:param catIds (int array) : get vids with all given cats
:return: ids (int array) : integer array of vid ids
'''
vidIds = vidIds if _isArrayLike(vidIds) else [vidIds]
catIds = catIds if _isArrayLike(catIds) else [catIds]
if len(vidIds) == len(catIds) == 0:
ids = self.vids.keys()
else:
ids = set(vidIds)
for i, catId in enumerate(catIds):
if i == 0 and len(ids) == 0:
ids = set(self.catToVids[catId])
else:
ids &= set(self.catToVids[catId])
return list(ids)
def loadAnns(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying anns
:return: anns (object array) : loaded ann objects
"""
if _isArrayLike(ids):
return [self.anns[id] for id in ids]
elif type(ids) == int:
return [self.anns[ids]]
def loadCats(self, ids=[]):
"""
Load cats with the specified ids.
:param ids (int array) : integer ids specifying cats
:return: cats (object array) : loaded cat objects
"""
if _isArrayLike(ids):
return [self.cats[id] for id in ids]
elif type(ids) == int:
return [self.cats[ids]]
def loadVids(self, ids=[]):
"""
Load vids with the specified ids.
:param ids (int array) : integer ids specifying vid
:return: vids (object array) : loaded vid objects
"""
if _isArrayLike(ids):
return [self.vids[id] for id in ids]
elif type(ids) == int:
return [self.vids[ids]]
def loadRes(self, resFile):
"""
Load result file and return a result api object.
:param resFile (str) : file name of result file
:return: res (obj) : result api object
"""
res = YTVOS()
res.dataset['videos'] = [img for img in self.dataset['videos']]
print('Loading and preparing results...')
tic = time.time()
if type(resFile) == str or (PYTHON_VERSION == 2 and type(resFile) == unicode):
anns = json.load(open(resFile))
elif type(resFile) == np.ndarray:
anns = self.loadNumpyAnnotations(resFile)
else:
anns = resFile
assert type(anns) == list, 'results is not an array of objects'
annsVidIds = [ann['video_id'] for ann in anns]
assert set(annsVidIds) == (set(annsVidIds) & set(self.getVidIds())), \
'Results do not correspond to current coco set'
if 'segmentations' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
ann['areas'] = []
if not 'bboxes' in ann:
ann['bboxes'] = []
for seg in ann['segmentations']:
# now only support compressed RLE format as segmentation results
if seg:
ann['areas'].append(maskUtils.area(seg))
if len(ann['bboxes']) < len(ann['areas']):
ann['bboxes'].append(maskUtils.toBbox(seg))
else:
ann['areas'].append(None)
if len(ann['bboxes']) < len(ann['areas']):
ann['bboxes'].append(None)
ann['id'] = id+1
l = [a for a in ann['areas'] if a]
if len(l)==0:
ann['avg_area'] = 0
else:
ann['avg_area'] = np.array(l).mean()
ann['iscrowd'] = 0
print('DONE (t={:0.2f}s)'.format(time.time()- tic))
res.dataset['annotations'] = anns
res.createIndex()
return res
def annToRLE(self, ann, frameId):
"""
Convert annotation which can be polygons, uncompressed RLE to RLE.
:return: RLE encoding of the mask for the given frame
"""
t = self.vids[ann['video_id']]
h, w = t['height'], t['width']
segm = ann['segmentations'][frameId]
if type(segm) == list:
# polygon -- a single object might consist of multiple parts
# we merge all parts into one mask rle code
rles = maskUtils.frPyObjects(segm, h, w)
rle = maskUtils.merge(rles)
elif type(segm['counts']) == list:
# uncompressed RLE
rle = maskUtils.frPyObjects(segm, h, w)
else:
# rle
rle = segm
return rle
def annToMask(self, ann, frameId):
"""
Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.
:return: binary mask (numpy 2D array)
"""
rle = self.annToRLE(ann, frameId)
m = maskUtils.decode(rle)
return m
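# Illustrative sketch of the accessor API above; the annotation path is a placeholder
# and the snippet assumes the first loaded annotation has at least one non-empty frame.
def _example_ytvos_api(annotation_file="instances_train_sub.json"):
    ytvos = YTVOS(annotation_file)
    vid_ids = ytvos.getVidIds()                    # all video ids
    ann_ids = ytvos.getAnnIds(vidIds=vid_ids[:1])  # annotations of the first video
    anns = ytvos.loadAnns(ann_ids)
    first_frame = next(i for i, s in enumerate(anns[0]['segmentations']) if s)
    return ytvos.annToMask(anns[0], first_frame)   # binary mask of that frame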
| CutLER-main | videocutler/mask2former_video/data_video/datasets/ytvis_api/ytvos.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
from contextlib import contextmanager
from functools import wraps
import torch
from torch.cuda.amp import autocast
__all__ = ["retry_if_cuda_oom"]
@contextmanager
def _ignore_torch_cuda_oom():
"""
A context which ignores CUDA OOM exception from pytorch.
"""
try:
yield
except RuntimeError as e:
# NOTE: the string may change?
if "CUDA out of memory. " in str(e):
pass
else:
raise
def retry_if_cuda_oom(func):
"""
Makes a function retry itself after encountering
pytorch's CUDA OOM error.
It will first retry after calling `torch.cuda.empty_cache()`.
If that still fails, it will then retry by trying to convert inputs to CPUs.
In this case, it expects the function to dispatch to CPU implementation.
The return values may become CPU tensors as well and it's user's
responsibility to convert it back to CUDA tensor if needed.
Args:
func: a stateless callable that takes tensor-like objects as arguments
Returns:
a callable which retries `func` if OOM is encountered.
Examples:
::
output = retry_if_cuda_oom(some_torch_function)(input1, input2)
# output may be on CPU even if inputs are on GPU
Note:
1. When converting inputs to CPU, it will only look at each argument and check
if it has `.device` and `.to` for conversion. Nested structures of tensors
are not supported.
2. Since the function might be called more than once, it has to be
stateless.
"""
def maybe_to_cpu(x):
try:
like_gpu_tensor = x.device.type == "cuda" and hasattr(x, "to")
except AttributeError:
like_gpu_tensor = False
if like_gpu_tensor:
return x.to(device="cpu").to(torch.float32)
else:
return x
@wraps(func)
def wrapped(*args, **kwargs):
with _ignore_torch_cuda_oom():
return func(*args, **kwargs)
# Clear cache and retry
torch.cuda.empty_cache()
with _ignore_torch_cuda_oom():
return func(*args, **kwargs)
# Try on CPU. This slows down the code significantly, therefore print a notice.
logger = logging.getLogger(__name__)
logger.info("Attempting to copy inputs to CPU due to CUDA OOM")
new_args = (maybe_to_cpu(x) for x in args)
new_kwargs = {k: maybe_to_cpu(v) for k, v in kwargs.items()}
with autocast(enabled=False):
return func(*new_args, **new_kwargs)
return wrapped
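# Illustrative sketch of wrapping a memory-hungry call. `big_matmul` is a stand-in for
# any stateless tensor function; it is not defined elsewhere in this repository.
def _example_retry_if_cuda_oom():
    def big_matmul(a, b):
        return a @ b
    a = torch.randn(8, 8)
    b = torch.randn(8, 8)
    # If the call OOMs on GPU inputs, the wrapper retries after emptying the CUDA cache,
    # then falls back to CPU float32 inputs, so the result may end up on CPU.
    return retry_if_cuda_oom(big_matmul)(a, b)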
| CutLER-main | videocutler/mask2former_video/utils/memory.py |
# Copyright (c) Facebook, Inc. and its affiliates.
| CutLER-main | videocutler/mask2former_video/utils/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/facebookresearch/detr/blob/master/models/matcher.py
"""
Modules to compute the matching cost and solve the corresponding LSAP.
"""
import torch
import torch.nn.functional as F
from scipy.optimize import linear_sum_assignment
from torch import nn
from torch.cuda.amp import autocast
from detectron2.projects.point_rend.point_features import point_sample
def batch_dice_loss(inputs: torch.Tensor, targets: torch.Tensor):
"""
Compute the DICE loss, similar to generalized IOU for masks
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
"""
inputs = inputs.sigmoid()
inputs = inputs.flatten(1)
numerator = 2 * torch.einsum("nc,mc->nm", inputs, targets)
denominator = inputs.sum(-1)[:, None] + targets.sum(-1)[None, :]
loss = 1 - (numerator + 1) / (denominator + 1)
return loss
batch_dice_loss_jit = torch.jit.script(
batch_dice_loss
) # type: torch.jit.ScriptModule
def batch_sigmoid_ce_loss(inputs: torch.Tensor, targets: torch.Tensor):
"""
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
Returns:
Loss tensor
"""
hw = inputs.shape[1]
pos = F.binary_cross_entropy_with_logits(
inputs, torch.ones_like(inputs), reduction="none"
)
neg = F.binary_cross_entropy_with_logits(
inputs, torch.zeros_like(inputs), reduction="none"
)
loss = torch.einsum("nc,mc->nm", pos, targets) + torch.einsum(
"nc,mc->nm", neg, (1 - targets)
)
return loss / hw
batch_sigmoid_ce_loss_jit = torch.jit.script(
batch_sigmoid_ce_loss
) # type: torch.jit.ScriptModule
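# Illustrative sketch of the pairwise cost shapes: both helpers above take flattened
# predictions [N, P] and flattened binary targets [M, P] (P sampled points) and return
# an [N, M] cost matrix with one entry per (prediction, ground truth) pair. Sizes are
# arbitrary example values.
def _example_pairwise_cost_shapes(num_queries=5, num_gt=3, num_points=16):
    inputs = torch.randn(num_queries, num_points)                 # mask logits
    targets = torch.randint(0, 2, (num_gt, num_points)).float()   # binary labels
    cost_dice = batch_dice_loss(inputs, targets)
    cost_ce = batch_sigmoid_ce_loss(inputs, targets)
    assert cost_dice.shape == cost_ce.shape == (num_queries, num_gt)
    return cost_dice, cost_ce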
class VideoHungarianMatcher(nn.Module):
"""This class computes an assignment between the targets and the predictions of the network
For efficiency reasons, the targets don't include the no_object. Because of this, in general,
there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,
while the others are un-matched (and thus treated as non-objects).
"""
def __init__(self, cost_class: float = 1, cost_mask: float = 1, cost_dice: float = 1, num_points: int = 0):
"""Creates the matcher
Params:
cost_class: This is the relative weight of the classification error in the matching cost
cost_mask: This is the relative weight of the focal loss of the binary mask in the matching cost
cost_dice: This is the relative weight of the dice loss of the binary mask in the matching cost
"""
super().__init__()
self.cost_class = cost_class
self.cost_mask = cost_mask
self.cost_dice = cost_dice
assert cost_class != 0 or cost_mask != 0 or cost_dice != 0, "all costs can't be 0"
self.num_points = num_points
@torch.no_grad()
def memory_efficient_forward(self, outputs, targets):
"""More memory-friendly matching"""
bs, num_queries = outputs["pred_logits"].shape[:2]
indices = []
# Iterate through batch size
for b in range(bs):
out_prob = outputs["pred_logits"][b].softmax(-1) # [num_queries, num_classes]
tgt_ids = targets[b]["labels"]
# Compute the classification cost. Contrary to the loss, we don't use the NLL,
# but approximate it by 1 - proba[target class].
# The 1 is a constant that doesn't change the matching, so it can be omitted.
cost_class = -out_prob[:, tgt_ids]
out_mask = outputs["pred_masks"][b] # [num_queries, T, H_pred, W_pred]
# gt masks are already padded when preparing target
tgt_mask = targets[b]["masks"].to(out_mask) # [num_gts, T, H_pred, W_pred]
# out_mask = out_mask[:, None]
# tgt_mask = tgt_mask[:, None]
# all masks share the same set of points for efficient matching!
point_coords = torch.rand(1, self.num_points, 2, device=out_mask.device)
# get gt labels
tgt_mask = point_sample(
tgt_mask,
point_coords.repeat(tgt_mask.shape[0], 1, 1),
align_corners=False,
).flatten(1)
out_mask = point_sample(
out_mask,
point_coords.repeat(out_mask.shape[0], 1, 1),
align_corners=False,
).flatten(1)
with autocast(enabled=False):
out_mask = out_mask.float()
tgt_mask = tgt_mask.float()
# Compute the focal loss between masks
if out_mask.shape[0] == 0 or tgt_mask.shape[0] == 0:
cost_mask = batch_sigmoid_ce_loss(out_mask, tgt_mask)
cost_dice = batch_dice_loss(out_mask, tgt_mask)
else:
cost_mask = batch_sigmoid_ce_loss_jit(out_mask, tgt_mask)
# Compute the dice loss between masks
cost_dice = batch_dice_loss_jit(out_mask, tgt_mask)
# Final cost matrix
C = (
self.cost_mask * cost_mask
+ self.cost_class * cost_class
+ self.cost_dice * cost_dice
)
C = C.reshape(num_queries, -1).cpu()
indices.append(linear_sum_assignment(C))
return [
(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64))
for i, j in indices
]
@torch.no_grad()
def forward(self, outputs, targets):
"""Performs the matching
Params:
outputs: This is a dict that contains at least these entries:
"pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits
"pred_masks": Tensor of dim [batch_size, num_queries, H_pred, W_pred] with the predicted masks
targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:
"labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth
objects in the target) containing the class labels
"masks": Tensor of dim [num_target_boxes, H_gt, W_gt] containing the target masks
Returns:
A list of size batch_size, containing tuples of (index_i, index_j) where:
- index_i is the indices of the selected predictions (in order)
- index_j is the indices of the corresponding selected targets (in order)
For each batch element, it holds:
len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
"""
return self.memory_efficient_forward(outputs, targets)
def __repr__(self, _repr_indent=4):
head = "Matcher " + self.__class__.__name__
body = [
"cost_class: {}".format(self.cost_class),
"cost_mask: {}".format(self.cost_mask),
"cost_dice: {}".format(self.cost_dice),
]
lines = [head] + [" " * _repr_indent + line for line in body]
return "\n".join(lines)
| CutLER-main | videocutler/mask2former_video/modeling/matcher.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from .transformer_decoder.video_mask2former_transformer_decoder import VideoMultiScaleMaskedTransformerDecoder
| CutLER-main | videocutler/mask2former_video/modeling/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/facebookresearch/detr/blob/master/models/detr.py
"""
MaskFormer criterion.
"""
import logging
import torch
import torch.nn.functional as F
from torch import nn
from detectron2.utils.comm import get_world_size
from detectron2.projects.point_rend.point_features import (
get_uncertain_point_coords_with_randomness,
point_sample,
)
from mask2former.utils.misc import is_dist_avail_and_initialized
def dice_loss(
inputs: torch.Tensor,
targets: torch.Tensor,
num_masks: float,
):
"""
Compute the DICE loss, similar to generalized IOU for masks
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
"""
inputs = inputs.sigmoid()
inputs = inputs.flatten(1)
numerator = 2 * (inputs * targets).sum(-1)
denominator = inputs.sum(-1) + targets.sum(-1)
loss = 1 - (numerator + 1) / (denominator + 1)
return loss.sum() / num_masks
dice_loss_jit = torch.jit.script(
dice_loss
) # type: torch.jit.ScriptModule
def sigmoid_ce_loss(
inputs: torch.Tensor,
targets: torch.Tensor,
num_masks: float,
):
"""
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
Returns:
Loss tensor
"""
loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
return loss.mean(1).sum() / num_masks
sigmoid_ce_loss_jit = torch.jit.script(
sigmoid_ce_loss
) # type: torch.jit.ScriptModule
def calculate_uncertainty(logits):
"""
We estimate uncertainty as L1 distance between 0.0 and the logit prediction in 'logits' for the
foreground class in `classes`.
Args:
logits (Tensor): A tensor of shape (R, 1, ...) for class-specific or
class-agnostic masks, where R is the total number of predicted masks in all images.
The values are logits.
Returns:
scores (Tensor): A tensor of shape (R, 1, ...) that contains uncertainty scores with
the most uncertain locations having the highest uncertainty score.
"""
assert logits.shape[1] == 1
gt_class_logits = logits.clone()
return -(torch.abs(gt_class_logits))
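# Illustrative sketch: uncertainty is just the negated absolute logit, so points whose
# logits sit closest to the 0.0 decision boundary receive the highest scores.
def _example_uncertainty_ranking():
    logits = torch.tensor([[[-4.0, -0.1, 0.05, 3.0]]])  # shape (R=1, 1, P=4)
    scores = calculate_uncertainty(logits)
    most_uncertain = scores.argmax(dim=-1)  # index of the logit closest to 0.0
    assert most_uncertain.item() == 2
    return scores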
class VideoSetCriterion(nn.Module):
"""This class computes the loss for DETR.
The process happens in two steps:
1) we compute hungarian assignment between ground truth boxes and the outputs of the model
2) we supervise each pair of matched ground-truth / prediction (supervise class and box)
"""
def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses,
num_points, oversample_ratio, importance_sample_ratio):
"""Create the criterion.
Parameters:
num_classes: number of object categories, omitting the special no-object category
matcher: module able to compute a matching between targets and proposals
weight_dict: dict containing as key the names of the losses and as values their relative weight.
eos_coef: relative classification weight applied to the no-object category
losses: list of all the losses to be applied. See get_loss for list of available losses.
"""
super().__init__()
self.num_classes = num_classes
self.matcher = matcher
self.weight_dict = weight_dict
self.eos_coef = eos_coef
self.losses = losses
empty_weight = torch.ones(self.num_classes + 1)
empty_weight[-1] = self.eos_coef
self.register_buffer("empty_weight", empty_weight)
# pointwise mask loss parameters
self.num_points = num_points
self.oversample_ratio = oversample_ratio
self.importance_sample_ratio = importance_sample_ratio
def loss_labels(self, outputs, targets, indices, num_masks):
"""Classification loss (NLL)
targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
"""
assert "pred_logits" in outputs
src_logits = outputs["pred_logits"].float()
idx = self._get_src_permutation_idx(indices)
target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)])
target_classes = torch.full(
src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device
)
target_classes[idx] = target_classes_o
loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight)
losses = {"loss_ce": loss_ce}
return losses
def loss_masks(self, outputs, targets, indices, num_masks):
"""Compute the losses related to the masks: the focal loss and the dice loss.
targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w]
"""
assert "pred_masks" in outputs
src_idx = self._get_src_permutation_idx(indices)
src_masks = outputs["pred_masks"]
src_masks = src_masks[src_idx]
# Modified to handle video
target_masks = torch.cat([t['masks'][i] for t, (_, i) in zip(targets, indices)]).to(src_masks)
# No need to upsample predictions as we are using normalized coordinates :)
# NT x 1 x H x W
src_masks = src_masks.flatten(0, 1)[:, None]
target_masks = target_masks.flatten(0, 1)[:, None]
with torch.no_grad():
# sample point_coords
point_coords = get_uncertain_point_coords_with_randomness(
src_masks,
lambda logits: calculate_uncertainty(logits),
self.num_points,
self.oversample_ratio,
self.importance_sample_ratio,
)
# get gt labels
point_labels = point_sample(
target_masks,
point_coords,
align_corners=False,
).squeeze(1)
point_logits = point_sample(
src_masks,
point_coords,
align_corners=False,
).squeeze(1)
losses = {
"loss_mask": sigmoid_ce_loss_jit(point_logits, point_labels, num_masks),
"loss_dice": dice_loss_jit(point_logits, point_labels, num_masks),
}
del src_masks
del target_masks
return losses
def _get_src_permutation_idx(self, indices):
# permute predictions following indices
batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])
src_idx = torch.cat([src for (src, _) in indices])
return batch_idx, src_idx
def _get_tgt_permutation_idx(self, indices):
# permute targets following indices
batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])
tgt_idx = torch.cat([tgt for (_, tgt) in indices])
return batch_idx, tgt_idx
def get_loss(self, loss, outputs, targets, indices, num_masks):
loss_map = {
'labels': self.loss_labels,
'masks': self.loss_masks,
}
assert loss in loss_map, f"do you really want to compute {loss} loss?"
return loss_map[loss](outputs, targets, indices, num_masks)
def forward(self, outputs, targets):
"""This performs the loss computation.
Parameters:
outputs: dict of tensors, see the output specification of the model for the format
targets: list of dicts, such that len(targets) == batch_size.
The expected keys in each dict depends on the losses applied, see each loss' doc
"""
outputs_without_aux = {k: v for k, v in outputs.items() if k != "aux_outputs"}
# Retrieve the matching between the outputs of the last layer and the targets
indices = self.matcher(outputs_without_aux, targets)
# Compute the average number of target boxes across all nodes, for normalization purposes
num_masks = sum(len(t["labels"]) for t in targets)
num_masks = torch.as_tensor(
[num_masks], dtype=torch.float, device=next(iter(outputs.values())).device
)
if is_dist_avail_and_initialized():
torch.distributed.all_reduce(num_masks)
num_masks = torch.clamp(num_masks / get_world_size(), min=1).item()
# Compute all the requested losses
losses = {}
for loss in self.losses:
losses.update(self.get_loss(loss, outputs, targets, indices, num_masks))
# In case of auxiliary losses, we repeat this process with the output of each intermediate layer.
if "aux_outputs" in outputs:
for i, aux_outputs in enumerate(outputs["aux_outputs"]):
indices = self.matcher(aux_outputs, targets)
for loss in self.losses:
l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_masks)
l_dict = {k + f"_{i}": v for k, v in l_dict.items()}
losses.update(l_dict)
return losses
def __repr__(self):
head = "Criterion " + self.__class__.__name__
body = [
"matcher: {}".format(self.matcher.__repr__(_repr_indent=8)),
"losses: {}".format(self.losses),
"weight_dict: {}".format(self.weight_dict),
"num_classes: {}".format(self.num_classes),
"eos_coef: {}".format(self.eos_coef),
"num_points: {}".format(self.num_points),
"oversample_ratio: {}".format(self.oversample_ratio),
"importance_sample_ratio: {}".format(self.importance_sample_ratio),
]
_repr_indent = 4
lines = [head] + [" " * _repr_indent + line for line in body]
return "\n".join(lines)
| CutLER-main | videocutler/mask2former_video/modeling/criterion.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# # Modified by Bowen Cheng from: https://github.com/facebookresearch/detr/blob/master/models/position_encoding.py
"""
Various positional encodings for the transformer.
"""
import math
import torch
from torch import nn
class PositionEmbeddingSine3D(nn.Module):
"""
This is a more standard version of the position embedding, very similar to the one
used by the Attention is all you need paper, generalized to work on images.
"""
def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
super().__init__()
self.num_pos_feats = num_pos_feats
self.temperature = temperature
self.normalize = normalize
if scale is not None and normalize is False:
raise ValueError("normalize should be True if scale is passed")
if scale is None:
scale = 2 * math.pi
self.scale = scale
def forward(self, x, mask=None):
# b, t, c, h, w
assert x.dim() == 5, f"{x.shape} should be a 5-dimensional Tensor, got {x.dim()}-dimensional Tensor instead"
if mask is None:
mask = torch.zeros((x.size(0), x.size(1), x.size(3), x.size(4)), device=x.device, dtype=torch.bool)
not_mask = ~mask
z_embed = not_mask.cumsum(1, dtype=torch.float32)
y_embed = not_mask.cumsum(2, dtype=torch.float32)
x_embed = not_mask.cumsum(3, dtype=torch.float32)
if self.normalize:
eps = 1e-6
z_embed = z_embed / (z_embed[:, -1:, :, :] + eps) * self.scale
y_embed = y_embed / (y_embed[:, :, -1:, :] + eps) * self.scale
x_embed = x_embed / (x_embed[:, :, :, -1:] + eps) * self.scale
dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
dim_t_z = torch.arange((self.num_pos_feats * 2), dtype=torch.float32, device=x.device)
dim_t_z = self.temperature ** (2 * (dim_t_z // 2) / (self.num_pos_feats * 2))
pos_x = x_embed[:, :, :, :, None] / dim_t
pos_y = y_embed[:, :, :, :, None] / dim_t
pos_z = z_embed[:, :, :, :, None] / dim_t_z
pos_x = torch.stack((pos_x[:, :, :, :, 0::2].sin(), pos_x[:, :, :, :, 1::2].cos()), dim=5).flatten(4)
pos_y = torch.stack((pos_y[:, :, :, :, 0::2].sin(), pos_y[:, :, :, :, 1::2].cos()), dim=5).flatten(4)
pos_z = torch.stack((pos_z[:, :, :, :, 0::2].sin(), pos_z[:, :, :, :, 1::2].cos()), dim=5).flatten(4)
pos = (torch.cat((pos_y, pos_x), dim=4) + pos_z).permute(0, 1, 4, 2, 3) # b, t, c, h, w
return pos
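# Illustrative sketch of the expected tensor layout: the module takes (B, T, C, H, W)
# features and returns a positional encoding with the same layout, where C must equal
# 2 * num_pos_feats. The sizes below are arbitrary example values.
def _example_position_embedding_3d(b=2, t=3, c=256, h=8, w=8):
    pos_embed = PositionEmbeddingSine3D(num_pos_feats=c // 2, normalize=True)
    features = torch.zeros(b, t, c, h, w)
    pos = pos_embed(features)
    assert pos.shape == (b, t, c, h, w)
    return pos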
| CutLER-main | videocutler/mask2former_video/modeling/transformer_decoder/position_encoding.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from .video_mask2former_transformer_decoder import VideoMultiScaleMaskedTransformerDecoder
| CutLER-main | videocutler/mask2former_video/modeling/transformer_decoder/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from: https://github.com/facebookresearch/detr/blob/master/models/detr.py
import logging
import fvcore.nn.weight_init as weight_init
from typing import Optional
import torch
from torch import nn, Tensor
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.layers import Conv2d
from mask2former.modeling.transformer_decoder.maskformer_transformer_decoder import TRANSFORMER_DECODER_REGISTRY
from .position_encoding import PositionEmbeddingSine3D
class SelfAttentionLayer(nn.Module):
def __init__(self, d_model, nhead, dropout=0.0,
activation="relu", normalize_before=False):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.norm = nn.LayerNorm(d_model)
self.dropout = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
self._reset_parameters()
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self, tgt,
tgt_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
q = k = self.with_pos_embed(tgt, query_pos)
tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout(tgt2)
tgt = self.norm(tgt)
return tgt
def forward_pre(self, tgt,
tgt_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
tgt2 = self.norm(tgt)
q = k = self.with_pos_embed(tgt2, query_pos)
tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout(tgt2)
return tgt
def forward(self, tgt,
tgt_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
if self.normalize_before:
return self.forward_pre(tgt, tgt_mask,
tgt_key_padding_mask, query_pos)
return self.forward_post(tgt, tgt_mask,
tgt_key_padding_mask, query_pos)
class CrossAttentionLayer(nn.Module):
def __init__(self, d_model, nhead, dropout=0.0,
activation="relu", normalize_before=False):
super().__init__()
self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.norm = nn.LayerNorm(d_model)
self.dropout = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
self._reset_parameters()
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self, tgt, memory,
memory_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)[0]
tgt = tgt + self.dropout(tgt2)
tgt = self.norm(tgt)
return tgt
def forward_pre(self, tgt, memory,
memory_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
tgt2 = self.norm(tgt)
tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)[0]
tgt = tgt + self.dropout(tgt2)
return tgt
def forward(self, tgt, memory,
memory_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
if self.normalize_before:
return self.forward_pre(tgt, memory, memory_mask,
memory_key_padding_mask, pos, query_pos)
return self.forward_post(tgt, memory, memory_mask,
memory_key_padding_mask, pos, query_pos)
class FFNLayer(nn.Module):
def __init__(self, d_model, dim_feedforward=2048, dropout=0.0,
activation="relu", normalize_before=False):
super().__init__()
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm = nn.LayerNorm(d_model)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
self._reset_parameters()
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self, tgt):
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout(tgt2)
tgt = self.norm(tgt)
return tgt
def forward_pre(self, tgt):
tgt2 = self.norm(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
tgt = tgt + self.dropout(tgt2)
return tgt
def forward(self, tgt):
if self.normalize_before:
return self.forward_pre(tgt)
return self.forward_post(tgt)
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
    raise RuntimeError(f"activation should be relu/gelu/glu, not {activation}.")
class MLP(nn.Module):
""" Very simple multi-layer perceptron (also called FFN)"""
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
def forward(self, x):
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
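# Quick sketch of the MLP above: num_layers counts the Linear layers, so
# MLP(256, 256, 32, 3) builds 256->256, 256->256, 256->32 with ReLU applied
# after every layer except the last. The shapes below are toy values, not
# model settings.
if __name__ == "__main__":
    _mlp = MLP(input_dim=256, hidden_dim=256, output_dim=32, num_layers=3)
    _out = _mlp(torch.randn(4, 256))
    print(_out.shape)  # torch.Size([4, 32])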
@TRANSFORMER_DECODER_REGISTRY.register()
class VideoMultiScaleMaskedTransformerDecoder(nn.Module):
_version = 2
def _load_from_state_dict(
self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
):
version = local_metadata.get("version", None)
if version is None or version < 2:
            # Do not warn if training from scratch
scratch = True
logger = logging.getLogger(__name__)
for k in list(state_dict.keys()):
newk = k
if "static_query" in k:
newk = k.replace("static_query", "query_feat")
if newk != k:
state_dict[newk] = state_dict[k]
del state_dict[k]
scratch = False
if not scratch:
logger.warning(
f"Weight format of {self.__class__.__name__} have changed! "
"Please upgrade your models. Applying automatic conversion now ..."
)
@configurable
def __init__(
self,
in_channels,
mask_classification=True,
*,
num_classes: int,
hidden_dim: int,
num_queries: int,
nheads: int,
dim_feedforward: int,
dec_layers: int,
pre_norm: bool,
mask_dim: int,
enforce_input_project: bool,
# video related
num_frames,
):
"""
NOTE: this interface is experimental.
Args:
in_channels: channels of the input features
mask_classification: whether to add mask classifier or not
num_classes: number of classes
hidden_dim: Transformer feature dimension
num_queries: number of queries
nheads: number of heads
dim_feedforward: feature dimension in feedforward network
enc_layers: number of Transformer encoder layers
dec_layers: number of Transformer decoder layers
pre_norm: whether to use pre-LayerNorm or not
mask_dim: mask feature dimension
enforce_input_project: add input project 1x1 conv even if input
channels and hidden dim is identical
"""
super().__init__()
assert mask_classification, "Only support mask classification model"
self.mask_classification = mask_classification
self.num_frames = num_frames
# positional encoding
N_steps = hidden_dim // 2
self.pe_layer = PositionEmbeddingSine3D(N_steps, normalize=True)
# define Transformer decoder here
self.num_heads = nheads
self.num_layers = dec_layers
self.transformer_self_attention_layers = nn.ModuleList()
self.transformer_cross_attention_layers = nn.ModuleList()
self.transformer_ffn_layers = nn.ModuleList()
for _ in range(self.num_layers):
self.transformer_self_attention_layers.append(
SelfAttentionLayer(
d_model=hidden_dim,
nhead=nheads,
dropout=0.0,
normalize_before=pre_norm,
)
)
self.transformer_cross_attention_layers.append(
CrossAttentionLayer(
d_model=hidden_dim,
nhead=nheads,
dropout=0.0,
normalize_before=pre_norm,
)
)
self.transformer_ffn_layers.append(
FFNLayer(
d_model=hidden_dim,
dim_feedforward=dim_feedforward,
dropout=0.0,
normalize_before=pre_norm,
)
)
self.decoder_norm = nn.LayerNorm(hidden_dim)
self.num_queries = num_queries
# learnable query features
self.query_feat = nn.Embedding(num_queries, hidden_dim)
# learnable query p.e.
self.query_embed = nn.Embedding(num_queries, hidden_dim)
# level embedding (we always use 3 scales)
self.num_feature_levels = 3
self.level_embed = nn.Embedding(self.num_feature_levels, hidden_dim)
self.input_proj = nn.ModuleList()
for _ in range(self.num_feature_levels):
if in_channels != hidden_dim or enforce_input_project:
self.input_proj.append(Conv2d(in_channels, hidden_dim, kernel_size=1))
weight_init.c2_xavier_fill(self.input_proj[-1])
else:
self.input_proj.append(nn.Sequential())
# output FFNs
if self.mask_classification:
self.class_embed = nn.Linear(hidden_dim, num_classes + 1)
self.mask_embed = MLP(hidden_dim, hidden_dim, mask_dim, 3)
@classmethod
def from_config(cls, cfg, in_channels, mask_classification):
ret = {}
ret["in_channels"] = in_channels
ret["mask_classification"] = mask_classification
ret["num_classes"] = cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES
ret["hidden_dim"] = cfg.MODEL.MASK_FORMER.HIDDEN_DIM
ret["num_queries"] = cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES
# Transformer parameters:
ret["nheads"] = cfg.MODEL.MASK_FORMER.NHEADS
ret["dim_feedforward"] = cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD
        # NOTE: because we add learnable query features which require supervision,
        # we subtract 1 from the configured number of decoder layers to stay
        # consistent with our loss implementation: the number of auxiliary losses
        # always equals the number of decoder layers. Counting the prediction made
        # on the learnable queries before the first layer, predictions are produced
        # dec_layers + 1 times.
assert cfg.MODEL.MASK_FORMER.DEC_LAYERS >= 1
ret["dec_layers"] = cfg.MODEL.MASK_FORMER.DEC_LAYERS - 1
ret["pre_norm"] = cfg.MODEL.MASK_FORMER.PRE_NORM
ret["enforce_input_project"] = cfg.MODEL.MASK_FORMER.ENFORCE_INPUT_PROJ
ret["mask_dim"] = cfg.MODEL.SEM_SEG_HEAD.MASK_DIM
ret["num_frames"] = cfg.INPUT.SAMPLING_FRAME_NUM
return ret
def forward(self, x, mask_features, mask = None):
bt, c_m, h_m, w_m = mask_features.shape
bs = bt // self.num_frames if self.training else 1
t = bt // bs
mask_features = mask_features.view(bs, t, c_m, h_m, w_m)
# x is a list of multi-scale feature
assert len(x) == self.num_feature_levels
src = []
pos = []
size_list = []
# disable mask, it does not affect performance
del mask
for i in range(self.num_feature_levels):
size_list.append(x[i].shape[-2:])
pos.append(self.pe_layer(x[i].view(bs, t, -1, size_list[-1][0], size_list[-1][1]), None).flatten(3))
src.append(self.input_proj[i](x[i]).flatten(2) + self.level_embed.weight[i][None, :, None])
# NTxCxHW => NxTxCxHW => (TxHW)xNxC
_, c, hw = src[-1].shape
pos[-1] = pos[-1].view(bs, t, c, hw).permute(1, 3, 0, 2).flatten(0, 1)
src[-1] = src[-1].view(bs, t, c, hw).permute(1, 3, 0, 2).flatten(0, 1)
# QxNxC
query_embed = self.query_embed.weight.unsqueeze(1).repeat(1, bs, 1)
output = self.query_feat.weight.unsqueeze(1).repeat(1, bs, 1)
predictions_class = []
predictions_mask = []
# prediction heads on learnable query features
outputs_class, outputs_mask, attn_mask = self.forward_prediction_heads(output, mask_features, attn_mask_target_size=size_list[0])
predictions_class.append(outputs_class)
predictions_mask.append(outputs_mask)
for i in range(self.num_layers):
level_index = i % self.num_feature_levels
attn_mask[torch.where(attn_mask.sum(-1) == attn_mask.shape[-1])] = False
# attention: cross-attention first
output = self.transformer_cross_attention_layers[i](
output, src[level_index],
memory_mask=attn_mask,
memory_key_padding_mask=None, # here we do not apply masking on padded region
pos=pos[level_index], query_pos=query_embed
)
output = self.transformer_self_attention_layers[i](
output, tgt_mask=None,
tgt_key_padding_mask=None,
query_pos=query_embed
)
# FFN
output = self.transformer_ffn_layers[i](
output
)
outputs_class, outputs_mask, attn_mask = self.forward_prediction_heads(output, mask_features, attn_mask_target_size=size_list[(i + 1) % self.num_feature_levels])
predictions_class.append(outputs_class)
predictions_mask.append(outputs_mask)
assert len(predictions_class) == self.num_layers + 1
out = {
'pred_logits': predictions_class[-1],
'pred_masks': predictions_mask[-1],
'aux_outputs': self._set_aux_loss(
predictions_class if self.mask_classification else None, predictions_mask
)
}
return out
def forward_prediction_heads(self, output, mask_features, attn_mask_target_size):
decoder_output = self.decoder_norm(output)
decoder_output = decoder_output.transpose(0, 1)
outputs_class = self.class_embed(decoder_output)
mask_embed = self.mask_embed(decoder_output)
outputs_mask = torch.einsum("bqc,btchw->bqthw", mask_embed, mask_features)
b, q, t, _, _ = outputs_mask.shape
# NOTE: prediction is of higher-resolution
# [B, Q, T, H, W] -> [B, Q, T*H*W] -> [B, h, Q, T*H*W] -> [B*h, Q, T*HW]
attn_mask = F.interpolate(outputs_mask.flatten(0, 1), size=attn_mask_target_size, mode="bilinear", align_corners=False).view(
b, q, t, attn_mask_target_size[0], attn_mask_target_size[1])
# must use bool type
# If a BoolTensor is provided, positions with ``True`` are not allowed to attend while ``False`` values will be unchanged.
attn_mask = (attn_mask.sigmoid().flatten(2).unsqueeze(1).repeat(1, self.num_heads, 1, 1).flatten(0, 1) < 0.5).bool()
attn_mask = attn_mask.detach()
return outputs_class, outputs_mask, attn_mask
@torch.jit.unused
def _set_aux_loss(self, outputs_class, outputs_seg_masks):
# this is a workaround to make torchscript happy, as torchscript
# doesn't support dictionary with non-homogeneous values, such
# as a dict having both a Tensor and a list.
if self.mask_classification:
return [
{"pred_logits": a, "pred_masks": b}
for a, b in zip(outputs_class[:-1], outputs_seg_masks[:-1])
]
else:
return [{"pred_masks": b} for b in outputs_seg_masks[:-1]]
| CutLER-main | videocutler/mask2former_video/modeling/transformer_decoder/video_mask2former_transformer_decoder.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
from .train_loop import *
__all__ = [k for k in globals().keys() if not k.startswith("_")]
from .defaults import * | CutLER-main | videocutler/mask2former_video/engine/__init__.py |
# -*- coding: utf-8 -*-
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Modified by XuDong Wang from https://github.com/facebookresearch/detectron2/blob/main/detectron2/engine/train_loop.py
import torch
from torch.nn.parallel import DataParallel, DistributedDataParallel
import numpy as np
import time
import torch
from torch.nn.parallel import DataParallel, DistributedDataParallel
import copy
import random
import torch.nn.functional as F
from detectron2.structures.instances import Instances
from detectron2.structures import BitMasks
from detectron2.engine import SimpleTrainer
__all__ = ["CustomSimpleTrainer", "CustomAMPTrainer"]
class CustomSimpleTrainer(SimpleTrainer):
"""
A simple trainer for the most common type of task:
single-cost single-optimizer single-data-source iterative optimization,
optionally using data-parallelism.
It assumes that every step, you:
1. Compute the loss with a data from the data_loader.
2. Compute the gradients with the above loss.
3. Update the model with the optimizer.
All other tasks during training (checkpointing, logging, evaluation, LR schedule)
are maintained by hooks, which can be registered by :meth:`TrainerBase.register_hooks`.
If you want to do anything fancier than this,
either subclass TrainerBase and implement your own `run_step`,
or write your own training loop.
"""
def __init__(self, model, data_loader, optimizer, cfg=None, use_copy_paste=False,
copy_paste_rate=-1, copy_paste_random_num=None, copy_paste_min_ratio=-1,
copy_paste_max_ratio=-1, visualize_copy_paste=False):
"""
Args:
            model: a torch Module. Takes data from data_loader and returns a
                dict of losses.
data_loader: an iterable. Contains data to be used to call model.
optimizer: a torch optimizer.
"""
super().__init__(model, data_loader, optimizer)
"""
We set the model to training mode in the trainer.
However it's valid to train a model that's in eval mode.
If you want your model (or a submodule of it) to behave
like evaluation during training, you can overwrite its train() method.
"""
self.cfg = cfg
# model.train()
# self.model = model
# self.data_loader = data_loader
# to access the data loader iterator, call `self._data_loader_iter`
# self._data_loader_iter_obj = None
# self.optimizer = optimizer
self.use_copy_paste = use_copy_paste if self.cfg is None else self.cfg.DATALOADER.COPY_PASTE
self.cfg_COPY_PASTE_RATE = copy_paste_rate if self.cfg is None else self.cfg.DATALOADER.COPY_PASTE_RATE
self.cfg_COPY_PASTE_RANDOM_NUM = copy_paste_random_num if self.cfg is None else self.cfg.DATALOADER.COPY_PASTE_RANDOM_NUM
self.cfg_COPY_PASTE_MIN_RATIO = copy_paste_min_ratio if self.cfg is None else self.cfg.DATALOADER.COPY_PASTE_MIN_RATIO
self.cfg_COPY_PASTE_MAX_RATIO = copy_paste_max_ratio if self.cfg is None else self.cfg.DATALOADER.COPY_PASTE_MAX_RATIO
self.cfg_VISUALIZE_COPY_PASTE = visualize_copy_paste if self.cfg is None else self.cfg.DATALOADER.VISUALIZE_COPY_PASTE
print("copy_paste hyper-params:", self.use_copy_paste, self.cfg_COPY_PASTE_RATE, self.cfg_COPY_PASTE_RANDOM_NUM)
def IoU(self, mask1, mask2): # only work when the batch size is 1
mask1, mask2 = (mask1>0.5).to(torch.bool), (mask2>0.5).to(torch.bool)
intersection = torch.sum(mask1 * (mask1 == mask2), dim=[-1, -2]).squeeze()
union = torch.sum(mask1 + mask2, dim=[-1, -2]).squeeze()
return (intersection.to(torch.float) / union).mean().view(1, -1)
def IoY(self, mask1, mask2): # only work when the batch size is 1
# print(mask1.size(), mask2.size())
mask1, mask2 = mask1.squeeze(), mask2.squeeze()
mask1, mask2 = (mask1>0.5).to(torch.bool), (mask2>0.5).to(torch.bool)
intersection = torch.sum(mask1 * (mask1 == mask2), dim=[-1, -2]).squeeze()
union = torch.sum(mask2, dim=[-1, -2]).squeeze()
return (intersection.to(torch.float) / union).mean().view(1, -1)
def copy_and_paste(self, labeled_data, unlabeled_data):
# print("batch size: ", len(labeled_data))
new_unlabeled_data = []
def mask_iou_matrix(x, y, mode='iou'):
x = x.reshape(x.shape[0], -1).float()
y = y.reshape(y.shape[0], -1).float()
inter_matrix = x @ y.transpose(1, 0) # n1xn2
sum_x = x.sum(1)[:, None].expand(x.shape[0], y.shape[0])
sum_y = y.sum(1)[None, :].expand(x.shape[0], y.shape[0])
if mode == 'ioy':
iou_matrix = inter_matrix / (sum_y) # [1, 1]
else:
iou_matrix = inter_matrix / (sum_x + sum_y - inter_matrix) # [1, 1]
return iou_matrix
def visualize_data(data, save_path = './sample.jpg'):
from detectron2.data import detection_utils as utils
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.utils.visualizer import Visualizer
data["instances"] = data["instances"].to(device='cpu')
img = data["image"].permute(1, 2, 0).cpu().detach().numpy()
img = utils.convert_image_to_rgb(img, 'RGB')
metadata = MetadataCatalog.get('ytvis_2019_train_cls_agnostic')
visualizer = Visualizer(img, metadata=metadata, scale=1.0)
target_fields = data["instances"].get_fields()
labels = [metadata.thing_classes[i] for i in target_fields["gt_classes"]]
vis = visualizer.overlay_instances(
labels=labels,
boxes=target_fields.get("gt_boxes"), # ("gt_boxes", None),
masks=target_fields.get("gt_masks"), # ("gt_masks", None),
keypoints=target_fields.get("gt_keypoints", None),
)
print("Saving to {} ...".format(save_path))
vis.save(save_path)
for cur_labeled_data, cur_unlabeled_data in zip(labeled_data, unlabeled_data):
# print("keys: ", [key for key in cur_labeled_data])
# data dict is created by mask2former_video/data_video/dataset_mapper.py
# data['instances']: list of instances [[frame1 instances],[frame2 instances]...[frameN instances]]
cur_labeled_instances = cur_labeled_data["instances"]
# print('num frames is {}; num instances is {}'.format(len(cur_labeled_instances), len(cur_labeled_instances[0])))
cur_labeled_image_list = cur_labeled_data["image"]
# print('num images is: ', len(cur_labeled_image_list))
cur_unlabeled_instances_list = cur_unlabeled_data["instances"]
cur_unlabeled_image_list = cur_unlabeled_data["image"]
num_labeled_instances = len(cur_labeled_instances[0])
# num_labeled_instances = len(cur_labeled_data["instances"][0])
copy_paste_rate = random.random()
if self.cfg_COPY_PASTE_RATE >= copy_paste_rate and num_labeled_instances > 0:
if self.cfg_COPY_PASTE_RANDOM_NUM:
num_copy = 1 if num_labeled_instances == 1 else np.random.randint(1, max(1, num_labeled_instances))
else:
num_copy = num_labeled_instances
else:
num_copy = 0
if num_labeled_instances == 0 or num_copy == 0:
new_unlabeled_data.append(cur_unlabeled_data)
else:
choice = np.random.choice(num_labeled_instances, num_copy, replace=False)
# print("num_labeled_instances: {}; num_copy: {}; choice: {}".format(num_labeled_instances, num_copy, choice))
# randomly choose instances from the first frame and copy all these selected instances
frame_id = np.random.randint(1, max(1, len(cur_labeled_instances))) - 1
copied_instances = cur_labeled_instances[frame_id][choice].to(device=cur_unlabeled_instances_list[frame_id].gt_boxes.device)
# paste these instances to ALL frames in the same video
# print("copied_instances: ", len(copied_instances), copied_instances)
# print("copied to: ", len(cur_unlabeled_instances_list), cur_unlabeled_instances_list)
cur_unlabeled_instances_list_new = []
cur_unlabeled_image_list_new = []
for f in range(len(cur_unlabeled_instances_list)):
cur_unlabeled_instances = cur_unlabeled_instances_list[f]
cur_unlabeled_image = cur_unlabeled_image_list[f]
copied_masks = copied_instances.gt_masks
copied_boxes = copied_instances.gt_boxes
_, labeled_h, labeled_w = cur_labeled_image_list[frame_id].shape
_, unlabeled_h, unlabeled_w = cur_unlabeled_image.shape
# rescale the labeled image to align with unlabeled one.
if isinstance(copied_masks, torch.Tensor):
masks_new = copied_masks[None, ...].float()
else:
masks_new = copied_masks.tensor[None, ...].float()
# resize the masks with a random ratio from 0.5 to 1.0
resize_ratio = random.uniform(self.cfg_COPY_PASTE_MIN_RATIO, self.cfg_COPY_PASTE_MAX_RATIO)
w_new = int(resize_ratio * unlabeled_w)
h_new = int(resize_ratio * unlabeled_h)
                    # randomly shift the masks, so that the masks are not always in the center of the image
w_shift = random.randint(0, max(0, unlabeled_w - w_new))
h_shift = random.randint(0, max(0, unlabeled_h - h_new))
cur_labeled_image_new = F.interpolate(cur_labeled_image_list[frame_id][None, ...].float(), size=(h_new, w_new), mode="bilinear", align_corners=False).byte().squeeze(0)
if isinstance(copied_masks, torch.Tensor):
masks_new = F.interpolate(copied_masks[None, ...].float(), size=(h_new, w_new), mode="bilinear", align_corners=False).bool().squeeze(0)
else:
masks_new = F.interpolate(copied_masks.tensor[None, ...].float(), size=(h_new, w_new), mode="bilinear", align_corners=False).bool().squeeze(0)
copied_boxes.scale(1. * unlabeled_w / labeled_w * resize_ratio, 1. * unlabeled_h / labeled_h * resize_ratio)
if isinstance(cur_unlabeled_instances.gt_masks, torch.Tensor):
_, mask_w, mask_h = cur_unlabeled_instances.gt_masks.size()
else:
_, mask_w, mask_h = cur_unlabeled_instances.gt_masks.tensor.size()
masks_new_all = torch.zeros(num_copy, mask_w, mask_h)
image_new_all = torch.zeros_like(cur_unlabeled_image)
image_new_all[:, h_shift:h_shift+h_new, w_shift:w_shift+w_new] += cur_labeled_image_new
masks_new_all[:, h_shift:h_shift+h_new, w_shift:w_shift+w_new] += masks_new
cur_labeled_image = image_new_all.byte() #.squeeze(0)
if isinstance(copied_masks, torch.Tensor):
copied_masks = masks_new_all.bool() #.squeeze(0)
else:
copied_masks.tensor = masks_new_all.bool() #.squeeze(0)
copied_boxes.tensor[:, 0] += h_shift
copied_boxes.tensor[:, 2] += h_shift
copied_boxes.tensor[:, 1] += w_shift
copied_boxes.tensor[:, 3] += w_shift
copied_instances.gt_masks = copied_masks
copied_instances.gt_boxes = copied_boxes
copied_instances._image_size = (unlabeled_h, unlabeled_w)
if len(cur_unlabeled_instances) == 0:
if isinstance(copied_instances.gt_masks, torch.Tensor):
alpha = copied_instances.gt_masks.sum(0) > 0
else:
alpha = copied_instances.gt_masks.tensor.sum(0) > 0
# merge image
alpha = alpha.cpu()
composited_image = (alpha * cur_labeled_image) + (~alpha * cur_unlabeled_image)
cur_unlabeled_image_list_new.append(composited_image)
cur_unlabeled_instances_list_new.append(copied_instances)
# cur_unlabeled_data["image"] = composited_image
# cur_unlabeled_data["instances"] = copied_instances
else:
# remove the copied object if iou greater than 0.5
if isinstance(copied_masks, torch.Tensor):
iou_matrix = mask_iou_matrix(copied_masks, cur_unlabeled_instances.gt_masks, mode='ioy') # nxN
else:
iou_matrix = mask_iou_matrix(copied_masks.tensor, cur_unlabeled_instances.gt_masks.tensor, mode='ioy') # nxN
# keep = iou_matrix.max(1)[0] < 0.5
keep = iou_matrix.max(1)[0] < 0.0
                        # for each video, all frames should have the same number of instances and gt masks (these can be None).
if keep.sum() == 0:
# new_unlabeled_data.append(cur_unlabeled_data)
cur_unlabeled_image_list_new.append(cur_unlabeled_image)
cur_unlabeled_instances_list_new.append(cur_unlabeled_instances)
continue
copied_instances = copied_instances[keep]
# update existing instances in unlabeled image
if isinstance(copied_instances.gt_masks, torch.Tensor):
alpha = copied_instances.gt_masks.sum(0) > 0
cur_unlabeled_instances.gt_masks = ~alpha * cur_unlabeled_instances.gt_masks
areas_unlabeled = cur_unlabeled_instances.gt_masks.sum((1,2))
else:
alpha = copied_instances.gt_masks.tensor.sum(0) > 0
cur_unlabeled_instances.gt_masks.tensor = ~alpha * cur_unlabeled_instances.gt_masks.tensor
areas_unlabeled = cur_unlabeled_instances.gt_masks.tensor.sum((1,2))
# merge image
alpha = alpha.cpu()
composited_image = (alpha * cur_labeled_image) + (~alpha * cur_unlabeled_image)
# merge instances
merged_instances = Instances.cat([cur_unlabeled_instances[areas_unlabeled > 0], copied_instances])
# update boxes
if isinstance(merged_instances.gt_masks, torch.Tensor):
merged_instances.gt_boxes = BitMasks(merged_instances.gt_masks).get_bounding_boxes()
# merged_instances.gt_boxes = merged_instances.gt_masks.get_bounding_boxes()
else:
merged_instances.gt_boxes = merged_instances.gt_masks.get_bounding_boxes()
cur_unlabeled_image_list_new.append(composited_image)
cur_unlabeled_instances_list_new.append(merged_instances)
# cur_unlabeled_data["image"] = composited_image
# cur_unlabeled_data["instances"] = merged_instances
if self.cfg_VISUALIZE_COPY_PASTE:
visualize_data(cur_unlabeled_data, save_path = 'sample_{}.jpg'.format(np.random.randint(5)))
cur_unlabeled_data["image"] = cur_unlabeled_image_list_new
cur_unlabeled_data["instances"] = cur_unlabeled_instances_list_new
# print("before appending: ", len(cur_unlabeled_image_list_new), len(cur_unlabeled_instances_list_new))
new_unlabeled_data.append(cur_unlabeled_data)
# for i in range(len(new_unlabeled_data)):
# try:
# print(i, len(new_unlabeled_data[i]['instances'][0]))
# except:
# print("Error!!!", len(cur_unlabeled_instances_list), i, new_unlabeled_data[i])
return new_unlabeled_data
def run_step(self):
"""
Implement the standard training logic described above.
"""
assert self.model.training, "[SimpleTrainer] model was changed to eval mode!"
start = time.perf_counter()
"""
If you want to do something with the data, you can wrap the dataloader.
"""
data = next(self._data_loader_iter)
# print(data, len(data))
if self.use_copy_paste:
# print('using copy paste')
data = self.copy_and_paste(copy.deepcopy(data[::-1]), data)
data_time = time.perf_counter() - start
"""
If you want to do something with the losses, you can wrap the model.
"""
loss_dict = self.model(data)
if isinstance(loss_dict, torch.Tensor):
losses = loss_dict
loss_dict = {"total_loss": loss_dict}
else:
losses = sum(loss_dict.values())
"""
If you need to accumulate gradients or do something similar, you can
wrap the optimizer with your custom `zero_grad()` method.
"""
if not torch.isnan(losses):
self.optimizer.zero_grad()
losses.backward()
else:
print('Nan loss. Skipped.')
self._write_metrics(loss_dict, data_time)
"""
If you need gradient clipping/scaling or other processing, you can
wrap the optimizer with your custom `step()` method. But it is
suboptimal as explained in https://arxiv.org/abs/2006.15704 Sec 3.2.4
"""
self.optimizer.step()
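# Minimal sketch of the overlap test inside copy_and_paste: masks are flattened
# to (N, H*W), pairwise intersections come from a matrix product, and the 'ioy'
# mode normalizes by the area of the existing (unlabeled) masks only. Copied
# instances whose maximum overlap exceeds the threshold are dropped; note that
# the code above currently compares against 0.0 (the 0.5 line is commented out),
# which rejects every copy whenever the target frame already has instances.
if __name__ == "__main__":
    _copied = torch.zeros(2, 4, 4)
    _copied[0, :2, :2] = 1
    _copied[1, 2:, 2:] = 1
    _existing = torch.zeros(1, 4, 4)
    _existing[0, :3, :3] = 1
    _x = _copied.reshape(2, -1).float()
    _y = _existing.reshape(1, -1).float()
    _inter = _x @ _y.t()                    # n_copied x n_existing intersections
    _ioy = _inter / _y.sum(1)[None, :]      # intersection over existing-mask area
    _keep = _ioy.max(1)[0] < 0.5
    print(_ioy.tolist(), _keep.tolist())    # approx [[4/9], [1/9]] -> [True, True]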
class CustomAMPTrainer(CustomSimpleTrainer):
"""
Like :class:`SimpleTrainer`, but uses PyTorch's native automatic mixed precision
in the training loop.
"""
def __init__(self, model, data_loader, optimizer, cfg=None, grad_scaler=None, use_copy_paste=False,
copy_paste_rate=-1, copy_paste_random_num=None, copy_paste_min_ratio=-1,
copy_paste_max_ratio=-1, visualize_copy_paste=False):
"""
Args:
model, data_loader, optimizer: same as in :class:`SimpleTrainer`.
grad_scaler: torch GradScaler to automatically scale gradients.
"""
unsupported = "AMPTrainer does not support single-process multi-device training!"
if isinstance(model, DistributedDataParallel):
assert not (model.device_ids and len(model.device_ids) > 1), unsupported
assert not isinstance(model, DataParallel), unsupported
print("INFO: use AMPTrainer.")
super().__init__(model, data_loader, optimizer, cfg=cfg, use_copy_paste=use_copy_paste, \
copy_paste_rate=copy_paste_rate, copy_paste_random_num=copy_paste_random_num, \
copy_paste_min_ratio=copy_paste_min_ratio, copy_paste_max_ratio=copy_paste_max_ratio, \
visualize_copy_paste=visualize_copy_paste)
if grad_scaler is None:
from torch.cuda.amp import GradScaler
grad_scaler = GradScaler()
self.grad_scaler = grad_scaler
def run_step(self):
"""
Implement the AMP training logic.
"""
assert self.model.training, "[AMPTrainer] model was changed to eval mode!"
assert torch.cuda.is_available(), "[AMPTrainer] CUDA is required for AMP training!"
from torch.cuda.amp import autocast
start = time.perf_counter()
data = next(self._data_loader_iter)
if self.use_copy_paste:
# print('using copy paste')
data = self.copy_and_paste(copy.deepcopy(data[::-1]), data)
data_time = time.perf_counter() - start
with autocast():
loss_dict = self.model(data)
if isinstance(loss_dict, torch.Tensor):
losses = loss_dict
loss_dict = {"total_loss": loss_dict}
else:
losses = sum(loss_dict.values())
if not torch.isnan(losses):
self.optimizer.zero_grad()
self.grad_scaler.scale(losses).backward()
else:
print('Nan loss.')
self._write_metrics(loss_dict, data_time)
self.grad_scaler.step(self.optimizer)
self.grad_scaler.update()
def state_dict(self):
ret = super().state_dict()
ret["grad_scaler"] = self.grad_scaler.state_dict()
return ret
def load_state_dict(self, state_dict):
super().load_state_dict(state_dict)
self.grad_scaler.load_state_dict(state_dict["grad_scaler"])
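# Sketch of the AMP update pattern mirrored by CustomAMPTrainer.run_step:
# forward under autocast, scale the loss before backward, then step and update
# the GradScaler. The tiny model and optimizer here are stand-ins used only for
# illustration; the block is skipped when CUDA is unavailable.
if __name__ == "__main__" and torch.cuda.is_available():
    from torch.cuda.amp import GradScaler, autocast
    _model = torch.nn.Linear(4, 1).cuda()
    _opt = torch.optim.SGD(_model.parameters(), lr=0.1)
    _scaler = GradScaler()
    with autocast():
        _loss = _model(torch.randn(8, 4, device="cuda")).mean()
    _opt.zero_grad()
    _scaler.scale(_loss).backward()
    _scaler.step(_opt)
    _scaler.update()
    print("loss:", float(_loss))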
| CutLER-main | videocutler/mask2former_video/engine/train_loop.py |
# -*- coding: utf-8 -*-
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Modified by XuDong Wang from https://github.com/facebookresearch/detectron2/blob/main/detectron2/engine/defaults.py
"""
This file contains components with some default boilerplate logic users may need
in training / testing. They will not work for everyone, but many users may find them useful.
The behavior of functions/classes in this file is subject to change,
since they are meant to represent the "common default behavior" people need in their projects.
"""
import argparse
import logging
import os
import sys
import weakref
from collections import OrderedDict
from typing import Optional
import torch
from fvcore.nn.precise_bn import get_bn_modules
from omegaconf import OmegaConf
from torch.nn.parallel import DistributedDataParallel
import detectron2.data.transforms as T
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import CfgNode, LazyConfig
from detectron2.data import (
MetadataCatalog,
)
from detectron2.data import (
build_detection_test_loader,
build_detection_train_loader,
)
from detectron2.evaluation import (
DatasetEvaluator,
inference_on_dataset,
print_csv_format,
verify_results,
)
from detectron2.modeling import build_model
from detectron2.solver import build_lr_scheduler, build_optimizer
from detectron2.utils import comm
from detectron2.utils.collect_env import collect_env_info
from detectron2.utils.env import seed_all_rng
from detectron2.utils.events import CommonMetricPrinter, JSONWriter, TensorboardXWriter
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import setup_logger
from detectron2.engine import hooks
from detectron2.engine import TrainerBase
from .train_loop import CustomAMPTrainer, CustomSimpleTrainer
__all__ = [
"create_ddp_model",
"default_argument_parser",
"default_setup",
"default_writers",
"DefaultPredictor",
"DefaultTrainer",
]
def create_ddp_model(model, *, fp16_compression=False, **kwargs):
"""
Create a DistributedDataParallel model if there are >1 processes.
Args:
model: a torch.nn.Module
fp16_compression: add fp16 compression hooks to the ddp object.
See more at https://pytorch.org/docs/stable/ddp_comm_hooks.html#torch.distributed.algorithms.ddp_comm_hooks.default_hooks.fp16_compress_hook
kwargs: other arguments of :module:`torch.nn.parallel.DistributedDataParallel`.
""" # noqa
if comm.get_world_size() == 1:
return model
if "device_ids" not in kwargs:
kwargs["device_ids"] = [comm.get_local_rank()]
ddp = DistributedDataParallel(model, **kwargs)
if fp16_compression:
from torch.distributed.algorithms.ddp_comm_hooks import default as comm_hooks
ddp.register_comm_hook(state=None, hook=comm_hooks.fp16_compress_hook)
return ddp
def default_argument_parser(epilog=None):
"""
Create a parser with some common arguments used by detectron2 users.
Args:
epilog (str): epilog passed to ArgumentParser describing the usage.
Returns:
argparse.ArgumentParser:
"""
parser = argparse.ArgumentParser(
epilog=epilog
or f"""
Examples:
Run on single machine:
$ {sys.argv[0]} --num-gpus 8 --config-file cfg.yaml
Change some config options:
$ {sys.argv[0]} --config-file cfg.yaml MODEL.WEIGHTS /path/to/weight.pth SOLVER.BASE_LR 0.001
Run on multiple machines:
(machine0)$ {sys.argv[0]} --machine-rank 0 --num-machines 2 --dist-url <URL> [--other-flags]
(machine1)$ {sys.argv[0]} --machine-rank 1 --num-machines 2 --dist-url <URL> [--other-flags]
""",
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file")
parser.add_argument(
"--resume",
action="store_true",
help="Whether to attempt to resume from the checkpoint directory. "
"See documentation of `DefaultTrainer.resume_or_load()` for what it means.",
)
parser.add_argument("--eval-only", action="store_true", help="perform evaluation only")
parser.add_argument("--num-gpus", type=int, default=1, help="number of gpus *per machine*")
parser.add_argument("--num-machines", type=int, default=1, help="total number of machines")
parser.add_argument(
"--machine-rank", type=int, default=0, help="the rank of this machine (unique per machine)"
)
parser.add_argument(
"--test-dataset", type=str, default="", help="the dataset used for evaluation"
)
parser.add_argument(
"--train-dataset", type=str, default="", help="the dataset used for training"
)
parser.add_argument(
"--wandb-name", type=str, default="", help="the wandb project name"
)
parser.add_argument("--no-segm", action="store_true", help="perform evaluation on detection only")
# PyTorch still may leave orphan processes in multi-gpu training.
# Therefore we use a deterministic way to obtain port,
# so that users are aware of orphan processes by seeing the port occupied.
port = 2**15 + 2**14 + hash(os.getuid() if sys.platform != "win32" else 1) % 2**14
parser.add_argument(
"--dist-url",
default="tcp://127.0.0.1:{}".format(port),
help="initialization URL for pytorch distributed backend. See "
"https://pytorch.org/docs/stable/distributed.html for details.",
)
parser.add_argument(
"opts",
help="""
Modify config options at the end of the command. For Yacs configs, use
space-separated "PATH.KEY VALUE" pairs.
For python-based LazyConfig, use "path.key=value".
""".strip(),
default=None,
nargs=argparse.REMAINDER,
)
return parser
def _try_get_key(cfg, *keys, default=None):
"""
Try select keys from cfg until the first key that exists. Otherwise return default.
"""
if isinstance(cfg, CfgNode):
cfg = OmegaConf.create(cfg.dump())
for k in keys:
none = object()
p = OmegaConf.select(cfg, k, default=none)
if p is not none:
return p
return default
def _highlight(code, filename):
try:
import pygments
except ImportError:
return code
from pygments.lexers import Python3Lexer, YamlLexer
from pygments.formatters import Terminal256Formatter
lexer = Python3Lexer() if filename.endswith(".py") else YamlLexer()
code = pygments.highlight(code, lexer, Terminal256Formatter(style="monokai"))
return code
def default_setup(cfg, args):
"""
Perform some basic common setups at the beginning of a job, including:
1. Set up the detectron2 logger
2. Log basic information about environment, cmdline arguments, and config
3. Backup the config to the output directory
Args:
cfg (CfgNode or omegaconf.DictConfig): the full config to be used
args (argparse.NameSpace): the command line arguments to be logged
"""
output_dir = _try_get_key(cfg, "OUTPUT_DIR", "output_dir", "train.output_dir")
if comm.is_main_process() and output_dir:
PathManager.mkdirs(output_dir)
rank = comm.get_rank()
setup_logger(output_dir, distributed_rank=rank, name="fvcore")
logger = setup_logger(output_dir, distributed_rank=rank)
logger.info("Rank of current process: {}. World size: {}".format(rank, comm.get_world_size()))
logger.info("Environment info:\n" + collect_env_info())
logger.info("Command line arguments: " + str(args))
if hasattr(args, "config_file") and args.config_file != "":
logger.info(
"Contents of args.config_file={}:\n{}".format(
args.config_file,
_highlight(PathManager.open(args.config_file, "r").read(), args.config_file),
)
)
if comm.is_main_process() and output_dir:
# Note: some of our scripts may expect the existence of
# config.yaml in output directory
path = os.path.join(output_dir, "config.yaml")
if isinstance(cfg, CfgNode):
logger.info("Running with full config:\n{}".format(_highlight(cfg.dump(), ".yaml")))
with PathManager.open(path, "w") as f:
f.write(cfg.dump())
else:
LazyConfig.save(cfg, path)
logger.info("Full config saved to {}".format(path))
# make sure each worker has a different, yet deterministic seed if specified
seed = _try_get_key(cfg, "SEED", "train.seed", default=-1)
seed_all_rng(None if seed < 0 else seed + rank)
# cudnn benchmark has large overhead. It shouldn't be used considering the small size of
# typical validation set.
if not (hasattr(args, "eval_only") and args.eval_only):
torch.backends.cudnn.benchmark = _try_get_key(
cfg, "CUDNN_BENCHMARK", "train.cudnn_benchmark", default=False
)
def default_writers(output_dir: str, max_iter: Optional[int] = None):
"""
Build a list of :class:`EventWriter` to be used.
It now consists of a :class:`CommonMetricPrinter`,
:class:`TensorboardXWriter` and :class:`JSONWriter`.
Args:
output_dir: directory to store JSON metrics and tensorboard events
max_iter: the total number of iterations
Returns:
list[EventWriter]: a list of :class:`EventWriter` objects.
"""
PathManager.mkdirs(output_dir)
return [
# It may not always print what you want to see, since it prints "common" metrics only.
CommonMetricPrinter(max_iter),
JSONWriter(os.path.join(output_dir, "metrics.json")),
TensorboardXWriter(output_dir),
]
class DefaultPredictor:
"""
Create a simple end-to-end predictor with the given config that runs on
single device for a single input image.
Compared to using the model directly, this class does the following additions:
1. Load checkpoint from `cfg.MODEL.WEIGHTS`.
2. Always take BGR image as the input and apply conversion defined by `cfg.INPUT.FORMAT`.
3. Apply resizing defined by `cfg.INPUT.{MIN,MAX}_SIZE_TEST`.
4. Take one input image and produce a single output, instead of a batch.
This is meant for simple demo purposes, so it does the above steps automatically.
This is not meant for benchmarks or running complicated inference logic.
If you'd like to do anything more complicated, please refer to its source code as
examples to build and use the model manually.
Attributes:
metadata (Metadata): the metadata of the underlying dataset, obtained from
cfg.DATASETS.TEST.
Examples:
::
pred = DefaultPredictor(cfg)
inputs = cv2.imread("input.jpg")
outputs = pred(inputs)
"""
def __init__(self, cfg):
self.cfg = cfg.clone() # cfg can be modified by model
self.model = build_model(self.cfg)
self.model.eval()
if len(cfg.DATASETS.TEST):
self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0])
checkpointer = DetectionCheckpointer(self.model)
checkpointer.load(cfg.MODEL.WEIGHTS)
self.aug = T.ResizeShortestEdge(
[cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST
)
self.input_format = cfg.INPUT.FORMAT
assert self.input_format in ["RGB", "BGR"], self.input_format
def __call__(self, original_image):
"""
Args:
original_image (np.ndarray): an image of shape (H, W, C) (in BGR order).
Returns:
predictions (dict):
the output of the model for one image only.
See :doc:`/tutorials/models` for details about the format.
"""
with torch.no_grad(): # https://github.com/sphinx-doc/sphinx/issues/4258
# Apply pre-processing to image.
if self.input_format == "RGB":
# whether the model expects BGR inputs or RGB
original_image = original_image[:, :, ::-1]
height, width = original_image.shape[:2]
image = self.aug.get_transform(original_image).apply_image(original_image)
image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
inputs = {"image": image, "height": height, "width": width}
predictions = self.model([inputs])[0]
return predictions
class DefaultTrainer(TrainerBase):
"""
A trainer with default training logic. It does the following:
1. Create a :class:`SimpleTrainer` using model, optimizer, dataloader
defined by the given config. Create a LR scheduler defined by the config.
2. Load the last checkpoint or `cfg.MODEL.WEIGHTS`, if exists, when
`resume_or_load` is called.
3. Register a few common hooks defined by the config.
It is created to simplify the **standard model training workflow** and reduce code boilerplate
for users who only need the standard training workflow, with standard features.
It means this class makes *many assumptions* about your training logic that
    may easily become invalid in new research. In fact, any assumptions beyond those made in the
:class:`SimpleTrainer` are too much for research.
The code of this class has been annotated about restrictive assumptions it makes.
When they do not work for you, you're encouraged to:
1. Overwrite methods of this class, OR:
2. Use :class:`SimpleTrainer`, which only does minimal SGD training and
nothing else. You can then add your own hooks if needed. OR:
3. Write your own training loop similar to `tools/plain_train_net.py`.
See the :doc:`/tutorials/training` tutorials for more details.
Note that the behavior of this class, like other functions/classes in
this file, is not stable, since it is meant to represent the "common default behavior".
It is only guaranteed to work well with the standard models and training workflow in detectron2.
To obtain more stable behavior, write your own training logic with other public APIs.
Examples:
::
trainer = DefaultTrainer(cfg)
trainer.resume_or_load() # load last checkpoint or MODEL.WEIGHTS
trainer.train()
Attributes:
scheduler:
checkpointer (DetectionCheckpointer):
cfg (CfgNode):
"""
def __init__(self, cfg):
"""
Args:
cfg (CfgNode):
"""
super().__init__()
logger = logging.getLogger("detectron2")
if not logger.isEnabledFor(logging.INFO): # setup_logger is not called for d2
setup_logger()
cfg = DefaultTrainer.auto_scale_workers(cfg, comm.get_world_size())
# Assume these objects must be constructed in this order.
model = self.build_model(cfg)
optimizer = self.build_optimizer(cfg, model)
data_loader = self.build_train_loader(cfg)
model = create_ddp_model(model, broadcast_buffers=False)
if cfg.SOLVER.AMP.ENABLED:
self._trainer = CustomAMPTrainer(model, data_loader, optimizer, cfg=cfg)
else:
self._trainer = CustomSimpleTrainer(model, data_loader, optimizer, cfg=cfg)
self.scheduler = self.build_lr_scheduler(cfg, optimizer)
self.checkpointer = DetectionCheckpointer(
# Assume you want to save checkpoints together with logs/statistics
model,
cfg.OUTPUT_DIR,
trainer=weakref.proxy(self),
)
self.start_iter = 0
self.max_iter = cfg.SOLVER.MAX_ITER
self.cfg = cfg
self.register_hooks(self.build_hooks())
def resume_or_load(self, resume=True):
"""
If `resume==True` and `cfg.OUTPUT_DIR` contains the last checkpoint (defined by
a `last_checkpoint` file), resume from the file. Resuming means loading all
        available states (e.g. optimizer and scheduler) and updating the iteration counter
from the checkpoint. ``cfg.MODEL.WEIGHTS`` will not be used.
Otherwise, this is considered as an independent training. The method will load model
weights from the file `cfg.MODEL.WEIGHTS` (but will not load other states) and start
from iteration 0.
Args:
resume (bool): whether to do resume or not
"""
self.checkpointer.resume_or_load(self.cfg.MODEL.WEIGHTS, resume=resume)
if resume and self.checkpointer.has_checkpoint():
# The checkpoint stores the training iteration that just finished, thus we start
# at the next iteration
self.start_iter = self.iter + 1
def build_hooks(self):
"""
Build a list of default hooks, including timing, evaluation,
checkpointing, lr scheduling, precise BN, writing events.
Returns:
list[HookBase]:
"""
cfg = self.cfg.clone()
cfg.defrost()
cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN
ret = [
hooks.IterationTimer(),
hooks.LRScheduler(),
hooks.PreciseBN(
# Run at the same freq as (but before) evaluation.
cfg.TEST.EVAL_PERIOD,
self.model,
# Build a new data loader to not affect training
self.build_train_loader(cfg),
cfg.TEST.PRECISE_BN.NUM_ITER,
)
if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model)
else None,
]
# Do PreciseBN before checkpointer, because it updates the model and need to
# be saved by checkpointer.
# This is not always the best: if checkpointing has a different frequency,
# some checkpoints may have more precise statistics than others.
if comm.is_main_process():
ret.append(hooks.PeriodicCheckpointer(self.checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD))
def test_and_save_results():
self._last_eval_results = self.test(self.cfg, self.model)
return self._last_eval_results
# Do evaluation after checkpointer, because then if it fails,
# we can use the saved checkpoint to debug.
ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results))
if comm.is_main_process():
# Here the default print/log frequency of each writer is used.
# run writers in the end, so that evaluation metrics are written
ret.append(hooks.PeriodicWriter(self.build_writers(), period=20))
return ret
def build_writers(self):
"""
Build a list of writers to be used using :func:`default_writers()`.
If you'd like a different list of writers, you can overwrite it in
your trainer.
Returns:
list[EventWriter]: a list of :class:`EventWriter` objects.
"""
return default_writers(self.cfg.OUTPUT_DIR, self.max_iter)
def train(self):
"""
Run training.
Returns:
OrderedDict of results, if evaluation is enabled. Otherwise None.
"""
super().train(self.start_iter, self.max_iter)
if len(self.cfg.TEST.EXPECTED_RESULTS) and comm.is_main_process():
assert hasattr(
self, "_last_eval_results"
), "No evaluation results obtained during training!"
verify_results(self.cfg, self._last_eval_results)
return self._last_eval_results
def run_step(self):
self._trainer.iter = self.iter
self._trainer.run_step()
def state_dict(self):
ret = super().state_dict()
ret["_trainer"] = self._trainer.state_dict()
return ret
def load_state_dict(self, state_dict):
super().load_state_dict(state_dict)
self._trainer.load_state_dict(state_dict["_trainer"])
@classmethod
def build_model(cls, cfg):
"""
Returns:
torch.nn.Module:
It now calls :func:`detectron2.modeling.build_model`.
Overwrite it if you'd like a different model.
"""
model = build_model(cfg)
logger = logging.getLogger(__name__)
logger.info("Model:\n{}".format(model))
return model
@classmethod
def build_optimizer(cls, cfg, model):
"""
Returns:
torch.optim.Optimizer:
It now calls :func:`detectron2.solver.build_optimizer`.
Overwrite it if you'd like a different optimizer.
"""
return build_optimizer(cfg, model)
@classmethod
def build_lr_scheduler(cls, cfg, optimizer):
"""
It now calls :func:`detectron2.solver.build_lr_scheduler`.
Overwrite it if you'd like a different scheduler.
"""
return build_lr_scheduler(cfg, optimizer)
@classmethod
def build_train_loader(cls, cfg):
"""
Returns:
iterable
It now calls :func:`detectron2.data.build_detection_train_loader`.
Overwrite it if you'd like a different data loader.
"""
return build_detection_train_loader(cfg)
@classmethod
def build_test_loader(cls, cfg, dataset_name):
"""
Returns:
iterable
It now calls :func:`detectron2.data.build_detection_test_loader`.
Overwrite it if you'd like a different data loader.
"""
return build_detection_test_loader(cfg, dataset_name)
@classmethod
def build_evaluator(cls, cfg, dataset_name):
"""
Returns:
DatasetEvaluator or None
It is not implemented by default.
"""
raise NotImplementedError(
"""
If you want DefaultTrainer to automatically run evaluation,
please implement `build_evaluator()` in subclasses (see train_net.py for example).
Alternatively, you can call evaluation functions yourself (see Colab balloon tutorial for example).
"""
)
@classmethod
def test(cls, cfg, model, evaluators=None):
"""
Evaluate the given model. The given model is expected to already contain
weights to evaluate.
Args:
cfg (CfgNode):
model (nn.Module):
evaluators (list[DatasetEvaluator] or None): if None, will call
:meth:`build_evaluator`. Otherwise, must have the same length as
``cfg.DATASETS.TEST``.
Returns:
dict: a dict of result metrics
"""
logger = logging.getLogger(__name__)
if isinstance(evaluators, DatasetEvaluator):
evaluators = [evaluators]
if evaluators is not None:
assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format(
len(cfg.DATASETS.TEST), len(evaluators)
)
results = OrderedDict()
for idx, dataset_name in enumerate(cfg.DATASETS.TEST):
data_loader = cls.build_test_loader(cfg, dataset_name)
# When evaluators are passed in as arguments,
# implicitly assume that evaluators can be created before data_loader.
if evaluators is not None:
evaluator = evaluators[idx]
else:
try:
evaluator = cls.build_evaluator(cfg, dataset_name)
except NotImplementedError:
logger.warn(
"No evaluator found. Use `DefaultTrainer.test(evaluators=)`, "
"or implement its `build_evaluator` method."
)
results[dataset_name] = {}
continue
results_i = inference_on_dataset(model, data_loader, evaluator)
results[dataset_name] = results_i
if comm.is_main_process():
assert isinstance(
results_i, dict
), "Evaluator must return a dict on the main process. Got {} instead.".format(
results_i
)
logger.info("Evaluation results for {} in csv format:".format(dataset_name))
print_csv_format(results_i)
if len(results) == 1:
results = list(results.values())[0]
return results
@staticmethod
def auto_scale_workers(cfg, num_workers: int):
"""
When the config is defined for certain number of workers (according to
``cfg.SOLVER.REFERENCE_WORLD_SIZE``) that's different from the number of
workers currently in use, returns a new cfg where the total batch size
is scaled so that the per-GPU batch size stays the same as the
original ``IMS_PER_BATCH // REFERENCE_WORLD_SIZE``.
Other config options are also scaled accordingly:
        * training steps and warmup steps are scaled in inverse proportion.
        * the learning rate is scaled proportionally, following :paper:`ImageNet in 1h`.
For example, with the original config like the following:
.. code-block:: yaml
IMS_PER_BATCH: 16
BASE_LR: 0.1
REFERENCE_WORLD_SIZE: 8
MAX_ITER: 5000
STEPS: (4000,)
CHECKPOINT_PERIOD: 1000
When this config is used on 16 GPUs instead of the reference number 8,
calling this method will return a new config with:
.. code-block:: yaml
IMS_PER_BATCH: 32
BASE_LR: 0.2
REFERENCE_WORLD_SIZE: 16
MAX_ITER: 2500
STEPS: (2000,)
CHECKPOINT_PERIOD: 500
Note that both the original config and this new config can be trained on 16 GPUs.
It's up to user whether to enable this feature (by setting ``REFERENCE_WORLD_SIZE``).
Returns:
CfgNode: a new config. Same as original if ``cfg.SOLVER.REFERENCE_WORLD_SIZE==0``.
"""
old_world_size = cfg.SOLVER.REFERENCE_WORLD_SIZE
if old_world_size == 0 or old_world_size == num_workers:
return cfg
cfg = cfg.clone()
frozen = cfg.is_frozen()
cfg.defrost()
assert (
cfg.SOLVER.IMS_PER_BATCH % old_world_size == 0
), "Invalid REFERENCE_WORLD_SIZE in config!"
scale = num_workers / old_world_size
bs = cfg.SOLVER.IMS_PER_BATCH = int(round(cfg.SOLVER.IMS_PER_BATCH * scale))
lr = cfg.SOLVER.BASE_LR = cfg.SOLVER.BASE_LR * scale
max_iter = cfg.SOLVER.MAX_ITER = int(round(cfg.SOLVER.MAX_ITER / scale))
warmup_iter = cfg.SOLVER.WARMUP_ITERS = int(round(cfg.SOLVER.WARMUP_ITERS / scale))
cfg.SOLVER.STEPS = tuple(int(round(s / scale)) for s in cfg.SOLVER.STEPS)
cfg.TEST.EVAL_PERIOD = int(round(cfg.TEST.EVAL_PERIOD / scale))
cfg.SOLVER.CHECKPOINT_PERIOD = int(round(cfg.SOLVER.CHECKPOINT_PERIOD / scale))
cfg.SOLVER.REFERENCE_WORLD_SIZE = num_workers # maintain invariant
logger = logging.getLogger(__name__)
logger.info(
f"Auto-scaling the config to batch_size={bs}, learning_rate={lr}, "
f"max_iter={max_iter}, warmup={warmup_iter}."
)
if frozen:
cfg.freeze()
return cfg
# Access basic attributes from the underlying trainer
for _attr in ["model", "data_loader", "optimizer"]:
setattr(
DefaultTrainer,
_attr,
property(
# getter
lambda self, x=_attr: getattr(self._trainer, x),
# setter
lambda self, value, x=_attr: setattr(self._trainer, x, value),
),
)
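# Sketch of the delegation pattern above: each listed attribute becomes a
# property on DefaultTrainer that forwards reads and writes to self._trainer,
# with the attribute name captured through a lambda default argument. The toy
# classes below are stand-ins used only to illustrate the mechanism.
if __name__ == "__main__":
    class _Inner:
        def __init__(self):
            self.model = "inner-model"

    class _Outer:
        def __init__(self):
            self._trainer = _Inner()

    for _name in ["model"]:
        setattr(
            _Outer,
            _name,
            property(
                lambda self, x=_name: getattr(self._trainer, x),
                lambda self, value, x=_name: setattr(self._trainer, x, value),
            ),
        )
    _outer = _Outer()
    print(_outer.model)           # "inner-model"
    _outer.model = "swapped"
    print(_outer._trainer.model)  # "swapped"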
| CutLER-main | videocutler/mask2former_video/engine/defaults.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
import config
import engine
import modeling
import structures
import tools
import demo
# dataset loading
from . import data # register all new datasets
from data import datasets # register all new datasets
from solver import *
# from .data import register_all_imagenet | CutLER-main | cutler/__init__.py |
#!/usr/bin/env python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Modified by XuDong Wang from https://github.com/facebookresearch/detectron2/blob/main/tools/train_net.py
"""
A main training script.
This scripts reads a given config file and runs the training or evaluation.
It is an entry point that is made to train standard models in detectron2.
In order to let one script support training of many models,
this script contains logic that is specific to these built-in models and therefore
may not be suitable for your own project.
For example, your research project perhaps only needs a single "evaluator".
Therefore, we recommend you use detectron2 as a library and take
this file as an example of how to use the library.
You may want to write your own script with your datasets and other customizations.
"""
import logging
import os
from collections import OrderedDict
import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from config import add_cutler_config
from detectron2.data import MetadataCatalog
from engine import DefaultTrainer, default_argument_parser, default_setup
from detectron2.engine import hooks, launch
from detectron2.evaluation import (
CityscapesInstanceEvaluator,
CityscapesSemSegEvaluator,
# COCOEvaluator,
COCOPanopticEvaluator,
DatasetEvaluators,
LVISEvaluator,
PascalVOCDetectionEvaluator,
SemSegEvaluator,
verify_results,
)
from evaluation import COCOEvaluator
from detectron2.modeling import GeneralizedRCNNWithTTA
import data # register new datasets
import modeling.roi_heads
def build_evaluator(cfg, dataset_name, output_folder=None):
"""
Create evaluator(s) for a given dataset.
This uses the special metadata "evaluator_type" associated with each builtin dataset.
For your own dataset, you can simply create an evaluator manually in your
script and do not have to worry about the hacky if-else logic here.
"""
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
evaluator_list = []
evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
if evaluator_type in ["sem_seg", "coco_panoptic_seg"]:
evaluator_list.append(
SemSegEvaluator(
dataset_name,
distributed=True,
output_dir=output_folder,
)
)
if evaluator_type in ["coco", "coco_panoptic_seg"]:
evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder, no_segm=cfg.TEST.NO_SEGM))
if evaluator_type == "coco_panoptic_seg":
evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))
if evaluator_type == "cityscapes_instance":
return CityscapesInstanceEvaluator(dataset_name)
if evaluator_type == "cityscapes_sem_seg":
return CityscapesSemSegEvaluator(dataset_name)
elif evaluator_type == "pascal_voc":
return PascalVOCDetectionEvaluator(dataset_name)
elif evaluator_type == "lvis":
return LVISEvaluator(dataset_name, output_dir=output_folder)
if len(evaluator_list) == 0:
raise NotImplementedError(
"no Evaluator for the dataset {} with the type {}".format(dataset_name, evaluator_type)
)
elif len(evaluator_list) == 1:
return evaluator_list[0]
return DatasetEvaluators(evaluator_list)
class Trainer(DefaultTrainer):
"""
We use the "DefaultTrainer" which contains pre-defined default logic for
standard training workflow. They may not work for you, especially if you
are working on a new research project. In that case you can write your
own training loop. You can use "tools/plain_train_net.py" as an example.
"""
@classmethod
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
return build_evaluator(cfg, dataset_name, output_folder)
@classmethod
def test_with_TTA(cls, cfg, model):
logger = logging.getLogger("detectron2.trainer")
# In the end of training, run an evaluation with TTA
# Only support some R-CNN models.
logger.info("Running inference with test-time augmentation ...")
model = GeneralizedRCNNWithTTA(cfg, model)
evaluators = [
cls.build_evaluator(
cfg, name, output_folder=os.path.join(cfg.OUTPUT_DIR, "inference_TTA")
)
for name in cfg.DATASETS.TEST
]
res = cls.test(cfg, model, evaluators)
res = OrderedDict({k + "_TTA": v for k, v in res.items()})
return res
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
add_cutler_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
# FIXME: brute force changes to test datasets and evaluation tasks
    if args.test_dataset != "": cfg.DATASETS.TEST = (args.test_dataset,)
    if args.train_dataset != "": cfg.DATASETS.TRAIN = (args.train_dataset,)
cfg.TEST.NO_SEGM = args.no_segm
cfg.freeze()
default_setup(cfg, args)
return cfg
def main(args):
cfg = setup(args)
if args.eval_only:
model = Trainer.build_model(cfg)
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
cfg.MODEL.WEIGHTS, resume=args.resume
)
res = Trainer.test(cfg, model)
if cfg.TEST.AUG.ENABLED:
res.update(Trainer.test_with_TTA(cfg, model))
if comm.is_main_process():
verify_results(cfg, res)
return res
"""
If you'd like to do anything fancier than the standard training logic,
consider writing your own training loop (see plain_train_net.py) or
subclassing the trainer.
"""
trainer = Trainer(cfg)
trainer.resume_or_load(resume=args.resume)
if cfg.TEST.AUG.ENABLED:
trainer.register_hooks(
[hooks.EvalHook(0, lambda: trainer.test_with_TTA(cfg, trainer.model))]
)
return trainer.train()
if __name__ == "__main__":
args = default_argument_parser().parse_args()
# print(args)
# args.opts = postprocess_args(args.opts)
# rint = random.randint(0, 10000)
# args.dist_url = args.dist_url.replace('12399', str(12399 + rint))
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
| CutLER-main | cutler/train_net.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
import atexit
import bisect
import multiprocessing as mp
from collections import deque
import cv2
import torch
from detectron2.data import MetadataCatalog
import sys
sys.path.append('./')
from engine.defaults import DefaultPredictor
from detectron2.utils.video_visualizer import VideoVisualizer
from detectron2.utils.visualizer import ColorMode, Visualizer
class VisualizationDemo(object):
def __init__(self, cfg, instance_mode=ColorMode.IMAGE, parallel=False):
"""
Args:
cfg (CfgNode):
instance_mode (ColorMode):
parallel (bool): whether to run the model in different processes from visualization.
Useful since the visualization logic can be slow.
"""
self.metadata = MetadataCatalog.get(
cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else "__unused"
)
self.cpu_device = torch.device("cpu")
self.instance_mode = instance_mode
self.parallel = parallel
if parallel:
num_gpu = torch.cuda.device_count()
self.predictor = AsyncPredictor(cfg, num_gpus=num_gpu)
else:
self.predictor = DefaultPredictor(cfg)
def run_on_image(self, image):
"""
Args:
image (np.ndarray): an image of shape (H, W, C) (in BGR order).
This is the format used by OpenCV.
Returns:
predictions (dict): the output of the model.
vis_output (VisImage): the visualized image output.
"""
vis_output = None
predictions = self.predictor(image)
# Convert image from OpenCV BGR format to Matplotlib RGB format.
image = image[:, :, ::-1]
visualizer = Visualizer(image, self.metadata, instance_mode=self.instance_mode)
if "panoptic_seg" in predictions:
panoptic_seg, segments_info = predictions["panoptic_seg"]
vis_output = visualizer.draw_panoptic_seg_predictions(
panoptic_seg.to(self.cpu_device), segments_info
)
else:
if "sem_seg" in predictions:
vis_output = visualizer.draw_sem_seg(
predictions["sem_seg"].argmax(dim=0).to(self.cpu_device)
)
if "instances" in predictions:
instances = predictions["instances"].to(self.cpu_device)
vis_output = visualizer.draw_instance_predictions(predictions=instances)
return predictions, vis_output
def _frame_from_video(self, video):
while video.isOpened():
success, frame = video.read()
if success:
yield frame
else:
break
def run_on_video(self, video):
"""
Visualizes predictions on frames of the input video.
Args:
video (cv2.VideoCapture): a :class:`VideoCapture` object, whose source can be
either a webcam or a video file.
Yields:
ndarray: BGR visualizations of each video frame.
"""
video_visualizer = VideoVisualizer(self.metadata, self.instance_mode)
def process_predictions(frame, predictions):
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
if "panoptic_seg" in predictions:
panoptic_seg, segments_info = predictions["panoptic_seg"]
vis_frame = video_visualizer.draw_panoptic_seg_predictions(
frame, panoptic_seg.to(self.cpu_device), segments_info
)
elif "instances" in predictions:
predictions = predictions["instances"].to(self.cpu_device)
vis_frame = video_visualizer.draw_instance_predictions(frame, predictions)
elif "sem_seg" in predictions:
vis_frame = video_visualizer.draw_sem_seg(
frame, predictions["sem_seg"].argmax(dim=0).to(self.cpu_device)
)
# Converts Matplotlib RGB format to OpenCV BGR format
vis_frame = cv2.cvtColor(vis_frame.get_image(), cv2.COLOR_RGB2BGR)
return vis_frame
frame_gen = self._frame_from_video(video)
if self.parallel:
buffer_size = self.predictor.default_buffer_size
frame_data = deque()
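            # Keep up to `buffer_size` frames in flight so the asynchronous
            # workers stay busy; once the pipeline is full, pop the oldest
            # buffered frame and pair it with the next available prediction.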
for cnt, frame in enumerate(frame_gen):
frame_data.append(frame)
self.predictor.put(frame)
if cnt >= buffer_size:
frame = frame_data.popleft()
predictions = self.predictor.get()
yield process_predictions(frame, predictions)
while len(frame_data):
frame = frame_data.popleft()
predictions = self.predictor.get()
yield process_predictions(frame, predictions)
else:
for frame in frame_gen:
yield process_predictions(frame, self.predictor(frame))
class AsyncPredictor:
"""
A predictor that runs the model asynchronously, possibly on >1 GPUs.
    Because rendering the visualization takes a considerable amount of time,
this helps improve throughput a little bit when rendering videos.
"""
class _StopToken:
pass
class _PredictWorker(mp.Process):
def __init__(self, cfg, task_queue, result_queue):
self.cfg = cfg
self.task_queue = task_queue
self.result_queue = result_queue
super().__init__()
def run(self):
predictor = DefaultPredictor(self.cfg)
while True:
task = self.task_queue.get()
if isinstance(task, AsyncPredictor._StopToken):
break
idx, data = task
result = predictor(data)
self.result_queue.put((idx, result))
def __init__(self, cfg, num_gpus: int = 1):
"""
Args:
cfg (CfgNode):
num_gpus (int): if 0, will run on CPU
"""
num_workers = max(num_gpus, 1)
self.task_queue = mp.Queue(maxsize=num_workers * 3)
self.result_queue = mp.Queue(maxsize=num_workers * 3)
self.procs = []
for gpuid in range(max(num_gpus, 1)):
cfg = cfg.clone()
cfg.defrost()
cfg.MODEL.DEVICE = "cuda:{}".format(gpuid) if num_gpus > 0 else "cpu"
self.procs.append(
AsyncPredictor._PredictWorker(cfg, self.task_queue, self.result_queue)
)
self.put_idx = 0
self.get_idx = 0
self.result_rank = []
self.result_data = []
for p in self.procs:
p.start()
atexit.register(self.shutdown)
def put(self, image):
self.put_idx += 1
self.task_queue.put((self.put_idx, image))
def get(self):
self.get_idx += 1 # the index needed for this request
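        # Results from the worker processes can come back out of order; early
        # results are parked in (result_rank, result_data), kept sorted by
        # index with bisect, until the result for `get_idx` arrives.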
if len(self.result_rank) and self.result_rank[0] == self.get_idx:
res = self.result_data[0]
del self.result_data[0], self.result_rank[0]
return res
while True:
# make sure the results are returned in the correct order
idx, res = self.result_queue.get()
if idx == self.get_idx:
return res
insert = bisect.bisect(self.result_rank, idx)
self.result_rank.insert(insert, idx)
self.result_data.insert(insert, res)
def __len__(self):
return self.put_idx - self.get_idx
def __call__(self, image):
self.put(image)
return self.get()
def shutdown(self):
for _ in self.procs:
self.task_queue.put(AsyncPredictor._StopToken())
@property
def default_buffer_size(self):
return len(self.procs) * 5 | CutLER-main | cutler/demo/predictor.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
from demo import *
from predictor import *
__all__ = [k for k in globals().keys() if not k.startswith("_")] | CutLER-main | cutler/demo/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Modified by XuDong Wang from https://github.com/facebookresearch/detectron2/blob/main/demo/demo.py
import argparse
import glob
import multiprocessing as mp
import numpy as np
import os
import tempfile
import time
import warnings
import cv2
import tqdm
from detectron2.config import get_cfg
from detectron2.data.detection_utils import read_image
from detectron2.utils.logger import setup_logger
import sys
sys.path.append('./')
sys.path.append('../')
from config import add_cutler_config
from predictor import VisualizationDemo
# constants
WINDOW_NAME = "CutLER detections"
def setup_cfg(args):
# load config from file and command-line arguments
cfg = get_cfg()
add_cutler_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
# Disable the use of SyncBN normalization when running on a CPU
# SyncBN is not supported on CPU and can cause errors, so we switch to BN instead
if cfg.MODEL.DEVICE == 'cpu' and cfg.MODEL.RESNETS.NORM == 'SyncBN':
cfg.MODEL.RESNETS.NORM = "BN"
cfg.MODEL.FPN.NORM = "BN"
# Set score_threshold for builtin models
cfg.MODEL.RETINANET.SCORE_THRESH_TEST = args.confidence_threshold
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.confidence_threshold
cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = args.confidence_threshold
cfg.freeze()
return cfg
def get_parser():
parser = argparse.ArgumentParser(description="Detectron2 demo for builtin configs")
parser.add_argument(
"--config-file",
default="model_zoo/configs/CutLER-ImageNet/cascade_mask_rcnn_R_50_FPN.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument("--webcam", action="store_true", help="Take inputs from webcam.")
parser.add_argument("--video-input", help="Path to video file.")
parser.add_argument(
"--input",
nargs="+",
help="A list of space separated input images; "
"or a single glob pattern such as 'directory/*.jpg'",
)
parser.add_argument(
"--output",
help="A file or directory to save output visualizations. "
"If not given, will show output in an OpenCV window.",
)
parser.add_argument(
"--confidence-threshold",
type=float,
default=0.35,
help="Minimum score for instance predictions to be shown",
)
parser.add_argument(
"--opts",
help="Modify config options using the command-line 'KEY VALUE' pairs",
default=[],
nargs=argparse.REMAINDER,
)
return parser
def test_opencv_video_format(codec, file_ext):
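    # Probe codec support by writing a short throwaway clip to a temporary
    # directory; return True only if the encoded file actually shows up on disk.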
with tempfile.TemporaryDirectory(prefix="video_format_test") as dir:
filename = os.path.join(dir, "test_file" + file_ext)
writer = cv2.VideoWriter(
filename=filename,
fourcc=cv2.VideoWriter_fourcc(*codec),
fps=float(30),
frameSize=(10, 10),
isColor=True,
)
[writer.write(np.zeros((10, 10, 3), np.uint8)) for _ in range(30)]
writer.release()
if os.path.isfile(filename):
return True
return False
if __name__ == "__main__":
mp.set_start_method("spawn", force=True)
args = get_parser().parse_args()
setup_logger(name="fvcore")
logger = setup_logger()
logger.info("Arguments: " + str(args))
cfg = setup_cfg(args)
demo = VisualizationDemo(cfg)
if args.input:
if len(args.input) == 1:
args.input = glob.glob(os.path.expanduser(args.input[0]))
assert args.input, "The input path(s) was not found"
for path in tqdm.tqdm(args.input, disable=not args.output):
# use PIL, to be consistent with evaluation
img = read_image(path, format="BGR")
start_time = time.time()
predictions, visualized_output = demo.run_on_image(img)
logger.info(
"{}: {} in {:.2f}s".format(
path,
"detected {} instances".format(len(predictions["instances"]))
if "instances" in predictions
else "finished",
time.time() - start_time,
)
)
if args.output:
if os.path.isdir(args.output):
assert os.path.isdir(args.output), args.output
out_filename = os.path.join(args.output, os.path.basename(path))
else:
assert len(args.input) == 1, "Please specify a directory with args.output"
out_filename = args.output
visualized_output.save(out_filename)
else:
cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
cv2.imshow(WINDOW_NAME, visualized_output.get_image()[:, :, ::-1])
if cv2.waitKey(0) == 27:
break # esc to quit
elif args.webcam:
assert args.input is None, "Cannot have both --input and --webcam!"
assert args.output is None, "output not yet supported with --webcam!"
cam = cv2.VideoCapture(0)
for vis in tqdm.tqdm(demo.run_on_video(cam)):
cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
cv2.imshow(WINDOW_NAME, vis)
if cv2.waitKey(1) == 27:
break # esc to quit
cam.release()
cv2.destroyAllWindows()
elif args.video_input:
video = cv2.VideoCapture(args.video_input)
width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
frames_per_second = video.get(cv2.CAP_PROP_FPS)
num_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
basename = os.path.basename(args.video_input)
codec, file_ext = (
("x264", ".mkv") if test_opencv_video_format("x264", ".mkv") else ("mp4v", ".mp4")
)
        if codec == "mp4v":
warnings.warn("x264 codec not available, switching to mp4v")
if args.output:
if os.path.isdir(args.output):
output_fname = os.path.join(args.output, basename)
output_fname = os.path.splitext(output_fname)[0] + file_ext
else:
output_fname = args.output
assert not os.path.isfile(output_fname), output_fname
output_file = cv2.VideoWriter(
filename=output_fname,
# some installation of opencv may not support x264 (due to its license),
# you can try other format (e.g. MPEG)
fourcc=cv2.VideoWriter_fourcc(*codec),
fps=float(frames_per_second),
frameSize=(width, height),
isColor=True,
)
assert os.path.isfile(args.video_input)
for vis_frame in tqdm.tqdm(demo.run_on_video(video), total=num_frames):
if args.output:
output_file.write(vis_frame)
else:
cv2.namedWindow(basename, cv2.WINDOW_NORMAL)
cv2.imshow(basename, vis_frame)
if cv2.waitKey(1) == 27:
break # esc to quit
video.release()
if args.output:
output_file.release()
else:
cv2.destroyAllWindows() | CutLER-main | cutler/demo/demo.py |
#!/usr/bin/env python
# Copyright (c) Meta Platforms, Inc. and affiliates.
import os
import json
import tqdm
import torch
import datetime
import argparse
import pycocotools.mask as cocomask
from detectron2.utils.file_io import PathManager
INFO = {
"description": "ImageNet-1K: Self-train",
"url": "",
"version": "1.0",
"year": 2022,
"contributor": "Xudong Wang",
"date_created": datetime.datetime.utcnow().isoformat(' ')
}
LICENSES = [
{
"id": 1,
"name": "Apache License",
"url": "https://github.com/facebookresearch/CutLER/blob/main/LICENSE"
}
]
CATEGORIES = [
{
'id': 1,
'name': 'fg',
'supercategory': 'fg',
},
]
new_dict_filtered = {
"info": INFO,
"licenses": LICENSES,
"categories": CATEGORIES,
"images": [],
"annotations": []
}
category_info = {
"is_crowd": 0,
"id": 1
}
def segmToRLE(segm, h, w):
if isinstance(segm, list):
# polygon -- a single object might consist of multiple parts
# we merge all parts into one mask rle code
rles = cocomask.frPyObjects(segm, h, w)
rle = cocomask.merge(rles)
elif isinstance(segm["counts"], list):
# uncompressed RLE
rle = cocomask.frPyObjects(segm, h, w)
else:
# rle
rle = segm
return rle
def rle2mask(rle, height, width):
if "counts" in rle and isinstance(rle["counts"], list):
        # "counts" stored as a plain Python list means uncompressed RLE;
        # convert it to compressed RLE before decoding.
# Magic RLE format handling painfully discovered by looking at the
# COCO API showAnns function.
rle = cocomask.frPyObjects(rle, height, width)
mask = cocomask.decode(rle)
return mask
def cocosegm2mask(segm, h, w):
rle = segmToRLE(segm, h, w)
mask = rle2mask(rle, h, w)
return mask
def BatchIoU(masks1, masks2):
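    # Pairwise mask IoU: binarize both mask sets, broadcast them to a common
    # (n1, n2, H, W) shape, then count intersection (both True) and union
    # (either True) pixels per pair. Returns an (n1, n2) IoU matrix.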
n1, n2 = masks1.size()[0], masks2.size()[0]
masks1, masks2 = (masks1>0.5).to(torch.bool), (masks2>0.5).to(torch.bool)
masks1_ = masks1[:,None,:,:,].expand(-1, n2, -1, -1)
masks2_ = masks2[None,:,:,:,].expand(n1, -1, -1, -1)
intersection = torch.sum(masks1_ * (masks1_ == masks2_), dim=[-1, -2])
union = torch.sum(masks1_ + masks2_, dim=[-1, -2])
ious = intersection.to(torch.float) / union
return ious
if __name__ == "__main__":
# load model arguments
parser = argparse.ArgumentParser(description='Generate json files for the self-training')
parser.add_argument('--new-pred', type=str,
default='output/inference/coco_instances_results.json',
help='Path to model predictions')
parser.add_argument('--prev-ann', type=str,
default='DETECTRON2_DATASETS/imagenet/annotations/cutler_imagenet1k_train.json',
help='Path to annotations in the previous round')
parser.add_argument('--save-path', type=str,
default='DETECTRON2_DATASETS/imagenet/annotations/cutler_imagenet1k_train_r1.json',
help='Path to save the generated annotation file')
# parser.add_argument('--n-rounds', type=int, default=1,
# help='N-th round of self-training')
parser.add_argument('--threshold', type=float, default=0.7,
help='Confidence score thresholds')
args = parser.parse_args()
# load model predictions
new_pred = args.new_pred
with PathManager.open(new_pred, "r") as f:
predictions = json.load(f)
# filter out low-confidence model predictions
THRESHOLD = args.threshold
pred_image_to_anns = {}
for id, ann in enumerate(predictions):
confidence_score = ann['score']
if confidence_score >= THRESHOLD:
if ann['image_id'] in pred_image_to_anns:
pred_image_to_anns[ann['image_id']].append(ann)
else:
pred_image_to_anns[ann['image_id']] = [ann]
    # load pseudo-masks used by the previous round
pseudo_ann_dict = json.load(open(args.prev_ann))
pseudo_image_list = pseudo_ann_dict['images']
pseudo_annotations = pseudo_ann_dict['annotations']
pseudo_image_to_anns = {}
for id, ann in enumerate(pseudo_annotations):
if ann['image_id'] in pseudo_image_to_anns:
pseudo_image_to_anns[ann['image_id']].append(ann)
else:
pseudo_image_to_anns[ann['image_id']] = [ann]
# merge model predictions and the json file used by the previous round.
merged_anns = []
num_preds, num_pseudo = 0, 0
for k, anns_pseudo in tqdm.tqdm(pseudo_image_to_anns.items()):
masks = []
for ann in anns_pseudo:
segm = ann['segmentation']
mask = cocosegm2mask(segm, segm['size'][0], segm['size'][1])
masks.append(torch.from_numpy(mask))
pseudo_masks = torch.stack(masks, dim=0).cuda()
del masks
num_pseudo += len(anns_pseudo)
try:
anns_pred = pred_image_to_anns[k]
        except KeyError:
merged_anns += anns_pseudo
continue
masks = []
for ann in anns_pred:
segm = ann['segmentation']
mask = cocosegm2mask(segm, segm['size'][0], segm['size'][1])
masks.append(torch.from_numpy(mask))
pred_masks = torch.stack(masks, dim=0).cuda()
num_preds += len(anns_pred)
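        # Keep a previous-round pseudo-annotation only if no new high-confidence
        # prediction overlaps it with IoU >= 0.5; otherwise the new prediction
        # supersedes it. New predictions themselves are always kept.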
try:
ious = BatchIoU(pseudo_masks, pred_masks)
iou_max, _ = ious.max(dim=1)
selected_index = (iou_max < 0.5).nonzero()
selected_pseudo = [anns_pseudo[i] for i in selected_index]
merged_anns += anns_pred + selected_pseudo
# if num_preds % 200000 == 0:
# print(len(merged_anns), num_preds, num_pseudo)
        except Exception:
merged_anns += anns_pseudo
for key in pred_image_to_anns:
if key in pseudo_image_to_anns:
continue
else:
merged_anns += pred_image_to_anns[key]
# re-generate annotation id
ann_id = 1
for ann in merged_anns:
ann['id'] = ann_id
ann['area'] = ann['bbox'][-1] * ann['bbox'][-2]
ann['iscrowd'] = 0
        # RLE "size" is stored as [height, width]
        ann['width'] = ann['segmentation']['size'][1]
        ann['height'] = ann['segmentation']['size'][0]
ann_id += 1
new_dict_filtered['images'] = pseudo_image_list
new_dict_filtered['annotations'] = merged_anns
# save annotation file
# save_path = os.path.join(args.save_path, "cutler_imagenet1k_train_r{}.json".format(args.n_rounds))
json.dump(new_dict_filtered, open(args.save_path, 'w'))
print("Done: {} images; {} anns.".format(len(new_dict_filtered['images']), len(new_dict_filtered['annotations']))) | CutLER-main | cutler/tools/get_self_training_ann.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
from .boxes import pairwise_iou_max_scores
__all__ = [k for k in globals().keys() if not k.startswith("_")]
from detectron2.utils.env import fixup_module_metadata
fixup_module_metadata(__name__, globals(), __all__)
del fixup_module_metadata
| CutLER-main | cutler/structures/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Modified by XuDong Wang from https://github.com/facebookresearch/detectron2/blob/main/detectron2/structures/boxes.py
import torch
def pairwise_iou_max_scores(boxes1: torch.Tensor, boxes2: torch.Tensor) -> torch.Tensor:
"""
    Given two sets of boxes of size N and M, compute the IoU
    (intersection over union) between all N x M pairs of boxes, then return,
    for each box in `boxes1`, its maximum IoU with any box in `boxes2`.
    The box order must be (xmin, ymin, xmax, ymax).
    Args:
        boxes1, boxes2 (Tensor): two tensors of shape (N, 4) and (M, 4).
    Returns:
        Tensor: per-box maximum IoU, sized [N].
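    Example (illustrative values; the first box matches a box in `boxes2`
    exactly, so its maximum IoU is 1.0)::
        boxes1 = torch.tensor([[0., 0., 10., 10.]])
        boxes2 = torch.tensor([[0., 0., 10., 10.], [20., 20., 30., 30.]])
        pairwise_iou_max_scores(boxes1, boxes2)  # -> tensor([1.])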
"""
area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1]) # [N]
area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1]) # [M]
width_height = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) - torch.max(
boxes1[:, None, :2], boxes2[:, :2]
) # [N,M,2]
width_height.clamp_(min=0) # [N,M,2]
inter = width_height.prod(dim=2) # [N,M]
# handle empty boxes
iou = torch.where(
inter > 0,
inter / (area1[:, None] + area2 - inter),
torch.zeros(1, dtype=inter.dtype, device=inter.device),
)
iou_max, _ = torch.max(iou, dim=1)
return iou_max | CutLER-main | cutler/structures/boxes.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
from detectron2.config import CfgNode as CN
def add_cutler_config(cfg):
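    # Keys added on top of the default detectron2 config. Roughly:
    #   - DATALOADER.COPY_PASTE*: options for the copy-paste augmentation
    #     (paste rate, min/max size ratio of pasted instances, whether the
    #     number of pasted instances is randomized, and debug visualization).
    #   - MODEL.ROI_HEADS.USE_DROPLOSS / DROPLOSS_IOU_THRESH: enable DropLoss
    #     and set the IoU threshold below which a prediction's loss is dropped.
    #   - SOLVER.BASE_LR_MULTIPLIER(_NAMES): extra LR factor applied to the
    #     parameter groups whose module names are listed (see solver/build.py).
    #   - TEST.NO_SEGM: skip segmentation evaluation in the custom COCOEvaluator.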
cfg.DATALOADER.COPY_PASTE = False
cfg.DATALOADER.COPY_PASTE_RATE = 0.0
cfg.DATALOADER.COPY_PASTE_MIN_RATIO = 0.5
cfg.DATALOADER.COPY_PASTE_MAX_RATIO = 1.0
cfg.DATALOADER.COPY_PASTE_RANDOM_NUM = True
cfg.DATALOADER.VISUALIZE_COPY_PASTE = False
cfg.MODEL.ROI_HEADS.USE_DROPLOSS = False
cfg.MODEL.ROI_HEADS.DROPLOSS_IOU_THRESH = 0.0
cfg.SOLVER.BASE_LR_MULTIPLIER = 1
cfg.SOLVER.BASE_LR_MULTIPLIER_NAMES = []
cfg.TEST.NO_SEGM = False | CutLER-main | cutler/config/cutler_config.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
from .cutler_config import add_cutler_config | CutLER-main | cutler/config/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Modified by XuDong Wang from https://github.com/facebookresearch/detectron2/blob/main/detectron2/solver/build.py
import copy
import itertools
import logging
from collections import defaultdict
from enum import Enum
from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Type, Union
import torch
from fvcore.common.param_scheduler import CosineParamScheduler, MultiStepParamScheduler
from detectron2.config import CfgNode
from detectron2.solver.lr_scheduler import LRMultiplier, WarmupParamScheduler
_GradientClipperInput = Union[torch.Tensor, Iterable[torch.Tensor]]
_GradientClipper = Callable[[_GradientClipperInput], None]
class GradientClipType(Enum):
VALUE = "value"
NORM = "norm"
def _create_gradient_clipper(cfg: CfgNode) -> _GradientClipper:
"""
Creates gradient clipping closure to clip by value or by norm,
according to the provided config.
"""
cfg = copy.deepcopy(cfg)
def clip_grad_norm(p: _GradientClipperInput):
torch.nn.utils.clip_grad_norm_(p, cfg.CLIP_VALUE, cfg.NORM_TYPE)
def clip_grad_value(p: _GradientClipperInput):
torch.nn.utils.clip_grad_value_(p, cfg.CLIP_VALUE)
_GRADIENT_CLIP_TYPE_TO_CLIPPER = {
GradientClipType.VALUE: clip_grad_value,
GradientClipType.NORM: clip_grad_norm,
}
return _GRADIENT_CLIP_TYPE_TO_CLIPPER[GradientClipType(cfg.CLIP_TYPE)]
def _generate_optimizer_class_with_gradient_clipping(
optimizer: Type[torch.optim.Optimizer],
*,
per_param_clipper: Optional[_GradientClipper] = None,
global_clipper: Optional[_GradientClipper] = None,
) -> Type[torch.optim.Optimizer]:
"""
Dynamically creates a new type that inherits the type of a given instance
and overrides the `step` method to add gradient clipping
"""
assert (
per_param_clipper is None or global_clipper is None
), "Not allowed to use both per-parameter clipping and global clipping"
def optimizer_wgc_step(self, closure=None):
if per_param_clipper is not None:
for group in self.param_groups:
for p in group["params"]:
per_param_clipper(p)
else:
# global clipper for future use with detr
# (https://github.com/facebookresearch/detr/pull/287)
all_params = itertools.chain(*[g["params"] for g in self.param_groups])
global_clipper(all_params)
super(type(self), self).step(closure)
OptimizerWithGradientClip = type(
optimizer.__name__ + "WithGradientClip",
(optimizer,),
{"step": optimizer_wgc_step},
)
return OptimizerWithGradientClip
def maybe_add_gradient_clipping(
cfg: CfgNode, optimizer: Type[torch.optim.Optimizer]
) -> Type[torch.optim.Optimizer]:
"""
If gradient clipping is enabled through config options, wraps the existing
optimizer type to become a new dynamically created class OptimizerWithGradientClip
that inherits the given optimizer and overrides the `step` method to
include gradient clipping.
Args:
cfg: CfgNode, configuration options
optimizer: type. A subclass of torch.optim.Optimizer
Return:
type: either the input `optimizer` (if gradient clipping is disabled), or
a subclass of it with gradient clipping included in the `step` method.
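    Example (sketch; mirrors how ``build_optimizer`` below uses it)::
        opt_cls = maybe_add_gradient_clipping(cfg, torch.optim.SGD)
        optimizer = opt_cls(model.parameters(), lr=0.01, momentum=0.9)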
"""
if not cfg.SOLVER.CLIP_GRADIENTS.ENABLED:
return optimizer
if isinstance(optimizer, torch.optim.Optimizer):
optimizer_type = type(optimizer)
else:
assert issubclass(optimizer, torch.optim.Optimizer), optimizer
optimizer_type = optimizer
grad_clipper = _create_gradient_clipper(cfg.SOLVER.CLIP_GRADIENTS)
OptimizerWithGradientClip = _generate_optimizer_class_with_gradient_clipping(
optimizer_type, per_param_clipper=grad_clipper
)
if isinstance(optimizer, torch.optim.Optimizer):
optimizer.__class__ = OptimizerWithGradientClip # a bit hacky, not recommended
return optimizer
else:
return OptimizerWithGradientClip
def build_optimizer(cfg: CfgNode, model: torch.nn.Module) -> torch.optim.Optimizer:
"""
Build an optimizer from config.
"""
params = get_default_optimizer_params(
model,
base_lr=cfg.SOLVER.BASE_LR,
base_lr_multiplier=cfg.SOLVER.BASE_LR_MULTIPLIER,
base_lr_multiplier_names=cfg.SOLVER.BASE_LR_MULTIPLIER_NAMES,
weight_decay_norm=cfg.SOLVER.WEIGHT_DECAY_NORM,
bias_lr_factor=cfg.SOLVER.BIAS_LR_FACTOR,
weight_decay_bias=cfg.SOLVER.WEIGHT_DECAY_BIAS,
)
return maybe_add_gradient_clipping(cfg, torch.optim.SGD)(
params,
lr=cfg.SOLVER.BASE_LR,
momentum=cfg.SOLVER.MOMENTUM,
nesterov=cfg.SOLVER.NESTEROV,
weight_decay=cfg.SOLVER.WEIGHT_DECAY,
)
def get_default_optimizer_params(
model: torch.nn.Module,
base_lr: Optional[float] = None,
base_lr_multiplier: Optional[float] = 1.0,
base_lr_multiplier_names: Optional[List[str]] = [],
weight_decay: Optional[float] = None,
weight_decay_norm: Optional[float] = None,
bias_lr_factor: Optional[float] = 1.0,
weight_decay_bias: Optional[float] = None,
lr_factor_func: Optional[Callable] = None,
overrides: Optional[Dict[str, Dict[str, float]]] = None,
) -> List[Dict[str, Any]]:
"""
Get default param list for optimizer, with support for a few types of
overrides. If no overrides needed, this is equivalent to `model.parameters()`.
Args:
base_lr: lr for every group by default. Can be omitted to use the one in optimizer.
weight_decay: weight decay for every group by default. Can be omitted to use the one
in optimizer.
weight_decay_norm: override weight decay for params in normalization layers
bias_lr_factor: multiplier of lr for bias parameters.
weight_decay_bias: override weight decay for bias parameters.
lr_factor_func: function to calculate lr decay rate by mapping the parameter names to
corresponding lr decay rate. Note that setting this option requires
also setting ``base_lr``.
overrides: if not `None`, provides values for optimizer hyperparameters
(LR, weight decay) for module parameters with a given name; e.g.
``{"embedding": {"lr": 0.01, "weight_decay": 0.1}}`` will set the LR and
weight decay values for all module parameters named `embedding`.
For common detection models, ``weight_decay_norm`` is the only option
needed to be set. ``bias_lr_factor,weight_decay_bias`` are legacy settings
from Detectron1 that are not found useful.
Example:
::
torch.optim.SGD(get_default_optimizer_params(model, weight_decay_norm=0),
lr=0.01, weight_decay=1e-4, momentum=0.9)
"""
if overrides is None:
overrides = {}
defaults = {}
if base_lr is not None:
defaults["lr"] = base_lr
if weight_decay is not None:
defaults["weight_decay"] = weight_decay
bias_overrides = {}
if bias_lr_factor is not None and bias_lr_factor != 1.0:
# NOTE: unlike Detectron v1, we now by default make bias hyperparameters
# exactly the same as regular weights.
if base_lr is None:
raise ValueError("bias_lr_factor requires base_lr")
bias_overrides["lr"] = base_lr * bias_lr_factor
if weight_decay_bias is not None:
bias_overrides["weight_decay"] = weight_decay_bias
if len(bias_overrides):
if "bias" in overrides:
raise ValueError("Conflicting overrides for 'bias'")
overrides["bias"] = bias_overrides
if lr_factor_func is not None:
if base_lr is None:
raise ValueError("lr_factor_func requires base_lr")
norm_module_types = (
torch.nn.BatchNorm1d,
torch.nn.BatchNorm2d,
torch.nn.BatchNorm3d,
torch.nn.SyncBatchNorm,
# NaiveSyncBatchNorm inherits from BatchNorm2d
torch.nn.GroupNorm,
torch.nn.InstanceNorm1d,
torch.nn.InstanceNorm2d,
torch.nn.InstanceNorm3d,
torch.nn.LayerNorm,
torch.nn.LocalResponseNorm,
)
params: List[Dict[str, Any]] = []
memo: Set[torch.nn.parameter.Parameter] = set()
for module_name, module in model.named_modules():
for module_param_name, value in module.named_parameters(recurse=False):
if not value.requires_grad:
continue
# Avoid duplicating parameters
if value in memo:
continue
memo.add(value)
hyperparams = copy.copy(defaults)
if isinstance(module, norm_module_types) and weight_decay_norm is not None:
hyperparams["weight_decay"] = weight_decay_norm
if lr_factor_func is not None:
hyperparams["lr"] *= lr_factor_func(f"{module_name}.{module_param_name}")
hyperparams.update(overrides.get(module_param_name, {}))
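            # CutLER-specific: parameters of modules listed in
            # cfg.SOLVER.BASE_LR_MULTIPLIER_NAMES have their learning rate
            # scaled by cfg.SOLVER.BASE_LR_MULTIPLIER.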
if module_name in base_lr_multiplier_names:
hyperparams["lr"] *= base_lr_multiplier
# print(" Checked: ", module_name, hyperparams["lr"])
params.append({"params": [value], **hyperparams})
return reduce_param_groups(params)
def _expand_param_groups(params: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
# Transform parameter groups into per-parameter structure.
# Later items in `params` can overwrite parameters set in previous items.
ret = defaultdict(dict)
for item in params:
assert "params" in item
cur_params = {x: y for x, y in item.items() if x != "params"}
for param in item["params"]:
ret[param].update({"params": [param], **cur_params})
return list(ret.values())
def reduce_param_groups(params: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
# Reorganize the parameter groups and merge duplicated groups.
# The number of parameter groups needs to be as small as possible in order
# to efficiently use the PyTorch multi-tensor optimizer. Therefore instead
# of using a parameter_group per single parameter, we reorganize the
# parameter groups and merge duplicated groups. This approach speeds
# up multi-tensor optimizer significantly.
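    # For example, two groups that share identical hyperparameters, such as
    #   [{"params": [p1], "lr": 0.1}, {"params": [p2], "lr": 0.1}],
    # are merged into a single group [{"params": [p1, p2], "lr": 0.1}].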
params = _expand_param_groups(params)
groups = defaultdict(list) # re-group all parameter groups by their hyperparams
for item in params:
cur_params = tuple((x, y) for x, y in item.items() if x != "params")
groups[cur_params].extend(item["params"])
ret = []
for param_keys, param_values in groups.items():
cur = {kv[0]: kv[1] for kv in param_keys}
cur["params"] = param_values
ret.append(cur)
return ret
def build_lr_scheduler(
cfg: CfgNode, optimizer: torch.optim.Optimizer
) -> torch.optim.lr_scheduler._LRScheduler:
"""
Build a LR scheduler from config.
"""
name = cfg.SOLVER.LR_SCHEDULER_NAME
if name == "WarmupMultiStepLR":
steps = [x for x in cfg.SOLVER.STEPS if x <= cfg.SOLVER.MAX_ITER]
if len(steps) != len(cfg.SOLVER.STEPS):
logger = logging.getLogger(__name__)
logger.warning(
"SOLVER.STEPS contains values larger than SOLVER.MAX_ITER. "
"These values will be ignored."
)
sched = MultiStepParamScheduler(
values=[cfg.SOLVER.GAMMA**k for k in range(len(steps) + 1)],
milestones=steps,
num_updates=cfg.SOLVER.MAX_ITER,
)
elif name == "WarmupCosineLR":
end_value = cfg.SOLVER.BASE_LR_END / cfg.SOLVER.BASE_LR
assert end_value >= 0.0 and end_value <= 1.0, end_value
sched = CosineParamScheduler(1, end_value)
else:
raise ValueError("Unknown LR scheduler: {}".format(name))
sched = WarmupParamScheduler(
sched,
cfg.SOLVER.WARMUP_FACTOR,
min(cfg.SOLVER.WARMUP_ITERS / cfg.SOLVER.MAX_ITER, 1.0),
cfg.SOLVER.WARMUP_METHOD,
)
return LRMultiplier(optimizer, multiplier=sched, max_iter=cfg.SOLVER.MAX_ITER)
| CutLER-main | cutler/solver/build.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
from .build import build_lr_scheduler, build_optimizer, get_default_optimizer_params
__all__ = [k for k in globals().keys() if not k.startswith("_")]
| CutLER-main | cutler/solver/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
from .roi_heads import (
ROI_HEADS_REGISTRY,
ROIHeads,
CustomStandardROIHeads,
FastRCNNOutputLayers,
build_roi_heads,
)
from .roi_heads.custom_cascade_rcnn import CustomCascadeROIHeads
from .roi_heads.fast_rcnn import FastRCNNOutputLayers
from .meta_arch.rcnn import GeneralizedRCNN, ProposalNetwork
from .meta_arch.build import build_model
_EXCLUDE = {"ShapeSpec"}
__all__ = [k for k in globals().keys() if k not in _EXCLUDE and not k.startswith("_")] | CutLER-main | cutler/modeling/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Modified by XuDong Wang from https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/meta_arch/build.py
import torch
from detectron2.utils.logger import _log_api_usage
from detectron2.utils.registry import Registry
META_ARCH_REGISTRY = Registry("META_ARCH") # noqa F401 isort:skip
META_ARCH_REGISTRY.__doc__ = """
Registry for meta-architectures, i.e. the whole model.
The registered object will be called with `obj(cfg)`
and expected to return a `nn.Module` object.
"""
def build_model(cfg):
"""
Build the whole model architecture, defined by ``cfg.MODEL.META_ARCHITECTURE``.
Note that it does not load any weights from ``cfg``.
"""
meta_arch = cfg.MODEL.META_ARCHITECTURE
model = META_ARCH_REGISTRY.get(meta_arch)(cfg)
model.to(torch.device(cfg.MODEL.DEVICE))
_log_api_usage("modeling.meta_arch." + meta_arch)
return model
| CutLER-main | cutler/modeling/meta_arch/build.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Modified by XuDong Wang from https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/meta_arch/rcnn.py
import logging
import numpy as np
from typing import Dict, List, Optional, Tuple
import torch
from torch import nn
from detectron2.config import configurable
from detectron2.data.detection_utils import convert_image_to_rgb
from detectron2.layers import move_device_like
from detectron2.structures import ImageList, Instances
from detectron2.utils.events import get_event_storage
from detectron2.utils.logger import log_first_n
from detectron2.modeling.backbone import Backbone, build_backbone
from detectron2.modeling.postprocessing import detector_postprocess
from detectron2.modeling.proposal_generator import build_proposal_generator
from ..roi_heads import build_roi_heads
from .build import META_ARCH_REGISTRY
__all__ = ["GeneralizedRCNN", "ProposalNetwork"]
@META_ARCH_REGISTRY.register()
class GeneralizedRCNN(nn.Module):
"""
Generalized R-CNN. Any models that contains the following three components:
1. Per-image feature extraction (aka backbone)
2. Region proposal generation
3. Per-region feature extraction and prediction
"""
@configurable
def __init__(
self,
*,
backbone: Backbone,
proposal_generator: nn.Module,
roi_heads: nn.Module,
pixel_mean: Tuple[float],
pixel_std: Tuple[float],
input_format: Optional[str] = None,
vis_period: int = 0,
):
"""
Args:
backbone: a backbone module, must follow detectron2's backbone interface
proposal_generator: a module that generates proposals using backbone features
roi_heads: a ROI head that performs per-region computation
pixel_mean, pixel_std: list or tuple with #channels element, representing
the per-channel mean and std to be used to normalize the input image
input_format: describe the meaning of channels of input. Needed by visualization
vis_period: the period to run visualization. Set to 0 to disable.
"""
super().__init__()
self.backbone = backbone
self.proposal_generator = proposal_generator
self.roi_heads = roi_heads
self.input_format = input_format
self.vis_period = vis_period
if vis_period > 0:
assert input_format is not None, "input_format is required for visualization!"
self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False)
self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False)
assert (
self.pixel_mean.shape == self.pixel_std.shape
), f"{self.pixel_mean} and {self.pixel_std} have different shapes!"
@classmethod
def from_config(cls, cfg):
backbone = build_backbone(cfg)
return {
"backbone": backbone,
"proposal_generator": build_proposal_generator(cfg, backbone.output_shape()),
"roi_heads": build_roi_heads(cfg, backbone.output_shape()),
"input_format": cfg.INPUT.FORMAT,
"vis_period": cfg.VIS_PERIOD,
"pixel_mean": cfg.MODEL.PIXEL_MEAN,
"pixel_std": cfg.MODEL.PIXEL_STD,
}
@property
def device(self):
return self.pixel_mean.device
def _move_to_current_device(self, x):
return move_device_like(x, self.pixel_mean)
def visualize_training(self, batched_inputs, proposals):
"""
A function used to visualize images and proposals. It shows ground truth
bounding boxes on the original image and up to 20 top-scoring predicted
object proposals on the original image. Users can implement different
visualization functions for different models.
Args:
batched_inputs (list): a list that contains input to the model.
proposals (list): a list that contains predicted proposals. Both
batched_inputs and proposals should have the same length.
"""
from detectron2.utils.visualizer import Visualizer
storage = get_event_storage()
max_vis_prop = 20
for input, prop in zip(batched_inputs, proposals):
img = input["image"]
img = convert_image_to_rgb(img.permute(1, 2, 0), self.input_format)
v_gt = Visualizer(img, None)
v_gt = v_gt.overlay_instances(boxes=input["instances"].gt_boxes)
anno_img = v_gt.get_image()
box_size = min(len(prop.proposal_boxes), max_vis_prop)
v_pred = Visualizer(img, None)
v_pred = v_pred.overlay_instances(
boxes=prop.proposal_boxes[0:box_size].tensor.cpu().numpy()
)
prop_img = v_pred.get_image()
vis_img = np.concatenate((anno_img, prop_img), axis=1)
vis_img = vis_img.transpose(2, 0, 1)
vis_name = "Left: GT bounding boxes; Right: Predicted proposals"
storage.put_image(vis_name, vis_img)
break # only visualize one image in a batch
def forward(self, batched_inputs: List[Dict[str, torch.Tensor]]):
"""
Args:
batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
Each item in the list contains the inputs for one image.
For now, each item in the list is a dict that contains:
* image: Tensor, image in (C, H, W) format.
* instances (optional): groundtruth :class:`Instances`
* proposals (optional): :class:`Instances`, precomputed proposals.
Other information that's included in the original dicts, such as:
* "height", "width" (int): the output resolution of the model, used in inference.
See :meth:`postprocess` for details.
Returns:
list[dict]:
Each dict is the output for one input image.
The dict contains one key "instances" whose value is a :class:`Instances`.
The :class:`Instances` object has the following keys:
"pred_boxes", "pred_classes", "scores", "pred_masks", "pred_keypoints"
"""
if not self.training:
return self.inference(batched_inputs)
images = self.preprocess_image(batched_inputs)
if "instances" in batched_inputs[0]:
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
else:
gt_instances = None
features = self.backbone(images.tensor)
if self.proposal_generator is not None:
proposals, proposal_losses = self.proposal_generator(images, features, gt_instances)
else:
assert "proposals" in batched_inputs[0]
proposals = [x["proposals"].to(self.device) for x in batched_inputs]
proposal_losses = {}
_, detector_losses = self.roi_heads(images, features, proposals, gt_instances)
if self.vis_period > 0:
storage = get_event_storage()
if storage.iter % self.vis_period == 0:
self.visualize_training(batched_inputs, proposals)
losses = {}
losses.update(detector_losses)
losses.update(proposal_losses)
return losses
def inference(
self,
batched_inputs: List[Dict[str, torch.Tensor]],
detected_instances: Optional[List[Instances]] = None,
do_postprocess: bool = True,
):
"""
Run inference on the given inputs.
Args:
batched_inputs (list[dict]): same as in :meth:`forward`
detected_instances (None or list[Instances]): if not None, it
contains an `Instances` object per image. The `Instances`
object contains "pred_boxes" and "pred_classes" which are
known boxes in the image.
The inference will then skip the detection of bounding boxes,
and only predict other per-ROI outputs.
do_postprocess (bool): whether to apply post-processing on the outputs.
Returns:
When do_postprocess=True, same as in :meth:`forward`.
Otherwise, a list[Instances] containing raw network outputs.
"""
assert not self.training
images = self.preprocess_image(batched_inputs)
features = self.backbone(images.tensor)
if detected_instances is None:
if self.proposal_generator is not None:
proposals, _ = self.proposal_generator(images, features, None)
else:
assert "proposals" in batched_inputs[0]
proposals = [x["proposals"].to(self.device) for x in batched_inputs]
results, _ = self.roi_heads(images, features, proposals, None)
else:
detected_instances = [x.to(self.device) for x in detected_instances]
results = self.roi_heads.forward_with_given_boxes(features, detected_instances)
if do_postprocess:
assert not torch.jit.is_scripting(), "Scripting is not supported for postprocess."
return GeneralizedRCNN._postprocess(results, batched_inputs, images.image_sizes)
else:
return results
def preprocess_image(self, batched_inputs: List[Dict[str, torch.Tensor]]):
"""
Normalize, pad and batch the input images.
"""
images = [self._move_to_current_device(x["image"]) for x in batched_inputs]
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(
images,
self.backbone.size_divisibility,
padding_constraints=self.backbone.padding_constraints,
)
return images
@staticmethod
def _postprocess(instances, batched_inputs: List[Dict[str, torch.Tensor]], image_sizes):
"""
Rescale the output instances to the target size.
"""
# note: private function; subject to changes
processed_results = []
for results_per_image, input_per_image, image_size in zip(
instances, batched_inputs, image_sizes
):
height = input_per_image.get("height", image_size[0])
width = input_per_image.get("width", image_size[1])
r = detector_postprocess(results_per_image, height, width)
processed_results.append({"instances": r})
return processed_results
@META_ARCH_REGISTRY.register()
class ProposalNetwork(nn.Module):
"""
A meta architecture that only predicts object proposals.
"""
@configurable
def __init__(
self,
*,
backbone: Backbone,
proposal_generator: nn.Module,
pixel_mean: Tuple[float],
pixel_std: Tuple[float],
):
"""
Args:
backbone: a backbone module, must follow detectron2's backbone interface
proposal_generator: a module that generates proposals using backbone features
pixel_mean, pixel_std: list or tuple with #channels element, representing
the per-channel mean and std to be used to normalize the input image
"""
super().__init__()
self.backbone = backbone
self.proposal_generator = proposal_generator
self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False)
self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False)
@classmethod
def from_config(cls, cfg):
backbone = build_backbone(cfg)
return {
"backbone": backbone,
"proposal_generator": build_proposal_generator(cfg, backbone.output_shape()),
"pixel_mean": cfg.MODEL.PIXEL_MEAN,
"pixel_std": cfg.MODEL.PIXEL_STD,
}
@property
def device(self):
return self.pixel_mean.device
def _move_to_current_device(self, x):
return move_device_like(x, self.pixel_mean)
def forward(self, batched_inputs):
"""
Args:
Same as in :class:`GeneralizedRCNN.forward`
Returns:
list[dict]:
Each dict is the output for one input image.
The dict contains one key "proposals" whose value is a
:class:`Instances` with keys "proposal_boxes" and "objectness_logits".
"""
images = [self._move_to_current_device(x["image"]) for x in batched_inputs]
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(
images,
self.backbone.size_divisibility,
padding_constraints=self.backbone.padding_constraints,
)
features = self.backbone(images.tensor)
if "instances" in batched_inputs[0]:
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
elif "targets" in batched_inputs[0]:
log_first_n(
logging.WARN, "'targets' in the model inputs is now renamed to 'instances'!", n=10
)
gt_instances = [x["targets"].to(self.device) for x in batched_inputs]
else:
gt_instances = None
proposals, proposal_losses = self.proposal_generator(images, features, gt_instances)
# In training, the proposals are not useful at all but we generate them anyway.
# This makes RPN-only models about 5% slower.
if self.training:
return proposal_losses
processed_results = []
for results_per_image, input_per_image, image_size in zip(
proposals, batched_inputs, images.image_sizes
):
height = input_per_image.get("height", image_size[0])
width = input_per_image.get("width", image_size[1])
r = detector_postprocess(results_per_image, height, width)
processed_results.append({"proposals": r})
return processed_results
| CutLER-main | cutler/modeling/meta_arch/rcnn.py |
# -*- coding: utf-8 -*-
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Modified by XuDong Wang from https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/meta_arch/__init__.py
from .build import META_ARCH_REGISTRY, build_model # isort:skip
__all__ = list(globals().keys())
| CutLER-main | cutler/modeling/meta_arch/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Modified by XuDong Wang from https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/roi_heads/cascade_rcnn.py
from typing import List
import torch
from torch import nn
from torch.autograd.function import Function
from detectron2.config import configurable
from detectron2.layers import ShapeSpec
from detectron2.structures import Boxes, pairwise_iou
from structures import pairwise_iou_max_scores
from detectron2.structures import Instances
from detectron2.utils.events import get_event_storage
from detectron2.modeling.box_regression import Box2BoxTransform
from detectron2.modeling.matcher import Matcher
from detectron2.modeling.poolers import ROIPooler
from detectron2.modeling.roi_heads.box_head import build_box_head
from .fast_rcnn import FastRCNNOutputLayers, fast_rcnn_inference
from .roi_heads import ROI_HEADS_REGISTRY, CustomStandardROIHeads
import torch.nn.functional as F
class _ScaleGradient(Function):
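    """
    Identity in the forward pass; scales the incoming gradient by a constant
    factor in the backward pass. Used in `_run_stage` to divide feature
    gradients by the number of cascade stages.
    """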
@staticmethod
def forward(ctx, input, scale):
ctx.scale = scale
return input
@staticmethod
def backward(ctx, grad_output):
return grad_output * ctx.scale, None
@ROI_HEADS_REGISTRY.register()
class CustomCascadeROIHeads(CustomStandardROIHeads):
"""
    The ROI heads that implement :paper:`Cascade R-CNN`, extended with the
    DropLoss weighting used by CutLER (see :meth:`_forward_box`).
"""
@configurable
def __init__(
self,
*,
box_in_features: List[str],
box_pooler: ROIPooler,
box_heads: List[nn.Module],
box_predictors: List[nn.Module],
proposal_matchers: List[Matcher],
**kwargs,
):
"""
NOTE: this interface is experimental.
Args:
box_pooler (ROIPooler): pooler that extracts region features from given boxes
box_heads (list[nn.Module]): box head for each cascade stage
box_predictors (list[nn.Module]): box predictor for each cascade stage
proposal_matchers (list[Matcher]): matcher with different IoU thresholds to
match boxes with ground truth for each stage. The first matcher matches
RPN proposals with ground truth, the other matchers use boxes predicted
by the previous stage as proposals and match them with ground truth.
"""
assert "proposal_matcher" not in kwargs, (
"CustomCascadeROIHeads takes 'proposal_matchers=' for each stage instead "
"of one 'proposal_matcher='."
)
# The first matcher matches RPN proposals with ground truth, done in the base class
kwargs["proposal_matcher"] = proposal_matchers[0]
num_stages = self.num_cascade_stages = len(box_heads)
box_heads = nn.ModuleList(box_heads)
box_predictors = nn.ModuleList(box_predictors)
assert len(box_predictors) == num_stages, f"{len(box_predictors)} != {num_stages}!"
assert len(proposal_matchers) == num_stages, f"{len(proposal_matchers)} != {num_stages}!"
super().__init__(
box_in_features=box_in_features,
box_pooler=box_pooler,
box_head=box_heads,
box_predictor=box_predictors,
**kwargs,
)
self.proposal_matchers = proposal_matchers
@classmethod
def from_config(cls, cfg, input_shape):
ret = super().from_config(cfg, input_shape)
ret.pop("proposal_matcher")
return ret
@classmethod
def _init_box_head(cls, cfg, input_shape):
# fmt: off
in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES
pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features)
sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
cascade_bbox_reg_weights = cfg.MODEL.ROI_BOX_CASCADE_HEAD.BBOX_REG_WEIGHTS
cascade_ious = cfg.MODEL.ROI_BOX_CASCADE_HEAD.IOUS
assert len(cascade_bbox_reg_weights) == len(cascade_ious)
assert cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG, \
"CustomCascadeROIHeads only support class-agnostic regression now!"
assert cascade_ious[0] == cfg.MODEL.ROI_HEADS.IOU_THRESHOLDS[0]
# fmt: on
in_channels = [input_shape[f].channels for f in in_features]
# Check all channel counts are equal
assert len(set(in_channels)) == 1, in_channels
in_channels = in_channels[0]
box_pooler = ROIPooler(
output_size=pooler_resolution,
scales=pooler_scales,
sampling_ratio=sampling_ratio,
pooler_type=pooler_type,
)
pooled_shape = ShapeSpec(
channels=in_channels, width=pooler_resolution, height=pooler_resolution
)
box_heads, box_predictors, proposal_matchers = [], [], []
for match_iou, bbox_reg_weights in zip(cascade_ious, cascade_bbox_reg_weights):
box_head = build_box_head(cfg, pooled_shape)
box_heads.append(box_head)
box_predictors.append(
FastRCNNOutputLayers(
cfg,
box_head.output_shape,
box2box_transform=Box2BoxTransform(weights=bbox_reg_weights),
)
)
proposal_matchers.append(Matcher([match_iou], [0, 1], allow_low_quality_matches=False))
return {
"box_in_features": in_features,
"box_pooler": box_pooler,
"box_heads": box_heads,
"box_predictors": box_predictors,
"proposal_matchers": proposal_matchers,
}
def forward(self, images, features, proposals, targets=None):
del images
if self.training:
proposals = self.label_and_sample_proposals(proposals, targets)
if self.training:
# Need targets to box head
losses = self._forward_box(features, proposals, targets)
losses.update(self._forward_mask(features, proposals))
losses.update(self._forward_keypoint(features, proposals))
return proposals, losses
else:
pred_instances = self._forward_box(features, proposals)
pred_instances = self.forward_with_given_boxes(features, pred_instances)
return pred_instances, {}
def _forward_box(self, features, proposals, targets=None):
"""
Args:
features, targets: the same as in
Same as in :meth:`ROIHeads.forward`.
proposals (list[Instances]): the per-image object proposals with
their matching ground truth.
Each has fields "proposal_boxes", and "objectness_logits",
"gt_classes", "gt_boxes".
"""
features = [features[f] for f in self.box_in_features]
head_outputs = [] # (predictor, predictions, proposals)
prev_pred_boxes = None
image_sizes = [x.image_size for x in proposals]
for k in range(self.num_cascade_stages):
if k > 0:
# The output boxes of the previous stage are used to create the input
# proposals of the next stage.
proposals = self._create_proposals_from_boxes(prev_pred_boxes, image_sizes)
if self.training:
proposals = self._match_and_label_boxes(proposals, k, targets)
predictions = self._run_stage(features, proposals, k)
prev_pred_boxes = self.box_predictor[k].predict_boxes(predictions, proposals)
head_outputs.append((self.box_predictor[k], predictions, proposals))
no_gt_found = False
if self.training:
losses = {}
storage = get_event_storage()
for stage, (predictor, predictions, proposals) in enumerate(head_outputs):
no_gt_found = False
with storage.name_scope("stage{}".format(stage)):
if self.use_droploss:
try:
box_num_list = [len(x.gt_boxes) for x in proposals]
gt_num_list = [torch.unique(x.gt_boxes.tensor[:100], dim=0).size()[0] for x in proposals]
except:
box_num_list = [0 for x in proposals]
gt_num_list = [0 for x in proposals]
no_gt_found = True
if not no_gt_found:
# NOTE: confidence score
prediction_score, predictions_delta = predictions[0], predictions[1]
prediction_score = F.softmax(prediction_score, dim=1)[:,0]
# NOTE: maximum overlapping with GT (IoU)
proposal_boxes = Boxes.cat([x.proposal_boxes for x in proposals])
predictions_bbox = predictor.box2box_transform.apply_deltas(predictions_delta, proposal_boxes.tensor)
idx_start = 0
iou_max_list = []
for idx, x in enumerate(proposals):
idx_end = idx_start + box_num_list[idx]
iou_max_list.append(pairwise_iou_max_scores(predictions_bbox[idx_start:idx_end], x.gt_boxes[:gt_num_list[idx]].tensor))
idx_start = idx_end
iou_max = torch.cat(iou_max_list, dim=0)
# NOTE: get the weight of each proposal
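                            # DropLoss: keep the loss only for predictions whose
                            # maximum IoU with a (pseudo) ground-truth box exceeds
                            # droploss_iou_thresh; low-overlap predictions get zero
                            # weight so the model is not penalized for objects the
                            # pseudo-masks missed.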
weights = iou_max.le(self.droploss_iou_thresh).float()
weights = 1 - weights.ge(1.0).float()
stage_losses = predictor.losses(predictions, proposals, weights=weights.detach())
else:
stage_losses = predictor.losses(predictions, proposals)
else:
stage_losses = predictor.losses(predictions, proposals)
losses.update({k + "_stage{}".format(stage): v for k, v in stage_losses.items()})
return losses
else:
# Each is a list[Tensor] of length #image. Each tensor is Ri x (K+1)
scores_per_stage = [h[0].predict_probs(h[1], h[2]) for h in head_outputs]
# Average the scores across heads
scores = [
sum(list(scores_per_image)) * (1.0 / self.num_cascade_stages)
for scores_per_image in zip(*scores_per_stage)
]
# Use the boxes of the last head
predictor, predictions, proposals = head_outputs[-1]
boxes = predictor.predict_boxes(predictions, proposals)
pred_instances, _ = fast_rcnn_inference(
boxes,
scores,
image_sizes,
predictor.test_score_thresh,
predictor.test_nms_thresh,
predictor.test_topk_per_image,
)
return pred_instances
@torch.no_grad()
def _match_and_label_boxes(self, proposals, stage, targets):
"""
Match proposals with groundtruth using the matcher at the given stage.
Label the proposals as foreground or background based on the match.
Args:
proposals (list[Instances]): One Instances for each image, with
the field "proposal_boxes".
stage (int): the current stage
targets (list[Instances]): the ground truth instances
Returns:
list[Instances]: the same proposals, but with fields "gt_classes" and "gt_boxes"
"""
num_fg_samples, num_bg_samples = [], []
for proposals_per_image, targets_per_image in zip(proposals, targets):
match_quality_matrix = pairwise_iou(
targets_per_image.gt_boxes, proposals_per_image.proposal_boxes
)
# proposal_labels are 0 or 1
matched_idxs, proposal_labels = self.proposal_matchers[stage](match_quality_matrix)
if len(targets_per_image) > 0:
gt_classes = targets_per_image.gt_classes[matched_idxs]
# Label unmatched proposals (0 label from matcher) as background (label=num_classes)
gt_classes[proposal_labels == 0] = self.num_classes
gt_boxes = targets_per_image.gt_boxes[matched_idxs]
else:
gt_classes = torch.zeros_like(matched_idxs) + self.num_classes
gt_boxes = Boxes(
targets_per_image.gt_boxes.tensor.new_zeros((len(proposals_per_image), 4))
)
proposals_per_image.gt_classes = gt_classes
proposals_per_image.gt_boxes = gt_boxes
num_fg_samples.append((proposal_labels == 1).sum().item())
num_bg_samples.append(proposal_labels.numel() - num_fg_samples[-1])
# Log the number of fg/bg samples in each stage
storage = get_event_storage()
storage.put_scalar(
"stage{}/roi_head/num_fg_samples".format(stage),
sum(num_fg_samples) / len(num_fg_samples),
)
storage.put_scalar(
"stage{}/roi_head/num_bg_samples".format(stage),
sum(num_bg_samples) / len(num_bg_samples),
)
return proposals
def _run_stage(self, features, proposals, stage):
"""
Args:
features (list[Tensor]): #lvl input features to ROIHeads
proposals (list[Instances]): #image Instances, with the field "proposal_boxes"
stage (int): the current stage
Returns:
Same output as `FastRCNNOutputLayers.forward()`.
"""
box_features = self.box_pooler(features, [x.proposal_boxes for x in proposals])
        # The original implementation averages the losses among heads,
        # but scales up the parameter gradients of the heads.
        # This is equivalent to adding the losses among heads,
        # but scaling down the gradients on features.
if self.training:
box_features = _ScaleGradient.apply(box_features, 1.0 / self.num_cascade_stages)
box_features = self.box_head[stage](box_features)
return self.box_predictor[stage](box_features)
def _create_proposals_from_boxes(self, boxes, image_sizes):
"""
Args:
boxes (list[Tensor]): per-image predicted boxes, each of shape Ri x 4
image_sizes (list[tuple]): list of image shapes in (h, w)
Returns:
list[Instances]: per-image proposals with the given boxes.
"""
# Just like RPN, the proposals should not have gradients
boxes = [Boxes(b.detach()) for b in boxes]
proposals = []
for boxes_per_image, image_size in zip(boxes, image_sizes):
boxes_per_image.clip(image_size)
if self.training:
# do not filter empty boxes at inference time,
# because the scores from each stage need to be aligned and added later
boxes_per_image = boxes_per_image[boxes_per_image.nonempty()]
prop = Instances(image_size)
prop.proposal_boxes = boxes_per_image
proposals.append(prop)
return proposals
| CutLER-main | cutler/modeling/roi_heads/custom_cascade_rcnn.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Modified by XuDong Wang from https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/roi_heads/fast_rcnn.py
import logging
from typing import Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.data.detection_utils import get_fed_loss_cls_weights
from detectron2.layers import ShapeSpec, batched_nms, cat, cross_entropy, nonzero_tuple
from detectron2.modeling.box_regression import Box2BoxTransform, _dense_box_regression_loss
from detectron2.structures import Instances, Boxes
from detectron2.utils.events import get_event_storage
from torch.nn import Parameter
__all__ = ["fast_rcnn_inference", "FastRCNNOutputLayers"]
logger = logging.getLogger(__name__)
"""
Shape shorthand in this module:
N: number of images in the minibatch
R: number of ROIs, combined over all images, in the minibatch
Ri: number of ROIs in image i
    K: number of foreground classes. E.g., there are 80 foreground classes in COCO.
Naming convention:
deltas: refers to the 4-d (dx, dy, dw, dh) deltas that parameterize the box2box
transform (see :class:`box_regression.Box2BoxTransform`).
pred_class_logits: predicted class scores in [-inf, +inf]; use
softmax(pred_class_logits) to estimate P(class).
gt_classes: ground-truth classification labels in [0, K], where [0, K) represent
foreground object classes and K represents the background class.
pred_proposal_deltas: predicted box2box transform deltas for transforming proposals
to detection box predictions.
gt_proposal_deltas: ground-truth box2box transform deltas
"""
def fast_rcnn_inference(
boxes: List[torch.Tensor],
scores: List[torch.Tensor],
image_shapes: List[Tuple[int, int]],
score_thresh: float,
nms_thresh: float,
topk_per_image: int,
):
"""
Call `fast_rcnn_inference_single_image` for all images.
Args:
boxes (list[Tensor]): A list of Tensors of predicted class-specific or class-agnostic
boxes for each image. Element i has shape (Ri, K * 4) if doing
class-specific regression, or (Ri, 4) if doing class-agnostic
regression, where Ri is the number of predicted objects for image i.
This is compatible with the output of :meth:`FastRCNNOutputLayers.predict_boxes`.
scores (list[Tensor]): A list of Tensors of predicted class scores for each image.
Element i has shape (Ri, K + 1), where Ri is the number of predicted objects
for image i. Compatible with the output of :meth:`FastRCNNOutputLayers.predict_probs`.
        image_shapes (list[tuple]): A list of (height, width) tuples for each image in the batch.
score_thresh (float): Only return detections with a confidence score exceeding this
threshold.
nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1].
topk_per_image (int): The number of top scoring detections to return. Set < 0 to return
all detections.
Returns:
instances: (list[Instances]): A list of N instances, one for each image in the batch,
            that stores the top-k most confident detections.
kept_indices: (list[Tensor]): A list of 1D tensor of length of N, each element indicates
the corresponding boxes/scores index in [0, Ri) from the input, for image i.
"""
result_per_image = [
fast_rcnn_inference_single_image(
boxes_per_image, scores_per_image, image_shape, score_thresh, nms_thresh, topk_per_image
)
for scores_per_image, boxes_per_image, image_shape in zip(scores, boxes, image_shapes)
]
return [x[0] for x in result_per_image], [x[1] for x in result_per_image]
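# Illustrative usage sketch (not executed by this module; R1/R2/K below are hypothetical
# placeholder sizes):
#   pred_instances, kept_indices = fast_rcnn_inference(
#       boxes=[torch.rand(R1, K * 4), torch.rand(R2, K * 4)],
#       scores=[torch.rand(R1, K + 1), torch.rand(R2, K + 1)],
#       image_shapes=[(480, 640), (800, 1333)],
#       score_thresh=0.05, nms_thresh=0.5, topk_per_image=100,
#   )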
def _log_classification_stats(pred_logits, gt_classes, prefix="fast_rcnn"):
"""
Log the classification metrics to EventStorage.
Args:
pred_logits: Rx(K+1) logits. The last column is for background class.
gt_classes: R labels
"""
num_instances = gt_classes.numel()
if num_instances == 0:
return
pred_classes = pred_logits.argmax(dim=1)
bg_class_ind = pred_logits.shape[1] - 1
fg_inds = (gt_classes >= 0) & (gt_classes < bg_class_ind)
num_fg = fg_inds.nonzero().numel()
fg_gt_classes = gt_classes[fg_inds]
fg_pred_classes = pred_classes[fg_inds]
num_false_negative = (fg_pred_classes == bg_class_ind).nonzero().numel()
num_accurate = (pred_classes == gt_classes).nonzero().numel()
fg_num_accurate = (fg_pred_classes == fg_gt_classes).nonzero().numel()
storage = get_event_storage()
storage.put_scalar(f"{prefix}/cls_accuracy", num_accurate / num_instances)
if num_fg > 0:
storage.put_scalar(f"{prefix}/fg_cls_accuracy", fg_num_accurate / num_fg)
storage.put_scalar(f"{prefix}/false_negative", num_false_negative / num_fg)
def fast_rcnn_inference_single_image(
boxes,
scores,
image_shape: Tuple[int, int],
score_thresh: float,
nms_thresh: float,
topk_per_image: int,
):
"""
Single-image inference. Return bounding-box detection results by thresholding
on scores and applying non-maximum suppression (NMS).
Args:
Same as `fast_rcnn_inference`, but with boxes, scores, and image shapes
per image.
Returns:
Same as `fast_rcnn_inference`, but for only one image.
"""
valid_mask = torch.isfinite(boxes).all(dim=1) & torch.isfinite(scores).all(dim=1)
if not valid_mask.all():
boxes = boxes[valid_mask]
scores = scores[valid_mask]
scores = scores[:, :-1]
num_bbox_reg_classes = boxes.shape[1] // 4
# Convert to Boxes to use the `clip` function ...
boxes = Boxes(boxes.reshape(-1, 4))
boxes.clip(image_shape)
boxes = boxes.tensor.view(-1, num_bbox_reg_classes, 4) # R x C x 4
# 1. Filter results based on detection scores. It can make NMS more efficient
# by filtering out low-confidence detections.
filter_mask = scores > score_thresh # R x K
# R' x 2. First column contains indices of the R predictions;
# Second column contains indices of classes.
filter_inds = filter_mask.nonzero()
if num_bbox_reg_classes == 1:
boxes = boxes[filter_inds[:, 0], 0]
else:
boxes = boxes[filter_mask]
scores = scores[filter_mask]
# 2. Apply NMS for each class independently.
keep = batched_nms(boxes, scores, filter_inds[:, 1], nms_thresh)
if topk_per_image >= 0:
keep = keep[:topk_per_image]
boxes, scores, filter_inds = boxes[keep], scores[keep], filter_inds[keep]
result = Instances(image_shape)
result.pred_boxes = Boxes(boxes)
result.scores = scores
result.pred_classes = filter_inds[:, 1]
return result, filter_inds[:, 0]
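# NormedLinear below implements a cosine-similarity ("normalized") classifier: the input
# features and the weight columns are both L2-normalized before the matrix product, so the
# logits lie in [-1, 1]. Note (observation from this file): it is not used by the default
# FastRCNNOutputLayers below, which keeps a plain nn.Linear classification head.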
class NormedLinear(nn.Module):
def __init__(self, in_features, out_features):
super(NormedLinear, self).__init__()
self.weight = Parameter(torch.Tensor(in_features, out_features))
self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-5).mul_(1e5)
def forward(self, x):
out = F.normalize(x, dim=1).mm(F.normalize(self.weight, dim=0))
return out
class FastRCNNOutputLayers(nn.Module):
"""
Two linear layers for predicting Fast R-CNN outputs:
1. proposal-to-detection box regression deltas
2. classification scores
"""
@configurable
def __init__(
self,
input_shape: ShapeSpec,
*,
box2box_transform,
num_classes: int,
test_score_thresh: float = 0.0,
test_nms_thresh: float = 0.5,
test_topk_per_image: int = 100,
cls_agnostic_bbox_reg: bool = False,
smooth_l1_beta: float = 0.0,
box_reg_loss_type: str = "smooth_l1",
loss_weight: Union[float, Dict[str, float]] = 1.0,
use_fed_loss: bool = False,
use_sigmoid_ce: bool = False,
get_fed_loss_cls_weights: Optional[Callable] = None,
fed_loss_num_classes: int = 50,
):
"""
NOTE: this interface is experimental.
Args:
input_shape (ShapeSpec): shape of the input feature to this module
box2box_transform (Box2BoxTransform or Box2BoxTransformRotated):
num_classes (int): number of foreground classes
            test_score_thresh (float): threshold to filter prediction results.
test_nms_thresh (float): NMS threshold for prediction results.
test_topk_per_image (int): number of top predictions to produce per image.
cls_agnostic_bbox_reg (bool): whether to use class agnostic for bbox regression
smooth_l1_beta (float): transition point from L1 to L2 loss. Only used if
`box_reg_loss_type` is "smooth_l1"
box_reg_loss_type (str): Box regression loss type. One of: "smooth_l1", "giou",
"diou", "ciou"
loss_weight (float|dict): weights to use for losses. Can be single float for weighting
all losses, or a dict of individual weightings. Valid dict keys are:
* "loss_cls": applied to classification loss
* "loss_box_reg": applied to box regression loss
use_fed_loss (bool): whether to use federated loss which samples additional negative
classes to calculate the loss
use_sigmoid_ce (bool): whether to calculate the loss using weighted average of binary
cross entropy with logits. This could be used together with federated loss
get_fed_loss_cls_weights (Callable): a callable which takes dataset name and frequency
weight power, and returns the probabilities to sample negative classes for
federated loss. The implementation can be found in
detectron2/data/detection_utils.py
fed_loss_num_classes (int): number of federated classes to keep in total
"""
super().__init__()
if isinstance(input_shape, int): # some backward compatibility
input_shape = ShapeSpec(channels=input_shape)
self.num_classes = num_classes
input_size = input_shape.channels * (input_shape.width or 1) * (input_shape.height or 1)
# prediction layer for num_classes foreground classes and one background class (hence + 1)
self.cls_score = nn.Linear(input_size, num_classes + 1)
nn.init.normal_(self.cls_score.weight, std=0.01)
num_bbox_reg_classes = 1 if cls_agnostic_bbox_reg else num_classes
box_dim = len(box2box_transform.weights)
self.bbox_pred = nn.Linear(input_size, num_bbox_reg_classes * box_dim)
nn.init.normal_(self.bbox_pred.weight, std=0.001)
for l in [self.cls_score, self.bbox_pred]:
nn.init.constant_(l.bias, 0)
self.box2box_transform = box2box_transform
self.smooth_l1_beta = smooth_l1_beta
self.test_score_thresh = test_score_thresh
self.test_nms_thresh = test_nms_thresh
self.test_topk_per_image = test_topk_per_image
self.box_reg_loss_type = box_reg_loss_type
if isinstance(loss_weight, float):
loss_weight = {"loss_cls": loss_weight, "loss_box_reg": loss_weight}
self.loss_weight = loss_weight
self.use_fed_loss = use_fed_loss
self.use_sigmoid_ce = use_sigmoid_ce
self.fed_loss_num_classes = fed_loss_num_classes
if self.use_fed_loss:
assert self.use_sigmoid_ce, "Please use sigmoid cross entropy loss with federated loss"
fed_loss_cls_weights = get_fed_loss_cls_weights()
assert (
len(fed_loss_cls_weights) == self.num_classes
), "Please check the provided fed_loss_cls_weights. Their size should match num_classes"
self.register_buffer("fed_loss_cls_weights", fed_loss_cls_weights)
@classmethod
def from_config(cls, cfg, input_shape):
return {
"input_shape": input_shape,
"box2box_transform": Box2BoxTransform(weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS),
# fmt: off
"num_classes" : cfg.MODEL.ROI_HEADS.NUM_CLASSES,
"cls_agnostic_bbox_reg" : cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG,
"smooth_l1_beta" : cfg.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA,
"test_score_thresh" : cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST,
"test_nms_thresh" : cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST,
"test_topk_per_image" : cfg.TEST.DETECTIONS_PER_IMAGE,
"box_reg_loss_type" : cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_TYPE,
"loss_weight" : {"loss_box_reg": cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_WEIGHT}, # noqa
"use_fed_loss" : cfg.MODEL.ROI_BOX_HEAD.USE_FED_LOSS,
"use_sigmoid_ce" : cfg.MODEL.ROI_BOX_HEAD.USE_SIGMOID_CE,
"get_fed_loss_cls_weights" : lambda: get_fed_loss_cls_weights(dataset_names=cfg.DATASETS.TRAIN, freq_weight_power=cfg.MODEL.ROI_BOX_HEAD.FED_LOSS_FREQ_WEIGHT_POWER), # noqa
"fed_loss_num_classes" : cfg.MODEL.ROI_BOX_HEAD.FED_LOSS_NUM_CLASSES,
# fmt: on
}
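    # Shape sketch for forward() below (illustrative values): with detectron2's default of
    # 512 sampled proposals per image and a batch of 2 images, ``x`` has shape
    # (1024, input_size), ``scores`` has shape (1024, num_classes + 1) and
    # ``proposal_deltas`` has shape (1024, num_bbox_reg_classes * box_dim).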
def forward(self, x):
"""
Args:
x: per-region features of shape (N, ...) for N bounding boxes to predict.
Returns:
(Tensor, Tensor):
            First tensor: shape (N,K+1), scores for each of the N boxes. Each row contains the
            scores for K object categories and 1 background class.
            Second tensor: bounding box regression deltas for each box. Shape is (N,Kx4),
            or (N,4) for class-agnostic regression.
"""
if x.dim() > 2:
x = torch.flatten(x, start_dim=1)
scores = self.cls_score(x)
proposal_deltas = self.bbox_pred(x)
return scores, proposal_deltas
def losses(self, predictions, proposals, weights=None):
"""
Args:
predictions: return values of :meth:`forward()`.
proposals (list[Instances]): proposals that match the features that were used
to compute predictions. The fields ``proposal_boxes``, ``gt_boxes``,
``gt_classes`` are expected.
weights: weights for reweighting the loss of each instance based on IoU
Returns:
Dict[str, Tensor]: dict of losses
"""
scores, proposal_deltas = predictions
# parse classification outputs
gt_classes = (
cat([p.gt_classes for p in proposals], dim=0) if len(proposals) else torch.empty(0)
)
_log_classification_stats(scores, gt_classes)
# parse box regression outputs
if len(proposals):
proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0) # Nx4
assert not proposal_boxes.requires_grad, "Proposals should not require gradients!"
# If "gt_boxes" does not exist, the proposals must be all negative and
# should not be included in regression loss computation.
# Here we just use proposal_boxes as an arbitrary placeholder because its
# value won't be used in self.box_reg_loss().
gt_boxes = cat(
[(p.gt_boxes if p.has("gt_boxes") else p.proposal_boxes).tensor for p in proposals],
dim=0,
)
else:
proposal_boxes = gt_boxes = torch.empty((0, 4), device=proposal_deltas.device)
if self.use_sigmoid_ce:
loss_cls = self.sigmoid_cross_entropy_loss(scores, gt_classes)
else:
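            # When per-proposal ``weights`` are provided (CustomStandardROIHeads passes the
            # DropLoss weights here), the per-proposal cross-entropy is reweighted before
            # averaging; otherwise the standard mean cross-entropy is used.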
            if weights is not None:
loss_cls = (weights * cross_entropy(scores, gt_classes, reduction='none')).mean()
else:
loss_cls = cross_entropy(scores, gt_classes, reduction="mean")
losses = {
"loss_cls": loss_cls,
"loss_box_reg": self.box_reg_loss(
proposal_boxes, gt_boxes, proposal_deltas, gt_classes
),
}
return {k: v * self.loss_weight.get(k, 1.0) for k, v in losses.items()}
# Implementation from https://github.com/xingyizhou/CenterNet2/blob/master/projects/CenterNet2/centernet/modeling/roi_heads/fed_loss.py # noqa
# with slight modifications
def get_fed_loss_classes(self, gt_classes, num_fed_loss_classes, num_classes, weight):
"""
Args:
gt_classes: a long tensor of shape R that contains the gt class label of each proposal.
num_fed_loss_classes: minimum number of classes to keep when calculating federated loss.
                Will sample negative classes if the number of unique gt_classes is smaller than this value.
num_classes: number of foreground classes
weight: probabilities used to sample negative classes
Returns:
Tensor:
classes to keep when calculating the federated loss, including both unique gt
classes and sampled negative classes.
"""
unique_gt_classes = torch.unique(gt_classes)
prob = unique_gt_classes.new_ones(num_classes + 1).float()
prob[-1] = 0
if len(unique_gt_classes) < num_fed_loss_classes:
prob[:num_classes] = weight.float().clone()
prob[unique_gt_classes] = 0
sampled_negative_classes = torch.multinomial(
prob, num_fed_loss_classes - len(unique_gt_classes), replacement=False
)
fed_loss_classes = torch.cat([unique_gt_classes, sampled_negative_classes])
else:
fed_loss_classes = unique_gt_classes
return fed_loss_classes
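    # Illustrative example (hypothetical numbers): with num_classes=5, gt_classes=[2, 2, 4]
    # and num_fed_loss_classes=4, the unique gt classes {2, 4} are kept and two extra
    # negative classes are sampled from {0, 1, 3} according to ``weight``; the background
    # index (num_classes) is never sampled because its probability is zeroed out above.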
# Implementation from https://github.com/xingyizhou/CenterNet2/blob/master/projects/CenterNet2/centernet/modeling/roi_heads/custom_fast_rcnn.py#L113 # noqa
# with slight modifications
def sigmoid_cross_entropy_loss(self, pred_class_logits, gt_classes):
"""
Args:
            pred_class_logits: shape (N, K+1), scores for each of the N boxes. Each row contains the
scores for K object categories and 1 background class
gt_classes: a long tensor of shape R that contains the gt class label of each proposal.
"""
if pred_class_logits.numel() == 0:
return pred_class_logits.new_zeros([1])[0]
N = pred_class_logits.shape[0]
K = pred_class_logits.shape[1] - 1
target = pred_class_logits.new_zeros(N, K + 1)
target[range(len(gt_classes)), gt_classes] = 1
target = target[:, :K]
cls_loss = F.binary_cross_entropy_with_logits(
pred_class_logits[:, :-1], target, reduction="none"
)
if self.use_fed_loss:
fed_loss_classes = self.get_fed_loss_classes(
gt_classes,
num_fed_loss_classes=self.fed_loss_num_classes,
num_classes=K,
weight=self.fed_loss_cls_weights,
)
fed_loss_classes_mask = fed_loss_classes.new_zeros(K + 1)
fed_loss_classes_mask[fed_loss_classes] = 1
fed_loss_classes_mask = fed_loss_classes_mask[:K]
weight = fed_loss_classes_mask.view(1, K).expand(N, K).float()
else:
weight = 1
loss = torch.sum(cls_loss * weight) / N
return loss
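    # Note on the loss above: both the one-hot targets and the logits drop their last
    # (background) column, so background proposals get an all-zero target and each of the K
    # foreground classes is trained with an independent binary cross-entropy term; when
    # federated loss is enabled, only the sampled classes contribute through ``weight``.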
def box_reg_loss(self, proposal_boxes, gt_boxes, pred_deltas, gt_classes):
"""
Args:
proposal_boxes/gt_boxes are tensors with the same shape (R, 4 or 5).
pred_deltas has shape (R, 4 or 5), or (R, num_classes * (4 or 5)).
gt_classes is a long tensor of shape R, the gt class label of each proposal.
R shall be the number of proposals.
"""
box_dim = proposal_boxes.shape[1] # 4 or 5
# Regression loss is only computed for foreground proposals (those matched to a GT)
fg_inds = nonzero_tuple((gt_classes >= 0) & (gt_classes < self.num_classes))[0]
if pred_deltas.shape[1] == box_dim: # cls-agnostic regression
fg_pred_deltas = pred_deltas[fg_inds]
else:
fg_pred_deltas = pred_deltas.view(-1, self.num_classes, box_dim)[
fg_inds, gt_classes[fg_inds]
]
loss_box_reg = _dense_box_regression_loss(
[proposal_boxes[fg_inds]],
self.box2box_transform,
[fg_pred_deltas.unsqueeze(0)],
[gt_boxes[fg_inds]],
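            # fg_mask argument: Ellipsis is passed so no further masking is applied; the
            # boxes and deltas above are already restricted to foreground proposals.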
...,
self.box_reg_loss_type,
self.smooth_l1_beta,
)
# The reg loss is normalized using the total number of regions (R), not the number
# of foreground regions even though the box regression loss is only defined on
# foreground regions. Why? Because doing so gives equal training influence to
# each foreground example. To see how, consider two different minibatches:
# (1) Contains a single foreground region
# (2) Contains 100 foreground regions
# If we normalize by the number of foreground regions, the single example in
# minibatch (1) will be given 100 times as much influence as each foreground
# example in minibatch (2). Normalizing by the total number of regions, R,
# means that the single example in minibatch (1) and each of the 100 examples
# in minibatch (2) are given equal influence.
return loss_box_reg / max(gt_classes.numel(), 1.0) # return 0 if empty
def inference(self, predictions: Tuple[torch.Tensor, torch.Tensor], proposals: List[Instances]):
"""
Args:
predictions: return values of :meth:`forward()`.
proposals (list[Instances]): proposals that match the features that were
used to compute predictions. The ``proposal_boxes`` field is expected.
Returns:
list[Instances]: same as `fast_rcnn_inference`.
list[Tensor]: same as `fast_rcnn_inference`.
"""
boxes = self.predict_boxes(predictions, proposals)
scores = self.predict_probs(predictions, proposals)
image_shapes = [x.image_size for x in proposals]
return fast_rcnn_inference(
boxes,
scores,
image_shapes,
self.test_score_thresh,
self.test_nms_thresh,
self.test_topk_per_image,
)
def predict_boxes_for_gt_classes(self, predictions, proposals):
"""
Args:
predictions: return values of :meth:`forward()`.
proposals (list[Instances]): proposals that match the features that were used
to compute predictions. The fields ``proposal_boxes``, ``gt_classes`` are expected.
Returns:
list[Tensor]:
A list of Tensors of predicted boxes for GT classes in case of
class-specific box head. Element i of the list has shape (Ri, B), where Ri is
the number of proposals for image i and B is the box dimension (4 or 5)
"""
if not len(proposals):
return []
scores, proposal_deltas = predictions
proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0)
N, B = proposal_boxes.shape
predict_boxes = self.box2box_transform.apply_deltas(
proposal_deltas, proposal_boxes
) # Nx(KxB)
K = predict_boxes.shape[1] // B
if K > 1:
gt_classes = torch.cat([p.gt_classes for p in proposals], dim=0)
# Some proposals are ignored or have a background class. Their gt_classes
# cannot be used as index.
gt_classes = gt_classes.clamp_(0, K - 1)
predict_boxes = predict_boxes.view(N, K, B)[
torch.arange(N, dtype=torch.long, device=predict_boxes.device), gt_classes
]
num_prop_per_image = [len(p) for p in proposals]
return predict_boxes.split(num_prop_per_image)
def predict_boxes(
self, predictions: Tuple[torch.Tensor, torch.Tensor], proposals: List[Instances]
):
"""
Args:
predictions: return values of :meth:`forward()`.
proposals (list[Instances]): proposals that match the features that were
used to compute predictions. The ``proposal_boxes`` field is expected.
Returns:
list[Tensor]:
A list of Tensors of predicted class-specific or class-agnostic boxes
for each image. Element i has shape (Ri, K * B) or (Ri, B), where Ri is
the number of proposals for image i and B is the box dimension (4 or 5)
"""
if not len(proposals):
return []
_, proposal_deltas = predictions
num_prop_per_image = [len(p) for p in proposals]
proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0)
predict_boxes = self.box2box_transform.apply_deltas(
proposal_deltas,
proposal_boxes,
) # Nx(KxB)
return predict_boxes.split(num_prop_per_image)
def predict_probs(
self, predictions: Tuple[torch.Tensor, torch.Tensor], proposals: List[Instances]
):
"""
Args:
predictions: return values of :meth:`forward()`.
proposals (list[Instances]): proposals that match the features that were
used to compute predictions.
Returns:
list[Tensor]:
A list of Tensors of predicted class probabilities for each image.
Element i has shape (Ri, K + 1), where Ri is the number of proposals for image i.
"""
scores, _ = predictions
num_inst_per_image = [len(p) for p in proposals]
if self.use_sigmoid_ce:
probs = scores.sigmoid()
else:
probs = F.softmax(scores, dim=-1)
return probs.split(num_inst_per_image, dim=0)
| CutLER-main | cutler/modeling/roi_heads/fast_rcnn.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
from .roi_heads import (
ROI_HEADS_REGISTRY,
ROIHeads,
Res5ROIHeads,
CustomStandardROIHeads,
build_roi_heads,
select_foreground_proposals,
)
from .custom_cascade_rcnn import CustomCascadeROIHeads
from .fast_rcnn import FastRCNNOutputLayers
from . import custom_cascade_rcnn # isort:skip
__all__ = list(globals().keys())
| CutLER-main | cutler/modeling/roi_heads/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Modified by XuDong Wang from https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/roi_heads/roi_heads.py
import inspect
import logging
import numpy as np
from typing import Dict, List, Optional, Tuple
import torch
from torch import nn
from detectron2.config import configurable
from detectron2.layers import ShapeSpec, nonzero_tuple
from detectron2.structures import Boxes, pairwise_iou
from structures import pairwise_iou_max_scores
from detectron2.structures import ImageList, Instances
from detectron2.utils.events import get_event_storage
from detectron2.utils.registry import Registry
from detectron2.modeling.backbone.resnet import BottleneckBlock, ResNet
from detectron2.modeling.matcher import Matcher
from detectron2.modeling.poolers import ROIPooler
from detectron2.modeling.proposal_generator.proposal_utils import add_ground_truth_to_proposals
from detectron2.modeling.sampling import subsample_labels
from detectron2.modeling.roi_heads.box_head import build_box_head
from .fast_rcnn import FastRCNNOutputLayers
from detectron2.modeling.roi_heads.keypoint_head import build_keypoint_head
from detectron2.modeling.roi_heads.mask_head import build_mask_head
from detectron2.modeling.box_regression import Box2BoxTransform
import torch.nn.functional as F
from colored import fg
blue, red = fg('blue'), fg('red')
ROI_HEADS_REGISTRY = Registry("ROI_HEADS")
ROI_HEADS_REGISTRY.__doc__ = """
Registry for ROI heads in a generalized R-CNN model.
ROIHeads take feature maps and region proposals, and
perform per-region computation.
The registered object will be called with `obj(cfg, input_shape)`.
The call is expected to return an :class:`ROIHeads`.
"""
logger = logging.getLogger(__name__)
def build_roi_heads(cfg, input_shape):
"""
Build ROIHeads defined by `cfg.MODEL.ROI_HEADS.NAME`.
"""
name = cfg.MODEL.ROI_HEADS.NAME
return ROI_HEADS_REGISTRY.get(name)(cfg, input_shape)
def select_foreground_proposals(
proposals: List[Instances], bg_label: int
) -> Tuple[List[Instances], List[torch.Tensor]]:
"""
Given a list of N Instances (for N images), each containing a `gt_classes` field,
return a list of Instances that contain only instances with `gt_classes != -1 &&
gt_classes != bg_label`.
Args:
proposals (list[Instances]): A list of N Instances, where N is the number of
images in the batch.
bg_label: label index of background class.
Returns:
list[Instances]: N Instances, each contains only the selected foreground instances.
        list[Tensor]: N boolean vectors, corresponding to the selection mask of
each Instances object. True for selected instances.
"""
assert isinstance(proposals, (list, tuple))
assert isinstance(proposals[0], Instances)
assert proposals[0].has("gt_classes")
fg_proposals = []
fg_selection_masks = []
for proposals_per_image in proposals:
gt_classes = proposals_per_image.gt_classes
fg_selection_mask = (gt_classes != -1) & (gt_classes != bg_label)
fg_idxs = fg_selection_mask.nonzero().squeeze(1)
fg_proposals.append(proposals_per_image[fg_idxs])
fg_selection_masks.append(fg_selection_mask)
return fg_proposals, fg_selection_masks
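# Note: select_foreground_proposals above is used by the mask/keypoint branches
# (see _forward_mask and _forward_keypoint below) so that those heads are trained only on
# proposals matched to a ground-truth object.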
def select_proposals_with_visible_keypoints(proposals: List[Instances]) -> List[Instances]:
"""
Args:
proposals (list[Instances]): a list of N Instances, where N is the
number of images.
Returns:
proposals: only contains proposals with at least one visible keypoint.
Note that this is still slightly different from Detectron.
In Detectron, proposals for training keypoint head are re-sampled from
all the proposals with IOU>threshold & >=1 visible keypoint.
Here, the proposals are first sampled from all proposals with
IOU>threshold, then proposals with no visible keypoint are filtered out.
This strategy seems to make no difference on Detectron and is easier to implement.
"""
ret = []
all_num_fg = []
for proposals_per_image in proposals:
# If empty/unannotated image (hard negatives), skip filtering for train
if len(proposals_per_image) == 0:
ret.append(proposals_per_image)
continue
gt_keypoints = proposals_per_image.gt_keypoints.tensor
# #fg x K x 3
vis_mask = gt_keypoints[:, :, 2] >= 1
xs, ys = gt_keypoints[:, :, 0], gt_keypoints[:, :, 1]
proposal_boxes = proposals_per_image.proposal_boxes.tensor.unsqueeze(dim=1) # #fg x 1 x 4
kp_in_box = (
(xs >= proposal_boxes[:, :, 0])
& (xs <= proposal_boxes[:, :, 2])
& (ys >= proposal_boxes[:, :, 1])
& (ys <= proposal_boxes[:, :, 3])
)
selection = (kp_in_box & vis_mask).any(dim=1)
selection_idxs = nonzero_tuple(selection)[0]
all_num_fg.append(selection_idxs.numel())
ret.append(proposals_per_image[selection_idxs])
storage = get_event_storage()
storage.put_scalar("keypoint_head/num_fg_samples", np.mean(all_num_fg))
return ret
class ROIHeads(torch.nn.Module):
"""
ROIHeads perform all per-region computation in an R-CNN.
It typically contains logic to
1. (in training only) match proposals with ground truth and sample them
2. crop the regions and extract per-region features using proposals
3. make per-region predictions with different heads
It can have many variants, implemented as subclasses of this class.
This base class contains the logic to match/sample proposals.
But it is not necessary to inherit this class if the sampling logic is not needed.
"""
@configurable
def __init__(
self,
*,
num_classes,
batch_size_per_image,
positive_fraction,
proposal_matcher,
proposal_append_gt=True,
):
"""
NOTE: this interface is experimental.
Args:
num_classes (int): number of foreground classes (i.e. background is not included)
batch_size_per_image (int): number of proposals to sample for training
positive_fraction (float): fraction of positive (foreground) proposals
to sample for training.
proposal_matcher (Matcher): matcher that matches proposals and ground truth
proposal_append_gt (bool): whether to include ground truth as proposals as well
"""
super().__init__()
self.batch_size_per_image = batch_size_per_image
self.positive_fraction = positive_fraction
self.num_classes = num_classes
self.proposal_matcher = proposal_matcher
self.proposal_append_gt = proposal_append_gt
@classmethod
def from_config(cls, cfg):
return {
"batch_size_per_image": cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE,
"positive_fraction": cfg.MODEL.ROI_HEADS.POSITIVE_FRACTION,
"num_classes": cfg.MODEL.ROI_HEADS.NUM_CLASSES,
"proposal_append_gt": cfg.MODEL.ROI_HEADS.PROPOSAL_APPEND_GT,
# Matcher to assign box proposals to gt boxes
"proposal_matcher": Matcher(
cfg.MODEL.ROI_HEADS.IOU_THRESHOLDS,
cfg.MODEL.ROI_HEADS.IOU_LABELS,
allow_low_quality_matches=False,
),
}
def _sample_proposals(
self, matched_idxs: torch.Tensor, matched_labels: torch.Tensor, gt_classes: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Based on the matching between N proposals and M groundtruth,
sample the proposals and set their classification labels.
Args:
matched_idxs (Tensor): a vector of length N, each is the best-matched
gt index in [0, M) for each proposal.
matched_labels (Tensor): a vector of length N, the matcher's label
(one of cfg.MODEL.ROI_HEADS.IOU_LABELS) for each proposal.
gt_classes (Tensor): a vector of length M.
Returns:
Tensor: a vector of indices of sampled proposals. Each is in [0, N).
Tensor: a vector of the same length, the classification label for
each sampled proposal. Each sample is labeled as either a category in
[0, num_classes) or the background (num_classes).
"""
has_gt = gt_classes.numel() > 0
# Get the corresponding GT for each proposal
if has_gt:
gt_classes = gt_classes[matched_idxs]
# Label unmatched proposals (0 label from matcher) as background (label=num_classes)
gt_classes[matched_labels == 0] = self.num_classes
# Label ignore proposals (-1 label)
gt_classes[matched_labels == -1] = -1
else:
gt_classes = torch.zeros_like(matched_idxs) + self.num_classes
sampled_fg_idxs, sampled_bg_idxs = subsample_labels(
gt_classes, self.batch_size_per_image, self.positive_fraction, self.num_classes
)
sampled_idxs = torch.cat([sampled_fg_idxs, sampled_bg_idxs], dim=0)
return sampled_idxs, gt_classes[sampled_idxs]
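    # Illustrative example (detectron2's defaults, quoted only for illustration): with
    # batch_size_per_image=512 and positive_fraction=0.25, _sample_proposals keeps at most
    # 128 foreground proposals per image and fills the remaining slots with background
    # proposals labeled num_classes.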
@torch.no_grad()
def label_and_sample_proposals(
self, proposals: List[Instances], targets: List[Instances]
) -> List[Instances]:
"""
Prepare some proposals to be used to train the ROI heads.
It performs box matching between `proposals` and `targets`, and assigns
training labels to the proposals.
It returns ``self.batch_size_per_image`` random samples from proposals and groundtruth
boxes, with a fraction of positives that is no larger than
``self.positive_fraction``.
Args:
See :meth:`ROIHeads.forward`
Returns:
list[Instances]:
length `N` list of `Instances`s containing the proposals
sampled for training. Each `Instances` has the following fields:
- proposal_boxes: the proposal boxes
- gt_boxes: the ground-truth box that the proposal is assigned to
(this is only meaningful if the proposal has a label > 0; if label = 0
then the ground-truth box is random)
                Other fields such as "gt_classes" and "gt_masks" are carried over from `targets`.
"""
# Augment proposals with ground-truth boxes.
# In the case of learned proposals (e.g., RPN), when training starts
# the proposals will be low quality due to random initialization.
# It's possible that none of these initial
# proposals have high enough overlap with the gt objects to be used
# as positive examples for the second stage components (box head,
# cls head, mask head). Adding the gt boxes to the set of proposals
# ensures that the second stage components will have some positive
# examples from the start of training. For RPN, this augmentation improves
# convergence and empirically improves box AP on COCO by about 0.5
# points (under one tested configuration).
if self.proposal_append_gt:
proposals = add_ground_truth_to_proposals(targets, proposals)
proposals_with_gt = []
num_fg_samples = []
num_bg_samples = []
for proposals_per_image, targets_per_image in zip(proposals, targets):
has_gt = len(targets_per_image) > 0
match_quality_matrix = pairwise_iou(
targets_per_image.gt_boxes, proposals_per_image.proposal_boxes
)
matched_idxs, matched_labels = self.proposal_matcher(match_quality_matrix)
sampled_idxs, gt_classes = self._sample_proposals(
matched_idxs, matched_labels, targets_per_image.gt_classes
)
# Set target attributes of the sampled proposals:
proposals_per_image = proposals_per_image[sampled_idxs]
proposals_per_image.gt_classes = gt_classes
if has_gt:
sampled_targets = matched_idxs[sampled_idxs]
# We index all the attributes of targets that start with "gt_"
# and have not been added to proposals yet (="gt_classes").
                # NOTE: here the indexing wastes some compute, because heads
# like masks, keypoints, etc, will filter the proposals again,
# (by foreground/background, or number of keypoints in the image, etc)
# so we essentially index the data twice.
for (trg_name, trg_value) in targets_per_image.get_fields().items():
if trg_name.startswith("gt_") and not proposals_per_image.has(trg_name):
proposals_per_image.set(trg_name, trg_value[sampled_targets])
# If no GT is given in the image, we don't know what a dummy gt value can be.
# Therefore the returned proposals won't have any gt_* fields, except for a
# gt_classes full of background label.
num_bg_samples.append((gt_classes == self.num_classes).sum().item())
num_fg_samples.append(gt_classes.numel() - num_bg_samples[-1])
proposals_with_gt.append(proposals_per_image)
# Log the number of fg/bg samples that are selected for training ROI heads
storage = get_event_storage()
storage.put_scalar("roi_head/num_fg_samples", np.mean(num_fg_samples))
storage.put_scalar("roi_head/num_bg_samples", np.mean(num_bg_samples))
return proposals_with_gt
def forward(
self,
images: ImageList,
features: Dict[str, torch.Tensor],
proposals: List[Instances],
targets: Optional[List[Instances]] = None,
) -> Tuple[List[Instances], Dict[str, torch.Tensor]]:
"""
Args:
images (ImageList):
features (dict[str,Tensor]): input data as a mapping from feature
map name to tensor. Axis 0 represents the number of images `N` in
the input data; axes 1-3 are channels, height, and width, which may
vary between feature maps (e.g., if a feature pyramid is used).
proposals (list[Instances]): length `N` list of `Instances`. The i-th
`Instances` contains object proposals for the i-th input image,
with fields "proposal_boxes" and "objectness_logits".
targets (list[Instances], optional): length `N` list of `Instances`. The i-th
`Instances` contains the ground-truth per-instance annotations
for the i-th input image. Specify `targets` during training only.
It may have the following fields:
- gt_boxes: the bounding box of each instance.
- gt_classes: the label for each instance with a category ranging in [0, #class].
- gt_masks: PolygonMasks or BitMasks, the ground-truth masks of each instance.
                - gt_keypoints: NxKx3, the ground-truth keypoints for each instance.
Returns:
list[Instances]: length `N` list of `Instances` containing the
detected instances. Returned during inference only; may be [] during training.
dict[str->Tensor]:
mapping from a named loss to a tensor storing the loss. Used during training only.
"""
raise NotImplementedError()
@ROI_HEADS_REGISTRY.register()
class Res5ROIHeads(ROIHeads):
"""
The ROIHeads in a typical "C4" R-CNN model, where
the box and mask head share the cropping and
the per-region feature computation by a Res5 block.
See :paper:`ResNet` Appendix A.
"""
@configurable
def __init__(
self,
*,
in_features: List[str],
pooler: ROIPooler,
res5: nn.Module,
box_predictor: nn.Module,
mask_head: Optional[nn.Module] = None,
**kwargs,
):
"""
NOTE: this interface is experimental.
Args:
in_features (list[str]): list of backbone feature map names to use for
feature extraction
            pooler (ROIPooler): pooler to extract region features from backbone
res5 (nn.Sequential): a CNN to compute per-region features, to be used by
``box_predictor`` and ``mask_head``. Typically this is a "res5"
block from a ResNet.
box_predictor (nn.Module): make box predictions from the feature.
Should have the same interface as :class:`FastRCNNOutputLayers`.
mask_head (nn.Module): transform features to make mask predictions
"""
super().__init__(**kwargs)
self.in_features = in_features
self.pooler = pooler
if isinstance(res5, (list, tuple)):
res5 = nn.Sequential(*res5)
self.res5 = res5
self.box_predictor = box_predictor
self.mask_on = mask_head is not None
if self.mask_on:
self.mask_head = mask_head
@classmethod
def from_config(cls, cfg, input_shape):
# fmt: off
ret = super().from_config(cfg)
in_features = ret["in_features"] = cfg.MODEL.ROI_HEADS.IN_FEATURES
pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
pooler_scales = (1.0 / input_shape[in_features[0]].stride, )
sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
mask_on = cfg.MODEL.MASK_ON
# fmt: on
assert not cfg.MODEL.KEYPOINT_ON
assert len(in_features) == 1
ret["pooler"] = ROIPooler(
output_size=pooler_resolution,
scales=pooler_scales,
sampling_ratio=sampling_ratio,
pooler_type=pooler_type,
)
        # Compatibility with old moco code. Might be useful.
# See notes in StandardROIHeads.from_config
if not inspect.ismethod(cls._build_res5_block):
logger.warning(
"The behavior of _build_res5_block may change. "
"Please do not depend on private methods."
)
cls._build_res5_block = classmethod(cls._build_res5_block)
ret["res5"], out_channels = cls._build_res5_block(cfg)
ret["box_predictor"] = FastRCNNOutputLayers(
cfg, ShapeSpec(channels=out_channels, height=1, width=1)
)
if mask_on:
ret["mask_head"] = build_mask_head(
cfg,
ShapeSpec(channels=out_channels, width=pooler_resolution, height=pooler_resolution),
)
return ret
@classmethod
def _build_res5_block(cls, cfg):
# fmt: off
stage_channel_factor = 2 ** 3 # res5 is 8x res2
num_groups = cfg.MODEL.RESNETS.NUM_GROUPS
width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP
bottleneck_channels = num_groups * width_per_group * stage_channel_factor
out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS * stage_channel_factor
stride_in_1x1 = cfg.MODEL.RESNETS.STRIDE_IN_1X1
norm = cfg.MODEL.RESNETS.NORM
assert not cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE[-1], \
"Deformable conv is not yet supported in res5 head."
# fmt: on
blocks = ResNet.make_stage(
BottleneckBlock,
3,
stride_per_block=[2, 1, 1],
in_channels=out_channels // 2,
bottleneck_channels=bottleneck_channels,
out_channels=out_channels,
num_groups=num_groups,
norm=norm,
stride_in_1x1=stride_in_1x1,
)
return nn.Sequential(*blocks), out_channels
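    # Note (illustrative, assuming the usual C4 configuration with a 14x14 pooler
    # resolution): the first res5 block has stride 2 (stride_per_block=[2, 1, 1]), so the
    # pooled regions are reduced to 7x7 before being averaged over space in forward().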
def _shared_roi_transform(self, features: List[torch.Tensor], boxes: List[Boxes]):
x = self.pooler(features, boxes)
return self.res5(x)
def forward(
self,
images: ImageList,
features: Dict[str, torch.Tensor],
proposals: List[Instances],
targets: Optional[List[Instances]] = None,
):
"""
See :meth:`ROIHeads.forward`.
"""
del images
if self.training:
assert targets
proposals = self.label_and_sample_proposals(proposals, targets)
del targets
proposal_boxes = [x.proposal_boxes for x in proposals]
box_features = self._shared_roi_transform(
[features[f] for f in self.in_features], proposal_boxes
)
predictions = self.box_predictor(box_features.mean(dim=[2, 3]))
if self.training:
del features
losses = self.box_predictor.losses(predictions, proposals)
if self.mask_on:
proposals, fg_selection_masks = select_foreground_proposals(
proposals, self.num_classes
)
# Since the ROI feature transform is shared between boxes and masks,
# we don't need to recompute features. The mask loss is only defined
# on foreground proposals, so we need to select out the foreground
# features.
mask_features = box_features[torch.cat(fg_selection_masks, dim=0)]
del box_features
losses.update(self.mask_head(mask_features, proposals))
return [], losses
else:
pred_instances, _ = self.box_predictor.inference(predictions, proposals)
pred_instances = self.forward_with_given_boxes(features, pred_instances)
return pred_instances, {}
def forward_with_given_boxes(
self, features: Dict[str, torch.Tensor], instances: List[Instances]
) -> List[Instances]:
"""
Use the given boxes in `instances` to produce other (non-box) per-ROI outputs.
Args:
features: same as in `forward()`
instances (list[Instances]): instances to predict other outputs. Expect the keys
"pred_boxes" and "pred_classes" to exist.
Returns:
instances (Instances):
the same `Instances` object, with extra
fields such as `pred_masks` or `pred_keypoints`.
"""
assert not self.training
assert instances[0].has("pred_boxes") and instances[0].has("pred_classes")
if self.mask_on:
feature_list = [features[f] for f in self.in_features]
x = self._shared_roi_transform(feature_list, [x.pred_boxes for x in instances])
return self.mask_head(x, instances)
else:
return instances
@ROI_HEADS_REGISTRY.register()
class CustomStandardROIHeads(ROIHeads):
"""
    It's "standard" in the sense that there is no ROI transform sharing
or feature sharing between tasks.
Each head independently processes the input features by each head's
own pooler and head.
This class is used by most models, such as FPN and C5.
To implement more models, you can subclass it and implement a different
:meth:`forward()` or a head.
"""
@configurable
def __init__(
self,
*,
box_in_features: List[str],
box_pooler: ROIPooler,
box_head: nn.Module,
box_predictor: nn.Module,
mask_in_features: Optional[List[str]] = None,
mask_pooler: Optional[ROIPooler] = None,
mask_head: Optional[nn.Module] = None,
keypoint_in_features: Optional[List[str]] = None,
keypoint_pooler: Optional[ROIPooler] = None,
keypoint_head: Optional[nn.Module] = None,
train_on_pred_boxes: bool = False,
box2box_transform = Box2BoxTransform,
use_droploss: bool = False,
droploss_iou_thresh: float = 1.0,
**kwargs,
):
"""
NOTE: this interface is experimental.
Args:
box_in_features (list[str]): list of feature names to use for the box head.
            box_pooler (ROIPooler): pooler to extract region features for box head
box_head (nn.Module): transform features to make box predictions
box_predictor (nn.Module): make box predictions from the feature.
Should have the same interface as :class:`FastRCNNOutputLayers`.
mask_in_features (list[str]): list of feature names to use for the mask
pooler or mask head. None if not using mask head.
mask_pooler (ROIPooler): pooler to extract region features from image features.
The mask head will then take region features to make predictions.
If None, the mask head will directly take the dict of image features
defined by `mask_in_features`
mask_head (nn.Module): transform features to make mask predictions
keypoint_in_features, keypoint_pooler, keypoint_head: similar to ``mask_*``.
            train_on_pred_boxes (bool): whether to use proposal boxes or
                predicted boxes from the box head to train other heads.
            box2box_transform (Box2BoxTransform): transform used to decode the predicted
                box deltas when computing the DropLoss IoU weights.
            use_droploss (bool): if True, drop the classification loss for predicted boxes
                whose maximum IoU with the (pseudo) ground-truth boxes does not exceed
                ``droploss_iou_thresh`` (the DropLoss used by CutLER).
            droploss_iou_thresh (float): IoU threshold used by the DropLoss weighting.
        """
super().__init__(**kwargs)
# keep self.in_features for backward compatibility
self.in_features = self.box_in_features = box_in_features
self.box_pooler = box_pooler
self.box_head = box_head
self.box_predictor = box_predictor
self.mask_on = mask_in_features is not None
if self.mask_on:
self.mask_in_features = mask_in_features
self.mask_pooler = mask_pooler
self.mask_head = mask_head
self.keypoint_on = keypoint_in_features is not None
if self.keypoint_on:
self.keypoint_in_features = keypoint_in_features
self.keypoint_pooler = keypoint_pooler
self.keypoint_head = keypoint_head
self.train_on_pred_boxes = train_on_pred_boxes
self.use_droploss = use_droploss
self.box2box_transform = box2box_transform
self.droploss_iou_thresh = droploss_iou_thresh
@classmethod
def from_config(cls, cfg, input_shape):
ret = super().from_config(cfg)
ret["train_on_pred_boxes"] = cfg.MODEL.ROI_BOX_HEAD.TRAIN_ON_PRED_BOXES
# Subclasses that have not been updated to use from_config style construction
# may have overridden _init_*_head methods. In this case, those overridden methods
# will not be classmethods and we need to avoid trying to call them here.
# We test for this with ismethod which only returns True for bound methods of cls.
# Such subclasses will need to handle calling their overridden _init_*_head methods.
if cfg.MODEL.ROI_HEADS.USE_DROPLOSS:
ret['use_droploss'] = True
ret['droploss_iou_thresh'] = cfg.MODEL.ROI_HEADS.DROPLOSS_IOU_THRESH
ret['box2box_transform'] = Box2BoxTransform(weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS)
if inspect.ismethod(cls._init_box_head):
ret.update(cls._init_box_head(cfg, input_shape))
if inspect.ismethod(cls._init_mask_head):
ret.update(cls._init_mask_head(cfg, input_shape))
if inspect.ismethod(cls._init_keypoint_head):
ret.update(cls._init_keypoint_head(cfg, input_shape))
return ret
@classmethod
def _init_box_head(cls, cfg, input_shape):
# fmt: off
in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES
pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features)
sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
# fmt: on
# If CustomStandardROIHeads is applied on multiple feature maps (as in FPN),
# then we share the same predictors and therefore the channel counts must be the same
in_channels = [input_shape[f].channels for f in in_features]
# Check all channel counts are equal
assert len(set(in_channels)) == 1, in_channels
in_channels = in_channels[0]
box_pooler = ROIPooler(
output_size=pooler_resolution,
scales=pooler_scales,
sampling_ratio=sampling_ratio,
pooler_type=pooler_type,
)
# Here we split "box head" and "box predictor", which is mainly due to historical reasons.
# They are used together so the "box predictor" layers should be part of the "box head".
# New subclasses of ROIHeads do not need "box predictor"s.
box_head = build_box_head(
cfg, ShapeSpec(channels=in_channels, height=pooler_resolution, width=pooler_resolution)
)
box_predictor = FastRCNNOutputLayers(cfg, box_head.output_shape)
return {
"box_in_features": in_features,
"box_pooler": box_pooler,
"box_head": box_head,
"box_predictor": box_predictor,
}
@classmethod
def _init_mask_head(cls, cfg, input_shape):
if not cfg.MODEL.MASK_ON:
return {}
# fmt: off
in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES
pooler_resolution = cfg.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION
pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features)
sampling_ratio = cfg.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO
pooler_type = cfg.MODEL.ROI_MASK_HEAD.POOLER_TYPE
# fmt: on
in_channels = [input_shape[f].channels for f in in_features][0]
ret = {"mask_in_features": in_features}
ret["mask_pooler"] = (
ROIPooler(
output_size=pooler_resolution,
scales=pooler_scales,
sampling_ratio=sampling_ratio,
pooler_type=pooler_type,
)
if pooler_type
else None
)
if pooler_type:
shape = ShapeSpec(
channels=in_channels, width=pooler_resolution, height=pooler_resolution
)
else:
shape = {f: input_shape[f] for f in in_features}
ret["mask_head"] = build_mask_head(cfg, shape)
return ret
@classmethod
def _init_keypoint_head(cls, cfg, input_shape):
if not cfg.MODEL.KEYPOINT_ON:
return {}
# fmt: off
in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES
pooler_resolution = cfg.MODEL.ROI_KEYPOINT_HEAD.POOLER_RESOLUTION
pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features) # noqa
sampling_ratio = cfg.MODEL.ROI_KEYPOINT_HEAD.POOLER_SAMPLING_RATIO
pooler_type = cfg.MODEL.ROI_KEYPOINT_HEAD.POOLER_TYPE
# fmt: on
in_channels = [input_shape[f].channels for f in in_features][0]
ret = {"keypoint_in_features": in_features}
ret["keypoint_pooler"] = (
ROIPooler(
output_size=pooler_resolution,
scales=pooler_scales,
sampling_ratio=sampling_ratio,
pooler_type=pooler_type,
)
if pooler_type
else None
)
if pooler_type:
shape = ShapeSpec(
channels=in_channels, width=pooler_resolution, height=pooler_resolution
)
else:
shape = {f: input_shape[f] for f in in_features}
ret["keypoint_head"] = build_keypoint_head(cfg, shape)
return ret
def forward(
self,
images: ImageList,
features: Dict[str, torch.Tensor],
proposals: List[Instances],
targets: Optional[List[Instances]] = None,
) -> Tuple[List[Instances], Dict[str, torch.Tensor]]:
"""
See :class:`ROIHeads.forward`.
"""
del images
if self.training:
assert targets, "'targets' argument is required during training"
proposals = self.label_and_sample_proposals(proposals, targets)
del targets
if self.training:
losses = self._forward_box(features, proposals)
# Usually the original proposals used by the box head are used by the mask, keypoint
# heads. But when `self.train_on_pred_boxes is True`, proposals will contain boxes
# predicted by the box head.
losses.update(self._forward_mask(features, proposals))
losses.update(self._forward_keypoint(features, proposals))
return proposals, losses
else:
pred_instances = self._forward_box(features, proposals)
# During inference cascaded prediction is used: the mask and keypoints heads are only
# applied to the top scoring box detections.
pred_instances = self.forward_with_given_boxes(features, pred_instances)
return pred_instances, {}
def forward_with_given_boxes(
self, features: Dict[str, torch.Tensor], instances: List[Instances]
) -> List[Instances]:
"""
Use the given boxes in `instances` to produce other (non-box) per-ROI outputs.
        This is useful for downstream tasks where a box is known, but other
        attributes (outputs of other heads) still need to be obtained.
Test-time augmentation also uses this.
Args:
features: same as in `forward()`
instances (list[Instances]): instances to predict other outputs. Expect the keys
"pred_boxes" and "pred_classes" to exist.
Returns:
list[Instances]:
the same `Instances` objects, with extra
fields such as `pred_masks` or `pred_keypoints`.
"""
assert not self.training
assert instances[0].has("pred_boxes") and instances[0].has("pred_classes")
instances = self._forward_mask(features, instances)
instances = self._forward_keypoint(features, instances)
return instances
def _forward_box(self, features: Dict[str, torch.Tensor], proposals: List[Instances]):
"""
Forward logic of the box prediction branch. If `self.train_on_pred_boxes is True`,
the function puts predicted boxes in the `proposal_boxes` field of `proposals` argument.
Args:
features (dict[str, Tensor]): mapping from feature map names to tensor.
Same as in :meth:`ROIHeads.forward`.
proposals (list[Instances]): the per-image object proposals with
their matching ground truth.
Each has fields "proposal_boxes", and "objectness_logits",
"gt_classes", "gt_boxes".
Returns:
In training, a dict of losses.
In inference, a list of `Instances`, the predicted instances.
"""
features = [features[f] for f in self.box_in_features]
box_features = self.box_pooler(features, [x.proposal_boxes for x in proposals]) # torch.Size([512 * batch_size, 256, 7, 7])
box_features = self.box_head(box_features) # torch.Size([512 * batch_size, 1024])
predictions = self.box_predictor(box_features) # [torch.Size([512 * batch_size, 2]), torch.Size([512 * batch_size, 4])]
no_gt_found = False
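        # DropLoss bookkeeping (per image): box_num_list counts the sampled proposals and
        # gt_num_list estimates the number of distinct (pseudo) ground-truth boxes by
        # de-duplicating the gt_boxes assigned to the first 100 proposals, relying on the
        # observation noted below that the leading proposals correspond to ground-truth
        # boxes; the except branch handles images where no gt_boxes field is available.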
if self.use_droploss and self.training:
# the first K proposals are GT proposals
try:
box_num_list = [len(x.gt_boxes) for x in proposals]
gt_num_list = [torch.unique(x.gt_boxes.tensor[:100], dim=0).size()[0] for x in proposals]
            except Exception:
box_num_list = [0 for _ in proposals]
gt_num_list = [0 for _ in proposals]
no_gt_found = True
if self.use_droploss and self.training and not no_gt_found:
# NOTE: maximum overlapping with GT (IoU)
predictions_delta = predictions[1]
proposal_boxes = Boxes.cat([x.proposal_boxes for x in proposals])
predictions_bbox = self.box2box_transform.apply_deltas(predictions_delta, proposal_boxes.tensor)
idx_start = 0
iou_max_list = []
for idx, x in enumerate(proposals):
idx_end = idx_start + box_num_list[idx]
iou_max_list.append(pairwise_iou_max_scores(predictions_bbox[idx_start:idx_end], x.gt_boxes[:gt_num_list[idx]].tensor))
idx_start = idx_end
iou_max = torch.cat(iou_max_list, dim=0)
del box_features
if self.training:
if self.use_droploss and not no_gt_found:
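                # DropLoss: keep the classification loss only for predictions whose maximum
                # IoU with a (pseudo) ground-truth box exceeds droploss_iou_thresh; the two
                # lines below are equivalent to ``weights = iou_max.gt(thresh).float()``.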
weights = iou_max.le(self.droploss_iou_thresh).float()
weights = 1 - weights.ge(1.0).float()
losses = self.box_predictor.losses(predictions, proposals, weights=weights.detach())
else:
losses = self.box_predictor.losses(predictions, proposals)
if self.train_on_pred_boxes: # default is false
with torch.no_grad():
pred_boxes = self.box_predictor.predict_boxes_for_gt_classes(
predictions, proposals
)
for proposals_per_image, pred_boxes_per_image in zip(proposals, pred_boxes):
proposals_per_image.proposal_boxes = Boxes(pred_boxes_per_image)
return losses
else:
pred_instances, _ = self.box_predictor.inference(predictions, proposals)
return pred_instances
def _forward_mask(self, features: Dict[str, torch.Tensor], instances: List[Instances]):
"""
Forward logic of the mask prediction branch.
Args:
features (dict[str, Tensor]): mapping from feature map names to tensor.
Same as in :meth:`ROIHeads.forward`.
instances (list[Instances]): the per-image instances to train/predict masks.
In training, they can be the proposals.
In inference, they can be the boxes predicted by R-CNN box head.
Returns:
In training, a dict of losses.
In inference, update `instances` with new fields "pred_masks" and return it.
"""
if not self.mask_on:
return {} if self.training else instances
if self.training:
# head is only trained on positive proposals.
instances, _ = select_foreground_proposals(instances, self.num_classes)
if self.mask_pooler is not None:
features = [features[f] for f in self.mask_in_features]
boxes = [x.proposal_boxes if self.training else x.pred_boxes for x in instances]
features = self.mask_pooler(features, boxes)
else:
features = {f: features[f] for f in self.mask_in_features}
return self.mask_head(features, instances)
def _forward_keypoint(self, features: Dict[str, torch.Tensor], instances: List[Instances]):
"""
Forward logic of the keypoint prediction branch.
Args:
features (dict[str, Tensor]): mapping from feature map names to tensor.
Same as in :meth:`ROIHeads.forward`.
instances (list[Instances]): the per-image instances to train/predict keypoints.
In training, they can be the proposals.
In inference, they can be the boxes predicted by R-CNN box head.
Returns:
In training, a dict of losses.
In inference, update `instances` with new fields "pred_keypoints" and return it.
"""
if not self.keypoint_on:
return {} if self.training else instances
if self.training:
# head is only trained on positive proposals with >=1 visible keypoints.
instances, _ = select_foreground_proposals(instances, self.num_classes)
instances = select_proposals_with_visible_keypoints(instances)
if self.keypoint_pooler is not None:
features = [features[f] for f in self.keypoint_in_features]
boxes = [x.proposal_boxes if self.training else x.pred_boxes for x in instances]
features = self.keypoint_pooler(features, boxes)
else:
features = {f: features[f] for f in self.keypoint_in_features}
return self.keypoint_head(features, instances)
| CutLER-main | cutler/modeling/roi_heads/roi_heads.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
from .coco_evaluation import COCOEvaluator | CutLER-main | cutler/evaluation/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Modified by XuDong Wang from https://github.com/facebookresearch/detectron2/blob/main/detectron2/evaluation/coco_evaluation.py
# supports evaluation of object detection only, although the prediction contains both segmentation and detection results.
import contextlib
import copy
import io
import itertools
import json
import logging
import numpy as np
import os
import pickle
from collections import OrderedDict
import pycocotools.mask as mask_util
import torch
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from tabulate import tabulate
import detectron2.utils.comm as comm
from detectron2.config import CfgNode
from detectron2.data import MetadataCatalog
from detectron2.data.datasets.coco import convert_to_coco_json
from detectron2.structures import Boxes, BoxMode, pairwise_iou
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import create_small_table
from detectron2.evaluation.evaluator import DatasetEvaluator
try:
from detectron2.evaluation.fast_eval_api import COCOeval_opt
except ImportError:
COCOeval_opt = COCOeval
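# COCOeval_opt is detectron2's C++-accelerated drop-in replacement for pycocotools'
# COCOeval; if the extension is not built, the official Python implementation is used and
# __init__ below also forces use_fast_impl to False.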
class COCOEvaluator(DatasetEvaluator):
"""
Evaluate AR for object proposals, AP for instance detection/segmentation, AP
for keypoint detection outputs using COCO's metrics.
See http://cocodataset.org/#detection-eval and
http://cocodataset.org/#keypoints-eval to understand its metrics.
The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means
the metric cannot be computed (e.g. due to no predictions made).
In addition to COCO, this evaluator is able to support any bounding box detection,
instance segmentation, or keypoint detection dataset.
"""
def __init__(
self,
dataset_name,
tasks=None,
distributed=True,
output_dir=None,
*,
max_dets_per_image=None,
use_fast_impl=True,
kpt_oks_sigmas=(),
allow_cached_coco=True,
no_segm=False,
):
"""
Args:
dataset_name (str): name of the dataset to be evaluated.
It must have either the following corresponding metadata:
"json_file": the path to the COCO format annotation
Or it must be in detectron2's standard dataset format
so it can be converted to COCO format automatically.
tasks (tuple[str]): tasks that can be evaluated under the given
configuration. A task is one of "bbox", "segm", "keypoints".
By default, will infer this automatically from predictions.
distributed (bool): if True, will collect results from all ranks and run evaluation
in the main process.
Otherwise, will only evaluate the results in the current process.
output_dir (str): optional, an output directory to dump all
results predicted on the dataset. The dump contains two files:
1. "instances_predictions.pth" a file that can be loaded with `torch.load` and
contains all the results in the format they are produced by the model.
2. "coco_instances_results.json" a json file in COCO's result format.
max_dets_per_image (int): limit on the maximum number of detections per image.
By default in COCO, this limit is 100, but it can be customized
to be greater, as is needed in evaluation metrics AP fixed and AP pool
(see https://arxiv.org/pdf/2102.01066.pdf)
This doesn't affect keypoint evaluation.
use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP.
Although the results should be very close to the official implementation in COCO
API, it is still recommended to compute results with the official API for use in
papers. The faster implementation also uses more RAM.
kpt_oks_sigmas (list[float]): The sigmas used to calculate keypoint OKS.
See http://cocodataset.org/#keypoints-eval
When empty, it will use the defaults in COCO.
Otherwise it should be the same length as ROI_KEYPOINT_HEAD.NUM_KEYPOINTS.
allow_cached_coco (bool): Whether to use cached coco json from previous validation
runs. You should set this to False if you need to use different validation data.
Defaults to True.
"""
self._logger = logging.getLogger(__name__)
self._distributed = distributed
self._output_dir = output_dir
self.no_segm = no_segm
if use_fast_impl and (COCOeval_opt is COCOeval):
self._logger.info("Fast COCO eval is not built. Falling back to official COCO eval.")
use_fast_impl = False
self._use_fast_impl = use_fast_impl
# COCOeval requires the limit on the number of detections per image (maxDets) to be a list
# with at least 3 elements. The default maxDets in COCOeval is [1, 10, 100], in which the
# 3rd element (100) is used as the limit on the number of detections per image when
# evaluating AP. COCOEvaluator expects an integer for max_dets_per_image, so for COCOeval,
# we reformat max_dets_per_image into [1, 10, max_dets_per_image], based on the defaults.
if max_dets_per_image is None:
max_dets_per_image = [1, 10, 100]
else:
max_dets_per_image = [1, 10, max_dets_per_image]
self._max_dets_per_image = max_dets_per_image
if tasks is not None and isinstance(tasks, CfgNode):
kpt_oks_sigmas = (
tasks.TEST.KEYPOINT_OKS_SIGMAS if not kpt_oks_sigmas else kpt_oks_sigmas
)
self._logger.warning(
"COCO Evaluator instantiated using config, this is deprecated behavior."
" Please pass in explicit arguments instead."
)
self._tasks = None  # Inferring it from predictions should be better
else:
self._tasks = tasks
self._cpu_device = torch.device("cpu")
self._metadata = MetadataCatalog.get(dataset_name)
if not hasattr(self._metadata, "json_file"):
if output_dir is None:
raise ValueError(
"output_dir must be provided to COCOEvaluator "
"for datasets not in COCO format."
)
self._logger.info(f"Trying to convert '{dataset_name}' to COCO format ...")
cache_path = os.path.join(output_dir, f"{dataset_name}_coco_format.json")
self._metadata.json_file = cache_path
convert_to_coco_json(dataset_name, cache_path, allow_cached=allow_cached_coco)
json_file = PathManager.get_local_path(self._metadata.json_file)
with contextlib.redirect_stdout(io.StringIO()):
self._coco_api = COCO(json_file)
# Test set json files do not contain annotations (evaluation must be
# performed using the COCO evaluation server).
self._do_evaluation = "annotations" in self._coco_api.dataset
if self._do_evaluation:
self._kpt_oks_sigmas = kpt_oks_sigmas
def reset(self):
self._predictions = []
def process(self, inputs, outputs):
"""
Args:
inputs: the inputs to a COCO model (e.g., GeneralizedRCNN).
It is a list of dict. Each dict corresponds to an image and
contains keys like "height", "width", "file_name", "image_id".
outputs: the outputs of a COCO model. It is a list of dicts with key
"instances" that contains :class:`Instances`.
"""
for input, output in zip(inputs, outputs):
prediction = {"image_id": input["image_id"]}
if "instances" in output:
instances = output["instances"].to(self._cpu_device)
prediction["instances"] = instances_to_coco_json(instances, input["image_id"])
if "proposals" in output:
prediction["proposals"] = output["proposals"].to(self._cpu_device)
if len(prediction) > 1:
self._predictions.append(prediction)
def evaluate(self, img_ids=None):
"""
Args:
img_ids: a list of image IDs to evaluate on. Default to None for the whole dataset
"""
if self._distributed:
comm.synchronize()
predictions = comm.gather(self._predictions, dst=0)
predictions = list(itertools.chain(*predictions))
if not comm.is_main_process():
return {}
else:
predictions = self._predictions
if len(predictions) == 0:
self._logger.warning("[COCOEvaluator] Did not receive valid predictions.")
return {}
if self._output_dir:
PathManager.mkdirs(self._output_dir)
file_path = os.path.join(self._output_dir, "instances_predictions.pth")
with PathManager.open(file_path, "wb") as f:
torch.save(predictions, f)
self._results = OrderedDict()
if "proposals" in predictions[0]:
self._eval_box_proposals(predictions)
if "instances" in predictions[0]:
self._eval_predictions(predictions, img_ids=img_ids)
# Copy so the caller can do whatever with results
return copy.deepcopy(self._results)
def _tasks_from_predictions(self, predictions):
"""
Get COCO API "tasks" (i.e. iou_type) from COCO-format predictions.
"""
tasks = {"bbox"}
for pred in predictions:
if "segmentation" in pred and not self.no_segm:
tasks.add("segm")
if "keypoints" in pred:
tasks.add("keypoints")
return sorted(tasks)
def _eval_predictions(self, predictions, img_ids=None):
"""
Evaluate predictions. Fill self._results with the metrics of the tasks.
"""
self._logger.info("Preparing results for COCO format ...")
coco_results = list(itertools.chain(*[x["instances"] for x in predictions]))
tasks = self._tasks or self._tasks_from_predictions(coco_results)
# unmap the category ids for COCO
if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"):
dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id
all_contiguous_ids = list(dataset_id_to_contiguous_id.values())
num_classes = len(all_contiguous_ids)
assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1
reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()}
for result in coco_results:
category_id = result["category_id"]
assert category_id < num_classes, (
f"A prediction has class={category_id}, "
f"but the dataset only has {num_classes} classes and "
f"predicted class id should be in [0, {num_classes - 1}]."
)
result["category_id"] = reverse_id_mapping[category_id]
if self._output_dir:
file_path = os.path.join(self._output_dir, "coco_instances_results.json")
self._logger.info("Saving results to {}".format(file_path))
with PathManager.open(file_path, "w") as f:
f.write(json.dumps(coco_results))
f.flush()
if not self._do_evaluation:
self._logger.info("Annotations are not available for evaluation.")
return
self._logger.info(
"Evaluating predictions with {} COCO API...".format(
"unofficial" if self._use_fast_impl else "official"
)
)
for task in sorted(tasks):
assert task in {"bbox", "segm", "keypoints"}, f"Got unknown task: {task}!"
coco_eval = (
_evaluate_predictions_on_coco(
self._coco_api,
coco_results,
task,
kpt_oks_sigmas=self._kpt_oks_sigmas,
use_fast_impl=self._use_fast_impl,
img_ids=img_ids,
max_dets_per_image=self._max_dets_per_image,
)
if len(coco_results) > 0
else None # cocoapi does not handle empty results very well
)
res = self._derive_coco_results(
coco_eval, task, class_names=self._metadata.get("thing_classes")
)
self._results[task] = res
def _eval_box_proposals(self, predictions):
"""
Evaluate the box proposals in predictions.
Fill self._results with the metrics for "box_proposals" task.
"""
if self._output_dir:
# Saving generated box proposals to file.
# Predicted box_proposals are in XYXY_ABS mode.
bbox_mode = BoxMode.XYXY_ABS.value
ids, boxes, objectness_logits = [], [], []
for prediction in predictions:
ids.append(prediction["image_id"])
boxes.append(prediction["proposals"].proposal_boxes.tensor.numpy())
objectness_logits.append(prediction["proposals"].objectness_logits.numpy())
proposal_data = {
"boxes": boxes,
"objectness_logits": objectness_logits,
"ids": ids,
"bbox_mode": bbox_mode,
}
with PathManager.open(os.path.join(self._output_dir, "box_proposals.pkl"), "wb") as f:
pickle.dump(proposal_data, f)
if not self._do_evaluation:
self._logger.info("Annotations are not available for evaluation.")
return
self._logger.info("Evaluating bbox proposals ...")
res = {}
areas = {"all": "", "small": "s", "medium": "m", "large": "l"}
for limit in [100, 1000]:
for area, suffix in areas.items():
stats = _evaluate_box_proposals(predictions, self._coco_api, area=area, limit=limit)
key = "AR{}@{:d}".format(suffix, limit)
res[key] = float(stats["ar"].item() * 100)
self._logger.info("Proposal metrics: \n" + create_small_table(res))
self._results["box_proposals"] = res
def _derive_coco_results(self, coco_eval, iou_type, class_names=None):
"""
Derive the desired score numbers from summarized COCOeval.
Args:
coco_eval (None or COCOEval): None represents no predictions from model.
iou_type (str):
class_names (None or list[str]): if provided, will use it to compute
per-category AP.
Returns:
a dict of {metric name: score}
"""
metrics = {
"bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
"segm": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
"keypoints": ["AP", "AP50", "AP75", "APm", "APl"],
}[iou_type]
if coco_eval is None:
self._logger.warning("No predictions from the model!")
return {metric: float("nan") for metric in metrics}
# the standard metrics
results = {
metric: float(coco_eval.stats[idx] * 100 if coco_eval.stats[idx] >= 0 else "nan")
for idx, metric in enumerate(metrics)
}
self._logger.info(
"Evaluation results for {}: \n".format(iou_type) + create_small_table(results)
)
if not np.isfinite(sum(results.values())):
self._logger.info("Some metrics cannot be computed and are shown as NaN.")
if class_names is None or len(class_names) <= 1:
return results
# Compute per-category AP
# from https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L222-L252 # noqa
precisions = coco_eval.eval["precision"]
# precision has dims (iou, recall, cls, area range, max dets)
assert len(class_names) == precisions.shape[2]
results_per_category = []
for idx, name in enumerate(class_names):
# area range index 0: all area ranges
# max dets index -1: typically 100 per image
precision = precisions[:, :, idx, 0, -1]
precision = precision[precision > -1]
ap = np.mean(precision) if precision.size else float("nan")
results_per_category.append(("{}".format(name), float(ap * 100)))
# tabulate it
N_COLS = min(6, len(results_per_category) * 2)
results_flatten = list(itertools.chain(*results_per_category))
results_2d = itertools.zip_longest(*[results_flatten[i::N_COLS] for i in range(N_COLS)])
table = tabulate(
results_2d,
tablefmt="pipe",
floatfmt=".3f",
headers=["category", "AP"] * (N_COLS // 2),
numalign="left",
)
self._logger.info("Per-category {} AP: \n".format(iou_type) + table)
results.update({"AP-" + name: ap for name, ap in results_per_category})
return results
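# --- Hedged usage sketch (not part of the original file) ----------------------
# Typical way this evaluator is wired into a detectron2-style evaluation loop:
# build a test loader, run `inference_on_dataset`, and read the returned metric
# dict. The dataset name, output directory and the use of detectron2's own test
# loader (instead of the CutLER one) are assumptions for illustration.
def _coco_evaluator_usage_sketch(cfg, model, dataset_name="coco_2017_val"):
    from detectron2.data import build_detection_test_loader
    from detectron2.evaluation import inference_on_dataset

    evaluator = COCOEvaluator(dataset_name, output_dir="./eval_output")
    loader = build_detection_test_loader(cfg, dataset_name)
    # Returns e.g. {"bbox": {"AP": ..., "AP50": ...}, "segm": {...}}.
    return inference_on_dataset(model, loader, evaluator)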
def instances_to_coco_json(instances, img_id):
"""
Dump an "Instances" object to a COCO-format json that's used for evaluation.
Args:
instances (Instances):
img_id (int): the image id
Returns:
list[dict]: list of json annotations in COCO format.
"""
num_instance = len(instances)
if num_instance == 0:
return []
boxes = instances.pred_boxes.tensor.numpy()
boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
boxes = boxes.tolist()
scores = instances.scores.tolist()
classes = instances.pred_classes.tolist()
has_mask = instances.has("pred_masks")
if has_mask:
# use RLE to encode the masks, because they are too large and take too much memory
# since this evaluator stores outputs of the entire dataset
rles = [
mask_util.encode(np.array(mask[:, :, None], order="F", dtype="uint8"))[0]
for mask in instances.pred_masks
]
for rle in rles:
# "counts" is an array encoded by mask_util as a byte-stream. Python3's
# json writer, which always produces strings, cannot serialize a bytestream
# unless you decode it. Thankfully, utf-8 works out (which is also what
# the pycocotools/_mask.pyx does).
rle["counts"] = rle["counts"].decode("utf-8")
has_keypoints = instances.has("pred_keypoints")
if has_keypoints:
keypoints = instances.pred_keypoints
results = []
for k in range(num_instance):
result = {
"image_id": img_id,
"category_id": classes[k],
"bbox": boxes[k],
"score": scores[k],
}
if has_mask:
result["segmentation"] = rles[k]
if has_keypoints:
# In COCO annotations,
# keypoints coordinates are pixel indices.
# However our predictions are floating point coordinates.
# Therefore we subtract 0.5 to be consistent with the annotation format.
# This is the inverse of data loading logic in `datasets/coco.py`.
keypoints[k][:, :2] -= 0.5
result["keypoints"] = keypoints[k].flatten().tolist()
results.append(result)
return results
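# --- Hedged example (not part of the original file) ---------------------------
# Minimal round trip through `instances_to_coco_json`: a fake `Instances` object
# with one predicted box becomes a list holding a single COCO-style result dict
# (XYWH box, float score, integer category id). All values are invented.
def _instances_to_coco_json_example():
    from detectron2.structures import Instances

    inst = Instances((480, 640))  # (height, width)
    inst.pred_boxes = Boxes(torch.tensor([[10.0, 20.0, 110.0, 220.0]]))
    inst.scores = torch.tensor([0.95])
    inst.pred_classes = torch.tensor([0])
    # -> one dict with image_id 42, category_id 0, bbox [10.0, 20.0, 100.0, 200.0]
    #    (converted to XYWH_ABS) and the score.
    return instances_to_coco_json(inst, img_id=42)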
# inspired from Detectron:
# https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L255 # noqa
def _evaluate_box_proposals(dataset_predictions, coco_api, thresholds=None, area="all", limit=None):
"""
Evaluate detection proposal recall metrics. This function is a much
faster alternative to the official COCO API recall evaluation code. However,
it produces slightly different results.
"""
# Record max overlap value for each gt box
# Return vector of overlap values
areas = {
"all": 0,
"small": 1,
"medium": 2,
"large": 3,
"96-128": 4,
"128-256": 5,
"256-512": 6,
"512-inf": 7,
}
area_ranges = [
[0**2, 1e5**2], # all
[0**2, 32**2], # small
[32**2, 96**2], # medium
[96**2, 1e5**2], # large
[96**2, 128**2], # 96-128
[128**2, 256**2], # 128-256
[256**2, 512**2], # 256-512
[512**2, 1e5**2],  # 512-inf
]
assert area in areas, "Unknown area range: {}".format(area)
area_range = area_ranges[areas[area]]
gt_overlaps = []
num_pos = 0
for prediction_dict in dataset_predictions:
predictions = prediction_dict["proposals"]
# sort predictions in descending order
# TODO maybe remove this and make it explicit in the documentation
inds = predictions.objectness_logits.sort(descending=True)[1]
predictions = predictions[inds]
ann_ids = coco_api.getAnnIds(imgIds=prediction_dict["image_id"])
anno = coco_api.loadAnns(ann_ids)
gt_boxes = [
BoxMode.convert(obj["bbox"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS)
for obj in anno
if obj["iscrowd"] == 0
]
gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4) # guard against no boxes
gt_boxes = Boxes(gt_boxes)
gt_areas = torch.as_tensor([obj["area"] for obj in anno if obj["iscrowd"] == 0])
if len(gt_boxes) == 0 or len(predictions) == 0:
continue
valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1])
gt_boxes = gt_boxes[valid_gt_inds]
num_pos += len(gt_boxes)
if len(gt_boxes) == 0:
continue
if limit is not None and len(predictions) > limit:
predictions = predictions[:limit]
overlaps = pairwise_iou(predictions.proposal_boxes, gt_boxes)
_gt_overlaps = torch.zeros(len(gt_boxes))
for j in range(min(len(predictions), len(gt_boxes))):
# find which proposal box maximally covers each gt box
# and get the iou amount of coverage for each gt box
max_overlaps, argmax_overlaps = overlaps.max(dim=0)
# find which gt box is 'best' covered (i.e. 'best' = most iou)
gt_ovr, gt_ind = max_overlaps.max(dim=0)
assert gt_ovr >= 0
# find the proposal box that covers the best covered gt box
box_ind = argmax_overlaps[gt_ind]
# record the iou coverage of this gt box
_gt_overlaps[j] = overlaps[box_ind, gt_ind]
assert _gt_overlaps[j] == gt_ovr
# mark the proposal box and the gt box as used
overlaps[box_ind, :] = -1
overlaps[:, gt_ind] = -1
# append recorded iou coverage level
gt_overlaps.append(_gt_overlaps)
gt_overlaps = (
torch.cat(gt_overlaps, dim=0) if len(gt_overlaps) else torch.zeros(0, dtype=torch.float32)
)
gt_overlaps, _ = torch.sort(gt_overlaps)
if thresholds is None:
step = 0.05
thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32)
recalls = torch.zeros_like(thresholds)
# compute recall for each iou threshold
for i, t in enumerate(thresholds):
recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos)
# ar = 2 * np.trapz(recalls, thresholds)
ar = recalls.mean()
return {
"ar": ar,
"recalls": recalls,
"thresholds": thresholds,
"gt_overlaps": gt_overlaps,
"num_pos": num_pos,
}
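# --- Hedged illustration (not part of the original file) ----------------------
# The tail of `_evaluate_box_proposals` reduces the per-GT best-IoU values to a
# recall curve and its mean (the "ar" entry). This standalone sketch repeats
# that reduction on made-up overlap values so the threshold bookkeeping is easy
# to follow in isolation.
def _recall_from_overlaps_sketch():
    gt_overlaps = torch.tensor([0.2, 0.55, 0.6, 0.8, 0.95])  # fake best IoU per GT box
    num_pos = gt_overlaps.numel()
    thresholds = torch.arange(0.5, 0.95 + 1e-5, 0.05, dtype=torch.float32)
    recalls = torch.zeros_like(thresholds)
    for i, t in enumerate(thresholds):
        recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos)
    return recalls.mean()  # average recall over the IoU thresholds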
def _evaluate_predictions_on_coco(
coco_gt,
coco_results,
iou_type,
kpt_oks_sigmas=None,
use_fast_impl=True,
img_ids=None,
max_dets_per_image=None,
):
"""
Evaluate the coco results using COCOEval API.
"""
assert len(coco_results) > 0
if iou_type == "segm":
coco_results = copy.deepcopy(coco_results)
# When evaluating mask AP, if the results contain bbox, cocoapi will
# use the box area as the area of the instance, instead of the mask area.
# This leads to a different definition of small/medium/large.
# We remove the bbox field to let mask AP use mask area.
for c in coco_results:
c.pop("bbox", None)
coco_dt = coco_gt.loadRes(coco_results)
coco_eval = (COCOeval_opt if use_fast_impl else COCOeval)(coco_gt, coco_dt, iou_type)
# For COCO, the default max_dets_per_image is [1, 10, 100].
if max_dets_per_image is None:
max_dets_per_image = [1, 10, 100] # Default from COCOEval
else:
assert (
len(max_dets_per_image) >= 3
), "COCOeval requires maxDets (and max_dets_per_image) to have length at least 3"
# In the case that user supplies a custom input for max_dets_per_image,
# apply COCOevalMaxDets to evaluate AP with the custom input.
if max_dets_per_image[2] != 100:
coco_eval = COCOevalMaxDets(coco_gt, coco_dt, iou_type)
if iou_type != "keypoints":
coco_eval.params.maxDets = max_dets_per_image
if img_ids is not None:
coco_eval.params.imgIds = img_ids
if iou_type == "keypoints":
# Use the COCO default keypoint OKS sigmas unless overrides are specified
if kpt_oks_sigmas:
assert hasattr(coco_eval.params, "kpt_oks_sigmas"), "pycocotools is too old!"
coco_eval.params.kpt_oks_sigmas = np.array(kpt_oks_sigmas)
# COCOAPI requires every detection and every gt to have keypoints, so
# we just take the first entry from both
num_keypoints_dt = len(coco_results[0]["keypoints"]) // 3
num_keypoints_gt = len(next(iter(coco_gt.anns.values()))["keypoints"]) // 3
num_keypoints_oks = len(coco_eval.params.kpt_oks_sigmas)
assert num_keypoints_oks == num_keypoints_dt == num_keypoints_gt, (
f"[COCOEvaluator] Prediction contain {num_keypoints_dt} keypoints. "
f"Ground truth contains {num_keypoints_gt} keypoints. "
f"The length of cfg.TEST.KEYPOINT_OKS_SIGMAS is {num_keypoints_oks}. "
"They have to agree with each other. For meaning of OKS, please refer to "
"http://cocodataset.org/#keypoints-eval."
)
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
return coco_eval
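# --- Hedged usage sketch (not part of the original file) ----------------------
# What `_evaluate_predictions_on_coco` boils down to for a plain bbox run with
# the official pycocotools API. `ann_file` and `results` are placeholders:
# `results` must be a list of COCO-style result dicts such as the ones produced
# by `instances_to_coco_json` above.
def _plain_cocoeval_sketch(ann_file, results):
    coco_gt = COCO(ann_file)
    coco_dt = coco_gt.loadRes(results)
    coco_eval = COCOeval(coco_gt, coco_dt, "bbox")
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
    return coco_eval.stats  # 12 numbers: AP, AP50, AP75, APs/m/l, AR@1/10/100, ARs/m/l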
class COCOevalMaxDets(COCOeval):
"""
Modified version of COCOeval for evaluating AP with a custom
maxDets (by default for COCO, maxDets is 100)
"""
def summarize(self):
"""
Compute and display summary metrics for evaluation results given
a custom value for max_dets_per_image
"""
def _summarize(ap=1, iouThr=None, areaRng="all", maxDets=100):
p = self.params
iStr = " {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}"
titleStr = "Average Precision" if ap == 1 else "Average Recall"
typeStr = "(AP)" if ap == 1 else "(AR)"
iouStr = (
"{:0.2f}:{:0.2f}".format(p.iouThrs[0], p.iouThrs[-1])
if iouThr is None
else "{:0.2f}".format(iouThr)
)
aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]
mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]
if ap == 1:
# dimension of precision: [TxRxKxAxM]
s = self.eval["precision"]
# IoU
if iouThr is not None:
t = np.where(iouThr == p.iouThrs)[0]
s = s[t]
s = s[:, :, :, aind, mind]
else:
# dimension of recall: [TxKxAxM]
s = self.eval["recall"]
if iouThr is not None:
t = np.where(iouThr == p.iouThrs)[0]
s = s[t]
s = s[:, :, aind, mind]
if len(s[s > -1]) == 0:
mean_s = -1
else:
mean_s = np.mean(s[s > -1])
print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s))
return mean_s
def _summarizeDets():
stats = np.zeros((12,))
# Evaluate AP using the custom limit on maximum detections per image
stats[0] = _summarize(1, maxDets=self.params.maxDets[2])
stats[1] = _summarize(1, iouThr=0.5, maxDets=self.params.maxDets[2])
stats[2] = _summarize(1, iouThr=0.75, maxDets=self.params.maxDets[2])
stats[3] = _summarize(1, areaRng="small", maxDets=self.params.maxDets[2])
stats[4] = _summarize(1, areaRng="medium", maxDets=self.params.maxDets[2])
stats[5] = _summarize(1, areaRng="large", maxDets=self.params.maxDets[2])
stats[6] = _summarize(0, maxDets=self.params.maxDets[0])
stats[7] = _summarize(0, maxDets=self.params.maxDets[1])
stats[8] = _summarize(0, maxDets=self.params.maxDets[2])
stats[9] = _summarize(0, areaRng="small", maxDets=self.params.maxDets[2])
stats[10] = _summarize(0, areaRng="medium", maxDets=self.params.maxDets[2])
stats[11] = _summarize(0, areaRng="large", maxDets=self.params.maxDets[2])
return stats
def _summarizeKps():
stats = np.zeros((10,))
stats[0] = _summarize(1, maxDets=20)
stats[1] = _summarize(1, maxDets=20, iouThr=0.5)
stats[2] = _summarize(1, maxDets=20, iouThr=0.75)
stats[3] = _summarize(1, maxDets=20, areaRng="medium")
stats[4] = _summarize(1, maxDets=20, areaRng="large")
stats[5] = _summarize(0, maxDets=20)
stats[6] = _summarize(0, maxDets=20, iouThr=0.5)
stats[7] = _summarize(0, maxDets=20, iouThr=0.75)
stats[8] = _summarize(0, maxDets=20, areaRng="medium")
stats[9] = _summarize(0, maxDets=20, areaRng="large")
return stats
if not self.eval:
raise Exception("Please run accumulate() first")
iouType = self.params.iouType
if iouType == "segm" or iouType == "bbox":
summarize = _summarizeDets
elif iouType == "keypoints":
summarize = _summarizeKps
self.stats = summarize()
def __str__(self):
self.summarize()
| CutLER-main | cutler/evaluation/coco_evaluation.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Modified by XuDong Wang from https://github.com/facebookresearch/detectron2/blob/main/detectron2/data/build.py
import itertools
import logging
import numpy as np
import operator
import pickle
from typing import Any, Callable, Dict, List, Optional, Union
import torch
import torch.utils.data as torchdata
from tabulate import tabulate
from termcolor import colored
from detectron2.config import configurable
from detectron2.structures import BoxMode
from detectron2.utils.comm import get_world_size
from detectron2.utils.env import seed_all_rng
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import _log_api_usage, log_first_n
from detectron2.data.catalog import DatasetCatalog, MetadataCatalog
from detectron2.data.common import AspectRatioGroupedDataset, DatasetFromList, MapDataset, ToIterableDataset
from data.dataset_mapper import DatasetMapper
from data.detection_utils import check_metadata_consistency
from detectron2.data.samplers import (
InferenceSampler,
RandomSubsetTrainingSampler,
RepeatFactorTrainingSampler,
TrainingSampler,
)
"""
This file contains the default logic to build a dataloader for training or testing.
"""
__all__ = [
"build_batch_data_loader",
"build_detection_train_loader",
"build_detection_test_loader",
"get_detection_dataset_dicts",
"load_proposals_into_dataset",
"print_instances_class_histogram",
]
def filter_images_with_only_crowd_annotations(dataset_dicts):
"""
Filter out images with none annotations or only crowd annotations
(i.e., images without non-crowd annotations).
A common training-time preprocessing on COCO dataset.
Args:
dataset_dicts (list[dict]): annotations in Detectron2 Dataset format.
Returns:
list[dict]: the same format, but filtered.
"""
num_before = len(dataset_dicts)
def valid(anns):
for ann in anns:
if ann.get("iscrowd", 0) == 0:
return True
return False
dataset_dicts = [x for x in dataset_dicts if valid(x["annotations"])]
num_after = len(dataset_dicts)
logger = logging.getLogger(__name__)
logger.info(
"Removed {} images with no usable annotations. {} images left.".format(
num_before - num_after, num_after
)
)
print("Removed {} images with no usable annotations. {} images left.".format(
num_before - num_after, num_after
))
return dataset_dicts
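# --- Hedged example (not part of the original file) ---------------------------
# Tiny, made-up dataset dicts showing what the crowd filter keeps: the first
# record has a non-crowd annotation and survives, the second has only a crowd
# annotation and is dropped.
def _crowd_filter_example():
    dataset_dicts = [
        {"file_name": "a.jpg", "annotations": [{"iscrowd": 0, "category_id": 0}]},
        {"file_name": "b.jpg", "annotations": [{"iscrowd": 1, "category_id": 0}]},
    ]
    return filter_images_with_only_crowd_annotations(dataset_dicts)  # keeps only "a.jpg"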
def filter_images_with_few_keypoints(dataset_dicts, min_keypoints_per_image):
"""
Filter out images with too few number of keypoints.
Args:
dataset_dicts (list[dict]): annotations in Detectron2 Dataset format.
Returns:
list[dict]: the same format as dataset_dicts, but filtered.
"""
num_before = len(dataset_dicts)
def visible_keypoints_in_image(dic):
# Each keypoints field has the format [x1, y1, v1, ...], where v is visibility
annotations = dic["annotations"]
return sum(
(np.array(ann["keypoints"][2::3]) > 0).sum()
for ann in annotations
if "keypoints" in ann
)
dataset_dicts = [
x for x in dataset_dicts if visible_keypoints_in_image(x) >= min_keypoints_per_image
]
num_after = len(dataset_dicts)
logger = logging.getLogger(__name__)
logger.info(
"Removed {} images with fewer than {} keypoints.".format(
num_before - num_after, min_keypoints_per_image
)
)
return dataset_dicts
def load_proposals_into_dataset(dataset_dicts, proposal_file):
"""
Load precomputed object proposals into the dataset.
The proposal file should be a pickled dict with the following keys:
- "ids": list[int] or list[str], the image ids
- "boxes": list[np.ndarray], each is an Nx4 array of boxes corresponding to the image id
- "objectness_logits": list[np.ndarray], each is an N sized array of objectness scores
corresponding to the boxes.
- "bbox_mode": the BoxMode of the boxes array. Defaults to ``BoxMode.XYXY_ABS``.
Args:
dataset_dicts (list[dict]): annotations in Detectron2 Dataset format.
proposal_file (str): file path of pre-computed proposals, in pkl format.
Returns:
list[dict]: the same format as dataset_dicts, but added proposal field.
"""
logger = logging.getLogger(__name__)
logger.info("Loading proposals from: {}".format(proposal_file))
with PathManager.open(proposal_file, "rb") as f:
proposals = pickle.load(f, encoding="latin1")
# Rename the key names in D1 proposal files
rename_keys = {"indexes": "ids", "scores": "objectness_logits"}
for key in rename_keys:
if key in proposals:
proposals[rename_keys[key]] = proposals.pop(key)
# Fetch the indexes of all proposals that are in the dataset
# Convert image_id to str since they could be int.
img_ids = set({str(record["image_id"]) for record in dataset_dicts})
id_to_index = {str(id): i for i, id in enumerate(proposals["ids"]) if str(id) in img_ids}
# Assuming default bbox_mode of precomputed proposals are 'XYXY_ABS'
bbox_mode = BoxMode(proposals["bbox_mode"]) if "bbox_mode" in proposals else BoxMode.XYXY_ABS
for record in dataset_dicts:
# Get the index of the proposal
i = id_to_index[str(record["image_id"])]
boxes = proposals["boxes"][i]
objectness_logits = proposals["objectness_logits"][i]
# Sort the proposals in descending order of the scores
inds = objectness_logits.argsort()[::-1]
record["proposal_boxes"] = boxes[inds]
record["proposal_objectness_logits"] = objectness_logits[inds]
record["proposal_bbox_mode"] = bbox_mode
return dataset_dicts
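# --- Hedged example (not part of the original file) ---------------------------
# A minimal proposal file wired into a one-image dataset. The temporary path,
# box values and logits are made up; in practice the pickle comes from a
# proposal generator.
def _load_proposals_example(tmp_path="/tmp/_demo_proposals.pkl"):
    dataset_dicts = [{"image_id": 1, "file_name": "a.jpg"}]
    proposals = {
        "ids": [1],
        "boxes": [np.array([[0.0, 0.0, 50.0, 50.0], [10.0, 10.0, 40.0, 60.0]])],
        "objectness_logits": [np.array([0.2, 0.9])],
        "bbox_mode": int(BoxMode.XYXY_ABS),
    }
    with open(tmp_path, "wb") as f:
        pickle.dump(proposals, f)
    out = load_proposals_into_dataset(dataset_dicts, tmp_path)
    # out[0] now carries "proposal_boxes" (sorted by objectness), the sorted
    # "proposal_objectness_logits" and "proposal_bbox_mode".
    return out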
def print_instances_class_histogram(dataset_dicts, class_names):
"""
Args:
dataset_dicts (list[dict]): list of dataset dicts.
class_names (list[str]): list of class names (zero-indexed).
"""
num_classes = len(class_names)
hist_bins = np.arange(num_classes + 1)
histogram = np.zeros((num_classes,), dtype=int)  # np.int was removed in NumPy 1.24+
for entry in dataset_dicts:
annos = entry["annotations"]
classes = np.asarray(
[x["category_id"] for x in annos if not x.get("iscrowd", 0)], dtype=int
)
if len(classes):
assert classes.min() >= 0, f"Got an invalid category_id={classes.min()}"
assert (
classes.max() < num_classes
), f"Got an invalid category_id={classes.max()} for a dataset of {num_classes} classes"
histogram += np.histogram(classes, bins=hist_bins)[0]
N_COLS = min(6, len(class_names) * 2)
def short_name(x):
# make long class names shorter. useful for lvis
if len(x) > 13:
return x[:11] + ".."
return x
data = list(
itertools.chain(*[[short_name(class_names[i]), int(v)] for i, v in enumerate(histogram)])
)
total_num_instances = sum(data[1::2])
data.extend([None] * (N_COLS - (len(data) % N_COLS)))
if num_classes > 1:
data.extend(["total", total_num_instances])
data = itertools.zip_longest(*[data[i::N_COLS] for i in range(N_COLS)])
table = tabulate(
data,
headers=["category", "#instances"] * (N_COLS // 2),
tablefmt="pipe",
numalign="left",
stralign="center",
)
log_first_n(
logging.INFO,
"Distribution of instances among all {} categories:\n".format(num_classes)
+ colored(table, "cyan"),
key="message",
)
def get_detection_dataset_dicts(
names,
filter_empty=True,
min_keypoints=0,
proposal_files=None,
check_consistency=True,
):
"""
Load and prepare dataset dicts for instance detection/segmentation and semantic segmentation.
Args:
names (str or list[str]): a dataset name or a list of dataset names
filter_empty (bool): whether to filter out images without instance annotations
min_keypoints (int): filter out images with fewer keypoints than
`min_keypoints`. Set to 0 to do nothing.
proposal_files (list[str]): if given, a list of object proposal files
that match each dataset in `names`.
check_consistency (bool): whether to check if datasets have consistent metadata.
Returns:
list[dict]: a list of dicts following the standard dataset dict format.
"""
if isinstance(names, str):
names = [names]
assert len(names), names
dataset_dicts = [DatasetCatalog.get(dataset_name) for dataset_name in names]
if isinstance(dataset_dicts[0], torchdata.Dataset):
if len(dataset_dicts) > 1:
# ConcatDataset does not work for iterable style dataset.
# We could support concat for iterable as well, but it's often
# not a good idea to concat iterables anyway.
return torchdata.ConcatDataset(dataset_dicts)
return dataset_dicts[0]
for dataset_name, dicts in zip(names, dataset_dicts):
assert len(dicts), "Dataset '{}' is empty!".format(dataset_name)
if proposal_files is not None:
assert len(names) == len(proposal_files)
# load precomputed proposals from proposal files
dataset_dicts = [
load_proposals_into_dataset(dataset_i_dicts, proposal_file)
for dataset_i_dicts, proposal_file in zip(dataset_dicts, proposal_files)
]
dataset_dicts = list(itertools.chain.from_iterable(dataset_dicts))
has_instances = "annotations" in dataset_dicts[0]
if filter_empty and has_instances:
dataset_dicts = filter_images_with_only_crowd_annotations(dataset_dicts)
if min_keypoints > 0 and has_instances:
dataset_dicts = filter_images_with_few_keypoints(dataset_dicts, min_keypoints)
if check_consistency and has_instances:
try:
class_names = MetadataCatalog.get(names[0]).thing_classes
check_metadata_consistency("thing_classes", names)
print_instances_class_histogram(dataset_dicts, class_names)
except AttributeError: # class names are not available for this dataset
pass
assert len(dataset_dicts), "No valid data found in {}.".format(",".join(names))
return dataset_dicts
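# --- Hedged usage sketch (not part of the original file) ----------------------
# Registering a toy dataset and pulling its dicts through the helper above. The
# dataset name and its single record are invented for illustration only.
def _get_dataset_dicts_sketch():
    def _toy_dataset():
        return [{
            "file_name": "a.jpg",
            "image_id": 1,
            "height": 10,
            "width": 10,
            "annotations": [
                {"bbox": [0, 0, 5, 5], "bbox_mode": BoxMode.XYWH_ABS,
                 "category_id": 0, "iscrowd": 0},
            ],
        }]

    if "_toy_demo_train" not in DatasetCatalog.list():
        DatasetCatalog.register("_toy_demo_train", _toy_dataset)
        MetadataCatalog.get("_toy_demo_train").thing_classes = ["thing"]
    return get_detection_dataset_dicts("_toy_demo_train", filter_empty=True)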
def build_batch_data_loader(
dataset,
sampler,
total_batch_size,
*,
aspect_ratio_grouping=False,
num_workers=0,
collate_fn=None,
):
"""
Build a batched dataloader. The main differences from `torch.utils.data.DataLoader` are:
1. support aspect ratio grouping options
2. use no "batch collation", because this is common for detection training
Args:
dataset (torch.utils.data.Dataset): a pytorch map-style or iterable dataset.
sampler (torch.utils.data.sampler.Sampler or None): a sampler that produces indices.
Must be provided iff. ``dataset`` is a map-style dataset.
total_batch_size, aspect_ratio_grouping, num_workers, collate_fn: see
:func:`build_detection_train_loader`.
Returns:
iterable[list]. Length of each list is the batch size of the current
GPU. Each element in the list comes from the dataset.
"""
world_size = get_world_size()
assert (
total_batch_size > 0 and total_batch_size % world_size == 0
), "Total batch size ({}) must be divisible by the number of gpus ({}).".format(
total_batch_size, world_size
)
batch_size = total_batch_size // world_size
if isinstance(dataset, torchdata.IterableDataset):
assert sampler is None, "sampler must be None if dataset is IterableDataset"
else:
dataset = ToIterableDataset(dataset, sampler)
if aspect_ratio_grouping:
data_loader = torchdata.DataLoader(
dataset,
num_workers=num_workers,
collate_fn=operator.itemgetter(0), # don't batch, but yield individual elements
worker_init_fn=worker_init_reset_seed,
) # yield individual mapped dict
data_loader = AspectRatioGroupedDataset(data_loader, batch_size)
if collate_fn is None:
return data_loader
return MapDataset(data_loader, collate_fn)
else:
return torchdata.DataLoader(
dataset,
batch_size=batch_size,
drop_last=True,
num_workers=num_workers,
collate_fn=trivial_batch_collator if collate_fn is None else collate_fn,
worker_init_fn=worker_init_reset_seed,
)
def _train_loader_from_config(cfg, mapper=None, *, dataset=None, sampler=None):
if dataset is None:
dataset = get_detection_dataset_dicts(
cfg.DATASETS.TRAIN,
filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
if cfg.MODEL.KEYPOINT_ON
else 0,
proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None,
)
_log_api_usage("dataset." + cfg.DATASETS.TRAIN[0])
if mapper is None:
mapper = DatasetMapper(cfg, True)
if sampler is None:
sampler_name = cfg.DATALOADER.SAMPLER_TRAIN
logger = logging.getLogger(__name__)
if isinstance(dataset, torchdata.IterableDataset):
logger.info("Not using any sampler since the dataset is IterableDataset.")
sampler = None
else:
logger.info("Using training sampler {}".format(sampler_name))
if sampler_name == "TrainingSampler":
sampler = TrainingSampler(len(dataset))
elif sampler_name == "RepeatFactorTrainingSampler":
repeat_factors = RepeatFactorTrainingSampler.repeat_factors_from_category_frequency(
dataset, cfg.DATALOADER.REPEAT_THRESHOLD
)
sampler = RepeatFactorTrainingSampler(repeat_factors)
elif sampler_name == "RandomSubsetTrainingSampler":
sampler = RandomSubsetTrainingSampler(
len(dataset), cfg.DATALOADER.RANDOM_SUBSET_RATIO
)
else:
raise ValueError("Unknown training sampler: {}".format(sampler_name))
return {
"dataset": dataset,
"sampler": sampler,
"mapper": mapper,
"total_batch_size": cfg.SOLVER.IMS_PER_BATCH,
"aspect_ratio_grouping": cfg.DATALOADER.ASPECT_RATIO_GROUPING,
"num_workers": cfg.DATALOADER.NUM_WORKERS,
}
@configurable(from_config=_train_loader_from_config)
def build_detection_train_loader(
dataset,
*,
mapper,
sampler=None,
total_batch_size,
aspect_ratio_grouping=True,
num_workers=0,
collate_fn=None,
):
"""
Build a dataloader for object detection with some default features.
Args:
dataset (list or torch.utils.data.Dataset): a list of dataset dicts,
or a pytorch dataset (either map-style or iterable). It can be obtained
by using :func:`DatasetCatalog.get` or :func:`get_detection_dataset_dicts`.
mapper (callable): a callable which takes a sample (dict) from dataset and
returns the format to be consumed by the model.
When using cfg, the default choice is ``DatasetMapper(cfg, is_train=True)``.
sampler (torch.utils.data.sampler.Sampler or None): a sampler that produces
indices to be applied on ``dataset``.
If ``dataset`` is map-style, the default sampler is a :class:`TrainingSampler`,
which coordinates an infinite random shuffle sequence across all workers.
Sampler must be None if ``dataset`` is iterable.
total_batch_size (int): total batch size across all workers.
aspect_ratio_grouping (bool): whether to group images with similar
aspect ratio for efficiency. When enabled, it requires each
element in dataset be a dict with keys "width" and "height".
num_workers (int): number of parallel data loading workers
collate_fn: a function that determines how to do batching, same as the argument of
`torch.utils.data.DataLoader`. Defaults to do no collation and return a list of
data. No collation is OK for small batch size and simple data structures.
If your batch size is large and each sample contains too many small tensors,
it's more efficient to collate them in data loader.
Returns:
torch.utils.data.DataLoader:
a dataloader. Each output from it is a ``list[mapped_element]`` of length
``total_batch_size / num_gpus`` (the per-GPU batch size), where ``mapped_element`` is produced
by the ``mapper``.
"""
if isinstance(dataset, list):
dataset = DatasetFromList(dataset, copy=False)
if mapper is not None:
dataset = MapDataset(dataset, mapper)
if isinstance(dataset, torchdata.IterableDataset):
assert sampler is None, "sampler must be None if dataset is IterableDataset"
else:
if sampler is None:
sampler = TrainingSampler(len(dataset))
assert isinstance(sampler, torchdata.Sampler), f"Expect a Sampler but got {type(sampler)}"
return build_batch_data_loader(
dataset,
sampler,
total_batch_size,
aspect_ratio_grouping=aspect_ratio_grouping,
num_workers=num_workers,
collate_fn=collate_fn,
)
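# --- Hedged usage sketch (not part of the original file) ----------------------
# Explicit-argument form of the train loader on an in-memory list of dicts.
# The identity mapper, batch size and worker count are assumptions; a real
# setup would pass a `DatasetMapper` and read these values from the config.
def _train_loader_sketch(dataset_dicts):
    # `dataset_dicts` is assumed to be a non-empty list of Detectron2-format dicts.
    loader = build_detection_train_loader(
        dataset=dataset_dicts,
        mapper=lambda d: d,           # identity mapper, illustration only
        total_batch_size=2,
        aspect_ratio_grouping=False,  # avoids requiring "width"/"height" keys
        num_workers=0,
    )
    return next(iter(loader))  # a list of 2 mapped dataset dicts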
def _test_loader_from_config(cfg, dataset_name, mapper=None):
"""
Uses the given `dataset_name` argument (instead of the names in cfg), because the
standard practice is to evaluate each test set individually (not combining them).
"""
if isinstance(dataset_name, str):
dataset_name = [dataset_name]
dataset = get_detection_dataset_dicts(
dataset_name,
filter_empty=False,
proposal_files=[
cfg.DATASETS.PROPOSAL_FILES_TEST[list(cfg.DATASETS.TEST).index(x)] for x in dataset_name
]
if cfg.MODEL.LOAD_PROPOSALS
else None,
)
if mapper is None:
mapper = DatasetMapper(cfg, False)
return {
"dataset": dataset,
"mapper": mapper,
"num_workers": cfg.DATALOADER.NUM_WORKERS,
"sampler": InferenceSampler(len(dataset))
if not isinstance(dataset, torchdata.IterableDataset)
else None,
}
@configurable(from_config=_test_loader_from_config)
def build_detection_test_loader(
dataset: Union[List[Any], torchdata.Dataset],
*,
mapper: Callable[[Dict[str, Any]], Any],
sampler: Optional[torchdata.Sampler] = None,
batch_size: int = 1,
num_workers: int = 0,
collate_fn: Optional[Callable[[List[Any]], Any]] = None,
) -> torchdata.DataLoader:
"""
Similar to `build_detection_train_loader`, with default batch size = 1,
and sampler = :class:`InferenceSampler`. This sampler coordinates all workers
to produce the exact set of all samples.
Args:
dataset: a list of dataset dicts,
or a pytorch dataset (either map-style or iterable). They can be obtained
by using :func:`DatasetCatalog.get` or :func:`get_detection_dataset_dicts`.
mapper: a callable which takes a sample (dict) from dataset
and returns the format to be consumed by the model.
When using cfg, the default choice is ``DatasetMapper(cfg, is_train=False)``.
sampler: a sampler that produces
indices to be applied on ``dataset``. Default to :class:`InferenceSampler`,
which splits the dataset across all workers. Sampler must be None
if `dataset` is iterable.
batch_size: the batch size of the data loader to be created.
Default to 1 image per worker since this is the standard when reporting
inference time in papers.
num_workers: number of parallel data loading workers
collate_fn: same as the argument of `torch.utils.data.DataLoader`.
Defaults to do no collation and return a list of data.
Returns:
DataLoader: a torch DataLoader, that loads the given detection
dataset, with test-time transformation and batching.
Examples:
::
data_loader = build_detection_test_loader(
DatasetRegistry.get("my_test"),
mapper=DatasetMapper(...))
# or, instantiate with a CfgNode:
data_loader = build_detection_test_loader(cfg, "my_test")
"""
if isinstance(dataset, list):
dataset = DatasetFromList(dataset, copy=False)
if mapper is not None:
dataset = MapDataset(dataset, mapper)
if isinstance(dataset, torchdata.IterableDataset):
assert sampler is None, "sampler must be None if dataset is IterableDataset"
else:
if sampler is None:
sampler = InferenceSampler(len(dataset))
return torchdata.DataLoader(
dataset,
batch_size=batch_size,
sampler=sampler,
drop_last=False,
num_workers=num_workers,
collate_fn=trivial_batch_collator if collate_fn is None else collate_fn,
)
def trivial_batch_collator(batch):
"""
A batch collator that does nothing.
"""
return batch
def worker_init_reset_seed(worker_id):
initial_seed = torch.initial_seed() % 2**31
seed_all_rng(initial_seed + worker_id)
| CutLER-main | cutler/data/build.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
from . import datasets # ensure the builtin datasets are registered
from .detection_utils import * # isort:skip
from .build import (
build_batch_data_loader,
build_detection_train_loader,
build_detection_test_loader,
get_detection_dataset_dicts,
load_proposals_into_dataset,
print_instances_class_histogram,
)
from detectron2.data.common import *
__all__ = [k for k in globals().keys() if not k.startswith("_")] | CutLER-main | cutler/data/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Modified by XuDong Wang from https://github.com/facebookresearch/detectron2/blob/main/detectron2/data/detection_utils.py
"""
Common data processing utilities that are used in a
typical object detection data pipeline.
"""
import logging
import numpy as np
from typing import List, Union
import pycocotools.mask as mask_util
import torch
from PIL import Image
from detectron2.structures import (
Boxes,
BoxMode,
BitMasks,
Instances,
Keypoints,
PolygonMasks,
RotatedBoxes,
polygons_to_bitmask,
)
from detectron2.utils.file_io import PathManager
from data import transforms as T
from detectron2.data.catalog import MetadataCatalog
__all__ = [
"SizeMismatchError",
"convert_image_to_rgb",
"check_image_size",
"transform_proposals",
"transform_instance_annotations",
"annotations_to_instances",
"annotations_to_instances_rotated",
"build_augmentation",
"build_transform_gen",
"create_keypoint_hflip_indices",
"filter_empty_instances",
"read_image",
]
class SizeMismatchError(ValueError):
"""
Raised when a loaded image has a different width/height than the annotation specifies.
"""
# https://en.wikipedia.org/wiki/YUV#SDTV_with_BT.601
_M_RGB2YUV = [[0.299, 0.587, 0.114], [-0.14713, -0.28886, 0.436], [0.615, -0.51499, -0.10001]]
_M_YUV2RGB = [[1.0, 0.0, 1.13983], [1.0, -0.39465, -0.58060], [1.0, 2.03211, 0.0]]
# https://www.exiv2.org/tags.html
_EXIF_ORIENT = 274 # exif 'Orientation' tag
def convert_PIL_to_numpy(image, format):
"""
Convert PIL image to numpy array of target format.
Args:
image (PIL.Image): a PIL image
format (str): the format of output image
Returns:
(np.ndarray): also see `read_image`
"""
if format is not None:
# PIL only supports RGB, so convert to RGB and flip channels over below
conversion_format = format
if format in ["BGR", "YUV-BT.601"]:
conversion_format = "RGB"
image = image.convert(conversion_format)
image = np.asarray(image)
# PIL squeezes out the channel dimension for "L", so make it HWC
if format == "L":
image = np.expand_dims(image, -1)
# handle formats not supported by PIL
elif format == "BGR":
# flip channels if needed
image = image[:, :, ::-1]
elif format == "YUV-BT.601":
image = image / 255.0
image = np.dot(image, np.array(_M_RGB2YUV).T)
return image
def convert_image_to_rgb(image, format):
"""
Convert an image from given format to RGB.
Args:
image (np.ndarray or Tensor): an HWC image
format (str): the format of input image, also see `read_image`
Returns:
(np.ndarray): (H,W,3) RGB image in 0-255 range, can be either float or uint8
"""
if isinstance(image, torch.Tensor):
image = image.cpu().numpy()
if format == "BGR":
image = image[:, :, [2, 1, 0]]
elif format == "YUV-BT.601":
image = np.dot(image, np.array(_M_YUV2RGB).T)
image = image * 255.0
else:
if format == "L":
image = image[:, :, 0]
image = image.astype(np.uint8)
image = np.asarray(Image.fromarray(image, mode=format).convert("RGB"))
return image
def _apply_exif_orientation(image):
"""
Applies the exif orientation correctly.
This code exists per the bug:
https://github.com/python-pillow/Pillow/issues/3973
with the function `ImageOps.exif_transpose`. The Pillow source raises errors with
various methods, especially `tobytes`
Function based on:
https://github.com/wkentaro/labelme/blob/v4.5.4/labelme/utils/image.py#L59
https://github.com/python-pillow/Pillow/blob/7.1.2/src/PIL/ImageOps.py#L527
Args:
image (PIL.Image): a PIL image
Returns:
(PIL.Image): the PIL image with exif orientation applied, if applicable
"""
if not hasattr(image, "getexif"):
return image
try:
exif = image.getexif()
except Exception: # https://github.com/facebookresearch/detectron2/issues/1885
exif = None
if exif is None:
return image
orientation = exif.get(_EXIF_ORIENT)
method = {
2: Image.FLIP_LEFT_RIGHT,
3: Image.ROTATE_180,
4: Image.FLIP_TOP_BOTTOM,
5: Image.TRANSPOSE,
6: Image.ROTATE_270,
7: Image.TRANSVERSE,
8: Image.ROTATE_90,
}.get(orientation)
if method is not None:
return image.transpose(method)
return image
def read_image(file_name, format=None):
"""
Read an image into the given format.
Will apply rotation and flipping if the image has such exif information.
Args:
file_name (str): image file path
format (str): one of the supported image modes in PIL, or "BGR" or "YUV-BT.601".
Returns:
image (np.ndarray):
an HWC image in the given format, which is 0-255, uint8 for
supported image modes in PIL or "BGR"; float (0-1 for Y) for YUV-BT.601.
"""
with PathManager.open(file_name, "rb") as f:
image = Image.open(f)
# work around this bug: https://github.com/python-pillow/Pillow/issues/3973
image = _apply_exif_orientation(image)
return convert_PIL_to_numpy(image, format)
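# --- Hedged usage sketch (not part of the original file) ----------------------
# Reading an image in the "BGR" convention used by most detectron2 configs and
# converting it back to RGB for visualization. The file path is a placeholder.
def _read_image_sketch(path="datasets/coco/val2017/000000000139.jpg"):
    img_bgr = read_image(path, format="BGR")        # HWC uint8, BGR channel order
    img_rgb = convert_image_to_rgb(img_bgr, "BGR")  # HWC uint8, RGB channel order
    return img_bgr.shape, img_rgb.dtype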
def check_image_size(dataset_dict, image):
"""
Raise an error if the image does not match the size specified in the dict.
"""
if "width" in dataset_dict or "height" in dataset_dict:
image_wh = (image.shape[1], image.shape[0])
expected_wh = (dataset_dict["width"], dataset_dict["height"])
if image_wh != expected_wh:  # the annotation may store height/width swapped; try the transposed interpretation before raising
expected_wh = (dataset_dict["height"], dataset_dict["width"])
dataset_dict["height"], dataset_dict["width"] = dataset_dict["width"], dataset_dict["height"]
if image_wh != expected_wh:
raise SizeMismatchError(
"Mismatched image shape{}, got {}, expect {}.".format(
" for image " + dataset_dict["file_name"]
if "file_name" in dataset_dict
else "",
image_wh,
expected_wh,
)
+ " Please check the width/height in your annotation."
)
# To ensure bbox always remap to original image size
if "width" not in dataset_dict:
dataset_dict["width"] = image.shape[1]
if "height" not in dataset_dict:
dataset_dict["height"] = image.shape[0]
def transform_proposals(dataset_dict, image_shape, transforms, *, proposal_topk, min_box_size=0):
"""
Apply transformations to the proposals in dataset_dict, if any.
Args:
dataset_dict (dict): a dict read from the dataset, possibly
contains fields "proposal_boxes", "proposal_objectness_logits", "proposal_bbox_mode"
image_shape (tuple): height, width
transforms (TransformList):
proposal_topk (int): only keep top-K scoring proposals
min_box_size (int): proposals with either side smaller than this
threshold are removed
The input dict is modified in-place, with the above-mentioned keys removed. A new
key "proposals" will be added. Its value is an `Instances`
object which contains the transformed proposals in its field
"proposal_boxes" and "objectness_logits".
"""
if "proposal_boxes" in dataset_dict:
# Transform proposal boxes
boxes = transforms.apply_box(
BoxMode.convert(
dataset_dict.pop("proposal_boxes"),
dataset_dict.pop("proposal_bbox_mode"),
BoxMode.XYXY_ABS,
)
)
boxes = Boxes(boxes)
objectness_logits = torch.as_tensor(
dataset_dict.pop("proposal_objectness_logits").astype("float32")
)
boxes.clip(image_shape)
keep = boxes.nonempty(threshold=min_box_size)
boxes = boxes[keep]
objectness_logits = objectness_logits[keep]
proposals = Instances(image_shape)
proposals.proposal_boxes = boxes[:proposal_topk]
proposals.objectness_logits = objectness_logits[:proposal_topk]
dataset_dict["proposals"] = proposals
def transform_instance_annotations(
annotation, transforms, image_size, *, keypoint_hflip_indices=None
):
"""
Apply transforms to box, segmentation and keypoints annotations of a single instance.
It will use `transforms.apply_box` for the box, and
`transforms.apply_coords` for segmentation polygons & keypoints.
If you need anything more specially designed for each data structure,
you'll need to implement your own version of this function or the transforms.
Args:
annotation (dict): dict of instance annotations for a single instance.
It will be modified in-place.
transforms (TransformList or list[Transform]):
image_size (tuple): the height, width of the transformed image
keypoint_hflip_indices (ndarray[int]): see `create_keypoint_hflip_indices`.
Returns:
dict:
the same input dict with fields "bbox", "segmentation", "keypoints"
transformed according to `transforms`.
The "bbox_mode" field will be set to XYXY_ABS.
"""
if isinstance(transforms, (tuple, list)):
transforms = T.TransformList(transforms)
# bbox is 1d (per-instance bounding box)
bbox = BoxMode.convert(annotation["bbox"], annotation["bbox_mode"], BoxMode.XYXY_ABS)
# clip transformed bbox to image size
bbox = transforms.apply_box(np.array([bbox]))[0].clip(min=0)
annotation["bbox"] = np.minimum(bbox, list(image_size + image_size)[::-1])
annotation["bbox_mode"] = BoxMode.XYXY_ABS
if "segmentation" in annotation:
# each instance contains 1 or more polygons
segm = annotation["segmentation"]
if isinstance(segm, list):
# polygons
polygons = [np.asarray(p).reshape(-1, 2) for p in segm]
annotation["segmentation"] = [
p.reshape(-1) for p in transforms.apply_polygons(polygons)
]
elif isinstance(segm, dict):
# RLE
mask = mask_util.decode(segm)
mask = transforms.apply_segmentation(mask)
assert tuple(mask.shape[:2]) == image_size
annotation["segmentation"] = mask
else:
raise ValueError(
"Cannot transform segmentation of type '{}'!"
"Supported types are: polygons as list[list[float] or ndarray],"
" COCO-style RLE as a dict.".format(type(segm))
)
if "keypoints" in annotation:
keypoints = transform_keypoint_annotations(
annotation["keypoints"], transforms, image_size, keypoint_hflip_indices
)
annotation["keypoints"] = keypoints
return annotation
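# --- Hedged example (not part of the original file) ---------------------------
# Horizontally flipping a single XYWH annotation on a 100x200 (height x width)
# image. Only a bbox is present, so only the bbox branch runs; the output box is
# in XYXY_ABS as documented above. The numbers are invented.
def _transform_annotation_example():
    anno = {"bbox": [10, 20, 30, 40], "bbox_mode": BoxMode.XYWH_ABS, "category_id": 0}
    tfms = [T.HFlipTransform(width=200)]
    out = transform_instance_annotations(anno, tfms, image_size=(100, 200))
    # out["bbox"] is now (160, 20, 190, 60) and out["bbox_mode"] == BoxMode.XYXY_ABS.
    return out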
def transform_keypoint_annotations(keypoints, transforms, image_size, keypoint_hflip_indices=None):
"""
Transform keypoint annotations of an image.
If a keypoint is transformed out of image boundary, it will be marked "unlabeled" (visibility=0)
Args:
keypoints (list[float]): Nx3 float in Detectron2's Dataset format.
Each point is represented by (x, y, visibility).
transforms (TransformList):
image_size (tuple): the height, width of the transformed image
keypoint_hflip_indices (ndarray[int]): see `create_keypoint_hflip_indices`.
When `transforms` includes horizontal flip, will use the index
mapping to flip keypoints.
"""
# (N*3,) -> (N, 3)
keypoints = np.asarray(keypoints, dtype="float64").reshape(-1, 3)
keypoints_xy = transforms.apply_coords(keypoints[:, :2])
# Set all out-of-boundary points to "unlabeled"
inside = (keypoints_xy >= np.array([0, 0])) & (keypoints_xy <= np.array(image_size[::-1]))
inside = inside.all(axis=1)
keypoints[:, :2] = keypoints_xy
keypoints[:, 2][~inside] = 0
# This assumes that HFlipTransform is the only transform that performs a horizontal flip
do_hflip = sum(isinstance(t, T.HFlipTransform) for t in transforms.transforms) % 2 == 1
# Alternative way: check if probe points were horizontally flipped.
# probe = np.asarray([[0.0, 0.0], [image_width, 0.0]])
# probe_aug = transforms.apply_coords(probe.copy())
# do_hflip = np.sign(probe[1][0] - probe[0][0]) != np.sign(probe_aug[1][0] - probe_aug[0][0]) # noqa
# If flipped, swap each keypoint with its opposite-handed equivalent
if do_hflip:
if keypoint_hflip_indices is None:
raise ValueError("Cannot flip keypoints without providing flip indices!")
if len(keypoints) != len(keypoint_hflip_indices):
raise ValueError(
"Keypoint data has {} points, but metadata "
"contains {} points!".format(len(keypoints), len(keypoint_hflip_indices))
)
keypoints = keypoints[np.asarray(keypoint_hflip_indices, dtype=np.int32), :]
# Maintain COCO convention that if visibility == 0 (unlabeled), then x, y = 0
keypoints[keypoints[:, 2] == 0] = 0
return keypoints
def annotations_to_instances(annos, image_size, mask_format="polygon"):
"""
Create an :class:`Instances` object used by the models,
from instance annotations in the dataset dict.
Args:
annos (list[dict]): a list of instance annotations in one image, each
element for one instance.
image_size (tuple): height, width
Returns:
Instances:
It will contain fields "gt_boxes", "gt_classes",
"gt_masks", "gt_keypoints", if they can be obtained from `annos`.
This is the format that builtin models expect.
"""
boxes = (
np.stack(
[BoxMode.convert(obj["bbox"], obj["bbox_mode"], BoxMode.XYXY_ABS) for obj in annos]
)
if len(annos)
else np.zeros((0, 4))
)
target = Instances(image_size)
target.gt_boxes = Boxes(boxes)
classes = [int(obj["category_id"]) for obj in annos]
classes = torch.tensor(classes, dtype=torch.int64)
target.gt_classes = classes
if len(annos) and "segmentation" in annos[0]:
segms = [obj["segmentation"] for obj in annos]
if mask_format == "polygon":
try:
masks = PolygonMasks(segms)
except ValueError as e:
raise ValueError(
"Failed to use mask_format=='polygon' from the given annotations!"
) from e
else:
assert mask_format == "bitmask", mask_format
masks = []
for segm in segms:
if isinstance(segm, list):
# polygon
masks.append(polygons_to_bitmask(segm, *image_size))
elif isinstance(segm, dict):
# COCO RLE
masks.append(mask_util.decode(segm))
elif isinstance(segm, np.ndarray):
assert segm.ndim == 2, "Expect segmentation of 2 dimensions, got {}.".format(
segm.ndim
)
# mask array
masks.append(segm)
else:
raise ValueError(
"Cannot convert segmentation of type '{}' to BitMasks!"
"Supported types are: polygons as list[list[float] or ndarray],"
" COCO-style RLE as a dict, or a binary segmentation mask "
" in a 2D numpy array of shape HxW.".format(type(segm))
)
# torch.from_numpy does not support array with negative stride.
masks = BitMasks(
torch.stack([torch.from_numpy(np.ascontiguousarray(x)) for x in masks])
)
target.gt_masks = masks
if len(annos) and "keypoints" in annos[0]:
kpts = [obj.get("keypoints", []) for obj in annos]
target.gt_keypoints = Keypoints(kpts)
return target
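# --- Hedged example (not part of the original file) ---------------------------
# Two box-only annotations turned into the `Instances` object the models train
# on. With no "segmentation"/"keypoints" keys, only gt_boxes and gt_classes are
# filled in. All values are invented.
def _annotations_to_instances_example():
    annos = [
        {"bbox": [0, 0, 10, 10], "bbox_mode": BoxMode.XYWH_ABS, "category_id": 0},
        {"bbox": [5, 5, 20, 30], "bbox_mode": BoxMode.XYWH_ABS, "category_id": 1},
    ]
    target = annotations_to_instances(annos, image_size=(100, 100))
    return target.gt_boxes, target.gt_classes  # Boxes of shape (2, 4), tensor([0, 1])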
def annotations_to_instances_rotated(annos, image_size):
"""
Create an :class:`Instances` object used by the models,
from instance annotations in the dataset dict.
Compared to `annotations_to_instances`, this function is for rotated boxes only
Args:
annos (list[dict]): a list of instance annotations in one image, each
element for one instance.
image_size (tuple): height, width
Returns:
Instances:
Containing fields "gt_boxes", "gt_classes",
if they can be obtained from `annos`.
This is the format that builtin models expect.
"""
boxes = [obj["bbox"] for obj in annos]
target = Instances(image_size)
boxes = target.gt_boxes = RotatedBoxes(boxes)
boxes.clip(image_size)
classes = [obj["category_id"] for obj in annos]
classes = torch.tensor(classes, dtype=torch.int64)
target.gt_classes = classes
return target
def filter_empty_instances(
instances, by_box=True, by_mask=True, box_threshold=1e-5, return_mask=False
):
"""
Filter out empty instances in an `Instances` object.
Args:
instances (Instances):
by_box (bool): whether to filter out instances with empty boxes
by_mask (bool): whether to filter out instances with empty masks
box_threshold (float): minimum width and height to be considered non-empty
return_mask (bool): whether to return boolean mask of filtered instances
Returns:
Instances: the filtered instances.
tensor[bool], optional: boolean mask of filtered instances
"""
assert by_box or by_mask
r = []
if by_box:
r.append(instances.gt_boxes.nonempty(threshold=box_threshold))
if instances.has("gt_masks") and by_mask:
r.append(instances.gt_masks.nonempty())
# TODO: can also filter visible keypoints
if not r:
return instances
m = r[0]
for x in r[1:]:
m = m & x
if return_mask:
return instances[m], m
return instances[m]
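# --- Usage sketch (editorial addition, not part of the original file) --------
# A minimal example of `filter_empty_instances`: one degenerate (zero-area) box
# and one valid box; only the valid instance survives. The helper name below is
# hypothetical and the function is never called at import time.
def _demo_filter_empty_instances():
    inst = Instances((100, 100))
    inst.gt_boxes = Boxes(torch.tensor([[10.0, 10.0, 10.0, 10.0],   # empty box
                                        [10.0, 10.0, 50.0, 60.0]]))  # valid box
    inst.gt_classes = torch.tensor([0, 0])
    kept, mask = filter_empty_instances(inst, return_mask=True)
    # len(kept) == 1 and mask.tolist() == [False, True]
    return kept, mask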
def create_keypoint_hflip_indices(dataset_names: Union[str, List[str]]) -> List[int]:
"""
Args:
dataset_names: list of dataset names
Returns:
list[int]: a list of size=#keypoints, storing the
horizontally-flipped keypoint indices.
"""
if isinstance(dataset_names, str):
dataset_names = [dataset_names]
check_metadata_consistency("keypoint_names", dataset_names)
check_metadata_consistency("keypoint_flip_map", dataset_names)
meta = MetadataCatalog.get(dataset_names[0])
names = meta.keypoint_names
# TODO flip -> hflip
flip_map = dict(meta.keypoint_flip_map)
flip_map.update({v: k for k, v in flip_map.items()})
flipped_names = [i if i not in flip_map else flip_map[i] for i in names]
flip_indices = [names.index(i) for i in flipped_names]
return flip_indices
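# --- Usage sketch (editorial addition, not part of the original file) --------
# A hedged example of `create_keypoint_hflip_indices` on a made-up dataset name
# ("_demo_kpt_dataset") with three toy keypoints. Under a horizontal flip the
# nose maps to itself and the two eyes swap, so the expected result is [0, 2, 1].
def _demo_create_keypoint_hflip_indices():
    MetadataCatalog.get("_demo_kpt_dataset").set(
        keypoint_names=("nose", "left_eye", "right_eye"),
        keypoint_flip_map=(("left_eye", "right_eye"),),
    )
    return create_keypoint_hflip_indices("_demo_kpt_dataset")  # -> [0, 2, 1]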
def get_fed_loss_cls_weights(dataset_names: Union[str, List[str]], freq_weight_power=1.0):
"""
Get frequency weight for each class sorted by class id.
    We now calculate frequency weight using image_count to the power freq_weight_power.
Args:
dataset_names: list of dataset names
freq_weight_power: power value
"""
if isinstance(dataset_names, str):
dataset_names = [dataset_names]
check_metadata_consistency("class_image_count", dataset_names)
meta = MetadataCatalog.get(dataset_names[0])
class_freq_meta = meta.class_image_count
class_freq = torch.tensor(
[c["image_count"] for c in sorted(class_freq_meta, key=lambda x: x["id"])]
)
class_freq_weight = class_freq.float() ** freq_weight_power
return class_freq_weight
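# --- Usage sketch (editorial addition, not part of the original file) --------
# Federated-loss class weights are simply image_count ** freq_weight_power,
# ordered by class id. The dataset name and the two image counts below are
# purely illustrative.
def _demo_get_fed_loss_cls_weights():
    MetadataCatalog.get("_demo_fed_dataset").set(
        class_image_count=[
            {"id": 2, "image_count": 100},
            {"id": 1, "image_count": 400},
        ]
    )
    # sorted by id -> counts [400, 100]; power 0.5 -> tensor([20., 10.])
    return get_fed_loss_cls_weights("_demo_fed_dataset", freq_weight_power=0.5)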
def gen_crop_transform_with_instance(crop_size, image_size, instance):
"""
Generate a CropTransform so that the cropping region contains
the center of the given instance.
Args:
crop_size (tuple): h, w in pixels
image_size (tuple): h, w
instance (dict): an annotation dict of one instance, in Detectron2's
dataset format.
"""
crop_size = np.asarray(crop_size, dtype=np.int32)
bbox = BoxMode.convert(instance["bbox"], instance["bbox_mode"], BoxMode.XYXY_ABS)
center_yx = (bbox[1] + bbox[3]) * 0.5, (bbox[0] + bbox[2]) * 0.5
assert (
image_size[0] >= center_yx[0] and image_size[1] >= center_yx[1]
), "The annotation bounding box is outside of the image!"
assert (
image_size[0] >= crop_size[0] and image_size[1] >= crop_size[1]
), "Crop size is larger than image size!"
min_yx = np.maximum(np.floor(center_yx).astype(np.int32) - crop_size, 0)
max_yx = np.maximum(np.asarray(image_size, dtype=np.int32) - crop_size, 0)
max_yx = np.minimum(max_yx, np.ceil(center_yx).astype(np.int32))
y0 = np.random.randint(min_yx[0], max_yx[0] + 1)
x0 = np.random.randint(min_yx[1], max_yx[1] + 1)
return T.CropTransform(x0, y0, crop_size[1], crop_size[0])
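# --- Usage sketch (editorial addition, not part of the original file) --------
# A hedged example of `gen_crop_transform_with_instance` with a hand-written
# instance dict in XYXY_ABS format. The returned 50x50 CropTransform is random
# but always contains the box center (x=30, y=40).
def _demo_gen_crop_transform_with_instance():
    instance = {"bbox": [20.0, 30.0, 40.0, 50.0], "bbox_mode": BoxMode.XYXY_ABS}
    return gen_crop_transform_with_instance(
        crop_size=(50, 50), image_size=(100, 100), instance=instance
    )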
def check_metadata_consistency(key, dataset_names):
"""
Check that the datasets have consistent metadata.
Args:
key (str): a metadata key
dataset_names (list[str]): a list of dataset names
Raises:
AttributeError: if the key does not exist in the metadata
ValueError: if the given datasets do not have the same metadata values defined by key
"""
if len(dataset_names) == 0:
return
logger = logging.getLogger(__name__)
entries_per_dataset = [getattr(MetadataCatalog.get(d), key) for d in dataset_names]
for idx, entry in enumerate(entries_per_dataset):
if entry != entries_per_dataset[0]:
logger.error(
"Metadata '{}' for dataset '{}' is '{}'".format(key, dataset_names[idx], str(entry))
)
logger.error(
"Metadata '{}' for dataset '{}' is '{}'".format(
key, dataset_names[0], str(entries_per_dataset[0])
)
)
raise ValueError("Datasets have different metadata '{}'!".format(key))
def build_augmentation(cfg, is_train):
"""
Create a list of default :class:`Augmentation` from config.
Now it includes resizing and flipping.
Returns:
list[Augmentation]
"""
if is_train:
min_size = cfg.INPUT.MIN_SIZE_TRAIN
max_size = cfg.INPUT.MAX_SIZE_TRAIN
sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
else:
min_size = cfg.INPUT.MIN_SIZE_TEST
max_size = cfg.INPUT.MAX_SIZE_TEST
sample_style = "choice"
augmentation = [T.ResizeShortestEdge(min_size, max_size, sample_style)]
if is_train and cfg.INPUT.RANDOM_FLIP != "none":
augmentation.append(
T.RandomFlip(
horizontal=cfg.INPUT.RANDOM_FLIP == "horizontal",
vertical=cfg.INPUT.RANDOM_FLIP == "vertical",
)
)
return augmentation
build_transform_gen = build_augmentation
"""
Alias for backward-compatibility.
"""
| CutLER-main | cutler/data/detection_utils.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Modified by XuDong Wang from https://github.com/facebookresearch/detectron2/blob/main/detectron2/data/dataset_mapper.py
import copy
import logging
import numpy as np
from typing import List, Optional, Union
import torch
from detectron2.config import configurable
import data.detection_utils as utils
import data.transforms as T
"""
This file contains the default mapping that's applied to "dataset dicts".
"""
__all__ = ["DatasetMapper"]
class DatasetMapper:
"""
A callable which takes a dataset dict in Detectron2 Dataset format,
and map it into a format used by the model.
This is the default callable to be used to map your dataset dict into training data.
You may need to follow it to implement your own one for customized logic,
such as a different way to read or transform images.
See :doc:`/tutorials/data_loading` for details.
The callable currently does the following:
1. Read the image from "file_name"
2. Applies cropping/geometric transforms to the image and annotations
3. Prepare data and annotations to Tensor and :class:`Instances`
"""
@configurable
def __init__(
self,
is_train: bool,
*,
augmentations: List[Union[T.Augmentation, T.Transform]],
image_format: str,
use_instance_mask: bool = False,
use_keypoint: bool = False,
instance_mask_format: str = "polygon",
keypoint_hflip_indices: Optional[np.ndarray] = None,
precomputed_proposal_topk: Optional[int] = None,
recompute_boxes: bool = False,
):
"""
NOTE: this interface is experimental.
Args:
is_train: whether it's used in training or inference
augmentations: a list of augmentations or deterministic transforms to apply
image_format: an image format supported by :func:`detection_utils.read_image`.
use_instance_mask: whether to process instance segmentation annotations, if available
use_keypoint: whether to process keypoint annotations if available
instance_mask_format: one of "polygon" or "bitmask". Process instance segmentation
masks into this format.
keypoint_hflip_indices: see :func:`detection_utils.create_keypoint_hflip_indices`
precomputed_proposal_topk: if given, will load pre-computed
proposals from dataset_dict and keep the top k proposals for each image.
recompute_boxes: whether to overwrite bounding box annotations
by computing tight bounding boxes from instance mask annotations.
"""
if recompute_boxes:
assert use_instance_mask, "recompute_boxes requires instance masks"
# fmt: off
self.is_train = is_train
self.augmentations = T.AugmentationList(augmentations)
self.image_format = image_format
self.use_instance_mask = use_instance_mask
self.instance_mask_format = instance_mask_format
self.use_keypoint = use_keypoint
self.keypoint_hflip_indices = keypoint_hflip_indices
self.proposal_topk = precomputed_proposal_topk
self.recompute_boxes = recompute_boxes
# fmt: on
logger = logging.getLogger(__name__)
mode = "training" if is_train else "inference"
logger.info(f"[DatasetMapper] Augmentations used in {mode}: {augmentations}")
@classmethod
def from_config(cls, cfg, is_train: bool = True):
augs = utils.build_augmentation(cfg, is_train)
if cfg.INPUT.CROP.ENABLED and is_train:
augs.insert(0, T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE))
recompute_boxes = cfg.MODEL.MASK_ON
else:
recompute_boxes = False
ret = {
"is_train": is_train,
"augmentations": augs,
"image_format": cfg.INPUT.FORMAT,
"use_instance_mask": cfg.MODEL.MASK_ON,
"instance_mask_format": cfg.INPUT.MASK_FORMAT,
"use_keypoint": cfg.MODEL.KEYPOINT_ON,
"recompute_boxes": recompute_boxes,
}
if cfg.MODEL.KEYPOINT_ON:
ret["keypoint_hflip_indices"] = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN)
if cfg.MODEL.LOAD_PROPOSALS:
ret["precomputed_proposal_topk"] = (
cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN
if is_train
else cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST
)
return ret
def _transform_annotations(self, dataset_dict, transforms, image_shape):
# USER: Modify this if you want to keep them for some reason.
for anno in dataset_dict["annotations"]:
if not self.use_instance_mask:
anno.pop("segmentation", None)
if not self.use_keypoint:
anno.pop("keypoints", None)
# USER: Implement additional transformations if you have other types of data
annos = [
utils.transform_instance_annotations(
obj, transforms, image_shape, keypoint_hflip_indices=self.keypoint_hflip_indices
)
for obj in dataset_dict.pop("annotations")
if obj.get("iscrowd", 0) == 0
]
instances = utils.annotations_to_instances(
annos, image_shape, mask_format=self.instance_mask_format
)
# After transforms such as cropping are applied, the bounding box may no longer
# tightly bound the object. As an example, imagine a triangle object
# [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). The tight
# bounding box of the cropped triangle should be [(1,0),(2,1)], which is not equal to
# the intersection of original bounding box and the cropping box.
if self.recompute_boxes:
instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
dataset_dict["instances"] = utils.filter_empty_instances(instances)
def __call__(self, dataset_dict):
"""
Args:
dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
Returns:
dict: a format that builtin models in detectron2 accept
"""
dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
# USER: Write your own image loading if it's not from a file
image = utils.read_image(dataset_dict["file_name"], format=self.image_format)
utils.check_image_size(dataset_dict, image)
# USER: Remove if you don't do semantic/panoptic segmentation.
if "sem_seg_file_name" in dataset_dict:
sem_seg_gt = utils.read_image(dataset_dict.pop("sem_seg_file_name"), "L").squeeze(2)
else:
sem_seg_gt = None
aug_input = T.AugInput(image, sem_seg=sem_seg_gt)
transforms = self.augmentations(aug_input)
image, sem_seg_gt = aug_input.image, aug_input.sem_seg
image_shape = image.shape[:2] # h, w
# Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
# but not efficient on large generic data structures due to the use of pickle & mp.Queue.
# Therefore it's important to use torch.Tensor.
dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
if sem_seg_gt is not None:
dataset_dict["sem_seg"] = torch.as_tensor(sem_seg_gt.astype("long"))
# USER: Remove if you don't use pre-computed proposals.
# Most users would not need this feature.
if self.proposal_topk is not None:
utils.transform_proposals(
dataset_dict, image_shape, transforms, proposal_topk=self.proposal_topk
)
if not self.is_train:
# USER: Modify this if you want to keep them for some reason.
dataset_dict.pop("annotations", None)
dataset_dict.pop("sem_seg_file_name", None)
return dataset_dict
if "annotations" in dataset_dict:
self._transform_annotations(dataset_dict, transforms, image_shape)
return dataset_dict
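# --- Usage sketch (editorial addition, not part of the original file) --------
# A hedged example of plugging this mapper into detectron2's default train
# loader. `cfg` is assumed to be a config whose INPUT.*/MODEL.*/DATASETS.* keys
# are set up as `from_config` expects and whose datasets are already registered.
def _demo_build_train_loader(cfg):
    from detectron2.data import build_detection_train_loader

    mapper = DatasetMapper(cfg, is_train=True)
    return build_detection_train_loader(cfg, mapper=mapper)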
| CutLER-main | cutler/data/dataset_mapper.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Modified by XuDong Wang from https://github.com/facebookresearch/detectron2/blob/main/detectron2/data/datasets/coco.py
import contextlib
import datetime
import io
import json
import logging
import numpy as np
import os
import shutil
import pycocotools.mask as mask_util
from fvcore.common.timer import Timer
from iopath.common.file_io import file_lock
from PIL import Image
from detectron2.structures import Boxes, BoxMode, PolygonMasks, RotatedBoxes
from detectron2.utils.file_io import PathManager
from detectron2.data import DatasetCatalog, MetadataCatalog
"""
This file contains functions to parse COCO-format annotations into dicts in "Detectron2 format".
"""
logger = logging.getLogger(__name__)
__all__ = ["load_coco_json", "load_sem_seg", "convert_to_coco_json", "register_coco_instances"]
def load_coco_json(json_file, image_root, dataset_name=None, extra_annotation_keys=None):
"""
Load a json file with COCO's instances annotation format.
Currently supports instance detection, instance segmentation,
and person keypoints annotations.
Args:
json_file (str): full path to the json file in COCO instances annotation format.
image_root (str or path-like): the directory where the images in this json file exists.
dataset_name (str or None): the name of the dataset (e.g., coco_2017_train).
When provided, this function will also do the following:
* Put "thing_classes" into the metadata associated with this dataset.
* Map the category ids into a contiguous range (needed by standard dataset format),
and add "thing_dataset_id_to_contiguous_id" to the metadata associated
with this dataset.
This option should usually be provided, unless users need to load
the original json content and apply more processing manually.
extra_annotation_keys (list[str]): list of per-annotation keys that should also be
loaded into the dataset dict (besides "iscrowd", "bbox", "keypoints",
"category_id", "segmentation"). The values for these keys will be returned as-is.
For example, the densepose annotations are loaded in this way.
Returns:
list[dict]: a list of dicts in Detectron2 standard dataset dicts format (See
`Using Custom Datasets </tutorials/datasets.html>`_ ) when `dataset_name` is not None.
If `dataset_name` is None, the returned `category_ids` may be
incontiguous and may not conform to the Detectron2 standard format.
Notes:
1. This function does not read the image files.
The results do not have the "image" field.
"""
from pycocotools.coco import COCO
timer = Timer()
json_file = PathManager.get_local_path(json_file)
with contextlib.redirect_stdout(io.StringIO()):
coco_api = COCO(json_file)
if timer.seconds() > 1:
logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()))
id_map = None
if dataset_name is not None:
meta = MetadataCatalog.get(dataset_name)
cat_ids = sorted(coco_api.getCatIds())
cats = coco_api.loadCats(cat_ids)
# The categories in a custom json file may not be sorted.
thing_classes = [c["name"] for c in sorted(cats, key=lambda x: x["id"])]
if "imagenet" not in dataset_name and "cls_agnostic" not in dataset_name:
meta.thing_classes = thing_classes
# In COCO, certain category ids are artificially removed,
# and by convention they are always ignored.
# We deal with COCO's id issue and translate
# the category ids to contiguous ids in [0, 80).
# It works by looking at the "categories" field in the json, therefore
# if users' own json also have incontiguous ids, we'll
# apply this mapping as well but print a warning.
if not (min(cat_ids) == 1 and max(cat_ids) == len(cat_ids)):
if "coco" not in dataset_name:
logger.warning(
"""
Category ids in annotations are not in [1, #categories]! We'll apply a mapping for you.
"""
)
id_map = {v: i for i, v in enumerate(cat_ids)}
meta.thing_dataset_id_to_contiguous_id = id_map
else:
id_map = meta.thing_dataset_id_to_contiguous_id
# sort indices for reproducible results
img_ids = sorted(coco_api.imgs.keys())
# imgs is a list of dicts, each looks something like:
# {'license': 4,
# 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg',
# 'file_name': 'COCO_val2014_000000001268.jpg',
# 'height': 427,
# 'width': 640,
# 'date_captured': '2013-11-17 05:57:24',
# 'id': 1268}
imgs = coco_api.loadImgs(img_ids)
# anns is a list[list[dict]], where each dict is an annotation
# record for an object. The inner list enumerates the objects in an image
# and the outer list enumerates over images. Example of anns[0]:
# [{'segmentation': [[192.81,
# 247.09,
# ...
# 219.03,
# 249.06]],
# 'area': 1035.749,
# 'iscrowd': 0,
# 'image_id': 1268,
# 'bbox': [192.81, 224.8, 74.73, 33.43],
# 'category_id': 16,
# 'id': 42986},
# ...]
anns = [coco_api.imgToAnns[img_id] for img_id in img_ids]
total_num_valid_anns = sum([len(x) for x in anns])
total_num_anns = len(coco_api.anns)
if total_num_valid_anns < total_num_anns:
logger.warning(
f"{json_file} contains {total_num_anns} annotations, but only "
f"{total_num_valid_anns} of them match to images in the file."
)
if "minival" not in json_file:
# The popular valminusminival & minival annotations for COCO2014 contain this bug.
# However the ratio of buggy annotations there is tiny and does not affect accuracy.
# Therefore we explicitly white-list them.
ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image]
assert len(set(ann_ids)) == len(ann_ids), "Annotation ids in '{}' are not unique!".format(
json_file
)
imgs_anns = list(zip(imgs, anns))
logger.info("Loaded {} images in COCO format from {}".format(len(imgs_anns), json_file))
dataset_dicts = []
ann_keys = ["iscrowd", "bbox", "keypoints", "category_id"] + (extra_annotation_keys or [])
num_instances_without_valid_segmentation = 0
for (img_dict, anno_dict_list) in imgs_anns:
record = {}
record["file_name"] = os.path.join(image_root, img_dict["file_name"])
record["height"] = img_dict["height"]
record["width"] = img_dict["width"]
image_id = record["image_id"] = img_dict["id"]
objs = []
for anno in anno_dict_list:
# Check that the image_id in this annotation is the same as
# the image_id we're looking at.
# This fails only when the data parsing logic or the annotation file is buggy.
# The original COCO valminusminival2014 & minival2014 annotation files
# actually contains bugs that, together with certain ways of using COCO API,
# can trigger this assertion.
assert anno["image_id"] == image_id
assert anno.get("ignore", 0) == 0, '"ignore" in COCO json file is not supported.'
obj = {key: anno[key] for key in ann_keys if key in anno}
if "bbox" in obj and len(obj["bbox"]) == 0:
raise ValueError(
f"One annotation of image {image_id} contains empty 'bbox' value! "
"This json does not have valid COCO format."
)
segm = anno.get("segmentation", None)
if segm: # either list[list[float]] or dict(RLE)
if isinstance(segm, dict):
if isinstance(segm["counts"], list):
# convert to compressed RLE
segm = mask_util.frPyObjects(segm, *segm["size"])
else:
# filter out invalid polygons (< 3 points)
segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6]
if len(segm) == 0:
num_instances_without_valid_segmentation += 1
continue # ignore this instance
obj["segmentation"] = segm
keypts = anno.get("keypoints", None)
if keypts: # list[int]
for idx, v in enumerate(keypts):
if idx % 3 != 2:
# COCO's segmentation coordinates are floating points in [0, H or W],
# but keypoint coordinates are integers in [0, H-1 or W-1]
# Therefore we assume the coordinates are "pixel indices" and
# add 0.5 to convert to floating point coordinates.
keypts[idx] = v + 0.5
obj["keypoints"] = keypts
obj["bbox_mode"] = BoxMode.XYWH_ABS
if id_map:
annotation_category_id = obj["category_id"]
try:
obj["category_id"] = id_map[annotation_category_id]
except KeyError as e:
raise KeyError(
f"Encountered category_id={annotation_category_id} "
"but this id does not exist in 'categories' of the json file."
) from e
objs.append(obj)
record["annotations"] = objs
dataset_dicts.append(record)
if num_instances_without_valid_segmentation > 0:
logger.warning(
"Filtered out {} instances without valid segmentation. ".format(
num_instances_without_valid_segmentation
)
+ "There might be issues in your dataset generation process. Please "
"check https://detectron2.readthedocs.io/en/latest/tutorials/datasets.html carefully"
)
return dataset_dicts
def load_sem_seg(gt_root, image_root, gt_ext="png", image_ext="jpg"):
"""
Load semantic segmentation datasets. All files under "gt_root" with "gt_ext" extension are
treated as ground truth annotations and all files under "image_root" with "image_ext" extension
as input images. Ground truth and input images are matched using file paths relative to
"gt_root" and "image_root" respectively without taking into account file extensions.
This works for COCO as well as some other datasets.
Args:
gt_root (str): full path to ground truth semantic segmentation files. Semantic segmentation
annotations are stored as images with integer values in pixels that represent
corresponding semantic labels.
image_root (str): the directory where the input images are.
gt_ext (str): file extension for ground truth annotations.
image_ext (str): file extension for input images.
Returns:
list[dict]:
a list of dicts in detectron2 standard format without instance-level
annotation.
Notes:
1. This function does not read the image and ground truth files.
The results do not have the "image" and "sem_seg" fields.
"""
# We match input images with ground truth based on their relative filepaths (without file
# extensions) starting from 'image_root' and 'gt_root' respectively.
def file2id(folder_path, file_path):
# extract relative path starting from `folder_path`
image_id = os.path.normpath(os.path.relpath(file_path, start=folder_path))
# remove file extension
image_id = os.path.splitext(image_id)[0]
return image_id
input_files = sorted(
(os.path.join(image_root, f) for f in PathManager.ls(image_root) if f.endswith(image_ext)),
key=lambda file_path: file2id(image_root, file_path),
)
gt_files = sorted(
(os.path.join(gt_root, f) for f in PathManager.ls(gt_root) if f.endswith(gt_ext)),
key=lambda file_path: file2id(gt_root, file_path),
)
assert len(gt_files) > 0, "No annotations found in {}.".format(gt_root)
# Use the intersection, so that val2017_100 annotations can run smoothly with val2017 images
if len(input_files) != len(gt_files):
logger.warn(
"Directory {} and {} has {} and {} files, respectively.".format(
image_root, gt_root, len(input_files), len(gt_files)
)
)
input_basenames = [os.path.basename(f)[: -len(image_ext)] for f in input_files]
gt_basenames = [os.path.basename(f)[: -len(gt_ext)] for f in gt_files]
intersect = list(set(input_basenames) & set(gt_basenames))
# sort, otherwise each worker may obtain a list[dict] in different order
intersect = sorted(intersect)
logger.warn("Will use their intersection of {} files.".format(len(intersect)))
input_files = [os.path.join(image_root, f + image_ext) for f in intersect]
gt_files = [os.path.join(gt_root, f + gt_ext) for f in intersect]
logger.info(
"Loaded {} images with semantic segmentation from {}".format(len(input_files), image_root)
)
dataset_dicts = []
for (img_path, gt_path) in zip(input_files, gt_files):
record = {}
record["file_name"] = img_path
record["sem_seg_file_name"] = gt_path
dataset_dicts.append(record)
return dataset_dicts
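# --- Usage sketch (editorial addition, not part of the original file) --------
# A hedged example of registering a semantic-segmentation split backed by
# `load_sem_seg`; the dataset name and directory layout below are placeholders.
def _demo_register_sem_seg():
    DatasetCatalog.register(
        "my_sem_seg_val",
        lambda: load_sem_seg("datasets/my_data/gt/val", "datasets/my_data/images/val"),
    )
    MetadataCatalog.get("my_sem_seg_val").set(evaluator_type="sem_seg", ignore_label=255)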
def convert_to_coco_dict(dataset_name):
"""
Convert an instance detection/segmentation or keypoint detection dataset
in detectron2's standard format into COCO json format.
Generic dataset description can be found here:
https://detectron2.readthedocs.io/tutorials/datasets.html#register-a-dataset
COCO data format description can be found here:
http://cocodataset.org/#format-data
Args:
dataset_name (str):
name of the source dataset
            Must be registered in DatasetCatalog and in detectron2's standard format.
Must have corresponding metadata "thing_classes"
Returns:
coco_dict: serializable dict in COCO json format
"""
dataset_dicts = DatasetCatalog.get(dataset_name)
metadata = MetadataCatalog.get(dataset_name)
# unmap the category mapping ids for COCO
if hasattr(metadata, "thing_dataset_id_to_contiguous_id"):
reverse_id_mapping = {v: k for k, v in metadata.thing_dataset_id_to_contiguous_id.items()}
reverse_id_mapper = lambda contiguous_id: reverse_id_mapping[contiguous_id] # noqa
else:
reverse_id_mapper = lambda contiguous_id: contiguous_id # noqa
categories = [
{"id": reverse_id_mapper(id), "name": name}
for id, name in enumerate(metadata.thing_classes)
]
logger.info("Converting dataset dicts into COCO format")
coco_images = []
coco_annotations = []
for image_id, image_dict in enumerate(dataset_dicts):
coco_image = {
"id": image_dict.get("image_id", image_id),
"width": int(image_dict["width"]),
"height": int(image_dict["height"]),
"file_name": str(image_dict["file_name"]),
}
coco_images.append(coco_image)
anns_per_image = image_dict.get("annotations", [])
for annotation in anns_per_image:
# create a new dict with only COCO fields
coco_annotation = {}
# COCO requirement: XYWH box format for axis-align and XYWHA for rotated
bbox = annotation["bbox"]
if isinstance(bbox, np.ndarray):
if bbox.ndim != 1:
raise ValueError(f"bbox has to be 1-dimensional. Got shape={bbox.shape}.")
bbox = bbox.tolist()
if len(bbox) not in [4, 5]:
raise ValueError(f"bbox has to has length 4 or 5. Got {bbox}.")
from_bbox_mode = annotation["bbox_mode"]
to_bbox_mode = BoxMode.XYWH_ABS if len(bbox) == 4 else BoxMode.XYWHA_ABS
bbox = BoxMode.convert(bbox, from_bbox_mode, to_bbox_mode)
# COCO requirement: instance area
if "segmentation" in annotation:
# Computing areas for instances by counting the pixels
segmentation = annotation["segmentation"]
# TODO: check segmentation type: RLE, BinaryMask or Polygon
if isinstance(segmentation, list):
polygons = PolygonMasks([segmentation])
area = polygons.area()[0].item()
elif isinstance(segmentation, dict): # RLE
area = mask_util.area(segmentation).item()
else:
raise TypeError(f"Unknown segmentation type {type(segmentation)}!")
else:
# Computing areas using bounding boxes
if to_bbox_mode == BoxMode.XYWH_ABS:
bbox_xy = BoxMode.convert(bbox, to_bbox_mode, BoxMode.XYXY_ABS)
area = Boxes([bbox_xy]).area()[0].item()
else:
area = RotatedBoxes([bbox]).area()[0].item()
if "keypoints" in annotation:
keypoints = annotation["keypoints"] # list[int]
for idx, v in enumerate(keypoints):
if idx % 3 != 2:
# COCO's segmentation coordinates are floating points in [0, H or W],
# but keypoint coordinates are integers in [0, H-1 or W-1]
                        # For COCO format consistency we subtract 0.5
# https://github.com/facebookresearch/detectron2/pull/175#issuecomment-551202163
keypoints[idx] = v - 0.5
if "num_keypoints" in annotation:
num_keypoints = annotation["num_keypoints"]
else:
num_keypoints = sum(kp > 0 for kp in keypoints[2::3])
# COCO requirement:
# linking annotations to images
# "id" field must start with 1
coco_annotation["id"] = len(coco_annotations) + 1
coco_annotation["image_id"] = coco_image["id"]
coco_annotation["bbox"] = [round(float(x), 3) for x in bbox]
coco_annotation["area"] = float(area)
coco_annotation["iscrowd"] = int(annotation.get("iscrowd", 0))
coco_annotation["category_id"] = int(reverse_id_mapper(annotation["category_id"]))
# Add optional fields
if "keypoints" in annotation:
coco_annotation["keypoints"] = keypoints
coco_annotation["num_keypoints"] = num_keypoints
if "segmentation" in annotation:
seg = coco_annotation["segmentation"] = annotation["segmentation"]
if isinstance(seg, dict): # RLE
counts = seg["counts"]
if not isinstance(counts, str):
# make it json-serializable
seg["counts"] = counts.decode("ascii")
coco_annotations.append(coco_annotation)
logger.info(
"Conversion finished, "
f"#images: {len(coco_images)}, #annotations: {len(coco_annotations)}"
)
info = {
"date_created": str(datetime.datetime.now()),
"description": "Automatically generated COCO json file for Detectron2.",
}
coco_dict = {"info": info, "images": coco_images, "categories": categories, "licenses": None}
if len(coco_annotations) > 0:
coco_dict["annotations"] = coco_annotations
return coco_dict
def convert_to_coco_json(dataset_name, output_file, allow_cached=True):
"""
Converts dataset into COCO format and saves it to a json file.
dataset_name must be registered in DatasetCatalog and in detectron2's standard format.
Args:
dataset_name:
reference from the config file to the catalogs
must be registered in DatasetCatalog and in detectron2's standard format
output_file: path of json file that will be saved to
allow_cached: if json file is already present then skip conversion
"""
# TODO: The dataset or the conversion script *may* change,
# a checksum would be useful for validating the cached data
PathManager.mkdirs(os.path.dirname(output_file))
with file_lock(output_file):
if PathManager.exists(output_file) and allow_cached:
logger.warning(
f"Using previously cached COCO format annotations at '{output_file}'. "
"You need to clear the cache file if your dataset has been modified."
)
else:
logger.info(f"Converting annotations of dataset '{dataset_name}' to COCO format ...)")
coco_dict = convert_to_coco_dict(dataset_name)
logger.info(f"Caching COCO format annotations at '{output_file}' ...")
tmp_file = output_file + ".tmp"
with PathManager.open(tmp_file, "w") as f:
json.dump(coco_dict, f)
shutil.move(tmp_file, output_file)
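# --- Usage sketch (editorial addition, not part of the original file) --------
# A hedged example: export a registered dataset back to a COCO json file. Both
# the dataset name and the output path are placeholders.
def _demo_convert_to_coco_json():
    convert_to_coco_json("my_dataset_train", "output/my_dataset_train_coco_format.json")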
def register_coco_instances(name, metadata, json_file, image_root):
"""
Register a dataset in COCO's json annotation format for
instance detection, instance segmentation and keypoint detection.
(i.e., Type 1 and 2 in http://cocodataset.org/#format-data.
`instances*.json` and `person_keypoints*.json` in the dataset).
This is an example of how to register a new dataset.
You can do something similar to this function, to register new datasets.
Args:
name (str): the name that identifies a dataset, e.g. "coco_2014_train".
metadata (dict): extra metadata associated with this dataset. You can
leave it as an empty dict.
json_file (str): path to the json instance annotation file.
image_root (str or path-like): directory which contains all the images.
"""
assert isinstance(name, str), name
assert isinstance(json_file, (str, os.PathLike)), json_file
assert isinstance(image_root, (str, os.PathLike)), image_root
# 1. register a function which returns dicts
DatasetCatalog.register(name, lambda: load_coco_json(json_file, image_root, name))
# 2. Optionally, add metadata about this dataset,
# since they might be useful in evaluation, visualization or logging
MetadataCatalog.get(name).set(
json_file=json_file, image_root=image_root, evaluator_type="coco", **metadata
)
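# --- Usage sketch (editorial addition, not part of the original file) --------
# A hedged example of registering a custom COCO-format dataset; the name and
# the two paths are placeholders, not files that ship with this repository.
def _demo_register_my_dataset():
    register_coco_instances(
        "my_dataset_train",                              # any unused dataset name
        {},                                              # extra metadata (optional)
        "datasets/my_dataset/annotations/train.json",    # COCO-style json
        "datasets/my_dataset/images/train",              # image root
    )
    # The dicts are then loaded lazily via DatasetCatalog.get("my_dataset_train").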
if __name__ == "__main__":
"""
Test the COCO json dataset loader.
Usage:
python -m detectron2.data.datasets.coco \
path/to/json path/to/image_root dataset_name
"dataset_name" can be "coco_2014_minival_100", or other
pre-registered ones
"""
from detectron2.utils.logger import setup_logger
from detectron2.utils.visualizer import Visualizer
import detectron2.data.datasets # noqa # add pre-defined metadata
import sys
logger = setup_logger(name=__name__)
assert sys.argv[3] in DatasetCatalog.list()
meta = MetadataCatalog.get(sys.argv[3])
dicts = load_coco_json(sys.argv[1], sys.argv[2], sys.argv[3])
logger.info("Done loading {} samples.".format(len(dicts)))
dirname = "coco-data-vis"
os.makedirs(dirname, exist_ok=True)
for d in dicts:
img = np.array(Image.open(d["file_name"]))
visualizer = Visualizer(img, metadata=meta)
vis = visualizer.draw_dataset_dict(d)
fpath = os.path.join(dirname, os.path.basename(d["file_name"]))
vis.save(fpath)
| CutLER-main | cutler/data/datasets/coco.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
from .coco import load_coco_json, load_sem_seg, register_coco_instances, convert_to_coco_json
from .builtin import (
register_all_imagenet,
register_all_uvo,
register_all_coco_ca,
register_all_coco_semi,
register_all_lvis,
register_all_voc,
register_all_cross_domain,
register_all_kitti,
register_all_objects365,
register_all_openimages,
)
__all__ = [k for k in globals().keys() if not k.startswith("_")]
| CutLER-main | cutler/data/datasets/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Modified by XuDong Wang from https://github.com/facebookresearch/detectron2/blob/main/detectron2/data/datasets/builtin.py
"""
This file registers pre-defined datasets at hard-coded paths, and their metadata.
We hard-code metadata for common datasets. This will enable:
1. Consistency check when loading the datasets
2. Use models on these standard datasets directly and run demos,
without having to download the dataset annotations
We hard-code some paths to the dataset that's assumed to
exist in "./datasets/".
Users SHOULD NOT use this file to create a new dataset / metadata for a new dataset.
To add a new dataset, refer to the tutorial "docs/DATASETS.md".
"""
import os
from .builtin_meta import _get_builtin_metadata
from .coco import register_coco_instances
# ==== Predefined datasets and splits for COCO ==========
_PREDEFINED_SPLITS_COCO_SEMI = {}
_PREDEFINED_SPLITS_COCO_SEMI["coco_semi"] = {
# we use seed 42 to be consistent with previous works on SSL detection and segmentation
"coco_semi_1perc": ("coco/train2017", "coco/annotations/1perc_instances_train2017.json"),
"coco_semi_2perc": ("coco/train2017", "coco/annotations/2perc_instances_train2017.json"),
"coco_semi_5perc": ("coco/train2017", "coco/annotations/5perc_instances_train2017.json"),
"coco_semi_10perc": ("coco/train2017", "coco/annotations/10perc_instances_train2017.json"),
"coco_semi_20perc": ("coco/train2017", "coco/annotations/20perc_instances_train2017.json"),
"coco_semi_30perc": ("coco/train2017", "coco/annotations/30perc_instances_train2017.json"),
"coco_semi_40perc": ("coco/train2017", "coco/annotations/40perc_instances_train2017.json"),
"coco_semi_50perc": ("coco/train2017", "coco/annotations/50perc_instances_train2017.json"),
"coco_semi_60perc": ("coco/train2017", "coco/annotations/60perc_instances_train2017.json"),
"coco_semi_80perc": ("coco/train2017", "coco/annotations/80perc_instances_train2017.json"),
}
_PREDEFINED_SPLITS_COCO_CA = {}
_PREDEFINED_SPLITS_COCO_CA["coco_cls_agnostic"] = {
"cls_agnostic_coco": ("coco/val2017", "coco/annotations/coco_cls_agnostic_instances_val2017.json"),
"cls_agnostic_coco20k": ("coco/train2014", "coco/annotations/coco20k_trainval_gt.json"),
}
_PREDEFINED_SPLITS_IMAGENET = {}
_PREDEFINED_SPLITS_IMAGENET["imagenet"] = {
# maskcut annotations
"imagenet_train": ("imagenet/train", "imagenet/annotations/imagenet_train_fixsize480_tau0.15_N3.json"),
# self-training round 1
"imagenet_train_r1": ("imagenet/train", "imagenet/annotations/cutler_imagenet1k_train_r1.json"),
# self-training round 2
"imagenet_train_r2": ("imagenet/train", "imagenet/annotations/cutler_imagenet1k_train_r2.json"),
# self-training round 3
"imagenet_train_r3": ("imagenet/train", "imagenet/annotations/cutler_imagenet1k_train_r3.json"),
}
_PREDEFINED_SPLITS_VOC = {}
_PREDEFINED_SPLITS_VOC["voc"] = {
'cls_agnostic_voc': ("voc/", "voc/annotations/trainvaltest_2007_cls_agnostic.json"),
}
_PREDEFINED_SPLITS_CROSSDOMAIN = {}
_PREDEFINED_SPLITS_CROSSDOMAIN["cross_domain"] = {
'cls_agnostic_clipart': ("clipart/", "clipart/annotations/traintest_cls_agnostic.json"),
'cls_agnostic_watercolor': ("watercolor/", "watercolor/annotations/traintest_cls_agnostic.json"),
'cls_agnostic_comic': ("comic/", "comic/annotations/traintest_cls_agnostic.json"),
}
_PREDEFINED_SPLITS_KITTI = {}
_PREDEFINED_SPLITS_KITTI["kitti"] = {
'cls_agnostic_kitti': ("kitti/", "kitti/annotations/trainval_cls_agnostic.json"),
}
_PREDEFINED_SPLITS_LVIS = {}
_PREDEFINED_SPLITS_LVIS["lvis"] = {
"cls_agnostic_lvis": ("coco/", "coco/annotations/lvis1.0_cocofied_val_cls_agnostic.json"),
}
_PREDEFINED_SPLITS_OBJECTS365 = {}
_PREDEFINED_SPLITS_OBJECTS365["objects365"] = {
'cls_agnostic_objects365': ("objects365/val", "objects365/annotations/zhiyuan_objv2_val_cls_agnostic.json"),
}
_PREDEFINED_SPLITS_OpenImages = {}
_PREDEFINED_SPLITS_OpenImages["openimages"] = {
'cls_agnostic_openimages': ("openImages/validation", "openImages/annotations/openimages_val_cls_agnostic.json"),
}
_PREDEFINED_SPLITS_UVO = {}
_PREDEFINED_SPLITS_UVO["uvo"] = {
"cls_agnostic_uvo": ("uvo/all_UVO_frames", "uvo/annotations/val_sparse_cleaned_cls_agnostic.json"),
}
def register_all_imagenet(root):
for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_IMAGENET.items():
for key, (image_root, json_file) in splits_per_dataset.items():
# Assume pre-defined datasets live in `./datasets`.
register_coco_instances(
key,
_get_builtin_metadata(dataset_name),
os.path.join(root, json_file) if "://" not in json_file else json_file,
os.path.join(root, image_root),
)
def register_all_voc(root):
for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_VOC.items():
for key, (image_root, json_file) in splits_per_dataset.items():
# Assume pre-defined datasets live in `./datasets`.
register_coco_instances(
key,
_get_builtin_metadata(dataset_name),
os.path.join(root, json_file) if "://" not in json_file else json_file,
os.path.join(root, image_root),
)
def register_all_cross_domain(root):
for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_CROSSDOMAIN.items():
for key, (image_root, json_file) in splits_per_dataset.items():
# Assume pre-defined datasets live in `./datasets`.
register_coco_instances(
key,
_get_builtin_metadata(dataset_name),
os.path.join(root, json_file) if "://" not in json_file else json_file,
os.path.join(root, image_root),
)
def register_all_kitti(root):
for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_KITTI.items():
for key, (image_root, json_file) in splits_per_dataset.items():
# Assume pre-defined datasets live in `./datasets`.
register_coco_instances(
key,
_get_builtin_metadata(dataset_name),
os.path.join(root, json_file) if "://" not in json_file else json_file,
os.path.join(root, image_root),
)
def register_all_objects365(root):
for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_OBJECTS365.items():
for key, (image_root, json_file) in splits_per_dataset.items():
# Assume pre-defined datasets live in `./datasets`.
register_coco_instances(
key,
_get_builtin_metadata(dataset_name),
os.path.join(root, json_file) if "://" not in json_file else json_file,
os.path.join(root, image_root),
)
def register_all_openimages(root):
for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_OpenImages.items():
for key, (image_root, json_file) in splits_per_dataset.items():
# Assume pre-defined datasets live in `./datasets`.
register_coco_instances(
key,
_get_builtin_metadata(dataset_name),
os.path.join(root, json_file) if "://" not in json_file else json_file,
os.path.join(root, image_root),
)
def register_all_lvis(root):
for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_LVIS.items():
for key, (image_root, json_file) in splits_per_dataset.items():
# Assume pre-defined datasets live in `./datasets`.
register_coco_instances(
key,
_get_builtin_metadata(dataset_name),
os.path.join(root, json_file) if "://" not in json_file else json_file,
os.path.join(root, image_root),
)
def register_all_uvo(root):
for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_UVO.items():
for key, (image_root, json_file) in splits_per_dataset.items():
# Assume pre-defined datasets live in `./datasets`.
register_coco_instances(
key,
_get_builtin_metadata(dataset_name),
os.path.join(root, json_file) if "://" not in json_file else json_file,
os.path.join(root, image_root),
)
def register_all_coco_semi(root):
for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_COCO_SEMI.items():
for key, (image_root, json_file) in splits_per_dataset.items():
# Assume pre-defined datasets live in `./datasets`.
register_coco_instances(
key,
_get_builtin_metadata(dataset_name),
os.path.join(root, json_file) if "://" not in json_file else json_file,
os.path.join(root, image_root),
)
def register_all_coco_ca(root):
for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_COCO_CA.items():
for key, (image_root, json_file) in splits_per_dataset.items():
# Assume pre-defined datasets live in `./datasets`.
register_coco_instances(
key,
_get_builtin_metadata(dataset_name),
os.path.join(root, json_file) if "://" not in json_file else json_file,
os.path.join(root, image_root),
)
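# Note (editorial addition): every split above is registered eagerly at import
# time, rooted at the DETECTRON2_DATASETS environment variable (default
# "datasets"). A minimal, hypothetical override from user code:
#
#     import os
#     os.environ["DETECTRON2_DATASETS"] = "/path/to/datasets"  # before importing data.datasets
#     import data.datasets  # noqa: F401  -- triggers the registrations below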
_root = os.path.expanduser(os.getenv("DETECTRON2_DATASETS", "datasets"))
register_all_coco_semi(_root)
register_all_coco_ca(_root)
register_all_imagenet(_root)
register_all_uvo(_root)
register_all_voc(_root)
register_all_cross_domain(_root)
register_all_kitti(_root)
register_all_openimages(_root)
register_all_objects365(_root)
register_all_lvis(_root)
| CutLER-main | cutler/data/datasets/builtin.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Modified by XuDong Wang from https://github.com/facebookresearch/detectron2/blob/main/detectron2/data/datasets/builtin_meta.py
"""
Note:
For your custom dataset, there is no need to hard-code metadata anywhere in the code.
For example, for COCO-format dataset, metadata will be obtained automatically
when calling `load_coco_json`. For other dataset, metadata may also be obtained in other ways
during loading.
However, we hard-coded metadata for a few common dataset here.
The only goal is to allow users who don't have these dataset to use pre-trained models.
Users don't have to download a COCO json (which contains metadata), in order to visualize a
COCO model (with correct class names and colors).
"""
# All coco categories, together with their nice-looking visualization colors
# It's from https://github.com/cocodataset/panopticapi/blob/master/panoptic_coco_categories.json
COCO_CATEGORIES = [
{"color": [220, 20, 60], "isthing": 1, "id": 1, "name": "person"},
{"color": [119, 11, 32], "isthing": 1, "id": 2, "name": "bicycle"},
{"color": [0, 0, 142], "isthing": 1, "id": 3, "name": "car"},
{"color": [0, 0, 230], "isthing": 1, "id": 4, "name": "motorcycle"},
{"color": [106, 0, 228], "isthing": 1, "id": 5, "name": "airplane"},
{"color": [0, 60, 100], "isthing": 1, "id": 6, "name": "bus"},
{"color": [0, 80, 100], "isthing": 1, "id": 7, "name": "train"},
{"color": [0, 0, 70], "isthing": 1, "id": 8, "name": "truck"},
{"color": [0, 0, 192], "isthing": 1, "id": 9, "name": "boat"},
{"color": [250, 170, 30], "isthing": 1, "id": 10, "name": "traffic light"},
{"color": [100, 170, 30], "isthing": 1, "id": 11, "name": "fire hydrant"},
{"color": [220, 220, 0], "isthing": 1, "id": 13, "name": "stop sign"},
{"color": [175, 116, 175], "isthing": 1, "id": 14, "name": "parking meter"},
{"color": [250, 0, 30], "isthing": 1, "id": 15, "name": "bench"},
{"color": [165, 42, 42], "isthing": 1, "id": 16, "name": "bird"},
{"color": [255, 77, 255], "isthing": 1, "id": 17, "name": "cat"},
{"color": [0, 226, 252], "isthing": 1, "id": 18, "name": "dog"},
{"color": [182, 182, 255], "isthing": 1, "id": 19, "name": "horse"},
{"color": [0, 82, 0], "isthing": 1, "id": 20, "name": "sheep"},
{"color": [120, 166, 157], "isthing": 1, "id": 21, "name": "cow"},
{"color": [110, 76, 0], "isthing": 1, "id": 22, "name": "elephant"},
{"color": [174, 57, 255], "isthing": 1, "id": 23, "name": "bear"},
{"color": [199, 100, 0], "isthing": 1, "id": 24, "name": "zebra"},
{"color": [72, 0, 118], "isthing": 1, "id": 25, "name": "giraffe"},
{"color": [255, 179, 240], "isthing": 1, "id": 27, "name": "backpack"},
{"color": [0, 125, 92], "isthing": 1, "id": 28, "name": "umbrella"},
{"color": [209, 0, 151], "isthing": 1, "id": 31, "name": "handbag"},
{"color": [188, 208, 182], "isthing": 1, "id": 32, "name": "tie"},
{"color": [0, 220, 176], "isthing": 1, "id": 33, "name": "suitcase"},
{"color": [255, 99, 164], "isthing": 1, "id": 34, "name": "frisbee"},
{"color": [92, 0, 73], "isthing": 1, "id": 35, "name": "skis"},
{"color": [133, 129, 255], "isthing": 1, "id": 36, "name": "snowboard"},
{"color": [78, 180, 255], "isthing": 1, "id": 37, "name": "sports ball"},
{"color": [0, 228, 0], "isthing": 1, "id": 38, "name": "kite"},
{"color": [174, 255, 243], "isthing": 1, "id": 39, "name": "baseball bat"},
{"color": [45, 89, 255], "isthing": 1, "id": 40, "name": "baseball glove"},
{"color": [134, 134, 103], "isthing": 1, "id": 41, "name": "skateboard"},
{"color": [145, 148, 174], "isthing": 1, "id": 42, "name": "surfboard"},
{"color": [255, 208, 186], "isthing": 1, "id": 43, "name": "tennis racket"},
{"color": [197, 226, 255], "isthing": 1, "id": 44, "name": "bottle"},
{"color": [171, 134, 1], "isthing": 1, "id": 46, "name": "wine glass"},
{"color": [109, 63, 54], "isthing": 1, "id": 47, "name": "cup"},
{"color": [207, 138, 255], "isthing": 1, "id": 48, "name": "fork"},
{"color": [151, 0, 95], "isthing": 1, "id": 49, "name": "knife"},
{"color": [9, 80, 61], "isthing": 1, "id": 50, "name": "spoon"},
{"color": [84, 105, 51], "isthing": 1, "id": 51, "name": "bowl"},
{"color": [74, 65, 105], "isthing": 1, "id": 52, "name": "banana"},
{"color": [166, 196, 102], "isthing": 1, "id": 53, "name": "apple"},
{"color": [208, 195, 210], "isthing": 1, "id": 54, "name": "sandwich"},
{"color": [255, 109, 65], "isthing": 1, "id": 55, "name": "orange"},
{"color": [0, 143, 149], "isthing": 1, "id": 56, "name": "broccoli"},
{"color": [179, 0, 194], "isthing": 1, "id": 57, "name": "carrot"},
{"color": [209, 99, 106], "isthing": 1, "id": 58, "name": "hot dog"},
{"color": [5, 121, 0], "isthing": 1, "id": 59, "name": "pizza"},
{"color": [227, 255, 205], "isthing": 1, "id": 60, "name": "donut"},
{"color": [147, 186, 208], "isthing": 1, "id": 61, "name": "cake"},
{"color": [153, 69, 1], "isthing": 1, "id": 62, "name": "chair"},
{"color": [3, 95, 161], "isthing": 1, "id": 63, "name": "couch"},
{"color": [163, 255, 0], "isthing": 1, "id": 64, "name": "potted plant"},
{"color": [119, 0, 170], "isthing": 1, "id": 65, "name": "bed"},
{"color": [0, 182, 199], "isthing": 1, "id": 67, "name": "dining table"},
{"color": [0, 165, 120], "isthing": 1, "id": 70, "name": "toilet"},
{"color": [183, 130, 88], "isthing": 1, "id": 72, "name": "tv"},
{"color": [95, 32, 0], "isthing": 1, "id": 73, "name": "laptop"},
{"color": [130, 114, 135], "isthing": 1, "id": 74, "name": "mouse"},
{"color": [110, 129, 133], "isthing": 1, "id": 75, "name": "remote"},
{"color": [166, 74, 118], "isthing": 1, "id": 76, "name": "keyboard"},
{"color": [219, 142, 185], "isthing": 1, "id": 77, "name": "cell phone"},
{"color": [79, 210, 114], "isthing": 1, "id": 78, "name": "microwave"},
{"color": [178, 90, 62], "isthing": 1, "id": 79, "name": "oven"},
{"color": [65, 70, 15], "isthing": 1, "id": 80, "name": "toaster"},
{"color": [127, 167, 115], "isthing": 1, "id": 81, "name": "sink"},
{"color": [59, 105, 106], "isthing": 1, "id": 82, "name": "refrigerator"},
{"color": [142, 108, 45], "isthing": 1, "id": 84, "name": "book"},
{"color": [196, 172, 0], "isthing": 1, "id": 85, "name": "clock"},
{"color": [95, 54, 80], "isthing": 1, "id": 86, "name": "vase"},
{"color": [128, 76, 255], "isthing": 1, "id": 87, "name": "scissors"},
{"color": [201, 57, 1], "isthing": 1, "id": 88, "name": "teddy bear"},
{"color": [246, 0, 122], "isthing": 1, "id": 89, "name": "hair drier"},
{"color": [191, 162, 208], "isthing": 1, "id": 90, "name": "toothbrush"},
{"color": [255, 255, 128], "isthing": 0, "id": 92, "name": "banner"},
{"color": [147, 211, 203], "isthing": 0, "id": 93, "name": "blanket"},
{"color": [150, 100, 100], "isthing": 0, "id": 95, "name": "bridge"},
{"color": [168, 171, 172], "isthing": 0, "id": 100, "name": "cardboard"},
{"color": [146, 112, 198], "isthing": 0, "id": 107, "name": "counter"},
{"color": [210, 170, 100], "isthing": 0, "id": 109, "name": "curtain"},
{"color": [92, 136, 89], "isthing": 0, "id": 112, "name": "door-stuff"},
{"color": [218, 88, 184], "isthing": 0, "id": 118, "name": "floor-wood"},
{"color": [241, 129, 0], "isthing": 0, "id": 119, "name": "flower"},
{"color": [217, 17, 255], "isthing": 0, "id": 122, "name": "fruit"},
{"color": [124, 74, 181], "isthing": 0, "id": 125, "name": "gravel"},
{"color": [70, 70, 70], "isthing": 0, "id": 128, "name": "house"},
{"color": [255, 228, 255], "isthing": 0, "id": 130, "name": "light"},
{"color": [154, 208, 0], "isthing": 0, "id": 133, "name": "mirror-stuff"},
{"color": [193, 0, 92], "isthing": 0, "id": 138, "name": "net"},
{"color": [76, 91, 113], "isthing": 0, "id": 141, "name": "pillow"},
{"color": [255, 180, 195], "isthing": 0, "id": 144, "name": "platform"},
{"color": [106, 154, 176], "isthing": 0, "id": 145, "name": "playingfield"},
{"color": [230, 150, 140], "isthing": 0, "id": 147, "name": "railroad"},
{"color": [60, 143, 255], "isthing": 0, "id": 148, "name": "river"},
{"color": [128, 64, 128], "isthing": 0, "id": 149, "name": "road"},
{"color": [92, 82, 55], "isthing": 0, "id": 151, "name": "roof"},
{"color": [254, 212, 124], "isthing": 0, "id": 154, "name": "sand"},
{"color": [73, 77, 174], "isthing": 0, "id": 155, "name": "sea"},
{"color": [255, 160, 98], "isthing": 0, "id": 156, "name": "shelf"},
{"color": [255, 255, 255], "isthing": 0, "id": 159, "name": "snow"},
{"color": [104, 84, 109], "isthing": 0, "id": 161, "name": "stairs"},
{"color": [169, 164, 131], "isthing": 0, "id": 166, "name": "tent"},
{"color": [225, 199, 255], "isthing": 0, "id": 168, "name": "towel"},
{"color": [137, 54, 74], "isthing": 0, "id": 171, "name": "wall-brick"},
{"color": [135, 158, 223], "isthing": 0, "id": 175, "name": "wall-stone"},
{"color": [7, 246, 231], "isthing": 0, "id": 176, "name": "wall-tile"},
{"color": [107, 255, 200], "isthing": 0, "id": 177, "name": "wall-wood"},
{"color": [58, 41, 149], "isthing": 0, "id": 178, "name": "water-other"},
{"color": [183, 121, 142], "isthing": 0, "id": 180, "name": "window-blind"},
{"color": [255, 73, 97], "isthing": 0, "id": 181, "name": "window-other"},
{"color": [107, 142, 35], "isthing": 0, "id": 184, "name": "tree-merged"},
{"color": [190, 153, 153], "isthing": 0, "id": 185, "name": "fence-merged"},
{"color": [146, 139, 141], "isthing": 0, "id": 186, "name": "ceiling-merged"},
{"color": [70, 130, 180], "isthing": 0, "id": 187, "name": "sky-other-merged"},
{"color": [134, 199, 156], "isthing": 0, "id": 188, "name": "cabinet-merged"},
{"color": [209, 226, 140], "isthing": 0, "id": 189, "name": "table-merged"},
{"color": [96, 36, 108], "isthing": 0, "id": 190, "name": "floor-other-merged"},
{"color": [96, 96, 96], "isthing": 0, "id": 191, "name": "pavement-merged"},
{"color": [64, 170, 64], "isthing": 0, "id": 192, "name": "mountain-merged"},
{"color": [152, 251, 152], "isthing": 0, "id": 193, "name": "grass-merged"},
{"color": [208, 229, 228], "isthing": 0, "id": 194, "name": "dirt-merged"},
{"color": [206, 186, 171], "isthing": 0, "id": 195, "name": "paper-merged"},
{"color": [152, 161, 64], "isthing": 0, "id": 196, "name": "food-other-merged"},
{"color": [116, 112, 0], "isthing": 0, "id": 197, "name": "building-other-merged"},
{"color": [0, 114, 143], "isthing": 0, "id": 198, "name": "rock-merged"},
{"color": [102, 102, 156], "isthing": 0, "id": 199, "name": "wall-other-merged"},
{"color": [250, 141, 255], "isthing": 0, "id": 200, "name": "rug-merged"},
]
IMAGENET_CATEGORIES = [
{"color": [220, 20, 60], "isthing": 1, "id": 1, "name": "fg"},
]
UVO_CATEGORIES = [
{"color": [220, 20, 60], "isthing": 1, "id": 1, "name": "object"},
]
# fmt: off
COCO_PERSON_KEYPOINT_NAMES = (
"nose",
"left_eye", "right_eye",
"left_ear", "right_ear",
"left_shoulder", "right_shoulder",
"left_elbow", "right_elbow",
"left_wrist", "right_wrist",
"left_hip", "right_hip",
"left_knee", "right_knee",
"left_ankle", "right_ankle",
)
# fmt: on
# Pairs of keypoints that should be exchanged under horizontal flipping
COCO_PERSON_KEYPOINT_FLIP_MAP = (
("left_eye", "right_eye"),
("left_ear", "right_ear"),
("left_shoulder", "right_shoulder"),
("left_elbow", "right_elbow"),
("left_wrist", "right_wrist"),
("left_hip", "right_hip"),
("left_knee", "right_knee"),
("left_ankle", "right_ankle"),
)
# rules for pairs of keypoints to draw a line between, and the line color to use.
KEYPOINT_CONNECTION_RULES = [
# face
("left_ear", "left_eye", (102, 204, 255)),
("right_ear", "right_eye", (51, 153, 255)),
("left_eye", "nose", (102, 0, 204)),
("nose", "right_eye", (51, 102, 255)),
# upper-body
("left_shoulder", "right_shoulder", (255, 128, 0)),
("left_shoulder", "left_elbow", (153, 255, 204)),
("right_shoulder", "right_elbow", (128, 229, 255)),
("left_elbow", "left_wrist", (153, 255, 153)),
("right_elbow", "right_wrist", (102, 255, 224)),
# lower-body
("left_hip", "right_hip", (255, 102, 0)),
("left_hip", "left_knee", (255, 255, 77)),
("right_hip", "right_knee", (153, 255, 204)),
("left_knee", "left_ankle", (191, 255, 128)),
("right_knee", "right_ankle", (255, 195, 77)),
]
# All Cityscapes categories, together with their nice-looking visualization colors
# It's from https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/helpers/labels.py # noqa
CITYSCAPES_CATEGORIES = [
{"color": (128, 64, 128), "isthing": 0, "id": 7, "trainId": 0, "name": "road"},
{"color": (244, 35, 232), "isthing": 0, "id": 8, "trainId": 1, "name": "sidewalk"},
{"color": (70, 70, 70), "isthing": 0, "id": 11, "trainId": 2, "name": "building"},
{"color": (102, 102, 156), "isthing": 0, "id": 12, "trainId": 3, "name": "wall"},
{"color": (190, 153, 153), "isthing": 0, "id": 13, "trainId": 4, "name": "fence"},
{"color": (153, 153, 153), "isthing": 0, "id": 17, "trainId": 5, "name": "pole"},
{"color": (250, 170, 30), "isthing": 0, "id": 19, "trainId": 6, "name": "traffic light"},
{"color": (220, 220, 0), "isthing": 0, "id": 20, "trainId": 7, "name": "traffic sign"},
{"color": (107, 142, 35), "isthing": 0, "id": 21, "trainId": 8, "name": "vegetation"},
{"color": (152, 251, 152), "isthing": 0, "id": 22, "trainId": 9, "name": "terrain"},
{"color": (70, 130, 180), "isthing": 0, "id": 23, "trainId": 10, "name": "sky"},
{"color": (220, 20, 60), "isthing": 1, "id": 24, "trainId": 11, "name": "person"},
{"color": (255, 0, 0), "isthing": 1, "id": 25, "trainId": 12, "name": "rider"},
{"color": (0, 0, 142), "isthing": 1, "id": 26, "trainId": 13, "name": "car"},
{"color": (0, 0, 70), "isthing": 1, "id": 27, "trainId": 14, "name": "truck"},
{"color": (0, 60, 100), "isthing": 1, "id": 28, "trainId": 15, "name": "bus"},
{"color": (0, 80, 100), "isthing": 1, "id": 31, "trainId": 16, "name": "train"},
{"color": (0, 0, 230), "isthing": 1, "id": 32, "trainId": 17, "name": "motorcycle"},
{"color": (119, 11, 32), "isthing": 1, "id": 33, "trainId": 18, "name": "bicycle"},
]
# fmt: off
ADE20K_SEM_SEG_CATEGORIES = [
"wall", "building", "sky", "floor", "tree", "ceiling", "road, route", "bed", "window ", "grass", "cabinet", "sidewalk, pavement", "person", "earth, ground", "door", "table", "mountain, mount", "plant", "curtain", "chair", "car", "water", "painting, picture", "sofa", "shelf", "house", "sea", "mirror", "rug", "field", "armchair", "seat", "fence", "desk", "rock, stone", "wardrobe, closet, press", "lamp", "tub", "rail", "cushion", "base, pedestal, stand", "box", "column, pillar", "signboard, sign", "chest of drawers, chest, bureau, dresser", "counter", "sand", "sink", "skyscraper", "fireplace", "refrigerator, icebox", "grandstand, covered stand", "path", "stairs", "runway", "case, display case, showcase, vitrine", "pool table, billiard table, snooker table", "pillow", "screen door, screen", "stairway, staircase", "river", "bridge, span", "bookcase", "blind, screen", "coffee table", "toilet, can, commode, crapper, pot, potty, stool, throne", "flower", "book", "hill", "bench", "countertop", "stove", "palm, palm tree", "kitchen island", "computer", "swivel chair", "boat", "bar", "arcade machine", "hovel, hut, hutch, shack, shanty", "bus", "towel", "light", "truck", "tower", "chandelier", "awning, sunshade, sunblind", "street lamp", "booth", "tv", "plane", "dirt track", "clothes", "pole", "land, ground, soil", "bannister, banister, balustrade, balusters, handrail", "escalator, moving staircase, moving stairway", "ottoman, pouf, pouffe, puff, hassock", "bottle", "buffet, counter, sideboard", "poster, posting, placard, notice, bill, card", "stage", "van", "ship", "fountain", "conveyer belt, conveyor belt, conveyer, conveyor, transporter", "canopy", "washer, automatic washer, washing machine", "plaything, toy", "pool", "stool", "barrel, cask", "basket, handbasket", "falls", "tent", "bag", "minibike, motorbike", "cradle", "oven", "ball", "food, solid food", "step, stair", "tank, storage tank", "trade name", "microwave", "pot", "animal", "bicycle", "lake", "dishwasher", "screen", "blanket, cover", "sculpture", "hood, exhaust hood", "sconce", "vase", "traffic light", "tray", "trash can", "fan", "pier", "crt screen", "plate", "monitor", "bulletin board", "shower", "radiator", "glass, drinking glass", "clock", "flag", # noqa
]
# After processed by `prepare_ade20k_sem_seg.py`, id 255 means ignore
# fmt: on
def _get_coco_instances_meta():
thing_ids = [k["id"] for k in COCO_CATEGORIES if k["isthing"] == 1]
thing_colors = [k["color"] for k in COCO_CATEGORIES if k["isthing"] == 1]
assert len(thing_ids) == 80, len(thing_ids)
# Mapping from the incontiguous COCO category id to an id in [0, 79]
thing_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(thing_ids)}
thing_classes = [k["name"] for k in COCO_CATEGORIES if k["isthing"] == 1]
ret = {
"thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id,
"thing_classes": thing_classes,
"thing_colors": thing_colors,
}
return ret
def _get_imagenet_instances_meta():
thing_ids = [k["id"] for k in IMAGENET_CATEGORIES if k["isthing"] == 1]
thing_colors = [k["color"] for k in IMAGENET_CATEGORIES if k["isthing"] == 1]
assert len(thing_ids) == 1, len(thing_ids)
thing_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(thing_ids)}
thing_classes = [k["name"] for k in IMAGENET_CATEGORIES if k["isthing"] == 1]
ret = {
"thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id,
"thing_classes": thing_classes,
"thing_colors": thing_colors,
"class_image_count": [{'id': 1, 'image_count': 116986}]
}
return ret
def _get_UVO_instances_meta():
thing_ids = [k["id"] for k in UVO_CATEGORIES if k["isthing"] == 1]
thing_colors = [k["color"] for k in UVO_CATEGORIES if k["isthing"] == 1]
assert len(thing_ids) == 1, len(thing_ids)
thing_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(thing_ids)}
thing_classes = [k["name"] for k in UVO_CATEGORIES if k["isthing"] == 1]
ret = {
"thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id,
"thing_classes": thing_classes,
"thing_colors": thing_colors,
"class_image_count": [{'id': 1, 'image_count': 116986}]
}
return ret
def _get_coco_panoptic_separated_meta():
"""
Returns metadata for "separated" version of the panoptic segmentation dataset.
"""
stuff_ids = [k["id"] for k in COCO_CATEGORIES if k["isthing"] == 0]
assert len(stuff_ids) == 53, len(stuff_ids)
# For semantic segmentation, this mapping maps from contiguous stuff id
# (in [0, 53], used in models) to ids in the dataset (used for processing results)
# The id 0 is mapped to an extra category "thing".
stuff_dataset_id_to_contiguous_id = {k: i + 1 for i, k in enumerate(stuff_ids)}
# When converting COCO panoptic annotations to semantic annotations
# We label the "thing" category to 0
stuff_dataset_id_to_contiguous_id[0] = 0
# 54 names for COCO stuff categories (including "things")
stuff_classes = ["things"] + [
k["name"].replace("-other", "").replace("-merged", "")
for k in COCO_CATEGORIES
if k["isthing"] == 0
]
# NOTE: I randomly picked a color for things
stuff_colors = [[82, 18, 128]] + [k["color"] for k in COCO_CATEGORIES if k["isthing"] == 0]
ret = {
"stuff_dataset_id_to_contiguous_id": stuff_dataset_id_to_contiguous_id,
"stuff_classes": stuff_classes,
"stuff_colors": stuff_colors,
}
ret.update(_get_coco_instances_meta())
return ret
def _get_builtin_metadata(dataset_name):
if dataset_name in ["coco", "coco_semi"]:
return _get_coco_instances_meta()
if dataset_name == "coco_panoptic_separated":
return _get_coco_panoptic_separated_meta()
elif dataset_name in ["imagenet", "kitti", "cross_domain", "lvis", "voc", "coco_cls_agnostic", "objects365", 'openimages']:
return _get_imagenet_instances_meta()
elif dataset_name == "uvo":
return _get_UVO_instances_meta()
elif dataset_name == "coco_panoptic_standard":
meta = {}
# The following metadata maps contiguous id from [0, #thing categories +
        # #stuff categories) to their names and colors. We have two copies of the
        # same name and color under "thing_*" and "stuff_*" because the current
        # visualization function in D2 handles thing and stuff classes differently
# due to some heuristic used in Panoptic FPN. We keep the same naming to
# enable reusing existing visualization functions.
thing_classes = [k["name"] for k in COCO_CATEGORIES]
thing_colors = [k["color"] for k in COCO_CATEGORIES]
stuff_classes = [k["name"] for k in COCO_CATEGORIES]
stuff_colors = [k["color"] for k in COCO_CATEGORIES]
meta["thing_classes"] = thing_classes
meta["thing_colors"] = thing_colors
meta["stuff_classes"] = stuff_classes
meta["stuff_colors"] = stuff_colors
# Convert category id for training:
# category id: like semantic segmentation, it is the class id for each
# pixel. Since there are some classes not used in evaluation, the category
# id is not always contiguous and thus we have two set of category ids:
# - original category id: category id in the original dataset, mainly
# used for evaluation.
# - contiguous category id: [0, #classes), in order to train the linear
# softmax classifier.
thing_dataset_id_to_contiguous_id = {}
stuff_dataset_id_to_contiguous_id = {}
for i, cat in enumerate(COCO_CATEGORIES):
if cat["isthing"]:
thing_dataset_id_to_contiguous_id[cat["id"]] = i
else:
stuff_dataset_id_to_contiguous_id[cat["id"]] = i
meta["thing_dataset_id_to_contiguous_id"] = thing_dataset_id_to_contiguous_id
meta["stuff_dataset_id_to_contiguous_id"] = stuff_dataset_id_to_contiguous_id
return meta
elif dataset_name == "coco_person":
return {
"thing_classes": ["person"],
"keypoint_names": COCO_PERSON_KEYPOINT_NAMES,
"keypoint_flip_map": COCO_PERSON_KEYPOINT_FLIP_MAP,
"keypoint_connection_rules": KEYPOINT_CONNECTION_RULES,
}
elif dataset_name == "cityscapes":
# fmt: off
CITYSCAPES_THING_CLASSES = [
"person", "rider", "car", "truck",
"bus", "train", "motorcycle", "bicycle",
]
CITYSCAPES_STUFF_CLASSES = [
"road", "sidewalk", "building", "wall", "fence", "pole", "traffic light",
"traffic sign", "vegetation", "terrain", "sky", "person", "rider", "car",
"truck", "bus", "train", "motorcycle", "bicycle",
]
# fmt: on
return {
"thing_classes": CITYSCAPES_THING_CLASSES,
"stuff_classes": CITYSCAPES_STUFF_CLASSES,
}
raise KeyError("No built-in metadata for dataset {}".format(dataset_name))
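# Illustrative usage (added for this listing; not part of the original module):
# the metadata returned here is normally attached to a dataset entry via
# detectron2's MetadataCatalog at registration time. A minimal sanity check,
# assuming COCO_CATEGORIES is defined earlier in this file:
def _example_builtin_metadata():
    meta = _get_builtin_metadata("coco")
    assert len(meta["thing_classes"]) == 80  # the 80 COCO "thing" categories
    return meta["thing_dataset_id_to_contiguous_id"]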
| CutLER-main | cutler/data/datasets/builtin_meta.py |
# -*- coding: utf-8 -*-
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Modified by XuDong Wang from https://github.com/facebookresearch/detectron2/blob/main/detectron2/data/transforms/augmentation_impl.py
"""
Implement many useful :class:`Augmentation`.
"""
import numpy as np
import sys
from typing import Tuple
import torch
from fvcore.transforms.transform import (
BlendTransform,
CropTransform,
HFlipTransform,
NoOpTransform,
PadTransform,
Transform,
TransformList,
VFlipTransform,
)
from PIL import Image
from detectron2.data.transforms.augmentation import Augmentation, _transform_to_aug
from .transform import ExtentTransform, ResizeTransform, RotationTransform
__all__ = [
"FixedSizeCrop",
"RandomApply",
"RandomBrightness",
"RandomContrast",
"RandomCrop",
"RandomExtent",
"RandomFlip",
"RandomSaturation",
"RandomLighting",
"RandomRotation",
"Resize",
"ResizeScale",
"ResizeShortestEdge",
"RandomCrop_CategoryAreaConstraint",
]
class RandomApply(Augmentation):
"""
Randomly apply an augmentation with a given probability.
"""
def __init__(self, tfm_or_aug, prob=0.5):
"""
Args:
tfm_or_aug (Transform, Augmentation): the transform or augmentation
to be applied. It can either be a `Transform` or `Augmentation`
instance.
prob (float): probability between 0.0 and 1.0 that
the wrapper transformation is applied
"""
super().__init__()
self.aug = _transform_to_aug(tfm_or_aug)
        assert 0.0 <= prob <= 1.0, f"Probability must be between 0.0 and 1.0 (given: {prob})"
self.prob = prob
def get_transform(self, *args):
do = self._rand_range() < self.prob
if do:
return self.aug.get_transform(*args)
else:
return NoOpTransform()
def __call__(self, aug_input):
do = self._rand_range() < self.prob
if do:
return self.aug(aug_input)
else:
return NoOpTransform()
class RandomFlip(Augmentation):
"""
Flip the image horizontally or vertically with the given probability.
"""
def __init__(self, prob=0.5, *, horizontal=True, vertical=False):
"""
Args:
prob (float): probability of flip.
horizontal (boolean): whether to apply horizontal flipping
vertical (boolean): whether to apply vertical flipping
"""
super().__init__()
if horizontal and vertical:
raise ValueError("Cannot do both horiz and vert. Please use two Flip instead.")
if not horizontal and not vertical:
raise ValueError("At least one of horiz or vert has to be True!")
self._init(locals())
def get_transform(self, image):
h, w = image.shape[:2]
do = self._rand_range() < self.prob
if do:
if self.horizontal:
return HFlipTransform(w)
elif self.vertical:
return VFlipTransform(h)
else:
return NoOpTransform()
class Resize(Augmentation):
"""Resize image to a fixed target size"""
def __init__(self, shape, interp=Image.BILINEAR):
"""
Args:
            shape: (h, w) tuple or an int
interp: PIL interpolation method
"""
if isinstance(shape, int):
shape = (shape, shape)
shape = tuple(shape)
self._init(locals())
def get_transform(self, image):
return ResizeTransform(
image.shape[0], image.shape[1], self.shape[0], self.shape[1], self.interp
)
class ResizeShortestEdge(Augmentation):
"""
Resize the image while keeping the aspect ratio unchanged.
It attempts to scale the shorter edge to the given `short_edge_length`,
as long as the longer edge does not exceed `max_size`.
If `max_size` is reached, then downscale so that the longer edge does not exceed max_size.
"""
@torch.jit.unused
def __init__(
self, short_edge_length, max_size=sys.maxsize, sample_style="range", interp=Image.BILINEAR
):
"""
Args:
short_edge_length (list[int]): If ``sample_style=="range"``,
a [min, max] interval from which to sample the shortest edge length.
If ``sample_style=="choice"``, a list of shortest edge lengths to sample from.
max_size (int): maximum allowed longest edge length.
sample_style (str): either "range" or "choice".
"""
super().__init__()
assert sample_style in ["range", "choice"], sample_style
self.is_range = sample_style == "range"
if isinstance(short_edge_length, int):
short_edge_length = (short_edge_length, short_edge_length)
if self.is_range:
assert len(short_edge_length) == 2, (
"short_edge_length must be two values using 'range' sample style."
f" Got {short_edge_length}!"
)
self._init(locals())
@torch.jit.unused
def get_transform(self, image):
h, w = image.shape[:2]
if self.is_range:
size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
else:
size = np.random.choice(self.short_edge_length)
if size == 0:
return NoOpTransform()
newh, neww = ResizeShortestEdge.get_output_shape(h, w, size, self.max_size)
return ResizeTransform(h, w, newh, neww, self.interp)
@staticmethod
def get_output_shape(
oldh: int, oldw: int, short_edge_length: int, max_size: int
) -> Tuple[int, int]:
"""
Compute the output size given input size and target short edge length.
"""
h, w = oldh, oldw
size = short_edge_length * 1.0
scale = size / min(h, w)
if h < w:
newh, neww = size, scale * w
else:
newh, neww = scale * h, size
if max(newh, neww) > max_size:
scale = max_size * 1.0 / max(newh, neww)
newh = newh * scale
neww = neww * scale
neww = int(neww + 0.5)
newh = int(newh + 0.5)
return (newh, neww)
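# Worked example (added for illustration; not part of the original file): for a
# 480x640 (HxW) image with short_edge_length=800 and max_size=1333, the scale
# is 800/480, giving (800, 1067); 1067 <= 1333, so no extra downscaling occurs.
def _example_resize_shortest_edge():
    assert ResizeShortestEdge.get_output_shape(480, 640, 800, 1333) == (800, 1067)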
class ResizeScale(Augmentation):
"""
Takes target size as input and randomly scales the given target size between `min_scale`
and `max_scale`. It then scales the input image such that it fits inside the scaled target
box, keeping the aspect ratio constant.
    This implements the resize part of Google's 'resize_and_crop' data augmentation:
https://github.com/tensorflow/tpu/blob/master/models/official/detection/utils/input_utils.py#L127
"""
def __init__(
self,
min_scale: float,
max_scale: float,
target_height: int,
target_width: int,
interp: int = Image.BILINEAR,
):
"""
Args:
min_scale: minimum image scale range.
max_scale: maximum image scale range.
target_height: target image height.
target_width: target image width.
interp: image interpolation method.
"""
super().__init__()
self._init(locals())
def _get_resize(self, image: np.ndarray, scale: float) -> Transform:
input_size = image.shape[:2]
# Compute new target size given a scale.
target_size = (self.target_height, self.target_width)
target_scale_size = np.multiply(target_size, scale)
# Compute actual rescaling applied to input image and output size.
output_scale = np.minimum(
target_scale_size[0] / input_size[0], target_scale_size[1] / input_size[1]
)
output_size = np.round(np.multiply(input_size, output_scale)).astype(int)
return ResizeTransform(
input_size[0], input_size[1], output_size[0], output_size[1], self.interp
)
def get_transform(self, image: np.ndarray) -> Transform:
random_scale = np.random.uniform(self.min_scale, self.max_scale)
return self._get_resize(image, random_scale)
class RandomRotation(Augmentation):
"""
This method returns a copy of this image, rotated the given
number of degrees counter clockwise around the given center.
"""
def __init__(self, angle, expand=True, center=None, sample_style="range", interp=None):
"""
Args:
angle (list[float]): If ``sample_style=="range"``,
a [min, max] interval from which to sample the angle (in degrees).
If ``sample_style=="choice"``, a list of angles to sample from
expand (bool): choose if the image should be resized to fit the whole
rotated image (default), or simply cropped
center (list[[float, float]]): If ``sample_style=="range"``,
a [[minx, miny], [maxx, maxy]] relative interval from which to sample the center,
[0, 0] being the top left of the image and [1, 1] the bottom right.
If ``sample_style=="choice"``, a list of centers to sample from
Default: None, which means that the center of rotation is the center of the image
center has no effect if expand=True because it only affects shifting
"""
super().__init__()
assert sample_style in ["range", "choice"], sample_style
self.is_range = sample_style == "range"
if isinstance(angle, (float, int)):
angle = (angle, angle)
if center is not None and isinstance(center[0], (float, int)):
center = (center, center)
self._init(locals())
def get_transform(self, image):
h, w = image.shape[:2]
center = None
if self.is_range:
angle = np.random.uniform(self.angle[0], self.angle[1])
if self.center is not None:
center = (
np.random.uniform(self.center[0][0], self.center[1][0]),
np.random.uniform(self.center[0][1], self.center[1][1]),
)
else:
angle = np.random.choice(self.angle)
if self.center is not None:
center = np.random.choice(self.center)
if center is not None:
center = (w * center[0], h * center[1]) # Convert to absolute coordinates
if angle % 360 == 0:
return NoOpTransform()
return RotationTransform(h, w, angle, expand=self.expand, center=center, interp=self.interp)
class FixedSizeCrop(Augmentation):
"""
If `crop_size` is smaller than the input image size, then it uses a random crop of
the crop size. If `crop_size` is larger than the input image size, then it pads
the right and the bottom of the image to the crop size if `pad` is True, otherwise
it returns the smaller image.
"""
def __init__(self, crop_size: Tuple[int], pad: bool = True, pad_value: float = 128.0):
"""
Args:
crop_size: target image (height, width).
pad: if True, will pad images smaller than `crop_size` up to `crop_size`
pad_value: the padding value.
"""
super().__init__()
self._init(locals())
def _get_crop(self, image: np.ndarray) -> Transform:
# Compute the image scale and scaled size.
input_size = image.shape[:2]
output_size = self.crop_size
# Add random crop if the image is scaled up.
max_offset = np.subtract(input_size, output_size)
max_offset = np.maximum(max_offset, 0)
offset = np.multiply(max_offset, np.random.uniform(0.0, 1.0))
offset = np.round(offset).astype(int)
return CropTransform(
offset[1], offset[0], output_size[1], output_size[0], input_size[1], input_size[0]
)
def _get_pad(self, image: np.ndarray) -> Transform:
# Compute the image scale and scaled size.
input_size = image.shape[:2]
output_size = self.crop_size
# Add padding if the image is scaled down.
pad_size = np.subtract(output_size, input_size)
pad_size = np.maximum(pad_size, 0)
original_size = np.minimum(input_size, output_size)
return PadTransform(
0, 0, pad_size[1], pad_size[0], original_size[1], original_size[0], self.pad_value
)
def get_transform(self, image: np.ndarray) -> TransformList:
transforms = [self._get_crop(image)]
if self.pad:
transforms.append(self._get_pad(image))
return TransformList(transforms)
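# Illustration (added; not part of the original file): FixedSizeCrop returns a
# TransformList that crops first (a no-op along any dimension already smaller
# than crop_size) and then pads, so the output is always exactly crop_size.
def _example_fixed_size_crop():
    image = (np.random.rand(300, 500, 3) * 255).astype(np.uint8)
    tfm = FixedSizeCrop(crop_size=(400, 400)).get_transform(image)
    out = tfm.apply_image(image)
    assert out.shape[:2] == (400, 400)  # width cropped 500->400, height padded 300->400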
class RandomCrop(Augmentation):
"""
Randomly crop a rectangle region out of an image.
"""
def __init__(self, crop_type: str, crop_size):
"""
Args:
crop_type (str): one of "relative_range", "relative", "absolute", "absolute_range".
crop_size (tuple[float, float]): two floats, explained below.
- "relative": crop a (H * crop_size[0], W * crop_size[1]) region from an input image of
size (H, W). crop size should be in (0, 1]
- "relative_range": uniformly sample two values from [crop_size[0], 1]
          and [crop_size[1], 1], and use them as in "relative" crop type.
- "absolute" crop a (crop_size[0], crop_size[1]) region from input image.
crop_size must be smaller than the input image size.
- "absolute_range", for an input of size (H, W), uniformly sample H_crop in
[crop_size[0], min(H, crop_size[1])] and W_crop in [crop_size[0], min(W, crop_size[1])].
Then crop a region (H_crop, W_crop).
"""
# TODO style of relative_range and absolute_range are not consistent:
# one takes (h, w) but another takes (min, max)
super().__init__()
assert crop_type in ["relative_range", "relative", "absolute", "absolute_range"]
self._init(locals())
def get_transform(self, image):
h, w = image.shape[:2]
croph, cropw = self.get_crop_size((h, w))
assert h >= croph and w >= cropw, "Shape computation in {} has bugs.".format(self)
h0 = np.random.randint(h - croph + 1)
w0 = np.random.randint(w - cropw + 1)
return CropTransform(w0, h0, cropw, croph)
def get_crop_size(self, image_size):
"""
Args:
image_size (tuple): height, width
Returns:
crop_size (tuple): height, width in absolute pixels
"""
h, w = image_size
if self.crop_type == "relative":
ch, cw = self.crop_size
return int(h * ch + 0.5), int(w * cw + 0.5)
elif self.crop_type == "relative_range":
crop_size = np.asarray(self.crop_size, dtype=np.float32)
ch, cw = crop_size + np.random.rand(2) * (1 - crop_size)
return int(h * ch + 0.5), int(w * cw + 0.5)
elif self.crop_type == "absolute":
return (min(self.crop_size[0], h), min(self.crop_size[1], w))
elif self.crop_type == "absolute_range":
assert self.crop_size[0] <= self.crop_size[1]
ch = np.random.randint(min(h, self.crop_size[0]), min(h, self.crop_size[1]) + 1)
cw = np.random.randint(min(w, self.crop_size[0]), min(w, self.crop_size[1]) + 1)
return ch, cw
else:
raise NotImplementedError("Unknown crop type {}".format(self.crop_type))
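# Worked example (added for illustration; not part of the original file): with
# crop_type="relative" and crop_size=(0.5, 0.5), a 400x600 (HxW) image yields a
# 200x300 crop window; get_transform then samples its top-left corner uniformly.
def _example_random_crop_size():
    assert RandomCrop("relative", (0.5, 0.5)).get_crop_size((400, 600)) == (200, 300)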
class RandomCrop_CategoryAreaConstraint(Augmentation):
"""
Similar to :class:`RandomCrop`, but find a cropping window such that no single category
occupies a ratio of more than `single_category_max_area` in semantic segmentation ground
    truth, which can cause instability in training. The function attempts to find such a valid
cropping window for at most 10 times.
"""
def __init__(
self,
crop_type: str,
crop_size,
single_category_max_area: float = 1.0,
ignored_category: int = None,
):
"""
Args:
crop_type, crop_size: same as in :class:`RandomCrop`
single_category_max_area: the maximum allowed area ratio of a
category. Set to 1.0 to disable
ignored_category: allow this category in the semantic segmentation
ground truth to exceed the area ratio. Usually set to the category
that's ignored in training.
"""
self.crop_aug = RandomCrop(crop_type, crop_size)
self._init(locals())
def get_transform(self, image, sem_seg):
if self.single_category_max_area >= 1.0:
return self.crop_aug.get_transform(image)
else:
h, w = sem_seg.shape
for _ in range(10):
crop_size = self.crop_aug.get_crop_size((h, w))
y0 = np.random.randint(h - crop_size[0] + 1)
x0 = np.random.randint(w - crop_size[1] + 1)
sem_seg_temp = sem_seg[y0 : y0 + crop_size[0], x0 : x0 + crop_size[1]]
labels, cnt = np.unique(sem_seg_temp, return_counts=True)
if self.ignored_category is not None:
cnt = cnt[labels != self.ignored_category]
if len(cnt) > 1 and np.max(cnt) < np.sum(cnt) * self.single_category_max_area:
break
crop_tfm = CropTransform(x0, y0, crop_size[1], crop_size[0])
return crop_tfm
class RandomExtent(Augmentation):
"""
Outputs an image by cropping a random "subrect" of the source image.
The subrect can be parameterized to include pixels outside the source image,
in which case they will be set to zeros (i.e. black). The size of the output
image will vary with the size of the random subrect.
"""
def __init__(self, scale_range, shift_range):
"""
Args:
output_size (h, w): Dimensions of output image
scale_range (l, h): Range of input-to-output size scaling factor
shift_range (x, y): Range of shifts of the cropped subrect. The rect
is shifted by [w / 2 * Uniform(-x, x), h / 2 * Uniform(-y, y)],
where (w, h) is the (width, height) of the input image. Set each
component to zero to crop at the image's center.
"""
super().__init__()
self._init(locals())
def get_transform(self, image):
img_h, img_w = image.shape[:2]
# Initialize src_rect to fit the input image.
src_rect = np.array([-0.5 * img_w, -0.5 * img_h, 0.5 * img_w, 0.5 * img_h])
# Apply a random scaling to the src_rect.
src_rect *= np.random.uniform(self.scale_range[0], self.scale_range[1])
# Apply a random shift to the coordinates origin.
src_rect[0::2] += self.shift_range[0] * img_w * (np.random.rand() - 0.5)
src_rect[1::2] += self.shift_range[1] * img_h * (np.random.rand() - 0.5)
# Map src_rect coordinates into image coordinates (center at corner).
src_rect[0::2] += 0.5 * img_w
src_rect[1::2] += 0.5 * img_h
return ExtentTransform(
src_rect=(src_rect[0], src_rect[1], src_rect[2], src_rect[3]),
output_size=(int(src_rect[3] - src_rect[1]), int(src_rect[2] - src_rect[0])),
)
class RandomContrast(Augmentation):
"""
Randomly transforms image contrast.
Contrast intensity is uniformly sampled in (intensity_min, intensity_max).
- intensity < 1 will reduce contrast
- intensity = 1 will preserve the input image
- intensity > 1 will increase contrast
See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html
"""
def __init__(self, intensity_min, intensity_max):
"""
Args:
intensity_min (float): Minimum augmentation
intensity_max (float): Maximum augmentation
"""
super().__init__()
self._init(locals())
def get_transform(self, image):
w = np.random.uniform(self.intensity_min, self.intensity_max)
return BlendTransform(src_image=image.mean(), src_weight=1 - w, dst_weight=w)
class RandomBrightness(Augmentation):
"""
Randomly transforms image brightness.
Brightness intensity is uniformly sampled in (intensity_min, intensity_max).
- intensity < 1 will reduce brightness
- intensity = 1 will preserve the input image
- intensity > 1 will increase brightness
See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html
"""
def __init__(self, intensity_min, intensity_max):
"""
Args:
intensity_min (float): Minimum augmentation
intensity_max (float): Maximum augmentation
"""
super().__init__()
self._init(locals())
def get_transform(self, image):
w = np.random.uniform(self.intensity_min, self.intensity_max)
return BlendTransform(src_image=0, src_weight=1 - w, dst_weight=w)
class RandomSaturation(Augmentation):
"""
Randomly transforms saturation of an RGB image.
Input images are assumed to have 'RGB' channel order.
Saturation intensity is uniformly sampled in (intensity_min, intensity_max).
- intensity < 1 will reduce saturation (make the image more grayscale)
- intensity = 1 will preserve the input image
- intensity > 1 will increase saturation
See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html
"""
def __init__(self, intensity_min, intensity_max):
"""
Args:
intensity_min (float): Minimum augmentation (1 preserves input).
intensity_max (float): Maximum augmentation (1 preserves input).
"""
super().__init__()
self._init(locals())
def get_transform(self, image):
assert image.shape[-1] == 3, "RandomSaturation only works on RGB images"
w = np.random.uniform(self.intensity_min, self.intensity_max)
grayscale = image.dot([0.299, 0.587, 0.114])[:, :, np.newaxis]
return BlendTransform(src_image=grayscale, src_weight=1 - w, dst_weight=w)
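# Illustration (added; not part of the original file): RandomContrast,
# RandomBrightness and RandomSaturation all reduce to a BlendTransform of the
# form `w * image + (1 - w) * src_image`, where src_image is the per-image
# mean, zero, or a grayscale copy respectively.
def _example_random_contrast():
    image = (np.random.rand(8, 8, 3) * 255).astype(np.uint8)
    tfm = RandomContrast(0.8, 1.2).get_transform(image)
    out = tfm.apply_image(image)
    assert out.shape == image.shape and out.dtype == np.uint8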
class RandomLighting(Augmentation):
"""
The "lighting" augmentation described in AlexNet, using fixed PCA over ImageNet.
Input images are assumed to have 'RGB' channel order.
The degree of color jittering is randomly sampled via a normal distribution,
with standard deviation given by the scale parameter.
"""
def __init__(self, scale):
"""
Args:
scale (float): Standard deviation of principal component weighting.
"""
super().__init__()
self._init(locals())
self.eigen_vecs = np.array(
[[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140], [-0.5836, -0.6948, 0.4203]]
)
self.eigen_vals = np.array([0.2175, 0.0188, 0.0045])
def get_transform(self, image):
assert image.shape[-1] == 3, "RandomLighting only works on RGB images"
weights = np.random.normal(scale=self.scale, size=3)
return BlendTransform(
src_image=self.eigen_vecs.dot(weights * self.eigen_vals), src_weight=1.0, dst_weight=1.0
)
| CutLER-main | cutler/data/transforms/augmentation_impl.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Modified by XuDong Wang from https://github.com/facebookresearch/detectron2/blob/main/detectron2/data/transforms/__init__.py
from fvcore.transforms.transform import *
from .transform import *
from detectron2.data.transforms.augmentation import *
from .augmentation_impl import *
__all__ = [k for k in globals().keys() if not k.startswith("_")]
from detectron2.utils.env import fixup_module_metadata
fixup_module_metadata(__name__, globals(), __all__)
del fixup_module_metadata | CutLER-main | cutler/data/transforms/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Modified by XuDong Wang from https://github.com/facebookresearch/detectron2/blob/main/detectron2/data/transforms/transform.py
"""
See "Data Augmentation" tutorial for an overview of the system:
https://detectron2.readthedocs.io/tutorials/augmentation.html
"""
import numpy as np
import torch
import torch.nn.functional as F
from fvcore.transforms.transform import (
CropTransform,
HFlipTransform,
NoOpTransform,
Transform,
TransformList,
)
from PIL import Image
try:
import cv2 # noqa
except ImportError:
# OpenCV is an optional dependency at the moment
pass
__all__ = [
"ExtentTransform",
"ResizeTransform",
"RotationTransform",
"ColorTransform",
"PILColorTransform",
]
class ExtentTransform(Transform):
"""
Extracts a subregion from the source image and scales it to the output size.
The fill color is used to map pixels from the source rect that fall outside
the source image.
See: https://pillow.readthedocs.io/en/latest/PIL.html#PIL.ImageTransform.ExtentTransform
"""
def __init__(self, src_rect, output_size, interp=Image.LINEAR, fill=0):
"""
Args:
src_rect (x0, y0, x1, y1): src coordinates
output_size (h, w): dst image size
interp: PIL interpolation methods
fill: Fill color used when src_rect extends outside image
"""
super().__init__()
self._set_attributes(locals())
def apply_image(self, img, interp=None):
h, w = self.output_size
if len(img.shape) > 2 and img.shape[2] == 1:
pil_image = Image.fromarray(img[:, :, 0], mode="L")
else:
pil_image = Image.fromarray(img)
pil_image = pil_image.transform(
size=(w, h),
method=Image.EXTENT,
data=self.src_rect,
resample=interp if interp else self.interp,
fill=self.fill,
)
ret = np.asarray(pil_image)
if len(img.shape) > 2 and img.shape[2] == 1:
ret = np.expand_dims(ret, -1)
return ret
def apply_coords(self, coords):
# Transform image center from source coordinates into output coordinates
# and then map the new origin to the corner of the output image.
h, w = self.output_size
x0, y0, x1, y1 = self.src_rect
new_coords = coords.astype(np.float32)
new_coords[:, 0] -= 0.5 * (x0 + x1)
new_coords[:, 1] -= 0.5 * (y0 + y1)
new_coords[:, 0] *= w / (x1 - x0)
new_coords[:, 1] *= h / (y1 - y0)
new_coords[:, 0] += 0.5 * w
new_coords[:, 1] += 0.5 * h
return new_coords
def apply_segmentation(self, segmentation):
segmentation = self.apply_image(segmentation, interp=Image.NEAREST)
return segmentation
class ResizeTransform(Transform):
"""
Resize the image to a target size.
"""
def __init__(self, h, w, new_h, new_w, interp=None):
"""
Args:
h, w (int): original image size
new_h, new_w (int): new image size
interp: PIL interpolation methods, defaults to bilinear.
"""
# TODO decide on PIL vs opencv
super().__init__()
if interp is None:
interp = Image.BILINEAR
self._set_attributes(locals())
def apply_image(self, img, interp=None):
        # Swap the stored (h, w) if they were recorded transposed relative to
        # the actual image, then verify that the sizes agree.
        if img.shape[:2] != (self.h, self.w):
            (self.h, self.w) = (self.w, self.h)
        assert img.shape[:2] == (self.h, self.w)
assert len(img.shape) <= 4
interp_method = interp if interp is not None else self.interp
if img.dtype == np.uint8:
if len(img.shape) > 2 and img.shape[2] == 1:
pil_image = Image.fromarray(img[:, :, 0], mode="L")
else:
pil_image = Image.fromarray(img)
pil_image = pil_image.resize((self.new_w, self.new_h), interp_method)
ret = np.asarray(pil_image)
if len(img.shape) > 2 and img.shape[2] == 1:
ret = np.expand_dims(ret, -1)
else:
# PIL only supports uint8
if any(x < 0 for x in img.strides):
img = np.ascontiguousarray(img)
img = torch.from_numpy(img)
shape = list(img.shape)
shape_4d = shape[:2] + [1] * (4 - len(shape)) + shape[2:]
img = img.view(shape_4d).permute(2, 3, 0, 1) # hw(c) -> nchw
_PIL_RESIZE_TO_INTERPOLATE_MODE = {
Image.NEAREST: "nearest",
Image.BILINEAR: "bilinear",
Image.BICUBIC: "bicubic",
}
mode = _PIL_RESIZE_TO_INTERPOLATE_MODE[interp_method]
align_corners = None if mode == "nearest" else False
img = F.interpolate(
img, (self.new_h, self.new_w), mode=mode, align_corners=align_corners
)
shape[:2] = (self.new_h, self.new_w)
ret = img.permute(2, 3, 0, 1).view(shape).numpy() # nchw -> hw(c)
return ret
def apply_coords(self, coords):
coords[:, 0] = coords[:, 0] * (self.new_w * 1.0 / self.w)
coords[:, 1] = coords[:, 1] * (self.new_h * 1.0 / self.h)
return coords
def apply_segmentation(self, segmentation):
segmentation = self.apply_image(segmentation, interp=Image.NEAREST)
return segmentation
def inverse(self):
return ResizeTransform(self.new_h, self.new_w, self.h, self.w, self.interp)
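# Worked example (added for illustration; not part of the original file):
# resizing a 100x200 (HxW) image to 50x100 scales x coordinates by 100/200 and
# y coordinates by 50/100, so the point (40, 10) maps to (20, 5).
def _example_resize_transform_coords():
    tfm = ResizeTransform(100, 200, 50, 100, Image.BILINEAR)
    coords = tfm.apply_coords(np.array([[40.0, 10.0]], dtype=np.float32))
    assert tuple(coords[0]) == (20.0, 5.0)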
class RotationTransform(Transform):
"""
This method returns a copy of this image, rotated the given
number of degrees counter clockwise around its center.
"""
def __init__(self, h, w, angle, expand=True, center=None, interp=None):
"""
Args:
h, w (int): original image size
angle (float): degrees for rotation
expand (bool): choose if the image should be resized to fit the whole
rotated image (default), or simply cropped
center (tuple (width, height)): coordinates of the rotation center
if left to None, the center will be fit to the center of each image
center has no effect if expand=True because it only affects shifting
interp: cv2 interpolation method, default cv2.INTER_LINEAR
"""
super().__init__()
image_center = np.array((w / 2, h / 2))
if center is None:
center = image_center
if interp is None:
interp = cv2.INTER_LINEAR
abs_cos, abs_sin = (abs(np.cos(np.deg2rad(angle))), abs(np.sin(np.deg2rad(angle))))
if expand:
# find the new width and height bounds
bound_w, bound_h = np.rint(
[h * abs_sin + w * abs_cos, h * abs_cos + w * abs_sin]
).astype(int)
else:
bound_w, bound_h = w, h
self._set_attributes(locals())
self.rm_coords = self.create_rotation_matrix()
# Needed because of this problem https://github.com/opencv/opencv/issues/11784
self.rm_image = self.create_rotation_matrix(offset=-0.5)
def apply_image(self, img, interp=None):
"""
img should be a numpy array, formatted as Height * Width * Nchannels
"""
if len(img) == 0 or self.angle % 360 == 0:
return img
assert img.shape[:2] == (self.h, self.w)
interp = interp if interp is not None else self.interp
return cv2.warpAffine(img, self.rm_image, (self.bound_w, self.bound_h), flags=interp)
def apply_coords(self, coords):
"""
coords should be a N * 2 array-like, containing N couples of (x, y) points
"""
coords = np.asarray(coords, dtype=float)
if len(coords) == 0 or self.angle % 360 == 0:
return coords
return cv2.transform(coords[:, np.newaxis, :], self.rm_coords)[:, 0, :]
def apply_segmentation(self, segmentation):
segmentation = self.apply_image(segmentation, interp=cv2.INTER_NEAREST)
return segmentation
def create_rotation_matrix(self, offset=0):
center = (self.center[0] + offset, self.center[1] + offset)
rm = cv2.getRotationMatrix2D(tuple(center), self.angle, 1)
if self.expand:
# Find the coordinates of the center of rotation in the new image
# The only point for which we know the future coordinates is the center of the image
rot_im_center = cv2.transform(self.image_center[None, None, :] + offset, rm)[0, 0, :]
new_center = np.array([self.bound_w / 2, self.bound_h / 2]) + offset - rot_im_center
# shift the rotation center to the new coordinates
rm[:, 2] += new_center
return rm
def inverse(self):
"""
The inverse is to rotate it back with expand, and crop to get the original shape.
"""
if not self.expand: # Not possible to inverse if a part of the image is lost
raise NotImplementedError()
rotation = RotationTransform(
self.bound_h, self.bound_w, -self.angle, True, None, self.interp
)
crop = CropTransform(
(rotation.bound_w - self.w) // 2, (rotation.bound_h - self.h) // 2, self.w, self.h
)
return TransformList([rotation, crop])
class ColorTransform(Transform):
"""
Generic wrapper for any photometric transforms.
These transformations should only affect the color space and
not the coordinate space of the image (e.g. annotation
coordinates such as bounding boxes should not be changed)
"""
def __init__(self, op):
"""
Args:
op (Callable): operation to be applied to the image,
which takes in an ndarray and returns an ndarray.
"""
if not callable(op):
raise ValueError("op parameter should be callable")
super().__init__()
self._set_attributes(locals())
def apply_image(self, img):
return self.op(img)
def apply_coords(self, coords):
return coords
def inverse(self):
return NoOpTransform()
def apply_segmentation(self, segmentation):
return segmentation
class PILColorTransform(ColorTransform):
"""
Generic wrapper for PIL Photometric image transforms,
which affect the color space and not the coordinate
space of the image
"""
def __init__(self, op):
"""
Args:
op (Callable): operation to be applied to the image,
which takes in a PIL Image and returns a transformed
PIL Image.
For reference on possible operations see:
- https://pillow.readthedocs.io/en/stable/
"""
if not callable(op):
raise ValueError("op parameter should be callable")
super().__init__(op)
def apply_image(self, img):
img = Image.fromarray(img)
return np.asarray(super().apply_image(img))
def HFlip_rotated_box(transform, rotated_boxes):
"""
Apply the horizontal flip transform on rotated boxes.
Args:
rotated_boxes (ndarray): Nx5 floating point array of
(x_center, y_center, width, height, angle_degrees) format
in absolute coordinates.
"""
# Transform x_center
rotated_boxes[:, 0] = transform.width - rotated_boxes[:, 0]
# Transform angle
rotated_boxes[:, 4] = -rotated_boxes[:, 4]
return rotated_boxes
def Resize_rotated_box(transform, rotated_boxes):
"""
Apply the resizing transform on rotated boxes. For details of how these (approximation)
formulas are derived, please refer to :meth:`RotatedBoxes.scale`.
Args:
rotated_boxes (ndarray): Nx5 floating point array of
(x_center, y_center, width, height, angle_degrees) format
in absolute coordinates.
"""
scale_factor_x = transform.new_w * 1.0 / transform.w
scale_factor_y = transform.new_h * 1.0 / transform.h
rotated_boxes[:, 0] *= scale_factor_x
rotated_boxes[:, 1] *= scale_factor_y
theta = rotated_boxes[:, 4] * np.pi / 180.0
c = np.cos(theta)
s = np.sin(theta)
rotated_boxes[:, 2] *= np.sqrt(np.square(scale_factor_x * c) + np.square(scale_factor_y * s))
rotated_boxes[:, 3] *= np.sqrt(np.square(scale_factor_x * s) + np.square(scale_factor_y * c))
rotated_boxes[:, 4] = np.arctan2(scale_factor_x * s, scale_factor_y * c) * 180 / np.pi
return rotated_boxes
HFlipTransform.register_type("rotated_box", HFlip_rotated_box)
ResizeTransform.register_type("rotated_box", Resize_rotated_box)
# not necessary any more with latest fvcore
NoOpTransform.register_type("rotated_box", lambda t, x: x)
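# Usage sketch (added; not part of the original file): with the handlers
# registered above, fvcore-generated methods such as `apply_rotated_box` become
# available on the corresponding transforms (the name follows fvcore's
# `register_type` convention). Flipping a box centered at x=30 in a
# 100-pixel-wide image moves it to x=70 and negates its angle.
def _example_hflip_rotated_box():
    boxes = np.array([[30.0, 40.0, 20.0, 10.0, 15.0]])  # (cx, cy, w, h, angle)
    flipped = HFlipTransform(width=100).apply_rotated_box(boxes.copy())
    assert tuple(flipped[0]) == (70.0, 40.0, 20.0, 10.0, -15.0)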
| CutLER-main | cutler/data/transforms/transform.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
from .train_loop import *
__all__ = [k for k in globals().keys() if not k.startswith("_")]
from .defaults import * | CutLER-main | cutler/engine/__init__.py |
# -*- coding: utf-8 -*-
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Modified by XuDong Wang from https://github.com/facebookresearch/detectron2/blob/main/detectron2/engine/train_loop.py and https://github.com/NVlabs/FreeSOLO/tree/main/freesolo/engine/trainer.py
import copy
import random
import time
import numpy as np
import torch
import torch.nn.functional as F
from torch.nn.parallel import DataParallel, DistributedDataParallel
from detectron2.structures.instances import Instances
from detectron2.structures import BitMasks
from detectron2.engine import SimpleTrainer
__all__ = ["CustomSimpleTrainer", "CustomAMPTrainer"]
class CustomSimpleTrainer(SimpleTrainer):
"""
A simple trainer for the most common type of task:
single-cost single-optimizer single-data-source iterative optimization,
optionally using data-parallelism.
It assumes that every step, you:
1. Compute the loss with a data from the data_loader.
2. Compute the gradients with the above loss.
3. Update the model with the optimizer.
All other tasks during training (checkpointing, logging, evaluation, LR schedule)
are maintained by hooks, which can be registered by :meth:`TrainerBase.register_hooks`.
If you want to do anything fancier than this,
either subclass TrainerBase and implement your own `run_step`,
or write your own training loop.
"""
def __init__(self, model, data_loader, optimizer, cfg=None, use_copy_paste=False,
copy_paste_rate=-1, copy_paste_random_num=None, copy_paste_min_ratio=-1,
copy_paste_max_ratio=-1, visualize_copy_paste=False):
"""
Args:
model: a torch Module. Takes a data from data_loader and returns a
dict of losses.
data_loader: an iterable. Contains data to be used to call model.
optimizer: a torch optimizer.
"""
super().__init__(model, data_loader, optimizer)
"""
We set the model to training mode in the trainer.
However it's valid to train a model that's in eval mode.
If you want your model (or a submodule of it) to behave
like evaluation during training, you can overwrite its train() method.
"""
self.cfg = cfg
# model.train()
# self.model = model
# self.data_loader = data_loader
# to access the data loader iterator, call `self._data_loader_iter`
# self._data_loader_iter_obj = None
# self.optimizer = optimizer
self.use_copy_paste = use_copy_paste if self.cfg is None else self.cfg.DATALOADER.COPY_PASTE
self.cfg_COPY_PASTE_RATE = copy_paste_rate if self.cfg is None else self.cfg.DATALOADER.COPY_PASTE_RATE
self.cfg_COPY_PASTE_RANDOM_NUM = copy_paste_random_num if self.cfg is None else self.cfg.DATALOADER.COPY_PASTE_RANDOM_NUM
self.cfg_COPY_PASTE_MIN_RATIO = copy_paste_min_ratio if self.cfg is None else self.cfg.DATALOADER.COPY_PASTE_MIN_RATIO
self.cfg_COPY_PASTE_MAX_RATIO = copy_paste_max_ratio if self.cfg is None else self.cfg.DATALOADER.COPY_PASTE_MAX_RATIO
self.cfg_VISUALIZE_COPY_PASTE = visualize_copy_paste if self.cfg is None else self.cfg.DATALOADER.VISUALIZE_COPY_PASTE
    def IoU(self, mask1, mask2): # only works when the batch size is 1
mask1, mask2 = (mask1>0.5).to(torch.bool), (mask2>0.5).to(torch.bool)
intersection = torch.sum(mask1 * (mask1 == mask2), dim=[-1, -2]).squeeze()
union = torch.sum(mask1 + mask2, dim=[-1, -2]).squeeze()
return (intersection.to(torch.float) / union).mean().view(1, -1)
    def IoY(self, mask1, mask2): # only works when the batch size is 1
# print(mask1.size(), mask2.size())
mask1, mask2 = mask1.squeeze(), mask2.squeeze()
mask1, mask2 = (mask1>0.5).to(torch.bool), (mask2>0.5).to(torch.bool)
intersection = torch.sum(mask1 * (mask1 == mask2), dim=[-1, -2]).squeeze()
union = torch.sum(mask2, dim=[-1, -2]).squeeze()
return (intersection.to(torch.float) / union).mean().view(1, -1)
def copy_and_paste(self, labeled_data, unlabeled_data):
new_unlabeled_data = []
def mask_iou_matrix(x, y, mode='iou'):
x = x.reshape(x.shape[0], -1).float()
y = y.reshape(y.shape[0], -1).float()
inter_matrix = x @ y.transpose(1, 0) # n1xn2
sum_x = x.sum(1)[:, None].expand(x.shape[0], y.shape[0])
sum_y = y.sum(1)[None, :].expand(x.shape[0], y.shape[0])
if mode == 'ioy':
iou_matrix = inter_matrix / (sum_y) # [1, 1]
else:
iou_matrix = inter_matrix / (sum_x + sum_y - inter_matrix) # [1, 1]
return iou_matrix
def visualize_data(data, save_path = './sample.jpg'):
from data import detection_utils as utils
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.utils.visualizer import Visualizer
data["instances"] = data["instances"].to(device='cpu')
img = data["image"].permute(1, 2, 0).cpu().detach().numpy()
img = utils.convert_image_to_rgb(img, 'RGB')
metadata = MetadataCatalog.get('imagenet_train_tau0.15')
visualizer = Visualizer(img, metadata=metadata, scale=1.0)
target_fields = data["instances"].get_fields()
labels = [metadata.thing_classes[i] for i in target_fields["gt_classes"]]
vis = visualizer.overlay_instances(
labels=labels,
boxes=target_fields.get("gt_boxes"), # ("gt_boxes", None),
masks=target_fields.get("gt_masks"), # ("gt_masks", None),
keypoints=target_fields.get("gt_keypoints", None),
)
print("Saving to {} ...".format(save_path))
vis.save(save_path)
for cur_labeled_data, cur_unlabeled_data in zip(labeled_data, unlabeled_data):
cur_labeled_instances = cur_labeled_data["instances"]
cur_labeled_image = cur_labeled_data["image"]
cur_unlabeled_instances = cur_unlabeled_data["instances"]
cur_unlabeled_image = cur_unlabeled_data["image"]
num_labeled_instances = len(cur_labeled_instances)
copy_paste_rate = random.random()
if self.cfg_COPY_PASTE_RATE >= copy_paste_rate and num_labeled_instances > 0:
if self.cfg_COPY_PASTE_RANDOM_NUM:
num_copy = 1 if num_labeled_instances == 1 else np.random.randint(1, max(1, num_labeled_instances))
else:
num_copy = num_labeled_instances
else:
num_copy = 0
if num_labeled_instances == 0 or num_copy == 0:
new_unlabeled_data.append(cur_unlabeled_data)
else:
# print("num_labeled_instances, num_copy: ", num_labeled_instances, num_copy)
choice = np.random.choice(num_labeled_instances, num_copy, replace=False)
copied_instances = cur_labeled_instances[choice].to(device=cur_unlabeled_instances.gt_boxes.device)
copied_masks = copied_instances.gt_masks
copied_boxes = copied_instances.gt_boxes
_, labeled_h, labeled_w = cur_labeled_image.shape
_, unlabeled_h, unlabeled_w = cur_unlabeled_image.shape
# rescale the labeled image to align with unlabeled one.
if isinstance(copied_masks, torch.Tensor):
masks_new = copied_masks[None, ...].float()
else:
masks_new = copied_masks.tensor[None, ...].float()
# resize the masks with a random ratio from 0.5 to 1.0
resize_ratio = random.uniform(self.cfg_COPY_PASTE_MIN_RATIO, self.cfg_COPY_PASTE_MAX_RATIO)
w_new = int(resize_ratio * unlabeled_w)
h_new = int(resize_ratio * unlabeled_h)
w_shift = random.randint(0, unlabeled_w - w_new)
h_shift = random.randint(0, unlabeled_h - h_new)
cur_labeled_image_new = F.interpolate(cur_labeled_image[None, ...].float(), size=(h_new, w_new), mode="bilinear", align_corners=False).byte().squeeze(0)
if isinstance(copied_masks, torch.Tensor):
masks_new = F.interpolate(copied_masks[None, ...].float(), size=(h_new, w_new), mode="bilinear", align_corners=False).bool().squeeze(0)
else:
masks_new = F.interpolate(copied_masks.tensor[None, ...].float(), size=(h_new, w_new), mode="bilinear", align_corners=False).bool().squeeze(0)
copied_boxes.scale(1. * unlabeled_w / labeled_w * resize_ratio, 1. * unlabeled_h / labeled_h * resize_ratio)
if isinstance(cur_unlabeled_instances.gt_masks, torch.Tensor):
_, mask_w, mask_h = cur_unlabeled_instances.gt_masks.size()
else:
_, mask_w, mask_h = cur_unlabeled_instances.gt_masks.tensor.size()
masks_new_all = torch.zeros(num_copy, mask_w, mask_h)
image_new_all = torch.zeros_like(cur_unlabeled_image)
image_new_all[:, h_shift:h_shift+h_new, w_shift:w_shift+w_new] += cur_labeled_image_new
masks_new_all[:, h_shift:h_shift+h_new, w_shift:w_shift+w_new] += masks_new
cur_labeled_image = image_new_all.byte() #.squeeze(0)
if isinstance(copied_masks, torch.Tensor):
copied_masks = masks_new_all.bool() #.squeeze(0)
else:
copied_masks.tensor = masks_new_all.bool() #.squeeze(0)
                # gt_boxes are (x0, y0, x1, y1): shift x by w_shift and y by h_shift
                # to match where the rescaled crop was pasted into the image.
                copied_boxes.tensor[:, 0] += w_shift
                copied_boxes.tensor[:, 2] += w_shift
                copied_boxes.tensor[:, 1] += h_shift
                copied_boxes.tensor[:, 3] += h_shift
copied_instances.gt_masks = copied_masks
copied_instances.gt_boxes = copied_boxes
copied_instances._image_size = (unlabeled_h, unlabeled_w)
if len(cur_unlabeled_instances) == 0:
if isinstance(copied_instances.gt_masks, torch.Tensor):
alpha = copied_instances.gt_masks.sum(0) > 0
else:
alpha = copied_instances.gt_masks.tensor.sum(0) > 0
# merge image
alpha = alpha.cpu()
composited_image = (alpha * cur_labeled_image) + (~alpha * cur_unlabeled_image)
cur_unlabeled_data["image"] = composited_image
cur_unlabeled_data["instances"] = copied_instances
else:
# remove the copied object if iou greater than 0.5
if isinstance(copied_masks, torch.Tensor):
iou_matrix = mask_iou_matrix(copied_masks, cur_unlabeled_instances.gt_masks, mode='ioy') # nxN
else:
iou_matrix = mask_iou_matrix(copied_masks.tensor, cur_unlabeled_instances.gt_masks.tensor, mode='ioy') # nxN
keep = iou_matrix.max(1)[0] < 0.5
if keep.sum() == 0:
new_unlabeled_data.append(cur_unlabeled_data)
continue
copied_instances = copied_instances[keep]
# update existing instances in unlabeled image
if isinstance(copied_instances.gt_masks, torch.Tensor):
alpha = copied_instances.gt_masks.sum(0) > 0
cur_unlabeled_instances.gt_masks = ~alpha * cur_unlabeled_instances.gt_masks
areas_unlabeled = cur_unlabeled_instances.gt_masks.sum((1,2))
else:
alpha = copied_instances.gt_masks.tensor.sum(0) > 0
cur_unlabeled_instances.gt_masks.tensor = ~alpha * cur_unlabeled_instances.gt_masks.tensor
areas_unlabeled = cur_unlabeled_instances.gt_masks.tensor.sum((1,2))
# merge image
alpha = alpha.cpu()
composited_image = (alpha * cur_labeled_image) + (~alpha * cur_unlabeled_image)
# merge instances
merged_instances = Instances.cat([cur_unlabeled_instances[areas_unlabeled > 0], copied_instances])
# update boxes
if isinstance(merged_instances.gt_masks, torch.Tensor):
merged_instances.gt_boxes = BitMasks(merged_instances.gt_masks).get_bounding_boxes()
# merged_instances.gt_boxes = merged_instances.gt_masks.get_bounding_boxes()
else:
merged_instances.gt_boxes = merged_instances.gt_masks.get_bounding_boxes()
cur_unlabeled_data["image"] = composited_image
cur_unlabeled_data["instances"] = merged_instances
if self.cfg_VISUALIZE_COPY_PASTE:
visualize_data(cur_unlabeled_data, save_path = 'sample_{}.jpg'.format(np.random.randint(5)))
new_unlabeled_data.append(cur_unlabeled_data)
return new_unlabeled_data
def run_step(self):
"""
Implement the standard training logic described above.
"""
assert self.model.training, "[SimpleTrainer] model was changed to eval mode!"
start = time.perf_counter()
"""
If you want to do something with the data, you can wrap the dataloader.
"""
data = next(self._data_loader_iter)
# print(data, len(data))
if self.use_copy_paste:
# print('using copy paste')
data = self.copy_and_paste(copy.deepcopy(data[::-1]), data)
data_time = time.perf_counter() - start
"""
If you want to do something with the losses, you can wrap the model.
"""
loss_dict = self.model(data)
if isinstance(loss_dict, torch.Tensor):
losses = loss_dict
loss_dict = {"total_loss": loss_dict}
else:
losses = sum(loss_dict.values())
"""
If you need to accumulate gradients or do something similar, you can
wrap the optimizer with your custom `zero_grad()` method.
"""
if not torch.isnan(losses):
self.optimizer.zero_grad()
losses.backward()
else:
print('Nan loss. Skipped.')
self._write_metrics(loss_dict, data_time)
"""
If you need gradient clipping/scaling or other processing, you can
wrap the optimizer with your custom `step()` method. But it is
suboptimal as explained in https://arxiv.org/abs/2006.15704 Sec 3.2.4
"""
self.optimizer.step()
class CustomAMPTrainer(CustomSimpleTrainer):
"""
Like :class:`SimpleTrainer`, but uses PyTorch's native automatic mixed precision
in the training loop.
"""
def __init__(self, model, data_loader, optimizer, cfg=None, grad_scaler=None, use_copy_paste=False,
copy_paste_rate=-1, copy_paste_random_num=None, copy_paste_min_ratio=-1,
copy_paste_max_ratio=-1, visualize_copy_paste=False):
"""
Args:
model, data_loader, optimizer: same as in :class:`SimpleTrainer`.
grad_scaler: torch GradScaler to automatically scale gradients.
"""
unsupported = "AMPTrainer does not support single-process multi-device training!"
if isinstance(model, DistributedDataParallel):
assert not (model.device_ids and len(model.device_ids) > 1), unsupported
assert not isinstance(model, DataParallel), unsupported
super().__init__(model, data_loader, optimizer, cfg=cfg, use_copy_paste=use_copy_paste, \
copy_paste_rate=copy_paste_rate, copy_paste_random_num=copy_paste_random_num, \
copy_paste_min_ratio=copy_paste_min_ratio, copy_paste_max_ratio=copy_paste_max_ratio, \
visualize_copy_paste=visualize_copy_paste)
if grad_scaler is None:
from torch.cuda.amp import GradScaler
grad_scaler = GradScaler()
self.grad_scaler = grad_scaler
def run_step(self):
"""
Implement the AMP training logic.
"""
assert self.model.training, "[AMPTrainer] model was changed to eval mode!"
assert torch.cuda.is_available(), "[AMPTrainer] CUDA is required for AMP training!"
from torch.cuda.amp import autocast
start = time.perf_counter()
data = next(self._data_loader_iter)
if self.use_copy_paste:
# print('using copy paste')
data = self.copy_and_paste(copy.deepcopy(data[::-1]), data)
data_time = time.perf_counter() - start
with autocast():
loss_dict = self.model(data)
if isinstance(loss_dict, torch.Tensor):
losses = loss_dict
loss_dict = {"total_loss": loss_dict}
else:
losses = sum(loss_dict.values())
if not torch.isnan(losses):
self.optimizer.zero_grad()
self.grad_scaler.scale(losses).backward()
else:
print('Nan loss.')
self._write_metrics(loss_dict, data_time)
self.grad_scaler.step(self.optimizer)
self.grad_scaler.update()
def state_dict(self):
ret = super().state_dict()
ret["grad_scaler"] = self.grad_scaler.state_dict()
return ret
def load_state_dict(self, state_dict):
super().load_state_dict(state_dict)
self.grad_scaler.load_state_dict(state_dict["grad_scaler"])
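# Usage sketch (added for illustration; not part of the original file): these
# trainers are normally instantiated by the DefaultTrainer in engine/defaults.py,
# which reads the DATALOADER.COPY_PASTE* options from the config. The manual,
# config-free path looks like the following; `model`, `data_loader` and
# `optimizer` are hypothetical arguments assumed to be built elsewhere.
def _example_build_copy_paste_trainer(model, data_loader, optimizer):
    return CustomSimpleTrainer(
        model,
        data_loader,
        optimizer,
        use_copy_paste=True,         # enable the copy-and-paste augmentation
        copy_paste_rate=0.5,         # apply it to roughly half of the images
        copy_paste_random_num=True,  # paste a random subset of the instances
        copy_paste_min_ratio=0.5,    # rescale pasted crops to 50%-100% of the target
        copy_paste_max_ratio=1.0,
    )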
| CutLER-main | cutler/engine/train_loop.py |
# -*- coding: utf-8 -*-
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Modified by XuDong Wang from https://github.com/facebookresearch/detectron2/blob/main/detectron2/engine/defaults.py
"""
This file contains components with some default boilerplate logic user may need
in training / testing. They will not work for everyone, but many users may find them useful.
The behavior of functions/classes in this file is subject to change,
since they are meant to represent the "common default behavior" people need in their projects.
"""
import argparse
import logging
import os
import sys
import weakref
from collections import OrderedDict
from typing import Optional
import torch
from fvcore.nn.precise_bn import get_bn_modules
from omegaconf import OmegaConf
from torch.nn.parallel import DistributedDataParallel
import data.transforms as T
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import CfgNode, LazyConfig
from detectron2.data import (
MetadataCatalog,
)
from data import (
build_detection_test_loader,
build_detection_train_loader,
)
from detectron2.evaluation import (
DatasetEvaluator,
inference_on_dataset,
print_csv_format,
verify_results,
)
from modeling import build_model
from solver import build_lr_scheduler, build_optimizer
from detectron2.utils import comm
from detectron2.utils.collect_env import collect_env_info
from detectron2.utils.env import seed_all_rng
from detectron2.utils.events import CommonMetricPrinter, JSONWriter, TensorboardXWriter
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import setup_logger
from detectron2.engine import hooks
from detectron2.engine import TrainerBase
from .train_loop import CustomAMPTrainer, CustomSimpleTrainer
__all__ = [
"create_ddp_model",
"default_argument_parser",
"default_setup",
"default_writers",
"DefaultPredictor",
"DefaultTrainer",
]
def create_ddp_model(model, *, fp16_compression=False, **kwargs):
"""
Create a DistributedDataParallel model if there are >1 processes.
Args:
model: a torch.nn.Module
fp16_compression: add fp16 compression hooks to the ddp object.
See more at https://pytorch.org/docs/stable/ddp_comm_hooks.html#torch.distributed.algorithms.ddp_comm_hooks.default_hooks.fp16_compress_hook
kwargs: other arguments of :module:`torch.nn.parallel.DistributedDataParallel`.
""" # noqa
if comm.get_world_size() == 1:
return model
if "device_ids" not in kwargs:
kwargs["device_ids"] = [comm.get_local_rank()]
ddp = DistributedDataParallel(model, **kwargs)
if fp16_compression:
from torch.distributed.algorithms.ddp_comm_hooks import default as comm_hooks
ddp.register_comm_hook(state=None, hook=comm_hooks.fp16_compress_hook)
return ddp
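# Usage sketch (added for illustration; not part of the original file): in a
# single-process run this helper returns the model unchanged; otherwise it
# wraps it in DistributedDataParallel, forwarding any extra keyword arguments.
# `cfg` is a hypothetical, already-populated config object.
def _example_wrap_model(cfg):
    model = build_model(cfg)
    return create_ddp_model(model, broadcast_buffers=False)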
def default_argument_parser(epilog=None):
"""
Create a parser with some common arguments used by detectron2 users.
Args:
epilog (str): epilog passed to ArgumentParser describing the usage.
Returns:
argparse.ArgumentParser:
"""
parser = argparse.ArgumentParser(
epilog=epilog
or f"""
Examples:
Run on single machine:
$ {sys.argv[0]} --num-gpus 8 --config-file cfg.yaml
Change some config options:
$ {sys.argv[0]} --config-file cfg.yaml MODEL.WEIGHTS /path/to/weight.pth SOLVER.BASE_LR 0.001
Run on multiple machines:
(machine0)$ {sys.argv[0]} --machine-rank 0 --num-machines 2 --dist-url <URL> [--other-flags]
(machine1)$ {sys.argv[0]} --machine-rank 1 --num-machines 2 --dist-url <URL> [--other-flags]
""",
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file")
parser.add_argument(
"--resume",
action="store_true",
help="Whether to attempt to resume from the checkpoint directory. "
"See documentation of `DefaultTrainer.resume_or_load()` for what it means.",
)
parser.add_argument("--eval-only", action="store_true", help="perform evaluation only")
parser.add_argument("--num-gpus", type=int, default=1, help="number of gpus *per machine*")
parser.add_argument("--num-machines", type=int, default=1, help="total number of machines")
parser.add_argument(
"--machine-rank", type=int, default=0, help="the rank of this machine (unique per machine)"
)
parser.add_argument(
"--test-dataset", type=str, default="", help="the dataset used for evaluation"
)
parser.add_argument(
"--train-dataset", type=str, default="", help="the dataset used for training"
)
parser.add_argument("--no-segm", action="store_true", help="perform evaluation on detection only")
# PyTorch still may leave orphan processes in multi-gpu training.
# Therefore we use a deterministic way to obtain port,
# so that users are aware of orphan processes by seeing the port occupied.
port = 2**15 + 2**14 + hash(os.getuid() if sys.platform != "win32" else 1) % 2**14
parser.add_argument(
"--dist-url",
default="tcp://127.0.0.1:{}".format(port),
help="initialization URL for pytorch distributed backend. See "
"https://pytorch.org/docs/stable/distributed.html for details.",
)
parser.add_argument(
"opts",
help="""
Modify config options at the end of the command. For Yacs configs, use
space-separated "PATH.KEY VALUE" pairs.
For python-based LazyConfig, use "path.key=value".
""".strip(),
default=None,
nargs=argparse.REMAINDER,
)
return parser
def _try_get_key(cfg, *keys, default=None):
"""
Try select keys from cfg until the first key that exists. Otherwise return default.
"""
if isinstance(cfg, CfgNode):
cfg = OmegaConf.create(cfg.dump())
for k in keys:
none = object()
p = OmegaConf.select(cfg, k, default=none)
if p is not none:
return p
return default
def _highlight(code, filename):
try:
import pygments
except ImportError:
return code
from pygments.lexers import Python3Lexer, YamlLexer
from pygments.formatters import Terminal256Formatter
lexer = Python3Lexer() if filename.endswith(".py") else YamlLexer()
code = pygments.highlight(code, lexer, Terminal256Formatter(style="monokai"))
return code
def default_setup(cfg, args):
"""
Perform some basic common setups at the beginning of a job, including:
1. Set up the detectron2 logger
2. Log basic information about environment, cmdline arguments, and config
3. Backup the config to the output directory
Args:
cfg (CfgNode or omegaconf.DictConfig): the full config to be used
        args (argparse.Namespace): the command line arguments to be logged
"""
output_dir = _try_get_key(cfg, "OUTPUT_DIR", "output_dir", "train.output_dir")
if comm.is_main_process() and output_dir:
PathManager.mkdirs(output_dir)
rank = comm.get_rank()
setup_logger(output_dir, distributed_rank=rank, name="fvcore")
logger = setup_logger(output_dir, distributed_rank=rank)
logger.info("Rank of current process: {}. World size: {}".format(rank, comm.get_world_size()))
logger.info("Environment info:\n" + collect_env_info())
logger.info("Command line arguments: " + str(args))
if hasattr(args, "config_file") and args.config_file != "":
logger.info(
"Contents of args.config_file={}:\n{}".format(
args.config_file,
_highlight(PathManager.open(args.config_file, "r").read(), args.config_file),
)
)
if comm.is_main_process() and output_dir:
# Note: some of our scripts may expect the existence of
# config.yaml in output directory
path = os.path.join(output_dir, "config.yaml")
if isinstance(cfg, CfgNode):
logger.info("Running with full config:\n{}".format(_highlight(cfg.dump(), ".yaml")))
with PathManager.open(path, "w") as f:
f.write(cfg.dump())
else:
LazyConfig.save(cfg, path)
logger.info("Full config saved to {}".format(path))
# make sure each worker has a different, yet deterministic seed if specified
seed = _try_get_key(cfg, "SEED", "train.seed", default=-1)
seed_all_rng(None if seed < 0 else seed + rank)
# cudnn benchmark has large overhead. It shouldn't be used considering the small size of
# typical validation set.
if not (hasattr(args, "eval_only") and args.eval_only):
torch.backends.cudnn.benchmark = _try_get_key(
cfg, "CUDNN_BENCHMARK", "train.cudnn_benchmark", default=False
)
def default_writers(output_dir: str, max_iter: Optional[int] = None):
"""
Build a list of :class:`EventWriter` to be used.
It now consists of a :class:`CommonMetricPrinter`,
:class:`TensorboardXWriter` and :class:`JSONWriter`.
Args:
output_dir: directory to store JSON metrics and tensorboard events
max_iter: the total number of iterations
Returns:
list[EventWriter]: a list of :class:`EventWriter` objects.
"""
PathManager.mkdirs(output_dir)
return [
# It may not always print what you want to see, since it prints "common" metrics only.
CommonMetricPrinter(max_iter),
JSONWriter(os.path.join(output_dir, "metrics.json")),
TensorboardXWriter(output_dir),
]
class DefaultPredictor:
"""
Create a simple end-to-end predictor with the given config that runs on
single device for a single input image.
Compared to using the model directly, this class does the following additions:
1. Load checkpoint from `cfg.MODEL.WEIGHTS`.
2. Always take BGR image as the input and apply conversion defined by `cfg.INPUT.FORMAT`.
3. Apply resizing defined by `cfg.INPUT.{MIN,MAX}_SIZE_TEST`.
4. Take one input image and produce a single output, instead of a batch.
This is meant for simple demo purposes, so it does the above steps automatically.
This is not meant for benchmarks or running complicated inference logic.
If you'd like to do anything more complicated, please refer to its source code as
examples to build and use the model manually.
Attributes:
metadata (Metadata): the metadata of the underlying dataset, obtained from
cfg.DATASETS.TEST.
Examples:
::
pred = DefaultPredictor(cfg)
inputs = cv2.imread("input.jpg")
outputs = pred(inputs)
"""
def __init__(self, cfg):
self.cfg = cfg.clone() # cfg can be modified by model
self.model = build_model(self.cfg)
self.model.eval()
if len(cfg.DATASETS.TEST):
self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0])
checkpointer = DetectionCheckpointer(self.model)
checkpointer.load(cfg.MODEL.WEIGHTS)
self.aug = T.ResizeShortestEdge(
[cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST
)
self.input_format = cfg.INPUT.FORMAT
assert self.input_format in ["RGB", "BGR"], self.input_format
def __call__(self, original_image):
"""
Args:
original_image (np.ndarray): an image of shape (H, W, C) (in BGR order).
Returns:
predictions (dict):
the output of the model for one image only.
See :doc:`/tutorials/models` for details about the format.
"""
with torch.no_grad(): # https://github.com/sphinx-doc/sphinx/issues/4258
# Apply pre-processing to image.
if self.input_format == "RGB":
# whether the model expects BGR inputs or RGB
original_image = original_image[:, :, ::-1]
height, width = original_image.shape[:2]
image = self.aug.get_transform(original_image).apply_image(original_image)
image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
inputs = {"image": image, "height": height, "width": width}
predictions = self.model([inputs])[0]
return predictions
class DefaultTrainer(TrainerBase):
"""
A trainer with default training logic. It does the following:
1. Create a :class:`SimpleTrainer` using model, optimizer, dataloader
defined by the given config. Create a LR scheduler defined by the config.
2. Load the last checkpoint or `cfg.MODEL.WEIGHTS`, if exists, when
`resume_or_load` is called.
3. Register a few common hooks defined by the config.
It is created to simplify the **standard model training workflow** and reduce code boilerplate
for users who only need the standard training workflow, with standard features.
It means this class makes *many assumptions* about your training logic that
may easily become invalid in a new research. In fact, any assumptions beyond those made in the
:class:`SimpleTrainer` are too much for research.
The code of this class has been annotated about restrictive assumptions it makes.
When they do not work for you, you're encouraged to:
1. Overwrite methods of this class, OR:
2. Use :class:`SimpleTrainer`, which only does minimal SGD training and
nothing else. You can then add your own hooks if needed. OR:
3. Write your own training loop similar to `tools/plain_train_net.py`.
See the :doc:`/tutorials/training` tutorials for more details.
Note that the behavior of this class, like other functions/classes in
this file, is not stable, since it is meant to represent the "common default behavior".
It is only guaranteed to work well with the standard models and training workflow in detectron2.
To obtain more stable behavior, write your own training logic with other public APIs.
Examples:
::
trainer = DefaultTrainer(cfg)
trainer.resume_or_load() # load last checkpoint or MODEL.WEIGHTS
trainer.train()
Attributes:
scheduler:
checkpointer (DetectionCheckpointer):
cfg (CfgNode):
"""
def __init__(self, cfg):
"""
Args:
cfg (CfgNode):
"""
super().__init__()
logger = logging.getLogger("detectron2")
if not logger.isEnabledFor(logging.INFO): # setup_logger is not called for d2
setup_logger()
cfg = DefaultTrainer.auto_scale_workers(cfg, comm.get_world_size())
# Assume these objects must be constructed in this order.
model = self.build_model(cfg)
optimizer = self.build_optimizer(cfg, model)
data_loader = self.build_train_loader(cfg)
model = create_ddp_model(model, broadcast_buffers=False)
if cfg.SOLVER.AMP.ENABLED:
self._trainer = CustomAMPTrainer(model, data_loader, optimizer, cfg=cfg)
else:
self._trainer = CustomSimpleTrainer(model, data_loader, optimizer, cfg=cfg)
self.scheduler = self.build_lr_scheduler(cfg, optimizer)
self.checkpointer = DetectionCheckpointer(
# Assume you want to save checkpoints together with logs/statistics
model,
cfg.OUTPUT_DIR,
trainer=weakref.proxy(self),
)
self.start_iter = 0
self.max_iter = cfg.SOLVER.MAX_ITER
self.cfg = cfg
self.register_hooks(self.build_hooks())
def resume_or_load(self, resume=True):
"""
If `resume==True` and `cfg.OUTPUT_DIR` contains the last checkpoint (defined by
a `last_checkpoint` file), resume from the file. Resuming means loading all
available states (e.g. optimizer and scheduler) and updating the iteration counter
from the checkpoint. ``cfg.MODEL.WEIGHTS`` will not be used.
Otherwise, this is considered as an independent training. The method will load model
weights from the file `cfg.MODEL.WEIGHTS` (but will not load other states) and start
from iteration 0.
Args:
resume (bool): whether to do resume or not
"""
self.checkpointer.resume_or_load(self.cfg.MODEL.WEIGHTS, resume=resume)
if resume and self.checkpointer.has_checkpoint():
# The checkpoint stores the training iteration that just finished, thus we start
# at the next iteration
self.start_iter = self.iter + 1
def build_hooks(self):
"""
Build a list of default hooks, including timing, evaluation,
checkpointing, lr scheduling, precise BN, writing events.
Returns:
list[HookBase]:
"""
cfg = self.cfg.clone()
cfg.defrost()
cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN
ret = [
hooks.IterationTimer(),
hooks.LRScheduler(),
hooks.PreciseBN(
# Run at the same freq as (but before) evaluation.
cfg.TEST.EVAL_PERIOD,
self.model,
# Build a new data loader to not affect training
self.build_train_loader(cfg),
cfg.TEST.PRECISE_BN.NUM_ITER,
)
if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model)
else None,
]
# Do PreciseBN before checkpointer, because it updates the model and needs to
# be saved by checkpointer.
# This is not always the best: if checkpointing has a different frequency,
# some checkpoints may have more precise statistics than others.
if comm.is_main_process():
ret.append(hooks.PeriodicCheckpointer(self.checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD))
def test_and_save_results():
self._last_eval_results = self.test(self.cfg, self.model)
return self._last_eval_results
# Do evaluation after checkpointer, because then if it fails,
# we can use the saved checkpoint to debug.
ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results))
if comm.is_main_process():
# Here the default print/log frequency of each writer is used.
# run writers in the end, so that evaluation metrics are written
ret.append(hooks.PeriodicWriter(self.build_writers(), period=20))
return ret
def build_writers(self):
"""
Build a list of writers to be used using :func:`default_writers()`.
If you'd like a different list of writers, you can overwrite it in
your trainer.
Returns:
list[EventWriter]: a list of :class:`EventWriter` objects.
"""
return default_writers(self.cfg.OUTPUT_DIR, self.max_iter)
def train(self):
"""
Run training.
Returns:
OrderedDict of results, if evaluation is enabled. Otherwise None.
"""
super().train(self.start_iter, self.max_iter)
if len(self.cfg.TEST.EXPECTED_RESULTS) and comm.is_main_process():
assert hasattr(
self, "_last_eval_results"
), "No evaluation results obtained during training!"
verify_results(self.cfg, self._last_eval_results)
return self._last_eval_results
def run_step(self):
self._trainer.iter = self.iter
self._trainer.run_step()
def state_dict(self):
ret = super().state_dict()
ret["_trainer"] = self._trainer.state_dict()
return ret
def load_state_dict(self, state_dict):
super().load_state_dict(state_dict)
self._trainer.load_state_dict(state_dict["_trainer"])
@classmethod
def build_model(cls, cfg):
"""
Returns:
torch.nn.Module:
It now calls :func:`detectron2.modeling.build_model`.
Overwrite it if you'd like a different model.
"""
model = build_model(cfg)
logger = logging.getLogger(__name__)
logger.info("Model:\n{}".format(model))
return model
@classmethod
def build_optimizer(cls, cfg, model):
"""
Returns:
torch.optim.Optimizer:
It now calls :func:`detectron2.solver.build_optimizer`.
Overwrite it if you'd like a different optimizer.
"""
return build_optimizer(cfg, model)
@classmethod
def build_lr_scheduler(cls, cfg, optimizer):
"""
It now calls :func:`detectron2.solver.build_lr_scheduler`.
Overwrite it if you'd like a different scheduler.
"""
return build_lr_scheduler(cfg, optimizer)
@classmethod
def build_train_loader(cls, cfg):
"""
Returns:
iterable
It now calls :func:`detectron2.data.build_detection_train_loader`.
Overwrite it if you'd like a different data loader.
"""
return build_detection_train_loader(cfg)
@classmethod
def build_test_loader(cls, cfg, dataset_name):
"""
Returns:
iterable
It now calls :func:`detectron2.data.build_detection_test_loader`.
Overwrite it if you'd like a different data loader.
"""
return build_detection_test_loader(cfg, dataset_name)
@classmethod
def build_evaluator(cls, cfg, dataset_name):
"""
Returns:
DatasetEvaluator or None
It is not implemented by default.
"""
raise NotImplementedError(
"""
If you want DefaultTrainer to automatically run evaluation,
please implement `build_evaluator()` in subclasses (see train_net.py for example).
Alternatively, you can call evaluation functions yourself (see Colab balloon tutorial for example).
"""
)
@classmethod
def test(cls, cfg, model, evaluators=None):
"""
Evaluate the given model. The given model is expected to already contain
weights to evaluate.
Args:
cfg (CfgNode):
model (nn.Module):
evaluators (list[DatasetEvaluator] or None): if None, will call
:meth:`build_evaluator`. Otherwise, must have the same length as
``cfg.DATASETS.TEST``.
Returns:
dict: a dict of result metrics
"""
logger = logging.getLogger(__name__)
if isinstance(evaluators, DatasetEvaluator):
evaluators = [evaluators]
if evaluators is not None:
assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format(
len(cfg.DATASETS.TEST), len(evaluators)
)
results = OrderedDict()
for idx, dataset_name in enumerate(cfg.DATASETS.TEST):
data_loader = cls.build_test_loader(cfg, dataset_name)
# When evaluators are passed in as arguments,
# implicitly assume that evaluators can be created before data_loader.
if evaluators is not None:
evaluator = evaluators[idx]
else:
try:
evaluator = cls.build_evaluator(cfg, dataset_name)
except NotImplementedError:
logger.warning(
"No evaluator found. Use `DefaultTrainer.test(evaluators=)`, "
"or implement its `build_evaluator` method."
)
results[dataset_name] = {}
continue
results_i = inference_on_dataset(model, data_loader, evaluator)
results[dataset_name] = results_i
if comm.is_main_process():
assert isinstance(
results_i, dict
), "Evaluator must return a dict on the main process. Got {} instead.".format(
results_i
)
logger.info("Evaluation results for {} in csv format:".format(dataset_name))
print_csv_format(results_i)
if len(results) == 1:
results = list(results.values())[0]
return results
@staticmethod
def auto_scale_workers(cfg, num_workers: int):
"""
When the config is defined for certain number of workers (according to
``cfg.SOLVER.REFERENCE_WORLD_SIZE``) that's different from the number of
workers currently in use, returns a new cfg where the total batch size
is scaled so that the per-GPU batch size stays the same as the
original ``IMS_PER_BATCH // REFERENCE_WORLD_SIZE``.
Other config options are also scaled accordingly:
* training steps and warmup steps are scaled inverse proportionally.
* learning rate is scaled proportionally, following :paper:`ImageNet in 1h`.
For example, with the original config like the following:
.. code-block:: yaml
IMS_PER_BATCH: 16
BASE_LR: 0.1
REFERENCE_WORLD_SIZE: 8
MAX_ITER: 5000
STEPS: (4000,)
CHECKPOINT_PERIOD: 1000
When this config is used on 16 GPUs instead of the reference number 8,
calling this method will return a new config with:
.. code-block:: yaml
IMS_PER_BATCH: 32
BASE_LR: 0.2
REFERENCE_WORLD_SIZE: 16
MAX_ITER: 2500
STEPS: (2000,)
CHECKPOINT_PERIOD: 500
Note that both the original config and this new config can be trained on 16 GPUs.
It's up to the user whether to enable this feature (by setting ``REFERENCE_WORLD_SIZE``).
Returns:
CfgNode: a new config. Same as original if ``cfg.SOLVER.REFERENCE_WORLD_SIZE==0``.
"""
old_world_size = cfg.SOLVER.REFERENCE_WORLD_SIZE
if old_world_size == 0 or old_world_size == num_workers:
return cfg
cfg = cfg.clone()
frozen = cfg.is_frozen()
cfg.defrost()
assert (
cfg.SOLVER.IMS_PER_BATCH % old_world_size == 0
), "Invalid REFERENCE_WORLD_SIZE in config!"
scale = num_workers / old_world_size
bs = cfg.SOLVER.IMS_PER_BATCH = int(round(cfg.SOLVER.IMS_PER_BATCH * scale))
lr = cfg.SOLVER.BASE_LR = cfg.SOLVER.BASE_LR * scale
max_iter = cfg.SOLVER.MAX_ITER = int(round(cfg.SOLVER.MAX_ITER / scale))
warmup_iter = cfg.SOLVER.WARMUP_ITERS = int(round(cfg.SOLVER.WARMUP_ITERS / scale))
cfg.SOLVER.STEPS = tuple(int(round(s / scale)) for s in cfg.SOLVER.STEPS)
cfg.TEST.EVAL_PERIOD = int(round(cfg.TEST.EVAL_PERIOD / scale))
cfg.SOLVER.CHECKPOINT_PERIOD = int(round(cfg.SOLVER.CHECKPOINT_PERIOD / scale))
cfg.SOLVER.REFERENCE_WORLD_SIZE = num_workers # maintain invariant
logger = logging.getLogger(__name__)
logger.info(
f"Auto-scaling the config to batch_size={bs}, learning_rate={lr}, "
f"max_iter={max_iter}, warmup={warmup_iter}."
)
if frozen:
cfg.freeze()
return cfg
# Access basic attributes from the underlying trainer
for _attr in ["model", "data_loader", "optimizer"]:
setattr(
DefaultTrainer,
_attr,
property(
# getter
lambda self, x=_attr: getattr(self._trainer, x),
# setter
lambda self, value, x=_attr: setattr(self._trainer, x, value),
),
)
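# A minimal sketch of the `auto_scale_workers` arithmetic (illustrative only;
# plain numbers mirror the yaml example in its docstring, no config object needed):
def _example_auto_scale(ims_per_batch=16, base_lr=0.1, max_iter=5000,
                        reference_world_size=8, num_workers=16):
    scale = num_workers / reference_world_size
    return {
        "IMS_PER_BATCH": int(round(ims_per_batch * scale)),  # 32
        "BASE_LR": base_lr * scale,                          # 0.2
        "MAX_ITER": int(round(max_iter / scale)),            # 2500
    }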
| CutLER-main | cutler/engine/defaults.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# copied from https://github.com/facebookresearch/detectron2/blob/main/detectron2/utils/colormap.py
"""
An awesome colormap for really neat visualizations.
Copied from Detectron, and removed gray colors.
"""
import numpy as np
import random
__all__ = ["colormap", "random_color", "random_colors"]
# fmt: off
# RGB:
_COLORS = np.array(
[
0.000, 0.447, 0.741,
0.850, 0.325, 0.098,
0.929, 0.694, 0.125,
0.494, 0.184, 0.556,
0.466, 0.674, 0.188,
0.301, 0.745, 0.933,
0.635, 0.078, 0.184,
0.300, 0.300, 0.300,
0.600, 0.600, 0.600,
1.000, 0.000, 0.000,
1.000, 0.500, 0.000,
0.749, 0.749, 0.000,
0.000, 1.000, 0.000,
0.000, 0.000, 1.000,
0.667, 0.000, 1.000,
0.333, 0.333, 0.000,
0.333, 0.667, 0.000,
0.333, 1.000, 0.000,
0.667, 0.333, 0.000,
0.667, 0.667, 0.000,
0.667, 1.000, 0.000,
1.000, 0.333, 0.000,
1.000, 0.667, 0.000,
1.000, 1.000, 0.000,
0.000, 0.333, 0.500,
0.000, 0.667, 0.500,
0.000, 1.000, 0.500,
0.333, 0.000, 0.500,
0.333, 0.333, 0.500,
0.333, 0.667, 0.500,
0.333, 1.000, 0.500,
0.667, 0.000, 0.500,
0.667, 0.333, 0.500,
0.667, 0.667, 0.500,
0.667, 1.000, 0.500,
1.000, 0.000, 0.500,
1.000, 0.333, 0.500,
1.000, 0.667, 0.500,
1.000, 1.000, 0.500,
0.000, 0.333, 1.000,
0.000, 0.667, 1.000,
0.000, 1.000, 1.000,
0.333, 0.000, 1.000,
0.333, 0.333, 1.000,
0.333, 0.667, 1.000,
0.333, 1.000, 1.000,
0.667, 0.000, 1.000,
0.667, 0.333, 1.000,
0.667, 0.667, 1.000,
0.667, 1.000, 1.000,
1.000, 0.000, 1.000,
1.000, 0.333, 1.000,
1.000, 0.667, 1.000,
0.333, 0.000, 0.000,
0.500, 0.000, 0.000,
0.667, 0.000, 0.000,
0.833, 0.000, 0.000,
1.000, 0.000, 0.000,
0.000, 0.167, 0.000,
0.000, 0.333, 0.000,
0.000, 0.500, 0.000,
0.000, 0.667, 0.000,
0.000, 0.833, 0.000,
0.000, 1.000, 0.000,
0.000, 0.000, 0.167,
0.000, 0.000, 0.333,
0.000, 0.000, 0.500,
0.000, 0.000, 0.667,
0.000, 0.000, 0.833,
0.000, 0.000, 1.000,
0.000, 0.000, 0.000,
0.143, 0.143, 0.143,
0.857, 0.857, 0.857,
1.000, 1.000, 1.000
]
).astype(np.float32).reshape(-1, 3)
# fmt: on
def colormap(rgb=False, maximum=255):
"""
Args:
rgb (bool): whether to return RGB colors or BGR colors.
maximum (int): either 255 or 1
Returns:
ndarray: a float32 array of Nx3 colors, in range [0, 255] or [0, 1]
"""
assert maximum in [255, 1], maximum
c = _COLORS * maximum
if not rgb:
c = c[:, ::-1]
return c
def random_color(rgb=False, maximum=255):
"""
Args:
rgb (bool): whether to return RGB colors or BGR colors.
maximum (int): either 255 or 1
Returns:
ndarray: a vector of 3 numbers
"""
idx = np.random.randint(0, len(_COLORS))
ret = _COLORS[idx] * maximum
if not rgb:
ret = ret[::-1]
return ret
def random_colors(N, rgb=False, maximum=255):
"""
Args:
N (int): number of unique colors needed
rgb (bool): whether to return RGB colors or BGR colors.
maximum (int): either 255 or 1
Returns:
list[ndarray]: a list of N random colors, each a vector of 3 numbers
"""
indices = random.sample(range(len(_COLORS)), N)
ret = [_COLORS[i] * maximum for i in indices]
if not rgb:
ret = [x[::-1] for x in ret]
return ret
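# A minimal usage sketch (illustrative only; the helper below is not part of the
# original module): draw n distinct colors as RGB vectors in [0, 1].
def _example_palette(n=5):
    return random_colors(n, rgb=True, maximum=1)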
if __name__ == "__main__":
import cv2
size = 100
H, W = 10, 10
canvas = np.random.rand(H * size, W * size, 3).astype("float32")
for h in range(H):
for w in range(W):
idx = h * W + w
if idx >= len(_COLORS):
break
canvas[h * size : (h + 1) * size, w * size : (w + 1) * size] = _COLORS[idx]
cv2.imshow("a", canvas)
cv2.waitKey(0) | CutLER-main | maskcut/colormap.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# modified by Xudong Wang based on https://github.com/lucasb-eyer/pydensecrf/blob/master/pydensecrf/tests/test_dcrf.py and third_party/TokenCut
import numpy as np
import pydensecrf.densecrf as dcrf
import pydensecrf.utils as utils
import torch
import torch.nn.functional as F
import torchvision.transforms.functional as VF
MAX_ITER = 10
POS_W = 7
POS_XY_STD = 3
Bi_W = 10
Bi_XY_STD = 50
Bi_RGB_STD = 5
def densecrf(image, mask):
h, w = mask.shape
mask = mask.reshape(1, h, w)
fg = mask.astype(float)
bg = 1 - fg
output_logits = torch.from_numpy(np.concatenate((bg,fg), axis=0))
H, W = image.shape[:2]
image = np.ascontiguousarray(image)
output_logits = F.interpolate(output_logits.unsqueeze(0), size=(H, W), mode="bilinear").squeeze()
output_probs = F.softmax(output_logits, dim=0).cpu().numpy()
c = output_probs.shape[0]
h = output_probs.shape[1]
w = output_probs.shape[2]
U = utils.unary_from_softmax(output_probs)
U = np.ascontiguousarray(U)
d = dcrf.DenseCRF2D(w, h, c)
d.setUnaryEnergy(U)
d.addPairwiseGaussian(sxy=POS_XY_STD, compat=POS_W)
d.addPairwiseBilateral(sxy=Bi_XY_STD, srgb=Bi_RGB_STD, rgbim=image, compat=Bi_W)
Q = d.inference(MAX_ITER)
Q = np.array(Q).reshape((c, h, w))
MAP = np.argmax(Q, axis=0).reshape((h,w)).astype(np.float32)
return MAP
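# A minimal usage sketch for `densecrf` (illustrative only; the random image and
# the hand-drawn coarse mask below are stand-ins for real inputs):
def _example_densecrf():
    rgb = (np.random.rand(120, 160, 3) * 255).astype(np.uint8)  # H x W x 3, uint8
    coarse = np.zeros((60, 80), dtype=np.float32)               # low-res binary mask
    coarse[20:40, 30:60] = 1.0
    # The coarse mask is upsampled to the image size and refined by dense CRF inference.
    return densecrf(rgb, coarse)                                # H x W array of {0, 1}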
| CutLER-main | maskcut/crf.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#!/usr/bin/env python3
import os
import sys
sys.path.append('../')
import argparse
import numpy as np
from tqdm import tqdm
import re
import datetime
import PIL
import PIL.Image as Image
import torch
import torch.nn.functional as F
from torchvision import transforms
from itertools import groupby
from pycocotools import mask
import pycocotools.mask as mask_util
from scipy import ndimage
from scipy.linalg import eigh
import json
import dino # model
from third_party.TokenCut.unsupervised_saliency_detection import utils, metric
from third_party.TokenCut.unsupervised_saliency_detection.object_discovery import detect_box
# Modified by XuDong Wang from third_party/TokenCut
# bilateral_solver codes are modified based on https://github.com/poolio/bilateral_solver/blob/master/notebooks/bilateral_solver.ipynb
# from third_party.TokenCut.unsupervised_saliency_detection.bilateral_solver import BilateralSolver, BilateralGrid
# crf codes are modified based on https://github.com/lucasb-eyer/pydensecrf/blob/master/pydensecrf/tests/test_dcrf.py
from crf import densecrf
# Image transformation applied to all images
ToTensor = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(
(0.485, 0.456, 0.406),
(0.229, 0.224, 0.225)),])
def get_affinity_matrix(feats, tau, eps=1e-5):
# get affinity matrix via measuring patch-wise cosine similarity
feats = F.normalize(feats, p=2, dim=0)
A = (feats.transpose(0,1) @ feats).cpu().numpy()
# convert the affinity matrix to a binary one.
A = A > tau
A = np.where(A.astype(float) == 0, eps, A)
d_i = np.sum(A, axis=1)
D = np.diag(d_i)
return A, D
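# A minimal sketch of the patch-affinity construction (illustrative only; random
# features stand in for DINO patch features of shape [feat_dim, num_patches]):
def _example_affinity(feat_dim=384, num_patches=900, tau=0.15):
    feats = torch.randn(feat_dim, num_patches)
    A, D = get_affinity_matrix(feats, tau)
    # A is the (eps-filled) binarized num_patches x num_patches similarity graph and
    # D its diagonal degree matrix; (D - A, D) defines the generalized eigenproblem
    # solved by `second_smallest_eigenvector` below.
    return A.shape, D.shape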
def second_smallest_eigenvector(A, D):
# get the second smallest eigenvector from affinity matrix
_, eigenvectors = eigh(D-A, D, subset_by_index=[1,2])
eigenvec = np.copy(eigenvectors[:, 0])
second_smallest_vec = eigenvectors[:, 0]
return eigenvec, second_smallest_vec
def get_salient_areas(second_smallest_vec):
# get the area corresponding to salient objects.
avg = np.sum(second_smallest_vec) / len(second_smallest_vec)
bipartition = second_smallest_vec > avg
return bipartition
def check_num_fg_corners(bipartition, dims):
# check number of corners belonging to the foreground
bipartition_ = bipartition.reshape(dims)
top_l, top_r, bottom_l, bottom_r = bipartition_[0][0], bipartition_[0][-1], bipartition_[-1][0], bipartition_[-1][-1]
nc = int(top_l) + int(top_r) + int(bottom_l) + int(bottom_r)
return nc
def get_masked_affinity_matrix(painting, feats, mask, ps):
# mask out affinity matrix based on the painting matrix
dim, num_patch = feats.size()[0], feats.size()[1]
painting = painting + mask.unsqueeze(0)
painting[painting > 0] = 1
painting[painting <= 0] = 0
feats = feats.clone().view(dim, ps, ps)
feats = ((1 - painting) * feats).view(dim, num_patch)
return feats, painting
def maskcut_forward(feats, dims, scales, init_image_size, tau=0, N=3):
"""
Implementation of MaskCut.
Inputs
feats: the pixel/patch features of an image
dims: dimension of the map from which the features are used
scales: from image to map scale
init_image_size: size of the image
tau: threshold for graph construction
N: number of pseudo-masks per image.
"""
bipartitions = []
eigvecs = []
for i in range(N):
if i == 0:
painting = torch.from_numpy(np.zeros(dims)).cuda()
else:
feats, painting = get_masked_affinity_matrix(painting, feats, current_mask, ps)
# construct the affinity matrix
A, D = get_affinity_matrix(feats, tau)
# get the second smallest eigenvector
eigenvec, second_smallest_vec = second_smallest_eigenvector(A, D)
# get salient area
bipartition = get_salient_areas(second_smallest_vec)
# check if we should reverse the partition based on:
# 1) peak of the 2nd smallest eigvec 2) object centric bias
seed = np.argmax(np.abs(second_smallest_vec))
nc = check_num_fg_corners(bipartition, dims)
if nc >= 3:
reverse = True
else:
reverse = bipartition[seed] != 1
if reverse:
# reverse bipartition, eigenvector and get new seed
eigenvec = eigenvec * -1
bipartition = np.logical_not(bipartition)
seed = np.argmax(eigenvec)
else:
seed = np.argmax(second_smallest_vec)
# get pixels corresponding to the seed
bipartition = bipartition.reshape(dims).astype(float)
_, _, _, cc = detect_box(bipartition, seed, dims, scales=scales, initial_im_size=init_image_size) ## We only extract the principal object BBox
pseudo_mask = np.zeros(dims)
pseudo_mask[cc[0],cc[1]] = 1
pseudo_mask = torch.from_numpy(pseudo_mask).to('cuda')
ps = pseudo_mask.shape[0]
# check if the extra mask is heavily overlapped with the previous one or is too small.
if i >= 1:
ratio = torch.sum(pseudo_mask) / pseudo_mask.size()[0] / pseudo_mask.size()[1]
if metric.IoU(current_mask, pseudo_mask) > 0.5 or ratio <= 0.01:
pseudo_mask = np.zeros(dims)
pseudo_mask = torch.from_numpy(pseudo_mask).to('cuda')
current_mask = pseudo_mask
# mask out foreground areas in previous stages
masked_out = 0 if len(bipartitions) == 0 else np.sum(bipartitions, axis=0)
bipartition = F.interpolate(pseudo_mask.unsqueeze(0).unsqueeze(0), size=init_image_size, mode='nearest').squeeze()
bipartition_masked = bipartition.cpu().numpy() - masked_out
bipartition_masked[bipartition_masked <= 0] = 0
bipartitions.append(bipartition_masked)
# upsample the eigenvec
eigvec = second_smallest_vec.reshape(dims)
eigvec = torch.from_numpy(eigvec).to('cuda')
eigvec = F.interpolate(eigvec.unsqueeze(0).unsqueeze(0), size=init_image_size, mode='nearest').squeeze()
eigvecs.append(eigvec.cpu().numpy())
return seed, bipartitions, eigvecs
def maskcut(img_path, backbone,patch_size, tau, N=1, fixed_size=480) :
I = Image.open(img_path).convert('RGB')
bipartitions, eigvecs = [], []
I_new = I.resize((int(fixed_size), int(fixed_size)), PIL.Image.LANCZOS)
I_resize, w, h, feat_w, feat_h = utils.resize_pil(I_new, patch_size)
tensor = ToTensor(I_resize).unsqueeze(0).cuda()
feat = backbone(tensor)[0]
_, bipartition, eigvec = maskcut_forward(feat, [feat_h, feat_w], [patch_size, patch_size], [h,w], tau, N=N)
bipartitions += bipartition
eigvecs += eigvec
return bipartitions, eigvecs, I_new
def resize_binary_mask(array, new_size):
image = Image.fromarray(array.astype(np.uint8)*255)
image = image.resize(new_size)
return np.asarray(image).astype(np.bool_)
def close_contour(contour):
if not np.array_equal(contour[0], contour[-1]):
contour = np.vstack((contour, contour[0]))
return contour
def create_image_info(image_id, file_name, image_size,
date_captured=datetime.datetime.utcnow().isoformat(' '),
license_id=1, coco_url="", flickr_url=""):
"""Return image_info in COCO style
Args:
image_id: the image ID
file_name: the file name of each image
image_size: image size in the format of (width, height)
date_captured: the date this image info is created
license: license of this image
coco_url: url to COCO images if there is any
flickr_url: url to flickr if there is any
"""
image_info = {
"id": image_id,
"file_name": file_name,
"width": image_size[0],
"height": image_size[1],
"date_captured": date_captured,
"license": license_id,
"coco_url": coco_url,
"flickr_url": flickr_url
}
return image_info
def create_annotation_info(annotation_id, image_id, category_info, binary_mask,
image_size=None, bounding_box=None):
"""Return annotation info in COCO style
Args:
annotation_id: the annotation ID
image_id: the image ID
category_info: the information on categories
binary_mask: a 2D binary numpy array where '1's represent the object
file_name: the file name of each image
image_size: image size in the format of (width, height)
bounding_box: the bounding box for detection task. If bounding_box is not provided,
we will generate one according to the binary mask.
"""
upper = np.max(binary_mask)
lower = np.min(binary_mask)
thresh = upper / 2.0
binary_mask[binary_mask > thresh] = upper
binary_mask[binary_mask <= thresh] = lower
if image_size is not None:
binary_mask = resize_binary_mask(binary_mask.astype(np.uint8), image_size)
binary_mask_encoded = mask.encode(np.asfortranarray(binary_mask.astype(np.uint8)))
area = mask.area(binary_mask_encoded)
if area < 1:
return None
if bounding_box is None:
bounding_box = mask.toBbox(binary_mask_encoded)
rle = mask_util.encode(np.array(binary_mask[...,None], order="F", dtype="uint8"))[0]
rle['counts'] = rle['counts'].decode('ascii')
segmentation = rle
annotation_info = {
"id": annotation_id,
"image_id": image_id,
"category_id": category_info["id"],
"iscrowd": 0,
"area": area.tolist(),
"bbox": bounding_box.tolist(),
"segmentation": segmentation,
"width": binary_mask.shape[1],
"height": binary_mask.shape[0],
}
return annotation_info
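# A minimal usage sketch for the two COCO-record helpers above (illustrative only;
# the toy mask, ids and file name below are made up):
def _example_coco_records():
    toy_mask = np.zeros((100, 100), dtype=np.uint8)
    toy_mask[25:75, 25:75] = 1
    img_info = create_image_info(1, "n01440764/example.JPEG", (100, 100, 3))
    ann_info = create_annotation_info(1, 1, {"id": 1}, toy_mask, image_size=None)
    return img_info, ann_info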
# necessary info used for coco-style annotations
INFO = {
"description": "ImageNet-1K: pseudo-masks with MaskCut",
"url": "https://github.com/facebookresearch/CutLER",
"version": "1.0",
"year": 2022,
"contributor": "Xudong Wang",
"date_created": datetime.datetime.utcnow().isoformat(' ')
}
LICENSES = [
{
"id": 1,
"name": "Apache License",
"url": "https://github.com/facebookresearch/CutLER/blob/main/LICENSE"
}
]
# only one class, i.e. foreground
CATEGORIES = [
{
'id': 1,
'name': 'fg',
'supercategory': 'fg',
},
]
convert = lambda text: int(text) if text.isdigit() else text.lower()
natrual_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
output = {
"info": INFO,
"licenses": LICENSES,
"categories": CATEGORIES,
"images": [],
"annotations": []}
category_info = {
"is_crowd": 0,
"id": 1
}
def get_args_parser():
parser = argparse.ArgumentParser('MaskCut script', add_help=False)
# default arguments
parser.add_argument('--out-dir', type=str, help='output directory')
parser.add_argument('--vit-arch', type=str, default='small', choices=['base', 'small'], help='which architecture')
parser.add_argument('--vit-feat', type=str, default='k', choices=['k', 'q', 'v', 'kqv'], help='which features')
parser.add_argument('--patch-size', type=int, default=16, choices=[16, 8], help='patch size')
parser.add_argument('--nb-vis', type=int, default=20, choices=[1, 200], help='nb of visualization')
parser.add_argument('--img-path', type=str, default=None, help='single image visualization')
# additional arguments
parser.add_argument('--dataset-path', type=str, default="imagenet/train/", help='path to the dataset')
parser.add_argument('--tau', type=float, default=0.2, help='threshold used for producing binary graph')
parser.add_argument('--num-folder-per-job', type=int, default=1, help='the number of folders each job processes')
parser.add_argument('--job-index', type=int, default=0, help='the index of the job (for imagenet: in the range of 0 to 1000/args.num_folder_per_job-1)')
parser.add_argument('--fixed_size', type=int, default=480, help='rescale the input images to a fixed size')
parser.add_argument('--pretrain_path', type=str, default=None, help='path to pretrained model')
parser.add_argument('--N', type=int, default=3, help='the maximum number of pseudo-masks per image')
return parser
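# Example submission through the submitit launcher that consumes this parser
# (illustrative only; the dataset and output paths are placeholders):
#   python run_with_submitit_maskcut_array.py --ngpus 1 --nodes 1 \
#       --vit-arch base --patch-size 8 --tau 0.15 --fixed_size 480 --N 3 \
#       --dataset-path imagenet/train/ --out-dir annotations/ \
#       --num-folder-per-job 2 --job-index 0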
## feature net
def main(args):
if args.pretrain_path is not None:
url = args.pretrain_path
if args.vit_arch == 'base' and args.patch_size == 8:
if args.pretrain_path is None:
url = "https://dl.fbaipublicfiles.com/dino/dino_vitbase8_pretrain/dino_vitbase8_pretrain.pth"
feat_dim = 768
elif args.vit_arch == 'small' and args.patch_size == 8:
if args.pretrain_path is None:
url = "https://dl.fbaipublicfiles.com/dino/dino_deitsmall8_300ep_pretrain/dino_deitsmall8_300ep_pretrain.pth"
feat_dim = 384
backbone = dino.ViTFeat(url, feat_dim, args.vit_arch, args.vit_feat, args.patch_size)
msg = 'Load {} pre-trained feature...'.format(args.vit_arch)
print(msg)
backbone.eval()
backbone.cuda()
img_folders = os.listdir(args.dataset_path)
if args.out_dir is not None and not os.path.exists(args.out_dir) :
os.mkdir(args.out_dir)
start_idx = max(args.job_index*args.num_folder_per_job, 0)
end_idx = min((args.job_index+1)*args.num_folder_per_job, len(img_folders))
image_id, segmentation_id = 1, 1
image_names = []
for img_folder in img_folders[start_idx:end_idx]:
args.img_dir = os.path.join(args.dataset_path, img_folder)
img_list = sorted(os.listdir(args.img_dir))
for img_name in tqdm(img_list) :
if image_id >= 20:
break
# get image path
img_path = os.path.join(args.img_dir, img_name)
# get bipartitions for each image
try:
bipartitions, _, I_new = maskcut(img_path, backbone, args.patch_size, args.tau, \
N=args.N, fixed_size=args.fixed_size)
except Exception:
print(f'Skipping {img_name}')
continue
I = Image.open(img_path).convert('RGB')
width, height = I.size
for idx, bipartition in enumerate(bipartitions):
# post-process pseudo-masks with CRF
pseudo_mask = densecrf(np.array(I_new), bipartition)
pseudo_mask = ndimage.binary_fill_holes(pseudo_mask>=0.5)
# filter out the mask that have a very different pseudo-mask after the CRF
mask1 = torch.from_numpy(bipartition).cuda()
mask2 = torch.from_numpy(pseudo_mask).cuda()
if metric.IoU(mask1, mask2) < 0.5:
pseudo_mask = pseudo_mask * -1
# construct binary pseudo-masks
pseudo_mask[pseudo_mask < 0] = 0
pseudo_mask = Image.fromarray(np.uint8(pseudo_mask*255))
pseudo_mask = np.asarray(pseudo_mask.resize((width, height)))
# create coco-style image info
if img_name not in image_names:
image_info = create_image_info(
image_id, "{}/{}".format(img_folder, img_name), (height, width, 3))
output["images"].append(image_info)
image_names.append(img_name)
# create coco-style annotation info
annotation_info = create_annotation_info(
segmentation_id, image_id, category_info, pseudo_mask.astype(np.uint8), None)
if annotation_info is not None:
output["annotations"].append(annotation_info)
segmentation_id += 1
image_id += 1
# dump annotations
if len(img_folders) == args.num_folder_per_job and args.job_index == 0:
json_name = '{}/imagenet_train_fixsize{}_tau{}_N{}.json'.format(args.out_dir, args.fixed_size, args.tau, args.N)
else:
json_name = '{}/imagenet_train_fixsize{}_tau{}_N{}_{}_{}.json'.format(args.out_dir, args.fixed_size, args.tau, args.N, start_idx, end_idx)
with open(json_name, 'w') as output_json_file:
json.dump(output, output_json_file)
print(f'dumping {json_name}')
print("Done: {} images; {} anns.".format(len(output['images']), len(output['annotations'])))
| CutLER-main | maskcut/maskcut_with_submitit.py |
"""
download pretrained weights to ./weights
wget https://dl.fbaipublicfiles.com/dino/dino_vitbase8_pretrain/dino_vitbase8_pretrain.pth
wget https://dl.fbaipublicfiles.com/dino/dino_deitsmall8_300ep_pretrain/dino_deitsmall8_300ep_pretrain.pth
"""
import sys
sys.path.append("maskcut")
import numpy as np
import PIL.Image as Image
import torch
from scipy import ndimage
from colormap import random_color
import dino
from third_party.TokenCut.unsupervised_saliency_detection import metric
from crf import densecrf
from maskcut import maskcut
from cog import BasePredictor, Input, Path
class Predictor(BasePredictor):
def setup(self):
"""Load the model into memory to make running multiple predictions efficient"""
# DINO pre-trained model
vit_features = "k"
self.patch_size = 8
# adapted dino.ViTFeat to load from local pretrained_path
self.backbone_base = dino.ViTFeat(
"weights/dino_vitbase8_pretrain.pth",
768,
"base",
vit_features,
self.patch_size,
)
self.backbone_small = dino.ViTFeat(
"weights/dino_deitsmall8_300ep_pretrain.pth",
384,
"small",
vit_features,
self.patch_size,
)
self.backbone_base.eval()
self.backbone_base.cuda()
self.backbone_small.eval()
self.backbone_small.cuda()
def predict(
self,
image: Path = Input(
description="Input image",
),
model: str = Input(
description="Choose the model architecture",
default="base",
choices=["small", "base"]
),
n_pseudo_masks: int = Input(
description="The maximum number of pseudo-masks per image",
default=3,
),
tau: float = Input(
description="Threshold used for producing binary graph",
default=0.15,
),
) -> Path:
"""Run a single prediction on the model"""
backbone = self.backbone_base if model == "base" else self.backbone_small
# MaskCut hyperparameters
fixed_size = 480
# get pseudo-masks with MaskCut
bipartitions, _, I_new = maskcut(
str(image),
backbone,
self.patch_size,
tau,
N=n_pseudo_masks,
fixed_size=fixed_size,
cpu=False,
)
I = Image.open(str(image)).convert("RGB")
width, height = I.size
pseudo_mask_list = []
for idx, bipartition in enumerate(bipartitions):
# post-process pseudo-masks with CRF
pseudo_mask = densecrf(np.array(I_new), bipartition)
pseudo_mask = ndimage.binary_fill_holes(pseudo_mask >= 0.5)
# filter out the mask that have a very different pseudo-mask after the CRF
mask1 = torch.from_numpy(bipartition).cuda()
mask2 = torch.from_numpy(pseudo_mask).cuda()
if metric.IoU(mask1, mask2) < 0.5:
pseudo_mask = pseudo_mask * -1
# construct binary pseudo-masks
pseudo_mask[pseudo_mask < 0] = 0
pseudo_mask = Image.fromarray(np.uint8(pseudo_mask * 255))
pseudo_mask = np.asarray(pseudo_mask.resize((width, height)))
pseudo_mask = pseudo_mask.astype(np.uint8)
upper = np.max(pseudo_mask)
lower = np.min(pseudo_mask)
thresh = upper / 2.0
pseudo_mask[pseudo_mask > thresh] = upper
pseudo_mask[pseudo_mask <= thresh] = lower
pseudo_mask_list.append(pseudo_mask)
out = np.array(I)
for pseudo_mask in pseudo_mask_list:
out = vis_mask(out, pseudo_mask, random_color(rgb=True))
output_path = f"/tmp/out.png"
out.save(str(output_path))
return Path(output_path)
def vis_mask(input, mask, mask_color):
fg = mask > 0.5
rgb = np.copy(input)
rgb[fg] = (rgb[fg] * 0.3 + np.array(mask_color) * 0.7).astype(np.uint8)
return Image.fromarray(rgb)
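# A minimal usage sketch for `vis_mask` (illustrative only; random data stands in
# for a real image and one of the pseudo-masks produced above):
def _example_vis_mask():
    img = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)
    m = np.zeros((64, 64), dtype=np.uint8)
    m[16:48, 16:48] = 1
    return vis_mask(img, m, random_color(rgb=True))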
| CutLER-main | maskcut/predict.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
"""
A script to run multinode training with submitit.
"""
import sys
sys.path.append('./')
sys.path.append('./MaskCut')
sys.path.append('./third_party')
import argparse
import os
import uuid
from pathlib import Path
import maskcut_with_submitit as main_func
import submitit
import copy
def parse_args():
parent_parser = main_func.get_args_parser()
parser = argparse.ArgumentParser("Submitit for MaskCut", parents=[parent_parser])
parser.add_argument("--ngpus", default=1, type=int, help="Number of gpus to request on each node")
parser.add_argument("--nodes", default=1, type=int, help="Number of nodes to request")
parser.add_argument("--timeout", default=1400, type=int, help="Duration of the job")
parser.add_argument("--job_dir", default="", type=str, help="Job dir. Leave empty for automatic.")
parser.add_argument("--partition", default="learnfair", type=str, help="Partition where to submit")
parser.add_argument("--use_volta32", action='store_true', help="Big models? Use this")
parser.add_argument('--comment', default="", type=str,
help='Comment to pass to scheduler, e.g. priority message')
# Remove the following if the main file already has them
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
parser.add_argument('--output_dir', default='',
help='path where to save, empty for no saving')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--gpu', default=0, type=int)
parser.add_argument('--rank', default=0, type=int)
parser.add_argument('--resume', default='', help='resume from checkpoint')
parser.add_argument('--tolerance', default=1, type=int, help='tolerance for finding contours')
return parser.parse_args()
def get_shared_folder() -> Path:
user = os.getenv("USER")
if Path("/checkpoint/").is_dir():
p = Path(f"/checkpoint/{user}/experiments/maskcut/")
p.mkdir(exist_ok=True)
return p
raise RuntimeError("No shared folder available")
def get_init_file():
# Init file must not exist, but its parent dir must exist.
os.makedirs(str(get_shared_folder()), exist_ok=True)
init_file = get_shared_folder() / f"{uuid.uuid4().hex}_init"
if init_file.exists():
os.remove(str(init_file))
return init_file
# Using a for loop for getting the array job and submit all jobs in one single array
class Trainer(object):
def __init__(self, args):
self.args = args
def __call__(self):
self._setup_gpu_args()
main_func.main(self.args)
def checkpoint(self):
import os
import submitit
from pathlib import Path
self.args.dist_url = get_init_file().as_uri()
checkpoint_file = os.path.join(self.args.output_dir, "checkpoint.pth")
if os.path.exists(checkpoint_file):
self.args.resume = checkpoint_file
print("Requeuing ", self.args)
empty_trainer = type(self)(self.args)
return submitit.helpers.DelayedSubmission(empty_trainer)
def _setup_gpu_args(self):
import submitit
from pathlib import Path
job_env = submitit.JobEnvironment()
self.args.output_dir = Path(str(self.args.output_dir).replace("%j", str(job_env.job_id)))
self.args.gpu = job_env.local_rank
self.args.rank = job_env.global_rank
self.args.world_size = job_env.num_tasks
print(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}")
def main():
args = parse_args()
if args.job_dir == "":
args.job_dir = get_shared_folder() / "%j"
# Note that the folder will depend on the job_id, to easily track experiments
executor = submitit.AutoExecutor(folder=args.job_dir, slurm_max_num_timeout=30)
num_gpus_per_node = args.ngpus
nodes = args.nodes
timeout_min = args.timeout
partition = args.partition
kwargs = {}
if args.use_volta32:
kwargs['slurm_constraint'] = 'volta32gb'
if args.comment:
kwargs['slurm_comment'] = args.comment
executor.update_parameters(
mem_gb=40 * num_gpus_per_node, # 40
gpus_per_node=num_gpus_per_node,
tasks_per_node=num_gpus_per_node, # one task per GPU
cpus_per_task=8, # default 8
nodes=nodes,
timeout_min=timeout_min, # max is 60 * 72
# Below are cluster dependent parameters
slurm_partition=partition,
slurm_signal_delay_s=120,
**kwargs
)
executor.update_parameters(name="MaskCut")
# Since it is often necessary to submit over 100 jobs simultaneously,
# submitting them as a single job array is more efficient.
args.dist_url = get_init_file().as_uri()
args.output_dir = args.job_dir
print(args.output_dir)
# list_folders = list(range(0, 500))
end_idx = (1000 - args.job_index) // args.num_folder_per_job + 1
list_folders = list(range(args.job_index, end_idx))
jobs = []
args_list = []
for folder_index in list_folders:
args_copy = copy.deepcopy(args)
args_copy.job_index = folder_index
args_list.append(args_copy)
with executor.batch():
for args in args_list:
trainer = Trainer(args)
job = executor.submit(trainer)
jobs.append(job)
for job in jobs:
print("Submitted job_id:", job.job_id)
if __name__ == "__main__":
main() | CutLER-main | maskcut/run_with_submitit_maskcut_array.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# merge all ImageNet annotation files as a single one.
import os
import json
import argparse
if __name__ == "__main__":
# load model arguments
parser = argparse.ArgumentParser(description='Merge json files')
parser.add_argument('--base-dir', type=str,
default='annotations/',
help='Dir to the generated annotation files with MaskCut')
parser.add_argument('--save-path', type=str, default="imagenet_train_fixsize480_tau0.15_N3.json",
help='Path to save the merged annotation file')
# following arguments should be consistent with maskcut.py or maskcut_with_submitit.py (if using submitit)
parser.add_argument('--num-folder-per-job', type=int, default=1,
help='Number of folders per json file')
parser.add_argument('--fixed-size', type=int, default=480,
help='rescale the input images to a fixed size')
parser.add_argument('--tau', type=float, default=0.15, help='threshold used for producing binary graph')
parser.add_argument('--N', type=int, default=3, help='the maximum number of pseudo-masks per image')
args = parser.parse_args()
base_name = 'imagenet_train_fixsize{}_tau{}_N{}'.format(args.fixed_size, args.tau, args.N)
start_idx = 0
every_k = args.num_folder_per_job
missed_folders = []
tobe_merged_ann_dicts = []
# check if pseudo-masks for all 1000 ImageNet-1K folders are available.
while start_idx < 1000:
end_idx = start_idx + every_k
filename = "{}_{}_{}.json".format(base_name, start_idx, end_idx)
tobe_merged = os.path.join(args.base_dir, filename)
if not os.path.isfile(tobe_merged):
end_idx = start_idx + 1
tobe_merged_ = "{}_{}_{}.json".format(base_name, start_idx, end_idx)
if not os.path.isfile(tobe_merged_):
missed_folders.append(start_idx)
start_idx += 1
continue
else:
tobe_merged = tobe_merged_
start_idx += 1
else:
start_idx += every_k
tobe_merged_ann_dict = json.load(open(tobe_merged))
tobe_merged_ann_dicts.append(tobe_merged_ann_dict)
print("Warning: these folders are not found: ", missed_folders)
# filter out repeated image info
for idx, ann_dict in enumerate(tobe_merged_ann_dicts):
images = []
images_ids = []
for image in ann_dict['images']:
if image['id'] in images_ids:
continue
else:
images.append(image)
images_ids.append(image['id'])
ann_dict['images'] = images
# re-generate image_id and segment_id, and combine annotation info and image info
# from all annotation files
base_ann_dict = tobe_merged_ann_dicts[0]
image_id = base_ann_dict['images'][-1]['id'] + 1
segment_id = base_ann_dict['annotations'][-1]['id'] + 1
segment_id_list = [ann['id'] for ann in base_ann_dict['annotations']]
for tobe_merged_ann_dict in tobe_merged_ann_dicts[1:]:
file_name_and_id = {}
for i, image in enumerate(tobe_merged_ann_dict['images']):
file_name_and_id[str(image['id'])] = image_id
image['id'] = image_id
base_ann_dict['images'].append(image)
image_id = image_id + 1
for i, annotation_info in enumerate(tobe_merged_ann_dict['annotations']):
annotation_info["image_id"] = file_name_and_id[str(annotation_info["image_id"])]
annotation_info["id"] = segment_id
annotation_info["iscrowd"] = 0
segment_id_list.append(segment_id)
base_ann_dict['annotations'].append(annotation_info)
segment_id = segment_id + 1
segment_id = 1
for ann in base_ann_dict['annotations']:
ann["id"] = segment_id
segment_id += 1
# save the final json file.
anns = [ann['id'] for ann in base_ann_dict['annotations']]
anns_image_id = [ann['image_id'] for ann in base_ann_dict['annotations']]
json.dump(base_ann_dict, open(args.save_path, 'w'))
print("Done: {} images; {} anns.".format(len(base_ann_dict['images']), len(base_ann_dict['annotations'])))
| CutLER-main | maskcut/merge_jsons.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
import os
import sys
sys.path.append('../')
import argparse
import numpy as np
from tqdm import tqdm
import re
import datetime
import PIL
import PIL.Image as Image
import torch
import torch.nn.functional as F
from torchvision import transforms
from pycocotools import mask
import pycocotools.mask as mask_util
from scipy import ndimage
from scipy.linalg import eigh
import json
import dino
# modified by Xudong Wang based on third_party/TokenCut
from third_party.TokenCut.unsupervised_saliency_detection import utils, metric
from third_party.TokenCut.unsupervised_saliency_detection.object_discovery import detect_box
# bilateral_solver codes are modified based on https://github.com/poolio/bilateral_solver/blob/master/notebooks/bilateral_solver.ipynb
# from third_party.TokenCut.unsupervised_saliency_detection.bilateral_solver import BilateralSolver, BilateralGrid
# crf codes are modified based on https://github.com/lucasb-eyer/pydensecrf/blob/master/pydensecrf/tests/test_dcrf.py
from crf import densecrf
# Image transformation applied to all images
ToTensor = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(
(0.485, 0.456, 0.406),
(0.229, 0.224, 0.225)),])
def get_affinity_matrix(feats, tau, eps=1e-5):
# get affinity matrix via measuring patch-wise cosine similarity
feats = F.normalize(feats, p=2, dim=0)
A = (feats.transpose(0,1) @ feats).cpu().numpy()
# convert the affinity matrix to a binary one.
A = A > tau
A = np.where(A.astype(float) == 0, eps, A)
d_i = np.sum(A, axis=1)
D = np.diag(d_i)
return A, D
def second_smallest_eigenvector(A, D):
# get the second smallest eigenvector from affinity matrix
_, eigenvectors = eigh(D-A, D, subset_by_index=[1,2])
eigenvec = np.copy(eigenvectors[:, 0])
second_smallest_vec = eigenvectors[:, 0]
return eigenvec, second_smallest_vec
def get_salient_areas(second_smallest_vec):
# get the area corresponding to salient objects.
avg = np.sum(second_smallest_vec) / len(second_smallest_vec)
bipartition = second_smallest_vec > avg
return bipartition
def check_num_fg_corners(bipartition, dims):
# check number of corners belonging to the foreground
bipartition_ = bipartition.reshape(dims)
top_l, top_r, bottom_l, bottom_r = bipartition_[0][0], bipartition_[0][-1], bipartition_[-1][0], bipartition_[-1][-1]
nc = int(top_l) + int(top_r) + int(bottom_l) + int(bottom_r)
return nc
def get_masked_affinity_matrix(painting, feats, mask, ps):
# mask out affinity matrix based on the painting matrix
dim, num_patch = feats.size()[0], feats.size()[1]
painting = painting + mask.unsqueeze(0)
painting[painting > 0] = 1
painting[painting <= 0] = 0
feats = feats.clone().view(dim, ps, ps)
feats = ((1 - painting) * feats).view(dim, num_patch)
return feats, painting
def maskcut_forward(feats, dims, scales, init_image_size, tau=0, N=3, cpu=False):
"""
Implementation of MaskCut.
Inputs
feats: the pixel/patch features of an image
dims: dimension of the map from which the features are used
scales: from image to map scale
init_image_size: size of the image
tau: threshold for graph construction
N: number of pseudo-masks per image.
"""
bipartitions = []
eigvecs = []
for i in range(N):
if i == 0:
painting = torch.from_numpy(np.zeros(dims))
if not cpu: painting = painting.cuda()
else:
feats, painting = get_masked_affinity_matrix(painting, feats, current_mask, ps)
# construct the affinity matrix
A, D = get_affinity_matrix(feats, tau)
# get the second smallest eigenvector
eigenvec, second_smallest_vec = second_smallest_eigenvector(A, D)
# get salient area
bipartition = get_salient_areas(second_smallest_vec)
# check if we should reverse the partition based on:
# 1) peak of the 2nd smallest eigvec 2) object centric bias
seed = np.argmax(np.abs(second_smallest_vec))
nc = check_num_fg_corners(bipartition, dims)
if nc >= 3:
reverse = True
else:
reverse = bipartition[seed] != 1
if reverse:
# reverse bipartition, eigenvector and get new seed
eigenvec = eigenvec * -1
bipartition = np.logical_not(bipartition)
seed = np.argmax(eigenvec)
else:
seed = np.argmax(second_smallest_vec)
# get pixels corresponding to the seed
bipartition = bipartition.reshape(dims).astype(float)
_, _, _, cc = detect_box(bipartition, seed, dims, scales=scales, initial_im_size=init_image_size)
pseudo_mask = np.zeros(dims)
pseudo_mask[cc[0],cc[1]] = 1
pseudo_mask = torch.from_numpy(pseudo_mask)
if not cpu: pseudo_mask = pseudo_mask.to('cuda')
ps = pseudo_mask.shape[0]
# check if the extra mask is heavily overlapped with the previous one or is too small.
if i >= 1:
ratio = torch.sum(pseudo_mask) / pseudo_mask.size()[0] / pseudo_mask.size()[1]
if metric.IoU(current_mask, pseudo_mask) > 0.5 or ratio <= 0.01:
pseudo_mask = np.zeros(dims)
pseudo_mask = torch.from_numpy(pseudo_mask)
if not cpu: pseudo_mask = pseudo_mask.to('cuda')
current_mask = pseudo_mask
# mask out foreground areas in previous stages
masked_out = 0 if len(bipartitions) == 0 else np.sum(bipartitions, axis=0)
bipartition = F.interpolate(pseudo_mask.unsqueeze(0).unsqueeze(0), size=init_image_size, mode='nearest').squeeze()
bipartition_masked = bipartition.cpu().numpy() - masked_out
bipartition_masked[bipartition_masked <= 0] = 0
bipartitions.append(bipartition_masked)
# upsample the eigenvec
eigvec = second_smallest_vec.reshape(dims)
eigvec = torch.from_numpy(eigvec)
if not cpu: eigvec = eigvec.to('cuda')
eigvec = F.interpolate(eigvec.unsqueeze(0).unsqueeze(0), size=init_image_size, mode='nearest').squeeze()
eigvecs.append(eigvec.cpu().numpy())
return seed, bipartitions, eigvecs
def maskcut(img_path, backbone,patch_size, tau, N=1, fixed_size=480, cpu=False) :
I = Image.open(img_path).convert('RGB')
bipartitions, eigvecs = [], []
I_new = I.resize((int(fixed_size), int(fixed_size)), PIL.Image.LANCZOS)
I_resize, w, h, feat_w, feat_h = utils.resize_pil(I_new, patch_size)
tensor = ToTensor(I_resize).unsqueeze(0)
if not cpu: tensor = tensor.cuda()
feat = backbone(tensor)[0]
_, bipartition, eigvec = maskcut_forward(feat, [feat_h, feat_w], [patch_size, patch_size], [h,w], tau, N=N, cpu=cpu)
bipartitions += bipartition
eigvecs += eigvec
return bipartitions, eigvecs, I_new
def resize_binary_mask(array, new_size):
image = Image.fromarray(array.astype(np.uint8)*255)
image = image.resize(new_size)
return np.asarray(image).astype(np.bool_)
def close_contour(contour):
if not np.array_equal(contour[0], contour[-1]):
contour = np.vstack((contour, contour[0]))
return contour
def create_image_info(image_id, file_name, image_size,
date_captured=datetime.datetime.utcnow().isoformat(' '),
license_id=1, coco_url="", flickr_url=""):
"""Return image_info in COCO style
Args:
image_id: the image ID
file_name: the file name of each image
image_size: image size in the format of (width, height)
date_captured: the date this image info is created
license: license of this image
coco_url: url to COCO images if there is any
flickr_url: url to flickr if there is any
"""
image_info = {
"id": image_id,
"file_name": file_name,
"width": image_size[0],
"height": image_size[1],
"date_captured": date_captured,
"license": license_id,
"coco_url": coco_url,
"flickr_url": flickr_url
}
return image_info
def create_annotation_info(annotation_id, image_id, category_info, binary_mask,
image_size=None, bounding_box=None):
"""Return annotation info in COCO style
Args:
annotation_id: the annotation ID
image_id: the image ID
category_info: the information on categories
binary_mask: a 2D binary numpy array where '1's represent the object
file_name: the file name of each image
image_size: image size in the format of (width, height)
bounding_box: the bounding box for detection task. If bounding_box is not provided,
we will generate one according to the binary mask.
"""
upper = np.max(binary_mask)
lower = np.min(binary_mask)
thresh = upper / 2.0
binary_mask[binary_mask > thresh] = upper
binary_mask[binary_mask <= thresh] = lower
if image_size is not None:
binary_mask = resize_binary_mask(binary_mask.astype(np.uint8), image_size)
binary_mask_encoded = mask.encode(np.asfortranarray(binary_mask.astype(np.uint8)))
area = mask.area(binary_mask_encoded)
if area < 1:
return None
if bounding_box is None:
bounding_box = mask.toBbox(binary_mask_encoded)
rle = mask_util.encode(np.array(binary_mask[...,None], order="F", dtype="uint8"))[0]
rle['counts'] = rle['counts'].decode('ascii')
segmentation = rle
annotation_info = {
"id": annotation_id,
"image_id": image_id,
"category_id": category_info["id"],
"iscrowd": 0,
"area": area.tolist(),
"bbox": bounding_box.tolist(),
"segmentation": segmentation,
"width": binary_mask.shape[1],
"height": binary_mask.shape[0],
}
return annotation_info
# necessary info used for coco-style annotations
INFO = {
"description": "ImageNet-1K: pseudo-masks with MaskCut",
"url": "https://github.com/facebookresearch/CutLER",
"version": "1.0",
"year": 2023,
"contributor": "Xudong Wang",
"date_created": datetime.datetime.utcnow().isoformat(' ')
}
LICENSES = [
{
"id": 1,
"name": "Apache License",
"url": "https://github.com/facebookresearch/CutLER/blob/main/LICENSE"
}
]
# only one class, i.e. foreground
CATEGORIES = [
{
'id': 1,
'name': 'fg',
'supercategory': 'fg',
},
]
convert = lambda text: int(text) if text.isdigit() else text.lower()
natrual_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
output = {
"info": INFO,
"licenses": LICENSES,
"categories": CATEGORIES,
"images": [],
"annotations": []}
category_info = {
"is_crowd": 0,
"id": 1
}
if __name__ == "__main__":
parser = argparse.ArgumentParser('MaskCut script')
# default arguments
parser.add_argument('--out-dir', type=str, help='output directory')
parser.add_argument('--vit-arch', type=str, default='small', choices=['base', 'small'], help='which architecture')
parser.add_argument('--vit-feat', type=str, default='k', choices=['k', 'q', 'v', 'kqv'], help='which features')
parser.add_argument('--patch-size', type=int, default=16, choices=[16, 8], help='patch size')
parser.add_argument('--nb-vis', type=int, default=20, choices=[1, 200], help='nb of visualization')
parser.add_argument('--img-path', type=str, default=None, help='single image visualization')
# additional arguments
parser.add_argument('--dataset-path', type=str, default="imagenet/train/", help='path to the dataset')
parser.add_argument('--tau', type=float, default=0.2, help='threshold used for producing binary graph')
parser.add_argument('--num-folder-per-job', type=int, default=1, help='the number of folders each job processes')
parser.add_argument('--job-index', type=int, default=0, help='the index of the job (for imagenet: in the range of 0 to 1000/args.num_folder_per_job-1)')
parser.add_argument('--fixed_size', type=int, default=480, help='rescale the input images to a fixed size')
parser.add_argument('--pretrain_path', type=str, default=None, help='path to pretrained model')
parser.add_argument('--N', type=int, default=3, help='the maximum number of pseudo-masks per image')
parser.add_argument('--cpu', action='store_true', help='use cpu')
args = parser.parse_args()
if args.pretrain_path is not None:
url = args.pretrain_path
if args.vit_arch == 'base' and args.patch_size == 8:
if args.pretrain_path is None:
url = "https://dl.fbaipublicfiles.com/dino/dino_vitbase8_pretrain/dino_vitbase8_pretrain.pth"
feat_dim = 768
elif args.vit_arch == 'small' and args.patch_size == 8:
if args.pretrain_path is None:
url = "https://dl.fbaipublicfiles.com/dino/dino_deitsmall8_300ep_pretrain/dino_deitsmall8_300ep_pretrain.pth"
feat_dim = 384
backbone = dino.ViTFeat(url, feat_dim, args.vit_arch, args.vit_feat, args.patch_size)
    msg = 'Loading {} pre-trained features...'.format(args.vit_arch)
print (msg)
backbone.eval()
if not args.cpu:
backbone.cuda()
img_folders = os.listdir(args.dataset_path)
if args.out_dir is not None and not os.path.exists(args.out_dir) :
os.mkdir(args.out_dir)
start_idx = max(args.job_index*args.num_folder_per_job, 0)
end_idx = min((args.job_index+1)*args.num_folder_per_job, len(img_folders))
image_id, segmentation_id = 1, 1
image_names = []
for img_folder in img_folders[start_idx:end_idx]:
args.img_dir = os.path.join(args.dataset_path, img_folder)
img_list = sorted(os.listdir(args.img_dir))
for img_name in tqdm(img_list) :
# get image path
img_path = os.path.join(args.img_dir, img_name)
# get pseudo-masks for each image using MaskCut
try:
bipartitions, _, I_new = maskcut(img_path, backbone, args.patch_size, \
args.tau, N=args.N, fixed_size=args.fixed_size, cpu=args.cpu)
            except Exception:
print(f'Skipping {img_name}')
continue
I = Image.open(img_path).convert('RGB')
width, height = I.size
for idx, bipartition in enumerate(bipartitions):
                # post-process pseudo-masks with CRF
pseudo_mask = densecrf(np.array(I_new), bipartition)
pseudo_mask = ndimage.binary_fill_holes(pseudo_mask>=0.5)
                # filter out masks that change significantly after the CRF post-processing
mask1 = torch.from_numpy(bipartition)
mask2 = torch.from_numpy(pseudo_mask)
if not args.cpu:
mask1 = mask1.cuda()
mask2 = mask2.cuda()
if metric.IoU(mask1, mask2) < 0.5:
pseudo_mask = pseudo_mask * -1
# construct binary pseudo-masks
pseudo_mask[pseudo_mask < 0] = 0
pseudo_mask = Image.fromarray(np.uint8(pseudo_mask*255))
pseudo_mask = np.asarray(pseudo_mask.resize((width, height)))
# create coco-style image info
if img_name not in image_names:
image_info = create_image_info(
image_id, "{}/{}".format(img_folder, img_name), (height, width, 3))
output["images"].append(image_info)
image_names.append(img_name)
# create coco-style annotation info
annotation_info = create_annotation_info(
segmentation_id, image_id, category_info, pseudo_mask.astype(np.uint8), None)
if annotation_info is not None:
output["annotations"].append(annotation_info)
segmentation_id += 1
image_id += 1
# save annotations
if len(img_folders) == args.num_folder_per_job and args.job_index == 0:
json_name = '{}/imagenet_train_fixsize{}_tau{}_N{}.json'.format(args.out_dir, args.fixed_size, args.tau, args.N)
else:
json_name = '{}/imagenet_train_fixsize{}_tau{}_N{}_{}_{}.json'.format(args.out_dir, args.fixed_size, args.tau, args.N, start_idx, end_idx)
with open(json_name, 'w') as output_json_file:
json.dump(output, output_json_file)
print(f'dumping {json_name}')
print("Done: {} images; {} anns.".format(len(output['images']), len(output['annotations']))) | CutLER-main | maskcut/maskcut.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
"""
Copied from the DINO repo: https://github.com/facebookresearch/dino
Mostly copy-paste from the timm library:
https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
"""
import math
import warnings
from functools import partial
import torch
import torch.nn as nn
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
# type: (Tensor, float, float, float, float) -> Tensor
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
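# Illustrative sketch (not part of the original DINO code): trunc_normal_ fills a
# tensor in place with truncated-normal samples, so every value stays within [a, b].
def _trunc_normal_example():
    w = torch.empty(256, 128)
    trunc_normal_(w, mean=0., std=0.02, a=-2., b=2.)
    assert w.min().item() >= -2. and w.max().item() <= 2.
    return w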
def drop_path(x, drop_prob: float = 0., training: bool = False):
if drop_prob == 0. or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_() # binarize
output = x.div(keep_prob) * random_tensor
return output
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
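# Illustrative sketch (not part of the original DINO code): DropPath is the identity
# at eval time and randomly zeroes (and rescales) whole samples at train time.
def _drop_path_example():
    layer = DropPath(drop_prob=0.2)
    x = torch.ones(8, 16)
    layer.eval()
    assert torch.equal(layer(x), x)  # no-op in eval mode
    layer.train()
    return layer(x)  # each row is either all zeros or all 1/(1-0.2)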
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x, attn
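# Illustrative sketch (not part of the original DINO code): the Attention module
# returns both the projected token features and the attention map over tokens.
def _attention_example():
    attn_layer = Attention(dim=64, num_heads=8)
    tokens = torch.randn(2, 16, 64)  # (batch, tokens, dim)
    out, attn = attn_layer(tokens)
    assert out.shape == (2, 16, 64)
    assert attn.shape == (2, 8, 16, 16)  # (batch, heads, tokens, tokens)
    return out, attn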
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward(self, x, return_attention=False):
y, attn = self.attn(self.norm1(x))
if return_attention:
return attn
x = x + self.drop_path(y)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class PatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
num_patches = (img_size // patch_size) * (img_size // patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x):
B, C, H, W = x.shape
x = self.proj(x).flatten(2).transpose(1, 2)
return x
class VisionTransformer(nn.Module):
""" Vision Transformer """
def __init__(self, img_size=[224], patch_size=16, in_chans=3, num_classes=0, embed_dim=768, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
drop_path_rate=0., norm_layer=nn.LayerNorm, **kwargs):
super().__init__()
self.num_features = self.embed_dim = embed_dim
self.patch_embed = PatchEmbed(
img_size=img_size[0], patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
for i in range(depth)])
self.norm = norm_layer(embed_dim)
# Classifier head
self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def interpolate_pos_encoding(self, x, w, h):
npatch = x.shape[1] - 1
N = self.pos_embed.shape[1] - 1
if npatch == N and w == h:
return self.pos_embed
class_pos_embed = self.pos_embed[:, 0]
patch_pos_embed = self.pos_embed[:, 1:]
dim = x.shape[-1]
w0 = w // self.patch_embed.patch_size
h0 = h // self.patch_embed.patch_size
# we add a small number to avoid floating point error in the interpolation
# see discussion at https://github.com/facebookresearch/dino/issues/8
w0, h0 = w0 + 0.1, h0 + 0.1
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),
scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)),
mode='bicubic',
)
assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1]
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
def prepare_tokens(self, x):
B, nc, w, h = x.shape
x = self.patch_embed(x) # patch linear embedding
# add the [CLS] token to the embed patch tokens
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
# add positional encoding to each token
x = x + self.interpolate_pos_encoding(x, w, h)
return self.pos_drop(x)
def forward(self, x):
x = self.prepare_tokens(x)
for blk in self.blocks:
x = blk(x)
x = self.norm(x)
return x[:, 0]
def get_last_selfattention(self, x):
x = self.prepare_tokens(x)
for i, blk in enumerate(self.blocks):
if i < len(self.blocks) - 1:
x = blk(x)
else:
# return attention of the last block
return blk(x, return_attention=True)
def get_intermediate_layers(self, x, n=1):
x = self.prepare_tokens(x)
# we return the output tokens from the `n` last blocks
output = []
for i, blk in enumerate(self.blocks):
x = blk(x)
if len(self.blocks) - i <= n:
output.append(self.norm(x))
return output
def vit_small(patch_size=16, **kwargs):
model = VisionTransformer(
patch_size=patch_size, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4,
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model
def vit_base(patch_size=16, **kwargs):
model = VisionTransformer(
patch_size=patch_size, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model
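# Illustrative sketch (not part of the original DINO code): a ViT-S/16 forward pass
# on a dummy batch returns the [CLS] embedding, while get_last_selfattention returns
# the attention maps of the final block.
def _vit_small_example():
    model = vit_small(patch_size=16)
    x = torch.randn(2, 3, 224, 224)
    cls_embedding = model(x)                # shape (2, 384)
    attn = model.get_last_selfattention(x)  # shape (2, 6, 197, 197)
    return cls_embedding, attn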
class ViTFeat(nn.Module):
""" Vision Transformer """
def __init__(self, pretrained_pth, feat_dim, vit_arch = 'base', vit_feat = 'k', patch_size=16):
super().__init__()
if vit_arch == 'base' :
self.model = vit_base(patch_size=patch_size, num_classes=0)
else :
self.model = vit_small(patch_size=patch_size, num_classes=0)
self.feat_dim = feat_dim
self.vit_feat = vit_feat
self.patch_size = patch_size
# state_dict = torch.load(pretrained_pth, map_location="cpu")
state_dict = torch.hub.load_state_dict_from_url(pretrained_pth)
self.model.load_state_dict(state_dict, strict=True)
print('Loading weight from {}'.format(pretrained_pth))
def forward(self, img) :
feat_out = {}
def hook_fn_forward_qkv(module, input, output):
feat_out["qkv"] = output
self.model._modules["blocks"][-1]._modules["attn"]._modules["qkv"].register_forward_hook(hook_fn_forward_qkv)
# Forward pass in the model
with torch.no_grad() :
h, w = img.shape[2], img.shape[3]
feat_h, feat_w = h // self.patch_size, w // self.patch_size
attentions = self.model.get_last_selfattention(img)
bs, nb_head, nb_token = attentions.shape[0], attentions.shape[1], attentions.shape[2]
qkv = (
feat_out["qkv"]
.reshape(bs, nb_token, 3, nb_head, -1)
.permute(2, 0, 3, 1, 4)
)
q, k, v = qkv[0], qkv[1], qkv[2]
k = k.transpose(1, 2).reshape(bs, nb_token, -1)
q = q.transpose(1, 2).reshape(bs, nb_token, -1)
v = v.transpose(1, 2).reshape(bs, nb_token, -1)
# Modality selection
if self.vit_feat == "k":
feats = k[:, 1:].transpose(1, 2).reshape(bs, self.feat_dim, feat_h * feat_w)
elif self.vit_feat == "q":
feats = q[:, 1:].transpose(1, 2).reshape(bs, self.feat_dim, feat_h * feat_w)
elif self.vit_feat == "v":
feats = v[:, 1:].transpose(1, 2).reshape(bs, self.feat_dim, feat_h * feat_w)
elif self.vit_feat == "kqv":
k = k[:, 1:].transpose(1, 2).reshape(bs, self.feat_dim, feat_h * feat_w)
q = q[:, 1:].transpose(1, 2).reshape(bs, self.feat_dim, feat_h * feat_w)
v = v[:, 1:].transpose(1, 2).reshape(bs, self.feat_dim, feat_h * feat_w)
feats = torch.cat([k, q, v], dim=1)
return feats
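# Illustrative sketch (not part of the original DINO code) of the hook pattern used
# in ViTFeat.forward: capture an intermediate module's output during a forward pass
# without modifying the model itself.
def _forward_hook_example():
    captured = {}
    def hook(module, inputs, output):
        captured["out"] = output
    layer = nn.Linear(4, 4)
    handle = layer.register_forward_hook(hook)
    layer(torch.randn(2, 4))
    handle.remove()
    return captured["out"]  # shape (2, 4)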
if __name__ == "__main__":
    vit_arch = 'base'
    vit_feat = 'k'
    patch_size = 8
    # ViTFeat expects the pretrained checkpoint URL and feature dimension first;
    # point it at the DINO ViT-B/8 checkpoint (feat_dim=768) used elsewhere in this repo.
    url = "https://dl.fbaipublicfiles.com/dino/dino_vitbase8_pretrain/dino_vitbase8_pretrain.pth"
    model = ViTFeat(url, 768, vit_arch, vit_feat, patch_size)
img = torch.cuda.FloatTensor(4, 3, 224, 224)
model.cuda()
# Forward pass in the model
feat = model(img)
print (feat.shape)
| CutLER-main | maskcut/dino.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
import os
import sys
sys.path.append('../')
import argparse
import numpy as np
import PIL.Image as Image
import torch
from torchvision import transforms
from scipy import ndimage
from detectron2.utils.colormap import random_color
import dino # model
from third_party.TokenCut.unsupervised_saliency_detection import metric
from crf import densecrf
from maskcut import maskcut
# Image transformation applied to all images
ToTensor = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(
(0.485, 0.456, 0.406),
(0.229, 0.224, 0.225)),])
def vis_mask(input, mask, mask_color) :
fg = mask > 0.5
rgb = np.copy(input)
rgb[fg] = (rgb[fg] * 0.3 + np.array(mask_color) * 0.7).astype(np.uint8)
return Image.fromarray(rgb)
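# Illustrative sketch (not part of the original demo): overlay a dummy binary mask
# on a dummy RGB image with vis_mask; the mask color values are arbitrary.
def _vis_mask_example():
    rgb = np.zeros((8, 8, 3), dtype=np.uint8)
    m = np.zeros((8, 8), dtype=np.uint8)
    m[2:6, 2:6] = 1
    return vis_mask(rgb, m, mask_color=(255, 0, 0))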
if __name__ == "__main__":
parser = argparse.ArgumentParser('MaskCut Demo')
# default arguments
parser.add_argument('--out-dir', type=str, help='output directory')
parser.add_argument('--vit-arch', type=str, default='small', choices=['base', 'small'], help='which architecture')
parser.add_argument('--vit-feat', type=str, default='k', choices=['k', 'q', 'v', 'kqv'], help='which features')
parser.add_argument('--patch-size', type=int, default=8, choices=[16, 8], help='patch size')
parser.add_argument('--img-path', type=str, default=None, help='single image visualization')
parser.add_argument('--tau', type=float, default=0.15, help='threshold used for producing binary graph')
# additional arguments
parser.add_argument('--fixed_size', type=int, default=480, help='rescale the input images to a fixed size')
parser.add_argument('--pretrain_path', type=str, default=None, help='path to pretrained model')
parser.add_argument('--N', type=int, default=3, help='the maximum number of pseudo-masks per image')
parser.add_argument('--cpu', action='store_true', help='use cpu')
parser.add_argument('--output_path', type=str, default='', help='path to save outputs')
args = parser.parse_args()
print (args)
if args.pretrain_path is not None:
url = args.pretrain_path
if args.vit_arch == 'base' and args.patch_size == 8:
if args.pretrain_path is None:
url = "https://dl.fbaipublicfiles.com/dino/dino_vitbase8_pretrain/dino_vitbase8_pretrain.pth"
feat_dim = 768
elif args.vit_arch == 'small' and args.patch_size == 8:
if args.pretrain_path is None:
url = "https://dl.fbaipublicfiles.com/dino/dino_deitsmall8_300ep_pretrain/dino_deitsmall8_300ep_pretrain.pth"
feat_dim = 384
backbone = dino.ViTFeat(url, feat_dim, args.vit_arch, args.vit_feat, args.patch_size)
    msg = 'Loading {} pre-trained features...'.format(args.vit_arch)
print (msg)
backbone.eval()
if not args.cpu:
backbone.cuda()
bipartitions, _, I_new = maskcut(args.img_path, backbone, args.patch_size, args.tau, \
N=args.N, fixed_size=args.fixed_size, cpu=args.cpu)
I = Image.open(args.img_path).convert('RGB')
width, height = I.size
pseudo_mask_list = []
for idx, bipartition in enumerate(bipartitions):
        # post-process pseudo-masks with CRF
pseudo_mask = densecrf(np.array(I_new), bipartition)
pseudo_mask = ndimage.binary_fill_holes(pseudo_mask>=0.5)
        # filter out masks that change significantly after the CRF post-processing
if not args.cpu:
mask1 = torch.from_numpy(bipartition).cuda()
mask2 = torch.from_numpy(pseudo_mask).cuda()
else:
mask1 = torch.from_numpy(bipartition)
mask2 = torch.from_numpy(pseudo_mask)
if metric.IoU(mask1, mask2) < 0.5:
pseudo_mask = pseudo_mask * -1
# construct binary pseudo-masks
pseudo_mask[pseudo_mask < 0] = 0
pseudo_mask = Image.fromarray(np.uint8(pseudo_mask*255))
pseudo_mask = np.asarray(pseudo_mask.resize((width, height)))
pseudo_mask = pseudo_mask.astype(np.uint8)
upper = np.max(pseudo_mask)
lower = np.min(pseudo_mask)
thresh = upper / 2.0
pseudo_mask[pseudo_mask > thresh] = upper
pseudo_mask[pseudo_mask <= thresh] = lower
pseudo_mask_list.append(pseudo_mask)
input = np.array(I)
for pseudo_mask in pseudo_mask_list:
input = vis_mask(input, pseudo_mask, random_color(rgb=True))
input.save(os.path.join(args.output_path, "demo.jpg")) | CutLER-main | maskcut/demo.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
from torch.nn import functional as F
from torchvision.utils import make_grid, save_image
import numpy as np
import argparse
import os
import sys
sys.path.append('vae_submodule')
from utils.helpers import FormatterNoDuplicate, check_bounds, set_seed
from utils.visualize import Visualizer
from utils.viz_helpers import get_samples
from disvae.utils.modelIO import load_model, load_metadata
from disvae.models.losses import get_loss_f
from evaluate_amortization_speed_function import evaluate_amortization_speed
import sys
from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(
mode='Plain', color_scheme='Neutral', call_pdb=1)
def sample_gaussian(mean, logvar):
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return mean + std * eps
def unflatten_latent(z_flat):
n = z_flat.shape[-1]
return z_flat[...,:n//2], z_flat[...,n//2:]
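# Illustrative sketch (not part of the original script): sampling with the
# reparameterization trick in sample_gaussian keeps the sample differentiable
# with respect to the mean and log-variance.
def _reparameterization_example():
    mean = torch.zeros(4, 10, requires_grad=True)
    logvar = torch.zeros(4, 10, requires_grad=True)
    z = sample_gaussian(mean, logvar)
    z.sum().backward()
    assert mean.grad is not None and logvar.grad is not None
    return z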
def estimate_elbo(x, z_flat, decoder):
latent_dist = unflatten_latent(z_flat)
latent_sample = sample_gaussian(*latent_dist)
recon_batch = decoder(latent_sample)
batch_size = x.shape[0]
    log_likelihood = -F.binary_cross_entropy(recon_batch, x, reduction='none').sum(dim=[1,2,3])
mean, logvar = latent_dist
latent_kl = 0.5 * (-1 - logvar + mean.pow(2) + logvar.exp())
kl_to_prior = latent_kl.sum(dim=[-1])
assert log_likelihood.shape == kl_to_prior.shape
loss = log_likelihood - kl_to_prior
return loss
def main():
model_dir = 'vae_submodule/results/VAE_mnist'
meta_data = load_metadata(model_dir)
model = load_model(model_dir).cuda()
model.eval() # don't sample from latent: use mean
dataset = meta_data['dataset']
loss_f = get_loss_f('VAE',
n_data=len(dataset),
device='cuda',
rec_dist='bernoulli',
reg_anneal=0)
batch_size = 1024
num_save = 15
data_samples = get_samples(dataset, batch_size, idcs=[25518, 13361, 22622]).cuda()
def amortization_model(data_samples):
latent_dist = model.encoder(data_samples)
latent_dist_flat = torch.cat(latent_dist, dim=-1)
return latent_dist_flat
def amortization_objective(latent_dist_flat, data_samples):
elbo = estimate_elbo(data_samples, latent_dist_flat, model.decoder)
return elbo
iterate_history, predicted_samples = evaluate_amortization_speed(
amortization_model=amortization_model,
amortization_objective=amortization_objective,
contexts=data_samples,
tag='vae',
fig_ylabel='ELBO',
adam_lr=5e-3,
num_iterations=2000,
maximize=True,
save_iterates=[0, 250, 500, 1000, 2000],
num_save=num_save,
)
iterate_history.append((-1, predicted_samples[:num_save]))
reconstructions = []
for i, latent_dist_flat in iterate_history:
latent_dist = unflatten_latent(latent_dist_flat)
latent_mean = latent_dist[0]
reconstructions.append(1.-model.decoder(latent_mean))
reconstructions.append(1.-data_samples[:num_save])
reconstructions = torch.cat(reconstructions, dim=0)
reconstructions = F.interpolate(reconstructions,
recompute_scale_factor=True, scale_factor=1.5, mode='bilinear')
fname = f'vae-samples.png'
save_image(reconstructions, fname, nrow=num_save)
if __name__ == '__main__':
main()
| amortized-optimization-tutorial-main | code/evaluate_amortization_speed_vae.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('bmh')
params = {
"text.usetex" : True,
"font.family" : "serif",
"font.serif" : ["Computer Modern Serif"]
}
plt.rcParams.update(params)
import os
import time
def evaluate_amortization_speed(
amortization_model,
amortization_objective,
contexts,
tag,
fig_ylabel,
adam_lr=5e-3,
num_iterations=2000,
maximize=False,
iter_history_callback=None,
save_iterates=[],
num_save=8,
):
times = []
n_trials = 10
for i in range(n_trials+1):
start_time = time.time()
predicted_solutions = amortization_model(contexts)
if i > 0:
times.append(time.time()-start_time)
amortized_objectives = amortization_objective(
predicted_solutions, contexts
).cpu().detach()
print(f'solution size: {predicted_solutions.shape[1]}')
print('--- amortization model')
print(f'average objective value: {amortized_objectives.mean():.2f}')
print(f'average runtime: {np.mean(times)*1000:.2f}ms')
iterates = torch.nn.Parameter(torch.zeros_like(predicted_solutions))
opt = torch.optim.Adam([iterates], lr=adam_lr)
objective_history = []
times = []
iterations = []
iterate_history = []
start_time = time.time()
for i in range(num_iterations+1):
objectives = amortization_objective(iterates, contexts)
mean_objective = objectives.mean()
if maximize:
mean_objective *= -1.
opt.zero_grad()
mean_objective.backward()
opt.step()
if i % 50 == 0:
iterations.append(i)
times.append(time.time()-start_time)
objective_history.append((objectives.mean().item(), objectives.std().item()))
print(i, objectives.mean().item())
if i in save_iterates:
iterate_history.append((i, iterates[:num_save].detach().clone()))
times = np.array(times)
figsize = (4,2)
fig, ax = plt.subplots(figsize=figsize, dpi=200)
objective_means, objective_stds = map(np.array, zip(*objective_history))
l, = ax.plot(iterations, objective_means)
ax.axhline(amortized_objectives.mean().cpu().detach(), color='k', linestyle='--')
ax.axhspan(amortized_objectives.mean()-amortized_objectives.std(),
amortized_objectives.mean()+amortized_objectives.std(), color='k', alpha=0.15)
ax.fill_between(
iterations, objective_means-objective_stds, objective_means+objective_stds,
color=l.get_color(), alpha=0.5)
ax.set_xlabel('Adam Iterations')
ax.set_ylabel(fig_ylabel)
ax.set_xlim(0, max(iterations))
# ax.set_ylim(0, 1000)
fig.tight_layout()
fname = f'{tag}-iter.pdf'
print(f'saving to {fname}')
fig.savefig(fname, transparent=True)
os.system(f'pdfcrop {fname} {fname}')
fig, ax = plt.subplots(figsize=figsize, dpi=200)
ax.axhline(amortized_objectives.mean(), color='k', linestyle='--')
ax.axhspan(amortized_objectives.mean()-amortized_objectives.std(),
amortized_objectives.mean()+amortized_objectives.std(), color='k', alpha=0.15)
l, = ax.plot(times, objective_means)
ax.fill_between(
times, objective_means-objective_stds, objective_means+objective_stds,
color=l.get_color(), alpha=0.5)
ax.set_xlim(0, max(times))
# ax.set_ylim(0, 1000)
ax.set_xlabel('Runtime (seconds)')
ax.set_ylabel(fig_ylabel)
fig.tight_layout()
fname = f'{tag}-time.pdf'
print(f'saving to {fname}')
fig.savefig(fname, transparent=True)
os.system(f'pdfcrop {fname} {fname}')
return iterate_history, predicted_solutions
| amortized-optimization-tutorial-main | code/evaluate_amortization_speed_function.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
import torch
from torch import nn
import numpy as np
import os
import matplotlib.pyplot as plt
plt.style.use('bmh')
import sys
from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(
mode='Plain', color_scheme='Neutral', call_pdb=1)
def celestial_to_euclidean(ra, dec):
x = np.cos(dec)*np.cos(ra)
y = np.cos(dec)*np.sin(ra)
z = np.sin(dec)
return x, y, z
def euclidean_to_celestial(x, y, z):
sindec = z
cosdec = np.sqrt(x*x + y*y)
sinra = y / cosdec
cosra = x / cosdec
ra = np.arctan2(sinra, cosra)
dec = np.arctan2(sindec, cosdec)
return ra, dec
def euclidean_to_celestial_th(x, y, z):
sindec = z
cosdec = (x*x + y*y).sqrt()
sinra = y / cosdec
cosra = x / cosdec
ra = torch.atan2(sinra, cosra)
dec = torch.atan2(sindec, cosdec)
return ra, dec
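# Illustrative sketch (not part of the original script): converting celestial
# coordinates to euclidean and back should approximately recover the inputs
# for declinations in (-pi/2, pi/2).
def _celestial_roundtrip_example():
    ra, dec = 0.3, -0.7
    x, y, z = celestial_to_euclidean(ra, dec)
    ra2, dec2 = euclidean_to_celestial(x, y, z)
    assert np.allclose([ra, dec], [ra2, dec2], atol=1e-6)
    return ra2, dec2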
def sphere_dist_th(x,y):
if x.ndim == 1:
x = x.unsqueeze(0)
if y.ndim == 1:
y = y.unsqueeze(0)
assert x.ndim == y.ndim == 2
inner = (x*y).sum(-1)
return torch.arccos(inner)
class c_convex(nn.Module):
def __init__(self, n_components=4, gamma=0.5, seed=None):
super().__init__()
self.n_components = n_components
self.gamma = gamma
# Sample a random c-convex function
if seed is not None:
torch.manual_seed(seed)
self.ys = torch.randn(n_components, 3)
self.ys = self.ys / torch.norm(self.ys, 2, dim=-1, keepdim=True)
self.alphas = .7*torch.rand(self.n_components)
self.params = torch.cat((self.ys.view(-1), self.alphas.view(-1)))
def forward(self, xyz):
# TODO: Could be optimized
cs = []
for y, alpha in zip(self.ys, self.alphas):
ci = 0.5*sphere_dist_th(y, xyz)**2 + alpha
cs.append(ci)
cs = torch.stack(cs)
        if self.gamma is None or self.gamma == 0.:
z = cs.min(dim=0).values
else:
z = -self.gamma*(-cs/self.gamma).logsumexp(dim=0)
return z
seeds = [8,9,2,31,4,20,16,7]
fs = [c_convex(seed=i) for i in seeds]
n_params = len(fs[0].params)
class AmortizedModel(nn.Module):
def __init__(self, n_params):
super().__init__()
self.base = nn.Sequential(
nn.Linear(n_params, n_hidden),
nn.ReLU(inplace=True),
nn.Linear(n_hidden, n_hidden),
nn.ReLU(inplace=True),
nn.Linear(n_hidden, 3)
)
def forward(self, p):
squeeze = p.ndim == 1
if squeeze:
p = p.unsqueeze(0)
assert p.ndim == 2
z = self.base(p)
z = z / z.norm(dim=-1, keepdim=True)
if squeeze:
z = z.squeeze(0)
return z
n_hidden = 128
torch.manual_seed(0)
model = AmortizedModel(n_params=n_params)
opt = torch.optim.Adam(model.parameters(), lr=5e-4)
xs = []
for i in range(100):
losses = []
xis = []
for f in fs:
pred_opt = model(f.params)
xis.append(pred_opt)
losses.append(f(pred_opt))
with torch.no_grad():
xis = torch.stack(xis)
xs.append(xis)
loss = sum(losses)
opt.zero_grad()
loss.backward()
opt.step()
xs = torch.stack(xs, dim=1)
pad = .1
n_sample = 100
ra = np.linspace(-np.pi+pad, np.pi-pad, n_sample)
dec= np.linspace(-np.pi/2+pad, np.pi/2-pad, n_sample)
ra_grid, dec_grid = np.meshgrid(ra,dec)
ra_grid_flat = ra_grid.ravel()
dec_grid_flat = dec_grid.ravel()
x_grid, y_grid, z_grid = celestial_to_euclidean(ra_grid_flat, dec_grid_flat)
p_grid = np.stack((x_grid, y_grid, z_grid), axis=-1)
p_grid_th = torch.from_numpy(p_grid).float()
for i, (f, xs_i) in enumerate(zip(fs, xs)):
nrow, ncol = 1, 1
fig, ax = plt.subplots(
nrow, ncol, figsize=(3*ncol, 2*nrow),
subplot_kw={'projection': 'mollweide'},
gridspec_kw = {'wspace':0, 'hspace':0}
)
with torch.no_grad():
f_grid = f(p_grid_th).numpy()
best_i = f_grid.argmin()
ra_opt, dec_opt= ra_grid_flat[best_i], dec_grid_flat[best_i]
f_grid = f_grid.reshape(ra_grid.shape)
n_levels = 10
ax.contourf(ra_grid, dec_grid, f_grid, n_levels, cmap='Purples')
x,y,z = xs_i.split(1,dim=-1)
ra, dec = euclidean_to_celestial_th(x,y,z)
ax.plot(ra, dec, color='#5499FF', lw=3, ls=':')
ax.scatter(ra_opt, dec_opt, marker='*', color='#AA0000',
s=100, zorder=10)
for s in ax.spines.values():
s.set_visible(False)
ax.set_xticks([])
ax.set_yticks([])
ax.grid(False)
fname = f'paper/fig/sphere/{i}.png'
plt.savefig(fname, transparent=True)
os.system(f'convert -trim {fname} {fname}')
| amortized-optimization-tutorial-main | code/train-sphere.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
import torch
import argparse
import os
import sys
import pickle as pkl
import shutil
from omegaconf import OmegaConf
from collections import namedtuple
import dmc2gym
import matplotlib.pyplot as plt
plt.style.use('bmh')
from matplotlib import cm
from multiprocessing import Process
from svg.video import VideoRecorder
from svg import utils, dx
from evaluate_amortization_speed_function import evaluate_amortization_speed
def main():
import sys
from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(mode='Verbose',
color_scheme='Linux',
call_pdb=1)
exp = torch.load('svg_submodule/trained-humanoid/latest.pt')
# Clean up logging after resuming the experiment code
del exp.logger
os.remove('eval.csv')
os.remove('train.csv')
observations = collect_eval_episode(exp)
# First try to predict the maximum value action
def amortization_model(observations):
actions, _, _ = exp.agent.actor(observations, compute_pi=False, compute_log_pi=False)
return actions
def amortization_objective(actions, observations, normalize=True):
q1, q2 = exp.agent.critic(observations, actions)
values = torch.min(q1, q2).squeeze()
if normalize:
values = normalize_values(values)
return values
with torch.no_grad():
expert_actions = amortization_model(observations)
zero_actions = torch.zeros_like(expert_actions)
expert_values = amortization_objective(expert_actions, observations, normalize=False)
zero_values = amortization_objective(zero_actions, observations, normalize=False)
def normalize_values(values):
"""normalize so that the expert value is 0 and the zero action is -1."""
norm_values = (values - expert_values) / (expert_values - zero_values)
# assume we can't do better than the expert.
# otherwise the optimization overfits to the inaccurate model
# and value approximation.
norm_values[norm_values > 0.] = 0.
return norm_values
evaluate_amortization_speed(
amortization_model=amortization_model,
amortization_objective=amortization_objective,
contexts=observations,
tag='control-model-free',
fig_ylabel='Value',
adam_lr=5e-3,
num_iterations=500,
maximize=True,
)
# Next try to predict the solution to the short-horizon model-based
# control problem.
def amortization_model(observations):
num_batch = observations.shape[0]
action_seq, _, _ = exp.agent.dx.unroll_policy(
observations, exp.agent.actor, sample=False, last_u=True)
action_seq_flat = action_seq.transpose(0,1).reshape(num_batch, -1)
return action_seq_flat
def amortization_objective(action_seq_flat, observations, normalize=True):
num_batch = action_seq_flat.shape[0]
action_seq = action_seq_flat.reshape(num_batch, -1, exp.agent.action_dim).transpose(0, 1)
predicted_states = exp.agent.dx.unroll(observations, action_seq[:-1])
all_obs = torch.cat((observations.unsqueeze(0), predicted_states), dim=0)
xu = torch.cat((all_obs, action_seq), dim=2)
dones = exp.agent.done(xu).sigmoid().squeeze(dim=2)
not_dones = 1. - dones
not_dones = utils.accum_prod(not_dones)
last_not_dones = not_dones[-1]
rewards = not_dones * exp.agent.rew(xu).squeeze(2)
q1, q2 = exp.agent.critic(all_obs[-1], action_seq[-1])
q = torch.min(q1, q2).reshape(num_batch)
rewards[-1] = last_not_dones * q
rewards *= exp.agent.discount_horizon.unsqueeze(1)
values = rewards.sum(dim=0)
if normalize:
values = normalize_values(values)
return values
with torch.no_grad():
# used in the normalization
expert_action_seq = amortization_model(observations)
zero_action_seq = torch.zeros_like(expert_action_seq)
expert_values = amortization_objective(expert_action_seq, observations, normalize=False)
zero_values = amortization_objective(zero_action_seq, observations, normalize=False)
evaluate_amortization_speed(
amortization_model=amortization_model,
amortization_objective=amortization_objective,
contexts=observations,
tag='control-model-based',
fig_ylabel='Value',
adam_lr=5e-3,
num_iterations=500,
maximize=True,
)
def collect_eval_episode(exp):
device = 'cuda'
exp.env.set_seed(0)
obs = exp.env.reset()
done = False
total_reward = 0.
step = 0
observations = []
while not done:
if exp.cfg.normalize_obs:
mu, sigma = exp.replay_buffer.get_obs_stats()
obs = (obs - mu) / sigma
obs = torch.FloatTensor(obs).to(device)
observations.append(obs)
action, _, _ = exp.agent.actor(obs, compute_pi=False, compute_log_pi=False)
action = action.clamp(min=exp.env.action_space.low.min(),
max=exp.env.action_space.high.max())
obs, reward, done, _ = exp.env.step(utils.to_np(action.squeeze(0)))
total_reward += reward
step += 1
print(f'+ eval episode reward: {total_reward}')
observations = torch.stack(observations, dim=0)
return observations
if __name__ == '__main__':
main()
| amortized-optimization-tutorial-main | code/evaluate_amortization_speed_control.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import jax
import jax.numpy as jnp
import os
plt.rcParams.update({
"text.usetex": True,
"font.family": "serif",
"font.sans-serif": ["Computer Modern Roman"]})
plt.style.use('bmh')
# Initial problem with x^\star
N = 1000
x = np.linspace(-3.8, 5.0, N)
# y = -(x**2)*np.sin(x)
# y = (np.cos(x)**2) #- np.abs(x)
y = -x**2 + 7*np.sin(x+np.pi)
y = y/8.
nrow, ncol = 1, 2
fig, axs = plt.subplots(nrow, ncol, figsize=(ncol*2.5,nrow*1.1), dpi=200)
ax = axs[0]
ax.axhline(0, color='k')
ax.plot(x, y-y.min(), color='k')
ustar = x[y.argmax()]
ax.axvline(ustar, color='#AA0000')
ax.text(0.13, 0.09, '$$\pi^\star(x)$$', color='#AA0000',
transform=ax.transAxes, ha='left', va='bottom')
ax.axvline(ustar+2, color='#5499FF')
ax.text(0.54, 0.09, r'$$\pi_\theta(x)$$', color='#5499FF',
transform=ax.transAxes, ha='left', va='bottom')
ax.arrow(x=ustar+2, y=0.5, dx=-0.5, dy=0.,
width=0.1, color='#5499FF', zorder=10)
ax.text(0.7, 0.44, '$$Q(x, u)$$',
transform=ax.transAxes, ha='left', va='bottom')
ax.set_xlabel('$$u$$')
ax.xaxis.set_label_coords(.5, 0.01)
ax.set_title('Deterministic Policy', fontsize=12, pad=5)
# ax.set_ylabel('$$Q(x, u)$$', rotation=0, labelpad=0)
# ax.yaxis.set_label_coords(-.1, .44)
ax = axs[1]
y = np.exp(y)
y -= y.min()
ax.plot(x, y, color='k') #, zorder=10)
ax.set_xlabel('$$u$$')
ax.xaxis.set_label_coords(.5, 0.01)
mu, sigma = ustar, 0.8
ystar = np.exp(-.5*((x-mu)/sigma)**2) #/ (sigma*np.sqrt(2.*np.pi))
ystar = ystar * y.sum() / ystar.sum()
ax.plot(x, ystar, color='#AA0000')
mu, sigma = ustar+2, 1.5
yhat = np.exp(-.5*((x-mu)/sigma)**2) #/ (sigma*np.sqrt(2.*np.pi))
yhat = yhat * y.sum() / yhat.sum()
ax.plot(x, yhat, color='#5499FF')
# I = [250, 300, 350, 400, 450, 500, 550, 600, 650, 700, 750, 800]
I = [250, 300, 350, 400, 650, 700, 750, 800]
for i in I:
ax.arrow(x=x[i], y=yhat[i], dx=-0.5, dy=0.,
width=0.05,
color='#5499FF',
zorder=10)
ax.text(0.37, 0.74, '$$\pi^\star(x)$$', color='#AA0000',
transform=ax.transAxes, ha='left', va='bottom')
ax.text(0.6, 0.45, r'$$\pi_\theta(x)$$', color='#5499FF',
transform=ax.transAxes, ha='left', va='bottom')
ax.text(0., 0.43, '$$\mathcal{Q}(x, u)$$',
transform=ax.transAxes, ha='left', va='bottom')
ax.axhline(0., color='k',zorder=-1)
# ax.set_ylabel('$${\mathcal{Q}}(x, u)$$', rotation=0, labelpad=0)
# ax.yaxis.set_label_coords(-.1, .44)
fig.tight_layout()
for ax in axs:
ax.set_xticks([])
ax.set_yticks([])
ax.grid(False)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.set_title('Stochastic Policy', fontsize=12, pad=5)
fname = 'ctrl.pdf'
plt.savefig(fname, transparent=True)
os.system(f'pdfcrop {fname} {fname}')
| amortized-optimization-tutorial-main | code/figures/ctrl.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import jax
import jax.numpy as jnp
import os
plt.rcParams.update({
"text.usetex": True,
"font.family": "serif",
"font.sans-serif": ["Computer Modern Roman"]})
plt.style.use('bmh')
fig, ax = plt.subplots(figsize=(2.5,1.5), dpi=200)
def f(x):
return np.cos(x) + 0.2*np.abs(x-np.pi/2)
N = 100
x = np.linspace(-4.*np.pi, 2*np.pi, N)
y = f(x)
ax.plot(x, y, color='k')
sigmas = [1., 1.5, 2.5]
for sigma in sigmas:
ys = []
# Inefficiently doing this...
for xi in x:
eps = sigma*np.random.randn(50000)
yi = np.mean(f(xi+eps))
ys.append(yi)
ax.plot(x, ys, alpha=1., lw=2)
# ax.set_xlabel(r'$$\theta$$')
# ax.xaxis.set_label_coords(.5, 0.01)
# ax.set_ylabel(r'$${\mathcal L}(\hat y_\theta)$$', rotation=0, labelpad=0)
# ax.yaxis.set_label_coords(-.07, .44)
# ax.set_ylabel('$$y$$', rotation=0, labelpad=0)
# ax.xaxis.set_label_coords(.5, 0.01)
fig.tight_layout()
ax.set_xticks([])
ax.set_yticks([])
ax.grid(False)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
fname = 'smoothed-loss.pdf'
plt.savefig(fname, transparent=True)
os.system(f'pdfcrop {fname} {fname}')
| amortized-optimization-tutorial-main | code/figures/smoothed-loss.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import jax
import jax.numpy as jnp
import shutil
import os
plt.rcParams.update({
"text.usetex": True,
"font.family": "serif",
"font.sans-serif": ["Computer Modern Roman"],
"text.latex.preamble": r"\usepackage{amsfonts}",
})
# We will define a 1D density parameterized by \phi to maximize over
# with gradient steps, and implement this by discretizing over the domain.
phi = jnp.array([0., 1.5, .7, 6.])
@jax.jit
def compute_dist(x, phi):
# Compute values of at the discretized points in the domain.
v = jnp.exp(-0.5*((x-phi[0])/phi[1])**2 + phi[2]*jnp.sin(x*phi[3]))
dx = x[1:]-x[:-1]
y = v/sum(v[1:]*dx) # Normalize to be a proper distribution.
flow_x = flow(x, y) # Constrain the mean and variance.
# Compute the new probabilities.
J_flow = jnp.diag(jax.jacfwd(flow)(x, y))
flow_y = y / J_flow
return flow_x, flow_y
@jax.jit
def mean(x, y):
dx = x[1:]-x[:-1]
x = x[1:]
y = y[1:]
return sum(x*y*dx)
@jax.jit
def std(x, y):
mu = mean(x,y)
dx = x[1:]-x[:-1]
x = x[1:]
y = y[1:]
return jnp.sqrt(sum(((x-mu)**2)*y*dx))
@jax.jit
def entr(x, y):
dx = x[1:]-x[:-1]
y = y[1:]
return -sum(y*jnp.log(y+1e-8)*dx)
@jax.jit
def flow(x, y):
# Normalize the domain so that the distribution has
# zero mean and identity variance.
return (x - mean(x,y)) / std(x, y)
@jax.jit
def loss(x, phi):
x, y = compute_dist(x, phi)
return -entr(x, y)
# Prepare the output directory
d = 'maxent-animation'
if os.path.exists(d):
shutil.rmtree(d)
os.makedirs(d)
def plot(t):
nrow, ncol = 1, 2
fig, axs = plt.subplots(nrow, ncol, figsize=(ncol*3, nrow*2), dpi=200,
gridspec_kw={'wspace': .3, 'hspace': 0})
ax = axs[0]
ax.plot(entrs, color='k')
ax.set_xlabel('Updates', fontsize=10)
ax.set_title(r'Entropy ($\mathbb{H}_p[X]$)', fontsize=10)
ax.set_xlim(0, n_step)
ax.set_ylim(1.3, 1.45)
ax = axs[1]
ax.plot(x, y, color='k')
ax.set_ylim(0, 0.7)
ax.set_xlim(-3, 3)
ax.set_xlabel('$x$', fontsize=10)
ax.set_title('$p(x)$', fontsize=10)
for ax in axs:
ax.grid(False)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
fig.suptitle(
r'$\max_{p} \mathbb{H}_p[X]\; \rm{subject\; to}\; \mathbb{E}_p[X] = \mu\;\rm{and}\;\rm{Var}_p[X]=\Sigma$')
fig.subplots_adjust(top=0.7)
fname = f'{d}/{t:04d}.png'
plt.savefig(fname, bbox_inches='tight')
plt.close(fig)
# os.system(f'pdfcrop {fname} {fname}')
os.system(f'convert -trim {fname} {fname}')
# jitted derivative of the loss with respect to phi
dloss_dphi = jax.jit(jax.grad(loss, argnums=1))
# Number of discretization points in the domain
# Decrease this to run faster
N = 1000
# The domain of the unprojected distribution
x_unproj = jnp.linspace(-5.0, 5.0, N)
entrs = []
x, y = compute_dist(x_unproj, phi)
entrs.append(entr(x,y))
print(f'entr={entr(x,y):.2f} (mean={mean(x, y):.2f} std={std(x,y):.2f})')
# The step size can be much larger but it's set to this for the animation.
n_step = 100
step_size = 0.13
for t in range(n_step):
# Take a gradient step with respect to the
# parameters of the distribution
phi -= step_size*dloss_dphi(x_unproj, phi)
x, y = compute_dist(x_unproj, phi)
entrs.append(entr(x,y))
print(f'entr={entr(x,y):.2f} (mean={mean(x, y):.2f} std={std(x,y):.2f})')
plot(t)
# By the end, we see that the entropy is the true maximal entropy
# of the Gaussian of (1/2)log(2\pi)+(1/2) \approx 1.42.
os.system(f'convert -delay 10 -loop 0 {d}/*.png {d}/maxent.gif')
| amortized-optimization-tutorial-main | code/figures/maxent-animation.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import jax
import jax.numpy as jnp
import os
plt.rcParams.update({
"text.usetex": True,
"font.family": "serif",
"font.sans-serif": ["Computer Modern Roman"]})
plt.style.use('bmh')
# Initial problem with x^\star
N = 1000
x = np.linspace(-5.0, 5.0, N)
y = np.linspace(-10.0, 10.0, N)
X, Y = np.meshgrid(x, y)
Z = (Y-np.sin(X)-X*(1+0.1*np.cos(X)))**2
Z = 1./(1.+np.exp(-Z/80.))
fig, ax = plt.subplots(figsize=(2,1.7), dpi=200)
CS = ax.contourf(X, Y, Z, cmap='Purples')
ax.text(0., 1., r'$$f(y; x)$$', color='#491386',
bbox=dict(facecolor='white', pad=0, alpha=0.9, edgecolor='none'),
transform=ax.transAxes, ha='left', va='top')
I = np.argmin(Z, axis=0)
xstar, ystar = x, y[I]
ax.plot(xstar, ystar, color='#AA0000', lw=3)
ax.text(.92, .8, '$$y^\star(x)$$', color='#AA0000',
transform=ax.transAxes, ha='right', va='top')
ax.set_ylabel('$$y$$', rotation=0, labelpad=0)
ax.yaxis.set_label_coords(-.07, .44)
ax.set_xlabel('$$x$$')
ax.xaxis.set_label_coords(.5, 0.01)
fig.tight_layout()
ax.set_xticks([])
ax.set_yticks([])
ax.grid(False)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
fname = 'opt.pdf'
plt.savefig(fname, transparent=True)
os.system(f'pdfcrop {fname} {fname}')
# Regression loss
xhat, yhat= xstar.copy(), ystar.copy()
yhat = -0.5*yhat + 0.0*xhat*np.maximum(xhat, 0.) - \
0.23*xhat*np.minimum(xhat, 0.)
fig, ax = plt.subplots(figsize=(2,1.7), dpi=200)
CS = ax.contourf(X, Y, Z, cmap='Purples')
ax.text(0., 1., r'$$f(y; x)$$', color='#491386',
bbox=dict(facecolor='white', pad=0, alpha=0.9, edgecolor='none'),
transform=ax.transAxes, ha='left', va='top')
I = np.argmin(Z, axis=0)
xstar, ystar = x, y[I]
ax.plot(xstar, ystar, color='#AA0000', lw=3)
ax.text(.92, .8, '$$y^\star(x)$$', color='#AA0000',
transform=ax.transAxes, ha='right', va='top')
ax.plot(xhat, yhat, color='#5499FF', lw=3)
ax.text(0.3, .57, r'$$\hat y_\theta(x)$$', color='#5499FF',
bbox=dict(facecolor='white', pad=0, alpha=0.6, edgecolor='none'),
transform=ax.transAxes, ha='left', va='bottom')
n_reg = 15
pad = 35
I = np.round(np.linspace(pad, len(y) - 1 - pad, n_reg)).astype(int)
for idx in I:
ax.plot(
(xstar[idx], xhat[idx]), (yhat[idx], ystar[idx]),
color='k', lw=1, solid_capstyle='round')
ax.set_ylabel('$$y$$', rotation=0, labelpad=0)
ax.yaxis.set_label_coords(-.07, .44)
ax.set_xlabel('$$x$$')
ax.xaxis.set_label_coords(.5, 0.01)
ax.set_title('Regression-Based', fontsize=12, pad=0)
fig.tight_layout()
ax.set_xticks([])
ax.set_yticks([])
ax.grid(False)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
fname = 'learning-reg.pdf'
plt.savefig(fname, transparent=True)
os.system(f'pdfcrop {fname} {fname}')
# Objective loss
fig, ax = plt.subplots(figsize=(2,1.7), dpi=200)
CS = ax.contourf(X, Y, Z, cmap='Purples')
ax.plot(xstar, ystar, color='#AA0000', lw=3, ls='--')
ax.plot(xhat, yhat, color='#5499FF', lw=3)
I = np.round(np.linspace(pad, len(y) - 1 - pad, n_reg)).astype(int)
def f(x,y):
z = y-jnp.sin(x)-x*1.+0.1*jnp.cos(x)
z = z**2
z = 1./(1.+jnp.exp(-z/80.))
return z
df = jax.grad(f, argnums=1)
for idx in I:
x,y = jnp.array(xhat[idx]), jnp.array(yhat[idx])
z = f(x,y)
dz = df(x,y)
ax.quiver(
xhat[idx], yhat[idx], 0., -dz,
color='k', lw=1, scale=.2, zorder=10) #, solid_capstyle='round')
ax.set_ylabel('$$y$$', rotation=0, labelpad=0)
ax.yaxis.set_label_coords(-.07, .44)
ax.set_xlabel('$$x$$')
ax.xaxis.set_label_coords(.5, 0.01)
ax.set_title('Objective-Based', fontsize=12, pad=0)
fig.tight_layout()
ax.set_xticks([])
ax.set_yticks([])
ax.grid(False)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
fname = 'learning-obj.pdf'
plt.savefig(fname, transparent=True)
os.system(f'pdfcrop {fname} {fname}')
# RL loss
fig, ax = plt.subplots(figsize=(2,1.5), dpi=200)
CS = ax.contourf(X, Y, Z, cmap='Purples')
ax.plot(xstar, ystar, color='#AA0000', lw=3, ls='--')
ax.plot(xhat, yhat, color='#5499FF', lw=3)
np.random.seed(2)
for _ in range(20):
p = np.linspace(0, 3., len(xhat))
p = p*np.flip(p)
q = 0.04*np.random.randn(len(xhat))
q = np.cumsum(q, axis=-1)
q = q*np.flip(q)
pert = 0.3*(p+q)*np.random.randn()
ax.plot(xhat, yhat+pert, color='#5499FF', lw=1, alpha=0.3)
# ax.set_xlabel('$$x$$')
ax.xaxis.set_label_coords(.5, 0.01)
# ax.set_title('RL-Based', fontsize=12, pad=0)
fig.tight_layout()
ax.set_xticks([])
ax.set_yticks([])
ax.grid(False)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
fname = 'learning-rl.pdf'
plt.savefig(fname, transparent=True)
os.system(f'pdfcrop {fname} {fname}')
| amortized-optimization-tutorial-main | code/figures/main-example.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import jax
import jax.numpy as jnp
import os
plt.rcParams.update({
"text.usetex": True,
"font.family": "serif",
"font.sans-serif": ["Computer Modern Roman"]})
plt.style.use('bmh')
N = 1000
x = np.linspace(-5., 5.0, N)
fig, ax = plt.subplots(figsize=(2,1.3), dpi=200)
y = x
ax.plot(x, y, color='k', linestyle='--', alpha=.5)
y = -2.*np.sin(x)+0.9*x*(1+0.1*np.cos(x))**2
ax.plot(x, y, color='k')
fp = max(x[np.abs(y-x) <= 5e-3]) # Numerically find the fixed-point :)
ax.scatter([0], [0], color='#AA0000', lw=1, s=70, zorder=10, marker='*')
ax.scatter([fp], [fp], color='#AA0000', lw=1, s=70, zorder=10, marker='*')
ax.scatter([-fp], [-fp], color='#AA0000', lw=1, s=70, zorder=10, marker='*')
# ax.set_ylabel('$$g(y)$$', rotation=0, labelpad=0)
# ax.yaxis.set_label_coords(-.07, .44)
# ax.set_xlabel('$$y$$')
# ax.xaxis.set_label_coords(.5, 0.01)
fig.tight_layout()
ax.set_xticks([])
ax.set_yticks([])
ax.grid(False)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
fname = 'fp.pdf'
plt.savefig(fname, transparent=True)
os.system(f'pdfcrop {fname} {fname}')
| amortized-optimization-tutorial-main | code/figures/fixed-point.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import jax
import jax.numpy as jnp
import os
plt.rcParams.update({
"text.usetex": True,
"font.family": "serif",
"font.sans-serif": ["Computer Modern Roman"]})
plt.style.use('bmh')
fig, ax = plt.subplots(figsize=(1.5,1.5), dpi=200)
N = 1000
x = np.linspace(-5.0, 5.0, N)
y = np.linspace(-5.0, 5.0, N)
X, Y = np.meshgrid(x, y)
a,b = 0., 10.
Z = X**2 + Y**2 + 1.4*X*Y
Z = 1./(1.+np.exp(-Z/10.))
fig, ax = plt.subplots(figsize=(2,1.7), dpi=200)
CS = ax.contourf(X, Y, Z, cmap='Purples', alpha=0.8)
Z = X**2 + Y**2
CS = ax.contour(X, Y, Z, colors='k', alpha=.7, linewidths=1, levels=5)
ax.scatter([0], [0], color='#AA0000', lw=1, s=50, zorder=10, marker='*')
ax.set_ylabel('$$y_1$$', rotation=0, labelpad=0)
ax.yaxis.set_label_coords(-.07, .44)
ax.set_xlabel('$$y_0$$')
ax.xaxis.set_label_coords(.5, 0.01)
ax.text(0., 1., r'$$f(y; x)$$', color='#491386',
bbox=dict(facecolor='white', pad=0, alpha=0.9, edgecolor='none'),
transform=ax.transAxes, ha='left', va='top')
ax.text(.3, .3, '$$y^\star(x)$$', color='#AA0000',
ha='left', va='bottom')
fig.tight_layout()
ax.set_xticks([])
ax.set_yticks([])
ax.grid(False)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
fname = 'loss-comp.pdf'
plt.savefig(fname, transparent=True)
os.system(f'pdfcrop {fname} {fname}')
| amortized-optimization-tutorial-main | code/figures/loss-comp.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import jax
import jax.numpy as jnp
import os
plt.rcParams.update({
"text.usetex": True,
"font.family": "serif",
"font.sans-serif": ["Computer Modern Roman"]})
plt.style.use('bmh')
fig, ax = plt.subplots(figsize=(2,1.3), dpi=200)
N = 1000
y = np.linspace(-2.0, 2.0, N)
z = -y**3 - 10.*y
ax.plot(y, z, color='k')
I = N // 5
y0, z0 = y[I], z[I]
ax.scatter(y0, z0, color='#5499FF', lw=1, s=50, zorder=10, marker='.')
ax.text(y0, z0-3, r'$$\hat y^0_\theta$$', color='#5499FF',
ha='right', va='top')
lams = np.linspace(0., 12., 15)
for lam in lams:
z_ = z + (lam/2)*(y-y0)**2
ax.plot(y, z_, color='k', alpha=0.2)
# ax.set_title('$$f(y) + {\lambda\over 2}||y-\hat y_0||_2^2$$', size=10)
# ax.set_xlabel('$$y$$')
# ax.xaxis.set_label_coords(.5, 0.01)
fig.tight_layout()
ax.set_xticks([])
ax.set_yticks([])
ax.grid(False)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
fname = 'imaml.pdf'
plt.savefig(fname, transparent=True)
os.system(f'pdfcrop {fname} {fname}')
| amortized-optimization-tutorial-main | code/figures/imaml.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import jax
import jax.numpy as jnp
import os
plt.rcParams.update({
"text.usetex": True,
"font.family": "serif",
"font.sans-serif": ["Computer Modern Roman"]})
plt.style.use('bmh')
phi = jnp.array([0., .7, 4.]) # Parameters to learn
@jax.jit
def compute_dist(x, phi):
# Compute values at the discretized points in the domain
v = jnp.exp(-0.5*(x-phi[0])**2 + phi[1]*jnp.sin(x*phi[2]))
dx = x[1:]-x[:-1]
y = v/sum(v[1:]*dx) # Normalize to be a proper distribution.
flow_x = flow(x, y) # Constrain the mean and variance.
J_flow = jnp.diag(jax.jacfwd(flow)(x, y))
flow_y = y / J_flow
return flow_x, flow_y
@jax.jit
def mean(x, y):
dx = x[1:]-x[:-1]
x = x[1:]
y = y[1:]
return sum(x*y*dx)
@jax.jit
def std(x, y):
mu = mean(x,y)
dx = x[1:]-x[:-1]
x = x[1:]
y = y[1:]
return jnp.sqrt(sum(((x-mu)**2)*y*dx))
@jax.jit
def entr(x, y):
dx = x[1:]-x[:-1]
y = y[1:]
return -sum(y*jnp.log(y+1e-8)*dx)
@jax.jit
def flow(x, y):
# Normalize the domain so that the distribution has
# zero mean and identity variance.
return (x - mean(x,y)) / std(x, y)
@jax.jit
def loss(x, phi):
x, y = compute_dist(x, phi)
return -entr(x, y)
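# Illustrative sketch (not part of the original script): after the flow projection,
# the discretized density should have approximately zero mean and unit standard
# deviation, matching the statistics printed in the training loop below.
def _compute_dist_example():
    x = jnp.linspace(-5.0, 5.0, 1000)
    fx, fy = compute_dist(x, phi)
    return mean(fx, fy), std(fx, fy)  # expected to be close to (0., 1.)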
dloss_dphi = jax.jit(jax.grad(loss, argnums=1))
fig, ax = plt.subplots(figsize=(2,1.3), dpi=200)
N = 1000 # Number of discretization points in the domain
# The domain of the unprojected distribution
x_unproj = jnp.linspace(-5.0, 5.0, N)
# Plot the initialization
x, y = compute_dist(x_unproj, phi)
ax.plot(x, y, color='k', alpha=0.5)
print(f'entr={entr(x,y):.2f} (mean={mean(x, y):.2f} std={std(x,y):.2f})')
for t in range(20):
# Take a gradient step with respect to the
# parameters of the distribution
phi -= dloss_dphi(x_unproj, phi)
x, y = compute_dist(x_unproj, phi)
ax.plot(x, y, color='k', alpha=0.2)
print(f'entr={entr(x,y):.2f} (mean={mean(x, y):.2f} std={std(x,y):.2f})')
fig.tight_layout()
ax.set_xticks([])
ax.set_yticks([])
ax.grid(False)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
fname = 'maxent.pdf'
plt.savefig(fname, transparent=True)
os.system(f'pdfcrop {fname} {fname}')
| amortized-optimization-tutorial-main | code/figures/maxent.py |