# Adapted from https://github.com/facebookresearch/detectron2/
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import glob
import multiprocessing as mp
import os
import time
import cv2
import tqdm
from d2go.model_zoo import model_zoo
from d2go.utils.demo_predictor import VisualizationDemo
from detectron2.data.detection_utils import read_image
from detectron2.utils.logger import setup_logger
# constants
WINDOW_NAME = "COCO detections"
def setup_cfg(cfg, args):
# Set score_threshold for builtin models
cfg.MODEL.RETINANET.SCORE_THRESH_TEST = args.confidence_threshold
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.confidence_threshold
cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = (
args.confidence_threshold
)
cfg.freeze()
return cfg
def get_parser():
parser = argparse.ArgumentParser(description="Detectron2 demo for builtin configs")
parser.add_argument(
"--config-file",
default="keypoint_rcnn_fbnetv3a_dsmask_C4.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument(
"--webcam", action="store_true", help="Take inputs from webcam."
)
parser.add_argument("--video-input", help="Path to video file.")
parser.add_argument(
"--input",
nargs="+",
help="A list of space separated input images; "
"or a single glob pattern such as 'directory/*.jpg'",
)
parser.add_argument(
"--output",
help="A file or directory to save output visualizations. "
"If not given, will show output in an OpenCV window.",
)
parser.add_argument(
"--confidence-threshold",
type=float,
default=0.5,
help="Minimum score for instance predictions to be shown",
)
parser.add_argument(
"--opts",
help="Modify config options using the command-line 'KEY VALUE' pairs",
default=[],
nargs=argparse.REMAINDER,
)
return parser
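# Example invocation (a rough sketch; image paths and the output directory are hypothetical):
#   python demo.py --config-file keypoint_rcnn_fbnetv3a_dsmask_C4.yaml \
#       --input input1.jpg input2.jpg --output results/ --confidence-threshold 0.6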
def main():
mp.set_start_method("spawn", force=True)
args = get_parser().parse_args()
setup_logger(name="fvcore")
logger = setup_logger()
logger.info("Arguments: " + str(args))
cfg = model_zoo.get_config(args.config_file)
cfg = setup_cfg(cfg, args)
demo = VisualizationDemo(cfg, args.config_file)
if args.input:
if len(args.input) == 1:
args.input = glob.glob(os.path.expanduser(args.input[0]))
assert args.input, "The input path(s) was not found"
for path in tqdm.tqdm(args.input, disable=not args.output):
# use PIL, to be consistent with evaluation
img = read_image(path, format="BGR")
start_time = time.time()
predictions, visualized_output = demo.run_on_image(img)
logger.info(
"{}: {} in {:.2f}s".format(
path,
"detected {} instances".format(len(predictions["instances"]))
if "instances" in predictions
else "finished",
time.time() - start_time,
)
)
if args.output:
if os.path.isdir(args.output):
assert os.path.isdir(args.output), args.output
out_filename = os.path.join(args.output, os.path.basename(path))
else:
assert (
len(args.input) == 1
), "Please specify a directory with args.output"
out_filename = args.output
visualized_output.save(out_filename)
else:
cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
cv2.imshow(WINDOW_NAME, visualized_output.get_image()[:, :, ::-1])
if cv2.waitKey(0) == 27:
break # esc to quit
elif args.video_input:
video = cv2.VideoCapture(args.video_input)
width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
frames_per_second = video.get(cv2.CAP_PROP_FPS)
num_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
basename = os.path.basename(args.video_input)
if args.output:
if os.path.isdir(args.output):
output_fname = os.path.join(args.output, basename)
output_fname = os.path.splitext(output_fname)[0] + ".mkv"
else:
output_fname = args.output
assert not os.path.isfile(output_fname), output_fname
output_file = cv2.VideoWriter(
filename=output_fname,
                # some installations of OpenCV may not support x264 (due to its license);
                # you can try another format (e.g. MPEG)
fourcc=cv2.VideoWriter_fourcc(*"x264"),
fps=float(frames_per_second),
frameSize=(width, height),
isColor=True,
)
assert os.path.isfile(args.video_input)
for vis_frame in tqdm.tqdm(demo.run_on_video(video), total=num_frames):
if args.output:
output_file.write(vis_frame)
else:
cv2.namedWindow(basename, cv2.WINDOW_NORMAL)
cv2.imshow(basename, vis_frame)
if cv2.waitKey(1) == 27:
break # esc to quit
video.release()
if args.output:
output_file.release()
else:
cv2.destroyAllWindows()
if __name__ == "__main__":
main()
| d2go-main | demo/demo.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import tempfile
import unittest
import d2go.projects.detr.runner as oss_runner
import d2go.runner.default_runner as default_runner
from d2go.utils.testing.data_loader_helper import create_local_dataset
# RUN:
# buck test mobile-vision/d2go/projects_oss/detr:test_detr_runner
def _get_cfg(runner, output_dir, dataset_name):
cfg = runner.get_default_cfg()
cfg.MODEL.DEVICE = "cpu"
cfg.MODEL.META_ARCHITECTURE = "Detr"
cfg.DATASETS.TRAIN = (dataset_name,)
cfg.DATASETS.TEST = (dataset_name,)
cfg.INPUT.MIN_SIZE_TRAIN = (10,)
cfg.INPUT.MIN_SIZE_TEST = (10,)
cfg.SOLVER.MAX_ITER = 5
cfg.SOLVER.STEPS = []
cfg.SOLVER.WARMUP_ITERS = 1
cfg.SOLVER.CHECKPOINT_PERIOD = 1
cfg.SOLVER.IMS_PER_BATCH = 2
cfg.OUTPUT_DIR = output_dir
return cfg
class TestOssRunner(unittest.TestCase):
def test_build_model(self):
with tempfile.TemporaryDirectory() as tmp_dir:
ds_name = create_local_dataset(tmp_dir, 5, 10, 10)
runner = oss_runner.DETRRunner()
cfg = _get_cfg(runner, tmp_dir, ds_name)
model = runner.build_model(cfg)
dl = runner.build_detection_train_loader(cfg)
batch = next(iter(dl))
output = model(batch)
self.assertIsInstance(output, dict)
model.eval()
output = model(batch)
self.assertIsInstance(output, list)
default_runner._close_all_tbx_writers()
def test_runner_train(self):
with tempfile.TemporaryDirectory() as tmp_dir:
ds_name = create_local_dataset(tmp_dir, 5, 10, 10, num_classes=1000)
runner = oss_runner.DETRRunner()
cfg = _get_cfg(runner, tmp_dir, ds_name)
model = runner.build_model(cfg)
runner.do_train(cfg, model, True)
final_model_path = os.path.join(tmp_dir, "model_final.pth")
self.assertTrue(os.path.isfile(final_model_path))
default_runner._close_all_tbx_writers()
| d2go-main | projects_oss/detr/test_detr_runner.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import io
import unittest
from typing import List
import torch
from detr.hub import detr_resnet50, detr_resnet50_panoptic
from detr.models.backbone import Backbone
from detr.models.matcher import HungarianMatcher
from detr.models.position_encoding import (
PositionEmbeddingLearned,
PositionEmbeddingSine,
)
from detr.util import box_ops
from detr.util.misc import nested_tensor_from_tensor_list
from torch import nn, Tensor
# onnxruntime requires python 3.5 or above
try:
import onnxruntime
except ImportError:
onnxruntime = None
class Tester(unittest.TestCase):
def test_box_cxcywh_to_xyxy(self):
t = torch.rand(10, 4)
r = box_ops.box_xyxy_to_cxcywh(box_ops.box_cxcywh_to_xyxy(t))
self.assertLess((t - r).abs().max(), 1e-5)
@staticmethod
def indices_torch2python(indices):
return [(i.tolist(), j.tolist()) for i, j in indices]
def test_hungarian(self):
n_queries, n_targets, n_classes = 100, 15, 91
logits = torch.rand(1, n_queries, n_classes + 1)
boxes = torch.rand(1, n_queries, 4)
tgt_labels = torch.randint(high=n_classes, size=(n_targets,))
tgt_boxes = torch.rand(n_targets, 4)
matcher = HungarianMatcher()
targets = [{"labels": tgt_labels, "boxes": tgt_boxes}]
indices_single = matcher({"pred_logits": logits, "pred_boxes": boxes}, targets)
indices_batched = matcher(
{
"pred_logits": logits.repeat(2, 1, 1),
"pred_boxes": boxes.repeat(2, 1, 1),
},
targets * 2,
)
self.assertEqual(len(indices_single[0][0]), n_targets)
self.assertEqual(len(indices_single[0][1]), n_targets)
self.assertEqual(
self.indices_torch2python(indices_single),
self.indices_torch2python([indices_batched[0]]),
)
self.assertEqual(
self.indices_torch2python(indices_single),
self.indices_torch2python([indices_batched[1]]),
)
# test with empty targets
tgt_labels_empty = torch.randint(high=n_classes, size=(0,))
tgt_boxes_empty = torch.rand(0, 4)
targets_empty = [{"labels": tgt_labels_empty, "boxes": tgt_boxes_empty}]
indices = matcher(
{
"pred_logits": logits.repeat(2, 1, 1),
"pred_boxes": boxes.repeat(2, 1, 1),
},
targets + targets_empty,
)
self.assertEqual(len(indices[1][0]), 0)
indices = matcher(
{
"pred_logits": logits.repeat(2, 1, 1),
"pred_boxes": boxes.repeat(2, 1, 1),
},
targets_empty * 2,
)
self.assertEqual(len(indices[0][0]), 0)
def test_position_encoding_script(self):
m1, m2 = PositionEmbeddingSine(), PositionEmbeddingLearned()
mm1, mm2 = torch.jit.script(m1), torch.jit.script(m2) # noqa
def test_backbone_script(self):
backbone = Backbone("resnet50", True, False, False)
torch.jit.script(backbone) # noqa
def test_model_script_detection(self):
model = detr_resnet50(pretrained=False).eval()
scripted_model = torch.jit.script(model)
x = nested_tensor_from_tensor_list(
[torch.rand(3, 200, 200), torch.rand(3, 200, 250)]
)
out = model(x)
out_script = scripted_model(x)
self.assertTrue(out["pred_logits"].equal(out_script["pred_logits"]))
self.assertTrue(out["pred_boxes"].equal(out_script["pred_boxes"]))
def test_model_script_panoptic(self):
model = detr_resnet50_panoptic(pretrained=False).eval()
scripted_model = torch.jit.script(model)
x = nested_tensor_from_tensor_list(
[torch.rand(3, 200, 200), torch.rand(3, 200, 250)]
)
out = model(x)
out_script = scripted_model(x)
self.assertTrue(out["pred_logits"].equal(out_script["pred_logits"]))
self.assertTrue(out["pred_boxes"].equal(out_script["pred_boxes"]))
self.assertTrue(out["pred_masks"].equal(out_script["pred_masks"]))
def test_model_detection_different_inputs(self):
model = detr_resnet50(pretrained=False).eval()
# support NestedTensor
x = nested_tensor_from_tensor_list(
[torch.rand(3, 200, 200), torch.rand(3, 200, 250)]
)
out = model(x)
self.assertIn("pred_logits", out)
# and 4d Tensor
x = torch.rand(1, 3, 200, 200)
out = model(x)
self.assertIn("pred_logits", out)
# and List[Tensor[C, H, W]]
x = torch.rand(3, 200, 200)
out = model([x])
self.assertIn("pred_logits", out)
    def test_wrapped_model_script_detection(self):
class WrappedDETR(nn.Module):
def __init__(self, model):
super().__init__()
self.model = model
def forward(self, inputs: List[Tensor]):
sample = nested_tensor_from_tensor_list(inputs)
return self.model(sample)
model = detr_resnet50(pretrained=False)
wrapped_model = WrappedDETR(model)
wrapped_model.eval()
scripted_model = torch.jit.script(wrapped_model)
x = [torch.rand(3, 200, 200), torch.rand(3, 200, 250)]
out = wrapped_model(x)
out_script = scripted_model(x)
self.assertTrue(out["pred_logits"].equal(out_script["pred_logits"]))
self.assertTrue(out["pred_boxes"].equal(out_script["pred_boxes"]))
@unittest.skipIf(onnxruntime is None, "ONNX Runtime unavailable")
class ONNXExporterTester(unittest.TestCase):
@classmethod
def setUpClass(cls):
torch.manual_seed(123)
def run_model(
self,
model,
inputs_list,
tolerate_small_mismatch=False,
do_constant_folding=True,
dynamic_axes=None,
output_names=None,
input_names=None,
):
model.eval()
onnx_io = io.BytesIO()
# export to onnx with the first input
torch.onnx.export(
model,
inputs_list[0],
onnx_io,
do_constant_folding=do_constant_folding,
opset_version=12,
dynamic_axes=dynamic_axes,
input_names=input_names,
output_names=output_names,
)
# validate the exported model with onnx runtime
for test_inputs in inputs_list:
with torch.no_grad():
if isinstance(test_inputs, torch.Tensor) or isinstance(
test_inputs, list
):
test_inputs = (nested_tensor_from_tensor_list(test_inputs),)
                test_outputs = model(*test_inputs)
                if isinstance(test_outputs, torch.Tensor):
                    test_outputs = (test_outputs,)
self.ort_validate(
                onnx_io, test_inputs, test_outputs, tolerate_small_mismatch
)
def ort_validate(self, onnx_io, inputs, outputs, tolerate_small_mismatch=False):
inputs, _ = torch.jit._flatten(inputs)
outputs, _ = torch.jit._flatten(outputs)
def to_numpy(tensor):
if tensor.requires_grad:
return tensor.detach().cpu().numpy()
else:
return tensor.cpu().numpy()
inputs = list(map(to_numpy, inputs))
outputs = list(map(to_numpy, outputs))
ort_session = onnxruntime.InferenceSession(onnx_io.getvalue())
# compute onnxruntime output prediction
ort_inputs = dict(
(ort_session.get_inputs()[i].name, inpt) for i, inpt in enumerate(inputs)
) # noqa: C402
ort_outs = ort_session.run(None, ort_inputs)
for i in range(0, len(outputs)):
try:
torch.testing.assert_allclose(
outputs[i], ort_outs[i], rtol=1e-03, atol=1e-05
)
except AssertionError as error:
if tolerate_small_mismatch:
self.assertIn("(0.00%)", str(error), str(error))
else:
raise
def test_model_onnx_detection(self):
model = detr_resnet50(pretrained=False).eval()
dummy_image = torch.ones(1, 3, 800, 800) * 0.3
model(dummy_image)
# Test exported model on images of different size, or dummy input
self.run_model(
model,
[(torch.rand(1, 3, 750, 800),)],
input_names=["inputs"],
output_names=["pred_logits", "pred_boxes"],
tolerate_small_mismatch=True,
)
@unittest.skip("CI doesn't have enough memory")
def test_model_onnx_detection_panoptic(self):
model = detr_resnet50_panoptic(pretrained=False).eval()
dummy_image = torch.ones(1, 3, 800, 800) * 0.3
model(dummy_image)
# Test exported model on images of different size, or dummy input
self.run_model(
model,
[(torch.rand(1, 3, 750, 800),)],
input_names=["inputs"],
output_names=["pred_logits", "pred_boxes", "pred_masks"],
tolerate_small_mismatch=True,
)
if __name__ == "__main__":
unittest.main()
| d2go-main | projects_oss/detr/test_all.py |
import logging
import unittest
import torch
from d2go.config import CfgNode as CN
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.modeling import BACKBONE_REGISTRY
from detectron2.utils.file_io import PathManager
from detr.backbone.deit import add_deit_backbone_config
from detr.backbone.pit import add_pit_backbone_config
logger = logging.getLogger(__name__)
# avoid testing on sandcastle due to access to manifold
USE_CUDA = torch.cuda.device_count() > 0
class TestTransformerBackbone(unittest.TestCase):
@unittest.skipIf(
not USE_CUDA, "avoid testing on sandcastle due to access to manifold"
)
def test_deit_model(self):
cfg = CN()
cfg.MODEL = CN()
add_deit_backbone_config(cfg)
build_model = BACKBONE_REGISTRY.get("deit_d2go_model_wrapper")
deit_models = {
"8X-7-RM_4": 170,
"DeiT-Tiny": 224,
"DeiT-Small": 224,
"32X-1-RM_2": 221,
"8X-7": 160,
"32X-1": 256,
}
deit_model_weights = {
"8X-7-RM_4": "manifold://mobile_vision_workflows/tree/workflows/kyungminkim/20210511/deit_[model]deit_scaling_distill_[bs]128_[mcfg]8X-7-RM_4_.OIXarYpbZw/checkpoint_best.pth",
"DeiT-Tiny": "manifold://mobile_vision_workflows/tree/workflows/cl114/DeiT-official-ckpt/deit_tiny_distilled_patch16_224-b40b3cf7.pth",
"DeiT-Small": "manifold://mobile_vision_workflows/tree/workflows/cl114/DeiT-official-ckpt/deit_small_distilled_patch16_224-649709d9.pth",
"32X-1-RM_2": "manifold://mobile_vision_workflows/tree/workflows/kyungminkim/20210511/deit_[model]deit_scaling_distill_[bs]64_[mcfg]32X-1-RM_2_.xusuFyNMdD/checkpoint_best.pth",
"8X-7": "manifold://mobile_vision_workflows/tree/workflows/cl114/scaled_best/8X-7.pth",
"32X-1": "manifold://mobile_vision_workflows/tree/workflows/cl114/scaled_best/32X-1.pth",
}
for model_name, org_size in deit_models.items():
print("model_name", model_name)
cfg.MODEL.DEIT.MODEL_CONFIG = f"manifold://mobile_vision_workflows/tree/workflows/wbc/deit/model_cfgs/{model_name}.json"
cfg.MODEL.DEIT.WEIGHTS = deit_model_weights[model_name]
model = build_model(cfg, None)
model.eval()
for input_size_h in [org_size, 192, 224, 256, 320]:
for input_size_w in [org_size, 192, 224, 256, 320]:
x = torch.rand(1, 3, input_size_h, input_size_w)
y = model(x)
print(f"x.shape: {x.shape}, y.shape: {y.shape}")
@unittest.skipIf(
not USE_CUDA, "avoid testing on sandcastle due to access to manifold"
)
def test_pit_model(self):
cfg = CN()
cfg.MODEL = CN()
add_pit_backbone_config(cfg)
build_model = BACKBONE_REGISTRY.get("pit_d2go_model_wrapper")
pit_models = {
"pit_ti_ours": 160,
"pit_ti": 224,
"pit_s_ours_v1": 256,
"pit_s": 224,
}
pit_model_weights = {
"pit_ti_ours": "manifold://mobile_vision_workflows/tree/workflows/kyungminkim/20210515/deit_[model]pit_scalable_distilled_[bs]128_[mcfg]pit_ti_ours_.HImkjNCpJI/checkpoint_best.pth",
"pit_ti": "manifold://mobile_vision_workflows/tree/workflows/kyungminkim/20210515/deit_[model]pit_scalable_distilled_[bs]128_[mcfg]pit_ti_.QJeFNUfYOD/checkpoint_best.pth",
"pit_s_ours_v1": "manifold://mobile_vision_workflows/tree/workflows/kyungminkim/20210515/deit_[model]pit_scalable_distilled_[bs]64_[mcfg]pit_s_ours_v1_.LXdwyBDaNY/checkpoint_best.pth",
"pit_s": "manifold://mobile_vision_workflows/tree/workflows/kyungminkim/20210515/deit_[model]pit_scalable_distilled_[bs]128_[mcfg]pit_s_.zReQLPOuJe/checkpoint_best.pth",
}
for model_name, org_size in pit_models.items():
print("model_name", model_name)
cfg.MODEL.PIT.MODEL_CONFIG = f"manifold://mobile_vision_workflows/tree/workflows/wbc/deit/model_cfgs/{model_name}.json"
cfg.MODEL.PIT.WEIGHTS = pit_model_weights[model_name]
cfg.MODEL.PIT.DILATED = True
model = build_model(cfg, None)
model.eval()
for input_size_h in [org_size, 192, 224, 256, 320]:
for input_size_w in [org_size, 192, 224, 256, 320]:
x = torch.rand(1, 3, input_size_h, input_size_w)
y = model(x)
print(f"x.shape: {x.shape}, y.shape: {y.shape}")
| d2go-main | projects_oss/detr/test_deit_backbone.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import torch
from d2go.runner import create_runner
from detr.util.misc import nested_tensor_from_tensor_list
from fvcore.nn import flop_count_table, FlopCountAnalysis
class Tester(unittest.TestCase):
@staticmethod
def _set_detr_cfg(cfg, enc_layers, dec_layers, num_queries, dim_feedforward):
cfg.MODEL.META_ARCHITECTURE = "Detr"
cfg.MODEL.DETR.NUM_OBJECT_QUERIES = num_queries
cfg.MODEL.DETR.ENC_LAYERS = enc_layers
cfg.MODEL.DETR.DEC_LAYERS = dec_layers
cfg.MODEL.DETR.DEEP_SUPERVISION = False
cfg.MODEL.DETR.DIM_FEEDFORWARD = dim_feedforward # 2048
def _assert_model_output(self, model, scripted_model):
x = nested_tensor_from_tensor_list(
[torch.rand(3, 200, 200), torch.rand(3, 200, 250)]
)
out = model(x)
out_script = scripted_model(x)
self.assertTrue(out["pred_logits"].equal(out_script["pred_logits"]))
self.assertTrue(out["pred_boxes"].equal(out_script["pred_boxes"]))
def test_detr_res50_export(self):
runner = create_runner("d2go.projects.detr.runner.DETRRunner")
cfg = runner.get_default_cfg()
cfg.MODEL.DEVICE = "cpu"
# DETR
self._set_detr_cfg(cfg, 6, 6, 100, 2048)
# backbone
cfg.MODEL.BACKBONE.NAME = "build_resnet_backbone"
cfg.MODEL.RESNETS.DEPTH = 50
cfg.MODEL.RESNETS.STRIDE_IN_1X1 = False
cfg.MODEL.RESNETS.OUT_FEATURES = ["res2", "res3", "res4", "res5"]
# build model
model = runner.build_model(cfg).eval()
model = model.detr
scripted_model = torch.jit.script(model)
self._assert_model_output(model, scripted_model)
def test_detr_fbnet_export(self):
runner = create_runner("d2go.projects.detr.runner.DETRRunner")
cfg = runner.get_default_cfg()
cfg.MODEL.DEVICE = "cpu"
# DETR
self._set_detr_cfg(cfg, 3, 3, 50, 256)
# backbone
cfg.MODEL.BACKBONE.NAME = "FBNetV2C4Backbone"
cfg.MODEL.FBNET_V2.ARCH = "FBNetV3_A_dsmask_C5"
cfg.MODEL.FBNET_V2.WIDTH_DIVISOR = 8
cfg.MODEL.FBNET_V2.OUT_FEATURES = ["trunk4"]
# build model
model = runner.build_model(cfg).eval()
model = model.detr
print(model)
scripted_model = torch.jit.script(model)
self._assert_model_output(model, scripted_model)
# print flops
table = flop_count_table(FlopCountAnalysis(model, ([torch.rand(3, 224, 320)],)))
print(table)
| d2go-main | projects_oss/detr/test_detr_export.py |
# ------------------------------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------------------------------
# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
# ------------------------------------------------------------------------------------------------
import glob
import os
import torch
from setuptools import find_packages, setup
from torch.utils.cpp_extension import CppExtension, CUDA_HOME, CUDAExtension
requirements = ["torch", "torchvision"]
def get_extensions():
this_dir = os.path.dirname(os.path.abspath(__file__))
extensions_dir = os.path.join(this_dir, "detr/src")
print(f"extensions_dir: {extensions_dir}")
main_file = glob.glob(os.path.join(extensions_dir, "*.cpp"))
source_cpu = glob.glob(os.path.join(extensions_dir, "cpu", "*.cpp"))
source_cuda = glob.glob(os.path.join(extensions_dir, "cuda", "*.cu"))
print(f"main_file: {main_file}")
print(f"source_cpu: {source_cpu}")
print(f"source_cuda: {source_cuda}")
sources = main_file + source_cpu
extension = CppExtension
extra_compile_args = {"cxx": []}
define_macros = []
if torch.cuda.is_available() and CUDA_HOME is not None:
extension = CUDAExtension
sources += source_cuda
define_macros += [("WITH_CUDA", None)]
extra_compile_args["nvcc"] = [
"-DCUDA_HAS_FP16=1",
"-D__CUDA_NO_HALF_OPERATORS__",
"-D__CUDA_NO_HALF_CONVERSIONS__",
"-D__CUDA_NO_HALF2_OPERATORS__",
]
else:
raise NotImplementedError("Cuda is not availabel")
sources = [os.path.join(extensions_dir, s) for s in sources]
include_dirs = [extensions_dir]
ext_modules = [
extension(
"detr._C",
sources,
include_dirs=include_dirs,
define_macros=define_macros,
extra_compile_args=extra_compile_args,
)
]
return ext_modules
if __name__ == "__main__":
setup(
name="detr",
url="https://github.com/facebookresearch/d2go/detr",
license="Apache-2.0",
packages=find_packages(exclude=["test_all.py"]),
package_data={"detr": ["LICENSE"]},
ext_modules=get_extensions(),
cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
)
| d2go-main | projects_oss/detr/setup.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------------------------------
# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
# ------------------------------------------------------------------------------------------------
import io
import unittest
from functools import wraps
import torch
from detr.functions.ms_deform_attn_func import (
ms_deform_attn_core_pytorch,
MSDeformAttnFunction,
)
from torch.autograd import gradcheck
USE_CUDA = torch.cuda.device_count() > 0
N, M, D = 1, 2, 2
Lq, L, P = 2, 2, 2
if USE_CUDA:
shapes = torch.as_tensor([(6, 4), (3, 2)], dtype=torch.long).cuda()
level_start_index = torch.cat(
(shapes.new_zeros((1,)), shapes.prod(1).cumsum(0)[:-1])
)
S = sum([(H * W).item() for H, W in shapes])
torch.manual_seed(3)
class Tester(unittest.TestCase):
@unittest.skipIf(not USE_CUDA, "CI does not have gpu")
@torch.no_grad()
def test_forward_equal_with_pytorch_double(self):
value = torch.rand(N, S, M, D).cuda() * 0.01
sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda()
attention_weights = torch.rand(N, Lq, M, L, P).cuda() + 1e-5
attention_weights /= attention_weights.sum(-1, keepdim=True).sum(
-2, keepdim=True
)
im2col_step = 2
output_pytorch = (
ms_deform_attn_core_pytorch(
value.double(),
shapes,
sampling_locations.double(),
attention_weights.double(),
)
.detach()
.cpu()
)
output_cuda = (
MSDeformAttnFunction.apply(
value.double(),
shapes,
level_start_index,
sampling_locations.double(),
attention_weights.double(),
im2col_step,
)
.detach()
.cpu()
)
fwdok = torch.allclose(output_cuda, output_pytorch)
max_abs_err = (output_cuda - output_pytorch).abs().max()
max_rel_err = (
(output_cuda - output_pytorch).abs() / output_pytorch.abs()
).max()
print(
f"* {fwdok} test_forward_equal_with_pytorch_double: max_abs_err {max_abs_err:.2e} max_rel_err {max_rel_err:.2e}"
)
@unittest.skipIf(not USE_CUDA, "CI does not have gpu")
@torch.no_grad()
def test_forward_equal_with_pytorch_float(self):
value = torch.rand(N, S, M, D).cuda() * 0.01
sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda()
attention_weights = torch.rand(N, Lq, M, L, P).cuda() + 1e-5
attention_weights /= attention_weights.sum(-1, keepdim=True).sum(
-2, keepdim=True
)
im2col_step = 2
output_pytorch = (
ms_deform_attn_core_pytorch(
value, shapes, sampling_locations, attention_weights
)
.detach()
.cpu()
)
output_cuda = (
MSDeformAttnFunction.apply(
value,
shapes,
level_start_index,
sampling_locations,
attention_weights,
im2col_step,
)
.detach()
.cpu()
)
fwdok = torch.allclose(output_cuda, output_pytorch, rtol=1e-2, atol=1e-3)
max_abs_err = (output_cuda - output_pytorch).abs().max()
max_rel_err = (
(output_cuda - output_pytorch).abs() / output_pytorch.abs()
).max()
print(
f"* {fwdok} test_forward_equal_with_pytorch_float: max_abs_err {max_abs_err:.2e} max_rel_err {max_rel_err:.2e}"
)
@unittest.skipIf(not USE_CUDA, "CI does not have gpu")
def test_gradient_numerical(
self, channels=4, grad_value=True, grad_sampling_loc=True, grad_attn_weight=True
):
value = torch.rand(N, S, M, channels).cuda() * 0.01
sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda()
attention_weights = torch.rand(N, Lq, M, L, P).cuda() + 1e-5
attention_weights /= attention_weights.sum(-1, keepdim=True).sum(
-2, keepdim=True
)
im2col_step = 2
func = MSDeformAttnFunction.apply
value.requires_grad = grad_value
sampling_locations.requires_grad = grad_sampling_loc
attention_weights.requires_grad = grad_attn_weight
gradok = gradcheck(
func,
(
value.double(),
shapes,
level_start_index,
sampling_locations.double(),
attention_weights.double(),
im2col_step,
),
)
print(f"* {gradok} test_gradient_numerical(D={channels})")
if __name__ == "__main__":
unittest.main()
| d2go-main | projects_oss/detr/test_op.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import datetime
import json
import logging
import os
import random
import time
from datetime import timedelta
from pathlib import Path
import detr.util.misc as utils
import numpy as np
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from detectron2.engine.launch import _find_free_port
from detectron2.utils.file_io import PathManager
from detr import datasets
from detr.datasets import build_dataset, get_coco_api_from_dataset
from detr.engine import evaluate, train_one_epoch
from detr.models import build_model
from torch.utils.data import DataLoader, DistributedSampler
DEFAULT_TIMEOUT = timedelta(minutes=30)
def get_args_parser():
parser = argparse.ArgumentParser("Set transformer detector", add_help=False)
parser.add_argument("--lr", default=1e-4, type=float)
parser.add_argument("--lr_backbone", default=1e-5, type=float)
parser.add_argument("--batch_size", default=2, type=int)
parser.add_argument("--weight_decay", default=1e-4, type=float)
parser.add_argument("--epochs", default=300, type=int)
parser.add_argument("--lr_drop", default=200, type=int)
parser.add_argument(
"--clip_max_norm", default=0.1, type=float, help="gradient clipping max norm"
)
# Model parameters
parser.add_argument(
"--frozen_weights",
type=str,
default=None,
help="Path to the pretrained model. If set, only the mask head will be trained",
)
# * Backbone
parser.add_argument(
"--backbone",
default="resnet50",
type=str,
help="Name of the convolutional backbone to use",
)
parser.add_argument(
"--dilation",
action="store_true",
help="If true, we replace stride with dilation in the last convolutional block (DC5)",
)
parser.add_argument(
"--position_embedding",
default="sine",
type=str,
choices=("sine", "learned"),
help="Type of positional embedding to use on top of the image features",
)
# * Transformer
parser.add_argument(
"--enc_layers",
default=6,
type=int,
help="Number of encoding layers in the transformer",
)
parser.add_argument(
"--dec_layers",
default=6,
type=int,
help="Number of decoding layers in the transformer",
)
parser.add_argument(
"--dim_feedforward",
default=2048,
type=int,
help="Intermediate size of the feedforward layers in the transformer blocks",
)
parser.add_argument(
"--hidden_dim",
default=256,
type=int,
help="Size of the embeddings (dimension of the transformer)",
)
parser.add_argument(
"--dropout", default=0.1, type=float, help="Dropout applied in the transformer"
)
parser.add_argument(
"--nheads",
default=8,
type=int,
help="Number of attention heads inside the transformer's attentions",
)
parser.add_argument(
"--num_queries", default=100, type=int, help="Number of query slots"
)
parser.add_argument("--pre_norm", action="store_true")
# * Segmentation
parser.add_argument(
"--masks",
action="store_true",
help="Train segmentation head if the flag is provided",
)
# Loss
parser.add_argument(
"--no_aux_loss",
dest="aux_loss",
action="store_false",
help="Disables auxiliary decoding losses (loss at each layer)",
)
# * Matcher
parser.add_argument(
"--set_cost_class",
default=1,
type=float,
help="Class coefficient in the matching cost",
)
parser.add_argument(
"--set_cost_bbox",
default=5,
type=float,
help="L1 box coefficient in the matching cost",
)
parser.add_argument(
"--set_cost_giou",
default=2,
type=float,
help="giou box coefficient in the matching cost",
)
# * Loss coefficients
parser.add_argument("--mask_loss_coef", default=1, type=float)
parser.add_argument("--dice_loss_coef", default=1, type=float)
parser.add_argument("--bbox_loss_coef", default=5, type=float)
parser.add_argument("--giou_loss_coef", default=2, type=float)
parser.add_argument(
"--eos_coef",
default=0.1,
type=float,
help="Relative classification weight of the no-object class",
)
# dataset parameters
parser.add_argument("--dataset_file", default="coco")
parser.add_argument(
"--ade_path",
type=str,
default="manifold://winvision/tree/detectron2/ADEChallengeData2016/",
)
parser.add_argument(
"--coco_path", type=str, default="manifold://fair_vision_data/tree/"
)
parser.add_argument(
"--coco_panoptic_path", type=str, default="manifold://fair_vision_data/tree/"
)
parser.add_argument("--remove_difficult", action="store_true")
parser.add_argument(
"--output-dir", default="", help="path where to save, empty for no saving"
)
parser.add_argument(
"--device", default="cuda", help="device to use for training / testing"
)
parser.add_argument("--seed", default=42, type=int)
parser.add_argument("--resume", default="", help="resume from checkpoint")
parser.add_argument(
"--start_epoch", default=0, type=int, metavar="N", help="start epoch"
)
parser.add_argument("--eval", action="store_true")
parser.add_argument("--num_workers", default=2, type=int)
# distributed training parameters
parser.add_argument(
"--num-gpus", type=int, default=8, help="number of gpus *per machine*"
)
parser.add_argument(
"--num-machines", type=int, default=1, help="total number of machines"
)
parser.add_argument(
"--machine-rank",
type=int,
default=0,
help="the rank of this machine (unique per machine)",
)
parser.add_argument(
"--dist-url", default="env://", help="url used to set up distributed training"
)
return parser
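# Example invocation (a sketch only; paths are hypothetical and --coco_path must point to a readable COCO root):
#   python main.py --num-gpus 8 --batch_size 2 --epochs 300 \
#       --coco_path /data/coco --output-dir ./detr_output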
def main(args):
# utils.init_distributed_mode(args)
if args.frozen_weights is not None:
assert args.masks, "Frozen training is meant for segmentation only"
print(args)
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
model, criterion, postprocessors = build_model(args)
model.to(device)
model_without_ddp = model
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
model_without_ddp = model.module
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("number of params:", n_parameters)
param_dicts = [
{
"params": [
p
for n, p in model_without_ddp.named_parameters()
if "backbone" not in n and p.requires_grad
]
},
{
"params": [
p
for n, p in model_without_ddp.named_parameters()
if "backbone" in n and p.requires_grad
],
"lr": args.lr_backbone,
},
]
optimizer = torch.optim.AdamW(
param_dicts, lr=args.lr, weight_decay=args.weight_decay
)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_drop)
dataset_train = build_dataset(image_set="train", args=args)
dataset_val = build_dataset(image_set="val", args=args)
if args.distributed:
sampler_train = DistributedSampler(dataset_train)
sampler_val = DistributedSampler(dataset_val, shuffle=False)
else:
sampler_train = torch.utils.data.RandomSampler(dataset_train)
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
batch_sampler_train = torch.utils.data.BatchSampler(
sampler_train, args.batch_size, drop_last=True
)
data_loader_train = DataLoader(
dataset_train,
batch_sampler=batch_sampler_train,
collate_fn=utils.collate_fn,
num_workers=args.num_workers,
)
data_loader_val = DataLoader(
dataset_val,
args.batch_size,
sampler=sampler_val,
drop_last=False,
collate_fn=utils.collate_fn,
num_workers=args.num_workers,
)
if args.dataset_file == "coco_panoptic":
# We also evaluate AP during panoptic training, on original coco DS
coco_val = datasets.coco.build("val", args)
base_ds = get_coco_api_from_dataset(coco_val)
else:
base_ds = get_coco_api_from_dataset(dataset_val)
if args.frozen_weights is not None:
checkpoint = torch.load(args.frozen_weights, map_location="cpu")
model_without_ddp.detr.load_state_dict(checkpoint["model"])
if args.resume:
if args.resume.startswith("https"):
checkpoint = torch.hub.load_state_dict_from_url(
args.resume, map_location="cpu", check_hash=True
)
else:
checkpoint = torch.load(args.resume, map_location="cpu")
model_without_ddp.load_state_dict(checkpoint["model"])
if (
not args.eval
and "optimizer" in checkpoint
and "lr_scheduler" in checkpoint
and "epoch" in checkpoint
):
optimizer.load_state_dict(checkpoint["optimizer"])
lr_scheduler.load_state_dict(checkpoint["lr_scheduler"])
args.start_epoch = checkpoint["epoch"] + 1
if args.eval:
test_stats, coco_evaluator = evaluate(
model,
criterion,
postprocessors,
data_loader_val,
base_ds,
device,
args.output_dir,
)
if args.output_dir:
with PathManager.open(os.path.join(args.output_dir, "eval.pth"), "wb") as f:
utils.save_on_master(coco_evaluator.coco_eval["bbox"].eval, f)
return
print("Start training")
start_time = time.time()
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
sampler_train.set_epoch(epoch)
train_stats = train_one_epoch(
model,
criterion,
data_loader_train,
optimizer,
device,
epoch,
args.clip_max_norm,
)
lr_scheduler.step()
if args.output_dir:
checkpoint_paths = [] # os.path.join(args.output_dir, 'checkpoint.pth')]
# extra checkpoint before LR drop and every 10 epochs
if (epoch + 1) % args.lr_drop == 0 or (epoch + 1) % 10 == 0:
checkpoint_paths.append(
os.path.join(args.output_dir, f"checkpoint{epoch:04}.pth")
)
for checkpoint_path in checkpoint_paths:
with PathManager.open(checkpoint_path, "wb") as f:
if args.gpu == 0 and args.machine_rank == 0:
utils.save_on_master(
{
"model": model_without_ddp.state_dict(),
"optimizer": optimizer.state_dict(),
"lr_scheduler": lr_scheduler.state_dict(),
"epoch": epoch,
"args": args,
},
f,
)
test_stats, coco_evaluator = evaluate(
model,
criterion,
postprocessors,
data_loader_val,
base_ds,
device,
args.output_dir,
)
log_stats = {
**{f"train_{k}": v for k, v in train_stats.items()},
**{f"test_{k}": v for k, v in test_stats.items()},
"epoch": epoch,
"n_parameters": n_parameters,
}
if args.output_dir and utils.is_main_process():
with PathManager.open(os.path.join(args.output_dir, "log.txt"), "w") as f:
f.write(json.dumps(log_stats) + "\n")
# for evaluation logs
if coco_evaluator is not None:
PathManager.mkdirs(os.path.join(args.output_dir, "eval"))
if "bbox" in coco_evaluator.coco_eval:
filenames = ["latest.pth"]
if epoch % 50 == 0:
filenames.append(f"{epoch:03}.pth")
for name in filenames:
with PathManager.open(
os.path.join(args.output_dir, "eval", name), "wb"
) as f:
torch.save(coco_evaluator.coco_eval["bbox"].eval, f)
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print("Training time {}".format(total_time_str))
def launch(
main_func,
num_gpus_per_machine,
num_machines=1,
machine_rank=0,
dist_url=None,
args=(),
timeout=DEFAULT_TIMEOUT,
):
"""
Launch multi-gpu or distributed training.
This function must be called on all machines involved in the training.
It will spawn child processes (defined by ``num_gpus_per_machine``) on each machine.
Args:
main_func: a function that will be called by `main_func(*args)`
num_gpus_per_machine (int): number of GPUs per machine
num_machines (int): the total number of machines
machine_rank (int): the rank of this machine
dist_url (str): url to connect to for distributed jobs, including protocol
e.g. "tcp://127.0.0.1:8686".
Can be set to "auto" to automatically select a free port on localhost
timeout (timedelta): timeout of the distributed workers
args (tuple): arguments passed to main_func
"""
world_size = num_machines * num_gpus_per_machine
args[0].distributed = world_size > 1
if args[0].distributed:
# https://github.com/pytorch/pytorch/pull/14391
# TODO prctl in spawned processes
if dist_url == "auto":
assert (
num_machines == 1
), "dist_url=auto not supported in multi-machine jobs."
port = _find_free_port()
dist_url = f"tcp://127.0.0.1:{port}"
if num_machines > 1 and dist_url.startswith("file://"):
logger = logging.getLogger(__name__)
logger.warning(
"file:// is not a reliable init_method in multi-machine jobs. Prefer tcp://"
)
mp.spawn(
_distributed_worker,
nprocs=num_gpus_per_machine,
args=(
main_func,
world_size,
num_gpus_per_machine,
machine_rank,
dist_url,
args,
timeout,
),
daemon=False,
)
else:
main_func(*args)
def synchronize():
"""
Helper function to synchronize (barrier) among all processes when
using distributed training
"""
if not dist.is_available():
return
if not dist.is_initialized():
return
world_size = dist.get_world_size()
if world_size == 1:
return
dist.barrier()
def _distributed_worker(
local_rank,
main_func,
world_size,
num_gpus_per_machine,
machine_rank,
dist_url,
args,
timeout=DEFAULT_TIMEOUT,
):
assert (
torch.cuda.is_available()
), "cuda is not available. Please check your installation."
global_rank = machine_rank * num_gpus_per_machine + local_rank
try:
dist.init_process_group(
backend="NCCL",
init_method=dist_url,
world_size=world_size,
rank=global_rank,
timeout=timeout,
)
except Exception as e:
logger = logging.getLogger(__name__)
logger.error("Process group URL: {}".format(dist_url))
raise e
# synchronize is needed here to prevent a possible timeout after calling init_process_group
# See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172
synchronize()
assert num_gpus_per_machine <= torch.cuda.device_count()
torch.cuda.set_device(local_rank)
args[0].gpu = local_rank
# Setup the local process group (which contains ranks within the same machine)
# assert comm._LOCAL_PROCESS_GROUP is None
# num_machines = world_size // num_gpus_per_machine
# for i in range(num_machines):
# ranks_on_i = list(range(i * num_gpus_per_machine, (i + 1) * num_gpus_per_machine))
# pg = dist.new_group(ranks_on_i)
# if i == machine_rank:
# comm._LOCAL_PROCESS_GROUP = pg
main_func(*args)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
"DETR training and evaluation script", parents=[get_args_parser()]
)
args = parser.parse_args()
if args.output_dir:
PathManager.mkdirs(args.output_dir)
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
| d2go-main | projects_oss/detr/main.py |
#!/usr/bin/env python3
from d2go.config import CfgNode as CN
from d2go.data.dataset_mappers.build import D2GO_DATA_MAPPER_REGISTRY
from d2go.data.dataset_mappers.d2go_dataset_mapper import D2GoDatasetMapper
from d2go.runner import GeneralizedRCNNRunner
from detr.backbone.deit import add_deit_backbone_config
from detr.backbone.pit import add_pit_backbone_config
from detr.d2 import add_detr_config, DetrDatasetMapper
@D2GO_DATA_MAPPER_REGISTRY.register()
class DETRDatasetMapper(DetrDatasetMapper, D2GoDatasetMapper):
def __init__(self, cfg, is_train=True, image_loader=None, tfm_gens=None):
self.image_loader = None
self.backfill_size = False
self.retry = 3
self.catch_exception = True
self._error_count = 0
self._total_counts = 0
self._error_types = {}
super().__init__(cfg, is_train)
def _original_call(self, dataset_dict):
return DetrDatasetMapper.__call__(self, dataset_dict)
def __call__(self, dataset_dict):
return D2GoDatasetMapper.__call__(self, dataset_dict)
class DETRRunner(GeneralizedRCNNRunner):
@classmethod
def get_default_cfg(cls):
_C = super().get_default_cfg()
add_detr_config(_C)
add_deit_backbone_config(_C)
add_pit_backbone_config(_C)
return _C
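# Usage sketch (mirrors test_detr_runner.py):
#   runner = DETRRunner()
#   cfg = runner.get_default_cfg()
#   model = runner.build_model(cfg)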
| d2go-main | projects_oss/detr/detr/runner.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from . import datasets, models, util
__all__ = ["models", "util", "datasets"]
| d2go-main | projects_oss/detr/detr/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
from detr.models.backbone import Backbone, Joiner
from detr.models.detr import DETR, PostProcess
from detr.models.position_encoding import PositionEmbeddingSine
from detr.models.segmentation import DETRsegm, PostProcessPanoptic
from detr.models.transformer import Transformer
dependencies = ["torch", "torchvision"]
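# These factory functions double as torch.hub entry points (the `dependencies` list above follows the
# torch.hub convention). A usage sketch; the repo path shown matches the upstream DETR hub and is
# illustrative for this copy of the code:
#   model = torch.hub.load("facebookresearch/detr", "detr_resnet50", pretrained=True)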
def _make_detr(backbone_name: str, dilation=False, num_classes=91, mask=False):
hidden_dim = 256
backbone = Backbone(
backbone_name, train_backbone=True, return_interm_layers=mask, dilation=dilation
)
pos_enc = PositionEmbeddingSine(hidden_dim // 2, normalize=True)
backbone_with_pos_enc = Joiner(backbone, pos_enc)
backbone_with_pos_enc.num_channels = backbone.num_channels
transformer = Transformer(d_model=hidden_dim, return_intermediate_dec=True)
detr = DETR(
backbone_with_pos_enc, transformer, num_classes=num_classes, num_queries=100
)
if mask:
return DETRsegm(detr)
return detr
def detr_resnet50(pretrained=False, num_classes=91, return_postprocessor=False):
"""
DETR R50 with 6 encoder and 6 decoder layers.
Achieves 42/62.4 AP/AP50 on COCO val5k.
"""
model = _make_detr("resnet50", dilation=False, num_classes=num_classes)
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/detr/detr-r50-e632da11.pth",
map_location="cpu",
check_hash=True,
)
model.load_state_dict(checkpoint["model"])
if return_postprocessor:
return model, PostProcess()
return model
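# A minimal usage sketch (offline, so pretrained=False; output shapes assume the default 100 queries
# and 91 classes, i.e. 92 logits including the no-object class):
#   model = detr_resnet50(pretrained=False).eval()
#   out = model(torch.rand(1, 3, 800, 800))
#   # out["pred_logits"]: (1, 100, 92); out["pred_boxes"]: (1, 100, 4), normalized cxcywh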
def detr_resnet50_dc5(pretrained=False, num_classes=91, return_postprocessor=False):
"""
DETR-DC5 R50 with 6 encoder and 6 decoder layers.
The last block of ResNet-50 has dilation to increase
output resolution.
Achieves 43.3/63.1 AP/AP50 on COCO val5k.
"""
model = _make_detr("resnet50", dilation=True, num_classes=num_classes)
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/detr/detr-r50-dc5-f0fb7ef5.pth",
map_location="cpu",
check_hash=True,
)
model.load_state_dict(checkpoint["model"])
if return_postprocessor:
return model, PostProcess()
return model
def detr_resnet101(pretrained=False, num_classes=91, return_postprocessor=False):
"""
    DETR R101 with 6 encoder and 6 decoder layers.
Achieves 43.5/63.8 AP/AP50 on COCO val5k.
"""
model = _make_detr("resnet101", dilation=False, num_classes=num_classes)
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/detr/detr-r101-2c7b67e5.pth",
map_location="cpu",
check_hash=True,
)
model.load_state_dict(checkpoint["model"])
if return_postprocessor:
return model, PostProcess()
return model
def detr_resnet101_dc5(pretrained=False, num_classes=91, return_postprocessor=False):
"""
DETR-DC5 R101 with 6 encoder and 6 decoder layers.
The last block of ResNet-101 has dilation to increase
output resolution.
Achieves 44.9/64.7 AP/AP50 on COCO val5k.
"""
model = _make_detr("resnet101", dilation=True, num_classes=num_classes)
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/detr/detr-r101-dc5-a2e86def.pth",
map_location="cpu",
check_hash=True,
)
model.load_state_dict(checkpoint["model"])
if return_postprocessor:
return model, PostProcess()
return model
def detr_resnet50_panoptic(
pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False
):
"""
DETR R50 with 6 encoder and 6 decoder layers.
Achieves 43.4 PQ on COCO val5k.
threshold is the minimum confidence required for keeping segments in the prediction
"""
model = _make_detr("resnet50", dilation=False, num_classes=num_classes, mask=True)
is_thing_map = {i: i <= 90 for i in range(250)}
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/detr/detr-r50-panoptic-00ce5173.pth",
map_location="cpu",
check_hash=True,
)
model.load_state_dict(checkpoint["model"])
if return_postprocessor:
return model, PostProcessPanoptic(is_thing_map, threshold=threshold)
return model
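# Usage sketch: with return_postprocessor=True the call also yields the PostProcessPanoptic helper, e.g.
#   model, postprocessor = detr_resnet50_panoptic(pretrained=False, return_postprocessor=True)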
def detr_resnet50_dc5_panoptic(
pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False
):
"""
DETR-DC5 R50 with 6 encoder and 6 decoder layers.
The last block of ResNet-50 has dilation to increase
output resolution.
    Achieves 44.6 PQ on COCO val5k.
threshold is the minimum confidence required for keeping segments in the prediction
"""
model = _make_detr("resnet50", dilation=True, num_classes=num_classes, mask=True)
is_thing_map = {i: i <= 90 for i in range(250)}
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/detr/detr-r50-dc5-panoptic-da08f1b1.pth",
map_location="cpu",
check_hash=True,
)
model.load_state_dict(checkpoint["model"])
if return_postprocessor:
return model, PostProcessPanoptic(is_thing_map, threshold=threshold)
return model
def detr_resnet101_panoptic(
pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False
):
"""
    DETR R101 with 6 encoder and 6 decoder layers.
Achieves 45.1 PQ on COCO val5k.
threshold is the minimum confidence required for keeping segments in the prediction
"""
model = _make_detr("resnet101", dilation=False, num_classes=num_classes, mask=True)
is_thing_map = {i: i <= 90 for i in range(250)}
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/detr/detr-r101-panoptic-40021d53.pth",
map_location="cpu",
check_hash=True,
)
model.load_state_dict(checkpoint["model"])
if return_postprocessor:
return model, PostProcessPanoptic(is_thing_map, threshold=threshold)
return model
| d2go-main | projects_oss/detr/detr/hub.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Train and eval functions used in main.py
"""
import math
import os
import sys
from typing import Iterable
import detr.util.misc as utils
import torch
from detr.datasets.coco_eval import CocoEvaluator
from detr.datasets.panoptic_eval import PanopticEvaluator
def train_one_epoch(
model: torch.nn.Module,
criterion: torch.nn.Module,
data_loader: Iterable,
optimizer: torch.optim.Optimizer,
device: torch.device,
epoch: int,
max_norm: float = 0,
):
model.train()
criterion.train()
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter("lr", utils.SmoothedValue(window_size=1, fmt="{value:.6f}"))
metric_logger.add_meter(
"class_error", utils.SmoothedValue(window_size=1, fmt="{value:.2f}")
)
header = "Epoch: [{}]".format(epoch)
print_freq = 10
for samples, targets in metric_logger.log_every(data_loader, print_freq, header):
samples = samples.to(device)
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
outputs = model(samples)
loss_dict = criterion(outputs, targets)
weight_dict = criterion.weight_dict
losses = sum(
loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict
)
# reduce losses over all GPUs for logging purposes
loss_dict_reduced = utils.reduce_dict(loss_dict)
loss_dict_reduced_unscaled = {
f"{k}_unscaled": v for k, v in loss_dict_reduced.items()
}
loss_dict_reduced_scaled = {
k: v * weight_dict[k]
for k, v in loss_dict_reduced.items()
if k in weight_dict
}
losses_reduced_scaled = sum(loss_dict_reduced_scaled.values())
loss_value = losses_reduced_scaled.item()
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value))
print(loss_dict_reduced)
sys.exit(1)
optimizer.zero_grad()
losses.backward()
if max_norm > 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
optimizer.step()
metric_logger.update(
loss=loss_value, **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled
)
metric_logger.update(class_error=loss_dict_reduced["class_error"])
metric_logger.update(lr=optimizer.param_groups[0]["lr"])
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
@torch.no_grad()
def evaluate(
model, criterion, postprocessors, data_loader, base_ds, device, output_dir
):
model.eval()
criterion.eval()
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter(
"class_error", utils.SmoothedValue(window_size=1, fmt="{value:.2f}")
)
header = "Test:"
iou_types = tuple(k for k in ("segm", "bbox") if k in postprocessors.keys())
coco_evaluator = CocoEvaluator(base_ds, iou_types)
# coco_evaluator.coco_eval[iou_types[0]].params.iouThrs = [0, 0.1, 0.5, 0.75]
panoptic_evaluator = None
if "panoptic" in postprocessors.keys():
panoptic_evaluator = PanopticEvaluator(
data_loader.dataset.ann_file,
data_loader.dataset.ann_folder,
output_dir=os.path.join(output_dir, "panoptic_eval"),
)
for samples, targets in metric_logger.log_every(data_loader, 10, header):
samples = samples.to(device)
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
outputs = model(samples)
loss_dict = criterion(outputs, targets)
weight_dict = criterion.weight_dict
# reduce losses over all GPUs for logging purposes
loss_dict_reduced = utils.reduce_dict(loss_dict)
loss_dict_reduced_scaled = {
k: v * weight_dict[k]
for k, v in loss_dict_reduced.items()
if k in weight_dict
}
loss_dict_reduced_unscaled = {
f"{k}_unscaled": v for k, v in loss_dict_reduced.items()
}
metric_logger.update(
loss=sum(loss_dict_reduced_scaled.values()),
**loss_dict_reduced_scaled,
**loss_dict_reduced_unscaled,
)
metric_logger.update(class_error=loss_dict_reduced["class_error"])
orig_target_sizes = torch.stack([t["orig_size"] for t in targets], dim=0)
results = postprocessors["bbox"](outputs, orig_target_sizes)
if "segm" in postprocessors.keys():
target_sizes = torch.stack([t["size"] for t in targets], dim=0)
results = postprocessors["segm"](
results, outputs, orig_target_sizes, target_sizes
)
res = {
target["image_id"].item(): output
for target, output in zip(targets, results)
}
if coco_evaluator is not None:
coco_evaluator.update(res)
if panoptic_evaluator is not None:
res_pano = postprocessors["panoptic"](
outputs, target_sizes, orig_target_sizes
)
for i, target in enumerate(targets):
image_id = target["image_id"].item()
file_name = f"{image_id:012d}.png"
res_pano[i]["image_id"] = image_id
res_pano[i]["file_name"] = file_name
panoptic_evaluator.update(res_pano)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
if coco_evaluator is not None:
coco_evaluator.synchronize_between_processes()
if panoptic_evaluator is not None:
panoptic_evaluator.synchronize_between_processes()
# accumulate predictions from all images
if coco_evaluator is not None:
coco_evaluator.accumulate()
coco_evaluator.summarize()
panoptic_res = None
if panoptic_evaluator is not None:
panoptic_res = panoptic_evaluator.summarize()
stats = {k: meter.global_avg for k, meter in metric_logger.meters.items()}
if coco_evaluator is not None:
if "bbox" in postprocessors.keys():
stats["coco_eval_bbox"] = coco_evaluator.coco_eval["bbox"].stats.tolist()
if "segm" in postprocessors.keys():
stats["coco_eval_masks"] = coco_evaluator.coco_eval["segm"].stats.tolist()
if panoptic_res is not None:
stats["PQ_all"] = panoptic_res["All"]
stats["PQ_th"] = panoptic_res["Things"]
stats["PQ_st"] = panoptic_res["Stuff"]
return stats, coco_evaluator
| d2go-main | projects_oss/detr/detr/engine.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Misc functions, including distributed helpers.
Mostly copy-paste from torchvision references.
"""
import datetime
import os
import pickle
import subprocess
import time
from collections import defaultdict, deque
from distutils.version import LooseVersion
from typing import List, Optional
import torch
import torch.distributed as dist
# needed due to empty tensor bug in pytorch and torchvision 0.5
import torchvision
from torch import Tensor
if LooseVersion(torchvision.__version__) < LooseVersion("0.7.0"):
from torchvision.ops import _new_empty_tensor
from torchvision.ops.misc import _output_size
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device="cuda")
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value,
)
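# A quick sketch of how SmoothedValue behaves (values are illustrative):
#   meter = SmoothedValue(window_size=3)
#   for v in (1.0, 2.0, 3.0):
#       meter.update(v)
#   meter.avg, meter.global_avg  # both 2.0: windowed mean and total/count agree here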
def all_gather(data):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors)
Args:
data: any picklable object
Returns:
list[data]: list of data gathered from each rank
"""
world_size = get_world_size()
if world_size == 1:
return [data]
# serialized to a Tensor
buffer = pickle.dumps(data)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to("cuda")
# obtain Tensor size of each rank
local_size = torch.tensor([tensor.numel()], device="cuda")
size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)]
dist.all_gather(size_list, local_size)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# receiving Tensor from all ranks
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
tensor_list = []
for _ in size_list:
tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda"))
if local_size != max_size:
padding = torch.empty(
size=(max_size - local_size,), dtype=torch.uint8, device="cuda"
)
tensor = torch.cat((tensor, padding), dim=0)
dist.all_gather(tensor_list, tensor)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer)) # noqa
return data_list
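# Usage sketch: under an initialized torch.distributed job every rank contributes one picklable object;
# in a single-process run it simply returns [data], e.g.
#   stats = all_gather({"loss": 0.5})  # -> [{"loss": 0.5}] when world_size == 1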
def reduce_dict(input_dict, average=True):
"""
Args:
input_dict (dict): all the values will be reduced
average (bool): whether to do average or sum
Reduce the values in the dictionary from all processes so that all processes
have the averaged results. Returns a dict with the same fields as
input_dict, after reduction.
"""
world_size = get_world_size()
if world_size < 2:
return input_dict
with torch.no_grad():
names = []
values = []
# sort the keys so that they are consistent across processes
for k in sorted(input_dict.keys()):
names.append(k)
values.append(input_dict[k])
values = torch.stack(values, dim=0)
dist.all_reduce(values)
if average:
values /= world_size
reduced_dict = {k: v for k, v in zip(names, values)}
return reduced_dict
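# Usage sketch (mirrors how engine.py logs losses): pass a dict of scalar tensors so every process
# ends up with the same averaged values, e.g.
#   reduced = reduce_dict({"loss_ce": loss_ce, "loss_bbox": loss_bbox})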
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError(
"'{}' object has no attribute '{}'".format(type(self).__name__, attr)
)
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append("{}: {}".format(name, str(meter)))
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
i = 0
if not header:
header = ""
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt="{avg:.4f}")
data_time = SmoothedValue(fmt="{avg:.4f}")
space_fmt = ":" + str(len(str(len(iterable)))) + "d"
if torch.cuda.is_available():
log_msg = self.delimiter.join(
[
header,
"[{0" + space_fmt + "}/{1}]",
"eta: {eta}",
"{meters}",
"time: {time}",
"data: {data}",
"max mem: {memory:.0f}",
]
)
else:
log_msg = self.delimiter.join(
[
header,
"[{0" + space_fmt + "}/{1}]",
"eta: {eta}",
"{meters}",
"time: {time}",
"data: {data}",
]
)
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0 or i == len(iterable) - 1:
eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(
log_msg.format(
i,
len(iterable),
eta=eta_string,
meters=str(self),
time=str(iter_time),
data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB,
)
)
else:
print(
log_msg.format(
i,
len(iterable),
eta=eta_string,
meters=str(self),
time=str(iter_time),
data=str(data_time),
)
)
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print(
"{} Total time: {} ({:.4f} s / it)".format(
header, total_time_str, total_time / len(iterable)
)
)
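# Illustrative usage sketch (not part of the original file): the typical training
# loop drives MetricLogger through log_every, which yields batches unchanged and
# prints ETA / timing / meter summaries every `print_freq` iterations.
def _example_metric_logger(data_loader):
    logger = MetricLogger(delimiter="  ")
    for _batch in logger.log_every(data_loader, print_freq=10, header="Epoch: [0]"):
        logger.update(loss=0.1, class_error=5.0)  # hypothetical per-step values
    return str(logger)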
def get_sha():
cwd = os.path.dirname(os.path.abspath(__file__))
def _run(command):
return subprocess.check_output(command, cwd=cwd).decode("ascii").strip()
sha = "N/A"
diff = "clean"
branch = "N/A"
try:
sha = _run(["git", "rev-parse", "HEAD"])
subprocess.check_output(["git", "diff"], cwd=cwd)
diff = _run(["git", "diff-index", "HEAD"])
diff = "has uncommited changes" if diff else "clean"
branch = _run(["git", "rev-parse", "--abbrev-ref", "HEAD"])
except Exception:
pass
message = f"sha: {sha}, status: {diff}, branch: {branch}"
return message
def collate_fn(batch):
batch = list(zip(*batch))
batch[0] = nested_tensor_from_tensor_list(batch[0])
return tuple(batch)
def _max_by_axis(the_list: "List[List[int]]") -> "List[int]":
maxes = the_list[0]
for sublist in the_list[1:]:
for index, item in enumerate(sublist):
maxes[index] = max(maxes[index], item)
return maxes
class NestedTensor(object):
def __init__(self, tensors, mask: Optional[Tensor]):
self.tensors = tensors
self.mask = mask
def to(self, device: torch.device) -> "NestedTensor":
cast_tensor = self.tensors.to(device)
mask = self.mask
if mask is not None:
assert mask is not None
cast_mask = mask.to(device)
else:
cast_mask = None
return NestedTensor(cast_tensor, cast_mask)
def decompose(self):
return self.tensors, self.mask
def __repr__(self):
return str(self.tensors)
def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):
# TODO make this more general
if tensor_list[0].ndim == 3:
if torchvision._is_tracing():
# nested_tensor_from_tensor_list() does not export well to ONNX
# call _onnx_nested_tensor_from_tensor_list() instead
return _onnx_nested_tensor_from_tensor_list(tensor_list)
# TODO make it support different-sized images
max_size = _max_by_axis([list(img.shape) for img in tensor_list])
# min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))
batch_shape = [len(tensor_list)] + max_size
b, c, h, w = batch_shape
dtype = tensor_list[0].dtype
device = tensor_list[0].device
tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
mask = torch.ones((b, h, w), dtype=torch.bool, device=device)
for img, pad_img, m in zip(tensor_list, tensor, mask):
pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
m[: img.shape[1], : img.shape[2]] = False
else:
raise ValueError("not supported")
return NestedTensor(tensor, mask)
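# Illustrative usage sketch (not part of the original file): batching two images of
# different spatial sizes. The batch tensor is zero-padded to the per-axis maximum
# and the boolean mask is True exactly on the padded pixels.
def _example_nested_tensor():
    imgs = [torch.rand(3, 200, 300), torch.rand(3, 240, 260)]
    tensors, mask = nested_tensor_from_tensor_list(imgs).decompose()
    return tensors.shape, mask.shape  # (2, 3, 240, 300), (2, 240, 300)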
# _onnx_nested_tensor_from_tensor_list() is an implementation of
# nested_tensor_from_tensor_list() that is supported by ONNX tracing.
@torch.jit.unused
def _onnx_nested_tensor_from_tensor_list(tensor_list: List[Tensor]) -> NestedTensor:
max_size = []
for i in range(tensor_list[0].dim()):
max_size_i = torch.max(
torch.stack([img.shape[i] for img in tensor_list]).to(torch.float32)
).to(torch.int64)
max_size.append(max_size_i)
max_size = tuple(max_size)
# work around for
# pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
# m[: img.shape[1], :img.shape[2]] = False
# which is not yet supported in onnx
padded_imgs = []
padded_masks = []
for img in tensor_list:
padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))]
padded_img = torch.nn.functional.pad(
img, (0, padding[2], 0, padding[1], 0, padding[0])
)
padded_imgs.append(padded_img)
m = torch.zeros_like(img[0], dtype=torch.int, device=img.device)
padded_mask = torch.nn.functional.pad(
m, (0, padding[2], 0, padding[1]), "constant", 1
)
padded_masks.append(padded_mask.to(torch.bool))
tensor = torch.stack(padded_imgs)
mask = torch.stack(padded_masks)
return NestedTensor(tensor, mask=mask)
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop("force", False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def save_on_master(*args, **kwargs):
if is_main_process():
torch.save(*args, **kwargs)
def init_distributed_mode(args):
if "RANK" in os.environ and "WORLD_SIZE" in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ["WORLD_SIZE"])
args.gpu = int(os.environ["LOCAL_RANK"])
elif "SLURM_PROCID" in os.environ:
args.rank = int(os.environ["SLURM_PROCID"])
args.gpu = args.rank % torch.cuda.device_count()
else:
print("Not using distributed mode")
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = "nccl"
print(
"| distributed init (rank {}): {}".format(args.rank, args.dist_url), flush=True
)
torch.distributed.init_process_group(
backend=args.dist_backend,
init_method=args.dist_url,
world_size=args.world_size,
rank=args.rank,
)
torch.distributed.barrier()
setup_for_distributed(args.rank == 0)
@torch.no_grad()
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
if target.numel() == 0:
return [torch.zeros([], device=output.device)]
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
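# Illustrative usage sketch (not part of the original file): accuracy() returns the
# top-k precision in percent. Here the argmax prediction is correct for one of the
# two samples, so top-1 accuracy is 50.0.
def _example_accuracy():
    output = torch.tensor([[0.1, 0.9], [0.8, 0.2]])  # logits for 2 samples, 2 classes
    target = torch.tensor([1, 1])
    return [r.item() for r in accuracy(output, target, topk=(1,))]  # [50.0]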
def interpolate(
input: "Tensor",
size: "Optional[List[int]]" = None,
scale_factor: "Optional[float]" = None,
mode: str = "nearest",
align_corners: "Optional[bool]" = None,
) -> "Tensor":
"""
Equivalent to nn.functional.interpolate, but with support for empty batch sizes.
    This will eventually be supported natively by PyTorch, at which point this
    function can go away.
"""
# if float(torchvision.__version__[:3]) < 0.7:
if LooseVersion(torchvision.__version__) < LooseVersion("0.7.0"):
if input.numel() > 0:
return torch.nn.functional.interpolate(
input, size, scale_factor, mode, align_corners
)
output_shape = _output_size(2, input, size, scale_factor)
output_shape = list(input.shape[:-2]) + list(output_shape)
return _new_empty_tensor(input, output_shape)
else:
return torchvision.ops.misc.interpolate(
input, size, scale_factor, mode, align_corners
)
def inverse_sigmoid(x, eps=1e-5):
x = x.clamp(min=0, max=1)
x1 = x.clamp(min=eps)
x2 = (1 - x).clamp(min=eps)
return torch.log(x1 / x2)
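# Illustrative usage sketch (not part of the original file): inverse_sigmoid is the
# clamped logit function, so sigmoid(inverse_sigmoid(p)) recovers p for inputs away
# from 0 and 1; the eps clamp keeps the log finite at the extremes.
def _example_inverse_sigmoid():
    p = torch.tensor([0.1, 0.5, 0.9])
    return torch.allclose(torch.sigmoid(inverse_sigmoid(p)), p, atol=1e-4)  # True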
| d2go-main | projects_oss/detr/detr/util/misc.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Utilities for bounding box manipulation and GIoU.
"""
import torch
from torchvision.ops.boxes import box_area
def box_cxcywh_to_xyxy(x):
x_c, y_c, w, h = x.unbind(-1)
b = [(x_c - 0.5 * w), (y_c - 0.5 * h), (x_c + 0.5 * w), (y_c + 0.5 * h)]
return torch.stack(b, dim=-1)
def box_xyxy_to_cxcywh(x):
x0, y0, x1, y1 = x.unbind(-1)
b = [(x0 + x1) / 2, (y0 + y1) / 2, (x1 - x0), (y1 - y0)]
return torch.stack(b, dim=-1)
# modified from torchvision to also return the union
def box_iou(boxes1, boxes2):
area1 = box_area(boxes1)
area2 = box_area(boxes2)
lt = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2]
rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2]
wh = (rb - lt).clamp(min=0) # [N,M,2]
inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]
union = area1[:, None] + area2 - inter
iou = inter / union
return iou, union
def generalized_box_iou(boxes1, boxes2):
"""
Generalized IoU from https://giou.stanford.edu/
The boxes should be in [x0, y0, x1, y1] format
Returns a [N, M] pairwise matrix, where N = len(boxes1)
and M = len(boxes2)
"""
# degenerate boxes gives inf / nan results
# so do an early check
assert (boxes1[:, 2:] >= boxes1[:, :2]).all(), f"incorrect boxes, boxes1 {boxes1}"
assert (boxes2[:, 2:] >= boxes2[:, :2]).all(), f"incorrect boxes, boxes2 {boxes2}"
iou, union = box_iou(boxes1, boxes2)
lt = torch.min(boxes1[:, None, :2], boxes2[:, :2])
rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
wh = (rb - lt).clamp(min=0) # [N,M,2]
area = wh[:, :, 0] * wh[:, :, 1]
return iou - (area - union) / area
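# Illustrative usage sketch (not part of the original file): GIoU for one pair of
# xyxy boxes. Identical boxes score 1.0; partially overlapping boxes are penalised
# by how much of the enclosing box is empty, so the value can drop below the IoU.
def _example_generalized_box_iou():
    a = torch.tensor([[0.0, 0.0, 2.0, 2.0]])
    b = torch.tensor([[1.0, 1.0, 3.0, 3.0]])
    return generalized_box_iou(a, b)  # 1x1 matrix: 1/7 - 2/9 ~= -0.079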
def masks_to_boxes(masks):
"""Compute the bounding boxes around the provided masks
The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions.
Returns a [N, 4] tensors, with the boxes in xyxy format
"""
if masks.numel() == 0:
return torch.zeros((0, 4), device=masks.device)
h, w = masks.shape[-2:]
y = torch.arange(0, h, dtype=torch.float)
x = torch.arange(0, w, dtype=torch.float)
y, x = torch.meshgrid(y, x)
x_mask = masks * x.unsqueeze(0)
x_max = x_mask.flatten(1).max(-1)[0]
x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
y_mask = masks * y.unsqueeze(0)
y_max = y_mask.flatten(1).max(-1)[0]
y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
return torch.stack([x_min, y_min, x_max, y_max], 1)
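# Illustrative usage sketch (not part of the original file): a single 4x4 mask with
# a 2x2 foreground block; the recovered box is the inclusive pixel extent of that
# block in xyxy coordinates.
def _example_masks_to_boxes():
    m = torch.zeros(1, 4, 4)
    m[0, 1:3, 1:3] = 1
    return masks_to_boxes(m)  # tensor([[1., 1., 2., 2.]])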
| d2go-main | projects_oss/detr/detr/util/box_ops.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
| d2go-main | projects_oss/detr/detr/util/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Plotting utilities to visualize training logs.
"""
from pathlib import Path, PurePath
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import torch
def plot_logs(
logs,
fields=("class_error", "loss_bbox_unscaled", "mAP"),
ewm_col=0,
log_name="log.txt",
):
"""
Function to plot specific fields from training log(s). Plots both training and test results.
:: Inputs - logs = list containing Path objects, each pointing to individual dir with a log file
- fields = which results to plot from each log file - plots both training and test for each field.
- ewm_col = optional, which column to use as the exponential weighted smoothing of the plots
- log_name = optional, name of log file if different than default 'log.txt'.
:: Outputs - matplotlib plots of results in fields, color coded for each log file.
- solid lines are training results, dashed lines are test results.
"""
func_name = "plot_utils.py::plot_logs"
# verify logs is a list of Paths (list[Paths]) or single Pathlib object Path,
# convert single Path to list to avoid 'not iterable' error
if not isinstance(logs, list):
if isinstance(logs, PurePath):
logs = [logs]
print(
f"{func_name} info: logs param expects a list argument, converted to list[Path]."
)
else:
raise ValueError(
f"{func_name} - invalid argument for logs parameter.\n \
Expect list[Path] or single Path obj, received {type(logs)}"
)
# Quality checks - verify valid dir(s), that every item in list is Path object, and that log_name exists in each dir
for _, dir in enumerate(logs):
if not isinstance(dir, PurePath):
raise ValueError(
f"{func_name} - non-Path object in logs argument of {type(dir)}: \n{dir}"
)
if not dir.exists():
raise ValueError(
f"{func_name} - invalid directory in logs argument:\n{dir}"
)
# verify log_name exists
fn = Path(dir / log_name)
if not fn.exists():
print(f"-> missing {log_name}. Have you gotten to Epoch 1 in training?")
print(f"--> full path of missing log file: {fn}")
return
# load log file(s) and plot
dfs = [pd.read_json(Path(p) / log_name, lines=True) for p in logs]
fig, axs = plt.subplots(ncols=len(fields), figsize=(16, 5))
for df, color in zip(dfs, sns.color_palette(n_colors=len(logs))):
for j, field in enumerate(fields):
if field == "mAP":
coco_eval = (
pd.DataFrame(np.stack(df.test_coco_eval_bbox.dropna().values)[:, 1])
.ewm(com=ewm_col)
.mean()
)
axs[j].plot(coco_eval, c=color)
else:
df.interpolate().ewm(com=ewm_col).mean().plot(
y=[f"train_{field}", f"test_{field}"],
ax=axs[j],
color=[color] * 2,
style=["-", "--"],
)
for ax, field in zip(axs, fields):
ax.legend([Path(p).name for p in logs])
ax.set_title(field)
def plot_precision_recall(files, naming_scheme="iter"):
if naming_scheme == "exp_id":
# name becomes exp_id
names = [f.parts[-3] for f in files]
elif naming_scheme == "iter":
names = [f.stem for f in files]
else:
raise ValueError(f"not supported {naming_scheme}")
fig, axs = plt.subplots(ncols=2, figsize=(16, 5))
for f, color, name in zip(
files, sns.color_palette("Blues", n_colors=len(files)), names
):
data = torch.load(f)
# precision is n_iou, n_points, n_cat, n_area, max_det
precision = data["precision"]
recall = data["params"].recThrs
scores = data["scores"]
# take precision for all classes, all areas and 100 detections
precision = precision[0, :, :, 0, -1].mean(1)
scores = scores[0, :, :, 0, -1].mean(1)
prec = precision.mean()
rec = data["recall"][0, :, 0, -1].mean()
print(
f"{naming_scheme} {name}: mAP@50={prec * 100: 05.1f}, "
+ f"score={scores.mean():0.3f}, "
+ f"f1={2 * prec * rec / (prec + rec + 1e-8):0.3f}"
)
axs[0].plot(recall, precision, c=color)
axs[1].plot(recall, scores, c=color)
axs[0].set_title("Precision / Recall")
axs[0].legend(names)
axs[1].set_title("Scores / Recall")
axs[1].legend(names)
return fig, axs
| d2go-main | projects_oss/detr/detr/util/plot_utils.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
COCO dataset which returns image_id for evaluation.
Mostly copy-paste from https://github.com/pytorch/vision/blob/13b35ff/references/detection/coco_utils.py
"""
import os
from pathlib import Path
import detr.datasets.transforms as T
import torch
import torch.utils.data
import torchvision
from detectron2.utils.file_io import PathManager
from PIL import Image
from pycocotools import mask as coco_mask
class CocoDetection(torchvision.datasets.CocoDetection):
def __init__(self, img_folder, ann_file, transforms, return_masks):
ann_file = PathManager.get_local_path(ann_file)
super(CocoDetection, self).__init__(img_folder, ann_file)
self._transforms = transforms
self.prepare = ConvertCocoPolysToMask(return_masks)
def _load_image(self, id: int) -> Image.Image:
path = self.coco.loadImgs(id)[0]["file_name"]
with PathManager.open(os.path.join(self.root, path), "rb") as f:
image = Image.open(f).convert("RGB")
return image
def __getitem__(self, idx):
img, target = super(CocoDetection, self).__getitem__(idx)
image_id = self.ids[idx]
target = {"image_id": image_id, "annotations": target}
img, target = self.prepare(img, target)
if self._transforms is not None:
img, target = self._transforms(img, target)
return img, target
def convert_coco_poly_to_mask(segmentations, height, width):
masks = []
for polygons in segmentations:
rles = coco_mask.frPyObjects(polygons, height, width)
mask = coco_mask.decode(rles)
if len(mask.shape) < 3:
mask = mask[..., None]
mask = torch.as_tensor(mask, dtype=torch.uint8)
mask = mask.any(dim=2)
masks.append(mask)
if masks:
masks = torch.stack(masks, dim=0)
else:
masks = torch.zeros((0, height, width), dtype=torch.uint8)
return masks
class ConvertCocoPolysToMask(object):
def __init__(self, return_masks=False):
self.return_masks = return_masks
def __call__(self, image, target):
w, h = image.size
image_id = target["image_id"]
image_id = torch.tensor([image_id])
anno = target["annotations"]
anno = [obj for obj in anno if "iscrowd" not in obj or obj["iscrowd"] == 0]
boxes = [obj["bbox"] for obj in anno]
# guard against no boxes via resizing
boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4)
boxes[:, 2:] += boxes[:, :2]
boxes[:, 0::2].clamp_(min=0, max=w)
boxes[:, 1::2].clamp_(min=0, max=h)
classes = [obj["category_id"] for obj in anno]
classes = torch.tensor(classes, dtype=torch.int64)
if self.return_masks:
segmentations = [obj["segmentation"] for obj in anno]
masks = convert_coco_poly_to_mask(segmentations, h, w)
keypoints = None
if anno and "keypoints" in anno[0]:
keypoints = [obj["keypoints"] for obj in anno]
keypoints = torch.as_tensor(keypoints, dtype=torch.float32)
num_keypoints = keypoints.shape[0]
if num_keypoints:
keypoints = keypoints.view(num_keypoints, -1, 3)
keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
boxes = boxes[keep]
classes = classes[keep]
if self.return_masks:
masks = masks[keep]
if keypoints is not None:
keypoints = keypoints[keep]
target = {}
target["boxes"] = boxes
target["labels"] = classes
if self.return_masks:
target["masks"] = masks
target["image_id"] = image_id
if keypoints is not None:
target["keypoints"] = keypoints
# for conversion to coco api
area = torch.tensor([obj["area"] for obj in anno])
iscrowd = torch.tensor(
[obj["iscrowd"] if "iscrowd" in obj else 0 for obj in anno]
)
target["area"] = area[keep]
target["iscrowd"] = iscrowd[keep]
target["orig_size"] = torch.as_tensor([int(h), int(w)])
target["size"] = torch.as_tensor([int(h), int(w)])
return image, target
def make_coco_transforms(image_set):
normalize = T.Compose(
[T.ToTensor(), T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]
)
scales = [480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800]
if image_set == "train":
return T.Compose(
[
T.RandomHorizontalFlip(),
T.RandomSelect(
T.RandomResize(scales, max_size=1333),
T.Compose(
[
T.RandomResize([400, 500, 600]),
T.RandomSizeCrop(384, 600),
T.RandomResize(scales, max_size=1333),
]
),
),
normalize,
]
)
if image_set == "val":
return T.Compose(
[
T.RandomResize([800], max_size=1333),
normalize,
]
)
raise ValueError(f"unknown {image_set}")
def build(image_set, args):
if "manifold" in args.coco_path:
root = args.coco_path
PATHS = {
"train": (
os.path.join(root, "coco_train2017"),
"manifold://fair_vision_data/tree/detectron2/json_dataset_annotations/coco/instances_train2017.json",
),
"val": (
os.path.join(root, "coco_val2017"),
"manifold://fair_vision_data/tree/detectron2/json_dataset_annotations/coco/instances_val2017.json",
),
}
else:
root = Path(args.coco_path)
assert root.exists(), f"provided COCO path {root} does not exist"
mode = "instances"
PATHS = {
"train": (
root / "train2017",
root / "annotations" / f"{mode}_train2017.json",
),
"val": (root / "val2017", root / "annotations" / f"{mode}_val2017.json"),
}
img_folder, ann_file = PATHS[image_set]
dataset = CocoDetection(
img_folder,
ann_file,
transforms=make_coco_transforms(image_set),
return_masks=args.masks,
)
return dataset
| d2go-main | projects_oss/detr/detr/datasets/coco.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Transforms and data augmentation for both image + bbox.
"""
import random
import PIL
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as F
from detr.util.box_ops import box_xyxy_to_cxcywh
from detr.util.misc import interpolate
def crop(image, target, region):
cropped_image = F.crop(image, *region)
target = target.copy()
i, j, h, w = region
# should we do something wrt the original size?
target["size"] = torch.tensor([h, w])
fields = ["labels", "area", "iscrowd"]
if "boxes" in target:
boxes = target["boxes"]
max_size = torch.as_tensor([w, h], dtype=torch.float32)
cropped_boxes = boxes - torch.as_tensor([j, i, j, i])
cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size)
cropped_boxes = cropped_boxes.clamp(min=0)
area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1)
target["boxes"] = cropped_boxes.reshape(-1, 4)
target["area"] = area
fields.append("boxes")
if "masks" in target:
# FIXME should we update the area here if there are no boxes?
target["masks"] = target["masks"][:, i : i + h, j : j + w]
fields.append("masks")
# remove elements for which the boxes or masks that have zero area
if "boxes" in target or "masks" in target:
# favor boxes selection when defining which elements to keep
# this is compatible with previous implementation
if "boxes" in target:
cropped_boxes = target["boxes"].reshape(-1, 2, 2)
keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1)
else:
keep = target["masks"].flatten(1).any(1)
for field in fields:
target[field] = target[field][keep]
return cropped_image, target
def hflip(image, target):
flipped_image = F.hflip(image)
w, h = image.size
target = target.copy()
if "boxes" in target:
boxes = target["boxes"]
boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor(
[-1, 1, -1, 1]
) + torch.as_tensor([w, 0, w, 0])
target["boxes"] = boxes
if "masks" in target:
target["masks"] = target["masks"].flip(-1)
return flipped_image, target
def resize(image, target, size, max_size=None):
# size can be min_size (scalar) or (w, h) tuple
def get_size_with_aspect_ratio(image_size, size, max_size=None):
w, h = image_size
if max_size is not None:
min_original_size = float(min((w, h)))
max_original_size = float(max((w, h)))
if max_original_size / min_original_size * size > max_size:
size = int(round(max_size * min_original_size / max_original_size))
if (w <= h and w == size) or (h <= w and h == size):
return (h, w)
if w < h:
ow = size
oh = int(size * h / w)
else:
oh = size
ow = int(size * w / h)
return (oh, ow)
def get_size(image_size, size, max_size=None):
if isinstance(size, (list, tuple)):
return size[::-1]
else:
return get_size_with_aspect_ratio(image_size, size, max_size)
size = get_size(image.size, size, max_size)
rescaled_image = F.resize(image, size)
if target is None:
return rescaled_image, None
ratios = tuple(
float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size)
)
ratio_width, ratio_height = ratios
target = target.copy()
if "boxes" in target:
boxes = target["boxes"]
scaled_boxes = boxes * torch.as_tensor(
[ratio_width, ratio_height, ratio_width, ratio_height]
)
target["boxes"] = scaled_boxes
if "area" in target:
area = target["area"]
scaled_area = area * (ratio_width * ratio_height)
target["area"] = scaled_area
h, w = size
target["size"] = torch.tensor([h, w])
if "masks" in target:
target["masks"] = (
interpolate(target["masks"][:, None].float(), size, mode="nearest")[:, 0]
> 0.5
)
return rescaled_image, target
def pad(image, target, padding):
# assumes that we only pad on the bottom right corners
padded_image = F.pad(image, (0, 0, padding[0], padding[1]))
if target is None:
return padded_image, None
target = target.copy()
# should we do something wrt the original size?
target["size"] = torch.tensor(padded_image.size[::-1])
if "masks" in target:
target["masks"] = torch.nn.functional.pad(
target["masks"], (0, padding[0], 0, padding[1])
)
return padded_image, target
class RandomCrop(object):
def __init__(self, size):
self.size = size
def __call__(self, img, target):
region = T.RandomCrop.get_params(img, self.size)
return crop(img, target, region)
class RandomSizeCrop(object):
def __init__(self, min_size: int, max_size: int):
self.min_size = min_size
self.max_size = max_size
def __call__(self, img: PIL.Image.Image, target: dict): # noqa: P210
w = random.randint(self.min_size, min(img.width, self.max_size))
h = random.randint(self.min_size, min(img.height, self.max_size))
region = T.RandomCrop.get_params(img, [h, w])
return crop(img, target, region)
class CenterCrop(object):
def __init__(self, size):
self.size = size
def __call__(self, img, target):
image_width, image_height = img.size
crop_height, crop_width = self.size
crop_top = int(round((image_height - crop_height) / 2.0))
crop_left = int(round((image_width - crop_width) / 2.0))
return crop(img, target, (crop_top, crop_left, crop_height, crop_width))
class RandomHorizontalFlip(object):
def __init__(self, p=0.5):
self.p = p
def __call__(self, img, target):
if random.random() < self.p:
return hflip(img, target)
return img, target
class RandomResize(object):
def __init__(self, sizes, max_size=None):
assert isinstance(sizes, (list, tuple))
self.sizes = sizes
self.max_size = max_size
def __call__(self, img, target=None):
size = random.choice(self.sizes)
return resize(img, target, size, self.max_size)
class RandomPad(object):
def __init__(self, max_pad):
self.max_pad = max_pad
def __call__(self, img, target):
pad_x = random.randint(0, self.max_pad)
pad_y = random.randint(0, self.max_pad)
return pad(img, target, (pad_x, pad_y))
class RandomSelect(object):
"""
Randomly selects between transforms1 and transforms2,
with probability p for transforms1 and (1 - p) for transforms2
"""
def __init__(self, transforms1, transforms2, p=0.5):
self.transforms1 = transforms1
self.transforms2 = transforms2
self.p = p
def __call__(self, img, target):
if random.random() < self.p:
return self.transforms1(img, target)
return self.transforms2(img, target)
class ToTensor(object):
def __call__(self, img, target):
return F.to_tensor(img), target
class RandomErasing(object):
def __init__(self, *args, **kwargs):
self.eraser = T.RandomErasing(*args, **kwargs)
def __call__(self, img, target):
return self.eraser(img), target
class Normalize(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, image, target=None):
image = F.normalize(image, mean=self.mean, std=self.std)
if target is None:
return image, None
target = target.copy()
h, w = image.shape[-2:]
if "boxes" in target:
boxes = target["boxes"]
boxes = box_xyxy_to_cxcywh(boxes)
boxes = boxes / torch.tensor([w, h, w, h], dtype=torch.float32)
target["boxes"] = boxes
return image, target
class Compose(object):
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, image, target):
for t in self.transforms:
image, target = t(image, target)
return image, target
def __repr__(self):
format_string = self.__class__.__name__ + "("
for t in self.transforms:
format_string += "\n"
format_string += " {0}".format(t)
format_string += "\n)"
return format_string
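# Illustrative usage sketch (not part of the original file): every transform here
# maps an (image, target) pair to an (image, target) pair, so they chain freely,
# e.g. a simplified version of the COCO training pipeline.
def _example_pipeline():
    pipeline = Compose(
        [
            RandomHorizontalFlip(p=0.5),
            RandomResize([480, 512], max_size=1333),
            ToTensor(),
            Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ]
    )
    # img, target = pipeline(pil_image, target)  # hypothetical inputs
    return pipeline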
| d2go-main | projects_oss/detr/detr/datasets/transforms.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import json
import os
import detr.util.misc as utils
from detectron2.utils.file_io import PathManager
try:
from panopticapi.evaluation import pq_compute
except ImportError:
pass
class PanopticEvaluator(object):
def __init__(self, ann_file, ann_folder, output_dir="panoptic_eval"):
self.gt_json = ann_file
self.gt_folder = ann_folder
if utils.is_main_process():
if not PathManager.exists(output_dir):
PathManager.mkdir(output_dir)
self.output_dir = output_dir
self.predictions = []
def update(self, predictions):
for p in predictions:
with PathManager.open(
os.path.join(self.output_dir, p["file_name"]), "wb"
) as f:
f.write(p.pop("png_string"))
self.predictions += predictions
def synchronize_between_processes(self):
all_predictions = utils.all_gather(self.predictions)
merged_predictions = []
for p in all_predictions:
merged_predictions += p
self.predictions = merged_predictions
def summarize(self):
if utils.is_main_process():
json_data = {"annotations": self.predictions}
predictions_json = os.path.join(self.output_dir, "predictions.json")
with PathManager.open(predictions_json, "w") as f:
f.write(json.dumps(json_data))
return pq_compute(
self.gt_json,
predictions_json,
gt_folder=self.gt_folder,
pred_folder=self.output_dir,
)
return None
| d2go-main | projects_oss/detr/detr/datasets/panoptic_eval.py |
import math
import os
import random
import sys
import numpy as np
import skimage.morphology as morp
import torch
import torch.utils.data as data
import torchvision
import torchvision.transforms as transform
from detectron2.utils.file_io import PathManager
from PIL import Image, ImageFilter, ImageOps
from .coco import make_coco_transforms
class ADE20KParsing(torchvision.datasets.VisionDataset):
def __init__(self, root, split, transforms=None):
super(ADE20KParsing, self).__init__(root)
# assert exists and prepare dataset automatically
assert PathManager.exists(root), "Please setup the dataset"
self.images, self.masks = _get_ade20k_pairs(root, split)
assert len(self.images) == len(self.masks)
if len(self.images) == 0:
            raise RuntimeError(
                "Found 0 images in subfolders of: " + root + "\n"
            )
self._transforms = transforms
def _mask_transform(self, mask):
target = np.array(mask).astype("int64") - 1
return target
def __getitem__(self, index):
with PathManager.open(self.images[index], "rb") as f:
img = Image.open(f).convert("RGB")
with PathManager.open(self.masks[index], "rb") as f:
mask = Image.open(f).convert("P")
w, h = img.size
## generating bbox and masks
# get different classes
mask = self._mask_transform(mask)
classes = np.unique(mask)
if -1 in classes:
classes = classes[1:]
segmasks = mask == classes[:, None, None]
# find connected component
detr_masks = []
labels = []
for i in range(len(classes)):
mask = segmasks[i]
mclass = classes[i]
connected, nslice = morp.label(
mask, connectivity=2, background=0, return_num=True
)
for j in range(1, nslice + 1):
detr_masks.append(connected == j)
labels.append(mclass)
target = {}
target["image_id"] = torch.tensor(
int(os.path.basename(self.images[index])[10:-4])
)
if len(detr_masks) > 0:
target["masks"] = torch.as_tensor(
np.stack(detr_masks, axis=0), dtype=torch.uint8
)
target["boxes"] = masks_to_boxes(target["masks"])
else:
target["masks"] = torch.as_tensor(detr_masks, dtype=torch.uint8)
target["boxes"] = target["masks"]
target["labels"] = torch.tensor(labels)
target["orig_size"] = torch.as_tensor([int(h), int(w)])
target["size"] = torch.as_tensor([int(h), int(w)])
if self._transforms is not None:
img, target = self._transforms(img, target)
return img, target
def __len__(self):
return len(self.images)
@property
def pred_offset(self):
return 1
def masks_to_boxes(masks):
"""Compute the bounding boxes around the provided masks
The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions.
Returns a [N, 4] tensors, with the boxes in xyxy format
"""
if masks.numel() == 0:
return torch.zeros((0, 4), device=masks.device)
h, w = masks.shape[-2:]
y = torch.arange(0, h, dtype=torch.float)
x = torch.arange(0, w, dtype=torch.float)
y, x = torch.meshgrid(y, x)
x_mask = masks * x.unsqueeze(0)
x_max = x_mask.flatten(1).max(-1)[0]
x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
y_mask = masks * y.unsqueeze(0)
y_max = y_mask.flatten(1).max(-1)[0]
y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
return torch.stack([x_min, y_min, x_max, y_max], 1)
def _get_ade20k_pairs(folder, split="train"):
def get_path_pairs(img_folder, mask_folder):
img_paths = []
mask_paths = []
print("Before listing", img_folder)
filenames = PathManager.ls(img_folder)
for filename in filenames:
print("found: ", filename)
basename, _ = os.path.splitext(filename)
if filename.endswith(".jpg"):
imgpath = os.path.join(img_folder, filename)
maskname = basename + ".png"
maskpath = os.path.join(mask_folder, maskname)
img_paths.append(imgpath)
mask_paths.append(maskpath)
# if PathManager.isfile(maskpath):
# else:
# print('cannot find the mask:', maskpath)
return img_paths, mask_paths
if split == "train":
img_folder = os.path.join(folder, "images/training")
mask_folder = os.path.join(folder, "annotations/training")
img_paths, mask_paths = get_path_pairs(img_folder, mask_folder)
print("len(img_paths):", len(img_paths))
assert len(img_paths) == 20210
elif split == "val":
img_folder = os.path.join(folder, "images/validation")
mask_folder = os.path.join(folder, "annotations/validation")
img_paths, mask_paths = get_path_pairs(img_folder, mask_folder)
assert len(img_paths) == 2000
else:
assert split == "trainval"
train_img_folder = os.path.join(folder, "images/training")
train_mask_folder = os.path.join(folder, "annotations/training")
val_img_folder = os.path.join(folder, "images/validation")
val_mask_folder = os.path.join(folder, "annotations/validation")
train_img_paths, train_mask_paths = get_path_pairs(
train_img_folder, train_mask_folder
)
val_img_paths, val_mask_paths = get_path_pairs(val_img_folder, val_mask_folder)
img_paths = train_img_paths + val_img_paths
mask_paths = train_mask_paths + val_mask_paths
assert len(img_paths) == 22210
return img_paths, mask_paths
def build(image_set, args):
dataset = ADE20KParsing(
args.ade_path, image_set, transforms=make_coco_transforms(image_set)
)
return dataset
| d2go-main | projects_oss/detr/detr/datasets/ade.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch.utils.data
import torchvision
from .ade import build as build_ade
from .coco import build as build_coco
def get_coco_api_from_dataset(dataset):
for _ in range(10):
# if isinstance(dataset, torchvision.datasets.CocoDetection):
# break
if isinstance(dataset, torch.utils.data.Subset):
dataset = dataset.dataset
if isinstance(dataset, torchvision.datasets.CocoDetection):
return dataset.coco
def build_dataset(image_set, args):
if args.dataset_file == "coco":
dataset = build_coco(image_set, args)
elif args.dataset_file == "coco_panoptic":
# to avoid making panopticapi required for coco
from .coco_panoptic import build as build_coco_panoptic
dataset = build_coco_panoptic(image_set, args)
elif args.dataset_file == "ade":
dataset = build_ade(image_set, args)
else:
raise ValueError(f"dataset {args.dataset_file} not supported")
return dataset
| d2go-main | projects_oss/detr/detr/datasets/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
COCO evaluator that works in distributed mode.
Mostly copy-paste from https://github.com/pytorch/vision/blob/edfd5a7/references/detection/coco_eval.py
The difference is that there is less copy-pasting from pycocotools
at the end of the file, as Python 3 can suppress prints with contextlib
"""
import contextlib
import copy
import os
import numpy as np
import pycocotools.mask as mask_util
import torch
from detr.util.misc import all_gather
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
class CocoEvaluator(object):
def __init__(self, coco_gt, iou_types):
assert isinstance(iou_types, (list, tuple))
coco_gt = copy.deepcopy(coco_gt)
self.coco_gt = coco_gt
self.iou_types = iou_types
self.coco_eval = {}
for iou_type in iou_types:
self.coco_eval[iou_type] = COCOeval(coco_gt, iouType=iou_type)
self.img_ids = []
self.eval_imgs = {k: [] for k in iou_types}
def update(self, predictions):
img_ids = list(np.unique(list(predictions.keys())))
self.img_ids.extend(img_ids)
for iou_type in self.iou_types:
results = self.prepare(predictions, iou_type)
# suppress pycocotools prints
with open(os.devnull, "w") as devnull:
with contextlib.redirect_stdout(devnull):
coco_dt = COCO.loadRes(self.coco_gt, results) if results else COCO()
coco_eval = self.coco_eval[iou_type]
coco_eval.cocoDt = coco_dt
coco_eval.params.imgIds = list(img_ids)
img_ids, eval_imgs = evaluate(coco_eval)
self.eval_imgs[iou_type].append(eval_imgs)
def synchronize_between_processes(self):
for iou_type in self.iou_types:
self.eval_imgs[iou_type] = np.concatenate(self.eval_imgs[iou_type], 2)
create_common_coco_eval(
self.coco_eval[iou_type], self.img_ids, self.eval_imgs[iou_type]
)
def accumulate(self):
for coco_eval in self.coco_eval.values():
coco_eval.accumulate()
def summarize(self):
for iou_type, coco_eval in self.coco_eval.items():
print("IoU metric: {}".format(iou_type))
coco_eval.summarize()
def prepare(self, predictions, iou_type):
if iou_type == "bbox":
return self.prepare_for_coco_detection(predictions)
elif iou_type == "segm":
return self.prepare_for_coco_segmentation(predictions)
elif iou_type == "keypoints":
return self.prepare_for_coco_keypoint(predictions)
else:
raise ValueError("Unknown iou type {}".format(iou_type))
def prepare_for_coco_detection(self, predictions):
coco_results = []
for original_id, prediction in predictions.items():
if len(prediction) == 0:
continue
boxes = prediction["boxes"]
boxes = convert_to_xywh(boxes).tolist()
scores = prediction["scores"].tolist()
labels = prediction["labels"].tolist()
coco_results.extend(
[
{
"image_id": original_id,
"category_id": labels[k],
"bbox": box,
"score": scores[k],
}
for k, box in enumerate(boxes)
]
)
return coco_results
def prepare_for_coco_segmentation(self, predictions):
coco_results = []
for original_id, prediction in predictions.items():
if len(prediction) == 0:
continue
scores = prediction["scores"]
labels = prediction["labels"]
masks = prediction["masks"]
masks = masks > 0.5
scores = prediction["scores"].tolist()
labels = prediction["labels"].tolist()
rles = [
mask_util.encode(
np.array(mask[0, :, :, np.newaxis], dtype=np.uint8, order="F")
)[0]
for mask in masks
]
for rle in rles:
rle["counts"] = rle["counts"].decode("utf-8")
coco_results.extend(
[
{
"image_id": original_id,
"category_id": labels[k],
"segmentation": rle,
"score": scores[k],
}
for k, rle in enumerate(rles)
]
)
return coco_results
def prepare_for_coco_keypoint(self, predictions):
coco_results = []
for original_id, prediction in predictions.items():
if len(prediction) == 0:
continue
boxes = prediction["boxes"]
boxes = convert_to_xywh(boxes).tolist()
scores = prediction["scores"].tolist()
labels = prediction["labels"].tolist()
keypoints = prediction["keypoints"]
keypoints = keypoints.flatten(start_dim=1).tolist()
coco_results.extend(
[
{
"image_id": original_id,
"category_id": labels[k],
"keypoints": keypoint,
"score": scores[k],
}
for k, keypoint in enumerate(keypoints)
]
)
return coco_results
def convert_to_xywh(boxes):
xmin, ymin, xmax, ymax = boxes.unbind(1)
return torch.stack((xmin, ymin, xmax - xmin, ymax - ymin), dim=1)
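# Illustrative usage sketch (not part of the original file): the COCO json format
# stores boxes as [x, y, width, height], so convert_to_xywh is applied to the
# model's xyxy boxes before they are serialised in prepare_for_coco_detection().
def _example_convert_to_xywh():
    boxes = torch.tensor([[10.0, 20.0, 30.0, 60.0]])  # xyxy
    return convert_to_xywh(boxes)  # tensor([[10., 20., 20., 40.]])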
def merge(img_ids, eval_imgs):
all_img_ids = all_gather(img_ids)
all_eval_imgs = all_gather(eval_imgs)
merged_img_ids = []
for p in all_img_ids:
merged_img_ids.extend(p)
merged_eval_imgs = []
for p in all_eval_imgs:
merged_eval_imgs.append(p)
merged_img_ids = np.array(merged_img_ids)
merged_eval_imgs = np.concatenate(merged_eval_imgs, 2)
# keep only unique (and in sorted order) images
merged_img_ids, idx = np.unique(merged_img_ids, return_index=True)
merged_eval_imgs = merged_eval_imgs[..., idx]
return merged_img_ids, merged_eval_imgs
def create_common_coco_eval(coco_eval, img_ids, eval_imgs):
img_ids, eval_imgs = merge(img_ids, eval_imgs)
img_ids = list(img_ids)
eval_imgs = list(eval_imgs.flatten())
coco_eval.evalImgs = eval_imgs
coco_eval.params.imgIds = img_ids
coco_eval._paramsEval = copy.deepcopy(coco_eval.params)
#################################################################
# From pycocotools, just removed the prints and fixed
# a Python3 bug about unicode not defined
#################################################################
def evaluate(self):
"""
Run per image evaluation on given images and store results (a list of dict) in self.evalImgs
:return: None
"""
# tic = time.time()
# print('Running per image evaluation...')
p = self.params
# add backward compatibility if useSegm is specified in params
if p.useSegm is not None:
p.iouType = "segm" if p.useSegm == 1 else "bbox"
print(
"useSegm (deprecated) is not None. Running {} evaluation".format(p.iouType)
)
# print('Evaluate annotation type *{}*'.format(p.iouType))
p.imgIds = list(np.unique(p.imgIds))
if p.useCats:
p.catIds = list(np.unique(p.catIds))
p.maxDets = sorted(p.maxDets)
self.params = p
self._prepare()
# loop through images, area range, max detection number
catIds = p.catIds if p.useCats else [-1]
if p.iouType == "segm" or p.iouType == "bbox":
computeIoU = self.computeIoU
elif p.iouType == "keypoints":
computeIoU = self.computeOks
self.ious = {
(imgId, catId): computeIoU(imgId, catId)
for imgId in p.imgIds
for catId in catIds
}
evaluateImg = self.evaluateImg
maxDet = p.maxDets[-1]
evalImgs = [
evaluateImg(imgId, catId, areaRng, maxDet)
for catId in catIds
for areaRng in p.areaRng
for imgId in p.imgIds
]
# this is NOT in the pycocotools code, but could be done outside
evalImgs = np.asarray(evalImgs).reshape(len(catIds), len(p.areaRng), len(p.imgIds))
self._paramsEval = copy.deepcopy(self.params)
# toc = time.time()
# print('DONE (t={:0.2f}s).'.format(toc-tic))
return p.imgIds, evalImgs
#################################################################
# end of straight copy from pycocotools, just removing the prints
#################################################################
| d2go-main | projects_oss/detr/detr/datasets/coco_eval.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import json
import os
from pathlib import Path
import numpy as np
import torch
from detectron2.utils.file_io import PathManager
from detr.util.box_ops import masks_to_boxes
from panopticapi.utils import rgb2id
from PIL import Image
from .coco import make_coco_transforms
class CocoPanoptic:
def __init__(
self, img_folder, ann_folder, ann_file, transforms=None, return_masks=True
):
with PathManager.open(ann_file, "r") as f:
self.coco = json.load(f)
# sort 'images' field so that they are aligned with 'annotations'
# i.e., in alphabetical order
self.coco["images"] = sorted(self.coco["images"], key=lambda x: x["id"])
# sanity check
if "annotations" in self.coco:
for img, ann in zip(self.coco["images"], self.coco["annotations"]):
assert img["file_name"][:-4] == ann["file_name"][:-4]
self.img_folder = img_folder
self.ann_folder = ann_folder
self.ann_file = ann_file
self.transforms = transforms
self.return_masks = return_masks
def __getitem__(self, idx):
ann_info = (
self.coco["annotations"][idx]
if "annotations" in self.coco
else self.coco["images"][idx]
)
img_path = os.path.join(
self.img_folder, ann_info["file_name"].replace(".png", ".jpg")
)
ann_path = os.path.join(self.ann_folder, ann_info["file_name"])
with PathManager.open(img_path, "rb") as f:
img = Image.open(f).convert("RGB")
w, h = img.size
if "segments_info" in ann_info:
with PathManager.open(ann_path, "rb") as f:
masks = np.asarray(Image.open(f), dtype=np.uint32)
masks = rgb2id(masks)
ids = np.array([ann["id"] for ann in ann_info["segments_info"]])
masks = masks == ids[:, None, None]
masks = torch.as_tensor(masks, dtype=torch.uint8)
labels = torch.tensor(
[ann["category_id"] for ann in ann_info["segments_info"]],
dtype=torch.int64,
)
target = {}
target["image_id"] = torch.tensor(
[ann_info["image_id"] if "image_id" in ann_info else ann_info["id"]]
)
if self.return_masks:
target["masks"] = masks
target["labels"] = labels
target["boxes"] = masks_to_boxes(masks)
target["size"] = torch.as_tensor([int(h), int(w)])
target["orig_size"] = torch.as_tensor([int(h), int(w)])
if "segments_info" in ann_info:
for name in ["iscrowd", "area"]:
target[name] = torch.tensor(
[ann[name] for ann in ann_info["segments_info"]]
)
if self.transforms is not None:
img, target = self.transforms(img, target)
return img, target
def __len__(self):
return len(self.coco["images"])
def get_height_and_width(self, idx):
img_info = self.coco["images"][idx]
height = img_info["height"]
width = img_info["width"]
return height, width
def build(image_set, args):
if "manifold" in args.coco_path:
root = args.coco_path
PATHS = {
"train": (
os.path.join(root, "coco_train2017"),
"manifold://fair_vision_data/tree/detectron2/json_dataset_annotations/coco/panoptic_train2017.json",
),
"val": (
os.path.join(root, "coco_val2017"),
"manifold://fair_vision_data/tree/detectron2/json_dataset_annotations/coco/panoptic_val2017.json",
),
}
img_folder_path, ann_file = PATHS[image_set]
ann_folder = os.path.join(root, f"coco_panoptic_{image_set}2017")
else:
img_folder_root = Path(args.coco_path)
ann_folder_root = Path(args.coco_panoptic_path)
assert (
img_folder_root.exists()
), f"provided COCO path {img_folder_root} does not exist"
assert (
ann_folder_root.exists()
), f"provided COCO path {ann_folder_root} does not exist"
mode = "panoptic"
PATHS = {
"train": ("train2017", Path("annotations") / f"{mode}_train2017.json"),
"val": ("val2017", Path("annotations") / f"{mode}_val2017.json"),
}
img_folder, ann_file = PATHS[image_set]
img_folder_path = img_folder_root / img_folder
ann_folder = ann_folder_root / f"{mode}_{img_folder}"
ann_file = ann_folder_root / ann_file
dataset = CocoPanoptic(
img_folder_path,
ann_folder,
ann_file,
transforms=make_coco_transforms(image_set),
return_masks=args.masks,
)
return dataset
| d2go-main | projects_oss/detr/detr/datasets/coco_panoptic.py |
from typing import Dict
import numpy as np
import torch
import torch.nn.functional as F
from detectron2.modeling import build_backbone
from detectron2.utils.registry import Registry
from detr.models.backbone import Joiner
from detr.models.position_encoding import PositionEmbeddingSine
from detr.util.misc import NestedTensor
from torch import nn
DETR_MODEL_REGISTRY = Registry("DETR_MODEL")
def build_detr_backbone(cfg):
if "resnet" in cfg.MODEL.BACKBONE.NAME.lower():
d2_backbone = ResNetMaskedBackbone(cfg)
elif "fbnet" in cfg.MODEL.BACKBONE.NAME.lower():
d2_backbone = FBNetMaskedBackbone(cfg)
elif cfg.MODEL.BACKBONE.SIMPLE:
d2_backbone = SimpleSingleStageBackbone(cfg)
else:
raise NotImplementedError
N_steps = cfg.MODEL.DETR.HIDDEN_DIM // 2
centered_position_encoding = cfg.MODEL.DETR.CENTERED_POSITION_ENCODIND
backbone = Joiner(
d2_backbone,
PositionEmbeddingSine(
N_steps, normalize=True, centered=centered_position_encoding
),
)
backbone.num_channels = d2_backbone.num_channels
return backbone
def build_detr_model(cfg):
name = cfg.MODEL.DETR.NAME
return DETR_MODEL_REGISTRY.get(name)(cfg)
class ResNetMaskedBackbone(nn.Module):
"""This is a thin wrapper around D2's backbone to provide padding masking"""
def __init__(self, cfg):
super().__init__()
self.backbone = build_backbone(cfg)
backbone_shape = self.backbone.output_shape()
if cfg.MODEL.DETR.NUM_FEATURE_LEVELS > 1:
self.strides = [8, 16, 32]
else:
self.strides = [32]
if cfg.MODEL.RESNETS.RES5_DILATION == 2:
# fix dilation from d2
self.backbone.stages[-1][0].conv2.dilation = (1, 1)
self.backbone.stages[-1][0].conv2.padding = (1, 1)
self.strides[-1] = self.strides[-1] // 2
self.feature_strides = [backbone_shape[f].stride for f in backbone_shape.keys()]
self.num_channels = [backbone_shape[k].channels for k in backbone_shape.keys()]
def forward(self, tensor_list: NestedTensor):
xs = self.backbone(tensor_list.tensors)
out: Dict[str, NestedTensor] = {}
for name, x in xs.items():
m = tensor_list.mask
assert m is not None
mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0]
out[name] = NestedTensor(x, mask)
return out
class FBNetMaskedBackbone(ResNetMaskedBackbone):
"""This is a thin wrapper around D2's backbone to provide padding masking"""
def __init__(self, cfg):
nn.Module.__init__(self)
self.backbone = build_backbone(cfg)
self.out_features = cfg.MODEL.FBNET_V2.OUT_FEATURES
self.feature_strides = list(self.backbone._out_feature_strides.values())
self.num_channels = [
self.backbone._out_feature_channels[k] for k in self.out_features
]
self.strides = [
self.backbone._out_feature_strides[k] for k in self.out_features
]
class SimpleSingleStageBackbone(ResNetMaskedBackbone):
"""This is a simple wrapper for single stage backbone,
please set the required configs:
cfg.MODEL.BACKBONE.SIMPLE == True,
cfg.MODEL.BACKBONE.STRIDE, cfg.MODEL.BACKBONE.CHANNEL
"""
def __init__(self, cfg):
nn.Module.__init__(self)
self.backbone = build_backbone(cfg)
self.out_features = ["out"]
assert cfg.MODEL.BACKBONE.SIMPLE is True
self.feature_strides = [cfg.MODEL.BACKBONE.STRIDE]
self.num_channels = [cfg.MODEL.BACKBONE.CHANNEL]
self.strides = [cfg.MODEL.BACKBONE.STRIDE]
| d2go-main | projects_oss/detr/detr/models/build.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
DETR model and criterion classes.
"""
import torch
import torch.nn.functional as F
from detectron2.config import configurable
from detr.util import box_ops
from detr.util.misc import (
accuracy,
get_world_size,
interpolate,
is_dist_avail_and_initialized,
nested_tensor_from_tensor_list,
NestedTensor,
)
from torch import nn
from .backbone import build_backbone
from .build import build_detr_backbone, DETR_MODEL_REGISTRY
from .matcher import build_matcher
from .segmentation import (
DETRsegm,
dice_loss,
PostProcessPanoptic,
PostProcessSegm,
sigmoid_focal_loss,
)
from .setcriterion import SetCriterion
from .transformer import build_transformer, Transformer
@DETR_MODEL_REGISTRY.register()
class DETR(nn.Module):
"""This is the DETR module that performs object detection"""
@configurable
def __init__(
self,
backbone,
transformer,
num_classes,
num_queries,
aux_loss=False,
use_focal_loss=False,
query_embed=None,
):
"""Initializes the model.
Parameters:
backbone: torch module of the backbone to be used. See backbone.py
transformer: torch module of the transformer architecture. See transformer.py
num_classes: number of object classes
            num_queries: number of object queries, i.e., detection slots. This is the maximal number of objects
DETR can detect in a single image. For COCO, we recommend 100 queries.
aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used.
"""
super().__init__()
self.num_queries = num_queries
self.transformer = transformer
hidden_dim = transformer.d_model
self.class_embed = nn.Linear(
hidden_dim, num_classes if use_focal_loss else num_classes + 1
)
self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)
self.query_embed = (
query_embed
if query_embed is not None
else nn.Embedding(num_queries, hidden_dim)
)
self.input_proj = nn.Conv2d(
backbone.num_channels[-1], hidden_dim, kernel_size=1
)
self.backbone = backbone
self.aux_loss = aux_loss
@classmethod
def from_config(cls, cfg):
num_classes = cfg.MODEL.DETR.NUM_CLASSES
hidden_dim = cfg.MODEL.DETR.HIDDEN_DIM
num_queries = cfg.MODEL.DETR.NUM_OBJECT_QUERIES
# Transformer parameters:
nheads = cfg.MODEL.DETR.NHEADS
dropout = cfg.MODEL.DETR.DROPOUT
dim_feedforward = cfg.MODEL.DETR.DIM_FEEDFORWARD
enc_layers = cfg.MODEL.DETR.ENC_LAYERS
dec_layers = cfg.MODEL.DETR.DEC_LAYERS
pre_norm = cfg.MODEL.DETR.PRE_NORM
# Loss parameters:
deep_supervision = cfg.MODEL.DETR.DEEP_SUPERVISION
use_focal_loss = cfg.MODEL.DETR.USE_FOCAL_LOSS
backbone = build_detr_backbone(cfg)
transformer = Transformer(
d_model=hidden_dim,
dropout=dropout,
nhead=nheads,
dim_feedforward=dim_feedforward,
num_encoder_layers=enc_layers,
num_decoder_layers=dec_layers,
normalize_before=pre_norm,
return_intermediate_dec=deep_supervision,
learnable_tgt=cfg.MODEL.DETR.LEARNABLE_TGT,
)
if cfg.MODEL.DETR.LEARNABLE_TGT:
query_embed = nn.Embedding(num_queries, hidden_dim * 2)
else:
query_embed = nn.Embedding(num_queries, hidden_dim)
return {
"backbone": backbone,
"transformer": transformer,
"num_classes": num_classes,
"num_queries": num_queries,
"query_embed": query_embed,
"aux_loss": deep_supervision,
"use_focal_loss": use_focal_loss,
}
def forward(self, samples: NestedTensor):
"""The forward expects a NestedTensor, which consists of:
- samples.tensor: batched images, of shape [batch_size x 3 x H x W]
- samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels
It returns a dict with the following elements:
- "pred_logits": the classification logits (including no-object) for all queries.
Shape= [batch_size x num_queries x (num_classes + 1)]
- "pred_boxes": The normalized boxes coordinates for all queries, represented as
(center_x, center_y, height, width). These values are normalized in [0, 1],
relative to the size of each individual image (disregarding possible padding).
See PostProcess for information on how to retrieve the unnormalized bounding box.
- "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of
dictionnaries containing the two above keys for each decoder layer.
"""
if isinstance(samples, (list, torch.Tensor)):
samples = nested_tensor_from_tensor_list(samples)
features, pos = self.backbone(samples)
# src shape (B, C, H, W)
# mask shape (B, H, W)
src, mask = features[-1].decompose()
assert mask is not None
# hs shape (NUM_LAYER, B, S, hidden_dim)
hs = self.transformer(
self.input_proj(src), mask, self.query_embed.weight, pos[-1]
)[0]
# shape (NUM_LAYER, B, S, NUM_CLASS + 1)
outputs_class = self.class_embed(hs)
# shape (NUM_LAYER, B, S, 4)
outputs_coord = self.bbox_embed(hs).sigmoid()
# pred_logits shape (B, S, NUM_CLASS + 1)
# pred_boxes shape (B, S, 4)
out = {"pred_logits": outputs_class[-1], "pred_boxes": outputs_coord[-1]}
if self.aux_loss:
out["aux_outputs"] = self._set_aux_loss(outputs_class, outputs_coord)
return out
@torch.jit.unused
def _set_aux_loss(self, outputs_class, outputs_coord):
# this is a workaround to make torchscript happy, as torchscript
# doesn't support dictionary with non-homogeneous values, such
# as a dict having both a Tensor and a list.
return [
{"pred_logits": a, "pred_boxes": b}
for a, b in zip(outputs_class[:-1], outputs_coord[:-1])
]
class PostProcess(nn.Module):
"""This module converts the model's output into the format expected by the coco api"""
@torch.no_grad()
def forward(self, outputs, target_sizes):
"""Perform the computation
Parameters:
outputs: raw outputs of the model
target_sizes: tensor of dimension [batch_size x 2] containing the size of each images of the batch
For evaluation, this must be the original image size (before any data augmentation)
For visualization, this should be the image size after data augment, but before padding
"""
out_logits, out_bbox = outputs["pred_logits"], outputs["pred_boxes"]
assert len(out_logits) == len(target_sizes)
assert target_sizes.shape[1] == 2
prob = F.softmax(out_logits, -1)
scores, labels = prob[..., :-1].max(-1)
# convert to [x0, y0, x1, y1] format
boxes = box_ops.box_cxcywh_to_xyxy(out_bbox)
# and from relative [0, 1] to absolute [0, height] coordinates
img_h, img_w = target_sizes.unbind(1)
scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
boxes = boxes * scale_fct[:, None, :]
results = [
{"scores": s, "labels": l, "boxes": b}
for s, l, b in zip(scores, labels, boxes)
]
return results
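# Illustrative usage sketch (not part of the original file): PostProcess turns the
# raw DETR output dict into per-image COCO-style predictions by dropping the
# no-object column, taking the best class per query, and rescaling the normalised
# cxcywh boxes to absolute xyxy pixel coordinates.
def _example_postprocess():
    outputs = {
        "pred_logits": torch.rand(1, 100, 92),  # 91 classes + no-object (hypothetical)
        "pred_boxes": torch.rand(1, 100, 4),    # normalised (cx, cy, w, h)
    }
    target_sizes = torch.tensor([[480, 640]])   # (height, width) of the original image
    results = PostProcess()(outputs, target_sizes)
    return results[0]["boxes"].shape  # torch.Size([100, 4])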
class MLP(nn.Module):
"""Very simple multi-layer perceptron (also called FFN)"""
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(
nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])
)
def forward(self, x):
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
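# Illustrative usage sketch (not part of the original file): the bbox head above is
# a 3-layer MLP from the transformer hidden size to 4 box coordinates; ReLU is
# applied between layers and the final layer is left linear (sigmoid is applied by
# the caller in DETR.forward).
def _example_mlp():
    head = MLP(input_dim=256, hidden_dim=256, output_dim=4, num_layers=3)
    queries = torch.rand(2, 100, 256)  # (batch, num_queries, hidden_dim)
    return head(queries).shape  # torch.Size([2, 100, 4])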
def build(args):
# the `num_classes` naming here is somewhat misleading.
# it indeed corresponds to `max_obj_id + 1`, where max_obj_id
# is the maximum id for a class in your dataset. For example,
# COCO has a max_obj_id of 90, so we pass `num_classes` to be 91.
# As another example, for a dataset that has a single class with id 1,
# you should pass `num_classes` to be 2 (max_obj_id + 1).
# For more details on this, check the following discussion
# https://github.com/facebookresearch/detr/issues/108#issuecomment-650269223
num_classes = 20 if args.dataset_file != "coco" else 91
if args.dataset_file == "coco_panoptic":
# for panoptic, we just add a num_classes that is large enough to hold
# max_obj_id + 1, but the exact value doesn't really matter
num_classes = 250
device = torch.device(args.device)
backbone = build_backbone(args)
transformer = build_transformer(args)
model = DETR(
backbone,
transformer,
num_classes=num_classes,
num_queries=args.num_queries,
aux_loss=args.aux_loss,
)
if args.masks:
model = DETRsegm(model, freeze_detr=(args.frozen_weights is not None))
matcher = build_matcher(args)
weight_dict = {"loss_ce": 1, "loss_bbox": args.bbox_loss_coef}
weight_dict["loss_giou"] = args.giou_loss_coef
if args.masks:
weight_dict["loss_mask"] = args.mask_loss_coef
weight_dict["loss_dice"] = args.dice_loss_coef
# TODO this is a hack
if args.aux_loss:
aux_weight_dict = {}
for i in range(args.dec_layers - 1):
aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()})
weight_dict.update(aux_weight_dict)
losses = ["labels", "boxes", "cardinality"]
if args.masks:
losses += ["masks"]
criterion = SetCriterion(
num_classes,
matcher=matcher,
weight_dict=weight_dict,
eos_coef=args.eos_coef,
losses=losses,
)
criterion.to(device)
postprocessors = {"bbox": PostProcess()}
if args.masks:
postprocessors["segm"] = PostProcessSegm()
if args.dataset_file == "coco_panoptic":
is_thing_map = {i: i <= 90 for i in range(201)}
postprocessors["panoptic"] = PostProcessPanoptic(
is_thing_map, threshold=0.85
)
return model, criterion, postprocessors
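# --- Illustrative sketch (not part of the original file) ---------------------------------
# What the "hack" above produces: every loss weight is duplicated with a `_{i}` suffix for
# decoder layers 0..dec_layers-2, while the last layer keeps the unsuffixed keys. The
# numbers and the helper name `_demo_aux_weight_dict` are assumptions for this sketch only.
def _demo_aux_weight_dict():
    weight_dict = {"loss_ce": 1, "loss_bbox": 5, "loss_giou": 2}
    dec_layers = 6
    aux_weight_dict = {}
    for i in range(dec_layers - 1):
        aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()})
    weight_dict.update(aux_weight_dict)
    # e.g. "loss_ce_0" ... "loss_giou_4" now exist alongside the plain "loss_ce"
    assert "loss_bbox_4" in weight_dict and "loss_bbox_5" not in weight_dict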
| d2go-main | projects_oss/detr/detr/models/detr.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Modules to compute the matching cost and solve the corresponding LSAP.
"""
import torch
from detr.util.box_ops import box_cxcywh_to_xyxy, generalized_box_iou
from scipy.optimize import linear_sum_assignment
from torch import nn
class HungarianMatcher(nn.Module):
"""This class computes an assignment between the targets and the predictions of the network
For efficiency reasons, the targets don't include the no_object. Because of this, in general,
there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,
while the others are un-matched (and thus treated as non-objects).
"""
def __init__(
self,
cost_class: float = 1,
cost_bbox: float = 1,
cost_giou: float = 1,
use_focal_loss=False,
):
"""Creates the matcher
Params:
cost_class: This is the relative weight of the classification error in the matching cost
cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost
cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost
"""
super().__init__()
self.cost_class = cost_class
self.cost_bbox = cost_bbox
self.cost_giou = cost_giou
        assert (
            cost_class != 0 or cost_bbox != 0 or cost_giou != 0
        ), "all costs can't be 0"
self.use_focal_loss = use_focal_loss
@torch.no_grad()
def forward(self, outputs, targets):
"""Performs the matching
Params:
outputs: This is a dict that contains at least these entries:
"pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits
"pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates
targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:
"labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth
objects in the target) containing the class labels
"boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates
Returns:
A list of size batch_size, containing tuples of (index_i, index_j) where:
- index_i is the indices of the selected predictions (in order)
- index_j is the indices of the corresponding selected targets (in order)
For each batch element, it holds:
len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
"""
bs, num_queries = outputs["pred_logits"].shape[:2]
# We flatten to compute the cost matrices in a batch
if self.use_focal_loss:
out_prob = outputs["pred_logits"].flatten(0, 1).sigmoid()
else:
out_prob = (
outputs["pred_logits"].flatten(0, 1).softmax(-1)
) # [batch_size * num_queries, num_classes]
out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4]
# Also concat the target labels and boxes
tgt_ids = torch.cat([v["labels"] for v in targets]) # [\sum_b NUM-BOX_b,]
tgt_bbox = torch.cat([v["boxes"] for v in targets]) # [\sum_b NUM-BOX_b, 4]
        # Compute the classification cost. Contrary to the loss, we don't use the NLL,
        # but approximate it by 1 - proba[target class].
        # The 1 is a constant that doesn't change the matching, so it can be omitted.
if self.use_focal_loss:
alpha = 0.25
gamma = 2.0
neg_cost_class = (
(1 - alpha) * (out_prob**gamma) * (-(1 - out_prob + 1e-8).log())
)
pos_cost_class = (
alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log())
)
cost_class = pos_cost_class[:, tgt_ids] - neg_cost_class[:, tgt_ids]
else:
cost_class = -out_prob[
:, tgt_ids
] # shape [batch_size * num_queries, \sum_b NUM-BOX_b]
# Compute the L1 cost between boxes
cost_bbox = torch.cdist(
out_bbox, tgt_bbox, p=1
) # shape [batch_size * num_queries,\sum_b NUM-BOX_b]
        # Compute the giou cost between boxes
# shape [batch_size * num_queries, \sum_b NUM-BOX_b]
cost_giou = -generalized_box_iou(
box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox)
)
# Final cost matrix
C = (
self.cost_bbox * cost_bbox
+ self.cost_class * cost_class
+ self.cost_giou * cost_giou
)
C = C.view(
bs, num_queries, -1
).cpu() # shape [batch_size, num_queries, \sum_b NUM-BOX_b]
sizes = [len(v["boxes"]) for v in targets] # shape [batch_size,]
# each split c shape [batch_size, num_queries, NUM-BOX_b]
indices = [
linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))
]
# A list where each item is [row_indices, col_indices]
return [
(
torch.as_tensor(i, dtype=torch.int64),
torch.as_tensor(j, dtype=torch.int64),
)
for i, j in indices
]
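# --- Illustrative sketch (not part of the original file) ---------------------------------
# Running the matcher above on dummy predictions and targets. The cost weights, tensor sizes
# and the helper name `_demo_hungarian_matcher` are assumptions for this sketch; the returned
# per-image (prediction index, target index) pairs are what the criterion consumes.
def _demo_hungarian_matcher():
    import torch

    matcher = HungarianMatcher(cost_class=1, cost_bbox=5, cost_giou=2)
    outputs = {
        "pred_logits": torch.randn(1, 100, 92),  # (batch, num_queries, num_classes + 1)
        "pred_boxes": torch.rand(1, 100, 4),  # normalized (cx, cy, w, h)
    }
    targets = [
        {
            "labels": torch.tensor([3, 17]),
            "boxes": torch.tensor([[0.5, 0.5, 0.2, 0.3], [0.25, 0.4, 0.1, 0.1]]),
        }
    ]
    indices = matcher(outputs, targets)
    pred_idx, tgt_idx = indices[0]
    # two ground-truth boxes -> exactly two matched query indices for this image
    assert len(pred_idx) == len(tgt_idx) == 2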
def build_matcher(args):
return HungarianMatcher(
cost_class=args.set_cost_class,
cost_bbox=args.set_cost_bbox,
cost_giou=args.set_cost_giou,
)
| d2go-main | projects_oss/detr/detr/models/matcher.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------
# Modified from Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------
"""
Backbone modules.
"""
from collections import OrderedDict
from typing import Dict, List
import torch
import torch.nn.functional as F
import torchvision
from detr.util.misc import is_main_process, NestedTensor
from torch import nn
from torchvision.models._utils import IntermediateLayerGetter
from .position_encoding import build_position_encoding
class FrozenBatchNorm2d(torch.nn.Module):
"""
BatchNorm2d where the batch statistics and the affine parameters are fixed.
    Copy-paste from torchvision.misc.ops with added eps before rsqrt,
    without which models other than torchvision.models.resnet[18,34,50,101]
    produce NaNs.
"""
def __init__(self, n, eps=1e-5):
super(FrozenBatchNorm2d, self).__init__()
self.register_buffer("weight", torch.ones(n))
self.register_buffer("bias", torch.zeros(n))
self.register_buffer("running_mean", torch.zeros(n))
self.register_buffer("running_var", torch.ones(n))
self.eps = eps
def _load_from_state_dict(
self,
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
):
num_batches_tracked_key = prefix + "num_batches_tracked"
if num_batches_tracked_key in state_dict:
del state_dict[num_batches_tracked_key]
super(FrozenBatchNorm2d, self)._load_from_state_dict(
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
)
def forward(self, x):
# move reshapes to the beginning
# to make it fuser-friendly
w = self.weight.reshape(1, -1, 1, 1)
b = self.bias.reshape(1, -1, 1, 1)
rv = self.running_var.reshape(1, -1, 1, 1)
rm = self.running_mean.reshape(1, -1, 1, 1)
eps = self.eps
scale = w * (rv + eps).rsqrt()
bias = b - rm * scale
return x * scale + bias
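# --- Illustrative sketch (not part of the original file) ---------------------------------
# FrozenBatchNorm2d is a fixed affine transform: scale = weight / sqrt(running_var + eps)
# and bias = bias - running_mean * scale. With the default buffers (mean 0, var 1, weight 1,
# bias 0) the output equals the input scaled by 1/sqrt(1 + eps). The helper name
# `_demo_frozen_batchnorm` is an assumption for this sketch only.
def _demo_frozen_batchnorm():
    import torch

    bn = FrozenBatchNorm2d(8)
    x = torch.randn(2, 8, 16, 16)
    y = bn(x)
    assert torch.allclose(y, x * (1.0 + bn.eps) ** -0.5)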
class BackboneBase(nn.Module):
def __init__(
self, backbone: nn.Module, train_backbone: bool, return_interm_layers: bool
):
super().__init__()
for name, parameter in backbone.named_parameters():
if (
not train_backbone
or "layer2" not in name
and "layer3" not in name
and "layer4" not in name
):
parameter.requires_grad_(False)
if return_interm_layers:
return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"}
# return_layers = {"layer2": "0", "layer3": "1", "layer4": "2"}
self.strides = [8, 16, 32]
self.num_channels = [512, 1024, 2048]
else:
return_layers = {"layer4": "0"}
self.strides = [32]
self.num_channels = [2048]
self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
def forward(self, tensor_list: NestedTensor):
xs = self.body(tensor_list.tensors)
out: Dict[str, NestedTensor] = {}
for name, x in xs.items():
m = tensor_list.mask
assert m is not None
mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0]
out[name] = NestedTensor(x, mask)
return out
class Backbone(BackboneBase):
"""ResNet backbone with frozen BatchNorm."""
def __init__(
self,
name: str,
train_backbone: bool,
return_interm_layers: bool,
dilation: bool,
):
norm_layer = FrozenBatchNorm2d
backbone = getattr(torchvision.models, name)(
replace_stride_with_dilation=[False, False, dilation],
pretrained=is_main_process(),
norm_layer=norm_layer,
)
        assert name not in ("resnet18", "resnet34"), "number of channels is hard-coded"
super().__init__(backbone, train_backbone, return_interm_layers)
if dilation:
self.strides[-1] = self.strides[-1] // 2
class Joiner(nn.Sequential):
def __init__(self, backbone, position_embedding):
super().__init__(backbone, position_embedding)
self.strides = backbone.strides
self.num_channels = backbone.num_channels
def forward(self, tensor_list: NestedTensor):
xs = self[0](tensor_list)
out: List[NestedTensor] = []
pos = []
for name, x in xs.items():
out.append(x)
# position encoding
for x in out:
pos.append(self[1](x).to(x.tensors.dtype))
# out: a list of NestedTensor
# each tensor has shape (B, C, H, W)
# each mask has shape (B, H, W)
# pos: a list of tensors, each has shape (B, C, H, W)
return out, pos
def build_backbone(args):
position_embedding = build_position_encoding(args)
train_backbone = args.lr_backbone > 0
return_interm_layers = args.masks
backbone = Backbone(
args.backbone, train_backbone, return_interm_layers, args.dilation
)
model = Joiner(backbone, position_embedding)
return model
| d2go-main | projects_oss/detr/detr/models/backbone.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Various positional encodings for the transformer.
"""
import math
import torch
from detr.util.misc import NestedTensor
from torch import nn
class PositionEmbeddingSine(nn.Module):
"""
    This is a more standard version of the position embedding, very similar to the one
    used in the "Attention Is All You Need" paper, generalized to work on images.
"""
def __init__(
self,
num_pos_feats=64,
temperature=10000,
normalize=False,
scale=None,
centered=False,
):
super().__init__()
self.num_pos_feats = num_pos_feats
self.temperature = temperature
self.normalize = normalize
if scale is not None and normalize is False:
raise ValueError("normalize should be True if scale is passed")
if scale is None:
scale = 2 * math.pi
self.scale = scale
self.centered = centered
def forward(self, tensor_list: NestedTensor):
# x shape (B, C, H, W)
x = tensor_list.tensors
# mask shape (B, H, W)
mask = tensor_list.mask
assert mask is not None
not_mask = ~mask
y_embed = not_mask.cumsum(1, dtype=torch.float32) # shape (B, H, W)
x_embed = not_mask.cumsum(2, dtype=torch.float32)
if self.normalize:
eps = 1e-6
if self.centered:
y_embed = (y_embed - 0.5) / (y_embed[:, -1:, :] + eps) * self.scale
x_embed = (x_embed - 0.5) / (x_embed[:, :, -1:] + eps) * self.scale
else:
y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
dim_t = self.temperature ** (
2 * (dim_t // 2) / self.num_pos_feats
) # shape (N, )
pos_x = x_embed[:, :, :, None] / dim_t # shape (B, H, W, N)
pos_y = y_embed[:, :, :, None] / dim_t
pos_x = torch.stack(
(pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4
).flatten(
3
) # shape (B, H, W, N)
pos_y = torch.stack(
(pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4
).flatten(
3
) # shape (B, H, W, N)
pos = torch.cat((pos_y, pos_x), dim=3).permute(
0, 3, 1, 2
) # shape (B, 2*N, H, W)
return pos
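# --- Illustrative sketch (not part of the original file) ---------------------------------
# The sine embedding above consumes a NestedTensor and returns a (B, 2 * num_pos_feats, H, W)
# map: the first half of the channels encodes the vertical (y) position, the second half the
# horizontal (x) position. The sizes and the helper name `_demo_sine_position_embedding` are
# assumptions for this sketch only.
def _demo_sine_position_embedding():
    import torch

    pos_embed = PositionEmbeddingSine(num_pos_feats=128, normalize=True)
    images = torch.randn(2, 3, 32, 40)
    mask = torch.zeros(2, 32, 40, dtype=torch.bool)  # no padded pixels in this demo
    pos = pos_embed(NestedTensor(images, mask))
    assert pos.shape == (2, 256, 32, 40)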
class PositionEmbeddingLearned(nn.Module):
"""
Absolute pos embedding, learned.
"""
def __init__(self, num_pos_feats=256):
super().__init__()
self.row_embed = nn.Embedding(50, num_pos_feats)
self.col_embed = nn.Embedding(50, num_pos_feats)
self.reset_parameters()
def reset_parameters(self):
nn.init.uniform_(self.row_embed.weight)
nn.init.uniform_(self.col_embed.weight)
def forward(self, tensor_list: NestedTensor):
x = tensor_list.tensors
h, w = x.shape[-2:]
i = torch.arange(w, device=x.device)
j = torch.arange(h, device=x.device)
x_emb = self.col_embed(i)
y_emb = self.row_embed(j)
pos = (
torch.cat(
[
x_emb.unsqueeze(0).repeat(h, 1, 1),
y_emb.unsqueeze(1).repeat(1, w, 1),
],
dim=-1,
)
.permute(2, 0, 1)
.unsqueeze(0)
.repeat(x.shape[0], 1, 1, 1)
)
return pos
def build_position_encoding(args):
N_steps = args.hidden_dim // 2
if args.position_embedding in ("v2", "sine"):
# TODO find a better way of exposing other arguments
position_embedding = PositionEmbeddingSine(N_steps, normalize=True)
elif args.position_embedding in ("v3", "learned"):
position_embedding = PositionEmbeddingLearned(N_steps)
else:
raise ValueError(f"not supported {args.position_embedding}")
return position_embedding
| d2go-main | projects_oss/detr/detr/models/position_encoding.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .detr import build
def build_model(args):
return build(args)
| d2go-main | projects_oss/detr/detr/models/__init__.py |
import copy
import torch
import torch.nn.functional as F
from torch import nn
from ..util import box_ops
from ..util.misc import (
accuracy,
get_world_size,
interpolate,
is_dist_avail_and_initialized,
nested_tensor_from_tensor_list,
)
from .segmentation import dice_loss, sigmoid_focal_loss
def _reduce_num_boxes(targets, device):
    # Compute the average number of target boxes across all nodes, for normalization purposes
num_boxes = sum(len(t["labels"]) for t in targets)
num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=device)
if is_dist_avail_and_initialized():
torch.distributed.all_reduce(num_boxes)
num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item()
return num_boxes
class SetCriterion(nn.Module):
"""This class computes the loss for DETR.
The process happens in two steps:
1) we compute hungarian assignment between ground truth boxes and the outputs of the model
2) we supervise each pair of matched ground-truth / prediction (supervise class and box)
"""
def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses):
"""Create the criterion.
Parameters:
num_classes: number of object categories, omitting the special no-object category
matcher: module able to compute a matching between targets and proposals
weight_dict: dict containing as key the names of the losses and as values their relative weight.
eos_coef: relative classification weight applied to the no-object category
losses: list of all the losses to be applied. See get_loss for list of available losses.
"""
super().__init__()
self.num_classes = num_classes
self.matcher = matcher
self.weight_dict = weight_dict
self.eos_coef = eos_coef
self.losses = losses
empty_weight = torch.ones(self.num_classes + 1)
empty_weight[-1] = self.eos_coef
self.register_buffer("empty_weight", empty_weight)
def loss_labels(self, outputs, targets, indices, num_boxes, log=True):
"""Classification loss (NLL)
targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
"""
assert "pred_logits" in outputs
# shape (batch_size, num_queries, NUM_CLASS + 1)
src_logits = outputs["pred_logits"]
# idx = (batch_idx, src_idx)
# batch_idx shape [\sum_b num_match_b]
# src_idx shape [\sum_b num_match_b]
idx = self._get_src_permutation_idx(indices)
# targets: List[Dict[str, torch.Tensor]]. Keys
# "labels": [NUM_BOX,]
# "boxes": [NUM_BOX, 4]
# target_classes_o shape [batch_size * num_match]
target_classes_o = torch.cat(
[t["labels"][J] for t, (_, J) in zip(targets, indices)]
)
# shape (batch_size, num_queries)
target_classes = torch.full(
src_logits.shape[:2],
self.num_classes,
dtype=torch.int64,
device=src_logits.device,
)
target_classes[idx] = target_classes_o
loss_ce = F.cross_entropy(
src_logits.transpose(1, 2), target_classes, self.empty_weight
)
losses = {"loss_ce": loss_ce}
if log:
# TODO this should probably be a separate loss, not hacked in this one here
losses["class_error"] = 100 - accuracy(src_logits[idx], target_classes_o)[0]
return losses
def forground_background_loss_labels(
self, outputs, targets, indices, num_boxes, log=True
):
assert "pred_logits" in outputs
# shape (batch_size, num_queries, 1)
src_logits = outputs["pred_logits"]
batch_size, num_queries = src_logits.shape[:2]
assert src_logits.shape[2] == 1, f"expect 1 class {src_logits.shape[2]}"
idx = self._get_src_permutation_idx(indices)
target_classes_o = torch.cat(
[t["labels"][J] for t, (_, J) in zip(targets, indices)]
)
target_classes = torch.full(
src_logits.shape[:2],
1,
dtype=torch.int64,
device=src_logits.device,
)
target_classes[idx] = target_classes_o
target_classes_onehot = torch.zeros(
[src_logits.shape[0], src_logits.shape[1], 2],
dtype=src_logits.dtype,
layout=src_logits.layout,
device=src_logits.device,
)
target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1)
target_classes_onehot = target_classes_onehot[:, :, :-1]
loss_ce = (
sigmoid_focal_loss(
src_logits,
target_classes_onehot,
num_boxes,
alpha=self.focal_alpha,
gamma=2,
)
* src_logits.shape[1]
)
return {"loss_ce": loss_ce}
@torch.no_grad()
def loss_cardinality(self, outputs, targets, indices, num_boxes):
"""Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes
This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients
"""
pred_logits = outputs["pred_logits"]
device = pred_logits.device
tgt_lengths = torch.as_tensor(
[len(v["labels"]) for v in targets], device=device
)
# Count the number of predictions that are NOT "no-object" (which is the last class)
card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1)
card_err = F.l1_loss(card_pred.float(), tgt_lengths.float())
losses = {"cardinality_error": card_err}
return losses
def loss_boxes(self, outputs, targets, indices, num_boxes):
"""Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss
targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]
The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size.
"""
assert "pred_boxes" in outputs
idx = self._get_src_permutation_idx(indices)
# shape [\sum_b num_matches_b, 4]
src_boxes = outputs["pred_boxes"][idx]
# shape [\sum_b num_matches_b, 4]
target_boxes = torch.cat(
[t["boxes"][i] for t, (_, i) in zip(targets, indices)], dim=0
)
loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction="none")
losses = {}
losses["loss_bbox"] = loss_bbox.sum() / num_boxes
loss_giou = 1 - torch.diag(
box_ops.generalized_box_iou(
box_ops.box_cxcywh_to_xyxy(src_boxes),
box_ops.box_cxcywh_to_xyxy(target_boxes),
)
)
losses["loss_giou"] = loss_giou.sum() / num_boxes
return losses
def loss_masks(self, outputs, targets, indices, num_boxes):
"""Compute the losses related to the masks: the focal loss and the dice loss.
targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w]
"""
assert "pred_masks" in outputs
src_idx = self._get_src_permutation_idx(indices)
tgt_idx = self._get_tgt_permutation_idx(indices)
src_masks = outputs["pred_masks"]
src_masks = src_masks[src_idx]
masks = [t["masks"] for t in targets]
# TODO use valid to mask invalid areas due to padding in loss
target_masks, valid = nested_tensor_from_tensor_list(masks).decompose()
target_masks = target_masks.to(src_masks)
target_masks = target_masks[tgt_idx]
# upsample predictions to the target size
src_masks = interpolate(
src_masks[:, None],
size=target_masks.shape[-2:],
mode="bilinear",
align_corners=False,
)
src_masks = src_masks[:, 0].flatten(1)
target_masks = target_masks.flatten(1)
target_masks = target_masks.view(src_masks.shape)
losses = {
"loss_mask": sigmoid_focal_loss(src_masks, target_masks, num_boxes),
"loss_dice": dice_loss(src_masks, target_masks, num_boxes),
}
return losses
def _get_src_permutation_idx(self, indices):
# permute predictions following indices
batch_idx = torch.cat(
[torch.full_like(src, i) for i, (src, _) in enumerate(indices)]
) # shape [\sum_b num_match_b]
src_idx = torch.cat([src for (src, _) in indices]) # shape [\sum_b num_match_b]
return batch_idx, src_idx
def _get_tgt_permutation_idx(self, indices):
# permute targets following indices
batch_idx = torch.cat(
[torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)]
) # shape [\sum_b num_match_b]
tgt_idx = torch.cat([tgt for (_, tgt) in indices]) # shape [\sum_b num_match_b]
return batch_idx, tgt_idx
def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs):
loss_map = {
"labels": self.loss_labels,
"cardinality": self.loss_cardinality,
"boxes": self.loss_boxes,
"masks": self.loss_masks,
}
assert loss in loss_map, f"do you really want to compute {loss} loss?"
return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs)
def get_foreground_background_loss(
self, loss, outputs, targets, indices, num_boxes, **kwargs
):
loss_map = {
"labels": self.forground_background_loss_labels,
"cardinality": self.loss_cardinality,
"boxes": self.loss_boxes,
}
assert loss in loss_map, f"do you really want to compute {loss} loss?"
return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs)
def _forward(self, outputs, outputs_without_aux, targets):
# Retrieve the matching between the outputs of the last layer and the targets
# A list where each item is [row_indices, col_indices]
indices = self.matcher(outputs_without_aux, targets)
num_boxes = _reduce_num_boxes(targets, next(iter(outputs.values())).device)
# Compute all the requested losses
losses = {}
for loss in self.losses:
losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))
# In case of auxiliary losses, we repeat this process with the output of each intermediate layer.
if "aux_outputs" in outputs:
for i, aux_outputs in enumerate(outputs["aux_outputs"]):
indices = self.matcher(aux_outputs, targets)
for loss in self.losses:
if loss == "masks":
# Intermediate masks losses are too costly to compute, we ignore them.
continue
kwargs = {}
if loss == "labels":
# Logging is enabled only for the last layer
kwargs = {"log": False}
l_dict = self.get_loss(
loss, aux_outputs, targets, indices, num_boxes, **kwargs
)
l_dict = {k + f"_{i}": v for k, v in l_dict.items()}
losses.update(l_dict)
return losses
def forward(self, outputs, targets):
"""This performs the loss computation.
Parameters:
outputs: dict of tensors, see the output specification of the model for the format
targets: list of dicts, such that len(targets) == batch_size.
The expected keys in each dict depends on the losses applied, see each loss' doc
"""
# "pred_logits" shape (B, S, NUM_CLASS + 1)
# "pred_boxes" shape (B, S, 4)
outputs_without_aux = {k: v for k, v in outputs.items() if k != "aux_outputs"}
return self._forward(outputs, outputs_without_aux, targets)
class FocalLossSetCriterion(SetCriterion):
"""This class computes the loss for DETR.
The process happens in two steps:
1) we compute hungarian assignment between ground truth boxes and the outputs of the model
2) we supervise each pair of matched ground-truth / prediction (supervise class and box)
"""
def __init__(self, num_classes, matcher, weight_dict, losses, focal_alpha=0.25):
"""Create the criterion.
Parameters:
num_classes: number of object categories, omitting the special no-object category
matcher: module able to compute a matching between targets and proposals
weight_dict: dict containing as key the names of the losses and as values their relative weight.
losses: list of all the losses to be applied. See get_loss for list of available losses.
focal_alpha: alpha in Focal Loss
"""
super().__init__(num_classes, matcher, weight_dict, 0, losses)
self.focal_alpha = focal_alpha
def loss_labels(self, outputs, targets, indices, num_boxes, log=True):
"""Classification loss (NLL)
targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
"""
assert "pred_logits" in outputs
# shape (batch_size, num_queries, num_classes)
src_logits = outputs["pred_logits"]
idx = self._get_src_permutation_idx(indices)
target_classes_o = torch.cat(
[t["labels"][J] for t, (_, J) in zip(targets, indices)]
)
target_classes = torch.full(
src_logits.shape[:2],
self.num_classes,
dtype=torch.int64,
device=src_logits.device,
)
target_classes[idx] = target_classes_o
target_classes_onehot = torch.zeros(
[src_logits.shape[0], src_logits.shape[1], src_logits.shape[2] + 1],
dtype=src_logits.dtype,
layout=src_logits.layout,
device=src_logits.device,
)
target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1)
target_classes_onehot = target_classes_onehot[:, :, :-1]
loss_ce = (
sigmoid_focal_loss(
src_logits,
target_classes_onehot,
num_boxes,
alpha=self.focal_alpha,
gamma=2,
)
* src_logits.shape[1]
)
losses = {"loss_ce": loss_ce}
if log:
# TODO this should probably be a separate loss, not hacked in this one here
losses["class_error"] = 100 - accuracy(src_logits[idx], target_classes_o)[0]
return losses
def forward(self, outputs, targets):
"""This performs the loss computation.
Parameters:
outputs: dict of tensors, see the output specification of the model for the format
targets: list of dicts, such that len(targets) == batch_size.
The expected keys in each dict depends on the losses applied, see each loss' doc
"""
outputs_without_aux = {
k: v
for k, v in outputs.items()
if k != "aux_outputs" and k != "enc_outputs"
}
losses = self._forward(outputs, outputs_without_aux, targets)
if "enc_outputs" in outputs:
num_boxes = _reduce_num_boxes(targets, next(iter(outputs.values())).device)
enc_outputs = outputs["enc_outputs"]
bin_targets = copy.deepcopy(targets)
for bt in bin_targets:
bt["labels"] = torch.zeros_like(bt["labels"])
indices = self.matcher(enc_outputs, bin_targets)
for loss in self.losses:
if loss == "masks":
# Intermediate masks losses are too costly to compute, we ignore them.
continue
kwargs = {}
if loss == "labels":
# Logging is enabled only for the last layer
kwargs["log"] = False
l_dict = self.get_foreground_background_loss(
loss, enc_outputs, bin_targets, indices, num_boxes, **kwargs
)
l_dict = {k + "_enc": v for k, v in l_dict.items()}
losses.update(l_dict)
return losses
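# --- Illustrative sketch (not part of the original file) ---------------------------------
# The cardinality "error" logged by loss_cardinality above is simply the L1 distance between
# how many queries predict a real class (anything but the last, no-object slot) and how many
# ground-truth boxes each image contains. The sizes and the helper name
# `_demo_cardinality_error` are assumptions for this sketch only.
def _demo_cardinality_error():
    import torch
    import torch.nn.functional as F

    pred_logits = torch.randn(2, 100, 92)  # (batch, num_queries, num_classes + 1)
    tgt_lengths = torch.tensor([3, 7])  # number of ground-truth boxes per image
    card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1)
    card_err = F.l1_loss(card_pred.float(), tgt_lengths.float())
    assert card_err.ndim == 0  # a scalar that is only used for logging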
| d2go-main | projects_oss/detr/detr/models/setcriterion.py |
# ------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------
"""
Deformable DETR model and criterion classes.
"""
import copy
import math
import torch
import torch.nn.functional as F
from detectron2.config import configurable
from torch import nn
from ..util import box_ops
from ..util.misc import (
accuracy,
get_world_size,
interpolate,
is_dist_avail_and_initialized,
nested_tensor_from_tensor_list,
NestedTensor,
)
from .backbone import build_backbone
from .build import build_detr_backbone, DETR_MODEL_REGISTRY
from .deformable_transformer import DeformableTransformer
from .matcher import build_matcher
from .segmentation import (
DETRsegm,
dice_loss,
PostProcessPanoptic,
PostProcessSegm,
sigmoid_focal_loss,
)
from .setcriterion import FocalLossSetCriterion
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
@DETR_MODEL_REGISTRY.register()
class DeformableDETR(nn.Module):
"""This is the Deformable DETR module that performs object detection"""
@configurable
def __init__(
self,
backbone,
transformer,
num_classes,
num_queries,
num_feature_levels,
aux_loss=True,
with_box_refine=False,
two_stage=False,
bbox_embed_num_layers=3,
):
"""Initializes the model.
Parameters:
backbone: torch module of the backbone to be used. See backbone.py
transformer: torch module of the transformer architecture. See transformer.py
num_classes: number of object classes
            num_queries: number of object queries, i.e. detection slots. This is the maximal number of objects
                         DETR can detect in a single image. For COCO, we recommend 100 queries.
aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used.
with_box_refine: iterative bounding box refinement
two_stage: two-stage Deformable DETR
bbox_embed_num_layers: number of FC layers in bbox_embed MLP
"""
super().__init__()
self.num_queries = num_queries
self.transformer = transformer
hidden_dim = transformer.d_model
# We will use sigmoid activation and focal loss
self.class_embed = nn.Linear(hidden_dim, num_classes)
self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, bbox_embed_num_layers)
self.num_feature_levels = num_feature_levels
if not two_stage:
self.query_embed = nn.Embedding(num_queries, hidden_dim * 2)
if num_feature_levels > 1:
num_backbone_outs = len(backbone.strides)
input_proj_list = []
for _ in range(num_backbone_outs):
in_channels = backbone.num_channels[_]
input_proj_list.append(
nn.Sequential(
nn.Conv2d(in_channels, hidden_dim, kernel_size=1),
nn.GroupNorm(32, hidden_dim),
)
)
for _ in range(num_feature_levels - num_backbone_outs):
input_proj_list.append(
nn.Sequential(
nn.Conv2d(
in_channels, hidden_dim, kernel_size=3, stride=2, padding=1
),
nn.GroupNorm(32, hidden_dim),
)
)
in_channels = hidden_dim
self.input_proj = nn.ModuleList(input_proj_list)
else:
self.input_proj = nn.ModuleList(
[
nn.Sequential(
nn.Conv2d(backbone.num_channels[0], hidden_dim, kernel_size=1),
nn.GroupNorm(32, hidden_dim),
)
]
)
self.backbone = backbone
self.aux_loss = aux_loss
self.with_box_refine = with_box_refine
self.two_stage = two_stage
prior_prob = 0.01
bias_value = -math.log((1 - prior_prob) / prior_prob)
self.class_embed.bias.data = torch.ones(num_classes) * bias_value
for proj in self.input_proj:
nn.init.xavier_uniform_(proj[0].weight, gain=1)
nn.init.constant_(proj[0].bias, 0)
num_pred = transformer.decoder.num_layers
if with_box_refine:
self.class_embed = _get_clones(self.class_embed, num_pred)
self.bbox_embed = _get_clones(self.bbox_embed, num_pred)
# initialize the box scale height/width at the 1st scale to be 0.1
nn.init.constant_(self.bbox_embed[0].layers[-1].bias.data[2:], -2.0)
# hack implementation for iterative bounding box refinement
self.transformer.decoder.bbox_embed = self.bbox_embed
else:
nn.init.constant_(self.bbox_embed.layers[-1].bias.data[2:], -2.0)
self.class_embed = nn.ModuleList(
[self.class_embed for _ in range(num_pred)]
)
self.bbox_embed = nn.ModuleList([self.bbox_embed for _ in range(num_pred)])
self.transformer.decoder.bbox_embed = None
if two_stage:
# hack implementation for two-stage
# We only predict foreground/background at the output of encoder
class_embed = nn.Linear(hidden_dim, 1)
class_embed.bias.data = torch.ones(1) * bias_value
self.transformer.encoder.class_embed = class_embed
for box_embed in self.bbox_embed:
nn.init.constant_(box_embed.layers[-1].bias.data[2:], 0.0)
self.transformer.encoder.bbox_embed = MLP(
hidden_dim, hidden_dim, 4, bbox_embed_num_layers
)
@classmethod
def from_config(cls, cfg):
num_classes = cfg.MODEL.DETR.NUM_CLASSES
hidden_dim = cfg.MODEL.DETR.HIDDEN_DIM
num_queries = cfg.MODEL.DETR.NUM_OBJECT_QUERIES
# Transformer parameters:
nheads = cfg.MODEL.DETR.NHEADS
dropout = cfg.MODEL.DETR.DROPOUT
dim_feedforward = cfg.MODEL.DETR.DIM_FEEDFORWARD
enc_layers = cfg.MODEL.DETR.ENC_LAYERS
dec_layers = cfg.MODEL.DETR.DEC_LAYERS
pre_norm = cfg.MODEL.DETR.PRE_NORM
# Loss parameters:
deep_supervision = cfg.MODEL.DETR.DEEP_SUPERVISION
num_feature_levels = cfg.MODEL.DETR.NUM_FEATURE_LEVELS
use_focal_loss = cfg.MODEL.DETR.USE_FOCAL_LOSS
backbone = build_detr_backbone(cfg)
transformer = DeformableTransformer(
d_model=hidden_dim,
nhead=nheads,
num_encoder_layers=enc_layers,
num_decoder_layers=dec_layers,
dim_feedforward=dim_feedforward,
dropout=dropout,
activation="relu",
return_intermediate_dec=True,
num_feature_levels=num_feature_levels,
dec_n_points=4,
enc_n_points=4,
two_stage=cfg.MODEL.DETR.TWO_STAGE,
two_stage_num_proposals=num_queries,
)
return {
"backbone": backbone,
"transformer": transformer,
"num_classes": num_classes,
"num_queries": num_queries,
"num_feature_levels": num_feature_levels,
"aux_loss": deep_supervision,
"with_box_refine": cfg.MODEL.DETR.WITH_BOX_REFINE,
"two_stage": cfg.MODEL.DETR.TWO_STAGE,
}
def forward(self, samples: NestedTensor):
"""The forward expects a NestedTensor, which consists of:
- samples.tensor: batched images, of shape [batch_size x 3 x H x W]
- samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels
It returns a dict with the following elements:
- "pred_logits": the classification logits (including no-object) for all queries.
Shape= [batch_size x num_queries x (num_classes + 1)]
- "pred_boxes": The normalized boxes coordinates for all queries, represented as
(center_x, center_y, height, width). These values are normalized in [0, 1],
relative to the size of each individual image (disregarding possible padding).
See PostProcess for information on how to retrieve the unnormalized bounding box.
- "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of
dictionnaries containing the two above keys for each decoder layer.
"""
if isinstance(samples, (list, torch.Tensor)):
samples = nested_tensor_from_tensor_list(samples)
# features is a list of num_levels NestedTensor.
        # pos is a list of num_levels tensors. Each one has shape (B, C, H_l, W_l).
features, pos = self.backbone(samples)
# srcs is a list of num_levels tensor. Each one has shape (B, C, H_l, W_l)
srcs = []
# masks is a list of num_levels tensor. Each one has shape (B, H_l, W_l)
masks = []
for l, feat in enumerate(features):
# src shape: (N, C, H_l, W_l)
# mask shape: (N, H_l, W_l)
src, mask = feat.decompose()
srcs.append(self.input_proj[l](src))
masks.append(mask)
assert mask is not None
if self.num_feature_levels > len(srcs):
N, C, H, W = samples.tensor.size()
sample_mask = torch.ones((N, H, W), dtype=torch.bool, device=src.device)
for idx in range(N):
image_size = samples.image_sizes[idx]
h, w = image_size
sample_mask[idx, :h, :w] = False
# sample_mask shape (1, N, H, W)
sample_mask = sample_mask[None].float()
_len_srcs = len(srcs)
for l in range(_len_srcs, self.num_feature_levels):
if l == _len_srcs:
src = self.input_proj[l](features[-1].tensors)
else:
src = self.input_proj[l](srcs[-1])
b, _, h, w = src.size()
# mask shape (batch_size, h_l, w_l)
mask = F.interpolate(sample_mask, size=src.shape[-2:]).to(torch.bool)[0]
pos_l = self.backbone[1](NestedTensor(src, mask)).to(src.dtype)
srcs.append(src)
masks.append(mask)
pos.append(pos_l)
query_embeds = None
if not self.two_stage:
# shape (num_queries, hidden_dim*2)
query_embeds = self.query_embed.weight
# hs shape: (num_layers, batch_size, num_queries, c)
# init_reference shape: (batch_size, num_queries, 2)
# inter_references shape: (num_layers, bs, num_queries, num_levels, 2)
(
hs,
init_reference,
inter_references,
enc_outputs_class,
enc_outputs_coord_unact,
) = self.transformer(srcs, masks, pos, query_embeds)
outputs_classes = []
outputs_coords = []
for lvl in range(hs.shape[0]):
# reference shape: (num_queries, 2)
if lvl == 0:
reference = init_reference
else:
reference = inter_references[lvl - 1]
# shape (batch_size, num_queries, num_classes)
outputs_class = self.class_embed[lvl](hs[lvl])
# shape (batch_size, num_queries, 4). 4-tuple (cx, cy, w, h)
tmp = self.bbox_embed[lvl](hs[lvl])
if reference.shape[-1] == 4:
tmp += reference
else:
assert reference.shape[-1] == 2
tmp[..., :2] += reference
# shape (batch_size, num_queries, 4). 4-tuple (cx, cy, w, h)
outputs_coord = tmp.sigmoid()
outputs_classes.append(outputs_class)
outputs_coords.append(outputs_coord)
# shape (num_levels, batch_size, num_queries, num_classes)
outputs_class = torch.stack(outputs_classes)
# shape (num_levels, batch_size, num_queries, 4)
outputs_coord = torch.stack(outputs_coords)
out = {"pred_logits": outputs_class[-1], "pred_boxes": outputs_coord[-1]}
if self.aux_loss:
out["aux_outputs"] = self._set_aux_loss(outputs_class, outputs_coord)
if self.two_stage:
enc_outputs_coord = enc_outputs_coord_unact.sigmoid()
out["enc_outputs"] = {
"pred_logits": enc_outputs_class,
"pred_boxes": enc_outputs_coord,
}
return out
@torch.jit.unused
def _set_aux_loss(self, outputs_class, outputs_coord):
# this is a workaround to make torchscript happy, as torchscript
# doesn't support dictionary with non-homogeneous values, such
# as a dict having both a Tensor and a list.
return [
{"pred_logits": a, "pred_boxes": b}
for a, b in zip(outputs_class[:-1], outputs_coord[:-1])
]
class PostProcess(nn.Module):
"""This module converts the model's output into the format expected by the coco api"""
@torch.no_grad()
def forward(self, outputs, target_sizes):
"""Perform the computation
Parameters:
outputs: raw outputs of the model
            target_sizes: tensor of dimension [batch_size x 2] containing the size of each image of the batch
                          For evaluation, this must be the original image size (before any data augmentation)
                          For visualization, this should be the image size after data augmentation, but before padding
"""
out_logits, out_bbox = outputs["pred_logits"], outputs["pred_boxes"]
assert len(out_logits) == len(target_sizes)
assert target_sizes.shape[1] == 2
prob = out_logits.sigmoid()
topk_values, topk_indexes = torch.topk(
prob.view(out_logits.shape[0], -1), 100, dim=1
)
scores = topk_values
topk_boxes = topk_indexes // out_logits.shape[2]
labels = topk_indexes % out_logits.shape[2]
boxes = box_ops.box_cxcywh_to_xyxy(out_bbox)
boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))
# and from relative [0, 1] to absolute [0, height] coordinates
img_h, img_w = target_sizes.unbind(1)
scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
boxes = boxes * scale_fct[:, None, :]
results = [
{"scores": s, "labels": l, "boxes": b}
for s, l, b in zip(scores, labels, boxes)
]
return results
class MLP(nn.Module):
"""Very simple multi-layer perceptron (also called FFN)"""
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(
nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])
)
# initialize FC weights and bias
for i, layer in enumerate(self.layers):
if i < num_layers - 1:
nn.init.kaiming_uniform_(layer.weight, a=1)
else:
nn.init.constant_(layer.weight, 0)
nn.init.constant_(layer.bias, 0)
def forward(self, x):
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
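# --- Illustrative sketch (not part of the original file) ---------------------------------
# How the sigmoid/top-k decoding in the PostProcess above recovers the query index and the
# class label from indices taken over the flattened (num_queries * num_classes) score map.
# The sizes and the helper name `_demo_topk_decoding` are assumptions for this sketch only.
def _demo_topk_decoding():
    import torch

    num_queries, num_classes, k = 300, 91, 100
    prob = torch.randn(1, num_queries, num_classes).sigmoid()
    topk_values, topk_indexes = torch.topk(prob.view(1, -1), k, dim=1)
    topk_boxes = topk_indexes // num_classes  # which query each detection comes from
    labels = topk_indexes % num_classes  # which class it is
    assert int(topk_boxes.max()) < num_queries and int(labels.max()) < num_classes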
| d2go-main | projects_oss/detr/detr/models/deformable_detr.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
DETR Transformer class.
Copy-paste from torch.nn.Transformer with modifications:
* positional encodings are passed in MHattention
* extra LN at the end of encoder is removed
* decoder returns a stack of activations from all decoding layers
"""
import copy
from typing import Optional
import torch
import torch.nn.functional as F
from torch import nn, Tensor
class Transformer(nn.Module):
def __init__(
self,
d_model=512,
nhead=8,
num_encoder_layers=6,
num_decoder_layers=6,
dim_feedforward=2048,
dropout=0.1,
activation="relu",
normalize_before=False,
return_intermediate_dec=False,
learnable_tgt=False,
):
super().__init__()
encoder_layer = TransformerEncoderLayer(
d_model, nhead, dim_feedforward, dropout, activation, normalize_before
)
encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
self.encoder = TransformerEncoder(
encoder_layer, num_encoder_layers, encoder_norm
)
decoder_layer = TransformerDecoderLayer(
d_model, nhead, dim_feedforward, dropout, activation, normalize_before
)
decoder_norm = nn.LayerNorm(d_model)
self.decoder = TransformerDecoder(
decoder_layer,
num_decoder_layers,
decoder_norm,
return_intermediate=return_intermediate_dec,
)
self._reset_parameters()
self.d_model = d_model
self.nhead = nhead
self.learnable_tgt = learnable_tgt
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def forward(self, src, mask, query_embed, pos_embed):
# src shape (B, C, H, W)
# mask shape (B, H, W)
# query_embed shape (M, C)
# pos_embed shape (B, C, H, W)
# flatten NxCxHxW to HWxNxC
bs, c, h, w = src.shape
src = src.flatten(2).permute(2, 0, 1) # shape (L, B, C)
pos_embed = pos_embed.flatten(2).permute(2, 0, 1) # shape (L, B, C)
mask = mask.flatten(1) # shape (B, HxW)
if self.learnable_tgt:
query_embed, tgt = torch.split(query_embed, c, dim=1)
query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1) # shape (M, B, C)
tgt = tgt.unsqueeze(1).repeat(1, bs, 1) # shape (M, B, C)
else:
query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1) # shape (M, B, C)
tgt = torch.zeros_like(query_embed)
# memory shape (L, B, C)
memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed)
        # hs shape (NUM_LAYER, S, B, C)
hs = self.decoder(
tgt,
memory,
memory_key_padding_mask=mask,
pos=pos_embed,
query_pos=query_embed,
)
        # return shape (NUM_LAYER, B, S, C) and (B, C, H, W)
return hs.transpose(1, 2), memory.permute(1, 2, 0).view(bs, c, h, w)
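# --- Illustrative sketch (not part of the original file) ---------------------------------
# The DETR transformer above flattens a (B, C, H, W) feature map into a (H*W, B, C) sequence
# and returns the stack of per-layer decoder states plus the reshaped encoder memory. The
# small sizes and the helper name `_demo_detr_transformer_shapes` are assumptions for this
# sketch only.
def _demo_detr_transformer_shapes():
    import torch

    model = Transformer(
        d_model=64,
        nhead=4,
        num_encoder_layers=1,
        num_decoder_layers=2,
        dim_feedforward=128,
        return_intermediate_dec=True,
    )
    src = torch.randn(2, 64, 8, 10)  # projected backbone feature map
    mask = torch.zeros(2, 8, 10, dtype=torch.bool)  # no padded pixels
    query_embed = torch.randn(25, 64)  # 25 object queries
    pos_embed = torch.randn(2, 64, 8, 10)
    hs, memory = model(src, mask, query_embed, pos_embed)
    assert hs.shape == (2, 2, 25, 64)  # (num_layers, batch, num_queries, d_model)
    assert memory.shape == (2, 64, 8, 10)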
class TransformerEncoder(nn.Module):
def __init__(self, encoder_layer, num_layers, norm=None):
super().__init__()
self.layers = _get_clones(encoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
def forward(
self,
src,
mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
):
output = src
# mask, shape (L, L)
# src_key_padding_mask, shape (B, L)
for layer in self.layers:
output = layer(
output,
src_mask=mask,
src_key_padding_mask=src_key_padding_mask,
pos=pos,
)
if self.norm is not None:
output = self.norm(output)
return output
class TransformerDecoder(nn.Module):
def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
super().__init__()
self.layers = _get_clones(decoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
self.return_intermediate = return_intermediate
def forward(
self,
tgt,
memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
):
output = tgt
intermediate = []
# tgt shape (L, B, C)
# tgt_mask shape (L, L)
# tgt_key_padding_mask shape (B, L)
# memory_mask shape (L, S)
# memory_key_padding_mask shape (B, S)
for layer in self.layers:
output = layer(
output,
memory,
tgt_mask=tgt_mask,
memory_mask=memory_mask,
tgt_key_padding_mask=tgt_key_padding_mask,
memory_key_padding_mask=memory_key_padding_mask,
pos=pos,
query_pos=query_pos,
)
if self.return_intermediate:
intermediate.append(self.norm(output))
if self.norm is not None:
output = self.norm(output)
if self.return_intermediate:
intermediate.pop()
intermediate.append(output)
if self.return_intermediate:
return torch.stack(intermediate)
# return shape (NUM_LAYER, L, B, C)
return output.unsqueeze(0)
class TransformerEncoderLayer(nn.Module):
def __init__(
self,
d_model,
nhead,
dim_feedforward=2048,
dropout=0.1,
activation="relu",
normalize_before=False,
):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
assert not normalize_before, "normalize_before is not supported"
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward(
self,
src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
):
q = k = self.with_pos_embed(src, pos) # shape (L, B, D)
# src mask, shape (L, L)
# src_key_padding_mask: shape (B, L)
src2 = self.self_attn(
q, k, src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask
)[0]
src = src + self.dropout1(src2)
src = self.norm1(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
class TransformerDecoderLayer(nn.Module):
def __init__(
self,
d_model,
nhead,
dim_feedforward=2048,
dropout=0.1,
activation="relu",
normalize_before=False,
):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.norm3 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
assert not normalize_before, "normalize_before is not supported"
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward(
self,
tgt,
memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
):
# tgt shape (L, B, C)
# tgt_mask shape (L, L)
# tgt_key_padding_mask shape (B, L)
q = k = self.with_pos_embed(tgt, query_pos)
tgt2 = self.self_attn(
q, k, tgt, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask
)[0]
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
# memory_mask shape (L, S)
# memory_key_padding_mask shape (B, S)
# query_pos shape (L, B, C)
tgt2 = self.multihead_attn(
self.with_pos_embed(tgt, query_pos),
self.with_pos_embed(memory, pos),
memory,
attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask,
)[0]
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
# return tgt shape (L, B, C)
return tgt
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def build_transformer(args):
return Transformer(
d_model=args.hidden_dim,
dropout=args.dropout,
nhead=args.nheads,
dim_feedforward=args.dim_feedforward,
num_encoder_layers=args.enc_layers,
num_decoder_layers=args.dec_layers,
normalize_before=args.pre_norm,
return_intermediate_dec=True,
)
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(f"activation should be relu/gelu, not {activation}.")
| d2go-main | projects_oss/detr/detr/models/transformer.py |
# ------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------
import copy
import math
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn.init import constant_, normal_, xavier_uniform_
from ..modules import MSDeformAttn
from ..util.misc import inverse_sigmoid
# we do not use float("-inf") to avoid potential NaN during training
NEG_INF = -10000.0
class DeformableTransformer(nn.Module):
def __init__(
self,
d_model=256,
nhead=8,
num_encoder_layers=6,
num_decoder_layers=6,
dim_feedforward=1024,
dropout=0.1,
activation="relu",
return_intermediate_dec=False,
num_feature_levels=4,
dec_n_points=4,
enc_n_points=4,
two_stage=False,
two_stage_num_proposals=300,
decoder_block_grad=True,
):
super().__init__()
self.d_model = d_model
self.nhead = nhead
self.two_stage = two_stage
self.two_stage_num_proposals = two_stage_num_proposals
encoder_layer = DeformableTransformerEncoderLayer(
d_model,
dim_feedforward,
dropout,
activation,
num_feature_levels,
nhead,
enc_n_points,
)
self.encoder = DeformableTransformerEncoder(encoder_layer, num_encoder_layers)
decoder_layer = DeformableTransformerDecoderLayer(
d_model,
dim_feedforward,
dropout,
activation,
num_feature_levels,
nhead,
dec_n_points,
)
self.decoder = DeformableTransformerDecoder(
decoder_layer,
num_decoder_layers,
return_intermediate_dec,
decoder_block_grad,
)
self.level_embed = nn.Parameter(torch.Tensor(num_feature_levels, d_model))
if two_stage:
self.enc_output = nn.Linear(d_model, d_model)
self.enc_output_norm = nn.LayerNorm(d_model)
self.pos_trans = nn.Linear(d_model * 2, d_model * 2)
self.pos_trans_norm = nn.LayerNorm(d_model * 2)
else:
self.reference_points = nn.Linear(d_model, 2)
self._reset_parameters()
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
xavier_uniform_(p)
for m in self.modules():
if isinstance(m, MSDeformAttn):
m._reset_parameters()
if not self.two_stage:
xavier_uniform_(self.reference_points.weight, gain=1.0)
constant_(self.reference_points.bias, 0.0)
normal_(self.level_embed)
def get_proposal_pos_embed(self, proposals):
"""
Args
proposals: shape (bs, top_k, 4). Last dimension of size 4 denotes (cx, cy, w, h)
"""
num_pos_feats = 128
temperature = 10000
scale = 2 * math.pi
# shape (num_pos_feats)
dim_t = torch.arange(
num_pos_feats, dtype=torch.float32, device=proposals.device
)
dim_t = temperature ** (2 * (dim_t // 2) / num_pos_feats)
# N, L, 4
proposals = proposals.sigmoid() * scale
# pos shape: (bs, top_k, 4, num_pos_feats)
pos = proposals[:, :, :, None] / dim_t
# pos shape: (bs, top_k, 4, num_pos_feats/2, 2) -> (bs, top_k, 4 * num_pos_feats)
pos = torch.stack(
(pos[:, :, :, 0::2].sin(), pos[:, :, :, 1::2].cos()), dim=4
).flatten(2)
# pos shape: (bs, top_k, 4 * num_pos_feats) = (bs, top_k, 512)
return pos
def gen_encoder_output_proposals(self, memory, memory_padding_mask, spatial_shapes):
"""
Args:
memory: shape (bs, K, C) where K = \sum_l H_l * w_l
memory_padding_mask: shape (bs, K)
spatial_shapes: shape (num_levels, 2)
"""
N_, S_, C_ = memory.shape
proposals = []
_cur = 0
base_object_scale = 0.05
for lvl, (H_, W_) in enumerate(spatial_shapes):
# shape (bs, H_l * W_l)
mask_flatten_ = memory_padding_mask[:, _cur : (_cur + H_ * W_)].view(
N_, H_, W_, 1
)
# shape (bs, )
valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1)
# shape (bs, )
valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1)
# grid_y, grid_x shape (H_l, W_l)
grid_y, grid_x = torch.meshgrid(
torch.linspace(
0, H_ - 1, H_, dtype=torch.float32, device=memory.device
),
torch.linspace(
0, W_ - 1, W_, dtype=torch.float32, device=memory.device
),
)
# grid shape (H_l, W_l, 2)
grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1)
# scale shape (bs, 1, 1, 2)
scale = torch.cat([valid_W.unsqueeze(-1), valid_H.unsqueeze(-1)], 1).view(
N_, 1, 1, 2
)
# grid shape (bs, H_l, W_l, 2). Value could be > 1
grid = (grid.unsqueeze(0).expand(N_, -1, -1, -1) + 0.5) / scale
# wh shape (bs, H_l, W_l, 2)
wh = torch.ones_like(grid) * base_object_scale * (2.0**lvl)
# proposal shape (bs, H_l * W_l, 4)
proposal = torch.cat((grid, wh), -1).view(N_, -1, 4)
proposals.append(proposal)
_cur += H_ * W_
# shape (bs, K, 4) where K = \sum_l H_l * W_l
output_proposals = torch.cat(proposals, 1)
# shape (bs, K, 1)
output_proposals_valid = (
(output_proposals > 0.01) & (output_proposals < 0.99)
).all(-1, keepdim=True)
output_proposals = inverse_sigmoid(output_proposals)
# memory: shape (bs, K, C)
output_memory = memory
# memory_padding_mask: shape (bs, K)
output_memory = output_memory.masked_fill(
memory_padding_mask.unsqueeze(-1), float(0)
)
output_memory = output_memory.masked_fill(~output_proposals_valid, float(0))
output_memory = self.enc_output_norm(self.enc_output(output_memory))
return output_memory, output_proposals, output_proposals_valid
def get_valid_ratio(self, mask):
_, H, W = mask.shape
# shape (bs,)
valid_H = torch.sum(~mask[:, :, 0], 1)
valid_W = torch.sum(~mask[:, 0, :], 1)
valid_ratio_h = valid_H.float() / H
valid_ratio_w = valid_W.float() / W
valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1)
# shape (bs, 2)
return valid_ratio
def forward(self, srcs, masks, pos_embeds, query_embed=None):
"""
Args:
srcs: a list of num_levels tensors. Each has shape (N, C, H_l, W_l)
masks: a list of num_levels tensors. Each has shape (N, H_l, W_l)
pos_embeds: a list of num_levels tensors. Each has shape (N, C, H_l, W_l)
query_embed: a tensor has shape (num_queries, C)
"""
assert self.two_stage or query_embed is not None
# prepare input for encoder
src_flatten = []
mask_flatten = []
lvl_pos_embed_flatten = []
spatial_shapes = []
for lvl, (src, mask, pos_embed) in enumerate(zip(srcs, masks, pos_embeds)):
bs, c, h, w = src.shape
spatial_shape = (h, w)
spatial_shapes.append(spatial_shape)
# src shape (bs, h_l*w_l, c)
src = src.flatten(2).transpose(1, 2)
# mask shape (bs, h_l*w_l)
mask = mask.flatten(1)
# pos_embed shape (bs, h_l*w_l, c)
pos_embed = pos_embed.flatten(2).transpose(1, 2)
# lvl_pos_embed shape (bs, h_l*w_l, c)
lvl_pos_embed = pos_embed + self.level_embed[lvl].view(1, 1, -1)
lvl_pos_embed_flatten.append(lvl_pos_embed)
src_flatten.append(src)
mask_flatten.append(mask)
# src_flatten shape: (bs, K, c) where K = \sum_l H_l * w_l
src_flatten = torch.cat(src_flatten, 1)
# mask_flatten shape: (bs, K)
mask_flatten = torch.cat(mask_flatten, 1)
# lvl_pos_embed_flatten shape: (bs, K, c)
lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1)
# spatial_shapes shape: (num_levels, 2)
spatial_shapes = torch.as_tensor(
spatial_shapes, dtype=torch.long, device=src_flatten.device
)
# level_start_index shape: (num_levels)
level_start_index = torch.cat(
(spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1])
)
# valid_ratios shape: (bs, num_levels, 2)
valid_ratios = torch.stack([self.get_valid_ratio(m) for m in masks], 1)
# encoder
# memory shape (bs, K, C) where K = \sum_l H_l * w_l
memory = self.encoder(
src_flatten,
spatial_shapes,
level_start_index,
valid_ratios,
lvl_pos_embed_flatten,
mask_flatten,
)
# prepare input for decoder
bs, _, c = memory.shape
if self.two_stage:
# output_memory shape (bs, K, C)
# output_proposals shape (bs, K, 4)
# output_proposals_valid shape (bs, K, 1)
(
output_memory,
output_proposals,
output_proposals_valid,
) = self.gen_encoder_output_proposals(memory, mask_flatten, spatial_shapes)
# hack implementation for two-stage Deformable DETR
# shape (bs, K, 1)
enc_outputs_class = self.encoder.class_embed(output_memory)
            # fill in -inf foreground logits at invalid positions so that we never pick
            # top-scored proposals there (use the in-place masked_fill_ so the result is kept)
            enc_outputs_class.masked_fill_(mask_flatten.unsqueeze(-1), NEG_INF)
            enc_outputs_class.masked_fill_(~output_proposals_valid, NEG_INF)
# shape (bs, K, 4)
enc_outputs_coord_unact = (
self.encoder.bbox_embed(output_memory) + output_proposals
)
topk = self.two_stage_num_proposals
# topk_proposals: indices of top items. Shape (bs, top_k)
topk_proposals = torch.topk(enc_outputs_class[..., 0], topk, dim=1)[1]
# topk_coords_unact shape (bs, top_k, 4)
topk_coords_unact = torch.gather(
enc_outputs_coord_unact, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, 4)
)
topk_coords_unact = topk_coords_unact.detach()
init_reference_out = topk_coords_unact
# shape (bs, top_k, C=512)
pos_trans_out = self.pos_trans_norm(
self.pos_trans(self.get_proposal_pos_embed(topk_coords_unact))
)
# query_embed shape (bs, top_k, c)
# tgt shape (bs, top_k, c)
query_embed, tgt = torch.split(pos_trans_out, c, dim=2)
else:
# query_embed (or tgt) shape: (num_queries, c)
query_embed, tgt = torch.split(query_embed, c, dim=1)
# query_embed shape: (batch_size, num_queries, c)
query_embed = query_embed.unsqueeze(0).expand(bs, -1, -1)
# tgt shape: (batch_size, num_queries, c)
tgt = tgt.unsqueeze(0).expand(bs, -1, -1)
# init_reference_out shape: (batch_size, num_queries, 2)
init_reference_out = self.reference_points(query_embed)
# decoder
# hs shape: (num_layers, batch_size, num_queries, c)
        # inter_references shape: (num_layers, batch_size, num_queries, 2 or 4)
hs, inter_references = self.decoder(
tgt,
init_reference_out,
memory,
spatial_shapes,
level_start_index,
valid_ratios,
query_embed,
mask_flatten,
)
inter_references_out = inter_references
if self.two_stage:
return (
hs,
init_reference_out,
inter_references_out,
enc_outputs_class,
enc_outputs_coord_unact,
)
# hs shape: (num_layers, batch_size, num_queries, c)
# init_reference_out shape: (batch_size, num_queries, 2)
    # inter_references_out shape: (num_layers, batch_size, num_queries, 2 or 4)
return hs, init_reference_out, inter_references_out, None, None
class DeformableTransformerEncoderLayer(nn.Module):
def __init__(
self,
d_model=256,
d_ffn=1024,
dropout=0.1,
activation="relu",
n_levels=4,
n_heads=8,
n_points=4,
):
super().__init__()
# self attention
self.self_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)
self.dropout1 = nn.Dropout(dropout)
self.norm1 = nn.LayerNorm(d_model)
# ffn
self.linear1 = nn.Linear(d_model, d_ffn)
self.activation = _get_activation_fn(activation)
self.dropout2 = nn.Dropout(dropout)
self.linear2 = nn.Linear(d_ffn, d_model)
self.dropout3 = nn.Dropout(dropout)
self.norm2 = nn.LayerNorm(d_model)
@staticmethod
def with_pos_embed(tensor, pos):
return tensor if pos is None else tensor + pos
def forward_ffn(self, src):
src2 = self.linear2(self.dropout2(self.activation(self.linear1(src))))
src = src + self.dropout3(src2)
src = self.norm2(src)
return src
def forward(
self,
src,
pos,
reference_points,
spatial_shapes,
level_start_index,
padding_mask=None,
):
"""
Args:
src: tensor, shape (bs, K, c) where K = \sum_l H_l * w_l
pos: tensor, shape (bs, K, c)
reference_points: tensor, shape (bs, K, num_levels, 2)
spatial_shapes: tensor, shape (num_levels, 2)
level_start_index: tensor, shape (num_levels,)
padding_mask: tensor, shape: (bs, K)
"""
# self attention
# shape: (bs, \sum_l H_l * w_l, c)
src2 = self.self_attn(
self.with_pos_embed(src, pos),
reference_points,
src,
spatial_shapes,
level_start_index,
padding_mask,
)
src = src + self.dropout1(src2)
src = self.norm1(src)
# ffn
src = self.forward_ffn(src)
return src
class DeformableTransformerEncoder(nn.Module):
def __init__(self, encoder_layer, num_layers):
super().__init__()
self.layers = _get_clones(encoder_layer, num_layers)
self.num_layers = num_layers
@staticmethod
def get_reference_points(spatial_shapes, valid_ratios, device):
reference_points_list = []
for lvl, (H_, W_) in enumerate(spatial_shapes):
# ref_y shape: (H_l, W_l)
# ref_x shape: (H_l, W_l)
ref_y, ref_x = torch.meshgrid(
torch.linspace(0.5, H_ - 0.5, H_, dtype=torch.float32, device=device),
torch.linspace(0.5, W_ - 0.5, W_, dtype=torch.float32, device=device),
)
# ref_y
# shape (None, H_l*W_l) / (N, None) = (N, H_l*W_l)
# value could be >1
ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, lvl, 1] * H_)
ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, lvl, 0] * W_)
# ref shape (N, H_l*W_l, 2)
ref = torch.stack((ref_x, ref_y), -1)
reference_points_list.append(ref)
# shape (N, K, 2) where K = \sum_l (H_l * W_l)
reference_points = torch.cat(reference_points_list, 1)
# reference_points
# shape (N, K, 1, 2) * (N, 1, num_levels, 2) = (N, K, num_levels, 2)
        # ideally, values should be < 1; in practice they could be >= 1, so clamp the max to 1
reference_points = reference_points[:, :, None] * valid_ratios[:, None]
reference_points = reference_points.clamp(max=1.0)
return reference_points
def forward(
self,
src,
spatial_shapes,
level_start_index,
valid_ratios,
pos=None,
padding_mask=None,
):
output = src
reference_points = self.get_reference_points(
spatial_shapes, valid_ratios, device=src.device
)
for _, layer in enumerate(self.layers):
output = layer(
output,
pos,
reference_points,
spatial_shapes,
level_start_index,
padding_mask,
)
# shape (bs, K, c) where K = \sum_l H_l * w_l
return output
class DeformableTransformerDecoderLayer(nn.Module):
def __init__(
self,
d_model=256,
d_ffn=1024,
dropout=0.1,
activation="relu",
n_levels=4,
n_heads=8,
n_points=4,
):
super().__init__()
# cross attention
self.cross_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)
self.dropout1 = nn.Dropout(dropout)
self.norm1 = nn.LayerNorm(d_model)
# self attention
self.self_attn = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)
self.dropout2 = nn.Dropout(dropout)
self.norm2 = nn.LayerNorm(d_model)
# ffn
self.linear1 = nn.Linear(d_model, d_ffn)
self.activation = _get_activation_fn(activation)
self.dropout3 = nn.Dropout(dropout)
self.linear2 = nn.Linear(d_ffn, d_model)
self.dropout4 = nn.Dropout(dropout)
self.norm3 = nn.LayerNorm(d_model)
@staticmethod
def with_pos_embed(tensor, pos):
return tensor if pos is None else tensor + pos
def forward_ffn(self, tgt):
tgt2 = self.linear2(self.dropout3(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout4(tgt2)
tgt = self.norm3(tgt)
return tgt
def forward(
self,
tgt,
query_pos,
reference_points,
src,
src_spatial_shapes,
level_start_index,
src_padding_mask=None,
):
"""
Args:
tgt: tensor, shape (batch_size, num_queries, c)
query_pos: tensor, shape: (batch_size, num_queries, c)
reference_points: tensor, shape: (batch_size, num_queries, num_levels, 2/4). values \in (0, 1)
src: tensor, shape (batch_size, K, c) where K = \sum_l H_l * w_l
src_spatial_shapes: tensor, shape (num_levels, 2)
level_start_index: tensor, shape (num_levels,)
src_padding_mask: tensor, (batch_size, K)
"""
# self attention
q = k = self.with_pos_embed(tgt, query_pos)
tgt2 = self.self_attn(
q.transpose(0, 1), k.transpose(0, 1), tgt.transpose(0, 1)
)[0].transpose(0, 1)
# tgt shape: (batch_size, num_queries, c)
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
# cross attention
tgt2 = self.cross_attn(
self.with_pos_embed(tgt, query_pos),
reference_points,
src,
src_spatial_shapes,
level_start_index,
src_padding_mask,
)
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
# ffn
tgt = self.forward_ffn(tgt)
# tgt shape: (batch_size, num_queries, c)
return tgt
class DeformableTransformerDecoder(nn.Module):
def __init__(
self, decoder_layer, num_layers, return_intermediate=False, block_grad=True
):
super().__init__()
self.layers = _get_clones(decoder_layer, num_layers)
self.num_layers = num_layers
self.return_intermediate = return_intermediate
# hack implementation for iterative bounding box refinement and two-stage Deformable DETR
self.bbox_embed = None
self.block_grad = block_grad
def forward(
self,
tgt,
reference_points_unact,
src,
src_spatial_shapes,
src_level_start_index,
src_valid_ratios,
query_pos=None,
src_padding_mask=None,
):
"""
Args:
tgt: tensor, shape (batch_size, num_queries, c)
            reference_points_unact: tensor, shape (batch_size, num_queries, 2 or 4).
                unactivated (pre-sigmoid) reference points; a sigmoid is applied inside to map them into (0, 1)
src: tensor, shape (batch_size, K, c) where K = \sum_l H_l * w_l
src_spatial_shapes: tensor, shape (num_levels, 2)
src_level_start_index: tensor, shape (num_levels,)
src_valid_ratios: tensor, shape (batch_size, num_levels, 2)
query_pos: tensor, shape: (batch_size, num_queries, c)
src_padding_mask: tensor, (bs, K)
"""
output = tgt
intermediate = []
intermediate_reference_points = []
for lid, layer in enumerate(self.layers):
reference_points = reference_points_unact.sigmoid()
if reference_points.shape[-1] == 4:
# shape: (bs, num_queries, 1, 4) * (bs, 1, num_levels, 4) = (bs, num_queries, num_levels, 4)
reference_points_input = (
reference_points[:, :, None]
* torch.cat([src_valid_ratios, src_valid_ratios], -1)[:, None]
)
else:
assert reference_points.shape[-1] == 2
# shape (bs, num_queries, 1, 2) * (bs, 1, num_levels, 2) = (bs, num_queries, num_levels, 2)
reference_points_input = (
reference_points[:, :, None] * src_valid_ratios[:, None]
)
# shape: (bs, num_queries, c)
output = layer(
output,
query_pos,
reference_points_input,
src,
src_spatial_shapes,
src_level_start_index,
src_padding_mask,
)
# hack implementation for iterative bounding box refinement
if self.bbox_embed is not None:
tmp = self.bbox_embed[lid](output)
if reference_points.shape[-1] == 4:
new_reference_points_unact = tmp + reference_points_unact
else:
assert reference_points.shape[-1] == 2
new_reference_points_unact = tmp
new_reference_points_unact[..., :2] = (
tmp[..., :2] + reference_points_unact
)
# block gradient backpropagation here to stabilize optimization
if self.block_grad:
new_reference_points_unact = new_reference_points_unact.detach()
reference_points_unact = new_reference_points_unact
else:
new_reference_points_unact = reference_points_unact
if self.return_intermediate:
intermediate.append(output)
intermediate_reference_points.append(new_reference_points_unact)
if self.return_intermediate:
# shape 1: (num_layers, batch_size, num_queries, c)
            # shape 2: (num_layers, batch_size, num_queries, 2 or 4)
return torch.stack(intermediate), torch.stack(intermediate_reference_points)
# output shape: (batch_size, num_queries, c)
        # new_reference_points_unact shape: (batch_size, num_queries, 2 or 4)
return output, new_reference_points_unact
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
    raise RuntimeError(f"activation should be relu/gelu/glu, not {activation}.")
def build_deforamble_transformer(args):
return DeformableTransformer(
d_model=args.hidden_dim,
nhead=args.nheads,
num_encoder_layers=args.enc_layers,
num_decoder_layers=args.dec_layers,
dim_feedforward=args.dim_feedforward,
dropout=args.dropout,
activation="relu",
return_intermediate_dec=True,
num_feature_levels=args.num_feature_levels,
dec_n_points=args.dec_n_points,
enc_n_points=args.enc_n_points,
two_stage=args.two_stage,
two_stage_num_proposals=args.num_queries,
)
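# Usage sketch (illustrative only): how `build_deforamble_transformer` consumes an
# argparse-style namespace. The attribute names mirror the builder above; every
# value below is an assumption chosen for illustration, not a released config.
def _example_build_deformable_transformer():
    from types import SimpleNamespace

    args = SimpleNamespace(
        hidden_dim=256,
        nheads=8,
        enc_layers=6,
        dec_layers=6,
        dim_feedforward=1024,
        dropout=0.1,
        num_feature_levels=4,
        dec_n_points=4,
        enc_n_points=4,
        two_stage=False,
        num_queries=300,
    )
    return build_deforamble_transformer(args)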
| d2go-main | projects_oss/detr/detr/models/deformable_transformer.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
This file provides the definition of the convolutional heads used to predict masks, as well as the losses
"""
import io
from collections import defaultdict
from typing import List, Optional
import detr.util.box_ops as box_ops
import torch
import torch.nn as nn
import torch.nn.functional as F
from detr.util.misc import interpolate, nested_tensor_from_tensor_list, NestedTensor
from PIL import Image
from torch import Tensor
try:
from panopticapi.utils import id2rgb, rgb2id
except ImportError:
pass
class DETRsegm(nn.Module):
def __init__(self, detr, freeze_detr=False):
super().__init__()
self.detr = detr
if freeze_detr:
for p in self.parameters():
p.requires_grad_(False)
hidden_dim, nheads = detr.transformer.d_model, detr.transformer.nhead
self.bbox_attention = MHAttentionMap(
hidden_dim, hidden_dim, nheads, dropout=0.0
)
self.mask_head = MaskHeadSmallConv(
hidden_dim + nheads, [1024, 512, 256], hidden_dim
)
def forward(self, samples: NestedTensor):
if isinstance(samples, (list, torch.Tensor)):
samples = nested_tensor_from_tensor_list(samples)
features, pos = self.detr.backbone(samples)
bs = features[-1].tensors.shape[0]
src, mask = features[-1].decompose()
assert mask is not None
src_proj = self.detr.input_proj(src)
hs, memory = self.detr.transformer(
src_proj, mask, self.detr.query_embed.weight, pos[-1]
)
outputs_class = self.detr.class_embed(hs)
outputs_coord = self.detr.bbox_embed(hs).sigmoid()
out = {"pred_logits": outputs_class[-1], "pred_boxes": outputs_coord[-1]}
if self.detr.aux_loss:
out["aux_outputs"] = self.detr._set_aux_loss(outputs_class, outputs_coord)
# FIXME h_boxes takes the last one computed, keep this in mind
bbox_mask = self.bbox_attention(hs[-1], memory, mask=mask)
seg_masks = self.mask_head(
src_proj,
bbox_mask,
[features[2].tensors, features[1].tensors, features[0].tensors],
)
outputs_seg_masks = seg_masks.view(
bs, self.detr.num_queries, seg_masks.shape[-2], seg_masks.shape[-1]
)
out["pred_masks"] = outputs_seg_masks
return out
def _expand(tensor, length: int):
return tensor.unsqueeze(1).repeat(1, int(length), 1, 1, 1).flatten(0, 1)
class MaskHeadSmallConv(nn.Module):
"""
Simple convolutional head, using group norm.
Upsampling is done using a FPN approach
"""
def __init__(self, dim, fpn_dims, context_dim):
super().__init__()
inter_dims = [
dim,
context_dim // 2,
context_dim // 4,
context_dim // 8,
context_dim // 16,
context_dim // 64,
]
self.lay1 = torch.nn.Conv2d(dim, dim, 3, padding=1)
self.gn1 = torch.nn.GroupNorm(8, dim)
self.lay2 = torch.nn.Conv2d(dim, inter_dims[1], 3, padding=1)
self.gn2 = torch.nn.GroupNorm(8, inter_dims[1])
self.lay3 = torch.nn.Conv2d(inter_dims[1], inter_dims[2], 3, padding=1)
self.gn3 = torch.nn.GroupNorm(8, inter_dims[2])
self.lay4 = torch.nn.Conv2d(inter_dims[2], inter_dims[3], 3, padding=1)
self.gn4 = torch.nn.GroupNorm(8, inter_dims[3])
self.lay5 = torch.nn.Conv2d(inter_dims[3], inter_dims[4], 3, padding=1)
self.gn5 = torch.nn.GroupNorm(8, inter_dims[4])
self.out_lay = torch.nn.Conv2d(inter_dims[4], 1, 3, padding=1)
self.dim = dim
self.adapter1 = torch.nn.Conv2d(fpn_dims[0], inter_dims[1], 1)
self.adapter2 = torch.nn.Conv2d(fpn_dims[1], inter_dims[2], 1)
self.adapter3 = torch.nn.Conv2d(fpn_dims[2], inter_dims[3], 1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_uniform_(m.weight, a=1)
nn.init.constant_(m.bias, 0)
def forward(self, x: Tensor, bbox_mask: Tensor, fpns: List[Tensor]):
x = torch.cat([_expand(x, bbox_mask.shape[1]), bbox_mask.flatten(0, 1)], 1)
x = self.lay1(x)
x = self.gn1(x)
x = F.relu(x)
x = self.lay2(x)
x = self.gn2(x)
x = F.relu(x)
cur_fpn = self.adapter1(fpns[0])
if cur_fpn.size(0) != x.size(0):
cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0))
x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest")
x = self.lay3(x)
x = self.gn3(x)
x = F.relu(x)
cur_fpn = self.adapter2(fpns[1])
if cur_fpn.size(0) != x.size(0):
cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0))
x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest")
x = self.lay4(x)
x = self.gn4(x)
x = F.relu(x)
cur_fpn = self.adapter3(fpns[2])
if cur_fpn.size(0) != x.size(0):
cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0))
x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest")
x = self.lay5(x)
x = self.gn5(x)
x = F.relu(x)
x = self.out_lay(x)
return x
class MHAttentionMap(nn.Module):
"""This is a 2D attention module, which only returns the attention softmax (no multiplication by value)"""
def __init__(self, query_dim, hidden_dim, num_heads, dropout=0.0, bias=True):
super().__init__()
self.num_heads = num_heads
self.hidden_dim = hidden_dim
self.dropout = nn.Dropout(dropout)
self.q_linear = nn.Linear(query_dim, hidden_dim, bias=bias)
self.k_linear = nn.Linear(query_dim, hidden_dim, bias=bias)
nn.init.zeros_(self.k_linear.bias)
nn.init.zeros_(self.q_linear.bias)
nn.init.xavier_uniform_(self.k_linear.weight)
nn.init.xavier_uniform_(self.q_linear.weight)
self.normalize_fact = float(hidden_dim / self.num_heads) ** -0.5
def forward(self, q, k, mask: Optional[Tensor] = None):
q = self.q_linear(q)
k = F.conv2d(
k, self.k_linear.weight.unsqueeze(-1).unsqueeze(-1), self.k_linear.bias
)
qh = q.view(
q.shape[0], q.shape[1], self.num_heads, self.hidden_dim // self.num_heads
)
kh = k.view(
k.shape[0],
self.num_heads,
self.hidden_dim // self.num_heads,
k.shape[-2],
k.shape[-1],
)
weights = torch.einsum("bqnc,bnchw->bqnhw", qh * self.normalize_fact, kh)
if mask is not None:
weights.masked_fill_(mask.unsqueeze(1).unsqueeze(1), float("-inf"))
weights = F.softmax(weights.flatten(2), dim=-1).view(weights.size())
weights = self.dropout(weights)
return weights
def dice_loss(inputs, targets, num_boxes):
"""
Compute the DICE loss, similar to generalized IOU for masks
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
"""
inputs = inputs.sigmoid()
inputs = inputs.flatten(1)
numerator = 2 * (inputs * targets).sum(1)
denominator = inputs.sum(-1) + targets.sum(-1)
loss = 1 - (numerator + 1) / (denominator + 1)
return loss.sum() / num_boxes
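# Usage sketch (illustrative only): a toy call to `dice_loss`. Both tensors are
# assumed to be pre-flattened to (num_masks, num_pixels); inputs are raw logits.
def _example_dice_loss():
    inputs = torch.randn(4, 16 * 16)
    targets = torch.randint(0, 2, (4, 16 * 16)).float()
    return dice_loss(inputs, targets, num_boxes=4)  # scalar tensor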
def sigmoid_focal_loss(
inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2
):
"""
Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
        alpha: (optional) Weighting factor in range (0, 1) to balance
               positive vs negative examples. Default = 0.25; a negative value disables weighting.
gamma: Exponent of the modulating factor (1 - p_t) to
balance easy vs hard examples.
Returns:
Loss tensor
"""
prob = inputs.sigmoid()
ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
p_t = prob * targets + (1 - prob) * (1 - targets)
loss = ce_loss * ((1 - p_t) ** gamma)
if alpha >= 0:
alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
loss = alpha_t * loss
return loss.mean(1).sum() / num_boxes
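# Usage sketch (illustrative only): a toy call to `sigmoid_focal_loss` with
# arbitrary shapes; inputs are raw logits, targets are binary labels of the same shape.
def _example_sigmoid_focal_loss():
    inputs = torch.randn(4, 100)
    targets = torch.randint(0, 2, (4, 100)).float()
    return sigmoid_focal_loss(inputs, targets, num_boxes=4, alpha=0.25, gamma=2)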
class PostProcessSegm(nn.Module):
def __init__(self, threshold=0.5):
super().__init__()
self.threshold = threshold
@torch.no_grad()
def forward(self, results, outputs, orig_target_sizes, max_target_sizes):
assert len(orig_target_sizes) == len(max_target_sizes)
max_h, max_w = max_target_sizes.max(0)[0].tolist()
outputs_masks = outputs["pred_masks"].squeeze(2)
outputs_masks = F.interpolate(
outputs_masks, size=(max_h, max_w), mode="bilinear", align_corners=False
)
outputs_masks = (outputs_masks.sigmoid() > self.threshold).cpu()
for i, (cur_mask, t, tt) in enumerate(
zip(outputs_masks, max_target_sizes, orig_target_sizes)
):
img_h, img_w = t[0], t[1]
results[i]["masks"] = cur_mask[:, :img_h, :img_w].unsqueeze(1)
results[i]["masks"] = F.interpolate(
results[i]["masks"].float(), size=tuple(tt.tolist()), mode="nearest"
).byte()
return results
class PostProcessPanoptic(nn.Module):
"""This class converts the output of the model to the final panoptic result, in the format expected by the
coco panoptic API"""
def __init__(self, is_thing_map, threshold=0.85):
"""
Parameters:
            is_thing_map: a dict whose keys are the class ids and whose values are booleans indicating whether
the class is a thing (True) or a stuff (False) class
threshold: confidence threshold: segments with confidence lower than this will be deleted
"""
super().__init__()
self.threshold = threshold
self.is_thing_map = is_thing_map
def forward(self, outputs, processed_sizes, target_sizes=None): # noqa: C901
"""This function computes the panoptic prediction from the model's predictions.
Parameters:
outputs: This is a dict coming directly from the model. See the model doc for the content.
processed_sizes: This is a list of tuples (or torch tensors) of sizes of the images that were passed to the
model, ie the size after data augmentation but before batching.
target_sizes: This is a list of tuples (or torch tensors) corresponding to the requested final size
of each prediction. If left to None, it will default to the processed_sizes
"""
if target_sizes is None:
target_sizes = processed_sizes
assert len(processed_sizes) == len(target_sizes)
out_logits, raw_masks, raw_boxes = (
outputs["pred_logits"],
outputs["pred_masks"],
outputs["pred_boxes"],
)
assert len(out_logits) == len(raw_masks) == len(target_sizes)
preds = []
def to_tuple(tup):
if isinstance(tup, tuple):
return tup
return tuple(tup.cpu().tolist())
for cur_logits, cur_masks, cur_boxes, size, target_size in zip(
out_logits, raw_masks, raw_boxes, processed_sizes, target_sizes
):
            # we filter out empty queries and detections below the confidence threshold
scores, labels = cur_logits.softmax(-1).max(-1)
keep = labels.ne(outputs["pred_logits"].shape[-1] - 1) & (
scores > self.threshold
)
cur_scores, cur_classes = cur_logits.softmax(-1).max(-1)
cur_scores = cur_scores[keep]
cur_classes = cur_classes[keep]
cur_masks = cur_masks[keep]
cur_masks = interpolate(
cur_masks[:, None], to_tuple(size), mode="bilinear"
).squeeze(1)
cur_boxes = box_ops.box_cxcywh_to_xyxy(cur_boxes[keep])
h, w = cur_masks.shape[-2:]
assert len(cur_boxes) == len(cur_classes)
# It may be that we have several predicted masks for the same stuff class.
            # In the following, we track the list of mask ids for each stuff class (they are merged later on)
cur_masks = cur_masks.flatten(1)
stuff_equiv_classes = defaultdict(lambda: [])
for k, label in enumerate(cur_classes):
if not self.is_thing_map[label.item()]:
stuff_equiv_classes[label.item()].append(k)
def get_ids_area(masks, scores, dedup=False):
# This helper function creates the final panoptic segmentation image
                # It also returns the areas of the masks that appear on the image
m_id = masks.transpose(0, 1).softmax(-1)
if m_id.shape[-1] == 0:
# We didn't detect any mask :(
m_id = torch.zeros((h, w), dtype=torch.long, device=m_id.device)
else:
m_id = m_id.argmax(-1).view(h, w)
if dedup:
# Merge the masks corresponding to the same stuff class
for equiv in stuff_equiv_classes.values():
if len(equiv) > 1:
for eq_id in equiv:
m_id.masked_fill_(m_id.eq(eq_id), equiv[0])
final_h, final_w = to_tuple(target_size)
seg_img = Image.fromarray(id2rgb(m_id.view(h, w).cpu().numpy()))
seg_img = seg_img.resize(
size=(final_w, final_h), resample=Image.NEAREST
)
np_seg_img = (
torch.ByteTensor(torch.ByteStorage.from_buffer(seg_img.tobytes()))
.view(final_h, final_w, 3)
.numpy()
)
m_id = torch.from_numpy(rgb2id(np_seg_img))
area = []
for i in range(len(scores)):
area.append(m_id.eq(i).sum().item())
return area, seg_img
area, seg_img = get_ids_area(cur_masks, cur_scores, dedup=True)
if cur_classes.numel() > 0:
                # We now filter empty masks as long as we find some
while True:
filtered_small = torch.as_tensor(
[area[i] <= 4 for i, c in enumerate(cur_classes)],
dtype=torch.bool,
device=keep.device,
)
if filtered_small.any().item():
cur_scores = cur_scores[~filtered_small]
cur_classes = cur_classes[~filtered_small]
cur_masks = cur_masks[~filtered_small]
area, seg_img = get_ids_area(cur_masks, cur_scores)
else:
break
else:
cur_classes = torch.ones(1, dtype=torch.long, device=cur_classes.device)
segments_info = []
for i, a in enumerate(area):
cat = cur_classes[i].item()
segments_info.append(
{
"id": i,
"isthing": self.is_thing_map[cat],
"category_id": cat,
"area": a,
}
)
del cur_classes
with io.BytesIO() as out:
seg_img.save(out, format="PNG")
predictions = {
"png_string": out.getvalue(),
"segments_info": segments_info,
}
preds.append(predictions)
return preds
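# Usage sketch (illustrative only): constructing `PostProcessPanoptic`. The
# thing/stuff split below is an assumption mimicking the COCO panoptic convention
# and is not taken from this repository.
def _example_postprocess_panoptic():
    is_thing_map = {cat_id: cat_id < 80 for cat_id in range(250)}
    return PostProcessPanoptic(is_thing_map, threshold=0.85)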
| d2go-main | projects_oss/detr/detr/models/segmentation.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import numpy as np
import torch
import torch.nn.functional as F
from d2go.registry.builtin import META_ARCH_REGISTRY
from detectron2.modeling import detector_postprocess
from detectron2.structures import BitMasks, Boxes, ImageList, Instances
from detr.datasets.coco import convert_coco_poly_to_mask
from detr.models.backbone import Joiner
from detr.models.build import build_detr_model
from detr.models.deformable_detr import DeformableDETR
from detr.models.deformable_transformer import DeformableTransformer
from detr.models.detr import DETR
from detr.models.matcher import HungarianMatcher
from detr.models.position_encoding import PositionEmbeddingSine
from detr.models.segmentation import DETRsegm, PostProcessSegm
from detr.models.setcriterion import FocalLossSetCriterion, SetCriterion
from detr.util.box_ops import box_cxcywh_to_xyxy, box_xyxy_to_cxcywh
from detr.util.misc import NestedTensor
from torch import nn
__all__ = ["Detr"]
@META_ARCH_REGISTRY.register()
class Detr(nn.Module):
"""
Implement Detr
"""
def __init__(self, cfg):
super().__init__()
self.device = torch.device(cfg.MODEL.DEVICE)
self.use_focal_loss = cfg.MODEL.DETR.USE_FOCAL_LOSS
self.num_classes = cfg.MODEL.DETR.NUM_CLASSES
self.mask_on = cfg.MODEL.MASK_ON
dec_layers = cfg.MODEL.DETR.DEC_LAYERS
# Loss parameters:
giou_weight = cfg.MODEL.DETR.GIOU_WEIGHT
l1_weight = cfg.MODEL.DETR.L1_WEIGHT
cls_weight = cfg.MODEL.DETR.CLS_WEIGHT
deep_supervision = cfg.MODEL.DETR.DEEP_SUPERVISION
no_object_weight = cfg.MODEL.DETR.NO_OBJECT_WEIGHT
self.detr = build_detr_model(cfg)
if self.mask_on:
frozen_weights = cfg.MODEL.DETR.FROZEN_WEIGHTS
if frozen_weights != "":
print("LOAD pre-trained weights")
weight = torch.load(
frozen_weights, map_location=lambda storage, loc: storage
)["model"]
new_weight = {}
for k, v in weight.items():
if "detr." in k:
new_weight[k.replace("detr.", "")] = v
else:
print(f"Skipping loading weight {k} from frozen model")
del weight
self.detr.load_state_dict(new_weight)
del new_weight
self.detr = DETRsegm(self.detr, freeze_detr=(frozen_weights != ""))
self.seg_postprocess = PostProcessSegm
self.detr.to(self.device)
# building criterion
matcher = HungarianMatcher(
cost_class=cls_weight,
cost_bbox=l1_weight,
cost_giou=giou_weight,
use_focal_loss=self.use_focal_loss,
)
weight_dict = {"loss_ce": cls_weight, "loss_bbox": l1_weight}
weight_dict["loss_giou"] = giou_weight
if deep_supervision:
aux_weight_dict = {}
for i in range(dec_layers - 1):
aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()})
weight_dict.update(aux_weight_dict)
losses = ["labels", "boxes", "cardinality"]
if self.mask_on:
losses += ["masks"]
if self.use_focal_loss:
self.criterion = FocalLossSetCriterion(
self.num_classes,
matcher=matcher,
weight_dict=weight_dict,
losses=losses,
)
else:
self.criterion = SetCriterion(
self.num_classes,
matcher=matcher,
weight_dict=weight_dict,
eos_coef=no_object_weight,
losses=losses,
)
self.criterion.to(self.device)
pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(3, 1, 1)
pixel_std = torch.Tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(3, 1, 1)
self.normalizer = lambda x: (x - pixel_mean) / pixel_std
self.to(self.device)
def forward(self, batched_inputs):
"""
Args:
batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
Each item in the list contains the inputs for one image.
For now, each item in the list is a dict that contains:
* image: Tensor, image in (C, H, W) format.
* instances: Instances
Other information that's included in the original dicts, such as:
* "height", "width" (int): the output resolution of the model, used in inference.
See :meth:`postprocess` for details.
Returns:
dict[str: Tensor]:
mapping from a named loss to a tensor storing the loss. Used during training only.
"""
images_lists = self.preprocess_image(batched_inputs)
        # convert the ImageList into a NestedTensor
nested_images = self.imagelist_to_nestedtensor(images_lists)
output = self.detr(nested_images)
if self.training:
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
# targets: List[Dict[str, torch.Tensor]]. Keys
# "labels": [NUM_BOX,]
# "boxes": [NUM_BOX, 4]
targets = self.prepare_targets(gt_instances)
loss_dict = self.criterion(output, targets)
weight_dict = self.criterion.weight_dict
for k in loss_dict.keys():
if k in weight_dict:
loss_dict[k] *= weight_dict[k]
return loss_dict
else:
box_cls = output["pred_logits"]
box_pred = output["pred_boxes"]
mask_pred = output["pred_masks"] if self.mask_on else None
results = self.inference(
box_cls, box_pred, mask_pred, images_lists.image_sizes
)
processed_results = []
for results_per_image, input_per_image, image_size in zip(
results, batched_inputs, images_lists.image_sizes
):
height = input_per_image.get("height", image_size[0])
width = input_per_image.get("width", image_size[1])
r = detector_postprocess(results_per_image, height, width)
processed_results.append({"instances": r})
return processed_results
def prepare_targets(self, targets):
new_targets = []
for targets_per_image in targets:
h, w = targets_per_image.image_size
image_size_xyxy = torch.as_tensor(
[w, h, w, h], dtype=torch.float, device=self.device
)
gt_classes = targets_per_image.gt_classes # shape (NUM_BOX,)
gt_boxes = targets_per_image.gt_boxes.tensor / image_size_xyxy
gt_boxes = box_xyxy_to_cxcywh(gt_boxes) # shape (NUM_BOX, 4)
new_targets.append({"labels": gt_classes, "boxes": gt_boxes})
if self.mask_on and hasattr(targets_per_image, "gt_masks"):
gt_masks = targets_per_image.gt_masks
gt_masks = convert_coco_poly_to_mask(gt_masks.polygons, h, w)
new_targets[-1].update({"masks": gt_masks})
return new_targets
def inference(self, box_cls, box_pred, mask_pred, image_sizes):
"""
Arguments:
box_cls (Tensor): tensor of shape (batch_size, num_queries, K).
The tensor predicts the classification probability for each query.
box_pred (Tensor): tensors of shape (batch_size, num_queries, 4).
                The tensor predicts the normalized (cx, cy, w, h) box
                regression values for every query.
            mask_pred (Tensor or None): predicted mask logits when mask_on is enabled, otherwise None.
image_sizes (List[torch.Size]): the input image sizes
Returns:
results (List[Instances]): a list of #images elements.
"""
assert len(box_cls) == len(image_sizes)
results = []
        # For each box we assign the best class or the second best if the best one is `no_object`.
if self.use_focal_loss:
prob = box_cls.sigmoid()
            # TODO: make the top-100 selection an option for the non-focal-loss path as well
scores, topk_indexes = torch.topk(
prob.view(box_cls.shape[0], -1), 100, dim=1
)
topk_boxes = topk_indexes // box_cls.shape[2]
labels = topk_indexes % box_cls.shape[2]
else:
scores, labels = F.softmax(box_cls, dim=-1)[:, :, :-1].max(-1)
for i, (
scores_per_image,
labels_per_image,
box_pred_per_image,
image_size,
) in enumerate(zip(scores, labels, box_pred, image_sizes)):
result = Instances(image_size)
boxes = box_cxcywh_to_xyxy(box_pred_per_image)
if self.use_focal_loss:
boxes = torch.gather(boxes, 0, topk_boxes[i].unsqueeze(-1).repeat(1, 4))
result.pred_boxes = Boxes(boxes)
result.pred_boxes.scale(scale_x=image_size[1], scale_y=image_size[0])
if self.mask_on:
mask = F.interpolate(
mask_pred[i].unsqueeze(0),
size=image_size,
mode="bilinear",
align_corners=False,
)
mask = mask[0].sigmoid() > 0.5
B, N, H, W = mask_pred.shape
mask = BitMasks(mask.cpu()).crop_and_resize(
result.pred_boxes.tensor.cpu(), 32
)
result.pred_masks = mask.unsqueeze(1).to(mask_pred[0].device)
result.scores = scores_per_image
result.pred_classes = labels_per_image
results.append(result)
return results
def preprocess_image(self, batched_inputs):
"""
Normalize, pad and batch the input images.
"""
images = [self.normalizer(x["image"].to(self.device)) for x in batched_inputs]
images = ImageList.from_tensors(images)
return images
def imagelist_to_nestedtensor(self, images):
tensor = images.tensor
device = tensor.device
N, _, H, W = tensor.shape
masks = torch.ones((N, H, W), dtype=torch.bool, device=device)
for idx, (h, w) in enumerate(images.image_sizes):
masks[idx, :h, :w] = False
return NestedTensor(tensor, masks)
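# Usage sketch (illustrative only): the `batched_inputs` format that `Detr.forward`
# expects at inference time. The image size is an arbitrary assumption; building
# the model itself requires a full d2go config and is omitted here.
def _example_batched_inputs():
    image = torch.randint(0, 256, (3, 480, 640), dtype=torch.uint8)
    return [{"image": image, "height": 480, "width": 640}]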
| d2go-main | projects_oss/detr/detr/d2/detr.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# -*- coding: utf-8 -*-
from d2go.config import CfgNode as CN
def add_detr_config(cfg):
"""
Add config for DETR.
"""
cfg.MODEL.DETR = CN()
cfg.MODEL.DETR.NAME = "DETR"
cfg.MODEL.DETR.NUM_CLASSES = 80
# simple backbone
cfg.MODEL.BACKBONE.SIMPLE = False
cfg.MODEL.BACKBONE.STRIDE = 1
cfg.MODEL.BACKBONE.CHANNEL = 0
# FBNet
cfg.MODEL.FBNET_V2.OUT_FEATURES = ["trunk3"]
# For Segmentation
cfg.MODEL.DETR.FROZEN_WEIGHTS = ""
# LOSS
cfg.MODEL.DETR.DEFORMABLE = False
cfg.MODEL.DETR.USE_FOCAL_LOSS = False
cfg.MODEL.DETR.CENTERED_POSITION_ENCODIND = False
cfg.MODEL.DETR.CLS_WEIGHT = 1.0
cfg.MODEL.DETR.NUM_FEATURE_LEVELS = 4
cfg.MODEL.DETR.GIOU_WEIGHT = 2.0
cfg.MODEL.DETR.L1_WEIGHT = 5.0
cfg.MODEL.DETR.DEEP_SUPERVISION = True
cfg.MODEL.DETR.NO_OBJECT_WEIGHT = 0.1
cfg.MODEL.DETR.WITH_BOX_REFINE = False
cfg.MODEL.DETR.TWO_STAGE = False
cfg.MODEL.DETR.DECODER_BLOCK_GRAD = True
# TRANSFORMER
cfg.MODEL.DETR.NHEADS = 8
cfg.MODEL.DETR.DROPOUT = 0.1
cfg.MODEL.DETR.DIM_FEEDFORWARD = 2048
cfg.MODEL.DETR.ENC_LAYERS = 6
cfg.MODEL.DETR.DEC_LAYERS = 6
cfg.MODEL.DETR.BBOX_EMBED_NUM_LAYERS = 3
cfg.MODEL.DETR.PRE_NORM = False
cfg.MODEL.DETR.HIDDEN_DIM = 256
cfg.MODEL.DETR.NUM_OBJECT_QUERIES = 100
# solver
cfg.SOLVER.OPTIMIZER = "ADAMW"
cfg.SOLVER.BACKBONE_MULTIPLIER = 0.1
# tgt & embeddings
cfg.MODEL.DETR.LEARNABLE_TGT = False
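# Usage sketch (illustrative only): `add_detr_config` mutates the config node in
# place, so a typical call site registers the DETR keys first and then overrides
# individual values. The override below is just an example value.
def _example_add_detr_config(cfg):
    add_detr_config(cfg)
    cfg.MODEL.DETR.NUM_CLASSES = 91  # e.g. COCO categories
    return cfg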
| d2go-main | projects_oss/detr/detr/d2/config.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .config import add_detr_config
from .dataset_mapper import DetrDatasetMapper
from .detr import Detr
__all__ = ["add_detr_config", "Detr", "DetrDatasetMapper"]
| d2go-main | projects_oss/detr/detr/d2/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import logging
import numpy as np
import torch
from detectron2.data import detection_utils as utils, transforms as T
__all__ = ["DetrDatasetMapper"]
def build_transform_gen(cfg, is_train):
"""
Create a list of :class:`TransformGen` from config.
Returns:
list[TransformGen]
"""
if is_train:
min_size = cfg.INPUT.MIN_SIZE_TRAIN
max_size = cfg.INPUT.MAX_SIZE_TRAIN
sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
else:
min_size = cfg.INPUT.MIN_SIZE_TEST
max_size = cfg.INPUT.MAX_SIZE_TEST
sample_style = "choice"
if sample_style == "range":
assert (
len(min_size) == 2
), "more than 2 ({}) min_size(s) are provided for ranges".format(len(min_size))
logger = logging.getLogger(__name__)
tfm_gens = []
if is_train:
tfm_gens.append(T.RandomFlip())
tfm_gens.append(T.ResizeShortestEdge(min_size, max_size, sample_style))
if is_train:
logger.info("TransformGens used in training: " + str(tfm_gens))
return tfm_gens
class DetrDatasetMapper:
"""
A callable which takes a dataset dict in Detectron2 Dataset format,
and map it into a format used by DETR.
The callable currently does the following:
1. Read the image from "file_name"
2. Applies geometric transforms to the image and annotation
3. Find and applies suitable cropping to the image and annotation
4. Prepare image and annotation to Tensors
"""
def __init__(self, cfg, is_train=True):
if cfg.INPUT.CROP.ENABLED and is_train:
self.crop_gen = [
T.ResizeShortestEdge([400, 500, 600], sample_style="choice"),
T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE),
]
else:
self.crop_gen = None
self.mask_on = cfg.MODEL.MASK_ON
self.tfm_gens = build_transform_gen(cfg, is_train)
logging.getLogger(__name__).info(
"Full TransformGens used in training: {}, crop: {}".format(
str(self.tfm_gens), str(self.crop_gen)
)
)
self.img_format = cfg.INPUT.FORMAT
self.is_train = is_train
def __call__(self, dataset_dict):
"""
Args:
dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
Returns:
dict: a format that builtin models in detectron2 accept
"""
dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
image = utils.read_image(dataset_dict["file_name"], format=self.img_format)
utils.check_image_size(dataset_dict, image)
if self.crop_gen is None:
image, transforms = T.apply_transform_gens(self.tfm_gens, image)
else:
if np.random.rand() > 0.5:
image, transforms = T.apply_transform_gens(self.tfm_gens, image)
else:
image, transforms = T.apply_transform_gens(
self.tfm_gens[:-1] + self.crop_gen + self.tfm_gens[-1:], image
)
image_shape = image.shape[:2] # h, w
# Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
# but not efficient on large generic data structures due to the use of pickle & mp.Queue.
# Therefore it's important to use torch.Tensor.
dataset_dict["image"] = torch.as_tensor(
np.ascontiguousarray(image.transpose(2, 0, 1))
)
if not self.is_train:
# USER: Modify this if you want to keep them for some reason.
dataset_dict.pop("annotations", None)
return dataset_dict
if "annotations" in dataset_dict:
# USER: Modify this if you want to keep them for some reason.
for anno in dataset_dict["annotations"]:
if not self.mask_on:
anno.pop("segmentation", None)
anno.pop("keypoints", None)
# USER: Implement additional transformations if you have other types of data
annos = [
utils.transform_instance_annotations(obj, transforms, image_shape)
for obj in dataset_dict.pop("annotations")
if obj.get("iscrowd", 0) == 0
]
instances = utils.annotations_to_instances(annos, image_shape)
dataset_dict["instances"] = utils.filter_empty_instances(instances)
return dataset_dict
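# Usage sketch (illustrative only): the mapper is normally passed to a detectron2
# dataloader, but applying it by hand looks like this; cfg construction is omitted.
def _example_mapper_usage(cfg, dataset_dicts):
    mapper = DetrDatasetMapper(cfg, is_train=True)
    return [mapper(d) for d in dataset_dicts]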
| d2go-main | projects_oss/detr/detr/d2/dataset_mapper.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------------------------------
# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
# ------------------------------------------------------------------------------------------------
import torch
import torch.nn.functional as F
from detr import _C as MSDA
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.cuda.amp.autocast_mode import custom_bwd, custom_fwd
class MSDeformAttnFunction(Function):
# The @custom_fwd and @custom_bwd decorators are used in this case to allow enabling of
# Automatic Mixed Precision when we do not have implementations of custom CUDA kernels for
# all the precision types.
#
# TODO: After implementing `ms_deform_attn` CUDA kernels for FP16, we can remove the
# custom_fwd and custom_bwd decorators
@staticmethod
@custom_fwd(cast_inputs=torch.float32)
def forward(
ctx,
value,
value_spatial_shapes,
value_level_start_index,
sampling_locations,
attention_weights,
im2col_step,
):
ctx.im2col_step = im2col_step
output = MSDA.ms_deform_attn_forward(
value,
value_spatial_shapes,
value_level_start_index,
sampling_locations,
attention_weights,
ctx.im2col_step,
)
ctx.save_for_backward(
value,
value_spatial_shapes,
value_level_start_index,
sampling_locations,
attention_weights,
)
return output
@staticmethod
@once_differentiable
@custom_bwd
def backward(ctx, grad_output):
(
value,
value_spatial_shapes,
value_level_start_index,
sampling_locations,
attention_weights,
) = ctx.saved_tensors
grad_value, grad_sampling_loc, grad_attn_weight = MSDA.ms_deform_attn_backward(
value,
value_spatial_shapes,
value_level_start_index,
sampling_locations,
attention_weights,
grad_output,
ctx.im2col_step,
)
return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
def ms_deform_attn_core_pytorch(
value, value_spatial_shapes, sampling_locations, attention_weights
):
    # For debugging and testing only; use the CUDA version for real workloads.
# value shape (N, K, num_heads, channels_per_head)
N_, S_, M_, D_ = value.shape
# sampling_locations shape (N, len_q, num_heads, num_levels, num_points, 2)
_, Lq_, M_, L_, P_, _ = sampling_locations.shape
# a list of num_level tensors. Each has shape (N, H_l*W_l, num_heads, channels_per_head)
value_list = value.split([H_ * W_ for H_, W_ in value_spatial_shapes], dim=1)
sampling_grids = 2 * sampling_locations - 1
sampling_value_list = []
for lid_, (H_, W_) in enumerate(value_spatial_shapes):
# N_, H_*W_, M_, D_ -> N_, H_*W_, M_*D_ -> N_, M_*D_, H_*W_ -> N_*M_, D_, H_, W_
value_l_ = (
value_list[lid_].flatten(2).transpose(1, 2).reshape(N_ * M_, D_, H_, W_)
)
# N_, Lq_, M_, P_, 2 -> N_, M_, Lq_, P_, 2 -> N_*M_, Lq_, P_, 2
sampling_grid_l_ = sampling_grids[:, :, :, lid_].transpose(1, 2).flatten(0, 1)
# N_*M_, D_, Lq_, P_
sampling_value_l_ = F.grid_sample(
value_l_,
sampling_grid_l_,
mode="bilinear",
padding_mode="zeros",
align_corners=False,
)
sampling_value_list.append(sampling_value_l_)
# (N_, Lq_, M_, L_, P_) -> (N_, M_, Lq_, L_, P_) -> (N_, M_, 1, Lq_, L_*P_)
attention_weights = attention_weights.transpose(1, 2).reshape(
N_ * M_, 1, Lq_, L_ * P_
)
output = (
(torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights)
.sum(-1)
.view(N_, M_ * D_, Lq_)
)
return output.transpose(1, 2).contiguous()
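# Usage sketch (illustrative only): a shape check for the pure-PyTorch reference
# implementation above. All sizes are arbitrary assumptions; attention weights are
# normalized over levels * points, as the attention module producing them does.
def _example_core_pytorch_shapes():
    N, M, D, Lq, P = 2, 8, 32, 10, 4
    spatial_shapes = [(8, 8), (4, 4)]
    L = len(spatial_shapes)
    S = sum(h * w for h, w in spatial_shapes)
    value = torch.randn(N, S, M, D)
    sampling_locations = torch.rand(N, Lq, M, L, P, 2)
    attention_weights = torch.softmax(torch.randn(N, Lq, M, L * P), -1).view(N, Lq, M, L, P)
    output = ms_deform_attn_core_pytorch(value, spatial_shapes, sampling_locations, attention_weights)
    return output.shape  # torch.Size([N, Lq, M * D])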
| d2go-main | projects_oss/detr/detr/functions/ms_deform_attn_func.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------------------------------
# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
# ------------------------------------------------------------------------------------------------
from .ms_deform_attn_func import MSDeformAttnFunction
| d2go-main | projects_oss/detr/detr/functions/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------------------------------
# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
# ------------------------------------------------------------------------------------------------
import math
import warnings
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn.init import constant_, xavier_uniform_
from ..functions import MSDeformAttnFunction
def _is_power_of_2(n):
if (not isinstance(n, int)) or (n < 0):
raise ValueError(
"invalid input for _is_power_of_2: {} (type: {})".format(n, type(n))
)
return (n & (n - 1) == 0) and n != 0
class MSDeformAttn(nn.Module):
def __init__(self, d_model=256, n_levels=4, n_heads=8, n_points=4):
"""
Multi-Scale Deformable Attention Module
:param d_model hidden dimension
:param n_levels number of feature levels
:param n_heads number of attention heads
:param n_points number of sampling points per attention head per feature level
"""
super().__init__()
if d_model % n_heads != 0:
raise ValueError(
"d_model must be divisible by n_heads, but got {} and {}".format(
d_model, n_heads
)
)
_d_per_head = d_model // n_heads
        # it is best to set _d_per_head to a power of 2, which is more efficient in our CUDA implementation
if not _is_power_of_2(_d_per_head):
warnings.warn(
"You'd better set d_model in MSDeformAttn to make the dimension of each attention head a power of 2 "
"which is more efficient in our CUDA implementation."
)
self.im2col_step = 64
self.d_model = d_model
self.n_levels = n_levels
self.n_heads = n_heads
self.n_points = n_points
self.sampling_offsets = nn.Linear(d_model, n_heads * n_levels * n_points * 2)
self.attention_weights = nn.Linear(d_model, n_heads * n_levels * n_points)
self.value_proj = nn.Linear(d_model, d_model)
self.output_proj = nn.Linear(d_model, d_model)
self._reset_parameters()
def _reset_parameters(self):
constant_(self.sampling_offsets.weight.data, 0.0)
# shape (num_heads,)
thetas = torch.arange(self.n_heads, dtype=torch.float32) * (
2.0 * math.pi / self.n_heads
)
# shape (num_heads, 2)
grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
# shape (num_heads, num_levels, num_points, 2)
grid_init = (
(grid_init / grid_init.abs().max(-1, keepdim=True)[0])
.view(self.n_heads, 1, 1, 2)
.repeat(1, self.n_levels, self.n_points, 1)
)
for i in range(self.n_points):
grid_init[:, :, i, :] *= i + 1
with torch.no_grad():
self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1))
constant_(self.attention_weights.weight.data, 0.0)
constant_(self.attention_weights.bias.data, 0.0)
xavier_uniform_(self.value_proj.weight.data)
constant_(self.value_proj.bias.data, 0.0)
xavier_uniform_(self.output_proj.weight.data)
constant_(self.output_proj.bias.data, 0.0)
def forward(
self,
query,
reference_points,
input_flatten,
input_spatial_shapes,
input_level_start_index,
input_padding_mask=None,
):
"""
:param query (N, Length_{query}, C)
:param reference_points (N, Length_{query}, n_levels, 2), range in [0, 1], top-left (0,0), bottom-right (1, 1), including padding area
or (N, Length_{query}, n_levels, 4), add additional (w, h) to form reference boxes
:param input_flatten (N, \sum_{l=0}^{L-1} H_l \cdot W_l, C)
:param input_spatial_shapes (n_levels, 2), [(H_0, W_0), (H_1, W_1), ..., (H_{L-1}, W_{L-1})]
:param input_level_start_index (n_levels, ), [0, H_0*W_0, H_0*W_0+H_1*W_1, H_0*W_0+H_1*W_1+H_2*W_2, ..., H_0*W_0+H_1*W_1+...+H_{L-1}*W_{L-1}]
:param input_padding_mask (N, \sum_{l=0}^{L-1} H_l \cdot W_l), True for padding elements, False for non-padding elements
:return output (N, Length_{query}, C)
"""
N, Len_q, _ = query.shape
N, Len_in, _ = input_flatten.shape
assert (input_spatial_shapes[:, 0] * input_spatial_shapes[:, 1]).sum() == Len_in
value = self.value_proj(input_flatten)
if input_padding_mask is not None:
value = value.masked_fill(input_padding_mask[..., None], float(0))
value = value.view(N, Len_in, self.n_heads, self.d_model // self.n_heads)
sampling_offsets = self.sampling_offsets(query).view(
N, Len_q, self.n_heads, self.n_levels, self.n_points, 2
)
attention_weights = self.attention_weights(query).view(
N, Len_q, self.n_heads, self.n_levels * self.n_points
)
attention_weights = F.softmax(attention_weights, -1).view(
N, Len_q, self.n_heads, self.n_levels, self.n_points
)
# N, Len_q, n_heads, n_levels, n_points, 2
if reference_points.shape[-1] == 2:
offset_normalizer = torch.stack(
[input_spatial_shapes[..., 1], input_spatial_shapes[..., 0]], -1
)
sampling_locations = (
reference_points[:, :, None, :, None, :]
+ sampling_offsets / offset_normalizer[None, None, None, :, None, :]
)
elif reference_points.shape[-1] == 4:
sampling_locations = (
reference_points[:, :, None, :, None, :2]
+ sampling_offsets
/ self.n_points
* reference_points[:, :, None, :, None, 2:]
* 0.5
)
else:
raise ValueError(
"Last dim of reference_points must be 2 or 4, but get {} instead.".format(
reference_points.shape[-1]
)
)
output = MSDeformAttnFunction.apply(
value,
input_spatial_shapes,
input_level_start_index,
sampling_locations,
attention_weights,
self.im2col_step,
)
output = self.output_proj(output)
return output
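# Usage sketch (illustrative only): shape-level call of MSDeformAttn.forward. The
# forward pass dispatches to the compiled MSDeformAttnFunction CUDA op, so running
# this requires the built extension and CUDA tensors; all sizes are assumptions.
def _example_ms_deform_attn_shapes():
    attn = MSDeformAttn(d_model=256, n_levels=2, n_heads=8, n_points=4)
    spatial_shapes = torch.as_tensor([[8, 8], [4, 4]], dtype=torch.long)
    level_start_index = torch.cat(
        (spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1])
    )
    len_in = int(spatial_shapes.prod(1).sum())
    query = torch.randn(2, 10, 256)
    reference_points = torch.rand(2, 10, 2, 2)  # (N, Len_q, n_levels, 2) in [0, 1]
    input_flatten = torch.randn(2, len_in, 256)
    # expected output shape: (2, 10, 256)
    return attn(query, reference_points, input_flatten, spatial_shapes, level_start_index)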
| d2go-main | projects_oss/detr/detr/modules/ms_deform_attn.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------------------------------
# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
# ------------------------------------------------------------------------------------------------
from .ms_deform_attn import MSDeformAttn
| d2go-main | projects_oss/detr/detr/modules/__init__.py |
# code adapt from https://www.internalfb.com/intern/diffusion/FBS/browse/master/fbcode/mobile-vision/experimental/deit/models.py
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
import json
import math
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from aml.multimodal_video.utils.einops.lib import rearrange
from detectron2.modeling import Backbone, BACKBONE_REGISTRY
from detectron2.utils.file_io import PathManager
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.layers import trunc_normal_
from timm.models.registry import register_model
from timm.models.vision_transformer import PatchEmbed, VisionTransformer
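# NOTE: timm's stock PatchEmbed.forward asserts that inputs match the configured
# image size; the monkey patch below keeps only the projection + flatten so that
# variable-size detection inputs can pass through. This reading of the intent is
# an assumption, not a statement from the original authors.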
def monkey_patch_forward(self, x):
x = self.proj(x).flatten(2).transpose(1, 2)
return x
PatchEmbed.forward = monkey_patch_forward
class DistilledVisionTransformer(VisionTransformer, Backbone):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dist_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim))
num_patches = self.patch_embed.num_patches
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 2, self.embed_dim))
self.head_dist = (
nn.Linear(self.embed_dim, self.num_classes)
if self.num_classes > 0
else nn.Identity()
)
trunc_normal_(self.dist_token, std=0.02)
trunc_normal_(self.pos_embed, std=0.02)
self.head_dist.apply(self._init_weights)
self.norm = None
def _get_pos_embed(self, H, W):
embed_size = self.pos_embed.shape[-1]
        # get rid of the extra (cls and dist) tokens
pos_tokens = self.pos_embed[:, 2:, :]
npatchs = pos_tokens.shape[1]
H0 = W0 = int(math.sqrt(npatchs))
if H0 == H and W0 == W:
return self.pos_embed
# reshape to 2D
pos_tokens = pos_tokens.transpose(1, 2).reshape(-1, embed_size, H0, W0)
# interp
pos_tokens = F.interpolate(
pos_tokens,
size=(H, W),
mode="bilinear",
align_corners=False,
)
# flatten and reshape back
pos_tokens = pos_tokens.reshape(-1, embed_size, H * W).transpose(1, 2)
pos_embed = torch.cat((self.pos_embed[:, :2, :], pos_tokens), dim=1)
return pos_embed
def forward_features(self, x):
# taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
# with slight modifications to add the dist_token
patch_size = self.patch_embed.patch_size[0]
H, W = x.shape[-2:]
H, W = H // patch_size, W // patch_size
B = x.shape[0]
x = self.patch_embed(x)
cls_tokens = self.cls_token.expand(
B, -1, -1
) # stole cls_tokens impl from Phil Wang, thanks
dist_token = self.dist_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, dist_token, x), dim=1)
# pick the spatial embed and do iterp
pos_embed = self._get_pos_embed(H, W)
x = x + pos_embed
x = self.pos_drop(x)
for blk in self.blocks:
x = blk(x)
# x = self.norm(x)
spatial = rearrange(x[:, 2:], "b (h w) c -> b c h w", h=H, w=W)
return x[:, 0], x[:, 1], spatial
def forward(self, x):
x, x_dist, x0 = self.forward_features(x)
return x0
# x = self.head(x)
# x_dist = self.head_dist(x_dist)
# if self.training:
# return x, x_dist
# else:
# # during inference, return the average of both classifier predictions
# return (x + x_dist) / 2
def _cfg(input_size=224, url="", **kwargs):
return {
"url": url,
"num_classes": 1000,
"input_size": (3, input_size, input_size),
"pool_size": None,
"crop_pct": 0.9,
"interpolation": "bilinear",
"mean": IMAGENET_DEFAULT_MEAN,
"std": IMAGENET_DEFAULT_STD,
"first_conv": "patch_embed.proj",
"classifier": "head",
**kwargs,
}
def deit_scalable_distilled(model_config, pretrained=False, **kwargs):
assert not pretrained
model = DistilledVisionTransformer(
img_size=model_config["I"],
patch_size=model_config["p"],
embed_dim=model_config["h"] * model_config["e"],
depth=model_config["d"],
num_heads=model_config["h"],
mlp_ratio=model_config["r"],
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
**kwargs,
)
model.default_cfg = _cfg(input_size=model_config["I"])
print("model arch config: {}".format(model_config))
print("model train config: {}".format(model.default_cfg))
return model
def add_deit_backbone_config(cfg):
cfg.MODEL.DEIT = type(cfg)()
cfg.MODEL.DEIT.MODEL_CONFIG = None
cfg.MODEL.DEIT.WEIGHTS = None
@BACKBONE_REGISTRY.register()
def deit_d2go_model_wrapper(cfg, _):
assert cfg.MODEL.DEIT.MODEL_CONFIG is not None
with PathManager.open(cfg.MODEL.DEIT.MODEL_CONFIG) as f:
model_config = json.load(f)
model = deit_scalable_distilled(
model_config,
num_classes=0, # set num_classes=0 to avoid building cls head
drop_rate=0,
drop_path_rate=0.1,
)
# load weights
if cfg.MODEL.DEIT.WEIGHTS is not None:
with PathManager.open(cfg.MODEL.DEIT.WEIGHTS, "rb") as f:
state_dict = torch.load(f, map_location="cpu")["model"]
rm_keys = [k for k in state_dict if "head" in k]
rm_keys = rm_keys + ["norm.weight", "norm.bias"]
print(rm_keys)
for k in rm_keys:
del state_dict[k]
model.load_state_dict(state_dict)
print(f"loaded weights from {cfg.MODEL.DEIT.WEIGHTS}")
return model
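# Usage sketch (illustrative only): a hand-written model_config for
# `deit_scalable_distilled`. The keys (I, p, h, e, d, r) mirror the lookups in the
# builder above; the values are assumptions roughly matching a small DeiT variant.
def _example_deit_scalable_distilled():
    model_config = {"I": 224, "p": 16, "h": 6, "e": 64, "d": 12, "r": 4}
    return deit_scalable_distilled(
        model_config, num_classes=0, drop_rate=0.0, drop_path_rate=0.1
    )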
| d2go-main | projects_oss/detr/detr/backbone/deit.py |
# https://www.internalfb.com/intern/diffusion/FBS/browse/master/fbcode/mobile-vision/experimental/deit/pit_models.py
# PiT
# Copyright 2021-present NAVER Corp.
# Apache License v2.0
import json
import math
from functools import partial
import torch
import torch.nn.functional as F
from aml.multimodal_video.utils.einops.lib import rearrange
from detectron2.modeling import Backbone, BACKBONE_REGISTRY
from detectron2.utils.file_io import PathManager
from timm.models.layers import trunc_normal_
from timm.models.registry import register_model
from timm.models.vision_transformer import Block as transformer_block
from torch import nn
class Transformer(nn.Module):
def __init__(
self,
base_dim,
depth,
heads,
mlp_ratio,
drop_rate=0.0,
attn_drop_rate=0.0,
drop_path_prob=None,
):
super(Transformer, self).__init__()
self.layers = nn.ModuleList([])
embed_dim = base_dim * heads
if drop_path_prob is None:
drop_path_prob = [0.0 for _ in range(depth)]
self.blocks = nn.ModuleList(
[
transformer_block(
dim=embed_dim,
num_heads=heads,
mlp_ratio=mlp_ratio,
qkv_bias=True,
drop=drop_rate,
attn_drop=attn_drop_rate,
drop_path=drop_path_prob[i],
norm_layer=partial(nn.LayerNorm, eps=1e-6),
)
for i in range(depth)
]
)
def forward(self, x, cls_tokens):
h, w = x.shape[2:4]
x = rearrange(x, "b c h w -> b (h w) c")
token_length = cls_tokens.shape[1]
x = torch.cat((cls_tokens, x), dim=1)
for blk in self.blocks:
x = blk(x)
cls_tokens = x[:, :token_length]
x = x[:, token_length:]
x = rearrange(x, "b (h w) c -> b c h w", h=h, w=w)
return x, cls_tokens
class conv_head_pooling(nn.Module):
def __init__(
self,
in_feature,
out_feature,
stride,
conv_type,
padding_mode="zeros",
dilation=1,
):
super(conv_head_pooling, self).__init__()
if conv_type == "depthwise":
_groups = in_feature
else:
_groups = 1
print("_groups in conv_head_pooling: ", _groups)
self.conv = nn.Conv2d(
in_feature,
out_feature,
kernel_size=3,
padding=dilation,
dilation=dilation,
stride=stride,
padding_mode=padding_mode,
groups=_groups,
)
self.fc = nn.Linear(in_feature, out_feature)
def forward(self, x, cls_token):
x = self.conv(x)
cls_token = self.fc(cls_token)
return x, cls_token
class conv_embedding(nn.Module):
def __init__(self, in_channels, out_channels, patch_size, stride, padding):
super(conv_embedding, self).__init__()
self.conv = nn.Conv2d(
in_channels,
out_channels,
kernel_size=patch_size,
stride=stride,
padding=padding,
bias=True,
)
def forward(self, x):
x = self.conv(x)
return x
class PoolingTransformer(Backbone):
def __init__(
self,
image_size,
patch_size,
stride,
base_dims,
depth,
heads,
mlp_ratio,
conv_type="depthwise",
num_classes=1000,
in_chans=3,
attn_drop_rate=0.0,
drop_rate=0.0,
drop_path_rate=0.0,
dilated=False,
):
super(PoolingTransformer, self).__init__()
total_block = sum(depth)
padding = 0
block_idx = 0
self.padding = padding
self.stride = stride
width = math.floor((image_size + 2 * padding - patch_size) / stride + 1)
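        # e.g. image_size=224, patch_size=16, stride=8, padding=0 gives
        # floor((224 - 16) / 8 + 1) = 27, i.e. a 27x27 token grid (illustrative numbers)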
self.conv_type = conv_type
self.base_dims = base_dims
self.heads = heads
self.num_classes = num_classes
self.patch_size = patch_size
self.pos_embed = nn.Parameter(
torch.randn(1, base_dims[0] * heads[0], width, width), requires_grad=True
)
self.patch_embed = conv_embedding(
in_chans, base_dims[0] * heads[0], patch_size, stride, padding
)
self.cls_token = nn.Parameter(
torch.randn(1, 1, base_dims[0] * heads[0]), requires_grad=True
)
self.pos_drop = nn.Dropout(p=drop_rate)
self.transformers = nn.ModuleList([])
self.pools = nn.ModuleList([])
for stage in range(len(depth)):
drop_path_prob = [
drop_path_rate * i / total_block
for i in range(block_idx, block_idx + depth[stage])
]
block_idx += depth[stage]
self.transformers.append(
Transformer(
base_dims[stage],
depth[stage],
heads[stage],
mlp_ratio,
drop_rate,
attn_drop_rate,
drop_path_prob,
)
)
if stage < len(heads) - 1:
if stage == len(heads) - 2 and dilated:
pool_dilation = 2
pool_stride = 1
else:
pool_dilation = 1
pool_stride = 2
self.pools.append(
conv_head_pooling(
base_dims[stage] * heads[stage],
base_dims[stage + 1] * heads[stage + 1],
stride=pool_stride,
dilation=pool_dilation,
conv_type=self.conv_type,
)
)
# self.norm = nn.LayerNorm(base_dims[-1] * heads[-1], eps=1e-6)
self.embed_dim = base_dims[-1] * heads[-1]
# Classifier head
if num_classes > 0:
self.head = nn.Linear(base_dims[-1] * heads[-1], num_classes)
else:
self.head = nn.Identity()
trunc_normal_(self.pos_embed, std=0.02)
trunc_normal_(self.cls_token, std=0.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {"pos_embed", "cls_token"}
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=""):
self.num_classes = num_classes
if num_classes > 0:
self.head = nn.Linear(self.embed_dim, num_classes)
else:
self.head = nn.Identity()
def _get_pos_embed(self, H, W):
H0, W0 = self.pos_embed.shape[-2:]
if H0 == H and W0 == W:
return self.pos_embed
# interp
pos_embed = F.interpolate(
self.pos_embed,
size=(H, W),
mode="bilinear",
align_corners=False,
)
return pos_embed
def forward_features(self, x):
H, W = x.shape[-2:]
x = self.patch_embed(x)
        # feature map size after patch embedding
H = math.floor((H + 2 * self.padding - self.patch_size) / self.stride + 1)
W = math.floor((W + 2 * self.padding - self.patch_size) / self.stride + 1)
pos_embed = self._get_pos_embed(H, W)
x = self.pos_drop(x + pos_embed)
cls_tokens = self.cls_token.expand(x.shape[0], -1, -1)
for stage in range(len(self.pools)):
x, cls_tokens = self.transformers[stage](x, cls_tokens)
x, cls_tokens = self.pools[stage](x, cls_tokens)
x, cls_tokens = self.transformers[-1](x, cls_tokens)
        # cls_tokens = self.norm(cls_tokens)  # skipped: no gradient flows to this layer norm, which causes a failure
return cls_tokens, x
def forward(self, x):
cls_token, _ = self.forward_features(x)
cls_token = self.head(cls_token[:, 0])
return cls_token
class DistilledPoolingTransformer(PoolingTransformer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.cls_token = nn.Parameter(
torch.randn(1, 2, self.base_dims[0] * self.heads[0]), requires_grad=True
)
if self.num_classes > 0:
self.head_dist = nn.Linear(
self.base_dims[-1] * self.heads[-1], self.num_classes
)
else:
self.head_dist = nn.Identity()
trunc_normal_(self.cls_token, std=0.02)
self.head_dist.apply(self._init_weights)
def forward(self, x):
cls_token, x = self.forward_features(x)
return x
# x_cls = self.head(cls_token[:, 0])
# x_dist = self.head_dist(cls_token[:, 1])
# if self.training:
# return x_cls, x_dist
# else:
# return (x_cls + x_dist) / 2
def pit_scalable_distilled(model_config, pretrained=False, print_info=True, **kwargs):
if "conv_type" in model_config:
conv_type = model_config["conv_type"]
else:
conv_type = "depthwise"
model = DistilledPoolingTransformer(
image_size=model_config["I"],
patch_size=model_config["p"],
stride=model_config["s"],
base_dims=model_config["e"],
depth=model_config["d"],
heads=model_config["h"],
mlp_ratio=model_config["r"],
conv_type=conv_type,
**kwargs,
)
if print_info:
print("model arch config: {}".format(model_config))
assert pretrained == False, "pretrained must be False"
return model
def add_pit_backbone_config(cfg):
cfg.MODEL.PIT = type(cfg)()
cfg.MODEL.PIT.MODEL_CONFIG = None
cfg.MODEL.PIT.WEIGHTS = None
cfg.MODEL.PIT.DILATED = True
@BACKBONE_REGISTRY.register()
def pit_d2go_model_wrapper(cfg, _):
assert cfg.MODEL.PIT.MODEL_CONFIG is not None
dilated = cfg.MODEL.PIT.DILATED
with PathManager.open(cfg.MODEL.PIT.MODEL_CONFIG) as f:
model_config = json.load(f)
model = pit_scalable_distilled(
model_config,
num_classes=0, # set num_classes=0 to avoid building cls head
drop_rate=0,
drop_path_rate=0.1,
dilated=dilated,
)
# load weights
if cfg.MODEL.PIT.WEIGHTS is not None:
with PathManager.open(cfg.MODEL.PIT.WEIGHTS, "rb") as f:
state_dict = torch.load(f, map_location="cpu")["model"]
rm_keys = [k for k in state_dict if "head" in k]
rm_keys = rm_keys + ["norm.weight", "norm.bias"]
print(rm_keys)
for k in rm_keys:
del state_dict[k]
model.load_state_dict(state_dict)
print(f"loaded weights from {cfg.MODEL.PIT.WEIGHTS}")
return model
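# Usage sketch (added for illustration; not part of the original file). The json path
# is a placeholder; its keys ("I", "p", "s", "e", "d", "h", "r") follow
# pit_scalable_distilled above, and `cfg` is assumed to come from a D2Go runner's
# get_default_cfg().
def _example_configure_pit_backbone(cfg, arch_json="/path/to/pit_arch.json"):
    add_pit_backbone_config(cfg)
    cfg.MODEL.BACKBONE.NAME = "pit_d2go_model_wrapper"
    cfg.MODEL.PIT.MODEL_CONFIG = arch_json
    cfg.MODEL.PIT.DILATED = True
    # cfg.MODEL.PIT.WEIGHTS may optionally point at a pretrained checkpoint
    return cfg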
| d2go-main | projects_oss/detr/detr/backbone/pit.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import os
from typing import Any, Dict, List, Type, Union
import mobile_cv.torch.utils_pytorch.comm as comm
import pytorch_lightning as pl # type: ignore
from d2go.config import CfgNode
from d2go.runner.callbacks.quantization import QuantizationAwareTraining
from d2go.runner.lightning_task import DefaultTask
from d2go.setup import basic_argument_parser, prepare_for_launch, setup_after_launch
from d2go.trainer.api import TestNetOutput, TrainNetOutput
from d2go.trainer.helper import parse_precision_from_string
from d2go.trainer.lightning.training_loop import _do_test, _do_train
from detectron2.utils.file_io import PathManager
from pytorch_lightning.callbacks import Callback, LearningRateMonitor, TQDMProgressBar
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.strategies.ddp import DDPStrategy
from torch.distributed import get_rank
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("detectron2go.lightning.train_net")
FINAL_MODEL_CKPT = f"model_final{ModelCheckpoint.FILE_EXTENSION}"
def _get_trainer_callbacks(cfg: CfgNode) -> List[Callback]:
"""Gets the trainer callbacks based on the given D2Go Config.
Args:
cfg: The normalized ConfigNode for this D2Go Task.
Returns:
A list of configured Callbacks to be used by the Lightning Trainer.
"""
callbacks: List[Callback] = [
TQDMProgressBar(refresh_rate=10), # Arbitrary refresh_rate.
LearningRateMonitor(logging_interval="step"),
ModelCheckpoint(
dirpath=cfg.OUTPUT_DIR,
save_last=True,
),
]
if cfg.QUANTIZATION.QAT.ENABLED:
callbacks.append(QuantizationAwareTraining.from_config(cfg))
return callbacks
def _get_strategy(cfg: CfgNode) -> DDPStrategy:
return DDPStrategy(find_unused_parameters=cfg.MODEL.DDP_FIND_UNUSED_PARAMETERS)
def _get_accelerator(use_cpu: bool) -> str:
return "cpu" if use_cpu else "gpu"
def get_trainer_params(cfg: CfgNode) -> Dict[str, Any]:
use_cpu = cfg.MODEL.DEVICE.lower() == "cpu"
strategy = _get_strategy(cfg)
accelerator = _get_accelerator(use_cpu)
params = {
"max_epochs": -1,
"max_steps": cfg.SOLVER.MAX_ITER,
"val_check_interval": cfg.TEST.EVAL_PERIOD
if cfg.TEST.EVAL_PERIOD > 0
else cfg.SOLVER.MAX_ITER,
"num_nodes": comm.get_num_nodes(),
"devices": comm.get_local_size(),
"strategy": strategy,
"accelerator": accelerator,
"callbacks": _get_trainer_callbacks(cfg),
"logger": TensorBoardLogger(save_dir=cfg.OUTPUT_DIR),
"num_sanity_val_steps": 0,
"replace_sampler_ddp": False,
"precision": parse_precision_from_string(
cfg.SOLVER.AMP.PRECISION, lightning=True
)
if cfg.SOLVER.AMP.ENABLED
else 32,
}
if cfg.SOLVER.CLIP_GRADIENTS.ENABLED:
if (
cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE.lower() == "norm"
and cfg.SOLVER.CLIP_GRADIENTS.NORM_TYPE != 2.0
):
raise ValueError(
"D2Go Lightning backend supports only L2-norm for norm-based gradient clipping!"
)
params["gradient_clip_val"] = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE
params["gradient_clip_algorithm"] = cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE
# Allow specifying additional trainer parameters under `LIGHTNING_TRAINER` field.
# Please note that:
    # - the `LIGHTNING_TRAINER` field is not part of the "base" config; users need to add it to their default config via `_DEFAULTS_` or `get_default_cfg`.
    # - this is a temporary solution pending a future refactor of the config system.
if hasattr(cfg, "LIGHTNING_TRAINER"):
params.update(
{
"reload_dataloaders_every_n_epochs": cfg.LIGHTNING_TRAINER.RELOAD_DATALOADERS_EVERY_N_EPOCHS,
"sync_batchnorm": cfg.LIGHTNING_TRAINER.SYNC_BATCHNORM,
"benchmark": cfg.LIGHTNING_TRAINER.BENCHMARK,
}
)
return params
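# Illustrative sketch (not part of the original file): one way a project could add the
# optional LIGHTNING_TRAINER section read above to its default config. Only the three
# attribute names mirror the reads in get_trainer_params; the default values are
# assumptions.
def _example_add_lightning_trainer_defaults(cfg: CfgNode) -> CfgNode:
    cfg.LIGHTNING_TRAINER = CfgNode()
    cfg.LIGHTNING_TRAINER.RELOAD_DATALOADERS_EVERY_N_EPOCHS = 0
    cfg.LIGHTNING_TRAINER.SYNC_BATCHNORM = False
    cfg.LIGHTNING_TRAINER.BENCHMARK = False
    return cfg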
def main(
cfg: CfgNode,
output_dir: str,
runner_class: Union[str, Type[DefaultTask]],
eval_only: bool = False,
) -> Union[TrainNetOutput, TestNetOutput]:
"""Main function for launching a training with lightning trainer
Args:
cfg: D2go config node
num_machines: Number of nodes used for distributed training
num_processes: Number of processes on each node.
eval_only: True if run evaluation only.
"""
task_cls: Type[DefaultTask] = setup_after_launch(cfg, output_dir, runner_class)
task = task_cls.from_config(cfg, eval_only)
trainer_params = get_trainer_params(cfg)
last_checkpoint = os.path.join(cfg.OUTPUT_DIR, "last.ckpt")
if PathManager.exists(last_checkpoint):
# resume training from checkpoint
trainer_params["resume_from_checkpoint"] = last_checkpoint
logger.info(f"Resuming training from checkpoint: {last_checkpoint}.")
trainer = pl.Trainer(**trainer_params)
if eval_only:
_do_test(trainer, task)
return TestNetOutput(
tensorboard_log_dir=trainer_params["logger"].log_dir,
accuracy=task.eval_res,
metrics=task.eval_res,
)
else:
model_configs = _do_train(cfg, trainer, task)
return TrainNetOutput(
tensorboard_log_dir=trainer_params["logger"].log_dir,
accuracy=task.eval_res,
metrics=task.eval_res,
model_configs=model_configs,
)
def argument_parser():
parser = basic_argument_parser(distributed=True, requires_output_dir=False)
# Change default runner argument
parser.set_defaults(runner="d2go.runner.lightning_task.GeneralizedRCNNTask")
parser.add_argument(
"--eval-only", action="store_true", help="perform evaluation only"
)
return parser
if __name__ == "__main__":
args = argument_parser().parse_args()
cfg, output_dir, runner_name = prepare_for_launch(args)
ret = main(
cfg,
output_dir,
runner_name,
eval_only=args.eval_only,
)
if get_rank() == 0:
print(ret)
| d2go-main | tools/lightning_train_net.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Binary to convert a pytorch detectron2go model to a predictor, which contains model(s) in
deployable formats (such as torchscript, caffe2, ...)
"""
import copy
import logging
import sys
from dataclasses import dataclass
from typing import Any, Dict, List, Type, Union
import mobile_cv.lut.lib.pt.flops_utils as flops_utils
from d2go.config import CfgNode, temp_defrost
from d2go.distributed import set_shared_context
from d2go.export.exporter import convert_and_export_predictor
from d2go.runner import BaseRunner
from d2go.setup import (
basic_argument_parser,
post_mortem_if_fail_for_main,
prepare_for_launch,
setup_after_launch,
setup_before_launch,
setup_root_logger,
)
logger = logging.getLogger("d2go.tools.export")
@dataclass
class ExporterOutput:
predictor_paths: Dict[str, str]
accuracy_comparison: Dict[str, Any]
def main(
cfg: CfgNode,
output_dir: str,
runner_class: Union[str, Type[BaseRunner]],
# binary specific optional arguments
predictor_types: List[str],
device: str = "cpu",
compare_accuracy: bool = False,
skip_if_fail: bool = False,
) -> ExporterOutput:
if compare_accuracy:
raise NotImplementedError(
"compare_accuracy functionality isn't currently supported."
)
# NOTE: dict for metrics of all exported models (and original pytorch model)
# ret["accuracy_comparison"] = accuracy_comparison
cfg = copy.deepcopy(cfg)
runner = setup_after_launch(cfg, output_dir, runner_class)
with temp_defrost(cfg):
cfg.merge_from_list(["MODEL.DEVICE", device])
model = runner.build_model(cfg, eval_only=True)
# NOTE: train dataset is used to avoid leakage since the data might be used for
# running calibration for quantization. test_loader is used to make sure it follows
# the inference behaviour (augmentation will not be applied).
datasets = list(cfg.DATASETS.TRAIN)
data_loader = runner.build_detection_test_loader(cfg, datasets)
logger.info("Running the pytorch model and print FLOPS ...")
first_batch = next(iter(data_loader))
input_args = (first_batch,)
flops_utils.print_model_flops(model, input_args)
predictor_paths: Dict[str, str] = {}
for typ in predictor_types:
# convert_and_export_predictor might alter the model, copy before calling it
pytorch_model = copy.deepcopy(model)
try:
predictor_path = convert_and_export_predictor(
cfg,
pytorch_model,
typ,
output_dir,
data_loader,
)
logger.info(f"Predictor type {typ} has been exported to {predictor_path}")
predictor_paths[typ] = predictor_path
except Exception as e:
logger.exception(f"Export {typ} predictor failed: {e}")
if not skip_if_fail:
raise e
return ExporterOutput(
predictor_paths=predictor_paths,
accuracy_comparison={},
)
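# Illustrative sketch (not part of the original binary): the export step can also be
# driven programmatically; the config, output directory and runner name below are
# placeholders.
#
#   out = main(
#       my_cfg,
#       "/tmp/export_out",
#       "d2go.runner.GeneralizedRCNNRunner",
#       predictor_types=["torchscript"],
#       device="cpu",
#   )
#   print(out.predictor_paths)  # maps each predictor type to its exported path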
def run_with_cmdline_args(args):
cfg, output_dir, runner_name = prepare_for_launch(args)
shared_context = setup_before_launch(cfg, output_dir, runner_name)
if shared_context is not None:
set_shared_context(shared_context)
main_func = main if args.disable_post_mortem else post_mortem_if_fail_for_main(main)
return main_func(
cfg,
output_dir,
runner_name,
# binary specific optional arguments
predictor_types=args.predictor_types,
device=args.device,
compare_accuracy=args.compare_accuracy,
skip_if_fail=args.skip_if_fail,
)
def get_parser():
parser = basic_argument_parser(distributed=False)
parser.add_argument(
"--predictor-types",
type=str,
nargs="+",
help="List of strings specify the types of predictors to export",
)
parser.add_argument(
"--device", default="cpu", help="the device to export the model on"
)
parser.add_argument(
"--compare-accuracy",
action="store_true",
help="If true, all exported models and the original pytorch model will be"
" evaluated on cfg.DATASETS.TEST",
)
parser.add_argument(
"--skip-if-fail",
action="store_true",
default=False,
help="If set, suppress the exception for failed exporting and continue to"
" export the next type of model",
)
return parser
def cli(args=None):
args = sys.argv[1:] if args is None else args
run_with_cmdline_args(get_parser().parse_args(args))
if __name__ == "__main__":
setup_root_logger()
cli()
| d2go-main | tools/exporter.py |
d2go-main | tools/__init__.py |
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Tool for benchmarking data loading
"""
import logging
import time
from dataclasses import dataclass
from typing import Type, Union
import detectron2.utils.comm as comm
import numpy as np
from d2go.config import CfgNode
from d2go.distributed import get_num_processes_per_machine, launch
from d2go.evaluation.api import AccuracyDict, MetricsDict
from d2go.runner import BaseRunner
from d2go.setup import (
basic_argument_parser,
post_mortem_if_fail_for_main,
prepare_for_launch,
setup_after_launch,
)
from d2go.utils.misc import print_metrics_table
from detectron2.fb.env import get_launch_environment
from detectron2.utils.logger import log_every_n_seconds
from fvcore.common.history_buffer import HistoryBuffer
logger = logging.getLogger("d2go.tools.benchmark_data")
@dataclass
class BenchmarkDataOutput:
accuracy: AccuracyDict[float]
metrics: MetricsDict[float]
def main(
cfg: CfgNode,
output_dir: str,
runner_class: Union[str, Type[BaseRunner]],
is_train: bool = True,
) -> BenchmarkDataOutput:
runner = setup_after_launch(cfg, output_dir, runner_class)
if is_train:
data_loader = runner.build_detection_train_loader(cfg)
else:
assert len(cfg.DATASETS.TEST) > 0, cfg.DATASETS.TEST
data_loader = runner.build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
TOTAL_BENCHMARK_TIME = (
100 if get_launch_environment() == "local" else 600
    )  # benchmark for 100s when launched locally, otherwise 600s (10 min)
LOGGING_METER_WINDOW_SIZE = 20
LOGGING_METER_TIME_INTERVAL = 5
WARMUP_ITERS = 5
# initialize
time_per_iter = HistoryBuffer(max_length=10000)
total_time = 0
start = time.time()
for no, batch in enumerate(data_loader):
data_time = time.time() - start
time_per_iter.update(data_time)
total_time += data_time
if no == 0:
logger.info("Show the first batch as example:\n{}".format(batch))
# Assume batch size is constant
batch_size = cfg.SOLVER.IMS_PER_BATCH // comm.get_world_size()
        assert len(batch) == batch_size, f"len(batch)={len(batch)} does not match the configured per-process batch size {batch_size}"
median = time_per_iter.median(window_size=LOGGING_METER_WINDOW_SIZE)
avg = time_per_iter.avg(window_size=LOGGING_METER_WINDOW_SIZE)
log_every_n_seconds(
logging.INFO,
"iter: {};"
" recent per-iter seconds: {:.4f} (avg) {:.4f} (median);"
" recent per-image seconds: {:.4f} (avg) {:.4f} (median).".format(
no,
avg,
median,
avg / batch_size,
median / batch_size,
),
n=LOGGING_METER_TIME_INTERVAL,
)
# Synchronize between processes, exit when all processes are running for enough
# time. This mimic the loss.backward(), the logged time doesn't include the time
# for synchronize.
finished = comm.all_gather(total_time >= TOTAL_BENCHMARK_TIME)
if all(x for x in finished):
logger.info("Benchmarking finished after {} seconds".format(total_time))
break
start = time.time()
dataset_name = ":".join(cfg.DATASETS.TRAIN) if is_train else cfg.DATASETS.TEST[0]
time_per_iter = [x[0] for x in time_per_iter.values()]
time_per_iter = time_per_iter[
min(WARMUP_ITERS, max(len(time_per_iter) - WARMUP_ITERS, 0)) :
]
results = {
"environment": {
"num_workers": cfg.DATALOADER.NUM_WORKERS,
"world_size": comm.get_world_size(),
"processes_per_machine": get_num_processes_per_machine(),
},
"main_processes_stats": {
"batch_size_per_process": batch_size,
"per_iter_avg": np.average(time_per_iter),
"per_iter_p1": np.percentile(time_per_iter, 1, interpolation="nearest"),
"per_iter_p10": np.percentile(time_per_iter, 10, interpolation="nearest"),
"per_iter_p50": np.percentile(time_per_iter, 50, interpolation="nearest"),
"per_iter_p90": np.percentile(time_per_iter, 90, interpolation="nearest"),
"per_iter_p99": np.percentile(time_per_iter, 99, interpolation="nearest"),
"per_image_avg": np.average(time_per_iter) / batch_size,
"per_image_p1": np.percentile(time_per_iter, 1, interpolation="nearest")
/ batch_size,
"per_image_p10": np.percentile(time_per_iter, 10, interpolation="nearest")
/ batch_size,
"per_image_p50": np.percentile(time_per_iter, 50, interpolation="nearest")
/ batch_size,
"per_image_p90": np.percentile(time_per_iter, 90, interpolation="nearest")
/ batch_size,
"per_image_p99": np.percentile(time_per_iter, 99, interpolation="nearest")
/ batch_size,
},
"data_processes_stats": {}, # TODO: add worker stats
}
    # Metrics follow the hierarchy: name -> dataset -> task -> metrics -> number
metrics = {"_name_": {dataset_name: results}}
print_metrics_table(metrics)
return BenchmarkDataOutput(
accuracy=metrics,
metrics=metrics,
)
def run_with_cmdline_args(args):
cfg, output_dir, runner_name = prepare_for_launch(args)
main_func = main if args.disable_post_mortem else post_mortem_if_fail_for_main(main)
launch(
main_func,
num_processes_per_machine=args.num_processes,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
backend=args.dist_backend,
args=(cfg, output_dir, runner_name),
kwargs={
"is_train": args.is_train,
},
)
if __name__ == "__main__":
parser = basic_argument_parser(requires_output_dir=True)
parser.add_argument(
"--is-train",
type=bool,
default=True,
help="data loader is train",
)
run_with_cmdline_args(parser.parse_args())
| d2go-main | tools/benchmark_data.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Detection Training Script.
"""
import logging
import sys
from typing import Callable, Dict, List, Type, Union
import detectron2.utils.comm as comm
from d2go.config import CfgNode
from d2go.distributed import distributed_worker, launch
from d2go.runner import BaseRunner
from d2go.setup import (
basic_argument_parser,
build_basic_cli_args,
post_mortem_if_fail_for_main,
prepare_for_launch,
setup_after_launch,
setup_before_launch,
setup_root_logger,
)
from d2go.trainer.api import TestNetOutput, TrainNetOutput
from d2go.trainer.fsdp import is_fsdp_enabled
from d2go.utils.mast import gather_mast_errors, mast_error_handler
from d2go.utils.misc import (
dump_trained_model_configs,
print_metrics_table,
save_binary_outputs,
)
from detectron2.engine.defaults import create_ddp_model
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
logger = logging.getLogger("d2go.tools.train_net")
# Make sure logging is set up centrally even for e.g. dataloading workers which
# have entry points outside of D2Go.
setup_root_logger()
TrainOrTestNetOutput = Union[TrainNetOutput, TestNetOutput]
def main(
cfg: CfgNode,
output_dir: str,
runner_class: Union[str, Type[BaseRunner]],
eval_only: bool = False,
resume: bool = True, # NOTE: always enable resume when running on cluster
) -> TrainOrTestNetOutput:
logger.debug(f"Entered main for d2go, {runner_class=}")
runner = setup_after_launch(cfg, output_dir, runner_class)
model = runner.build_model(cfg)
logger.info("Model:\n{}".format(model))
if eval_only:
checkpointer = runner.build_checkpointer(cfg, model, save_dir=output_dir)
# checkpointer.resume_or_load() will skip all additional checkpointable
# which may not be desired like ema states
if resume and checkpointer.has_checkpoint():
checkpoint = checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=resume)
else:
checkpoint = checkpointer.load(cfg.MODEL.WEIGHTS)
train_iter = checkpoint.get("iteration", None)
model.eval()
metrics = runner.do_test(cfg, model, train_iter=train_iter)
print_metrics_table(metrics)
return TestNetOutput(
accuracy=metrics,
metrics=metrics,
)
# Use DDP if FSDP is not enabled
# TODO (T142223289): rewrite ddp wrapping as modeling hook
if not isinstance(model, FSDP):
model = create_ddp_model(
model,
fp16_compression=cfg.MODEL.DDP_FP16_GRAD_COMPRESS,
device_ids=None if cfg.MODEL.DEVICE == "cpu" else [comm.get_local_rank()],
broadcast_buffers=False,
find_unused_parameters=cfg.MODEL.DDP_FIND_UNUSED_PARAMETERS,
gradient_as_bucket_view=cfg.MODEL.DDP_GRADIENT_AS_BUCKET_VIEW,
)
logger.info("Starting train..")
trained_cfgs = runner.do_train(cfg, model, resume=resume)
final_eval = cfg.TEST.FINAL_EVAL
if final_eval:
# run evaluation after training in the same processes
metrics = runner.do_test(cfg, model)
print_metrics_table(metrics)
else:
metrics = {}
# dump config files for trained models
trained_model_configs = dump_trained_model_configs(cfg.OUTPUT_DIR, trained_cfgs)
return TrainNetOutput(
# for e2e_workflow
accuracy=metrics,
# for unit_workflow
model_configs=trained_model_configs,
metrics=metrics,
)
def wrapped_main(*args, **kwargs) -> Callable[..., TrainOrTestNetOutput]:
return mast_error_handler(main)(*args, **kwargs)
def run_with_cmdline_args(args):
cfg, output_dir, runner_name = prepare_for_launch(args)
shared_context = setup_before_launch(cfg, output_dir, runner_name)
main_func = (
wrapped_main
if args.disable_post_mortem
else post_mortem_if_fail_for_main(wrapped_main)
)
if args.run_as_worker:
logger.info("Running as worker")
result: TrainOrTestNetOutput = distributed_worker(
main_func,
args=(cfg, output_dir, runner_name),
kwargs={
"eval_only": args.eval_only,
"resume": args.resume,
},
backend=args.dist_backend,
init_method=None, # init_method is env by default
dist_params=None,
return_save_file=None,
shared_context=shared_context,
)
else:
outputs: Dict[int, TrainOrTestNetOutput] = launch(
main_func,
num_processes_per_machine=args.num_processes,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
backend=args.dist_backend,
shared_context=shared_context,
args=(cfg, output_dir, runner_name),
kwargs={
"eval_only": args.eval_only,
"resume": args.resume,
},
)
# The indices of outputs are global ranks of all workers on this node, here we
# use the local master result.
result: TrainOrTestNetOutput = outputs[args.machine_rank * args.num_processes]
# Only save result from global rank 0 for consistency.
if args.save_return_file is not None and args.machine_rank == 0:
logger.info(f"Operator result: {result}")
logger.info(f"Writing result to {args.save_return_file}.")
save_binary_outputs(args.save_return_file, result)
def cli(args=None):
logger.info(f"Inside CLI, {args=}")
parser = basic_argument_parser(requires_output_dir=False)
parser.add_argument(
"--eval-only", action="store_true", help="perform evaluation only"
)
parser.add_argument(
"--resume",
action="store_true",
help="whether to attempt to resume from the checkpoint directory",
)
args = sys.argv[1:] if args is None else args
run_with_cmdline_args(parser.parse_args(args))
def build_cli_args(
eval_only: bool = False,
resume: bool = False,
**kwargs,
) -> List[str]:
"""Returns parameters in the form of CLI arguments for train_net binary.
For the list of non-train_net-specific parameters, see build_basic_cli_args."""
args = build_basic_cli_args(**kwargs)
if eval_only:
args += ["--eval-only"]
if resume:
args += ["--resume"]
return args
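# Example (illustrative): build_cli_args(eval_only=True, resume=True) returns whatever
# build_basic_cli_args(**kwargs) produces, followed by ["--eval-only", "--resume"].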
if __name__ == "__main__":
gather_mast_errors(cli())
| d2go-main | tools/train_net.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Binary to evaluate a predictor-based model (consisting of models in deployable formats such as
torchscript, caffe2, etc.) using the Detectron2Go system (dataloading, evaluation, etc.).
"""
import logging
import sys
from typing import Callable, List, Optional, Type, Union
import torch
from d2go.config import CfgNode
from d2go.distributed import launch
from d2go.quantization.qconfig import smart_decode_backend
from d2go.runner import BaseRunner
from d2go.setup import (
basic_argument_parser,
build_basic_cli_args,
caffe2_global_init,
post_mortem_if_fail_for_main,
prepare_for_launch,
setup_after_launch,
setup_before_launch,
setup_root_logger,
)
from d2go.trainer.api import EvaluatorOutput
from d2go.utils.mast import gather_mast_errors, mast_error_handler
from d2go.utils.misc import print_metrics_table, save_binary_outputs
from mobile_cv.predictor.api import create_predictor
logger = logging.getLogger("d2go.tools.caffe2_evaluator")
def main(
cfg: CfgNode,
output_dir: str,
runner_class: Union[str, Type[BaseRunner]],
# binary specific optional arguments
predictor_path: str,
num_threads: Optional[int] = None,
caffe2_engine: Optional[int] = None,
caffe2_logging_print_net_summary: int = 0,
) -> EvaluatorOutput:
# FIXME: Ideally the quantization backend should be encoded in the torchscript model
# or the predictor, and be used automatically during the inference, without user
# manually setting the global variable.
torch.backends.quantized.engine = smart_decode_backend(cfg.QUANTIZATION.BACKEND)
print("run with quantized engine: ", torch.backends.quantized.engine)
runner = setup_after_launch(cfg, output_dir, runner_class)
caffe2_global_init(caffe2_logging_print_net_summary, num_threads)
predictor = create_predictor(predictor_path)
metrics = runner.do_test(cfg, predictor)
print_metrics_table(metrics)
return EvaluatorOutput(
accuracy=metrics,
metrics=metrics,
)
def wrapped_main(*args, **kwargs) -> Callable:
return mast_error_handler(main)(*args, **kwargs)
def run_with_cmdline_args(args):
cfg, output_dir, runner_name = prepare_for_launch(args)
shared_context = setup_before_launch(cfg, output_dir, runner_name)
main_func = (
wrapped_main
if args.disable_post_mortem
else post_mortem_if_fail_for_main(wrapped_main)
)
outputs = launch(
main_func,
args.num_processes,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
backend="GLOO",
always_spawn=False,
shared_context=shared_context,
args=(cfg, output_dir, runner_name),
kwargs={
"predictor_path": args.predictor_path,
"num_threads": args.num_threads,
"caffe2_engine": args.caffe2_engine,
"caffe2_logging_print_net_summary": args.caffe2_logging_print_net_summary,
},
)
# Only save results from global rank 0 for consistency.
if args.save_return_file is not None and args.machine_rank == 0:
save_binary_outputs(args.save_return_file, outputs[0])
def build_cli_args(
eval_only: bool = False,
resume: bool = False,
**kwargs,
) -> List[str]:
"""Returns parameters in the form of CLI arguments for evaluator binary.
    For the list of common (non-binary-specific) parameters, see build_basic_cli_args."""
args = build_basic_cli_args(**kwargs)
if eval_only:
args += ["--eval-only"]
if resume:
args += ["--resume"]
return args
def cli(args=None):
parser = basic_argument_parser()
parser.add_argument(
"--predictor-path",
type=str,
help="Path (a directory) to the exported model that will be evaluated",
)
# === performance config ===========================================================
parser.add_argument(
"--num-threads",
type=int,
default=None,
help="Number of omp/mkl threads (per process) to use in Caffe2's GlobalInit",
)
parser.add_argument(
"--caffe2-engine",
type=str,
default=None,
help="If set, engine of all ops will be set by this value",
)
parser.add_argument(
"--caffe2_logging_print_net_summary",
type=int,
default=0,
help="Control the --caffe2_logging_print_net_summary in GlobalInit",
)
args = sys.argv[1:] if args is None else args
run_with_cmdline_args(parser.parse_args(args))
if __name__ == "__main__":
setup_root_logger()
gather_mast_errors(cli())
| d2go-main | tools/evaluator.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from d2go.setup import setup_loggers
setup_loggers(output_dir=None)
| d2go-main | tests/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
from mobile_cv.common.misc.test_utils import SubPackageInitFileTestMixin
class TestSubPackageInitFile(SubPackageInitFileTestMixin, unittest.TestCase):
    def get_pacakge_name(self) -> str:  # name (including its spelling) must match the hook the mixin expects
return "d2go"
| d2go-main | tests/misc/test_sub_package_init_file.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
from pathlib import Path
from d2go.utils.validation_monitor import fetch_checkpoints_till_final
from detectron2.utils.file_io import PathManager
from mobile_cv.common.misc.file_utils import make_temp_directory
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
def create_file(filename):
with PathManager.open(filename, "w") as _:
pass
class TestValidationMonitor(unittest.TestCase):
def test_fetch_checkpoints_local(self):
with make_temp_directory("test") as output_dir:
output_dir = Path(output_dir)
for i in range(5):
create_file(output_dir / f"model_{i}.pth")
create_file(output_dir / "model_final.pth")
checkpoints = list(fetch_checkpoints_till_final(output_dir))
assert len(checkpoints) == 6
def test_fetch_lightning_checkpoints_local(self):
with make_temp_directory("test") as output_dir:
output_dir = Path(output_dir)
ext = ModelCheckpoint.FILE_EXTENSION
for i in range(5):
create_file(output_dir / f"step={i}{ext}")
create_file(output_dir / f"model_final{ext}")
create_file(output_dir / f"{ModelCheckpoint.CHECKPOINT_NAME_LAST}{ext}")
checkpoints = list(fetch_checkpoints_till_final(output_dir))
self.assertEqual(len(checkpoints), 6)
| d2go-main | tests/misc/test_validation_monitor.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import unittest
import numpy as np
from d2go.config import CfgNode
from d2go.config.utils import flatten_config_dict
from d2go.runner.lightning_task import GeneralizedRCNNTask
from d2go.tools.lightning_train_net import FINAL_MODEL_CKPT, main
from d2go.utils.testing import meta_arch_helper as mah
from d2go.utils.testing.helper import enable_ddp_env, tempdir
class TestLightningTrainNet(unittest.TestCase):
def setUp(self):
# set distributed backend to none to avoid spawning child process,
# which doesn't inherit the temporary dataset
patcher = unittest.mock.patch(
"d2go.tools.lightning_train_net._get_accelerator", return_value=None
)
self.addCleanup(patcher.stop)
patcher.start()
def _get_cfg(self, tmp_dir) -> CfgNode:
return mah.create_detection_cfg(GeneralizedRCNNTask, tmp_dir)
@tempdir
@enable_ddp_env()
def test_train_net_main(self, root_dir):
"""tests the main training entry point."""
cfg = self._get_cfg(root_dir)
# set distributed backend to none to avoid spawning child process,
# which doesn't inherit the temporary dataset
main(cfg, root_dir, GeneralizedRCNNTask)
@tempdir
@enable_ddp_env()
def test_checkpointing(self, tmp_dir):
"""tests saving and loading from checkpoint."""
cfg = self._get_cfg(tmp_dir)
out = main(cfg, tmp_dir, GeneralizedRCNNTask)
ckpts = [f for f in os.listdir(tmp_dir) if f.endswith(".ckpt")]
expected_ckpts = ("last.ckpt", FINAL_MODEL_CKPT)
for ckpt in expected_ckpts:
self.assertIn(ckpt, ckpts)
cfg2 = cfg.clone()
cfg2.defrost()
# load the last checkpoint from previous training
cfg2.MODEL.WEIGHTS = os.path.join(tmp_dir, "last.ckpt")
output_dir = os.path.join(tmp_dir, "output")
out2 = main(cfg2, output_dir, GeneralizedRCNNTask, eval_only=True)
accuracy = flatten_config_dict(out.accuracy)
accuracy2 = flatten_config_dict(out2.accuracy)
for k in accuracy:
np.testing.assert_equal(accuracy[k], accuracy2[k])
| d2go-main | tests/misc/test_lightning_train_net.py |
d2go-main | tests/misc/__init__.py |
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import glob
import tempfile
import unittest
import d2go.utils.abnormal_checker as ac
import torch
class Model(torch.nn.Module):
def forward(self, x):
return {"loss": x}
class TestUtilsAbnormalChecker(unittest.TestCase):
def test_utils_abnormal_checker(self):
counter = 0
def _writer(all_data):
nonlocal counter
counter += 1
checker = ac.AbnormalLossChecker(-1, writers=[_writer])
losses = [5, 4, 3, 10, 9, 2, 5, 4]
for loss in losses:
checker.check_step({"loss": loss})
self.assertEqual(counter, 2)
def test_utils_abnormal_checker_wrapper(self):
model = Model()
with tempfile.TemporaryDirectory() as tmp_dir:
checker = ac.AbnormalLossChecker(-1, writers=[ac.FileWriter(tmp_dir)])
cmodel = ac.AbnormalLossCheckerWrapper(model, checker)
losses = [5, 4, 3, 10, 9, 2, 5, 4]
for loss in losses:
cur = cmodel(loss)
cur_gt = model(loss)
self.assertEqual(cur, cur_gt)
log_files = glob.glob(f"{tmp_dir}/*.pth")
self.assertEqual(len(log_files), 2)
GT_INVALID_INDICES = [3, 6]
logged_indices = []
for cur_log_file in log_files:
cur_log = torch.load(cur_log_file, map_location="cpu")
self.assertIsInstance(cur_log, dict)
self.assertIn("data", cur_log)
logged_indices.append(cur_log["step"])
self.assertSetEqual(set(logged_indices), set(GT_INVALID_INDICES))
| d2go-main | tests/misc/test_utils_abnormal_checker.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import glob
import logging
import os
import unittest
from d2go.config import (
auto_scale_world_size,
CfgNode,
load_full_config_from_file,
reroute_config_path,
temp_new_allowed,
)
from d2go.config.utils import (
config_dict_to_list_str,
flatten_config_dict,
get_cfg_diff_table,
get_diff_cfg,
get_from_flattened_config_dict,
)
from d2go.registry.builtin import CONFIG_UPDATER_REGISTRY
from d2go.runner import GeneralizedRCNNRunner
from d2go.utils.testing.helper import get_resource_path
from mobile_cv.common.misc.file_utils import make_temp_directory
logger = logging.getLogger(__name__)
class TestConfig(unittest.TestCase):
def test_load_configs(self):
"""Make sure configs are loadable"""
for location in ["detectron2", "detectron2go"]:
root_dir = os.path.abspath(reroute_config_path(f"{location}://."))
files = glob.glob(os.path.join(root_dir, "**/*.yaml"), recursive=True)
files = [f for f in files if "fbnas" not in f]
self.assertGreater(len(files), 0)
for fn in sorted(files):
logger.info("Loading {}...".format(fn))
GeneralizedRCNNRunner.get_default_cfg().merge_from_file(fn)
def test_load_arch_defs(self):
"""Test arch def str-to-dict conversion compatible with merging"""
default_cfg = GeneralizedRCNNRunner.get_default_cfg()
cfg = default_cfg.clone()
cfg.merge_from_file(get_resource_path("arch_def_merging.yaml"))
with make_temp_directory("detectron2go_tmp") as tmp_dir:
# Dump out config with arch def
file_name = os.path.join(tmp_dir, "test_archdef_config.yaml")
with open(file_name, "w") as f:
f.write(cfg.dump())
# Attempt to reload the config
another_cfg = default_cfg.clone()
another_cfg.merge_from_file(file_name)
def test_base_reroute(self):
default_cfg = GeneralizedRCNNRunner.get_default_cfg()
# use rerouted file as base
cfg = default_cfg.clone()
cfg.merge_from_file(get_resource_path("rerouted_base.yaml"))
self.assertEqual(cfg.MODEL.MASK_ON, True) # base is loaded
self.assertEqual(cfg.MODEL.FBNET_V2.ARCH, "test") # non-base is loaded
# use multiple files as base
cfg = default_cfg.clone()
cfg.merge_from_file(get_resource_path("rerouted_multi_base.yaml"))
self.assertEqual(cfg.MODEL.MASK_ON, True) # base is loaded
self.assertEqual(cfg.MODEL.FBNET_V2.ARCH, "FBNetV3_A") # second base is loaded
self.assertEqual(cfg.OUTPUT_DIR, "test") # non-base is loaded
def test_temp_new_allowed(self):
default_cfg = GeneralizedRCNNRunner.get_default_cfg()
def set_field(cfg):
cfg.THIS_BETTER_BE_A_NEW_CONFIG = 4
self.assertFalse("THIS_BETTER_BE_A_NEW_CONFIG" in default_cfg)
with temp_new_allowed(default_cfg):
set_field(default_cfg)
self.assertTrue("THIS_BETTER_BE_A_NEW_CONFIG" in default_cfg)
self.assertTrue(default_cfg.THIS_BETTER_BE_A_NEW_CONFIG == 4)
def test_default_cfg_dump_and_load(self):
default_cfg = GeneralizedRCNNRunner.get_default_cfg()
cfg = default_cfg.clone()
with make_temp_directory("detectron2go_tmp") as tmp_dir:
file_name = os.path.join(tmp_dir, "config.yaml")
# this is same as the one in fblearner_launch_utils_detectron2go.py
with open(file_name, "w") as f:
f.write(cfg.dump(default_flow_style=False))
# check if the dumped config file can be merged
cfg.merge_from_file(file_name)
def test_default_cfg_deprecated_keys(self):
default_cfg = GeneralizedRCNNRunner.get_default_cfg()
# a warning will be printed for deprecated keys
default_cfg.merge_from_list(["QUANTIZATION.QAT.LOAD_PRETRAINED", True])
# exception will raise for renamed keys
self.assertRaises(
KeyError,
default_cfg.merge_from_list,
["QUANTIZATION.QAT.BACKEND", "fbgemm"],
)
def test_merge_from_list_with_new_allowed(self):
"""
        YACS's merge_from_list doesn't take new_allowed into account; D2Go overrides its behavior, and this test covers it.
"""
# new_allowed is not set
cfg = CfgNode()
cfg.A = CfgNode()
cfg.A.X = 1
self.assertRaises(Exception, cfg.merge_from_list, ["A.Y", "2"])
# new_allowed is set for sub key
cfg = CfgNode()
cfg.A = CfgNode(new_allowed=True)
cfg.A.X = 1
cfg.merge_from_list(["A.Y", "2"])
self.assertEqual(cfg.A.Y, 2) # note that the string will be converted to number
# however new_allowed is not set for root key
self.assertRaises(Exception, cfg.merge_from_list, ["B", "3"])
class TestConfigUtils(unittest.TestCase):
"""Test util functions in config/utils.py"""
def test_flatten_config_dict(self):
"""Check flatten config dict to single layer dict"""
d = {"c0": {"c1": {"c2": 3}}, "b0": {"b1": "b2"}, "a0": "a1"}
# reorder=True
fdict = flatten_config_dict(d, reorder=True)
gt = {"a0": "a1", "b0.b1": "b2", "c0.c1.c2": 3}
self.assertEqual(fdict, gt)
self.assertEqual(list(fdict.keys()), list(gt.keys()))
# reorder=False
fdict = flatten_config_dict(d, reorder=False)
gt = {"c0.c1.c2": 3, "b0.b1": "b2", "a0": "a1"}
self.assertEqual(fdict, gt)
self.assertEqual(list(fdict.keys()), list(gt.keys()))
def test_config_dict_to_list_str(self):
"""Check convert config dict to str list"""
d = {"a0": "a1", "b0": {"b1": "b2"}, "c0": {"c1": {"c2": 3}}}
str_list = config_dict_to_list_str(d)
gt = ["a0", "a1", "b0.b1", "b2", "c0.c1.c2", "3"]
self.assertEqual(str_list, gt)
def test_get_from_flattened_config_dict(self):
d = {"MODEL": {"MIN_DIM_SIZE": 360}}
self.assertEqual(
get_from_flattened_config_dict(d, "MODEL.MIN_DIM_SIZE"), 360
) # exist
self.assertEqual(
get_from_flattened_config_dict(d, "MODEL.MODEL.INPUT_SIZE"), None
) # non-exist
def test_get_diff_cfg(self):
"""check config that is diff from default config, no new keys"""
# create base config
cfg1 = CfgNode()
cfg1.A = CfgNode()
cfg1.A.Y = 2
# case 1: new allowed not set, new config has only old keys
cfg2 = cfg1.clone()
cfg2.set_new_allowed(False)
cfg2.A.Y = 3
gt = CfgNode()
gt.A = CfgNode()
gt.A.Y = 3
self.assertEqual(gt, get_diff_cfg(cfg1, cfg2))
def test_diff_cfg_no_new_allowed(self):
"""check that if new_allowed is False, new keys cause key error"""
# create base config
cfg1 = CfgNode()
cfg1.A = CfgNode()
cfg1.A.set_new_allowed(False)
cfg1.A.Y = 2
# case 2: new allowed not set, new config has new keys
cfg2 = cfg1.clone()
cfg2.A.X = 2
self.assertRaises(KeyError, get_diff_cfg, cfg1, cfg2)
def test_diff_cfg_with_new_allowed(self):
"""diff config with new keys and new_allowed set to True"""
# create base config
cfg1 = CfgNode()
cfg1.A = CfgNode()
cfg1.A.set_new_allowed(True)
cfg1.A.Y = 2
# case 3: new allowed set, new config has new keys
cfg2 = cfg1.clone()
cfg2.A.X = 2
gt = CfgNode()
gt.A = CfgNode()
gt.A.X = 2
self.assertEqual(gt, get_diff_cfg(cfg1, cfg2))
def test_get_cfg_diff_table(self):
"""Check compare two dicts"""
d1 = {"a0": "a1", "b0": {"b1": "b2"}, "c0": {"c1": {"c2": 3}}}
d2 = {"a0": "a1", "b0": {"b1": "b3"}, "c0": {"c1": {"c2": 4}}}
table = get_cfg_diff_table(d1, d2)
self.assertTrue("a0" not in table) # a0 are the same
self.assertTrue("b0.b1" in table) # b0.b1 are different
self.assertTrue("c0.c1.c2" in table) # c0.c1.c2 are different
def test_get_cfg_diff_table_mismatched_keys(self):
"""Check compare two dicts, the keys are mismatched"""
d_orig = {"a0": "a1", "b0": {"b1": "b2"}, "c0": {"c1": {"c2": 3}}}
d_new = {"a0": "a1", "b0": {"b1": "b3"}, "c0": {"c4": {"c2": 4}}}
table = get_cfg_diff_table(d_new, d_orig)
self.assertTrue("a0" not in table) # a0 are the same
self.assertTrue("b0.b1" in table) # b0.b1 are different
self.assertTrue("c0.c1.c2" in table) # c0.c1.c2 key mismatched
self.assertTrue("c0.c4.c2" in table) # c0.c4.c2 key mismatched
self.assertTrue("Key not exists" in table) # has mismatched key
class TestAutoScaleWorldSize(unittest.TestCase):
def test_8gpu_to_1gpu(self):
"""
        when scaling an 8-gpu config to a 1-gpu one, the batch size will be reduced by 8x
"""
cfg = GeneralizedRCNNRunner.get_default_cfg()
self.assertEqual(cfg.SOLVER.REFERENCE_WORLD_SIZE, 8)
batch_size_x8 = cfg.SOLVER.IMS_PER_BATCH
assert batch_size_x8 % 8 == 0, "default batch size is not multiple of 8"
auto_scale_world_size(cfg, new_world_size=1)
self.assertEqual(cfg.SOLVER.REFERENCE_WORLD_SIZE, 1)
self.assertEqual(cfg.SOLVER.IMS_PER_BATCH * 8, batch_size_x8)
def test_not_scale_for_zero_world_size(self):
"""
when reference world size is 0, no scaling should happen
"""
cfg = GeneralizedRCNNRunner.get_default_cfg()
self.assertEqual(cfg.SOLVER.REFERENCE_WORLD_SIZE, 8)
cfg.SOLVER.REFERENCE_WORLD_SIZE = 0
batch_size_x8 = cfg.SOLVER.IMS_PER_BATCH
auto_scale_world_size(cfg, new_world_size=1)
self.assertEqual(cfg.SOLVER.REFERENCE_WORLD_SIZE, 0)
self.assertEqual(cfg.SOLVER.IMS_PER_BATCH, batch_size_x8)
class TestConfigDefaultsGen(unittest.TestCase):
def test_case1(self):
# register in local scope
@CONFIG_UPDATER_REGISTRY.register()
def _test1(cfg):
cfg.TEST1 = CfgNode()
cfg.TEST1.X = 1
return cfg
@CONFIG_UPDATER_REGISTRY.register()
def _test2(cfg):
cfg.TEST2 = CfgNode()
cfg.TEST2.Y = 2
return cfg
filename = get_resource_path("defaults_gen_case1.yaml")
cfg = load_full_config_from_file(filename)
default_cfg = cfg.get_default_cfg()
# default value is 1
self.assertEqual(default_cfg.TEST1.X, 1)
self.assertEqual(default_cfg.TEST2.Y, 2)
# yaml file overwrites it to 3
self.assertEqual(cfg.TEST1.X, 3)
if __name__ == "__main__":
unittest.main()
| d2go-main | tests/misc/test_config.py |
import os
import tempfile
from d2go.utils.flop_calculator import dump_flops_info
from d2go.utils.testing.data_loader_helper import (
create_detection_data_loader_on_toy_dataset,
)
from d2go.utils.testing.rcnn_helper import RCNNBaseTestCases
class TestFlopCount(RCNNBaseTestCases.TemplateTestCase):
def setup_custom_test(self):
super().setup_custom_test()
self.cfg.merge_from_file("detectron2go://mask_rcnn_fbnetv3a_dsmask_C4.yaml")
def test_flop_count(self):
size_divisibility = max(self.test_model.backbone.size_divisibility, 10)
h, w = size_divisibility, size_divisibility * 2
with create_detection_data_loader_on_toy_dataset(
self.cfg, h, w, is_train=False
) as data_loader:
inputs = (next(iter(data_loader)),)
with tempfile.TemporaryDirectory(prefix="d2go_test") as output_dir:
dump_flops_info(self.test_model, inputs, output_dir)
for fname in [
"flops_str_mobilecv",
"flops_str_fvcore",
"flops_table_fvcore",
]:
outf = os.path.join(output_dir, fname + ".txt")
self.assertTrue(os.path.isfile(outf))
| d2go-main | tests/misc/test_flop_count.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import importlib
import logging
import pkgutil
import unittest
# NOTE: don't import anything related to D2/D2Go so that the test of one registry is
# isolated from others
logger = logging.getLogger(__name__)
# copied from https://stackoverflow.com/questions/3365740/how-to-import-all-submodules
def import_submodules(package, recursive=True):
"""Import all submodules of a module, recursively, including subpackages
:param package: package (name or actual module)
:type package: str | module
:rtype: dict[str, types.ModuleType]
"""
if isinstance(package, str):
package = importlib.import_module(package)
results = {}
for _loader, name, is_pkg in pkgutil.walk_packages(package.__path__):
full_name = package.__name__ + "." + name
results[full_name] = importlib.import_module(full_name)
if recursive and is_pkg:
results.update(import_submodules(full_name))
return results
class BaseRegistryPopulationTests(object):
"""
Test D2Go's core registries are populated once top-level module is imported.
"""
def get_registered_items(self):
"""return a list of registered items"""
raise NotImplementedError()
def import_all_modules(self):
"""import all related modules"""
raise NotImplementedError()
    def test_is_populated(self):
registered_before_import_all = self.get_registered_items()
self.import_all_modules()
registered_after_import_all = self.get_registered_items()
self.assertEqual(registered_before_import_all, registered_after_import_all)
class TestMetaArchRegistryPopulation(unittest.TestCase, BaseRegistryPopulationTests):
def get_registered_items(self):
from d2go.registry.builtin import META_ARCH_REGISTRY
return [k for k, v in META_ARCH_REGISTRY]
def import_all_modules(self):
import d2go.modeling
import_submodules(d2go.modeling)
class TestDataMapperRegistryPopulation(unittest.TestCase, BaseRegistryPopulationTests):
def get_registered_items(self):
from d2go.data.dataset_mappers import D2GO_DATA_MAPPER_REGISTRY
return [k for k, v in D2GO_DATA_MAPPER_REGISTRY]
def import_all_modules(self):
import d2go.data.dataset_mappers
import_submodules(d2go.data.dataset_mappers)
| d2go-main | tests/misc/test_registration.py |
#!/usr/bin/env python3
# pyre-unsafe
import os
import unittest
from unittest import mock
import torch
from d2go.runner.callbacks.quantization import (
get_default_qat_qconfig,
ModelTransform,
PostTrainingQuantization,
QuantizationAwareTraining,
rgetattr,
rhasattr,
rsetattr,
)
from d2go.utils.misc import mode
from d2go.utils.testing.helper import tempdir
from d2go.utils.testing.lightning_test_module import TestModule
from pytorch_lightning import seed_everything, Trainer
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from torch.ao.quantization.qconfig import default_dynamic_qconfig, get_default_qconfig
from torch.ao.quantization.quantize_fx import convert_fx, prepare_fx, prepare_qat_fx
class TestUtilities(unittest.TestCase):
"""Test some basic utilities we rely on."""
def test_get_set_has(self):
"""Trivial test for generic behavior. Only support pre-existing deeply nested values."""
class TestObject(object):
def __init__(self):
self.object = None
self.set_to_five = 5
obj = TestObject()
obj.object = TestObject()
obj.object.set_to_five = 10
rsetattr(obj, "object.set_to_five", 1)
self.assertTrue(rhasattr(obj, "object.set_to_five"))
self.assertEqual(1, rgetattr(obj, "object.set_to_five"))
self.assertEqual(5, rgetattr(obj, "set_to_five"))
with self.assertRaises(AttributeError):
rsetattr(obj, "object.does_not_exist.five", 5)
class TestModelTransform(unittest.TestCase):
"""Tests ModelTransforms."""
def test_invalid_construction_type_error(self):
"""Validate construction of ModelTransforms. Always have fn, msg, and one of [step, interval]."""
with self.assertRaises(TypeError):
_ = ModelTransform()
with self.assertRaises(TypeError):
_ = ModelTransform(fn=lambda x: x)
with self.assertRaises(TypeError):
_ = ModelTransform(message="No function defined")
with self.assertRaises(TypeError):
_ = ModelTransform(
fn=lambda x: x,
message="Specified both step and interval",
step=1,
interval=1,
)
def test_positivity_value_error(self):
"""Validates ModelTransforms are constructed with only valid arguments."""
def identity(x):
return x
with self.assertRaises(ValueError):
_ = ModelTransform(fn=identity, message="Negative step", step=-1)
with self.assertRaises(ValueError):
_ = ModelTransform(fn=identity, message="Zero interval", interval=0)
with self.assertRaises(ValueError):
_ = ModelTransform(fn=identity, message="Negative interval", interval=-1)
@unittest.skip(
"FX Graph Mode Quantization API has been updated, re-enable the test after PyTorch 1.13 stable release"
)
class TestQuantizationAwareTraining(unittest.TestCase):
def test_qat_misconfiguration(self):
"""Tests failure when misconfiguring the QAT Callback."""
invalid_params = [
{"start_step": -1},
{"enable_observer": (42, 42)},
{"enable_observer": (42, 21)},
{"enable_observer": (-1, None)},
{"freeze_bn_step": -1},
]
for invalid_param in invalid_params:
with self.assertRaises(ValueError):
_ = QuantizationAwareTraining(**invalid_param)
def test_qat_transforms(self):
"""Tests the appropropriate ModelTransforms are defined with QAT."""
qat = QuantizationAwareTraining(
start_step=300, enable_observer=(350, 500), freeze_bn_step=550
)
trainer = Trainer()
module = TestModule()
qat.setup(trainer, module, stage="train")
self.assertGreater(len(qat.transforms), 0)
def assertContainsTransformsAtStep(step):
"""
Asserts at least one transform exists at the specified step and
that it is removed after the step begins.
"""
self.assertGreater(
len(
[
transform
for transform in qat.transforms
if transform.step == step
]
),
0,
f"step={step}",
)
trainer.fit_loop.global_step = step
qat.on_train_batch_start(trainer, module, batch=None, batch_idx=0)
self.assertEqual(
len(
[
transform
for transform in qat.transforms
if transform.step == step
]
),
0,
f"step={step}",
)
assertContainsTransformsAtStep(step=300)
assertContainsTransformsAtStep(step=350)
assertContainsTransformsAtStep(step=500)
assertContainsTransformsAtStep(step=550)
@tempdir
def test_qat_interval_transform(self, root_dir):
"""Tests an interval transform is applied multiple times."""
seed_everything(100)
def linear_fn_counter(mod):
if isinstance(mod, torch.nn.Linear):
linear_fn_counter.count += 1
linear_fn_counter.count = 0
model = TestModule()
num_epochs = 2
qat = QuantizationAwareTraining()
qat.transforms.append(
ModelTransform(fn=linear_fn_counter, message="Counter", interval=10)
)
trainer = Trainer(
default_root_dir=os.path.join(root_dir, "quantized"),
enable_checkpointing=False,
callbacks=[qat],
max_epochs=num_epochs,
logger=False,
)
trainer.fit(model)
# Model has 2 linear layers.
self.assertEqual(linear_fn_counter.count, 2 * (trainer.global_step // 10 + 1))
@tempdir
def test_module_quantized_during_train(self, root_dir):
"""Validate quantized aware training works as expected."""
seed_everything(100)
model = TestModule()
test_in = torch.randn(1, 32)
before_train = model.eval()(test_in)
num_epochs = 2
qat = QuantizationAwareTraining()
trainer = Trainer(
accelerator="cpu",
devices=1,
default_root_dir=os.path.join(root_dir, "quantized"),
enable_checkpointing=False,
callbacks=[qat],
max_epochs=num_epochs,
logger=False,
)
trainer.fit(model)
self.assertIsNotNone(qat.prepared)
self.assertIsNotNone(qat.quantized)
test_out = model.eval()(test_in)
self.assertGreater(
            (test_out**2).sum(), 0.03, "With the given seed, L2^2 should be > 0.03."
)
base_out = qat.quantized.eval()(test_in)
self.assertTrue(torch.allclose(base_out, test_out))
# Weight changed during training.
self.assertFalse(torch.allclose(before_train, test_out))
# Validate .test() call works as expected and does not change model weights.
trainer.test(model)
self.assertTrue(torch.allclose(test_out, model.eval()(test_in)))
@tempdir
def test_quantization_without_train(self, root_dir):
"""Validate quantization occurs even without a call to .fit() first."""
seed_everything(100)
model = TestModule()
num_epochs = 2
qat = QuantizationAwareTraining()
trainer = Trainer(
default_root_dir=os.path.join(root_dir, "quantized"),
enable_checkpointing=False,
callbacks=[qat],
max_epochs=num_epochs,
logger=False,
)
trainer.test(model)
self.assertIsNotNone(qat.prepared)
self.assertIsNotNone(qat.quantized)
@tempdir
def test_attribute_preservation_qat(self, root_dir):
"""Validates we can preserve specified properties in module."""
seed_everything(100)
model = TestModule()
model.layer._added_property = 10
model._not_preserved = 15
model._added_property = 20
num_epochs = 2
qat = QuantizationAwareTraining(
preserved_attrs=["_added_property", "layer._added_property"]
)
trainer = Trainer(
default_root_dir=os.path.join(root_dir, "quantized"),
enable_checkpointing=False,
callbacks=[qat],
max_epochs=num_epochs,
logger=False,
)
trainer.fit(model)
self.assertIsNotNone(qat.prepared)
self.assertIsNotNone(qat.quantized)
# Assert properties are maintained.
self.assertTrue(hasattr(qat.prepared, "_added_property"))
self.assertTrue(hasattr(qat.prepared.layer, "_added_property"))
with self.assertRaises(AttributeError):
qat.prepared._not_preserved
@tempdir
def test_quantization_and_checkpointing(self, root_dir):
"""Validate written checkpoints can be loaded back as expected."""
seed_everything(100)
model = TestModule()
num_epochs = 2
qat = QuantizationAwareTraining()
checkpoint_dir = os.path.join(root_dir, "checkpoints")
checkpoint = ModelCheckpoint(dirpath=checkpoint_dir, save_last=True)
trainer = Trainer(
default_root_dir=os.path.join(root_dir, "quantized"),
callbacks=[qat, checkpoint],
max_epochs=num_epochs,
logger=False,
)
        # Mimic failing mid-training by not running on_fit_end.
with mock.patch.object(qat, "on_fit_end"):
trainer.fit(model)
ckpt = torch.load(os.path.join(checkpoint_dir, "last.ckpt"))
model.load_state_dict(ckpt["state_dict"])
@tempdir
def test_custom_qat(self, root_dir):
"""Tests that we can customize QAT by skipping certain layers."""
class _CustomQAT(QuantizationAwareTraining):
"""Only quantize TestModule.another_layer."""
def prepare(self, model, configs, attrs):
example_inputs = (torch.rand(1, 2),)
model.another_layer = prepare_qat_fx(
model.another_layer, configs[""], example_inputs
)
return model
def convert(self, model, submodules, attrs):
model.another_layer = convert_fx(model.another_layer)
return model
seed_everything(100)
model = TestModule()
test_in = torch.randn(1, 32)
before_train = model.eval()(test_in)
num_epochs = 2
qat = _CustomQAT()
trainer = Trainer(
default_root_dir=os.path.join(root_dir, "quantized"),
enable_checkpointing=False,
callbacks=[qat],
max_epochs=num_epochs,
logger=False,
)
trainer.fit(model)
self.assertIsNotNone(qat.prepared)
self.assertIsNotNone(qat.quantized)
test_out = model.eval()(test_in)
self.assertGreater(
            (test_out**2).sum(), 0.03, "With the given seed, L2^2 should be > 0.03."
)
base_out = qat.quantized.eval()(test_in)
self.assertTrue(torch.allclose(base_out, test_out))
# Weight changed during training.
self.assertFalse(torch.allclose(before_train, test_out))
# Validate .test() call works as expected and does not change model weights.
trainer.test(model)
self.assertTrue(torch.allclose(test_out, model.eval()(test_in)))
@tempdir
def test_submodule_qat(self, root_dir):
"""Tests that we can customize QAT through exposed API."""
seed_everything(100)
model = TestModule()
test_in = torch.randn(1, 32)
before_train = model.eval()(test_in)
num_epochs = 2
qat = QuantizationAwareTraining(
qconfig_dicts={"another_layer": {"": get_default_qat_qconfig()}}
)
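        # Keying qconfig_dicts by 'another_layer' restricts QAT to that submodule via the exposed API.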
trainer = Trainer(
default_root_dir=os.path.join(root_dir, "quantized"),
enable_checkpointing=False,
callbacks=[qat],
max_epochs=num_epochs,
logger=False,
)
trainer.fit(model)
self.assertIsNotNone(qat.prepared)
self.assertIsNotNone(qat.quantized)
test_out = model.eval()(test_in)
self.assertGreater(
            (test_out**2).sum(), 0.03, "With the given seed, L2^2 should be > 0.03."
)
base_out = qat.quantized.eval()(test_in)
self.assertTrue(torch.allclose(base_out, test_out))
# Weight changed during training.
self.assertFalse(torch.allclose(before_train, test_out))
# Validate .test() call works as expected and does not change model weights.
trainer.test(model)
self.assertTrue(torch.allclose(test_out, model.eval()(test_in)))
@unittest.skip(
"FX Graph Mode Quantization API has been updated, re-enable the test after PyTorch 1.13 stable release"
)
class TestPostTrainingQuantization(unittest.TestCase):
@tempdir
def test_post_training_static_quantization(self, root_dir):
"""Validate post-training static quantization."""
seed_everything(100)
model = TestModule()
num_epochs = 4
static_quantization = PostTrainingQuantization(
qconfig_dicts={"": {"": get_default_qconfig()}}
)
trainer = Trainer(
default_root_dir=os.path.join(root_dir, "quantized"),
enable_checkpointing=False,
callbacks=[static_quantization],
max_epochs=num_epochs,
logger=False,
)
# This will both train the model + quantize it.
trainer.fit(model)
self.assertIsNotNone(static_quantization.quantized)
# Default qconfig requires calibration.
self.assertTrue(static_quantization.should_calibrate)
test_in = torch.randn(12, 32)
with mode(model, training=False) as m:
base_out = m(test_in)
with mode(static_quantization.quantized, training=False) as q:
test_out = q(test_in)
# While quantized/original won't be exact, they should be close.
self.assertLess(
((((test_out - base_out) ** 2).sum(axis=1)) ** (1 / 2)).mean(),
0.015,
"RMSE should be less than 0.015 between quantized and original.",
)
@tempdir
def test_post_training_dynamic_quantization(self, root_dir):
"""Validates post-training dynamic quantization."""
seed_everything(100)
model = TestModule()
num_epochs = 2
dynamic_quant = PostTrainingQuantization(
qconfig_dicts={"": {"": default_dynamic_qconfig}}
)
trainer = Trainer(
default_root_dir=os.path.join(root_dir, "quantized"),
enable_checkpointing=False,
callbacks=[dynamic_quant],
max_epochs=num_epochs,
logger=False,
)
# This will both train the model + quantize it.
trainer.fit(model)
self.assertIsNotNone(dynamic_quant.quantized)
        # Dynamic quantization does not require a calibration step.
self.assertFalse(dynamic_quant.should_calibrate)
test_in = torch.randn(12, 32)
with mode(model, training=False) as m:
base_out = m(test_in)
with mode(dynamic_quant.quantized, training=False) as q:
test_out = q(test_in)
# While quantized/original won't be exact, they should be close.
self.assertLess(
((((test_out - base_out) ** 2).sum(axis=1)) ** (1 / 2)).mean(),
0.015,
"RMSE should be less than 0.015 between quantized and original.",
)
@tempdir
def test_custom_post_training_static_quant(self, root_dir):
"""Tests that we can customize Post-Training static by skipping certain layers."""
class _CustomStaticQuant(PostTrainingQuantization):
"""Only quantize TestModule.another_layer."""
def prepare(self, model, configs, attrs):
example_inputs = (torch.randn(1, 2),)
model.another_layer = prepare_fx(
model.another_layer, configs[""], example_inputs
)
return model
def convert(self, model, submodules, attrs):
model.another_layer = convert_fx(model.another_layer)
return model
seed_everything(100)
model = TestModule()
num_epochs = 2
static_quantization = _CustomStaticQuant()
trainer = Trainer(
default_root_dir=os.path.join(root_dir, "quantized"),
enable_checkpointing=False,
callbacks=[static_quantization],
max_epochs=num_epochs,
logger=False,
num_sanity_val_steps=0,
)
trainer.fit(model)
self.assertIsNotNone(static_quantization.quantized)
test_in = torch.randn(12, 32)
with mode(model, training=False) as m:
base_out = m(test_in)
with mode(static_quantization.quantized, training=False) as q:
test_out = q(test_in)
# While quantized/original won't be exact, they should be close.
self.assertLess(
((((test_out - base_out) ** 2).sum(axis=1)) ** (1 / 2)).mean(),
0.02,
"RMSE should be less than 0.007 between quantized and original.",
)
| d2go-main | tests/runner/test_runner_lightning_quantization.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import unittest
from copy import deepcopy
from tempfile import TemporaryDirectory
from typing import Dict
import pytorch_lightning as pl # type: ignore
import torch
from d2go.config import CfgNode, temp_defrost
from d2go.quantization.modeling import set_backend_and_create_qconfig
from d2go.registry.builtin import META_ARCH_REGISTRY
from d2go.runner import create_runner
from d2go.runner.callbacks.quantization import QuantizationAwareTraining
from d2go.runner.lightning_task import GeneralizedRCNNTask
from d2go.utils.testing import meta_arch_helper as mah
from d2go.utils.testing.helper import tempdir
from detectron2.utils.events import EventStorage
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from torch import Tensor
from torch.ao.quantization.quantize_fx import convert_fx, prepare_qat_fx
class TestLightningTask(unittest.TestCase):
def _get_cfg(self, tmp_dir: str) -> CfgNode:
cfg = mah.create_detection_cfg(GeneralizedRCNNTask, tmp_dir)
cfg.TEST.EVAL_PERIOD = cfg.SOLVER.MAX_ITER
return cfg
def _get_trainer(self, output_dir: str) -> pl.Trainer:
checkpoint_callback = ModelCheckpoint(dirpath=output_dir, save_last=True)
return pl.Trainer(
max_steps=1,
limit_train_batches=1,
num_sanity_val_steps=0,
callbacks=[checkpoint_callback],
logger=False,
)
def _compare_state_dict(
self, state1: Dict[str, Tensor], state2: Dict[str, Tensor]
) -> bool:
if state1.keys() != state2.keys():
return False
for k in state1:
if not torch.allclose(state1[k], state2[k]):
return False
return True
@tempdir
def test_load_from_checkpoint(self, tmp_dir) -> None:
task = GeneralizedRCNNTask(self._get_cfg(tmp_dir))
trainer = self._get_trainer(tmp_dir)
with EventStorage() as storage:
task.storage = storage
trainer.fit(task)
ckpt_path = os.path.join(tmp_dir, "test.ckpt")
trainer.save_checkpoint(ckpt_path)
self.assertTrue(os.path.exists(ckpt_path))
# load model weights from checkpoint
task2 = GeneralizedRCNNTask.load_from_checkpoint(ckpt_path)
self.assertTrue(
self._compare_state_dict(
task.model.state_dict(), task2.model.state_dict()
)
)
@tempdir
def test_train_ema(self, tmp_dir):
cfg = self._get_cfg(tmp_dir)
cfg.MODEL_EMA.ENABLED = True
cfg.MODEL_EMA.DECAY = 0.7
task = GeneralizedRCNNTask(cfg)
init_state = deepcopy(task.model.state_dict())
trainer = self._get_trainer(tmp_dir)
with EventStorage() as storage:
task.storage = storage
trainer.fit(task)
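        # With max_steps=1 the expected EMA is a single update: ema = decay * init + (1 - decay) * trained.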
for k, v in task.model.state_dict().items():
init_state[k].copy_(init_state[k] * 0.7 + 0.3 * v)
self.assertTrue(
self._compare_state_dict(init_state, task.ema_state.state_dict())
)
@tempdir
def test_load_ema_weights(self, tmp_dir) -> None:
cfg = self._get_cfg(tmp_dir)
cfg.MODEL_EMA.ENABLED = True
task = GeneralizedRCNNTask(cfg)
trainer = self._get_trainer(tmp_dir)
with EventStorage() as storage:
task.storage = storage
trainer.fit(task)
# load EMA weights from checkpoint
task2 = GeneralizedRCNNTask.load_from_checkpoint(
os.path.join(tmp_dir, "last.ckpt")
)
self.assertTrue(
self._compare_state_dict(
task.ema_state.state_dict(), task2.ema_state.state_dict()
)
)
# apply EMA weights to model
task2.ema_state.apply_to(task2.model)
self.assertTrue(
self._compare_state_dict(
task.ema_state.state_dict(), task2.model.state_dict()
)
)
@tempdir
def test_ema_eval_only_mode(self, tmp_dir: TemporaryDirectory) -> None:
"""Train one model for one iteration, then check if the
second task is loaded correctly from config and applied to model.x"""
cfg = self._get_cfg(tmp_dir)
cfg.MODEL.MODELING_HOOKS = ["EMA"]
cfg.MODEL_EMA.ENABLED = True
task = GeneralizedRCNNTask(cfg)
trainer = self._get_trainer(tmp_dir)
with EventStorage() as storage:
task.storage = storage
trainer.fit(task)
# load EMA weights from checkpoint
cfg2 = self._get_cfg(tmp_dir)
cfg2.MODEL.MODELING_HOOKS = ["EMA"]
cfg2.MODEL_EMA.ENABLED = True
cfg2.MODEL_EMA.USE_EMA_WEIGHTS_FOR_EVAL_ONLY = True
cfg2.MODEL.WEIGHTS = os.path.join(tmp_dir, "last.ckpt")
task2 = GeneralizedRCNNTask.from_config(cfg2)
self.assertTrue(task2.ema_state, "EMA state is not loaded from checkpoint.")
self.assertTrue(
len(task2.ema_state.state_dict()) > 0, "EMA state should not be empty."
)
self.assertTrue(
self._compare_state_dict(
task.ema_state.state_dict(), task2.model.state_dict()
),
"Task loaded from config should apply the ema_state to the model.",
)
def test_create_runner(self):
task_cls = create_runner(
f"{GeneralizedRCNNTask.__module__}.{GeneralizedRCNNTask.__name__}"
)
self.assertTrue(task_cls == GeneralizedRCNNTask)
@tempdir
def test_build_model(self, tmp_dir):
cfg = self._get_cfg(tmp_dir)
cfg.MODEL_EMA.ENABLED = True
task = GeneralizedRCNNTask(cfg)
trainer = self._get_trainer(tmp_dir)
with EventStorage() as storage:
task.storage = storage
trainer.fit(task)
# test building untrained model
model = GeneralizedRCNNTask.build_model(cfg)
self.assertTrue(model.training)
# test loading regular weights
with temp_defrost(cfg):
cfg.MODEL.WEIGHTS = os.path.join(tmp_dir, "last.ckpt")
model = GeneralizedRCNNTask.build_model(cfg, eval_only=True)
self.assertFalse(model.training)
self.assertTrue(
self._compare_state_dict(model.state_dict(), task.model.state_dict())
)
# test loading EMA weights
with temp_defrost(cfg):
cfg.MODEL.WEIGHTS = os.path.join(tmp_dir, "last.ckpt")
cfg.MODEL_EMA.USE_EMA_WEIGHTS_FOR_EVAL_ONLY = True
model = GeneralizedRCNNTask.build_model(cfg, eval_only=True)
self.assertFalse(model.training)
self.assertTrue(
self._compare_state_dict(
model.state_dict(), task.ema_state.state_dict()
)
)
@tempdir
def test_qat(self, tmp_dir):
@META_ARCH_REGISTRY.register()
class QuantizableDetMetaArchForTest(mah.DetMetaArchForTest):
custom_config_dict = {"preserved_attributes": ["preserved_attr"]}
def __init__(self, cfg):
super().__init__(cfg)
self.avgpool.preserved_attr = "foo"
self.avgpool.not_preserved_attr = "bar"
def custom_prepare_fx(self, cfg, is_qat, example_input=None):
example_inputs = (torch.rand(1, 3, 3, 3),)
self.avgpool = prepare_qat_fx(
self.avgpool,
{"": set_backend_and_create_qconfig(cfg, is_train=self.training)},
example_inputs,
self.custom_config_dict,
)
def convert_fx_callback(model):
model.avgpool = convert_fx(
model.avgpool, convert_custom_config=model.custom_config_dict
)
return model
return self, convert_fx_callback
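        # custom_prepare_fx prepares only avgpool and returns a callback that converts it after
        # training; preserved_attributes carries preserved_attr through FX tracing.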
cfg = self._get_cfg(tmp_dir)
cfg.MODEL.META_ARCHITECTURE = "QuantizableDetMetaArchForTest"
cfg.QUANTIZATION.QAT.ENABLED = True
cfg.QUANTIZATION.EAGER_MODE = False
task = GeneralizedRCNNTask(cfg)
callbacks = [
QuantizationAwareTraining.from_config(cfg),
ModelCheckpoint(dirpath=task.cfg.OUTPUT_DIR, save_last=True),
]
trainer = pl.Trainer(
max_steps=1,
limit_train_batches=1,
num_sanity_val_steps=0,
callbacks=callbacks,
logger=False,
)
with EventStorage() as storage:
task.storage = storage
trainer.fit(task)
prepared_avgpool = task._prepared.model.avgpool
self.assertEqual(prepared_avgpool.preserved_attr, "foo")
self.assertFalse(hasattr(prepared_avgpool, "not_preserved_attr"))
with temp_defrost(cfg):
cfg.MODEL.WEIGHTS = os.path.join(tmp_dir, "last.ckpt")
model = GeneralizedRCNNTask.build_model(cfg, eval_only=True)
self.assertTrue(isinstance(model.avgpool, torch.fx.GraphModule))
@tempdir
def test_meta_arch_training_step(self, tmp_dir):
@META_ARCH_REGISTRY.register()
class DetMetaArchForWithTrainingStep(mah.DetMetaArchForTest):
def training_step(self, batch, batch_idx, opt, manual_backward):
assert batch
assert opt
assert manual_backward
                # Step the optimizer so progress tracking occurs; this is reflected
                # in the Trainer's global_step property, which determines when to
                # stop training when loop bounds are set via Trainer(max_steps=N).
opt.step()
return {"total_loss": 0.4}
cfg = self._get_cfg(tmp_dir)
cfg.MODEL.META_ARCHITECTURE = "DetMetaArchForWithTrainingStep"
task = GeneralizedRCNNTask(cfg)
trainer = self._get_trainer(tmp_dir)
with EventStorage() as storage:
task.storage = storage
trainer.fit(task)
| d2go-main | tests/runner/test_runner_lightning_task.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import os
import tempfile
import unittest
import d2go.runner.default_runner as default_runner
import torch
from d2go.registry.builtin import META_ARCH_REGISTRY
from d2go.runner import create_runner
from d2go.runner.training_hooks import TRAINER_HOOKS_REGISTRY
from d2go.utils.testing import helper
from d2go.utils.testing.data_loader_helper import create_local_dataset
from detectron2.evaluation import COCOEvaluator, RotatedCOCOEvaluator
from detectron2.structures import Boxes, ImageList, Instances
from mobile_cv.arch.quantization.qconfig import (
updateable_symmetric_moving_avg_minmax_config,
)
from torch.nn.parallel import DistributedDataParallel
@META_ARCH_REGISTRY.register()
class MetaArchForTest(torch.nn.Module):
def __init__(self, cfg):
super().__init__()
self.conv = torch.nn.Conv2d(3, 4, kernel_size=3, stride=1, padding=1)
self.bn = torch.nn.BatchNorm2d(4)
self.relu = torch.nn.ReLU(inplace=True)
self.avgpool = torch.nn.AdaptiveAvgPool2d((1, 1))
@property
def device(self):
return self.conv.weight.device
def forward(self, inputs):
if not self.training:
return self.inference(inputs)
images = [x["image"] for x in inputs]
images = ImageList.from_tensors(images, 1).to(self.device)
ret = self.conv(images.tensor)
ret = self.bn(ret)
ret = self.relu(ret)
ret = self.avgpool(ret)
return {"loss": ret.norm()}
def inference(self, inputs):
instance = Instances((10, 10))
instance.pred_boxes = Boxes(torch.tensor([[2.5, 2.5, 7.5, 7.5]]))
instance.scores = torch.tensor([0.9])
instance.pred_classes = torch.tensor([1], dtype=torch.int32)
ret = [{"instances": instance}]
return ret
@META_ARCH_REGISTRY.register()
class MetaArchForTestSingleValue(torch.nn.Module):
def __init__(self, cfg):
super().__init__()
self.scale_weight = torch.nn.Parameter(torch.Tensor([1.0]))
@property
def device(self):
return self.scale_weight.device
def forward(self, inputs):
if not self.training:
return self.inference(inputs)
ret = {"loss": self.scale_weight.norm() * 10.0}
print(self.scale_weight)
print(ret)
return ret
def inference(self, inputs):
instance = Instances((10, 10))
instance.pred_boxes = Boxes(
torch.tensor([[2.5, 2.5, 7.5, 7.5]], device=self.device) * self.scale_weight
)
instance.scores = torch.tensor([0.9])
instance.pred_classes = torch.tensor([1], dtype=torch.int32)
ret = [{"instances": instance}]
return ret
def _get_cfg(runner, output_dir, dataset_name):
cfg = runner.get_default_cfg()
cfg.MODEL.DEVICE = "cpu"
cfg.MODEL.META_ARCHITECTURE = "MetaArchForTest"
cfg.DATASETS.TRAIN = (dataset_name,)
cfg.DATASETS.TEST = (dataset_name,)
cfg.INPUT.MIN_SIZE_TRAIN = (10,)
cfg.INPUT.MIN_SIZE_TEST = (10,)
cfg.SOLVER.MAX_ITER = 5
cfg.SOLVER.STEPS = []
cfg.SOLVER.WARMUP_ITERS = 1
cfg.SOLVER.CHECKPOINT_PERIOD = 1
cfg.SOLVER.IMS_PER_BATCH = 2
cfg.OUTPUT_DIR = output_dir
return cfg
class TestDefaultRunner(unittest.TestCase):
def test_d2go_runner_build_model(self):
with tempfile.TemporaryDirectory() as tmp_dir:
ds_name = create_local_dataset(tmp_dir, 5, 10, 10)
runner = default_runner.Detectron2GoRunner()
cfg = _get_cfg(runner, tmp_dir, ds_name)
model = runner.build_model(cfg)
dl = runner.build_detection_train_loader(cfg)
batch = next(iter(dl))
output = model(batch)
self.assertIsInstance(output, dict)
model.eval()
output = model(batch)
self.assertIsInstance(output, list)
default_runner._close_all_tbx_writers()
def test_d2go_runner_train(self):
with tempfile.TemporaryDirectory() as tmp_dir:
ds_name = create_local_dataset(tmp_dir, 5, 10, 10)
runner = default_runner.Detectron2GoRunner()
cfg = _get_cfg(runner, tmp_dir, ds_name)
model = runner.build_model(cfg)
runner.do_train(cfg, model, resume=True)
final_model_path = os.path.join(tmp_dir, "model_final.pth")
self.assertTrue(os.path.isfile(final_model_path))
default_runner._close_all_tbx_writers()
def test_d2go_runner_test(self):
with tempfile.TemporaryDirectory() as tmp_dir:
ds_name = create_local_dataset(tmp_dir, 5, 10, 10)
runner = default_runner.Detectron2GoRunner()
cfg = _get_cfg(runner, tmp_dir, ds_name)
model = runner.build_model(cfg)
results = runner.do_test(cfg, model)
self.assertEqual(results["default"][ds_name]["bbox"]["AP"], 10.0)
default_runner._close_all_tbx_writers()
def test_d2go_build_evaluator(self):
for rotated, evaluator in [
(True, RotatedCOCOEvaluator),
(False, COCOEvaluator),
]:
with tempfile.TemporaryDirectory() as tmp_dir:
ds_name = create_local_dataset(tmp_dir, 5, 10, 10, is_rotated=rotated)
runner = default_runner.Detectron2GoRunner()
cfg = _get_cfg(runner, tmp_dir, ds_name)
ds_evaluators = runner.get_evaluator(cfg, ds_name, tmp_dir)
self.assertTrue(isinstance(ds_evaluators._evaluators[0], evaluator))
def test_create_runner(self):
runner = create_runner(
".".join(
[
default_runner.Detectron2GoRunner.__module__,
default_runner.Detectron2GoRunner.__name__,
]
)
)
self.assertTrue(isinstance(runner, default_runner.Detectron2GoRunner))
@helper.enable_ddp_env()
def test_d2go_runner_ema(self):
with tempfile.TemporaryDirectory() as tmp_dir:
ds_name = create_local_dataset(tmp_dir, 5, 10, 10)
runner = default_runner.Detectron2GoRunner()
cfg = _get_cfg(runner, tmp_dir, ds_name)
cfg.MODEL.META_ARCHITECTURE = "MetaArchForTestSingleValue"
cfg.MODEL_EMA.ENABLED = True
cfg.MODEL_EMA.DECAY = 0.9
cfg.MODEL_EMA.DECAY_WARM_UP_FACTOR = -1
def _run_train(cfg):
cfg = copy.deepcopy(cfg)
model = runner.build_model(cfg)
model = DistributedDataParallel(model, broadcast_buffers=False)
runner.do_train(cfg, model, True)
final_model_path = os.path.join(tmp_dir, "model_final.pth")
trained_weights = torch.load(final_model_path)
self.assertIn("ema_state", trained_weights)
default_runner._close_all_tbx_writers()
return final_model_path, model.module.ema_state
def _run_test(cfg, final_path, gt_ema):
cfg = copy.deepcopy(cfg)
cfg.MODEL.WEIGHTS = final_path
model = runner.build_model(cfg, eval_only=True)
self.assertGreater(len(model.ema_state.state), 0)
self.assertEqual(len(model.ema_state.state), len(gt_ema.state))
self.assertTrue(
_compare_state_dict(
model.ema_state.state_dict(), gt_ema.state_dict()
)
)
results = runner.do_test(cfg, model)
self.assertEqual(results["default"][ds_name]["bbox"]["AP"], 3.0)
self.assertEqual(results["ema"][ds_name]["bbox"]["AP"], 9.0)
default_runner._close_all_tbx_writers()
def _run_build_model_with_ema_weight(cfg, final_path, gt_ema):
cfg = copy.deepcopy(cfg)
cfg.MODEL.WEIGHTS = final_path
cfg.MODEL_EMA.USE_EMA_WEIGHTS_FOR_EVAL_ONLY = True
model = runner.build_model(cfg, eval_only=True)
self.assertTrue(
_compare_state_dict(model.state_dict(), gt_ema.state_dict())
)
final_model_path, gt_ema = _run_train(cfg)
_run_test(cfg, final_model_path, gt_ema)
_run_build_model_with_ema_weight(cfg, final_model_path, gt_ema)
def test_d2go_runner_train_qat_hook_update_stat(self):
"""Check that the qat hook is used and updates stats"""
@META_ARCH_REGISTRY.register()
class MetaArchForTestQAT(MetaArchForTest):
def prepare_for_quant(self, cfg):
"""Set the qconfig to updateable observers"""
self.qconfig = updateable_symmetric_moving_avg_minmax_config
return self
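        # The updateable observers are expected to refresh min/max only when the QAT hook
        # triggers an update; the assertions below rely on this behavior.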
def setup(tmp_dir):
ds_name = create_local_dataset(tmp_dir, 5, 10, 10)
runner = default_runner.Detectron2GoRunner()
cfg = _get_cfg(runner, tmp_dir, ds_name)
cfg.merge_from_list(
(
["MODEL.META_ARCHITECTURE", "MetaArchForTestQAT"]
+ ["QUANTIZATION.QAT.ENABLED", "True"]
+ ["QUANTIZATION.QAT.START_ITER", "0"]
+ ["QUANTIZATION.QAT.ENABLE_OBSERVER_ITER", "0"]
)
)
return runner, cfg
# check observers have not changed their minmax vals (stats changed)
with tempfile.TemporaryDirectory() as tmp_dir:
runner, cfg = setup(tmp_dir)
model = runner.build_model(cfg)
runner.do_train(cfg, model, resume=True)
observer = model.conv.activation_post_process.activation_post_process
self.assertEqual(observer.min_val, torch.tensor(float("inf")))
self.assertEqual(observer.max_val, torch.tensor(float("-inf")))
self.assertNotEqual(observer.max_stat, torch.tensor(float("inf")))
# check observer does not change if period is > max_iter
with tempfile.TemporaryDirectory() as tmp_dir:
runner, cfg = setup(tmp_dir)
cfg.merge_from_list(
(
["QUANTIZATION.QAT.UPDATE_OBSERVER_STATS_PERIODICALLY", "True"]
+ ["QUANTIZATION.QAT.UPDATE_OBSERVER_STATS_PERIOD", "10"]
)
)
model = runner.build_model(cfg)
runner.do_train(cfg, model, resume=True)
observer = model.conv.activation_post_process.activation_post_process
self.assertEqual(observer.min_val, torch.tensor(float("inf")))
self.assertEqual(observer.max_val, torch.tensor(float("-inf")))
self.assertNotEqual(observer.max_stat, torch.tensor(float("inf")))
# check observer changes if period < max_iter
with tempfile.TemporaryDirectory() as tmp_dir:
runner, cfg = setup(tmp_dir)
cfg.merge_from_list(
(
["QUANTIZATION.QAT.UPDATE_OBSERVER_STATS_PERIODICALLY", "True"]
+ ["QUANTIZATION.QAT.UPDATE_OBSERVER_STATS_PERIOD", "1"]
)
)
model = runner.build_model(cfg)
runner.do_train(cfg, model, resume=True)
observer = model.conv.activation_post_process.activation_post_process
self.assertNotEqual(observer.min_val, torch.tensor(float("inf")))
self.assertNotEqual(observer.max_val, torch.tensor(float("-inf")))
self.assertNotEqual(observer.max_stat, torch.tensor(float("inf")))
default_runner._close_all_tbx_writers()
def test_d2go_runner_train_qat(self):
"""Make sure QAT runs"""
@META_ARCH_REGISTRY.register()
class MetaArchForTestQAT1(torch.nn.Module):
def __init__(self, cfg):
super().__init__()
self.conv = torch.nn.Conv2d(3, 4, kernel_size=3, stride=1, padding=1)
@property
def device(self):
return self.conv.weight.device
def forward(self, inputs):
images = [x["image"] for x in inputs]
images = ImageList.from_tensors(images, 1)
ret = self.conv(images.tensor)
losses = {"loss": ret.norm()}
# run the same conv again
ret1 = self.conv(images.tensor)
losses["ret1"] = ret1.norm()
return losses
def setup(tmp_dir, backend, qat_method):
ds_name = create_local_dataset(tmp_dir, 5, 10, 10)
runner = default_runner.Detectron2GoRunner()
cfg = _get_cfg(runner, tmp_dir, ds_name)
cfg.merge_from_list(
(
["MODEL.META_ARCHITECTURE", "MetaArchForTestQAT1"]
+ ["QUANTIZATION.QAT.ENABLED", "True"]
+ ["QUANTIZATION.QAT.START_ITER", "1"]
+ ["QUANTIZATION.QAT.ENABLE_OBSERVER_ITER", "0"]
+ ["QUANTIZATION.QAT.ENABLE_LEARNABLE_OBSERVER_ITER", "2"]
+ ["QUANTIZATION.QAT.DISABLE_OBSERVER_ITER", "4"]
+ ["QUANTIZATION.QAT.FREEZE_BN_ITER", "4"]
+ ["QUANTIZATION.BACKEND", backend]
+ ["QUANTIZATION.QAT.FAKE_QUANT_METHOD", qat_method]
)
)
return runner, cfg
        # NOTE: fbgemm with learnable fake-quant does not appear to be supported, so that combination is skipped.
for backend, qat_method in [
("fbgemm", "default"),
("qnnpack", "default"),
("qnnpack", "learnable"),
]:
with self.subTest(backend=backend, qat_method=qat_method):
with tempfile.TemporaryDirectory() as tmp_dir:
runner, cfg = setup(tmp_dir, backend=backend, qat_method=qat_method)
model = runner.build_model(cfg)
print(model)
runner.do_train(cfg, model, resume=True)
default_runner._close_all_tbx_writers()
def test_d2go_runner_trainer_hooks(self):
counts = 0
@TRAINER_HOOKS_REGISTRY.register()
def _check_hook_func(hooks, cfg):
nonlocal counts
counts = len(hooks)
print(hooks)
with tempfile.TemporaryDirectory() as tmp_dir:
ds_name = create_local_dataset(tmp_dir, 5, 10, 10)
runner = default_runner.Detectron2GoRunner()
cfg = _get_cfg(runner, tmp_dir, ds_name)
model = runner.build_model(cfg)
runner.do_train(cfg, model, resume=True)
default_runner._close_all_tbx_writers()
self.assertGreater(counts, 0)
def _compare_state_dict(sd1, sd2, abs_error=1e-3):
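    # Returns True only if both state dicts share the same keys and every float tensor
    # matches within abs_error (non-float tensors must match exactly).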
if len(sd1) != len(sd2):
return False
if set(sd1.keys()) != set(sd2.keys()):
return False
for name in sd1:
if sd1[name].dtype == torch.float32:
if torch.abs((sd1[name] - sd2[name])).max() > abs_error:
return False
elif (sd1[name] != sd2[name]).any():
return False
return True
| d2go-main | tests/runner/test_runner_default_runner.py |
| d2go-main | tests/runner/__init__.py |
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import unittest
import d2go.runner.default_runner as default_runner
import torch
from d2go.registry.builtin import META_ARCH_REGISTRY
from d2go.utils.testing.data_loader_helper import create_local_dataset
from d2go.utils.testing.helper import tempdir
from detectron2.structures import ImageList
TEST_CUDA: bool = torch.cuda.is_available()
@META_ARCH_REGISTRY.register()
class MetaArchForTestSimple(torch.nn.Module):
def __init__(self, cfg):
super().__init__()
self.conv = torch.nn.Conv2d(3, 4, kernel_size=3, stride=1, padding=1)
self.bn = torch.nn.BatchNorm2d(4)
self.relu = torch.nn.ReLU(inplace=True)
self.avgpool = torch.nn.AdaptiveAvgPool2d((1, 1))
@property
def device(self):
return self.conv.weight.device
def forward(self, inputs):
images = [x["image"] for x in inputs]
images = ImageList.from_tensors(images, 1).to(self.device)
ret = self.conv(images.tensor)
ret = self.bn(ret)
ret = self.relu(ret)
ret = self.avgpool(ret)
return {"loss": ret.norm()}
def train_with_memory_profiler(output_dir, device="cpu"):
ds_name = create_local_dataset(output_dir, 5, 10, 10)
runner = default_runner.Detectron2GoRunner()
cfg = runner.get_default_cfg()
cfg.MODEL.DEVICE = device
cfg.MODEL.META_ARCHITECTURE = "MetaArchForTestSimple"
cfg.SOLVER.MAX_ITER = 10
cfg.DATASETS.TRAIN = (ds_name,)
cfg.DATASETS.TEST = (ds_name,)
cfg.OUTPUT_DIR = output_dir
cfg.MEMORY_PROFILER.ENABLED = True
cfg.MEMORY_PROFILER.LOG_N_STEPS = 3
cfg.MEMORY_PROFILER.LOG_DURING_TRAIN_AT = 5
# Register configs
runner.register(cfg)
    # Build the model and run training with the memory profiler enabled
model = runner.build_model(cfg)
runner.do_train(cfg, model, resume=True)
return cfg
class TestGPUMemoryProfiler(unittest.TestCase):
@tempdir
def test_gpu_memory_profiler_no_gpu(self, tmp_dir: str):
# GPU memory profiler should silently pass if no CUDA is available
train_with_memory_profiler(tmp_dir, device="cpu")
@tempdir
@unittest.skipIf(not TEST_CUDA, "no CUDA detected")
def test_gpu_memory_profiler_with_gpu(self, tmp_dir: str):
cfg = train_with_memory_profiler(tmp_dir, device="cuda")
n = cfg.MEMORY_PROFILER.LOG_N_STEPS
s = cfg.MEMORY_PROFILER.LOG_DURING_TRAIN_AT
save_dir = os.path.join(tmp_dir, "memory_snapshot")
self.assertTrue(os.path.exists(save_dir))
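        # Traces are expected at iteration n-1 (after the first n steps) and at
        # s+n-1 (n steps after the mid-training trigger).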
for i in [n - 1, s + n - 1]:
trace_dir = os.path.join(save_dir, f"iter{i}_rank0")
self.assertTrue(os.path.exists(trace_dir))
self.assertTrue(os.path.exists(os.path.join(trace_dir, "snapshot.pickle")))
self.assertTrue(os.path.exists(os.path.join(trace_dir, "trace_plot.html")))
self.assertTrue(
os.path.exists(os.path.join(trace_dir, "segment_plot.html"))
)
| d2go-main | tests/utils/test_gpu_memory_profiler.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import json
import os
import unittest
from typing import Dict, List, Optional, Tuple
import d2go.runner.default_runner as default_runner
import numpy as np
import torch
from d2go.registry.builtin import META_ARCH_REGISTRY
from d2go.utils.testing.data_loader_helper import (
create_toy_dataset,
LocalImageGenerator,
)
from d2go.utils.testing.helper import tempdir
from d2go.utils.visualization import DataLoaderVisWrapper, VisualizerWrapper
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.structures import Boxes, Instances
from detectron2.utils.events import EventStorage
def create_test_images_and_dataset_json(
data_dir: str, img_w: int, img_h: int, num_images: int = 10, num_classes: int = -1
) -> Tuple[str, str]:
# create image and json
image_dir = os.path.join(data_dir, "images")
os.makedirs(image_dir)
json_dataset, meta_data = create_toy_dataset(
LocalImageGenerator(image_dir, width=img_w, height=img_h),
num_images=num_images,
num_classes=num_classes,
)
json_file = os.path.join(data_dir, "annotation.json")
with open(json_file, "w") as f:
json.dump(json_dataset, f)
return image_dir, json_file
def create_dummy_input_dict(
img_w: int = 60, img_h: int = 60, bbox_list: Optional[List[List[int]]] = None
) -> Dict:
# Create dummy data
instance = Instances((img_w, img_h))
if bbox_list is not None:
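        # NOTE: the box coordinates are fixed at [10, 10, 20, 20]; bbox_list only
        # toggles whether GT boxes/classes are attached to the instance.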
instance.gt_boxes = Boxes(torch.tensor([[10, 10, 20, 20]]))
instance.gt_classes = torch.tensor([0])
input_dict = {"image": torch.zeros(3, img_w, img_h), "instances": instance}
return input_dict
@META_ARCH_REGISTRY.register()
class DummyMetaArch(torch.nn.Module):
@staticmethod
def visualize_train_input(visualizer_wrapper, input_dict):
return {"default": np.zeros((60, 60, 30)), "secondary": np.zeros((60, 60, 30))}
class ImageDictStore:
def __init__(self):
self.write_buffer = []
def add_image(self, **kwargs):
self.write_buffer.append(copy.deepcopy(kwargs))
class MockTbxWriter:
def __init__(self):
self._writer = ImageDictStore()
class TestVisualization(unittest.TestCase):
def setUp(self):
self._builtin_datasets = set(DatasetCatalog)
def tearDown(self):
# Need to remove injected dataset
injected_dataset = set(DatasetCatalog) - self._builtin_datasets
for ds in injected_dataset:
DatasetCatalog.remove(ds)
MetadataCatalog.remove(ds)
@tempdir
def test_visualizer_wrapper(self, tmp_dir: str):
image_dir, json_file = create_test_images_and_dataset_json(tmp_dir, 60, 60)
# Create config data
runner = default_runner.Detectron2GoRunner()
cfg = runner.get_default_cfg()
cfg.merge_from_list(
[
"D2GO_DATA.DATASETS.COCO_INJECTION.NAMES",
str(["inj_ds1"]),
"D2GO_DATA.DATASETS.COCO_INJECTION.IM_DIRS",
str([image_dir]),
"D2GO_DATA.DATASETS.COCO_INJECTION.JSON_FILES",
str([json_file]),
"DATASETS.TRAIN",
str(["inj_ds1"]),
]
)
# Register configs
runner.register(cfg)
DatasetCatalog.get("inj_ds1")
# Create dummy data to pass to wrapper
input_dict = create_dummy_input_dict(60, 60, [[10, 10, 20, 20]])
vis_wrapper = VisualizerWrapper(cfg)
vis_image = vis_wrapper.visualize_train_input(input_dict)
        # visualize_train_input scales the input image by 2x by default
self.assertTrue(any(vis_image[20, 20, :] != 0))
self.assertFalse(any(vis_image[30, 30, :] != 0))
self.assertTrue(any(vis_image[40, 40, :] != 0))
@tempdir
def test_dataloader_visualizer_wrapper(self, tmp_dir: str):
image_dir, json_file = create_test_images_and_dataset_json(tmp_dir, 60, 60)
# Create config data
runner = default_runner.Detectron2GoRunner()
cfg = runner.get_default_cfg()
cfg.merge_from_list(
[
"D2GO_DATA.DATASETS.COCO_INJECTION.NAMES",
str(["inj_ds2"]),
"D2GO_DATA.DATASETS.COCO_INJECTION.IM_DIRS",
str([image_dir]),
"D2GO_DATA.DATASETS.COCO_INJECTION.JSON_FILES",
str([json_file]),
"DATASETS.TRAIN",
str(["inj_ds2"]),
]
)
# Register configs
runner.register(cfg)
DatasetCatalog.get("inj_ds2")
with EventStorage():
# Create mock storage for writer
mock_tbx_writer = MockTbxWriter()
# Create a wrapper around an iterable object and run once
input_dict = create_dummy_input_dict(60, 60, [[1, 1, 2, 2]])
dl_wrapper = DataLoaderVisWrapper(
cfg, mock_tbx_writer, [[input_dict], [input_dict]]
)
for _ in dl_wrapper:
break
# Check data has been written to buffer
self.assertTrue(len(mock_tbx_writer._writer.write_buffer) == 1)
vis_image_dict = mock_tbx_writer._writer.write_buffer[0]
self.assertTrue("tag" in vis_image_dict)
self.assertTrue("img_tensor" in vis_image_dict)
self.assertTrue("global_step" in vis_image_dict)
@tempdir
def test_dict_based_dataloader_visualizer_wrapper(self, tmp_dir: str):
image_dir, json_file = create_test_images_and_dataset_json(tmp_dir, 60, 60)
# Create config data
runner = default_runner.Detectron2GoRunner()
cfg = runner.get_default_cfg()
cfg.merge_from_list(
[
"D2GO_DATA.DATASETS.COCO_INJECTION.NAMES",
str(["inj_ds3"]),
"D2GO_DATA.DATASETS.COCO_INJECTION.IM_DIRS",
str([image_dir]),
"D2GO_DATA.DATASETS.COCO_INJECTION.JSON_FILES",
str([json_file]),
"DATASETS.TRAIN",
str(["inj_ds3"]),
"MODEL.META_ARCHITECTURE",
"DummyMetaArch",
]
)
# Register configs
runner.register(cfg)
DatasetCatalog.get("inj_ds3")
with EventStorage():
# Create mock storage for writer
mock_tbx_writer = MockTbxWriter()
# Create a wrapper around an iterable object and run once
input_dict = create_dummy_input_dict(60, 60, [[1, 1, 2, 2]])
dl_wrapper = DataLoaderVisWrapper(
cfg, mock_tbx_writer, [[input_dict], [input_dict]]
)
for _ in dl_wrapper:
break
# Check data has been written to buffer
self.assertTrue(len(mock_tbx_writer._writer.write_buffer) == 2)
self.assertTrue(
"train_loader_batch_0/default"
in mock_tbx_writer._writer.write_buffer[0]["tag"]
)
self.assertTrue(
"train_loader_batch_0/secondary"
in mock_tbx_writer._writer.write_buffer[1]["tag"]
)
| d2go-main | tests/utils/test_visualization.py |
| d2go-main | tests/utils/__init__.py |
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import torch
from detectron2.layers import cat
from detectron2.modeling.roi_heads.fast_rcnn import fast_rcnn_inference
from detectron2.structures import Boxes
from mobile_cv.common.misc.oss_utils import is_oss
class TestBoxWithNMSLimit(unittest.TestCase):
@unittest.skipIf(is_oss(), "Caffe2 is not available for OSS")
def test_caffe2_pytorch_eq(self):
ims_per_batch = 8
post_nms_topk = 100
detections_per_im = 10
num_class = 80
score_thresh = 0.05
nms_thresh = 0.5
image_shapes = [torch.Size([800, 800])] * ims_per_batch
batch_splits = [post_nms_topk] * ims_per_batch
        # NOTE: There are still some minor, not fully understood implementation
        # differences (e.g. ordering when scores are equal across classes) that
        # cause some seeds to fail this test.
        # Thus set a fixed seed to make sure this test passes consistently.
rng = torch.Generator()
rng.manual_seed(42)
boxes = []
for n in batch_splits:
box = 1000.0 * 0.5 * torch.rand(n, num_class, 4, generator=rng) + 0.001
box[:, :, -2:] += box[:, :, :2]
box = box.view(n, num_class * 4)
boxes.append(box)
scores = [torch.rand(n, num_class + 1, generator=rng) for n in batch_splits]
ref_results, ref_kept_indices = fast_rcnn_inference(
boxes,
scores,
image_shapes,
score_thresh=score_thresh,
nms_thresh=nms_thresh,
topk_per_image=detections_per_im,
)
for result, kept_index, score in zip(ref_results, ref_kept_indices, scores):
torch.testing.assert_close(
score[kept_index, result.pred_classes],
result.scores,
)
# clip is done in BBoxTransformOp
c2_boxes = []
for box, image_shape in zip(boxes, image_shapes):
num_bbox_reg_classes = box.shape[1] // 4
clipped_box = Boxes(box.reshape(-1, 4))
clipped_box.clip(image_shape)
clipped_box = clipped_box.tensor.view(-1, num_bbox_reg_classes * 4)
c2_boxes.append(clipped_box)
c2_boxes = cat(c2_boxes)
c2_scores = cat(scores)
c2_batch_splits = torch.Tensor(batch_splits)
nms_outputs = torch.ops._caffe2.BoxWithNMSLimit(
c2_scores,
c2_boxes,
c2_batch_splits,
score_thresh=float(score_thresh),
nms=float(nms_thresh),
detections_per_im=int(detections_per_im),
soft_nms_enabled=False,
soft_nms_method="linear",
soft_nms_sigma=0.5,
soft_nms_min_score_thres=0.001,
rotated=False,
cls_agnostic_bbox_reg=False,
input_boxes_include_bg_cls=False,
output_classes_include_bg_cls=False,
legacy_plus_one=False,
)
(
roi_score_nms,
roi_bbox_nms,
roi_class_nms,
roi_batch_splits_nms,
roi_keeps_nms,
roi_keeps_size_nms,
) = nms_outputs # noqa
roi_score_nms = roi_score_nms.split(roi_batch_splits_nms.int().tolist())
roi_bbox_nms = roi_bbox_nms.split(roi_batch_splits_nms.int().tolist())
roi_class_nms = roi_class_nms.split(roi_batch_splits_nms.int().tolist())
roi_keeps_nms = roi_keeps_nms.split(roi_batch_splits_nms.int().tolist())
for _score_nms, _class_nms, _keeps_nms, _score in zip(
roi_score_nms, roi_class_nms, roi_keeps_nms, scores
):
torch.testing.assert_close(
_score[_keeps_nms.to(torch.int64), _class_nms.to(torch.int64)],
_score_nms,
)
for ref, s, b, c in zip(
ref_results, roi_score_nms, roi_bbox_nms, roi_class_nms
):
s1, i1 = s.sort()
s2, i2 = ref.scores.sort()
torch.testing.assert_close(s1, s2)
torch.testing.assert_close(b[i1], ref.pred_boxes.tensor[i2])
torch.testing.assert_close(c.to(torch.int64)[i1], ref.pred_classes[i2])
for ref, k in zip(ref_kept_indices, roi_keeps_nms):
# NOTE: order might be different due to implementation
ref_set = set(ref.tolist())
k_set = set(k.tolist())
self.assertEqual(ref_set, k_set)
| d2go-main | tests/modeling/test_box_with_nms_limit.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import random
import unittest
import d2go.runner.default_runner as default_runner
import torch
from d2go.optimizer.build import build_optimizer_mapper
from d2go.utils.testing import helper
class TestArch(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(3, 4, kernel_size=5, stride=1, padding=1)
self.bn = torch.nn.BatchNorm2d(4)
self.relu = torch.nn.ReLU(inplace=True)
self.avgpool = torch.nn.AdaptiveAvgPool2d((1, 1))
self.linear = torch.nn.Linear(4, 1)
def forward(self, x):
ret = self.conv(x)
ret = self.bn(ret)
ret = self.relu(ret)
ret = self.avgpool(ret)
ret = torch.transpose(ret, 1, 3)
ret = self.linear(ret)
return ret
def _test_each_optimizer(cfg, cuda: bool = False):
print("Solver: " + str(cfg.SOLVER.OPTIMIZER))
device = "cuda:0" if cuda else "cpu"
model = TestArch().to(device)
criterion = torch.nn.BCEWithLogitsLoss()
optimizer = build_optimizer_mapper(cfg, model)
optimizer.zero_grad()
random.seed(20210912)
num_iter = 500
for _ in range(num_iter):
target = torch.empty(1, 1, 1, 1).fill_(random.randint(0, 1)).to(device)
noise = torch.rand(1, 3, 16, 16).to(device)
x = torch.add(noise, 2 * target)
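        # Shift the noise by 2x the binary target so a simple threshold can separate the two classes.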
y_pred = model(x)
loss = criterion(y_pred, target)
loss.backward()
optimizer.step()
n_correct = 0
n_eval = 100
for _ in range(n_eval):
target = torch.empty(1, 1, 1, 1).fill_(random.randint(0, 1)).to(device)
x = torch.add(torch.rand(1, 3, 16, 16).to(device), 2 * target)
y_pred = torch.round(torch.sigmoid(model(x)))
if y_pred == target:
n_correct += 1
print("Correct prediction rate {0}.".format(n_correct / n_eval))
def _check_param_group(self, group, num_params=None, **kwargs):
if num_params is not None:
self.assertEqual(len(group["params"]), num_params)
for key, val in kwargs.items():
self.assertEqual(group[key], val)
def get_optimizer_cfg(
lr,
weight_decay=None,
weight_decay_norm=None,
weight_decay_bias=None,
lr_mult=None,
):
runner = default_runner.Detectron2GoRunner()
cfg = runner.get_default_cfg()
if lr is not None:
cfg.SOLVER.BASE_LR = lr
if weight_decay is not None:
cfg.SOLVER.WEIGHT_DECAY = weight_decay
if weight_decay_norm is not None:
cfg.SOLVER.WEIGHT_DECAY_NORM = weight_decay_norm
if weight_decay_bias is not None:
cfg.SOLVER.WEIGHT_DECAY_BIAS = weight_decay_bias
if lr_mult is not None:
cfg.SOLVER.LR_MULTIPLIER_OVERWRITE = [lr_mult]
return cfg
class TestOptimizer(unittest.TestCase):
def test_create_optimizer_default(self):
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(3, 3, 1)
self.bn = torch.nn.BatchNorm2d(3)
def forward(self, x):
return self.bn(self.conv(x))
model = Model()
cfg = get_optimizer_cfg(
lr=1.0, weight_decay=1.0, weight_decay_norm=1.0, weight_decay_bias=1.0
)
optimizer = build_optimizer_mapper(cfg, model)
self.assertEqual(len(optimizer.param_groups), 1)
_check_param_group(
self, optimizer.param_groups[0], num_params=4, weight_decay=1.0, lr=1.0
)
def test_create_optimizer_lr(self):
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv1 = torch.nn.Conv2d(3, 3, 1)
self.conv2 = torch.nn.Conv2d(3, 3, 1)
self.bn = torch.nn.BatchNorm2d(3)
def forward(self, x):
return self.bn(self.conv2(self.conv1(x)))
model = Model()
cfg = get_optimizer_cfg(
lr=1.0,
lr_mult={"conv1": 3.0, "conv2": 3.0},
weight_decay=2.0,
weight_decay_norm=2.0,
)
optimizer = build_optimizer_mapper(cfg, model)
self.assertEqual(len(optimizer.param_groups), 2)
_check_param_group(self, optimizer.param_groups[0], num_params=4, lr=3.0)
_check_param_group(self, optimizer.param_groups[1], num_params=2, lr=1.0)
def test_create_optimizer_weight_decay_norm(self):
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(3, 3, 1)
self.bn = torch.nn.BatchNorm2d(3)
def forward(self, x):
return self.bn(self.conv(x))
model = Model()
cfg = get_optimizer_cfg(
lr=1.0, weight_decay=1.0, weight_decay_norm=2.0, weight_decay_bias=1.0
)
optimizer = build_optimizer_mapper(cfg, model)
self.assertEqual(len(optimizer.param_groups), 2)
_check_param_group(
self, optimizer.param_groups[0], num_params=2, lr=1.0, weight_decay=1.0
)
_check_param_group(
self, optimizer.param_groups[1], num_params=2, lr=1.0, weight_decay=2.0
)
OPTIMIZER_NAMES_PART1 = ["SGD", "AdamW", "SGD_MT"]
OPTIMIZER_NAMES_PART2 = ["AdamW_MT", "Adam"]
def _test_optimizers_list(self, optimizers_list, fused: bool = False):
runner = default_runner.Detectron2GoRunner()
cfg = runner.get_default_cfg()
multipliers = [None, [{"conv": 0.1}]]
for optimizer_name in optimizers_list:
for mult in multipliers:
cfg.SOLVER.BASE_LR = 0.01
cfg.SOLVER.FUSED = fused
cfg.SOLVER.OPTIMIZER = optimizer_name
cfg.SOLVER.MULTIPLIERS = mult
_test_each_optimizer(cfg, cuda=fused)
def test_all_optimizers_part_1(self):
self._test_optimizers_list(self.OPTIMIZER_NAMES_PART1)
def test_all_optimizers_part_2(self):
self._test_optimizers_list(self.OPTIMIZER_NAMES_PART2)
def _test_full_model_grad_clipping(self, optimizers_list):
runner = default_runner.Detectron2GoRunner()
cfg = runner.get_default_cfg()
for optimizer_name in optimizers_list:
cfg.SOLVER.BASE_LR = 0.02
cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE = 0.2
cfg.SOLVER.CLIP_GRADIENTS.ENABLED = True
cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE = "full_model"
cfg.SOLVER.OPTIMIZER = optimizer_name
_test_each_optimizer(cfg)
def test_full_model_grad_clipping_part1(self):
self._test_full_model_grad_clipping(self.OPTIMIZER_NAMES_PART1)
def test_full_model_grad_clipping_part2(self):
self._test_full_model_grad_clipping(self.OPTIMIZER_NAMES_PART2)
def test_create_optimizer_custom(self):
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(3, 3, 1)
self.bn = torch.nn.BatchNorm2d(3)
def forward(self, x):
return self.bn(self.conv(x))
def get_optimizer_param_groups(self, _opts):
ret = [
{
"params": [self.conv.weight],
"lr": 10.0,
}
]
return ret
model = Model()
cfg = get_optimizer_cfg(lr=1.0, weight_decay=1.0, weight_decay_norm=0.0)
optimizer = build_optimizer_mapper(cfg, model)
self.assertEqual(len(optimizer.param_groups), 3)
_check_param_group(
self, optimizer.param_groups[0], num_params=1, lr=10.0, weight_decay=1.0
)
_check_param_group(
self, optimizer.param_groups[1], num_params=1, lr=1.0, weight_decay=1.0
)
_check_param_group(
self, optimizer.param_groups[2], num_params=2, lr=1.0, weight_decay=0.0
)
@helper.enable_ddp_env()
def test_create_optimizer_custom_ddp(self):
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(3, 3, 1)
self.bn = torch.nn.BatchNorm2d(3)
def forward(self, x):
return self.bn(self.conv(x))
def get_optimizer_param_groups(self, _opts):
ret = [
{
"params": [self.conv.weight],
"lr": 10.0,
}
]
return ret
model = Model()
model = torch.nn.parallel.DistributedDataParallel(model)
cfg = get_optimizer_cfg(lr=1.0, weight_decay=1.0, weight_decay_norm=0.0)
optimizer = build_optimizer_mapper(cfg, model)
self.assertEqual(len(optimizer.param_groups), 3)
_check_param_group(
self, optimizer.param_groups[0], num_params=1, lr=10.0, weight_decay=1.0
)
_check_param_group(
self, optimizer.param_groups[1], num_params=1, lr=1.0, weight_decay=1.0
)
_check_param_group(
self, optimizer.param_groups[2], num_params=2, lr=1.0, weight_decay=0.0
)
| d2go-main | tests/modeling/test_optimizer.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import os
import unittest
import torch
from d2go.export.exporter import convert_and_export_predictor
from d2go.runner import GeneralizedRCNNRunner
from d2go.utils.testing.data_loader_helper import (
create_detection_data_loader_on_toy_dataset,
)
from d2go.utils.testing.rcnn_helper import get_quick_test_config_opts, RCNNBaseTestCases
from mobile_cv.common.misc.file_utils import make_temp_directory
from mobile_cv.common.misc.oss_utils import is_oss
def _maybe_skip_test(self, predictor_type):
if is_oss() and "@c2_ops" in predictor_type:
self.skipTest("Caffe2 is not available for OSS")
if not torch.cuda.is_available() and "_gpu" in predictor_type:
self.skipTest("GPU is not available for exporting GPU model")
class TestFBNetV3MaskRCNNFP32(RCNNBaseTestCases.TemplateTestCase):
def setup_custom_test(self):
super().setup_custom_test()
self.cfg.merge_from_file("detectron2go://mask_rcnn_fbnetv3a_dsmask_C4.yaml")
def test_inference(self):
self._test_inference()
@RCNNBaseTestCases.expand_parameterized_test_export(
[
["torchscript@c2_ops", True],
["torchscript", True],
["torchscript_int8@c2_ops", False],
["torchscript_int8", False],
]
)
def test_export(self, predictor_type, compare_match):
_maybe_skip_test(self, predictor_type)
self._test_export(predictor_type, compare_match=compare_match)
class TestFBNetV3MaskRCNNFPNFP32(RCNNBaseTestCases.TemplateTestCase):
def setup_custom_test(self):
super().setup_custom_test()
self.cfg.merge_from_file("detectron2go://mask_rcnn_fbnetv3g_fpn.yaml")
def test_inference(self):
self._test_inference()
@RCNNBaseTestCases.expand_parameterized_test_export(
[
# FIXME: exporting c2_ops for FPN model might not pass this test for certain
# combination of image sizes and resizing targets. data points are:
# - passes before D35238890: image_size and resizing target are both 32x64 (backbone's divisibility).
# - doesn't pass after D35238890: image_size are 32x64, resizing to 5x10.
["torchscript@c2_ops", False],
["torchscript", True],
["torchscript_int8@c2_ops", False],
["torchscript_int8", False],
]
)
def test_export(self, predictor_type, compare_match):
_maybe_skip_test(self, predictor_type)
self._test_export(predictor_type, compare_match=compare_match)
class TestFBNetV3MaskRCNNQATEager(RCNNBaseTestCases.TemplateTestCase):
def setup_custom_test(self):
super().setup_custom_test()
self.cfg.merge_from_file("detectron2go://mask_rcnn_fbnetv3a_dsmask_C4.yaml")
# enable QAT
self.cfg.merge_from_list(
[
"QUANTIZATION.BACKEND",
"qnnpack",
"QUANTIZATION.QAT.ENABLED",
"True",
]
)
# FIXME: NaiveSyncBN is not supported
self.cfg.merge_from_list(["MODEL.FBNET_V2.NORM", "bn"])
def test_inference(self):
self._test_inference()
@RCNNBaseTestCases.expand_parameterized_test_export(
[
["torchscript_int8@c2_ops", False], # TODO: fix mismatch
["torchscript_int8", False], # TODO: fix mismatch
]
)
def test_export(self, predictor_type, compare_match):
_maybe_skip_test(self, predictor_type)
self._test_export(predictor_type, compare_match=compare_match)
class TestFBNetV3KeypointRCNNFP32(RCNNBaseTestCases.TemplateTestCase):
def setup_custom_test(self):
super().setup_custom_test()
self.cfg.merge_from_file("detectron2go://keypoint_rcnn_fbnetv3a_dsmask_C4.yaml")
# FIXME: have to use qnnpack due to follow error:
# Per Channel Quantization is currently disabled for transposed conv
self.cfg.merge_from_list(
[
"QUANTIZATION.BACKEND",
"qnnpack",
]
)
def test_inference(self):
self._test_inference()
@RCNNBaseTestCases.expand_parameterized_test_export(
[
["torchscript_int8@c2_ops", False], # TODO: fix mismatch
["torchscript_int8", False], # TODO: fix mismatch
]
)
def test_export(self, predictor_type, compare_match):
if is_oss() and "@c2_ops" in predictor_type:
self.skipTest("Caffe2 is not available for OSS")
self._test_export(predictor_type, compare_match=compare_match)
class TestTorchVisionExport(unittest.TestCase):
def test_export_torchvision_format(self):
runner = GeneralizedRCNNRunner()
cfg = runner.get_default_cfg()
cfg.merge_from_file("detectron2go://mask_rcnn_fbnetv3a_dsmask_C4.yaml")
cfg.merge_from_list(get_quick_test_config_opts())
cfg.merge_from_list(["MODEL.DEVICE", "cpu"])
pytorch_model = runner.build_model(cfg, eval_only=True)
from typing import Dict, List
class Wrapper(torch.nn.Module):
def __init__(self, model):
super().__init__()
self.model = model
def forward(self, inputs: List[torch.Tensor]):
x = inputs[0].unsqueeze(0) * 255
scale = 320.0 / min(x.shape[-2], x.shape[-1])
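                # Resize so the shorter side is 320 before running the exported model;
                # box outputs are scaled back to the original resolution below.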
x = torch.nn.functional.interpolate(
x,
scale_factor=scale,
mode="bilinear",
align_corners=True,
recompute_scale_factor=True,
)
out = self.model(x[0])
res: Dict[str, torch.Tensor] = {}
res["boxes"] = out[0] / scale
res["labels"] = out[2]
res["scores"] = out[1]
return inputs, [res]
size_divisibility = max(pytorch_model.backbone.size_divisibility, 10)
h, w = size_divisibility, size_divisibility * 2
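        # Pick a toy input size that respects the backbone's size divisibility.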
with create_detection_data_loader_on_toy_dataset(
cfg, h, w, is_train=False
) as data_loader:
with make_temp_directory("test_export_torchvision_format") as tmp_dir:
predictor_path = convert_and_export_predictor(
cfg,
copy.deepcopy(pytorch_model),
"torchscript",
tmp_dir,
data_loader,
)
orig_model = torch.jit.load(os.path.join(predictor_path, "model.jit"))
wrapped_model = Wrapper(orig_model)
# optionally do a forward
wrapped_model([torch.rand(3, 600, 600)])
scripted_model = torch.jit.script(wrapped_model)
scripted_model.save(os.path.join(tmp_dir, "new_file.pt"))
class TestMaskRCNNExportOptions(RCNNBaseTestCases.TemplateTestCase):
def setup_custom_test(self):
super().setup_custom_test()
self.cfg.merge_from_file("detectron2go://mask_rcnn_fbnetv3a_dsmask_C4.yaml")
def _get_test_image_sizes(self, is_train):
# postprocessing requires no resize from "data loader"
return self._get_test_image_size_no_resize(is_train)
def test_tracing_with_postprocess(self):
self.cfg.merge_from_list(["RCNN_EXPORT.INCLUDE_POSTPROCESS", True])
self._test_export("torchscript@tracing", compare_match=True)
if __name__ == "__main__":
unittest.main()
| d2go-main | tests/modeling/test_meta_arch_rcnn.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import numpy as np
import torch
from detectron2.layers import nms as box_nms
class TestNMS(unittest.TestCase):
def test_nms_cpu(self):
"""Match unit test UtilsNMSTest.TestNMS in
caffe2/operators/generate_proposals_op_util_nms_test.cc
"""
inputs = (
np.array(
[
10,
10,
50,
60,
0.5,
11,
12,
48,
60,
0.7,
8,
9,
40,
50,
0.6,
100,
100,
150,
140,
0.9,
99,
110,
155,
139,
0.8,
]
)
.astype(np.float32)
.reshape(-1, 5)
)
boxes = torch.from_numpy(inputs[:, :4])
scores = torch.from_numpy(inputs[:, 4])
test_thresh = [0.1, 0.3, 0.5, 0.8, 0.9]
gt_indices = [[1, 3], [1, 3], [1, 3], [1, 2, 3, 4], [0, 1, 2, 3, 4]]
for thresh, gt_index in zip(test_thresh, gt_indices):
keep_indices = box_nms(boxes, scores, thresh)
keep_indices = np.sort(keep_indices)
np.testing.assert_array_equal(keep_indices, np.array(gt_index))
def test_nms1_cpu(self):
"""Match unit test UtilsNMSTest.TestNMS1 in
caffe2/operators/generate_proposals_op_util_nms_test.cc
"""
boxes = torch.from_numpy(
np.array(
[
[350.9821, 161.8200, 369.9685, 205.2372],
[250.5236, 154.2844, 274.1773, 204.9810],
[471.4920, 160.4118, 496.0094, 213.4244],
[352.0421, 164.5933, 366.4458, 205.9624],
[166.0765, 169.7707, 183.0102, 232.6606],
[252.3000, 183.1449, 269.6541, 210.6747],
[469.7862, 162.0192, 482.1673, 187.0053],
[168.4862, 174.2567, 181.7437, 232.9379],
[470.3290, 162.3442, 496.4272, 214.6296],
[251.0450, 155.5911, 272.2693, 203.3675],
[252.0326, 154.7950, 273.7404, 195.3671],
[351.7479, 161.9567, 370.6432, 204.3047],
[496.3306, 161.7157, 515.0573, 210.7200],
[471.0749, 162.6143, 485.3374, 207.3448],
[250.9745, 160.7633, 264.1924, 206.8350],
[470.4792, 169.0351, 487.1934, 220.2984],
[474.4227, 161.9546, 513.1018, 215.5193],
[251.9428, 184.1950, 262.6937, 207.6416],
[252.6623, 175.0252, 269.8806, 213.7584],
[260.9884, 157.0351, 288.3554, 206.6027],
[251.3629, 164.5101, 263.2179, 202.4203],
[471.8361, 190.8142, 485.6812, 220.8586],
[248.6243, 156.9628, 264.3355, 199.2767],
[495.1643, 158.0483, 512.6261, 184.4192],
[376.8718, 168.0144, 387.3584, 201.3210],
[122.9191, 160.7433, 172.5612, 231.3837],
[350.3857, 175.8806, 366.2500, 205.4329],
[115.2958, 162.7822, 161.9776, 229.6147],
[168.4375, 177.4041, 180.8028, 232.4551],
[169.7939, 184.4330, 181.4767, 232.1220],
[347.7536, 175.9356, 355.8637, 197.5586],
[495.5434, 164.6059, 516.4031, 207.7053],
[172.1216, 194.6033, 183.1217, 235.2653],
[264.2654, 181.5540, 288.4626, 214.0170],
[111.7971, 183.7748, 137.3745, 225.9724],
[253.4919, 186.3945, 280.8694, 210.0731],
[165.5334, 169.7344, 185.9159, 232.8514],
[348.3662, 184.5187, 354.9081, 201.4038],
[164.6562, 162.5724, 186.3108, 233.5010],
[113.2999, 186.8410, 135.8841, 219.7642],
[117.0282, 179.8009, 142.5375, 221.0736],
[462.1312, 161.1004, 495.3576, 217.2208],
[462.5800, 159.9310, 501.2937, 224.1655],
[503.5242, 170.0733, 518.3792, 209.0113],
[250.3658, 195.5925, 260.6523, 212.4679],
[108.8287, 163.6994, 146.3642, 229.7261],
[256.7617, 187.3123, 288.8407, 211.2013],
[161.2781, 167.4801, 186.3751, 232.7133],
[115.3760, 177.5859, 163.3512, 236.9660],
[248.9077, 188.0919, 264.8579, 207.9718],
[108.1349, 160.7851, 143.6370, 229.6243],
[465.0900, 156.7555, 490.3561, 213.5704],
[107.5338, 173.4323, 141.0704, 235.2910],
]
).astype(np.float32)
)
scores = torch.from_numpy(
np.array(
[
0.1919,
0.3293,
0.0860,
0.1600,
0.1885,
0.4297,
0.0974,
0.2711,
0.1483,
0.1173,
0.1034,
0.2915,
0.1993,
0.0677,
0.3217,
0.0966,
0.0526,
0.5675,
0.3130,
0.1592,
0.1353,
0.0634,
0.1557,
0.1512,
0.0699,
0.0545,
0.2692,
0.1143,
0.0572,
0.1990,
0.0558,
0.1500,
0.2214,
0.1878,
0.2501,
0.1343,
0.0809,
0.1266,
0.0743,
0.0896,
0.0781,
0.0983,
0.0557,
0.0623,
0.5808,
0.3090,
0.1050,
0.0524,
0.0513,
0.4501,
0.4167,
0.0623,
0.1749,
]
).astype(np.float32)
)
gt_indices = np.array(
[
1,
6,
7,
8,
11,
12,
13,
14,
17,
18,
19,
21,
23,
24,
25,
26,
30,
32,
33,
34,
35,
37,
43,
44,
47,
50,
]
)
keep_indices = box_nms(boxes, scores, 0.5)
keep_indices = np.sort(keep_indices)
np.testing.assert_array_equal(keep_indices, gt_indices)
if __name__ == "__main__":
unittest.main()
| d2go-main | tests/modeling/test_nms.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import itertools
import unittest
import d2go.runner.default_runner as default_runner
import torch
from d2go.modeling import ema
from d2go.utils.testing import helper
class TestArch(torch.nn.Module):
def __init__(self, value=None, int_value=None):
super().__init__()
self.conv = torch.nn.Conv2d(3, 4, kernel_size=3, stride=1, padding=1)
self.bn = torch.nn.BatchNorm2d(4)
self.relu = torch.nn.ReLU(inplace=True)
self.avgpool = torch.nn.AdaptiveAvgPool2d((1, 1))
if value is not None:
self.set_const_weights(value, int_value)
def forward(self, x):
ret = self.conv(x)
ret = self.bn(ret)
ret = self.relu(ret)
ret = self.avgpool(ret)
return ret
def set_const_weights(self, value, int_value=None):
if int_value is None:
int_value = int(value)
for x in itertools.chain(self.parameters(), self.buffers()):
if x.dtype == torch.float32:
x.data.fill_(value)
else:
x.data.fill_(int_value)
def _compare_state_dict(model1, model2, abs_error=1e-3):
sd1 = model1.state_dict()
sd2 = model2.state_dict()
if len(sd1) != len(sd2):
return False
if set(sd1.keys()) != set(sd2.keys()):
return False
for name in sd1:
if sd1[name].dtype == torch.float32:
if torch.abs((sd1[name] - sd2[name])).max() > abs_error:
return False
elif (sd1[name] != sd2[name]).any():
return False
return True
class TestModelingModelEMA(unittest.TestCase):
def test_emastate(self):
model = TestArch()
state = ema.EMAState.FromModel(model)
# two for conv (conv.weight, conv.bias),
# five for bn (bn.weight, bn.bias, bn.running_mean, bn.running_var, bn.num_batches_tracked)
full_state = {
"conv.weight",
"conv.bias",
"bn.weight",
"bn.bias",
"bn.running_mean",
"bn.running_var",
"bn.num_batches_tracked",
}
self.assertEqual(len(state.state), 7)
self.assertTrue(set(state.state) == full_state)
for _, val in state.state.items():
self.assertFalse(val.requires_grad)
model1 = TestArch()
self.assertFalse(_compare_state_dict(model, model1))
state.apply_to(model1)
self.assertTrue(_compare_state_dict(model, model1))
# test ema state that excludes buffers and frozen parameters
model.conv.weight.requires_grad = False
state1 = ema.EMAState.FromModel(model, include_frozen=False)
# should exclude frozen parameter: conv.weight
self.assertTrue(full_state - set(state1.state) == {"conv.weight"})
state2 = ema.EMAState.FromModel(model, include_buffer=False)
# should exclude buffers: bn.running_mean, bn.running_var, bn.num_batches_tracked
self.assertTrue(
full_state - set(state2.state)
== {"bn.running_mean", "bn.running_var", "bn.num_batches_tracked"}
)
state3 = ema.EMAState.FromModel(
model, include_frozen=False, include_buffer=False
)
# should exclude frozen param + buffers: conv.weight, bn.running_mean, bn.running_var, bn.num_batches_tracked
self.assertTrue(set(state3.state) == {"conv.bias", "bn.weight", "bn.bias"})
def test_emastate_saveload(self):
model = TestArch()
state = ema.EMAState.FromModel(model)
model1 = TestArch()
self.assertFalse(_compare_state_dict(model, model1))
state1 = ema.EMAState()
state1.load_state_dict(state.state_dict())
state1.apply_to(model1)
self.assertTrue(_compare_state_dict(model, model1))
@helper.skip_if_no_gpu
def test_emastate_crossdevice(self):
model = TestArch()
model.cuda()
# state on gpu
state = ema.EMAState.FromModel(model)
self.assertEqual(state.device, torch.device("cuda:0"))
# target model on cpu
model1 = TestArch()
state.apply_to(model1)
self.assertEqual(next(model1.parameters()).device, torch.device("cpu"))
self.assertTrue(_compare_state_dict(copy.deepcopy(model).cpu(), model1))
# state on cpu
state1 = ema.EMAState.FromModel(model, device="cpu")
self.assertEqual(state1.device, torch.device("cpu"))
# target model on gpu
model2 = TestArch()
model2.cuda()
state1.apply_to(model2)
self.assertEqual(next(model2.parameters()).device, torch.device("cuda:0"))
self.assertTrue(_compare_state_dict(model, model2))
def test_ema_updater(self):
model = TestArch()
state = ema.EMAState()
updated_model = TestArch()
updater = ema.EMAUpdater(state, decay=0.0)
updater.init_state(model)
for _ in range(3):
cur = TestArch()
updater.update(cur)
state.apply_to(updated_model)
            # decay == 0.0: the EMA state always tracks the newest model's weights
self.assertTrue(_compare_state_dict(updated_model, cur))
updater = ema.EMAUpdater(state, decay=1.0)
updater.init_state(model)
for _ in range(3):
cur = TestArch()
updater.update(cur)
state.apply_to(updated_model)
            # decay == 1.0: the EMA state always keeps the initial model's weights
self.assertTrue(_compare_state_dict(updated_model, model))
def test_ema_updater_decay(self):
state = ema.EMAState()
updater = ema.EMAUpdater(state, decay=0.7)
updater.init_state(TestArch(1.0))
gt_val = 1.0
gt_val_int = 1
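        # mirror the EMA update rule applied by EMAUpdater: val = decay * val + (1 - decay) * new_val, with decay = 0.7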
for idx in range(3):
updater.update(TestArch(float(idx)))
updated_model = state.get_ema_model(TestArch())
gt_val = gt_val * 0.7 + float(idx) * 0.3
gt_val_int = int(gt_val_int * 0.7 + float(idx) * 0.3)
self.assertTrue(
_compare_state_dict(updated_model, TestArch(gt_val, gt_val_int))
)
class TestModelingModelEMAHook(unittest.TestCase):
def test_ema_hook(self):
runner = default_runner.Detectron2GoRunner()
cfg = runner.get_default_cfg()
cfg.MODEL.DEVICE = "cpu"
cfg.MODEL_EMA.ENABLED = True
# use new model weights
cfg.MODEL_EMA.DECAY = 0.0
cfg.MODEL_EMA.DECAY_WARM_UP_FACTOR = -1
model = TestArch()
ema.may_build_model_ema(cfg, model)
self.assertTrue(hasattr(model, "ema_state"))
ema_hook = ema.EMAHook(cfg, model)
ema_hook.before_train()
ema_hook.before_step()
model.set_const_weights(2.0)
ema_hook.after_step()
ema_hook.after_train()
ema_checkpointers = ema.may_get_ema_checkpointer(cfg, model)
self.assertEqual(len(ema_checkpointers), 1)
out_model = TestArch()
ema_checkpointers["ema_state"].apply_to(out_model)
self.assertTrue(_compare_state_dict(out_model, model))
| d2go-main | tests/modeling/test_modeling_ema.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import unittest
import torch
from d2go.runner import GeneralizedRCNNRunner
from detectron2.modeling import build_anchor_generator, build_backbone
from detectron2.modeling.proposal_generator import rpn
logger = logging.getLogger(__name__)
# overwrite configs if specified, otherwise default config is used
RPN_CFGS = {}
class TestRPNHeads(unittest.TestCase):
def test_build_rpn_heads(self):
"""Make sure rpn heads run"""
self.assertGreater(len(rpn.RPN_HEAD_REGISTRY._obj_map), 0)
for name, builder in rpn.RPN_HEAD_REGISTRY._obj_map.items():
logger.info("Testing {}...".format(name))
cfg = GeneralizedRCNNRunner.get_default_cfg()
if name in RPN_CFGS:
cfg.merge_from_file(RPN_CFGS[name])
backbone = build_backbone(cfg)
backbone_shape = backbone.output_shape()
rpn_input_shape = [backbone_shape[x] for x in cfg.MODEL.RPN.IN_FEATURES]
rpn_head = builder(cfg, rpn_input_shape)
in_channels = list(backbone_shape.values())[0].channels
num_anchors = build_anchor_generator(cfg, rpn_input_shape).num_cell_anchors[
0
]
N, C_in, H, W = 2, in_channels, 24, 32
input = torch.rand([N, C_in, H, W], dtype=torch.float32)
LAYERS = len(cfg.MODEL.RPN.IN_FEATURES)
out = rpn_head([input] * LAYERS)
self.assertEqual(len(out), 2)
logits, bbox_reg = out
for idx in range(LAYERS):
self.assertEqual(
logits[idx].shape,
torch.Size(
[input.shape[0], num_anchors, input.shape[2], input.shape[3]]
),
)
self.assertEqual(
bbox_reg[idx].shape,
torch.Size(
[
logits[idx].shape[0],
num_anchors * 4,
logits[idx].shape[2],
logits[idx].shape[3],
]
),
)
def test_build_rpn_heads_with_rotated_anchor_generator(self):
"""Make sure rpn heads work with rotated anchor generator"""
self.assertGreater(len(rpn.RPN_HEAD_REGISTRY._obj_map), 0)
for name, builder in rpn.RPN_HEAD_REGISTRY._obj_map.items():
logger.info("Testing {}...".format(name))
cfg = GeneralizedRCNNRunner.get_default_cfg()
if name in RPN_CFGS:
cfg.merge_from_file(RPN_CFGS[name])
cfg.MODEL.ANCHOR_GENERATOR.NAME = "RotatedAnchorGenerator"
backbone = build_backbone(cfg)
backbone_shape = backbone.output_shape()
rpn_input_shape = [backbone_shape[x] for x in cfg.MODEL.RPN.IN_FEATURES]
rpn_head = builder(cfg, rpn_input_shape)
in_channels = list(backbone_shape.values())[0].channels
anchor_generator = build_anchor_generator(cfg, rpn_input_shape)
num_anchors = anchor_generator.num_cell_anchors[0]
box_dim = anchor_generator.box_dim
N, C_in, H, W = 2, in_channels, 24, 32
input = torch.rand([N, C_in, H, W], dtype=torch.float32)
LAYERS = len(cfg.MODEL.RPN.IN_FEATURES)
out = rpn_head([input] * LAYERS)
self.assertEqual(len(out), 2)
logits, bbox_reg = out
for idx in range(LAYERS):
self.assertEqual(
logits[idx].shape,
torch.Size(
[input.shape[0], num_anchors, input.shape[2], input.shape[3]]
),
)
self.assertEqual(
bbox_reg[idx].shape,
torch.Size(
[
logits[idx].shape[0],
num_anchors * box_dim,
logits[idx].shape[2],
logits[idx].shape[3],
]
),
)
if __name__ == "__main__":
unittest.main()
| d2go-main | tests/modeling/test_rpn_heads.py |
 | d2go-main | tests/modeling/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import numpy as np
import torch
from d2go.utils.testing import rcnn_helper as rh
from detectron2.structures import Boxes
class TestRCNNHelper(unittest.TestCase):
def test_get_instances_from_image(self):
boxes = Boxes(torch.Tensor([[50, 40, 100, 80], [150, 60, 200, 120]]))
gt_kpts = torch.Tensor([75, 60, 1.0] * 21 + [175, 90, 1.0] * 21).reshape(
2, 21, 3
)
batched_inputs = rh.get_batched_inputs(2, boxes=boxes)
instances = rh.get_detected_instances_from_image(batched_inputs)
self.assertEqual(len(instances), 2)
self.assertArrayEqual(instances[0].pred_boxes.tensor, boxes.tensor)
self.assertArrayEqual(instances[0].pred_keypoints, gt_kpts)
def test_get_instances_from_image_scale_image(self):
H, W = 398, 224
all_boxes = Boxes(torch.Tensor([[50, 40, 100, 80], [150, 60, 200, 120]]))
image = rh.get_batched_inputs(1, (H, W), (H, W), all_boxes)[0]["image"]
boxes = rh.get_detected_instances_from_image([{"image": image}])[0].pred_boxes
self.assertArrayEqual(boxes.tensor, all_boxes.tensor)
# scale image by 0.5
scale_image = torch.nn.functional.interpolate(
torch.unsqueeze(image, 0),
scale_factor=(0.5, 0.5),
mode="bilinear",
align_corners=False,
recompute_scale_factor=False,
)[0]
sub_boxes = rh.get_detected_instances_from_image([{"image": scale_image}])[
0
].pred_boxes
self.assertArrayEqual(sub_boxes.tensor, [[25, 20, 50, 40], [75, 30, 100, 60]])
# scale image by 0.75
scale_image = torch.nn.functional.interpolate(
torch.unsqueeze(image, 0),
scale_factor=(0.75, 0.75),
mode="bilinear",
align_corners=False,
recompute_scale_factor=False,
)[0]
sub_boxes = rh.get_detected_instances_from_image([{"image": scale_image}])[
0
].pred_boxes
        # exact (unquantized) boxes: [[37.5, 30, 75, 60], [112.5, 45, 150, 90]]
self.assertArrayEqual(sub_boxes.tensor, [[37, 30, 75, 60], [112, 45, 150, 90]])
def test_mock_rcnn_inference(self):
image_size = (1920, 1080)
resize_size = (398, 224)
scale_xy = (1080.0 / 224, 1920.0 / 398)
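        # (x, y) factors that post-processing uses to map boxes from the resized input back to the original image size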
gt_boxes = Boxes(torch.Tensor([[50, 40, 100, 80], [150, 60, 200, 120]]))
gt_kpts = torch.Tensor([75, 60, 1.0] * 21 + [175, 90, 1.0] * 21).reshape(
2, 21, 3
)
# create inputs
batched_inputs = rh.get_batched_inputs(2, image_size, resize_size, gt_boxes)
# create model
model = rh.MockRCNNInference(image_size, resize_size)
# run without post processing
det_instances = model(batched_inputs, None, do_postprocess=False)
self.assertArrayAllClose(
det_instances[0].pred_boxes.tensor,
gt_boxes.tensor,
atol=1e-4,
)
self.assertArrayAllClose(
det_instances[0].pred_keypoints,
gt_kpts,
atol=1e-4,
)
# run with post processing
det_instances = model(batched_inputs, None, do_postprocess=True)
gt_boxes_scaled = gt_boxes.clone()
gt_boxes_scaled.scale(*scale_xy)
gt_kpts_scaled = torch.Tensor(
[75 * scale_xy[0], 60 * scale_xy[1], 1.0] * 21
+ [175 * scale_xy[0], 90 * scale_xy[1], 1.0] * 21
).reshape(2, 21, 3)
self.assertArrayAllClose(
det_instances[0]["instances"].pred_boxes.tensor,
gt_boxes_scaled.tensor,
atol=1e-4,
)
self.assertArrayAllClose(
det_instances[0]["instances"].pred_keypoints,
gt_kpts_scaled,
atol=1e-4,
)
def assertArrayEqual(self, a1, a2):
self.assertTrue(np.array_equal(a1, a2))
def assertArrayAllClose(self, a1, a2, rtol=1.0e-5, atol=1.0e-8):
self.assertTrue(np.allclose(a1, a2, rtol=rtol, atol=atol))
| d2go-main | tests/modeling/test_rcnn_helper.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import shutil
import tempfile
import unittest
import torch
from d2go.export.exporter import convert_and_export_predictor
from d2go.runner import Detectron2GoRunner
from mobile_cv.predictor.api import create_predictor
def _get_batch(height, width, is_train):
def _get_frame():
random_image = torch.rand(3, height, width).to(torch.float32)
ret = {"image": random_image}
if is_train:
mask_size = (height, width)
random_mask = torch.randint(low=0, high=2, size=mask_size).to(torch.int64)
ret["sem_seg"] = random_mask
return ret
batch_size = 2 if is_train else 1
return [
{"filename": "some_file", "width": 100, "height": 100, **_get_frame()}
for _ in range(batch_size)
]
def _get_data_loader(height, width, is_train):
inputs = _get_batch(height, width, is_train)
def get_data_loader():
while True:
yield inputs
return get_data_loader()
def _get_input_dim(model):
h = w = max(model.backbone.size_divisibility, 1)
return h, w
class BaseSemanticSegTestCase:
class TemplateTestCase(unittest.TestCase):
def setUp(self):
self.test_dir = tempfile.mkdtemp(prefix="test_meta_arch_semantic_seg_")
self.addCleanup(shutil.rmtree, self.test_dir)
runner = Detectron2GoRunner()
self.cfg = runner.get_default_cfg()
self.setup_custom_test()
self.cfg.merge_from_list(["MODEL.DEVICE", "cpu"])
self.test_model = runner.build_model(self.cfg, eval_only=True)
def setup_custom_test(self):
raise NotImplementedError()
def test_inference(self):
h, w = _get_input_dim(self.test_model)
inputs = _get_batch(h, w, False)
with torch.no_grad():
self.test_model(inputs)
def test_train(self):
h, w = _get_input_dim(self.test_model)
inputs = _get_batch(h, w, True)
self.test_model.train()
loss_dict = self.test_model(inputs)
losses = sum(loss_dict.values())
losses.backward()
def _test_export(self, predictor_type, compare_match=True):
h, w = _get_input_dim(self.test_model)
dl = _get_data_loader(h, w, False)
inputs = next(iter(dl))
output_dir = os.path.join(self.test_dir, "test_export")
predictor_path = convert_and_export_predictor(
self.cfg, self.test_model, predictor_type, output_dir, dl
)
predictor = create_predictor(predictor_path)
            predictor_outputs = predictor(inputs)
            self.assertEqual(len(predictor_outputs), len(inputs))
with torch.no_grad():
pytorch_outputs = self.test_model(inputs)
self.assertEqual(len(pytorch_outputs), len(inputs))
if compare_match:
for predictor_output, pytorch_output in zip(
                    predictor_outputs, pytorch_outputs
):
torch.testing.assert_close(
predictor_output["sem_seg"], pytorch_output["sem_seg"]
)
class TestR50FPN(BaseSemanticSegTestCase.TemplateTestCase):
def setup_custom_test(self):
self.cfg.merge_from_file("detectron2://Misc/semantic_R_50_FPN_1x.yaml")
# discard pretrained backbone weights
self.cfg.merge_from_list(["MODEL.WEIGHTS", ""])
def test_export_torchscript(self):
self._test_export("torchscript", compare_match=True)
| d2go-main | tests/modeling/test_meta_arch_semantic_seg.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import unittest
from typing import List
import d2go.runner.default_runner as default_runner
import torch
from d2go.config import CfgNode
from d2go.modeling import modeling_hook as mh
from d2go.modeling.api import build_d2go_model, D2GoModelBuildResult
from d2go.registry.builtin import META_ARCH_REGISTRY, MODELING_HOOK_REGISTRY
@META_ARCH_REGISTRY.register()
class TestArch(torch.nn.Module):
def __init__(self, cfg):
super().__init__()
def forward(self, x):
return x * 2
# create a wrapper of the model that add 1 to the output
class PlusOneWrapper(torch.nn.Module):
def __init__(self, model: torch.nn.Module):
super().__init__()
self.model = model
def forward(self, x):
return self.model(x) + 1
@MODELING_HOOK_REGISTRY.register()
class PlusOneHook(mh.ModelingHook):
def __init__(self, cfg):
super().__init__(cfg)
def apply(self, model: torch.nn.Module) -> torch.nn.Module:
return PlusOneWrapper(model)
def unapply(self, model: torch.nn.Module) -> torch.nn.Module:
assert isinstance(model, PlusOneWrapper)
return model.model
# create a wrapper of the model that add 1 to the output
class TimesTwoWrapper(torch.nn.Module):
def __init__(self, model: torch.nn.Module):
super().__init__()
self.model = model
def forward(self, x):
return self.model(x) * 2
@MODELING_HOOK_REGISTRY.register()
class TimesTwoHook(mh.ModelingHook):
def __init__(self, cfg):
super().__init__(cfg)
def apply(self, model: torch.nn.Module) -> torch.nn.Module:
return TimesTwoWrapper(model)
def unapply(self, model: torch.nn.Module) -> torch.nn.Module:
assert isinstance(model, TimesTwoWrapper)
return model.model
class TestModelingHook(unittest.TestCase):
def test_modeling_hook_simple(self):
model = TestArch(None)
hook = PlusOneHook(None)
model_with_hook = hook.apply(model)
self.assertEqual(model_with_hook(2), 5)
original_model = hook.unapply(model_with_hook)
self.assertEqual(model, original_model)
def test_modeling_hook_cfg(self):
"""Create model with modeling hook using build_model"""
cfg = CfgNode()
cfg.MODEL = CfgNode()
cfg.MODEL.DEVICE = "cpu"
cfg.MODEL.META_ARCHITECTURE = "TestArch"
cfg.MODEL.MODELING_HOOKS = ["PlusOneHook", "TimesTwoHook"]
model_info: D2GoModelBuildResult = build_d2go_model(cfg)
model: torch.nn.Module = model_info.model
modeling_hooks: List[mh.ModelingHook] = model_info.modeling_hooks
self.assertEqual(model(2), 10)
self.assertEqual(len(modeling_hooks), 2)
self.assertTrue(hasattr(model, "_modeling_hooks"))
self.assertTrue(hasattr(model, "unapply_modeling_hooks"))
orig_model = model.unapply_modeling_hooks()
self.assertIsInstance(orig_model, TestArch)
self.assertEqual(orig_model(2), 4)
def test_modeling_hook_runner(self):
"""Create model with modeling hook from runner"""
runner = default_runner.Detectron2GoRunner()
cfg = runner.get_default_cfg()
cfg.MODEL.DEVICE = "cpu"
cfg.MODEL.META_ARCHITECTURE = "TestArch"
cfg.MODEL.MODELING_HOOKS = ["PlusOneHook", "TimesTwoHook"]
model = runner.build_model(cfg)
self.assertEqual(model(2), 10)
self.assertTrue(hasattr(model, "_modeling_hooks"))
self.assertTrue(hasattr(model, "unapply_modeling_hooks"))
orig_model = model.unapply_modeling_hooks()
self.assertIsInstance(orig_model, TestArch)
self.assertEqual(orig_model(2), 4)
default_runner._close_all_tbx_writers()
def test_modeling_hook_copy(self):
"""Create model with modeling hook, the model could be copied"""
cfg = CfgNode()
cfg.MODEL = CfgNode()
cfg.MODEL.DEVICE = "cpu"
cfg.MODEL.META_ARCHITECTURE = "TestArch"
cfg.MODEL.MODELING_HOOKS = ["PlusOneHook", "TimesTwoHook"]
model_info: D2GoModelBuildResult = build_d2go_model(cfg)
model: torch.nn.Module = model_info.model
modeling_hooks: List[mh.ModelingHook] = model_info.modeling_hooks
self.assertEqual(model(2), 10)
self.assertEqual(len(modeling_hooks), 2)
model_copy = copy.deepcopy(model)
orig_model = model.unapply_modeling_hooks()
self.assertIsInstance(orig_model, TestArch)
self.assertEqual(orig_model(2), 4)
orig_model_copy = model_copy.unapply_modeling_hooks()
self.assertEqual(orig_model_copy(2), 4)
| d2go-main | tests/modeling/test_modeling_meta_arch_modeling_hook.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import d2go.data.transforms.box_utils as bu
import d2go.modeling.image_pooler as image_pooler
import numpy as np
import torch
from d2go.utils.testing import rcnn_helper as rh
from detectron2.structures import Boxes
class TestModelingImagePooler(unittest.TestCase):
def test_image_pooler(self):
H, W = 8, 6
image = torch.zeros(3, H, W)
# xyxy
boxes = torch.Tensor([[2, 3, 5, 7]])
image[0, 3:7, 2:5] = 1
image[1, 3:7, 2:5] = 2
image[2, 3:7, 2:5] = 4
img_pooler = image_pooler.ImagePooler(resize_short=6, resize_max=12).eval()
pooled_img, pooled_box, transforms = img_pooler(image, boxes)
# check pooled images
self.assertEqual(pooled_img.shape, torch.Size([3, 8, 6]))
self.assertArrayEqual(torch.unique(pooled_img[0, :, :]), [1])
self.assertArrayEqual(torch.unique(pooled_img[1, :, :]), [2])
self.assertArrayEqual(torch.unique(pooled_img[2, :, :]), [4])
# check pooled boxes, in xyxy format
self.assertArrayEqual(pooled_box, [[0, 0, 6, 8]])
# inverse of transforms
trans_inv = transforms.inverse()
# inverse of boxes, xyxy
inversed_box = trans_inv.apply_box(pooled_box)
self.assertArrayEqual(inversed_box, boxes)
pooled_sub_box = np.array([[2, 2, 4, 6]])
inversed_sub_box = trans_inv.apply_box(pooled_sub_box)
self.assertArrayEqual(inversed_sub_box, [[3, 4, 4, 6]])
def test_image_pooler_scale_box(self):
H, W = 8, 6
image = torch.zeros(3, H, W)
# xyxy
boxes = torch.Tensor([[2, 3, 5, 7]])
image[0, 3:7, 2:5] = 1
image[1, 3:7, 2:5] = 2
image[2, 3:7, 2:5] = 4
img_pooler = image_pooler.ImagePooler(
resize_type=None, box_scale_factor=4.0
).eval()
pooled_img, pooled_box, transforms = img_pooler(image, boxes)
# check pooled images
self.assertEqual(pooled_img.shape, torch.Size([3, 8, 6]))
self.assertArrayEqual(pooled_img, image)
# check pooled boxes, in xyxy format, the box before scaling
self.assertArrayEqual(pooled_box, [[2, 3, 5, 7]])
def test_image_pooler_scale_box_large_crop_only(self):
"""Crop bbox"""
H, W = 398, 224
all_boxes = Boxes(torch.Tensor([[50, 40, 100, 80], [150, 60, 200, 120]]))
image = rh.get_batched_inputs(1, (H, W), (H, W), all_boxes)[0]["image"]
boxes = bu.get_box_union(all_boxes)
self.assertArrayEqual(boxes.tensor, [[50, 40, 200, 120]])
img_pooler = image_pooler.ImagePooler(
resize_type=None, box_scale_factor=1.0
).eval()
pooled_img, pooled_box, transforms = img_pooler(image, boxes.tensor)
self.assertEqual(pooled_img.shape, torch.Size([3, 80, 150]))
sub_boxes = rh.get_detected_instances_from_image([{"image": pooled_img}])[
0
].pred_boxes
self.assertArrayEqual(sub_boxes.tensor, [[0, 0, 50, 40], [100, 20, 150, 80]])
def test_image_pooler_scale_box_large_crop_and_scale(self):
"""Crop bbox that is scaled"""
H, W = 398, 224
all_boxes = Boxes(torch.Tensor([[50, 40, 100, 80], [150, 60, 200, 120]]))
image = rh.get_batched_inputs(1, (H, W), (H, W), all_boxes)[0]["image"]
boxes = bu.get_box_union(all_boxes)
img_pooler = image_pooler.ImagePooler(
resize_type=None, box_scale_factor=1.2
).eval()
pooled_img, pooled_box, transforms = img_pooler(image, boxes.tensor)
self.assertEqual(pooled_img.shape, torch.Size([3, 96, 180]))
# bbox with scaling in the original space
orig_crop_box = transforms.inverse().apply_box(
[0, 0, pooled_img.shape[2], pooled_img.shape[1]]
)
self.assertArrayEqual(orig_crop_box, [[35, 32, 215, 128]])
sub_boxes = rh.get_detected_instances_from_image([{"image": pooled_img}])[
0
].pred_boxes
# gt_offset_xy = (50 - 35 = 15, 40 - 32 = 8)
self.assertArrayEqual(sub_boxes.tensor, [[15, 8, 65, 48], [115, 28, 165, 88]])
def test_image_pooler_scale_box_large_crop_scale_and_resize(self):
"""Crop bbox that is scaled, resize the cropped box"""
H, W = 398, 224
all_boxes = Boxes(torch.Tensor([[50, 40, 100, 80], [150, 60, 200, 120]]))
image = rh.get_batched_inputs(1, (H, W), (H, W), all_boxes)[0]["image"]
boxes = bu.get_box_union(all_boxes)
img_pooler = image_pooler.ImagePooler(
resize_type="resize_shortest",
resize_short=48,
resize_max=180,
box_scale_factor=1.2,
).eval()
pooled_img, pooled_box, transforms = img_pooler(image, boxes.tensor)
self.assertEqual(pooled_img.shape, torch.Size([3, 48, 90]))
# bbox with scaling in the original space
orig_crop_box = transforms.inverse().apply_box(
[0, 0, pooled_img.shape[2], pooled_img.shape[1]]
)
self.assertArrayEqual(orig_crop_box, [[35, 32, 215, 128]])
# bbox without scaling in the original space
orig_boxes = transforms.inverse().apply_box(pooled_box)
self.assertArrayEqual(orig_boxes, boxes.tensor)
sub_boxes = rh.get_detected_instances_from_image([{"image": pooled_img}])[
0
].pred_boxes
        # exact (unquantized) boxes: [[7.5, 4, 32.5, 24], [57.5, 14, 82.5, 44]]
self.assertArrayEqual(sub_boxes.tensor, [[7, 4, 33, 24], [57, 14, 83, 44]])
def assertArrayEqual(self, a1, a2):
self.assertTrue(np.array_equal(a1, a2))
| d2go-main | tests/modeling/test_modeling_image_pooler.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import numpy as np
import torch
from d2go.modeling.kmeans_anchors import (
add_kmeans_anchors_cfg,
compute_kmeans_anchors,
compute_kmeans_anchors_hook,
)
from d2go.runner import GeneralizedRCNNRunner
from d2go.utils.testing.data_loader_helper import register_toy_coco_dataset
from detectron2.data import DatasetCatalog, DatasetFromList, MapDataset
from detectron2.engine.train_loop import SimpleTrainer
from torch.utils.data.sampler import BatchSampler, Sampler
class IntervalSampler(Sampler):
def __init__(self, size: int, interval: int):
self._local_indices = range(0, size, interval)
def __iter__(self):
yield from self._local_indices
def __len__(self):
return len(self._local_indices)
def build_sequence_loader(cfg, dataset_name, mapper, total_samples, batch_size=1):
"""
Similar to `build_detection_test_loader` in the way that its sampler
samples dataset_dicts in order and only loops once.
"""
dataset_dicts = DatasetCatalog.get(dataset_name)
dataset = DatasetFromList(dataset_dicts)
dataset = MapDataset(dataset, mapper)
interval = max(1, int(len(dataset) / total_samples))
sampler = IntervalSampler(len(dataset), interval)
batch_sampler = BatchSampler(sampler, batch_size, drop_last=False)
def _trivial_batch_collator(batch):
return batch
data_loader = torch.utils.data.DataLoader(
dataset,
num_workers=cfg.DATALOADER.NUM_WORKERS,
batch_sampler=batch_sampler,
collate_fn=_trivial_batch_collator,
)
return data_loader
class TestKmeansAnchors(unittest.TestCase):
def setUp(self):
self.runner = GeneralizedRCNNRunner()
def _get_default_cfg(self):
cfg = self.runner.get_default_cfg()
add_kmeans_anchors_cfg(cfg)
return cfg
@unittest.skip("This can only run locally and takes significant of time")
def test_matching_previous_results(self):
cfg = self._get_default_cfg()
cfg.INPUT.MIN_SIZE_TRAIN = (144,)
cfg.MODEL.KMEANS_ANCHORS.KMEANS_ANCHORS_ON = True
cfg.MODEL.KMEANS_ANCHORS.NUM_CLUSTERS = 10
cfg.MODEL.KMEANS_ANCHORS.NUM_TRAINING_IMG = 512
cfg.MODEL.KMEANS_ANCHORS.DATASETS = ()
# NOTE: create a data loader that samples exact the same as previous
# implementation. In D2Go, we will rely on the train loader instead.
# NOTE: in order to load OV580_XRM dataset, change the IM_DIR to:
# "/mnt/vol/gfsai-east/aml/mobile-vision//dataset/oculus/hand_tracking//torch/Segmentation/OV580_XRM_640x480_V3_new_rerun/images" # noqa
data_loader = build_sequence_loader(
cfg,
# dataset_name="coco_2014_valminusminival",
# dataset_name="OV580_XRM_640x480_V3_train",
dataset_name="OV580_XRM_640x480_V3_heldOut_small_512",
mapper=self.runner.get_mapper(cfg, is_train=True),
total_samples=cfg.MODEL.KMEANS_ANCHORS.NUM_TRAINING_IMG,
batch_size=3,
)
kmeans_anchors = compute_kmeans_anchors(
cfg, data_loader, sort_by_area=False, _stride=16, _legacy_plus_one=True
)
# Taken from D9849940
reference_anchors = np.array(
[
[-15.33554182, -15.29361029, 31.33554182, 31.29361029], # noqa
[-9.34156693, -9.32553548, 25.34156693, 25.32553548], # noqa
[-6.03052776, -6.02034167, 22.03052776, 22.02034167], # noqa
[-2.25951741, -2.182888, 18.25951741, 18.182888], # noqa
[-18.93553378, -18.93553403, 34.93553378, 34.93553403], # noqa
[-12.69068356, -12.73989029, 28.69068356, 28.73989029], # noqa
[-24.73489189, -24.73489246, 40.73489189, 40.73489246], # noqa
[-4.06014466, -4.06014469, 20.06014466, 20.06014469], # noqa
[-7.61036119, -7.60467538, 23.61036119, 23.60467538], # noqa
[-10.88200579, -10.87634414, 26.88200579, 26.87634414], # noqa
]
)
np.testing.assert_allclose(kmeans_anchors, reference_anchors, atol=1e-6)
def test_build_model(self):
cfg = self._get_default_cfg()
cfg.INPUT.MIN_SIZE_TRAIN = (60,)
cfg.MODEL.KMEANS_ANCHORS.KMEANS_ANCHORS_ON = True
cfg.MODEL.KMEANS_ANCHORS.NUM_CLUSTERS = 3
cfg.MODEL.KMEANS_ANCHORS.NUM_TRAINING_IMG = 5
cfg.MODEL.KMEANS_ANCHORS.DATASETS = ("toy_dataset",)
cfg.MODEL.DEVICE = "cpu"
cfg.MODEL.ANCHOR_GENERATOR.NAME = "KMeansAnchorGenerator"
with register_toy_coco_dataset(
"toy_dataset",
image_size=(80, 60), # w, h
num_images=cfg.MODEL.KMEANS_ANCHORS.NUM_TRAINING_IMG,
):
model = self.runner.build_model(cfg)
trainer = SimpleTrainer(model, data_loader=[], optimizer=None)
trainer_hooks = [compute_kmeans_anchors_hook(self.runner, cfg)]
trainer.register_hooks(trainer_hooks)
trainer.before_train()
anchor_generator = model.proposal_generator.anchor_generator
cell_anchors = list(anchor_generator.cell_anchors)
gt_anchors = np.array(
[
[-20, -15, 20, 15] # toy_dataset's bbox is half size of image
for _ in range(cfg.MODEL.KMEANS_ANCHORS.NUM_CLUSTERS)
]
)
np.testing.assert_allclose(cell_anchors[0], gt_anchors)
if __name__ == "__main__":
unittest.main()
| d2go-main | tests/modeling/test_kmeans_anchors.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
from typing import List
from unittest import mock
import numpy as np
import torch
import torch.nn as nn
from d2go.config import CfgNode
from d2go.modeling import modeling_hook as mh
from d2go.modeling.distillation import (
_build_teacher,
_set_device,
BaseDistillationHelper,
CachedLayer,
compute_layer_losses,
DefaultLossCombiner,
DistillationModelingHook,
DomainAdaptation,
ExampleDistillationHelper,
get_default_kd_image_classification_layer_losses,
KnowledgeDistillation,
LabelDistillation,
LayerLossMetadata,
NoopPseudoLabeler,
PseudoLabeler,
record_layers,
register_layer_losses_and_to_device,
RelabelTargetInBatch,
set_cache_dict,
unrecord_layers,
)
from d2go.registry.builtin import (
DISTILLATION_ALGORITHM_REGISTRY,
DISTILLATION_HELPER_REGISTRY,
META_ARCH_REGISTRY,
)
from d2go.runner.config_defaults import add_distillation_configs
from d2go.runner.default_runner import BaseRunner
from d2go.utils.testing import helper
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.utils.file_io import PathManager
from mobile_cv.common.misc.file_utils import make_temp_directory
from mobile_cv.common.misc.mixin import dynamic_mixin, remove_dynamic_mixin
class DivideInputBy2(nn.Module):
def forward(self, batched_inputs: List):
"""Divide all targets by 2 and batch output"""
return [x / 2.0 for x in batched_inputs]
class DivideInputDictBy2(nn.Module):
def forward(self, batched_inputs: List):
"""Divide all inputs by 2 and batch output
Should be used with a pseudo labeler that will unpack the
resulting tensor
"""
output = []
for d in batched_inputs:
output.append(d["input"] / 2.0)
return torch.stack(output)
class DivideInputBy2OutputDict(nn.Module):
def forward(self, batched_inputs: List):
"""Divide all targets by 2 and return dict output"""
return {i: x / 2.0 for i, x in enumerate(batched_inputs)}
class TimesTable5OutputDict(nn.Module):
def forward(self, batched_inputs: List):
"""Return first five entries of times table for each input with a dict output"""
return {i: [x * i for i in range(1, 6)] for i, x in enumerate(batched_inputs)}
class ConstantStrOutput(nn.Module):
def forward(self, batched_inputs: List):
"""Return some string"""
return "Testing!"
class AddOne(nn.Module):
def __init__(self):
super().__init__()
self.weight = nn.Parameter(torch.Tensor([1]))
def forward(self, x):
return x + self.weight
@property
def device(self):
return self.weight.device
class AddLayers(nn.Module):
def __init__(self):
super().__init__()
self.layer0 = AddOne()
self.layer1 = AddOne()
self.layer2 = AddOne()
def forward(self, x):
x = self.layer0(x)
x = self.layer1(x)
x = self.layer2(x)
if not self.training:
return x
return {"output": x}
@property
def device(self):
return self.layer0.weight.device
class SimpleAdd(nn.Module):
def forward(self, x, y):
return x + y
class SimpleMul(nn.Module):
def forward(self, x, y):
return x * y
class TestLabeler(PseudoLabeler):
def __init__(self, teacher):
self.teacher = teacher
def label(self, x):
return self.teacher(x)
@META_ARCH_REGISTRY.register()
class TestMetaArchAddRand(nn.Module):
def __init__(self, cfg):
super().__init__()
self.weight = nn.Parameter(torch.rand(1))
def forward(self, x):
return x + self.weight
@DISTILLATION_HELPER_REGISTRY.register()
class TestHelper(BaseDistillationHelper):
def get_pseudo_labeler(self):
"""Run teacher model on inputs"""
return TestLabeler(self.teacher)
def get_preprocess_student_input(self):
return lambda x: x + 1
def get_preprocess_teacher_input(self):
return lambda x: x + 2
def get_layer_losses(self, model=None):
return [
LayerLossMetadata(
loss=SimpleAdd(),
name="add",
layer0="layer0",
layer1="layer0",
),
LayerLossMetadata(
loss=SimpleMul(),
name="mul",
layer0="layer1",
layer1="layer1",
),
]
def get_combine_losses(self):
return lambda d: {
"output": d["output"] * 0.1,
"add": d["add"] * 0.5,
"mul": d["mul"] * 10.0,
}
class TestDAHelper(BaseDistillationHelper):
def get_preprocess_domain0_input(self):
return lambda x: x["real"]
def get_preprocess_domain1_input(self):
return lambda x: x["synthetic"]
def get_layer_losses(self, model=None):
return [
LayerLossMetadata(
loss=SimpleAdd(),
name="add",
layer0="layer0",
layer1="layer0",
)
]
def get_combine_losses(self):
return lambda d0, d1, da, ta: {
"real": d0["output"] * 0.1,
"synthetic": d1["output"] * 0.5,
"add": da["add"] * 10.0,
}
class Noop(nn.Module):
def forward(self, x):
return x
def _get_input_data(n: int = 2, use_input_target: bool = False, requires_grad=False):
"""Return input data, dict if use_input_target is specified"""
if not use_input_target:
return torch.randn(n, requires_grad=requires_grad)
return [
{
"input": torch.randn(1, requires_grad=requires_grad),
"target": torch.randn(1),
}
for _ in range(n)
]
def _get_default_cfg():
cfg = CfgNode()
cfg.MODEL = CfgNode()
cfg.MODEL.DEVICE = "cpu"
cfg.MODEL.META_ARCHITECTURE = "TestArch"
add_distillation_configs(cfg)
# model_ema.add_model_ema_configs(cfg)
cfg.DISTILLATION.ALGORITHM = "LabelDistillation"
cfg.DISTILLATION.HELPER = "BaseDistillationHelper"
cfg.DISTILLATION.TEACHER.TORCHSCRIPT_FNAME = ""
cfg.DISTILLATION.TEACHER.DEVICE = ""
return cfg
class TestDistillation(unittest.TestCase):
def test_add_distillation_configs(self):
"""Check default config"""
cfg = CfgNode()
add_distillation_configs(cfg)
self.assertTrue(isinstance(cfg.DISTILLATION.TEACHER, CfgNode))
# check teacher model config is clone of student model
self.assertEqual(cfg.DISTILLATION.TEACHER.CONFIG_FNAME, "")
def test_build_teacher_torchscript(self):
"""Check can build teacher using torchscript fname in config"""
# create torchscript
model = DivideInputBy2()
traced_model = torch.jit.trace(model, torch.randn(5))
with make_temp_directory("tmp") as output_dir:
fname = f"{output_dir}/tmp.pt"
torch.jit.save(traced_model, fname)
# create teacher
cfg = _get_default_cfg()
cfg.DISTILLATION.TEACHER.TORCHSCRIPT_FNAME = fname
teacher = _build_teacher(cfg)
batched_inputs = torch.randn(5)
gt = batched_inputs / 2.0
output = teacher(batched_inputs)
torch.testing.assert_close(torch.Tensor(output), gt)
@helper.skip_if_no_gpu
def test_build_teacher_torchscript_gpu(self):
"""Check teacher moved to cuda"""
model = AddOne()
traced_model = torch.jit.trace(model, torch.randn(5))
with make_temp_directory("tmp") as output_dir:
fname = f"{output_dir}/tmp.pt"
torch.jit.save(traced_model, fname)
# create teacher
cfg = _get_default_cfg()
cfg.MODEL.DEVICE = "cuda"
cfg.DISTILLATION.TEACHER.TORCHSCRIPT_FNAME = fname
teacher = _build_teacher(cfg)
batched_inputs = torch.randn(5).to("cuda")
gt = batched_inputs + torch.Tensor([1]).to("cuda")
output = teacher(batched_inputs)
torch.testing.assert_close(torch.Tensor(output), gt)
def test_build_teacher_config(self):
"""Check build pytorch model using config"""
# build model
cfg = _get_default_cfg()
cfg.MODEL.META_ARCHITECTURE = "TestMetaArchAddRand"
gt_model = BaseRunner().build_model(cfg)
with make_temp_directory("tmp") as output_dir:
# save model
checkpointer = DetectionCheckpointer(gt_model, save_dir=output_dir)
checkpointer.save("checkpoint")
cfg.MODEL.WEIGHTS = f"{output_dir}/checkpoint.pth"
config_fname = f"{output_dir}/config.yaml"
with PathManager.open(config_fname, "w") as f:
f.write(cfg.dump())
# load model and compare to gt
cfg.DISTILLATION.TEACHER.TYPE = "config"
cfg.DISTILLATION.TEACHER.CONFIG_FNAME = config_fname
model = _build_teacher(cfg)
self.assertEqual(gt_model.weight, model.weight)
def test_build_teacher_none(self):
"""Check that we can ignore building the teacher"""
# build model
cfg = _get_default_cfg()
cfg.MODEL.META_ARCHITECTURE = "TestMetaArchAddRand"
cfg.DISTILLATION.TEACHER.TYPE = "no_teacher"
model = _build_teacher(cfg)
self.assertTrue(isinstance(model, nn.Module))
def test_override_teacher_config_gpu_on_cpu(self):
"""Teacher cuda model can be run on cpu if specified in config"""
# build model where teacher is specified on gpu but user overrides cpu
cfg = _get_default_cfg()
cfg.MODEL.META_ARCHITECTURE = "TestMetaArchAddRand"
gt_model = BaseRunner().build_model(cfg)
with make_temp_directory("tmp") as output_dir:
# save model
checkpointer = DetectionCheckpointer(gt_model, save_dir=output_dir)
checkpointer.save("checkpoint")
cfg.MODEL.WEIGHTS = f"{output_dir}/checkpoint.pth"
cfg.MODEL.DEVICE = "cuda"
config_fname = f"{output_dir}/config.yaml"
with PathManager.open(config_fname, "w") as f:
f.write(cfg.dump())
# load model and compare to gt
cfg.DISTILLATION.TEACHER.TYPE = "config"
cfg.DISTILLATION.TEACHER.CONFIG_FNAME = config_fname
cfg.DISTILLATION.TEACHER.DEVICE = "cpu"
model = _build_teacher(cfg)
self.assertEqual(gt_model.weight, model.weight)
def test_set_device(self):
"""Check teacher device is set"""
# without attr
model = Noop()
self.assertFalse(hasattr(model, "device"))
device = torch.device("cpu")
# without property
model = _set_device(model, device)
self.assertEqual(model.device, device)
# with property
model = AddOne()
model = _set_device(model, device)
self.assertEqual(model.device, device)
def test_cached_layer_tensor(self):
"""Check cached layer saves layer output"""
model = AddOne()
cache = {}
dynamic_mixin(
model,
CachedLayer,
init_dict={"label": "test_layer", "cache": cache},
)
input = torch.randn(1)
output = model(input)
torch.testing.assert_close(output, cache["test_layer"])
def test_cached_layer_list(self):
"""Check cached layer saves list"""
model = DivideInputBy2()
cache = {}
dynamic_mixin(
model,
CachedLayer,
init_dict={"label": "test_layer", "cache": cache},
)
input = [torch.randn(1) for _ in range(2)]
output = model(input)
torch.testing.assert_close(output, cache["test_layer"])
def test_cached_layer_tuple(self):
"""Check cached layer saves list"""
model = DivideInputBy2()
cache = {}
dynamic_mixin(
model,
CachedLayer,
init_dict={"label": "test_layer", "cache": cache},
)
input = (torch.randn(1) for _ in range(2))
output = model(input)
torch.testing.assert_close(output, cache["test_layer"])
def test_cached_layer_dict(self):
"""Check cached layer saves dict"""
model = DivideInputBy2OutputDict()
cache = {}
dynamic_mixin(
model,
CachedLayer,
init_dict={"label": "test_layer", "cache": cache},
)
input = [torch.randn(1) for _ in range(2)]
output = model(input)
torch.testing.assert_close(output, cache["test_layer"])
def test_cached_layer_arbitrary(self):
"""Check cached layer saves arbitrary nested data structure"""
model = TimesTable5OutputDict()
cache = {}
dynamic_mixin(
model,
CachedLayer,
init_dict={"label": "test_layer", "cache": cache},
)
input = [torch.randn(1) for _ in range(2)]
output = model(input)
torch.testing.assert_close(output, cache["test_layer"])
def test_cached_layer_unsupported(self):
"""Check cached layer doesn't save unsupported data type like strings"""
model = ConstantStrOutput()
cache = {}
dynamic_mixin(
model,
CachedLayer,
init_dict={"label": "test_layer", "cache": cache},
)
input = [torch.randn(1) for _ in range(2)]
self.assertRaises(ValueError, model, input)
def test_record_layers(self):
"""Check we can record specified layer"""
model = AddLayers()
cache = record_layers(model, ["", "layer0", "layer1", "layer2"])
input = torch.Tensor([0])
output = model(input)
torch.testing.assert_close(cache["layer0"], torch.Tensor([1]))
torch.testing.assert_close(cache["layer1"], torch.Tensor([2]))
torch.testing.assert_close(cache["layer2"], torch.Tensor([3]))
torch.testing.assert_close(cache[""], output)
def test_unrecord_layers(self):
"""Check we can remove a recorded layer"""
model = AddLayers()
_ = record_layers(model, ["", "layer0", "layer1", "layer2"])
unrecord_layers(model, ["", "layer0"])
self.assertFalse(hasattr(model.layer0, "cache"))
def test_compute_layer_losses(self):
"""Check iterating over loss dicts"""
layer_losses = [
LayerLossMetadata(
loss=lambda x, y: x + y, name="add", layer0="l00", layer1="l10"
),
LayerLossMetadata(
loss=lambda x, y: x / y, name="div", layer0="l01", layer1="l11"
),
]
layer0_cache = {"l00": torch.randn(1), "l01": torch.randn(1)}
layer1_cache = {"l10": torch.randn(1), "l11": torch.randn(1)}
output = compute_layer_losses(layer_losses, layer0_cache, layer1_cache)
torch.testing.assert_close(
output["add"], layer0_cache["l00"] + layer1_cache["l10"]
)
torch.testing.assert_close(
output["div"], layer0_cache["l01"] / layer1_cache["l11"]
)
def test_set_cache_dict(self):
"""Check we can swap the cache dict used when recording layers"""
model = AddLayers()
cache = record_layers(model, ["", "layer0", "layer1", "layer2"])
new_cache = {}
set_cache_dict(model, new_cache)
input = torch.Tensor([0])
output = model(input)
self.assertEqual(cache, {})
torch.testing.assert_close(new_cache["layer0"], torch.Tensor([1]))
torch.testing.assert_close(new_cache["layer1"], torch.Tensor([2]))
torch.testing.assert_close(new_cache["layer2"], torch.Tensor([3]))
torch.testing.assert_close(new_cache[""], output)
def test_register_layer_losses(self):
"""Check losses can be registered to model"""
model = AddOne()
ll = [
LayerLossMetadata(
loss=SimpleAdd(),
name="mul",
layer0="layer1",
layer1="layer1",
),
]
registered_losses = register_layer_losses_and_to_device(ll, model)
self.assertTrue(hasattr(model, "mul"))
self.assertEqual(model.mul, registered_losses[0].loss)
@helper.skip_if_no_gpu
def test_register_layer_losses_and_to_device(self):
"""Check losses can be registered to model"""
model = AddOne()
model = model.to("cuda")
ll = [
LayerLossMetadata(
loss=AddOne(),
name="mul",
layer0="layer1",
layer1="layer1",
),
]
register_layer_losses_and_to_device(ll, model)
self.assertEqual(model.mul.device, model.device)
class TestPseudoLabeler(unittest.TestCase):
def test_noop(self):
"""Check noop"""
pseudo_labeler = NoopPseudoLabeler()
x = np.random.randn(1)
output = pseudo_labeler.label(x)
torch.testing.assert_close(x, output)
def test_relabeltargetinbatch(self):
"""Check target is relabed using teacher"""
teacher = DivideInputDictBy2()
teacher.eval()
teacher.device = torch.device("cpu")
relabeler = RelabelTargetInBatch(teacher=teacher)
batched_inputs = _get_input_data(n=2, use_input_target=True)
gt = [{"input": d["input"], "target": d["input"] / 2.0} for d in batched_inputs]
outputs = relabeler.label(batched_inputs)
torch.testing.assert_close(outputs, gt)
class TestDistillationHelper(unittest.TestCase):
def test_registry(self):
"""Check base class in registry"""
self.assertTrue("BaseDistillationHelper" in DISTILLATION_HELPER_REGISTRY)
def test_base_distillation_helper(self):
"""Check base distillation helper returns input as output"""
dh = BaseDistillationHelper(cfg=None, teacher=None)
pseudo_labeler = dh.get_pseudo_labeler()
self.assertTrue(isinstance(pseudo_labeler, NoopPseudoLabeler))
def test_example_distillation_helper(self):
"""Example distillation uses teacher to relabel targets"""
teacher = Noop()
dh = ExampleDistillationHelper(cfg=None, teacher=teacher)
pseudo_labeler = dh.get_pseudo_labeler()
self.assertTrue(isinstance(pseudo_labeler, RelabelTargetInBatch))
self.assertTrue(isinstance(pseudo_labeler.teacher, Noop))
class TestDistillationAlgorithm(unittest.TestCase):
class LabelDistillationNoop(LabelDistillation, Noop):
"""Distillation should be used with dynamic mixin so we create
a new class with mixin of a noop to test"""
pass
def test_registry(self):
"""Check distillation teacher in registry"""
for algorithm in [
"LabelDistillation",
"KnowledgeDistillation",
"DomainAdaptation",
]:
self.assertTrue(algorithm in DISTILLATION_ALGORITHM_REGISTRY)
def test_label_distillation_inference(self):
"""Check inference defaults to student
Use LabelDistillationNoop to set student model to noop
"""
batched_inputs = _get_input_data(n=2)
gt = batched_inputs.detach().clone()
model = self.LabelDistillationNoop()
model.dynamic_mixin_init(
distillation_helper=TestHelper(cfg=None, teacher=DivideInputBy2()),
)
model.eval()
output = model(batched_inputs)
np.testing.assert_array_equal(output, gt)
def test_label_distillation_training(self):
"""Check training uses pseudo labeler
Distillation teacher should run the teacher model on the inputs and
then pass to the noop
"""
batched_inputs = _get_input_data(n=2, requires_grad=True)
gt = [x / 2.0 for x in batched_inputs]
model = self.LabelDistillationNoop()
model.dynamic_mixin_init(
distillation_helper=TestHelper(cfg=None, teacher=DivideInputBy2()),
)
model.train()
output = model(batched_inputs)
torch.testing.assert_close(output, gt)
sum(output).backward()
torch.testing.assert_close(batched_inputs.grad, torch.Tensor([0.5, 0.5]))
def test_kd_inference(self):
"""Check inference defaults to student (and preprocessing)"""
distillation_helper = TestHelper(cfg=CfgNode(), teacher=AddLayers())
model = AddLayers()
dynamic_mixin(
model,
KnowledgeDistillation,
init_dict={"distillation_helper": distillation_helper},
)
model.eval()
input = torch.randn(1)
output = model(input)
torch.testing.assert_close(output, input + 4.0)
def test_kd_train(self):
"""Check train pass results in updated loss output"""
distillation_helper = TestHelper(cfg=CfgNode(), teacher=AddLayers())
model = AddLayers()
dynamic_mixin(
model,
KnowledgeDistillation,
init_dict={"distillation_helper": distillation_helper},
)
model.train()
input = torch.randn(1)
output = model(input)
torch.testing.assert_close(output["output"], (input + 4.0) * 0.1)
torch.testing.assert_close(output["add"], ((input + 2.0) + (input + 3.0)) * 0.5)
torch.testing.assert_close(output["mul"], (input + 3.0) * (input + 4.0) * 10.0)
def test_kd_remove_dynamic_mixin(self):
"""Check removing dynamic mixin removes cached layers"""
distillation_helper = TestHelper(cfg=CfgNode(), teacher=AddLayers())
model = AddLayers()
dynamic_mixin(
model,
KnowledgeDistillation,
init_dict={"distillation_helper": distillation_helper},
)
remove_dynamic_mixin(model)
for module in model.modules():
self.assertFalse(hasattr(module, "cache"))
def test_da_inference(self):
"""Check inference defaults to student (and preprocessing)"""
distillation_helper = TestDAHelper(cfg=CfgNode(), teacher=nn.Identity())
model = AddLayers()
dynamic_mixin(
model,
DomainAdaptation,
init_dict={"distillation_helper": distillation_helper},
)
model.eval()
input = {"real": torch.randn(1), "synthetic": torch.randn(1)}
output = model(input)
torch.testing.assert_close(output, input["real"] + 3.0)
def test_da_train(self):
"""Check train pass results in updated loss output"""
distillation_helper = TestDAHelper(cfg=CfgNode(), teacher=nn.Identity())
model = AddLayers()
dynamic_mixin(
model,
DomainAdaptation,
init_dict={"distillation_helper": distillation_helper},
)
model.train()
input = {"real": torch.randn(1), "synthetic": torch.randn(1)}
output = model(input)
self.assertEqual(set(output.keys()), {"real", "synthetic", "add"})
torch.testing.assert_close(output["real"], (input["real"] + 3.0) * 0.1)
torch.testing.assert_close(
output["synthetic"], (input["synthetic"] + 3.0) * 0.5
)
torch.testing.assert_close(
output["add"], ((input["real"] + 1.0) + (input["synthetic"] + 1.0)) * 10.0
)
def test_da_remove_dynamic_mixin(self):
"""Check removing dynamic mixin removes cached layers"""
distillation_helper = TestHelper(cfg=CfgNode(), teacher=nn.Identity())
model = AddLayers()
dynamic_mixin(
model,
DomainAdaptation,
init_dict={"distillation_helper": distillation_helper},
)
remove_dynamic_mixin(model)
for module in model.modules():
self.assertFalse(hasattr(module, "cache"))
class TestDistillationModelingHook(unittest.TestCase):
_build_teacher_ref = "d2go.modeling.distillation._build_teacher"
def test_exists(self):
"""Check that the hook is registered"""
self.assertTrue("DistillationModelingHook" in mh.MODELING_HOOK_REGISTRY)
def test_init(self):
"""Check that we can build hook"""
cfg = _get_default_cfg()
with mock.patch(self._build_teacher_ref):
DistillationModelingHook(cfg)
def test_apply(self):
"""Check new model has distillation methods"""
model = Noop()
model.test_attr = "12345"
cfg = _get_default_cfg()
cfg.DISTILLATION.HELPER = "TestHelper"
with mock.patch(self._build_teacher_ref):
hook = DistillationModelingHook(cfg)
hook.apply(model)
# set teacher manually to override _build_teacher
model.pseudo_labeler.teacher = DivideInputBy2()
# check distillation attrs
self.assertTrue(isinstance(model.distillation_helper, TestHelper))
self.assertEqual(model._original_model_class, Noop)
# check retains attrs
self.assertTrue(hasattr(model, "test_attr"))
self.assertEqual(model.test_attr, "12345")
# check inference uses the baseline model which is a noop
batched_inputs = _get_input_data(n=2)
model.eval()
gt = batched_inputs.detach().clone()
output = model(batched_inputs)
torch.testing.assert_close(output, gt)
# check training uses the pseudo labeler
model.train()
gt = [x / 2.0 for x in batched_inputs]
output = model(batched_inputs)
torch.testing.assert_close(output, gt)
def test_unapply(self):
"""Check removing distillation"""
model = Noop()
cfg = _get_default_cfg()
with mock.patch(self._build_teacher_ref):
hook = DistillationModelingHook(cfg)
hook.apply(model)
hook.unapply(model)
for distillation_attr in [
"distillation_helper",
"_original_model_class",
]:
self.assertFalse(hasattr(model, distillation_attr))
# check forward is the original noop
batched_inputs = _get_input_data(n=2)
gt = batched_inputs.detach().clone()
model.train()
output = model(batched_inputs)
torch.testing.assert_close(output, gt)
class TestDistillationMiscTests(unittest.TestCase):
def test_teacher_outside_updated_parameters(self):
"""
Check that teacher values are ignored when updating student
        The teacher can often be referenced in the mixed-in model. A common
        example is when the teacher is an attribute of the distillation
        helper.
          => DistillationModel.distillation_helper.teacher
        This raises the question of whether the teacher model will be affected
        by calls to the mixed-in model:
          DistillationModel.train() => does the teacher switch to training mode?
          setup_qat(DistillationModel) => will fusion occur on the teacher modules?
The answer to these questions should be no as we want the teacher to remain static
during training (unless specified). This is the case as long as teacher is an
attribute of a non-module class (e.g., distillation_helper). This is because
modules are registered in PyTorch as part of __setattr__. __setattr__ only checks
if the value is a module or parameter. If the value is an object
(e.g., distillation_helper) which contains modules, these modules are ignored.
https://pytorch.org/docs/stable/_modules/torch/nn/modules/module.html#Module.register_parameter
This unittest builds the teacher model and checks that only the student
parameter is registered.
"""
cfg = _get_default_cfg()
cfg.MODEL.META_ARCHITECTURE = "TestMetaArchAddRand"
prebuilt_teacher = BaseRunner().build_model(cfg)
with make_temp_directory("tmp") as output_dir:
checkpointer = DetectionCheckpointer(prebuilt_teacher, save_dir=output_dir)
checkpointer.save("checkpoint")
cfg.MODEL.WEIGHTS = f"{output_dir}/checkpoint.pth"
config_fname = f"{output_dir}/config.yaml"
with PathManager.open(config_fname, "w") as f:
f.write(cfg.dump())
cfg.DISTILLATION.TEACHER.TYPE = "config"
cfg.DISTILLATION.TEACHER.CONFIG_FNAME = config_fname
cfg.DISTILLATION.HELPER = "TestHelper"
cfg.MODEL.MODELING_HOOKS = ["DistillationModelingHook"]
distilled_model = BaseRunner().build_model(cfg)
self.assertEqual(len(list(distilled_model.parameters())), 1)
class TestDistillationDefaults(unittest.TestCase):
def test_kd_image_classification_layer_losses(self):
"""Check the default returns a list of layerlossmetadata"""
layer_losses = get_default_kd_image_classification_layer_losses()
self.assertTrue(isinstance(layer_losses, List))
self.assertTrue(isinstance(layer_losses[0], LayerLossMetadata))
def test_default_loss_combiner(self):
"""Check combiner multiplies loss by weights"""
weights = {"a": torch.randn(1), "b": torch.randn(1)}
combiner = DefaultLossCombiner(weights)
input = {"a": 1.0, "b": 10.0}
output = combiner(input)
torch.testing.assert_close(output["a"], input["a"] * weights["a"])
torch.testing.assert_close(output["b"], input["b"] * weights["b"])
| d2go-main | tests/modeling/test_modeling_distillation.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import numpy as np
import torch
from detectron2.modeling.box_regression import Box2BoxTransform
class TestBox2BoxTransform(unittest.TestCase):
def test_box2box_transform(self):
"""Match unit test UtilsBoxesTest.TestBboxTransformRandom in
caffe2/operators/generate_proposals_op_util_boxes_test.cc
"""
box2box_transform = Box2BoxTransform(weights=(1.0, 1.0, 1.0, 1.0))
bbox = torch.from_numpy(
np.array(
[
175.62031555,
20.91103172,
253.352005,
155.0145874,
169.24636841,
4.85241556,
228.8605957,
105.02092743,
181.77426147,
199.82876587,
192.88427734,
214.0255127,
174.36262512,
186.75761414,
296.19091797,
231.27906799,
22.73153877,
92.02596283,
135.5695343,
208.80291748,
]
)
.astype(np.float32)
.reshape(-1, 4)
)
deltas = torch.from_numpy(
np.array(
[
0.47861834,
0.13992102,
0.14961673,
0.71495209,
0.29915856,
-0.35664671,
0.89018666,
0.70815367,
-0.03852064,
0.44466892,
0.49492538,
0.71409376,
0.28052918,
0.02184832,
0.65289006,
1.05060139,
-0.38172557,
-0.08533806,
-0.60335309,
0.79052375,
]
)
.astype(np.float32)
.reshape(-1, 4)
)
gt_bbox = (
np.array(
[
206.949539,
-30.715202,
297.387665,
244.448486,
143.871216,
-83.342888,
290.502289,
121.053398,
177.430283,
198.666245,
196.295273,
228.703079,
152.251892,
145.431564,
387.215454,
274.594238,
5.062420,
11.040955,
66.328903,
269.686218,
]
)
.astype(np.float32)
.reshape(-1, 4)
)
        # Detectron2 removed the legacy "+1" box convention, so convert the Caffe2-style
        # reference boxes by adding 1 to the max coordinates
bbox[:, 2] += 1
bbox[:, 3] += 1
gt_bbox[:, 2] += 1
gt_bbox[:, 3] += 1
results = box2box_transform.apply_deltas(deltas, bbox)
np.testing.assert_allclose(results.detach().numpy(), gt_bbox, atol=1e-4)
if __name__ == "__main__":
unittest.main()
| d2go-main | tests/modeling/test_box2box_transform.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import torch.nn as nn
from d2go.model_zoo import model_zoo
class TestD2GoModelZoo(unittest.TestCase):
def test_model_zoo_pretrained(self):
configs = list(model_zoo._ModelZooUrls.CONFIG_PATH_TO_URL_SUFFIX.keys())
for cfgfile in configs:
model = model_zoo.get(cfgfile, trained=True)
self.assertTrue(isinstance(model, nn.Module))
if __name__ == "__main__":
unittest.main()
| d2go-main | tests/modeling/test_model_zoo.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import unittest
import torch
from d2go.runner.default_runner import GeneralizedRCNNRunner
from d2go.tools.exporter import main
from d2go.utils.testing.data_loader_helper import create_local_dataset
from d2go.utils.testing.rcnn_helper import get_quick_test_config_opts
from mobile_cv.common.misc.file_utils import make_temp_directory
from mobile_cv.common.misc.oss_utils import is_oss
def maskrcnn_export_caffe2_vs_torchvision_opset_format_example(self):
with make_temp_directory("export_demo") as tmp_dir:
# use a fake dataset for ci
dataset_name = create_local_dataset(tmp_dir, 5, 224, 224)
config_list = [
"DATASETS.TRAIN",
(dataset_name,),
"DATASETS.TEST",
(dataset_name,),
]
# START_WIKI_EXAMPLE_TAG
cfg = GeneralizedRCNNRunner.get_default_cfg()
cfg.merge_from_file("detectron2go://mask_rcnn_fbnetv3a_dsmask_C4.yaml")
cfg.merge_from_list(get_quick_test_config_opts())
cfg.merge_from_list(config_list)
# equivalent to running:
        # exporter.par --runner GeneralizedRCNNRunner --config-file config.yaml --predictor-types torchscript torchscript@c2_ops --output-dir tmp_dir
_ = main(
cfg,
tmp_dir,
GeneralizedRCNNRunner,
predictor_types=["torchscript@c2_ops", "torchscript"],
)
# the path can be fetched from the return of main, here just use hard-coded values
torchvision_ops_model = torch.jit.load(
os.path.join(tmp_dir, "torchscript", "model.jit")
)
caffe2_ops_model = torch.jit.load(
os.path.join(tmp_dir, "torchscript@c2_ops", "model.jit")
)
# Running inference using torchvision-style format
image = torch.zeros(1, 64, 96) # chw 3D tensor
# The exported model can run on both cpu/gpu
device = "cuda:0" if torch.cuda.is_available() else "cpu"
torchvision_ops_model = torchvision_ops_model.to(device)
torchvision_style_outputs = torchvision_ops_model(
image
) # suppose N instances are detected
# NOTE: the output are flattened tensors of the real output (which is a dict), they're
# ordered by the key in dict, which is deterministic for the given model, but it might
# be difficult to figure out just from model.jit file. The predictor_info.json from
# the same directory contains the `outputs_schema`, which indicate how the final output
# is constructed from flattened tensors.
(
pred_boxes, # torch.Size([N, 4])
pred_classes, # torch.Size([N])
pred_masks, # torch.Size([N, 1, Hmask, Wmask])
scores, # torch.Size([N])
image_sizes, # torch.Size([2])
) = torchvision_style_outputs
self.assertTrue(
all(
x.device == torch.device(device) for x in torchvision_style_outputs[:4]
),
torchvision_style_outputs,
)
torch.testing.assert_close(image_sizes, torch.tensor([64, 96]))
# Running inference using caffe2-style format
data = torch.zeros(1, 1, 64, 96)
im_info = torch.tensor([[64, 96, 1.0]])
caffe2_style_outputs = caffe2_ops_model([data, im_info])
# NOTE: the output order is determined in the order of creating the tensor during
# forward function, it's also follow the order of original Caffe2 model.
roi_bbox_nms = caffe2_style_outputs[0] # torch.Size([N, 4])
roi_score_nms = caffe2_style_outputs[1] # torch.Size([N])
roi_class_nms = caffe2_style_outputs[2] # torch.Size([N])
mask_fcn_probs = caffe2_style_outputs[3] # torch.Size([N, Cmask, Hmask, Wmask])
# relations between torchvision-style outputs and caffe2-style outputs
torch.testing.assert_close(pred_boxes, roi_bbox_nms, check_device=False)
torch.testing.assert_close(
pred_classes, roi_class_nms.to(torch.int64), check_device=False
)
torch.testing.assert_close(
pred_masks,
mask_fcn_probs[:, roi_class_nms.to(torch.int64), :, :],
check_device=False,
)
torch.testing.assert_close(scores, roi_score_nms, check_device=False)
# END_WIKI_EXAMPLE_TAG
class TestOptimizer(unittest.TestCase):
@unittest.skipIf(is_oss(), "Caffe2 is not available for OSS")
def test_maskrcnn_export_caffe2_vs_torchvision_opset_format_example(self):
maskrcnn_export_caffe2_vs_torchvision_opset_format_example(self)
| d2go-main | tests/modeling/test_rcnn_export_example.py |
#!/usr/bin/env python3
import os
import tempfile
import unittest
from collections import defaultdict
import torch
from d2go.evaluation.evaluator import inference_on_dataset, ResultCache
from detectron2.evaluation import DatasetEvaluator, DatasetEvaluators
class EvaluatorForTest(DatasetEvaluator):
def __init__(self):
self.results = []
def reset(self):
self.results.clear()
def process(self, inputs, outputs):
self.results.append(outputs)
def evaluate(self):
return sum(self.results)
class EvaluatorWithCheckpointForTest(DatasetEvaluator):
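    """
    Evaluator that persists its results on disk via ResultCache: once a cache
    exists, `process` is skipped (see `has_finished_process`) and `evaluate`
    loads the cached results instead of recomputing them.
    """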
def __init__(self, save_dir):
self.results = []
self.result_cache = ResultCache(save_dir)
self._call_count = defaultdict(int)
def reset(self):
self.results.clear()
self._call_count["reset"] += 1
def has_finished_process(self):
return self.result_cache.has_cache()
def process(self, inputs, outputs):
assert not self.result_cache.has_cache()
self.results.append(outputs)
self._call_count["process"] += 1
def evaluate(self):
if not self.result_cache.has_cache():
self.result_cache.save(self.results)
else:
self.results = self.result_cache.load()
self._call_count["evaluate"] += 1
return sum(self.results)
class Model(torch.nn.Module):
def forward(self, x):
return x
class TestEvaluator(unittest.TestCase):
def test_inference(self):
model = Model()
evaluator = EvaluatorForTest()
data_loader = [1, 2, 3, 4, 5]
results = inference_on_dataset(model, data_loader, evaluator)
self.assertEqual(results, 15)
def test_inference_with_checkpoint(self):
with tempfile.TemporaryDirectory() as save_dir:
model = Model()
evaluator = EvaluatorWithCheckpointForTest(save_dir)
self.assertFalse(evaluator.has_finished_process())
data_loader = [1, 2, 3, 4, 5]
results = inference_on_dataset(model, data_loader, evaluator)
self.assertEqual(results, 15)
self.assertEqual(evaluator._call_count["reset"], 1)
self.assertEqual(evaluator._call_count["process"], 5)
self.assertEqual(evaluator._call_count["evaluate"], 1)
# run again with cache
self.assertTrue(evaluator.has_finished_process())
results = inference_on_dataset(model, data_loader, evaluator)
self.assertEqual(results, 15)
self.assertEqual(evaluator._call_count["reset"], 2)
self.assertEqual(evaluator._call_count["process"], 5)
self.assertEqual(evaluator._call_count["evaluate"], 2)
self.assertTrue(os.path.isfile(evaluator.result_cache.cache_file))
def test_evaluators_patch(self):
with tempfile.TemporaryDirectory() as save_dir:
cp_evaluator = EvaluatorWithCheckpointForTest(save_dir)
evaluator = DatasetEvaluators([cp_evaluator])
self.assertFalse(evaluator.has_finished_process())
cp_evaluator.reset()
cp_evaluator.process(1, 1)
cp_evaluator.evaluate()
self.assertTrue(evaluator.has_finished_process())
| d2go-main | tests/evaluation/test_evaluator.py |
d2go-main | tests/evaluation/__init__.py |
|
#!/usr/bin/env python3
import unittest
import torch
from d2go.evaluation.prediction_count_evaluation import PredictionCountEvaluator
from detectron2.structures.instances import Instances
class TestPredictionCountEvaluation(unittest.TestCase):
def setUp(self):
self.evaluator = PredictionCountEvaluator()
image_size = (224, 224)
self.mock_outputs = [
{"instances": Instances(image_size, scores=torch.Tensor([0.9, 0.8, 0.7]))},
{"instances": Instances(image_size, scores=torch.Tensor([0.9, 0.8, 0.7]))},
{"instances": Instances(image_size, scores=torch.Tensor([0.9, 0.8]))},
{"instances": Instances(image_size, scores=torch.Tensor([0.9, 0.8]))},
{"instances": Instances(image_size, scores=torch.Tensor([0.9]))},
]
# PredictionCountEvaluator does not depend on inputs
self.mock_inputs = [None] * len(self.mock_outputs)
def test_process_evaluate_reset(self):
self.assertEqual(len(self.evaluator.prediction_counts), 0)
self.assertEqual(len(self.evaluator.confidence_scores), 0)
# Test that `process` registers the outputs.
self.evaluator.process(self.mock_inputs, self.mock_outputs)
self.assertListEqual(self.evaluator.prediction_counts, [3, 3, 2, 2, 1])
self.assertEqual(len(self.evaluator.confidence_scores), 11)
# Test that `evaluate` returns the correct metrics.
output_metrics = self.evaluator.evaluate()
self.assertDictAlmostEqual(
output_metrics,
{
"false_positives": {
"predictions_per_image": 11 / 5,
"confidence_per_prediction": (0.9 * 5 + 0.8 * 4 + 0.7 * 2) / 11,
}
},
)
# Test that `reset` clears the evaluator state.
self.evaluator.reset()
self.assertEqual(len(self.evaluator.prediction_counts), 0)
self.assertEqual(len(self.evaluator.confidence_scores), 0)
def assertDictAlmostEqual(self, dict1, dict2):
keys1 = list(dict1.keys())
keys2 = list(dict2.keys())
# Assert lists are equal, irrespective of ordering
self.assertCountEqual(keys1, keys2)
for k, v1 in dict1.items():
v2 = dict2[k]
if isinstance(v2, list):
self.assertListEqual(v1, v2)
elif isinstance(v2, dict):
self.assertDictAlmostEqual(v1, v2)
else:
self.assertAlmostEqual(v1, v2)
| d2go-main | tests/evaluation/test_prediction_count_evaluation.py |
d2go-main | tests/export/__init__.py |
|
import unittest
from d2go.export.torchscript import (
MobileOptimizationConfig,
update_export_kwargs_from_export_method,
)
@update_export_kwargs_from_export_method
def mock_export(cls, model, input_args, save_path, export_method, **export_kwargs):
    # Return the export kwargs so that we can check they are set as expected
return export_kwargs
class TestTorchscriptExportMethods(unittest.TestCase):
def test_update_export_kwargs_from_export_method(self):
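        # Export method strings follow "<base>[-<backend>][@<jit_mode>]" (inferred
        # from the cases exercised below), e.g. "torchscript_mobile-metal" or
        # "torchscript_mobile@scripting".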
_empty_export_kwargs = {}
def try_mock_export(export_method: str, export_kwargs=_empty_export_kwargs):
return mock_export(
cls=None,
model=None,
input_args=None,
save_path=None,
export_method=export_method,
**export_kwargs,
)
export_method_string = "torchscript"
new_export_kwargs = try_mock_export(export_method_string)
self.assertNotIn("mobile_optimization", new_export_kwargs)
export_method_string = "torchscript_mobile"
new_export_kwargs = try_mock_export(export_method_string)
self.assertIn("mobile_optimization", new_export_kwargs)
        self.assertEqual(
type(new_export_kwargs["mobile_optimization"]),
MobileOptimizationConfig,
)
        self.assertEqual(new_export_kwargs["mobile_optimization"].backend, "CPU")
export_method_string = "torchscript_mobile-metal"
new_export_kwargs = try_mock_export(export_method_string)
        self.assertEqual(new_export_kwargs["mobile_optimization"].backend, "metal")
export_method_string = "torchscript_mobile-vulkan"
new_export_kwargs = try_mock_export(export_method_string)
        self.assertEqual(new_export_kwargs["mobile_optimization"].backend, "vulkan")
export_method_string = "torchscript_mobile@tracing"
new_export_kwargs = try_mock_export(export_method_string)
        self.assertEqual(new_export_kwargs["jit_mode"], "trace")
export_method_string = "torchscript_mobile@scripting"
new_export_kwargs = try_mock_export(export_method_string)
        self.assertEqual(new_export_kwargs["jit_mode"], "script")
| d2go-main | tests/export/test_torchscript.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import unittest
from typing import List
import torch
import torch.nn as nn
from d2go.export.api import FuncInfo, PredictorExportConfig
from d2go.export.exporter import convert_and_export_predictor
from d2go.export.torchscript import (
DefaultTorchscriptExport,
TracingAdaptedTorchscriptExport,
)
from mobile_cv.common.misc.file_utils import make_temp_directory
from mobile_cv.predictor.api import create_predictor
from parameterized import parameterized
class SimpleModel(nn.Module):
def forward(self, x):
return 2 * x
def prepare_for_export(self, cfg, inputs, predictor_type):
# pre/post processing and run_func are default values
return PredictorExportConfig(
model=self,
# model(x) -> model(*(x,))
data_generator=lambda x: (x,),
)
class TwoPartSimpleModel(nn.Module):
"""
    Suppose there's some function in the middle that can't be traced; therefore we
    need to export the model as two parts.
"""
def __init__(self):
super().__init__()
self.part1 = SimpleModel()
self.part2 = SimpleModel()
def forward(self, x):
x = self.part1(x)
x = TwoPartSimpleModel.non_traceable_func(x)
x = self.part2(x)
return x
def prepare_for_export(self, cfg, inputs, predictor_type):
def data_generator(x):
part1_args = (x,)
x = self.part1(x)
x = TwoPartSimpleModel.non_traceable_func(x)
part2_args = (x,)
return {"part1": part1_args, "part2": part2_args}
return PredictorExportConfig(
model={"part1": self.part1, "part2": self.part2},
data_generator=data_generator,
run_func_info=FuncInfo.gen_func_info(TwoPartSimpleModel.RunFunc, params={}),
)
@staticmethod
def non_traceable_func(x):
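        # Data-dependent Python control flow like this cannot be captured by tracing.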
return x + 1 if len(x.shape) > 3 else x - 1
class RunFunc(object):
def __call__(self, model, x):
assert isinstance(model, dict)
x = model["part1"](x)
x = TwoPartSimpleModel.non_traceable_func(x)
x = model["part2"](x)
return x
class ScriptingOnlyModel(nn.Module):
"""
    Example of a model that requires scripting (e.g. having a control loop).
"""
def forward(self, inputs: List[torch.Tensor]) -> List[torch.Tensor]:
outputs = []
for i, t in enumerate(inputs):
outputs.append(t * i)
return outputs
def prepare_for_export(self, cfg, inputs, predictor_type):
if cfg == "explicit":
return PredictorExportConfig(
model=self,
data_generator=None, # data is not needed for scripting
model_export_kwargs={
"jit_mode": "script"
}, # explicitly using script mode
)
elif cfg == "implicit":
            # Sometimes the user wants to switch between scripting and tracing without
            # touching the PredictorExportConfig
return PredictorExportConfig(
model=self,
data_generator=None, # data is not needed for scripting
)
raise NotImplementedError()
class TestExportAPI(unittest.TestCase):
def _export_simple_model(self, cfg, model, data, output_dir, predictor_type):
predictor_path = convert_and_export_predictor(
cfg,
model,
predictor_type=predictor_type,
output_dir=output_dir,
data_loader=iter([data] * 3),
)
self.assertTrue(os.path.isdir(predictor_path))
# also test loading predictor
predictor = create_predictor(predictor_path)
return predictor
def test_simple_model(self):
with make_temp_directory("test_simple_model") as tmp_dir:
model = SimpleModel()
predictor = self._export_simple_model(
None, model, torch.tensor(1), tmp_dir, predictor_type="torchscript"
)
x = torch.tensor(42)
self.assertEqual(predictor(x), model(x))
def test_simple_two_part_model(self):
with make_temp_directory("test_simple_two_part_model") as tmp_dir:
model = TwoPartSimpleModel()
predictor = self._export_simple_model(
None, model, torch.tensor(1), tmp_dir, predictor_type="torchscript"
)
x = torch.tensor(42)
self.assertEqual(predictor(x), model(x))
def test_script_only_model(self):
def _validate(predictor):
outputs = predictor([torch.tensor(1), torch.tensor(2), torch.tensor(3)])
self.assertEqual(len(outputs), 3)
self.assertEqual(
outputs, [torch.tensor(0), torch.tensor(2), torch.tensor(6)]
)
        # Method 1: explicitly set jit_mode to "script" via model_export_kwargs
with make_temp_directory("test_test_script_only_model") as tmp_dir:
model = ScriptingOnlyModel()
predictor = self._export_simple_model(
"explicit", model, None, tmp_dir, predictor_type="torchscript"
)
_validate(predictor)
# Method 2: using torchscript@scripting as predictor type
with make_temp_directory("test_test_script_only_model") as tmp_dir:
model = ScriptingOnlyModel()
predictor = self._export_simple_model(
"implicit", model, None, tmp_dir, predictor_type="torchscript@scripting"
)
_validate(predictor)
class MultiTensorInSingleTensorOut(nn.Module):
def forward(self, x, y):
return x + y
@staticmethod
def get_input_args():
return (torch.tensor([2]), torch.tensor([3]))
@staticmethod
def check_outputs(new_output, original_output):
torch.testing.assert_close(new_output, torch.tensor([5]))
# NOTE: caffe2 wrapper assumes tensors are fp32
class SingleListInSingleListOut(nn.Module):
def forward(self, inputs):
x, y = inputs
return [x + y]
@staticmethod
def get_input_args():
inputs = [torch.tensor([2.0]), torch.tensor([3.0])]
return (inputs,)
@staticmethod
def check_outputs(new_output, original_output):
assert len(new_output) == 1
torch.testing.assert_close(new_output[0], torch.tensor([5.0]))
class MultiDictInMultiDictOut(nn.Module):
def forward(self, x, y):
first = {"add": x["first"] + y["first"], "sub": x["first"] - y["first"]}
second = {"add": x["second"] + y["second"], "sub": x["second"] - y["second"]}
return [first, second]
@staticmethod
def get_input_args():
return (
{"first": torch.tensor([1]), "second": torch.tensor([2])}, # x
{"first": torch.tensor([3]), "second": torch.tensor([4])}, # y
)
@staticmethod
def check_outputs(new_output, original_output):
first, second = original_output
torch.testing.assert_close(first["add"], torch.tensor([4]))
torch.testing.assert_close(first["sub"], torch.tensor([-2]))
torch.testing.assert_close(second["add"], torch.tensor([6]))
torch.testing.assert_close(second["sub"], torch.tensor([-2]))
MODEL_EXPORT_METHOD_TEST_CASES = [
[DefaultTorchscriptExport, MultiTensorInSingleTensorOut],
[DefaultTorchscriptExport, SingleListInSingleListOut],
[TracingAdaptedTorchscriptExport, MultiTensorInSingleTensorOut],
[TracingAdaptedTorchscriptExport, SingleListInSingleListOut],
[TracingAdaptedTorchscriptExport, MultiDictInMultiDictOut],
]
try:
from d2go.export.fb.caffe2 import DefaultCaffe2Export
MODEL_EXPORT_METHOD_TEST_CASES.extend(
[
# [DefaultCaffe2Export, MultiTensorInSingleTensorOut], # TODO: make caffe2 support this
[DefaultCaffe2Export, SingleListInSingleListOut],
]
)
except ImportError:
pass
class TestModelExportMethods(unittest.TestCase):
@parameterized.expand(
MODEL_EXPORT_METHOD_TEST_CASES,
name_func=lambda testcase_func, param_num, param: (
"{}_{}_{}".format(
testcase_func.__name__, param.args[0].__name__, param.args[1].__name__
)
),
)
def test_interface(self, model_export_method, test_model_class):
model = test_model_class()
input_args = test_model_class.get_input_args()
output_checker = test_model_class.check_outputs
model_export_method.test_export_and_load(
model, input_args, None, {}, output_checker
)
| d2go-main | tests/export/test_api.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import shutil
import tempfile
import unittest
import torch
from d2go.data.disk_cache import DiskCachedList, ROOT_CACHE_DIR
from d2go.data.utils import configure_dataset_creation
from d2go.runner import create_runner
from d2go.utils.testing.data_loader_helper import (
create_detection_data_loader_on_toy_dataset,
register_toy_coco_dataset,
)
class TestD2GoDatasetMapper(unittest.TestCase):
"""
This class test D2GoDatasetMapper which is used to build
data loader in GeneralizedRCNNRunner (the default runner) in Detectron2Go.
"""
def setUp(self):
self.output_dir = tempfile.mkdtemp(prefix="TestD2GoDatasetMapper_")
self.addCleanup(shutil.rmtree, self.output_dir)
def test_default_dataset(self):
runner = create_runner("d2go.runner.GeneralizedRCNNRunner")
cfg = runner.get_default_cfg()
cfg.DATASETS.TRAIN = ["default_dataset_train"]
cfg.DATASETS.TEST = ["default_dataset_test"]
cfg.OUTPUT_DIR = self.output_dir
with register_toy_coco_dataset("default_dataset_train", num_images=3):
train_loader = runner.build_detection_train_loader(cfg)
for i, data in enumerate(train_loader):
self.assertIsNotNone(data)
# for training loader, it has infinite length
if i == 6:
break
with register_toy_coco_dataset("default_dataset_test", num_images=3):
test_loader = runner.build_detection_test_loader(
cfg, dataset_name="default_dataset_test"
)
all_data = []
for data in test_loader:
all_data.append(data)
self.assertEqual(len(all_data), 3)
class _MyClass(object):
def __init__(self, x):
self.x = x
def do_something(self):
return
class TestDiskCachedDataLoader(unittest.TestCase):
def setUp(self):
# make sure the ROOT_CACHE_DIR is empty when entering the test
if os.path.exists(ROOT_CACHE_DIR):
shutil.rmtree(ROOT_CACHE_DIR)
self.output_dir = tempfile.mkdtemp(prefix="TestDiskCachedDataLoader_")
self.addCleanup(shutil.rmtree, self.output_dir)
def _count_cache_dirs(self):
if not os.path.exists(ROOT_CACHE_DIR):
return 0
return len(os.listdir(ROOT_CACHE_DIR))
def test_disk_cached_dataset_from_list(self):
"""Test the class of DiskCachedList"""
        # check that the disk cache can handle different data types
lst = [1, torch.tensor(2), _MyClass(3)]
disk_cached_lst = DiskCachedList(lst)
self.assertEqual(len(disk_cached_lst), 3)
self.assertEqual(disk_cached_lst[0], 1)
self.assertEqual(disk_cached_lst[1].item(), 2)
self.assertEqual(disk_cached_lst[2].x, 3)
# check the cache is created
cache_dir = disk_cached_lst.cache_dir
self.assertTrue(os.path.isdir(cache_dir))
# check the cache is properly released
del disk_cached_lst
self.assertFalse(os.path.isdir(cache_dir))
def test_disk_cached_dataloader(self):
"""Test the data loader backed by disk cache"""
height = 6
width = 8
runner = create_runner("d2go.runner.GeneralizedRCNNRunner")
cfg = runner.get_default_cfg()
cfg.OUTPUT_DIR = self.output_dir
cfg.DATALOADER.NUM_WORKERS = 2
def _test_data_loader(data_loader):
first_batch = next(iter(data_loader))
            self.assertEqual(first_batch[0]["height"], height)
            self.assertEqual(first_batch[0]["width"], width)
# enable the disk cache
cfg.merge_from_list(["D2GO_DATA.DATASETS.DISK_CACHE.ENABLED", "True"])
with configure_dataset_creation(cfg):
# no cache dir in the beginning
self.assertEqual(self._count_cache_dirs(), 0)
with create_detection_data_loader_on_toy_dataset(
cfg, height, width, is_train=True
) as train_loader:
# train loader should create one cache dir
self.assertEqual(self._count_cache_dirs(), 1)
_test_data_loader(train_loader)
with create_detection_data_loader_on_toy_dataset(
cfg, height, width, is_train=False
) as test_loader:
# test loader should create another cache dir
self.assertEqual(self._count_cache_dirs(), 2)
_test_data_loader(test_loader)
# test loader should release its cache
del test_loader
self.assertEqual(self._count_cache_dirs(), 1)
# no cache dir in the end
del train_loader
self.assertEqual(self._count_cache_dirs(), 0)
| d2go-main | tests/data/test_data_loader.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import json
import unittest
from typing import Tuple
import cv2
import numpy as np
import torchvision.transforms as T
from d2go.data.transforms.build import build_transform_gen
from d2go.runner import Detectron2GoRunner
from detectron2.data.transforms.augmentation import apply_augmentations, AugInput
def generate_test_data(
source_img: np.ndarray,
angle: float = 0,
translation: float = 0,
scale: float = 1,
shear: float = 0,
fit_in_frame: bool = True,
keep_aspect_ratio: bool = False,
) -> Tuple[str, np.ndarray]:
# Augmentation dictionary
aug_dict = {
"prob": 1.0,
"angle_range": [angle, angle],
"translation_range": [translation, translation],
"scale_range": [scale, scale],
"shear_range": [shear, shear],
"keep_aspect_ratio": keep_aspect_ratio,
"fit_in_frame": fit_in_frame,
}
aug_str = "RandomAffineOp::" + json.dumps(aug_dict)
# Get image info
img_h, img_w = source_img.shape[0:2]
center = [img_w / 2, img_h / 2]
# Compute output_size
max_size = max(img_w, img_h)
out_w, out_h = (img_w, img_h) if keep_aspect_ratio else (max_size, max_size)
if fit_in_frame:
# Warp once to figure scale adjustment
M_inv = T.functional._get_inverse_affine_matrix(
center, angle, [0, 0], 1, [shear, shear]
)
M_inv.extend([0.0, 0.0, 1.0])
M_inv = np.array(M_inv).reshape((3, 3))
M = np.linalg.inv(M_inv)
# Center in output patch
img_corners = np.array(
[
[0, 0, img_w - 1, img_w - 1],
[0, img_h - 1, 0, img_h - 1],
[1, 1, 1, 1],
]
)
new_corners = M @ img_corners
x_range = np.ceil(np.amax(new_corners[0]) - np.amin(new_corners[0]))
y_range = np.ceil(np.amax(new_corners[1]) - np.amin(new_corners[1]))
# Apply translation and scale after centering in output patch
scale_adjustment = min(out_w / x_range, out_h / y_range)
scale *= scale_adjustment
# Adjust output center location
translation_t = [translation, translation]
translation_adjustment = [(out_w - img_w) / 2, (out_h - img_h) / 2]
translation_t[0] += translation_adjustment[0]
translation_t[1] += translation_adjustment[1]
# Test data output generation
M_inv = T.functional._get_inverse_affine_matrix(
center, angle, translation_t, scale, [shear, shear]
)
M_inv = np.array(M_inv).reshape((2, 3))
exp_out_img = cv2.warpAffine(
source_img,
M_inv,
(out_w, out_h),
flags=cv2.WARP_INVERSE_MAP + cv2.INTER_LINEAR,
borderMode=cv2.BORDER_REPLICATE,
)
# Create annotations
test_bbox = [0.25 * img_w, 0.25 * img_h, 0.75 * img_h, 0.75 * img_h]
# Generate segmentation test data
segm_mask = np.zeros_like(source_img)
segm_mask[
int(test_bbox[0]) : int(test_bbox[2]), int(test_bbox[1]) : int(test_bbox[3])
] = 255
exp_out_segm = cv2.warpAffine(
segm_mask,
M_inv,
(out_w, out_h),
flags=cv2.WARP_INVERSE_MAP + cv2.INTER_NEAREST,
borderMode=cv2.BORDER_REPLICATE,
)
# Generate bounding box test data
M_inv = np.vstack([M_inv, [0.0, 0.0, 1.0]])
points = np.array(
[
[test_bbox[0], test_bbox[0], test_bbox[2], test_bbox[2]],
[test_bbox[1], test_bbox[3], test_bbox[1], test_bbox[3]],
]
).T
_xp = warp_points(points, M_inv)
out_bbox = [min(_xp[:, 0]), min(_xp[:, 1]), max(_xp[:, 0]), max(_xp[:, 1])]
return (
aug_str,
AugInput(source_img, boxes=[test_bbox], sem_seg=segm_mask),
(exp_out_img, [out_bbox], exp_out_segm),
)
def warp_points(coords: np.array, xfm_M: np.array):
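    """Warp Nx2 points by the inverse of the 3x3 transform matrix xfm_M (homogeneous coordinates)."""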
coords = coords.T
ones = np.ones((1, coords.shape[1]))
coords = np.vstack((coords, ones))
M = np.linalg.inv(xfm_M)
coords = (M @ coords)[:2, :].T
return coords
class TestDataTransformsAffine(unittest.TestCase):
def _validate_results(self, aug_output, exp_outputs):
exp_img = exp_outputs[0]
self.assertTrue(
np.allclose(exp_img, aug_output.image),
f"Augmented image not the same, expecting\n{exp_img[:,:,0]} \n got\n{aug_output.image[:,:,0]} ",
)
exp_bboxes = exp_outputs[1]
self.assertTrue(
np.allclose(exp_bboxes, aug_output.boxes, atol=0.000001),
f"Augmented bbox not the same, expecting\n{exp_img[:,:,0]} \n got\n{aug_output.image[:,:,0]} ",
)
exp_segm = exp_outputs[2]
self.assertTrue(
np.allclose(exp_segm, aug_output.sem_seg),
f"Augmented segm not the same, expecting\n{exp_segm} \n got\n{aug_output.sem_seg[:,:]} ",
)
def test_affine_transforms_angle(self):
default_cfg = Detectron2GoRunner.get_default_cfg()
img_sz = 11
img = np.zeros((img_sz, img_sz, 3)).astype(np.uint8)
img[((img_sz + 1) // 2) - 1, :, :] = 255
for angle in [45, 90]:
aug_str, aug_input, exp_outputs = generate_test_data(img, angle=angle)
default_cfg.D2GO_DATA.AUG_OPS.TRAIN = [aug_str]
tfm = build_transform_gen(default_cfg, is_train=True)
# Test augmentation
aug_output, _ = apply_augmentations(tfm, aug_input)
self._validate_results(aug_output, exp_outputs)
def test_affine_transforms_translation(self):
default_cfg = Detectron2GoRunner.get_default_cfg()
img_sz = 11
img = np.zeros((img_sz, img_sz, 3)).astype(np.uint8)
img[((img_sz + 1) // 2) - 1, :, :] = 255
for translation in [0, 1, 2]:
# Test image
aug_str, aug_input, exp_outputs = generate_test_data(
img, translation=translation
)
default_cfg.D2GO_DATA.AUG_OPS.TRAIN = [aug_str]
tfm = build_transform_gen(default_cfg, is_train=True)
# Test augmentation
aug_output, _ = apply_augmentations(tfm, aug_input)
self._validate_results(aug_output, exp_outputs)
def test_affine_transforms_shear(self):
default_cfg = Detectron2GoRunner.get_default_cfg()
img_sz = 11
img = np.zeros((img_sz, img_sz, 3)).astype(np.uint8)
img[((img_sz + 1) // 2) - 1, :, :] = 255
for shear in [0, 1, 2]:
aug_str, aug_input, exp_outputs = generate_test_data(img, shear=shear)
default_cfg.D2GO_DATA.AUG_OPS.TRAIN = [aug_str]
tfm = build_transform_gen(default_cfg, is_train=True)
# Test augmentation
aug_output, _ = apply_augmentations(tfm, aug_input)
self._validate_results(aug_output, exp_outputs)
def test_affine_transforms_scale(self):
default_cfg = Detectron2GoRunner.get_default_cfg()
img_sz = 11
img = np.zeros((img_sz, img_sz, 3)).astype(np.uint8)
img[((img_sz + 1) // 2) - 1, :, :] = 255
for scale in [0.9, 1, 1.1]:
aug_str, aug_input, exp_outputs = generate_test_data(img, scale=scale)
default_cfg.D2GO_DATA.AUG_OPS.TRAIN = [aug_str]
tfm = build_transform_gen(default_cfg, is_train=True)
# Test augmentation
aug_output, _ = apply_augmentations(tfm, aug_input)
self._validate_results(aug_output, exp_outputs)
def test_affine_transforms_angle_non_square(self):
default_cfg = Detectron2GoRunner.get_default_cfg()
img_sz = 11
img = np.zeros((img_sz, img_sz - 2, 3)).astype(np.uint8)
img[((img_sz + 1) // 2) - 1, :, :] = 255
for keep_aspect_ratio in [False, True]:
aug_str, aug_input, exp_outputs = generate_test_data(
img, angle=45, keep_aspect_ratio=keep_aspect_ratio
)
default_cfg.D2GO_DATA.AUG_OPS.TRAIN = [aug_str]
tfm = build_transform_gen(default_cfg, is_train=True)
# Test augmentation
aug_output, _ = apply_augmentations(tfm, aug_input)
self._validate_results(aug_output, exp_outputs)
def test_affine_transforms_angle_no_fit_to_frame(self):
default_cfg = Detectron2GoRunner.get_default_cfg()
img_sz = 11
img = np.zeros((img_sz, img_sz, 3)).astype(np.uint8)
img[((img_sz + 1) // 2) - 1, :, :] = 255
aug_str, aug_input, exp_outputs = generate_test_data(
img, angle=45, fit_in_frame=False
)
default_cfg.D2GO_DATA.AUG_OPS.TRAIN = [aug_str]
tfm = build_transform_gen(default_cfg, is_train=True)
# Test augmentation
aug_output, _ = apply_augmentations(tfm, aug_input)
self._validate_results(aug_output, exp_outputs)
| d2go-main | tests/data/test_data_transforms_affine.py |
d2go-main | tests/data/__init__.py |
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import d2go.data.transforms.box_utils as bu
import numpy as np
import torch
from d2go.config import CfgNode
from d2go.data.transforms.build import build_transform_gen
def get_default_config():
cfg = CfgNode()
cfg.D2GO_DATA = CfgNode()
cfg.D2GO_DATA.AUG_OPS = CfgNode()
return cfg
class TestDataTransformsBoxUtils(unittest.TestCase):
def test_min_box_ar(self):
box_xywh = [4, 5, 10, 6]
target_aspect_ratio = 1.0 / 2
new_box = bu.get_min_box_aspect_ratio(box_xywh, target_aspect_ratio)
self.assertArrayEqual(torch.Tensor([4, -2, 10, 20]), new_box)
def test_get_box_from_mask(self):
img_w, img_h = 8, 6
mask = np.zeros([img_h, img_w])
self.assertEqual(mask.shape, (img_h, img_w))
mask[2:4, 3:6] = 1
box = bu.get_box_from_mask(mask)
self.assertEqual(box, (3, 2, 3, 2))
def test_get_box_from_mask_union(self):
img_w, img_h = 8, 6
mask = np.zeros([img_h, img_w])
self.assertEqual(mask.shape, (img_h, img_w))
mask[2:4, 1:4] = 1
mask[5:6, 4:8] = 1
box = bu.get_box_from_mask(mask)
self.assertEqual(box, (1, 2, 7, 4))
def test_get_box_from_mask_empty(self):
img_w, img_h = 8, 6
mask = np.zeros([img_h, img_w])
box = bu.get_box_from_mask(mask)
self.assertIsNone(box)
def test_scale_bbox_center(self):
bbox = torch.Tensor([1, 2, 4, 5])
out_bbox = bu.scale_bbox_center(bu.scale_bbox_center(bbox, 2.0), 0.5)
self.assertArrayEqual(bbox, out_bbox)
def assertArrayEqual(self, a1, a2):
self.assertTrue(np.array_equal(a1, a2))
def test_enlarge_bounding_box(self):
default_cfg = get_default_config()
default_cfg.D2GO_DATA.AUG_OPS.TRAIN = [
'EnlargeBoundingBoxOp::{"fixed_pad": 20}',
'EnlargeBoundingBoxOp::{"percentage": 0.2}',
]
enlarge_box_tfm = build_transform_gen(default_cfg, is_train=True)
boxes = np.array(
[[91, 46, 144, 111]],
dtype=np.float64,
)
transformed_bboxs = enlarge_box_tfm[0].apply_box(boxes)
expected_bboxs = np.array(
[[71, 26, 164, 131]],
dtype=np.float64,
)
err_msg = "transformed_bbox = {}, expected {}".format(
transformed_bboxs, expected_bboxs
)
self.assertTrue(np.allclose(transformed_bboxs, expected_bboxs), err_msg)
boxes = np.array(
[[91, 46, 144, 111]],
dtype=np.float64,
)
transformed_bboxs = enlarge_box_tfm[1].apply_box(boxes)
expected_bboxs = np.array(
[[85.7, 39.5, 149.3, 117.5]],
dtype=np.float64,
)
err_msg = "transformed_bbox = {}, expected {}".format(
transformed_bboxs, expected_bboxs
)
self.assertTrue(np.allclose(transformed_bboxs, expected_bboxs), err_msg)
boxes = np.array(
[[[91, 46], [144, 111]]],
dtype=np.float64,
)
transformed_bboxs = enlarge_box_tfm[1].apply_polygons(boxes)
expected_bboxs = np.array(
[[[85.7, 39.5], [149.3, 117.5]]],
dtype=np.float64,
)
err_msg = "transformed_bbox = {}, expected {}".format(
transformed_bboxs, expected_bboxs
)
self.assertTrue(np.allclose(transformed_bboxs, expected_bboxs), err_msg)
dummy_data = np.array(
[[91, 46, 144, 111]],
dtype=np.float64,
)
dummy_data_out = enlarge_box_tfm[1].apply_image(dummy_data)
expected_out = np.array(
[[91, 46, 144, 111]],
dtype=np.float64,
)
err_msg = "Apply image failed"
self.assertTrue(np.allclose(dummy_data_out, expected_out), err_msg)
default_cfg.D2GO_DATA.AUG_OPS.TRAIN = [
'EnlargeBoundingBoxOp::{"fixed_pad": 20, "box_only": true}',
]
enlarge_box_tfm = build_transform_gen(default_cfg, is_train=True)
boxes = np.array([[91, 46, 144, 111]])
transformed_bboxs = enlarge_box_tfm[0].apply_coords(boxes)
err_msg = "transformed_bbox = {}, expected {}".format(transformed_bboxs, boxes)
self.assertTrue(np.allclose(transformed_bboxs, boxes), err_msg)
| d2go-main | tests/data/test_data_transforms_box_utils.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import numpy as np
from d2go.data.transforms.build import build_transform_gen
from d2go.runner import Detectron2GoRunner
from detectron2.data.transforms.augmentation import apply_augmentations
class TestDataTransformsBlur(unittest.TestCase):
def test_gaussian_blur_transforms(self):
default_cfg = Detectron2GoRunner.get_default_cfg()
img = np.zeros((80, 60, 3)).astype(np.uint8)
img[40, 30, :] = 255
default_cfg.D2GO_DATA.AUG_OPS.TRAIN = [
'RandomGaussianBlurOp::{"prob": 1.0, "k": 3, "sigma_range": [0.5, 0.5]}'
]
tfm = build_transform_gen(default_cfg, is_train=True)
trans_img, _ = apply_augmentations(tfm, img)
self.assertEqual(img.shape, trans_img.shape)
self.assertEqual(img.dtype, trans_img.dtype)
self.assertEqual(trans_img[39, 29, 0], 3)
self.assertEqual(trans_img[40, 29, 0], 21)
| d2go-main | tests/data/test_data_transforms_blur.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import d2go.data.transforms.box_utils as bu
import numpy as np
import torch
from d2go.data.transforms import crop as tf_crop
class TestDataTransformsCrop(unittest.TestCase):
def test_transform_crop_extent_transform(self):
img_wh = (16, 11)
sem_seg = np.zeros([img_wh[1], img_wh[0]], dtype=np.uint8)
# h, w
sem_seg[5, 4] = 1
sem_seg[10, 13] = 1
sem_seg[5:11, 4:14] = 1
# src_rect: [x0, y0, x1, y1] in pixel coordinate, output_size: [h, w]
trans = tf_crop.ExtentTransform(src_rect=[4, 5, 14, 11], output_size=[6, 10])
out_mask = trans.apply_segmentation(sem_seg)
self.assertArrayEqual(out_mask.shape, torch.Tensor([6, 10]))
self.assertArrayEqual(np.unique(out_mask), torch.Tensor([1]))
trans = tf_crop.ExtentTransform(src_rect=[3, 4, 15, 11], output_size=[7, 12])
out_mask = trans.apply_segmentation(sem_seg)
self.assertArrayEqual(out_mask.shape, torch.Tensor([7, 12]))
self.assertArrayEqual(np.unique(out_mask), torch.Tensor([0, 1]))
self.assertArrayEqual(np.unique(out_mask[1:, 1:-1]), torch.Tensor([1]))
self.assertEqual(out_mask[:, 0].sum(), 0)
self.assertArrayEqual(out_mask[0, :].sum(), 0)
self.assertArrayEqual(out_mask[:, -1].sum(), 0)
def test_transform_crop_random_crop_fixed_aspect_ratio(self):
aug = tf_crop.RandomCropFixedAspectRatio([1.0 / 2])
img_wh = (16, 11)
img = np.ones([img_wh[1], img_wh[0], 3], dtype=np.uint8)
sem_seg = np.zeros([img_wh[1], img_wh[0]], dtype=np.uint8)
sem_seg[5, 4] = 1
sem_seg[10, 13] = 1
mask_xywh = bu.get_box_from_mask(sem_seg)
self.assertArrayEqual(mask_xywh, torch.Tensor([4, 5, 10, 6]))
trans = aug.get_transform(img, sem_seg)
self.assertArrayEqual(trans.src_rect, torch.Tensor([4, -2, 14, 18]))
self.assertArrayEqual(trans.output_size, torch.Tensor([20, 10]))
out_img = trans.apply_image(img)
self.assertArrayEqual(out_img.shape, torch.Tensor([20, 10, 3]))
self.assertArrayEqual(np.unique(out_img[2:13, :, :]), torch.Tensor([1]))
self.assertArrayEqual(np.unique(out_img[0:2, :, :]), torch.Tensor([0]))
self.assertArrayEqual(np.unique(out_img[13:, :, :]), torch.Tensor([0]))
out_mask = trans.apply_segmentation(sem_seg)
self.assertArrayEqual(out_mask.shape, torch.Tensor([20, 10]))
self.assertEqual(out_mask[7, 0], 1)
self.assertEqual(out_mask[12, -1], 1)
def test_transform_crop_random_crop_fixed_aspect_ratio_scale_offset(self):
aug = tf_crop.RandomCropFixedAspectRatio(
[1.0 / 2], scale_range=[0.5, 0.5], offset_scale_range=[-0.5, -0.5]
)
img_wh = (16, 11)
img = np.ones([img_wh[1], img_wh[0], 3], dtype=np.uint8)
sem_seg = np.zeros([img_wh[1], img_wh[0]], dtype=np.uint8)
sem_seg[5, 4] = 1
sem_seg[10, 13] = 1
sem_seg[5:11, 4:14] = 1
mask_xywh = bu.get_box_from_mask(sem_seg)
self.assertArrayEqual(mask_xywh, torch.Tensor([4, 5, 10, 6]))
trans = aug.get_transform(img, sem_seg)
self.assertArrayEqual(trans.src_rect, torch.Tensor([1.5, 0.0, 6.5, 10.0]))
self.assertArrayEqual(trans.output_size, torch.Tensor([10, 5]))
out_img = trans.apply_image(img)
self.assertArrayEqual(out_img.shape, torch.Tensor([10, 5, 3]))
self.assertEqual(np.unique(out_img), 1)
out_mask = trans.apply_segmentation(sem_seg)
self.assertArrayEqual(out_mask.shape, torch.Tensor([10, 5]))
self.assertEqual(np.unique(out_mask[6:, 3:]), 1)
def test_transform_crop_random_crop_fixed_aspect_ratio_empty_mask(self):
"""The sem_mask is empty (the whole image is background)"""
aug = tf_crop.RandomCropFixedAspectRatio([1.0 / 2])
img_wh = (16, 11)
img = np.ones([img_wh[1], img_wh[0], 3], dtype=np.uint8)
sem_seg = np.zeros([img_wh[1], img_wh[0]], dtype=np.uint8)
mask_xywh = bu.get_box_from_mask(sem_seg)
self.assertEqual(mask_xywh, None)
trans = aug.get_transform(img, sem_seg)
self.assertIsInstance(trans, tf_crop.NoOpTransform)
out_img = trans.apply_image(img)
self.assertArrayEqual(out_img.shape, img.shape)
out_mask = trans.apply_segmentation(sem_seg)
self.assertArrayEqual(out_mask.shape, sem_seg.shape)
def test_pad_transform(self):
crop_w, crop_h = 4, 3
full_w, full_h = 11, 9
crop_x, crop_y = 5, 6
trans = tf_crop.PadTransform(crop_x, crop_y, crop_w, crop_h, full_w, full_h)
img = np.ones([crop_h, crop_w])
trans_img = trans.apply_image(img)
self.assertArrayEqual(trans_img.shape, [full_h, full_w])
self.assertArrayEqual(np.unique(trans_img), [0, 1])
full_img_gt = np.zeros([full_h, full_w])
full_img_gt[crop_y : (crop_y + crop_h), crop_x : (crop_x + crop_w)] = 1
self.assertArrayEqual(full_img_gt, trans_img)
def test_crop_transform_inverse(self):
crop_w, crop_h = 4, 3
full_w, full_h = 11, 9
crop_x, crop_y = 5, 6
trans = tf_crop.InvertibleCropTransform(
crop_x, crop_y, crop_w, crop_h, full_w, full_h
)
full_img_gt = np.zeros([full_h, full_w])
full_img_gt[crop_y : (crop_y + crop_h), crop_x : (crop_x + crop_w)] = 1
crop_img_gt = np.ones([crop_h, crop_w])
self.assertArrayEqual(trans.apply_image(full_img_gt), crop_img_gt)
self.assertArrayEqual(trans.inverse().apply_image(crop_img_gt), full_img_gt)
self.assertArrayEqual(
trans.inverse().inverse().apply_image(full_img_gt), crop_img_gt
)
def test_pad_border_divisible_transform(self):
img_h, img_w = 10, 7
divisibility = 8
aug = tf_crop.PadBorderDivisible(divisibility)
img = np.ones([img_h, img_w, 3]) * 3
trans = aug.get_transform(img)
pad_img = trans.apply_image(img)
self.assertEqual(pad_img.shape, (16, 8, 3))
inverse_img = trans.inverse().apply_image(pad_img)
self.assertEqual(inverse_img.shape, (10, 7, 3))
self.assertArrayEqual(img, inverse_img)
mask = np.ones([img_h, img_w]) * 2
pad_mask = trans.apply_segmentation(mask)
self.assertEqual(pad_mask.shape, (16, 8))
inverse_mask = trans.inverse().apply_segmentation(pad_mask)
self.assertEqual(inverse_mask.shape, (10, 7))
self.assertArrayEqual(mask, inverse_mask)
def test_pad_to_square_augmentation(self):
img_h, img_w = 5, 3
aug = tf_crop.PadToSquare(pad_value=255)
img = np.ones([img_h, img_w, 3])
trans = aug.get_transform(img)
pad_img = trans.apply_image(img)
self.assertEqual(pad_img.shape, (5, 5, 3))
def test_random_instance_crop(self):
from detectron2.data import detection_utils as du
from detectron2.data.transforms.augmentation import AugInput, AugmentationList
from detectron2.structures import BoxMode
aug = tf_crop.RandomInstanceCrop([1.0, 1.0])
img_w, img_h = 10, 7
annotations = [
{
"category_id": 0,
"bbox": [1, 1, 4, 3],
"bbox_mode": BoxMode.XYWH_ABS,
},
{
"category_id": 0,
"bbox": [2, 2, 4, 3],
"bbox_mode": BoxMode.XYWH_ABS,
},
{
"category_id": 0,
"bbox": [6, 5, 3, 2],
"bbox_mode": BoxMode.XYWH_ABS,
},
]
img = np.ones([img_h, img_w, 3]) * 3
inputs = AugInput(image=img)
# pass additional arguments
inputs.annotations = annotations
transforms = AugmentationList([aug])(inputs)
self.assertIn(
inputs.image.shape, [torch.Size([3, 4, 3]), torch.Size([2, 3, 3])]
)
        # as in the dataset mapper, annotations marked with the iscrowd flag are
        # filtered out
image_shape = inputs.image.shape[:2]
annos = [
du.transform_instance_annotations(
obj,
transforms,
image_shape,
)
for obj in annotations
if obj.get("iscrowd", 0) == 0
]
instances = du.annotations_to_instances(annos, image_shape)
filtered_instances = du.filter_empty_instances(instances)
self.assertEqual(len(filtered_instances), 1)
self.assertArrayEqual(
filtered_instances.gt_boxes.tensor.tolist(),
[[0, 0, image_shape[1], image_shape[0]]],
)
def assertArrayEqual(self, a1, a2):
self.assertTrue(np.array_equal(a1, a2))
| d2go-main | tests/data/test_data_transforms_crop.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import numpy as np
from d2go.data.transforms.build import build_transform_gen
from d2go.runner import Detectron2GoRunner
from detectron2.data.transforms.augmentation import apply_transform_gens
class TestDataTransforms(unittest.TestCase):
def test_build_transform_gen(self):
default_cfg = Detectron2GoRunner.get_default_cfg()
default_cfg.INPUT.MIN_SIZE_TRAIN = (30,)
default_cfg.INPUT.MIN_SIZE_TEST = 30
trans_train = build_transform_gen(default_cfg, is_train=True)
trans_test = build_transform_gen(default_cfg, is_train=False)
img = np.zeros((80, 60, 3))
trans_img_train, tl_train = apply_transform_gens(trans_train, img)
trans_img_test, tl_test = apply_transform_gens(trans_test, img)
self.assertEqual(trans_img_train.shape, (40, 30, 3))
self.assertEqual(trans_img_test.shape, (40, 30, 3))
def test_build_transform_gen_resize_square(self):
default_cfg = Detectron2GoRunner.get_default_cfg()
default_cfg.INPUT.MIN_SIZE_TRAIN = (30,)
default_cfg.INPUT.MIN_SIZE_TEST = 40
default_cfg.D2GO_DATA.AUG_OPS.TRAIN = ["ResizeShortestEdgeSquareOp"]
default_cfg.D2GO_DATA.AUG_OPS.TEST = ["ResizeShortestEdgeSquareOp"]
trans_train = build_transform_gen(default_cfg, is_train=True)
trans_test = build_transform_gen(default_cfg, is_train=False)
img = np.zeros((80, 60, 3))
trans_img_train, tl_train = apply_transform_gens(trans_train, img)
trans_img_test, tl_test = apply_transform_gens(trans_test, img)
self.assertEqual(trans_img_train.shape, (30, 30, 3))
self.assertEqual(trans_img_test.shape, (40, 40, 3))
| d2go-main | tests/data/test_data_transforms.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import numpy as np
from d2go.data.transforms.build import build_transform_gen
from d2go.runner import Detectron2GoRunner
from detectron2.data.transforms.augmentation import apply_augmentations
class TestDataTransformsAutoAug(unittest.TestCase):
def test_rand_aug_transforms(self):
default_cfg = Detectron2GoRunner.get_default_cfg()
img = np.concatenate(
[
(np.random.uniform(0, 1, size=(80, 60, 1)) * 255).astype(np.uint8),
(np.random.uniform(0, 1, size=(80, 60, 1)) * 255).astype(np.uint8),
(np.random.uniform(0, 1, size=(80, 60, 1)) * 255).astype(np.uint8),
],
axis=2,
)
default_cfg.D2GO_DATA.AUG_OPS.TRAIN = ['RandAugmentImageOp::{"num_ops": 20}']
tfm = build_transform_gen(default_cfg, is_train=True)
trans_img, _ = apply_augmentations(tfm, img)
self.assertEqual(img.shape, trans_img.shape)
self.assertEqual(img.dtype, trans_img.dtype)
def test_trivial_aug_transforms(self):
default_cfg = Detectron2GoRunner.get_default_cfg()
img = np.concatenate(
[
(np.random.uniform(0, 1, size=(80, 60, 1)) * 255).astype(np.uint8),
],
axis=2,
)
default_cfg.D2GO_DATA.AUG_OPS.TRAIN = ["TrivialAugmentWideImageOp"]
tfm = build_transform_gen(default_cfg, is_train=True)
trans_img, _ = apply_augmentations(tfm, img)
self.assertEqual(img.shape, trans_img.shape)
self.assertEqual(img.dtype, trans_img.dtype)
def test_aug_mix_transforms(self):
default_cfg = Detectron2GoRunner.get_default_cfg()
img = np.concatenate(
[
(np.random.uniform(0, 1, size=(80, 60, 1)) * 255).astype(np.uint8),
(np.random.uniform(0, 1, size=(80, 60, 1)) * 255).astype(np.uint8),
(np.random.uniform(0, 1, size=(80, 60, 1)) * 255).astype(np.uint8),
],
axis=2,
)
default_cfg.D2GO_DATA.AUG_OPS.TRAIN = ['AugMixImageOp::{"severity": 3}']
tfm = build_transform_gen(default_cfg, is_train=True)
trans_img, _ = apply_augmentations(tfm, img)
self.assertEqual(img.shape, trans_img.shape)
self.assertEqual(img.dtype, trans_img.dtype)
| d2go-main | tests/data/test_data_transforms_auto_aug.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import numpy as np
from d2go.data.transforms import color_yuv as cy
from d2go.data.transforms.build import build_transform_gen
from d2go.runner import Detectron2GoRunner
from detectron2.data.transforms.augmentation import apply_augmentations
class TestDataTransformsColorYUV(unittest.TestCase):
def test_yuv_color_transforms(self):
default_cfg = Detectron2GoRunner.get_default_cfg()
img = np.concatenate(
[
np.random.uniform(0, 1, size=(80, 60, 1)),
np.random.uniform(-0.5, 0.5, size=(80, 60, 1)),
np.random.uniform(-0.5, 0.5, size=(80, 60, 1)),
],
axis=2,
)
default_cfg.D2GO_DATA.AUG_OPS.TRAIN = [
'RandomContrastYUVOp::{"intensity_min": 0.3, "intensity_max": 0.5}',
]
low_contrast_tfm = build_transform_gen(default_cfg, is_train=True)
low_contrast, _ = apply_augmentations(low_contrast_tfm, img)
default_cfg.D2GO_DATA.AUG_OPS.TRAIN = [
'RandomSaturationYUVOp::{"intensity_min": 1.5, "intensity_max": 1.7}',
]
high_saturation_tfm = build_transform_gen(default_cfg, is_train=True)
high_saturation, _ = apply_augmentations(high_saturation_tfm, img)
# Use pixel statistics to roughly check transformed images as expected
# All channels have less variance
self.assertLess(np.var(low_contrast[:, :, 0]), np.var(img[:, :, 0]))
self.assertLess(np.var(low_contrast[:, :, 1]), np.var(img[:, :, 1]))
self.assertLess(np.var(low_contrast[:, :, 2]), np.var(img[:, :, 2]))
# 1st channel is unchanged (test w/ mean, var), 2nd + 3rd channels more variance
self.assertAlmostEqual(np.mean(high_saturation[:, :, 0]), np.mean(img[:, :, 0]))
self.assertAlmostEqual(np.var(high_saturation[:, :, 0]), np.var(img[:, :, 0]))
self.assertGreater(np.var(high_saturation[:, :, 1]), np.var(img[:, :, 1]))
self.assertGreater(np.var(high_saturation[:, :, 2]), np.var(img[:, :, 2]))
def test_transform_color_yuv_rgbyuv_convert(self):
image = np.arange(256).reshape(16, 16, 1).repeat(3, axis=2).astype(np.uint8)
tf1 = cy.RGB2YUVBT601().get_transform(image)
tf2 = cy.YUVBT6012RGB().get_transform(image)
image_yuv = tf1.apply_image(image)
image_rgb = tf2.apply_image(image_yuv)
self.assertArrayEqual((image_rgb + 0.5).astype(np.uint8), image)
    def test_transform_color_yuv_rgbyuv_convert_inverse(self):
image = np.arange(256).reshape(16, 16, 1).repeat(3, axis=2).astype(np.uint8)
tf = cy.RGB2YUVBT601().get_transform(image)
image_yuv = tf.apply_image(image)
image_rgb = tf.inverse().apply_image(image_yuv)
self.assertArrayEqual((image_rgb + 0.5).astype(np.uint8), image)
def assertArrayEqual(self, a1, a2):
self.assertTrue(np.array_equal(a1, a2))
| d2go-main | tests/data/test_data_transforms_color_yuv.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import json
import os
import tempfile
import unittest
import d2go.data.extended_coco as extended_coco
from d2go.data.datasets import ANN_FN, COCO_REGISTER_FUNCTION_REGISTRY, IM_DIR
from d2go.data.keypoint_metadata_registry import (
get_keypoint_metadata,
KEYPOINT_METADATA_REGISTRY,
KeypointMetadata,
)
from d2go.data.utils import (
AdhocDatasetManager,
COCOWithClassesToUse,
maybe_subsample_n_images,
)
from d2go.runner import Detectron2GoRunner
from d2go.utils.testing.data_loader_helper import (
create_toy_dataset,
LocalImageGenerator,
)
from d2go.utils.testing.helper import tempdir
from detectron2.data import DatasetCatalog, MetadataCatalog
from mobile_cv.common.misc.file_utils import make_temp_directory
def create_test_images_and_dataset_json(data_dir, num_images=10, num_classes=-1):
# create image and json
image_dir = os.path.join(data_dir, "images")
os.makedirs(image_dir)
json_dataset, meta_data = create_toy_dataset(
LocalImageGenerator(image_dir, width=80, height=60),
num_images=num_images,
num_classes=num_classes,
)
json_file = os.path.join(data_dir, "annotation.json")
with open(json_file, "w") as f:
json.dump(json_dataset, f)
return image_dir, json_file
class TestD2GoDatasets(unittest.TestCase):
def setUp(self):
self._builtin_datasets = set(DatasetCatalog)
def tearDown(self):
# Need to remove injected dataset
injected_dataset = set(DatasetCatalog) - self._builtin_datasets
for ds in injected_dataset:
DatasetCatalog.remove(ds)
MetadataCatalog.remove(ds)
def test_coco_conversions(self):
test_data_0 = {
"info": {},
"imgs": {
"img_1": {
"file_name": "0.jpg",
"width": 600,
"height": 600,
"id": "img_1",
}
},
"anns": {0: {"id": 0, "image_id": "img_1", "bbox": [30, 30, 60, 20]}},
"imgToAnns": {"img_1": [0]},
"cats": {},
}
test_data_1 = copy.deepcopy(test_data_0)
test_data_1["imgs"][123] = test_data_1["imgs"].pop("img_1")
test_data_1["imgs"][123]["id"] = 123
test_data_1["anns"][0]["image_id"] = 123
test_data_1["imgToAnns"][123] = test_data_1["imgToAnns"].pop("img_1")
for test_data, exp_output in [(test_data_0, [0, 0]), (test_data_1, [123, 123])]:
with make_temp_directory("detectron2go_tmp_dataset") as tmp_dir:
src_json = os.path.join(tmp_dir, "source.json")
out_json = os.path.join(tmp_dir, "output.json")
with open(src_json, "w") as h_in:
json.dump(test_data, h_in)
out_json = extended_coco.convert_coco_text_to_coco_detection_json(
src_json, out_json
)
self.assertEqual(out_json["images"][0]["id"], exp_output[0])
self.assertEqual(out_json["annotations"][0]["image_id"], exp_output[1])
def test_annotation_rejection(self):
img_list = [
{"id": 0, "width": 50, "height": 50, "file_name": "a.png"},
{"id": 1, "width": 50, "height": 50, "file_name": "b.png"},
{"id": 2, "width": 50, "height": 50, "file_name": "b.png"},
]
ann_list = [
[
{
"id": 0,
"image_id": 0,
"category_id": 0,
"segmentation": [[0, 0, 10, 0, 10, 10, 0, 10]],
"area": 100,
"bbox": [0, 0, 10, 10],
},
{
"id": 1,
"image_id": 0,
"category_id": 0,
"segmentation": [[0, 0, 10, 0, 10, 10, 0, 10]],
"area": 100,
"bbox": [45, 45, 10, 10],
},
{
"id": 2,
"image_id": 0,
"category_id": 0,
"segmentation": [[0, 0, 10, 0, 10, 10, 0, 10]],
"area": 100,
"bbox": [-5, -5, 10, 10],
},
{
"id": 3,
"image_id": 0,
"category_id": 0,
"segmentation": [[0, 0, 10, 0, 10, 10, 0, 10]],
"area": 0,
"bbox": [5, 5, 0, 0],
},
{
"id": 4,
"image_id": 0,
"category_id": 0,
"segmentation": [[]],
"area": 25,
"bbox": [5, 5, 5, 5],
},
],
[
{
"id": 5,
"image_id": 1,
"category_id": 0,
"segmentation": [[]],
"area": 100,
"bbox": [0, 0, 0, 0],
},
],
[],
]
out_dict_list = extended_coco.convert_to_dict_list("", [0], img_list, ann_list)
self.assertEqual(len(out_dict_list), 1)
self.assertEqual(len(out_dict_list[0]["annotations"]), 1)
out_dict_list = extended_coco.convert_to_dict_list(
"", [0], img_list, ann_list, filter_empty_annotations=False
)
self.assertEqual(len(out_dict_list), 3)
@tempdir
def test_coco_injection(self, tmp_dir):
image_dir, json_file = create_test_images_and_dataset_json(tmp_dir)
runner = Detectron2GoRunner()
cfg = runner.get_default_cfg()
cfg.merge_from_list(
[
str(x)
for x in [
"D2GO_DATA.DATASETS.COCO_INJECTION.NAMES",
["inj_ds1", "inj_ds2"],
"D2GO_DATA.DATASETS.COCO_INJECTION.IM_DIRS",
[image_dir, "/mnt/fair"],
"D2GO_DATA.DATASETS.COCO_INJECTION.JSON_FILES",
[json_file, "inj_ds2"],
]
]
)
runner.register(cfg)
inj_ds1 = DatasetCatalog.get("inj_ds1")
self.assertEqual(len(inj_ds1), 10)
for dic in inj_ds1:
self.assertEqual(dic["width"], 80)
self.assertEqual(dic["height"], 60)
@tempdir
def test_direct_copy_keys(self, tmp_dir):
image_dir, json_file = create_test_images_and_dataset_json(tmp_dir)
with tempfile.NamedTemporaryFile(prefix=tmp_dir, suffix=".json") as h_temp:
new_json_file = h_temp.name
with open(json_file, "r") as h_in:
ds = json.load(h_in)
for idx, x in enumerate(ds["images"]):
x["key1"] = idx
x["key2"] = idx
with open(new_json_file, "w") as h_out:
json.dump(ds, h_out)
loaded_ds = extended_coco.extended_coco_load(new_json_file, image_dir)
self.assertTrue("key1" not in loaded_ds[0])
self.assertTrue("key2" not in loaded_ds[0])
loaded_ds = extended_coco.extended_coco_load(
new_json_file, image_dir, image_direct_copy_keys=["key1"]
)
self.assertTrue("key1" in loaded_ds[0])
self.assertTrue("key2" not in loaded_ds[0])
@tempdir
def test_sub_dataset(self, tmp_dir):
image_dir, json_file = create_test_images_and_dataset_json(tmp_dir)
runner = Detectron2GoRunner()
cfg = runner.get_default_cfg()
cfg.merge_from_list(
[
str(x)
for x in [
"D2GO_DATA.DATASETS.COCO_INJECTION.NAMES",
["inj_ds3"],
"D2GO_DATA.DATASETS.COCO_INJECTION.IM_DIRS",
[image_dir],
"D2GO_DATA.DATASETS.COCO_INJECTION.JSON_FILES",
[json_file],
"DATASETS.TEST",
("inj_ds3",),
"D2GO_DATA.TEST.MAX_IMAGES",
1,
]
]
)
runner.register(cfg)
with maybe_subsample_n_images(cfg) as new_cfg:
test_loader = runner.build_detection_test_loader(
new_cfg, new_cfg.DATASETS.TEST[0]
)
self.assertEqual(len(test_loader), 1)
def test_coco_metadata_registry(self):
@KEYPOINT_METADATA_REGISTRY.register()
def TriangleMetadata():
return KeypointMetadata(
names=("A", "B", "C"),
flip_map=(
("A", "B"),
("B", "C"),
),
connection_rules=[
("A", "B", (102, 204, 255)),
("B", "C", (51, 153, 255)),
],
)
tri_md = get_keypoint_metadata("TriangleMetadata")
self.assertEqual(tri_md["keypoint_names"][0], "A")
self.assertEqual(tri_md["keypoint_flip_map"][0][0], "A")
self.assertEqual(tri_md["keypoint_connection_rules"][0][0], "A")
@tempdir
def test_coco_metadata_register(self, tmp_dir):
@KEYPOINT_METADATA_REGISTRY.register()
def LineMetadata():
return KeypointMetadata(
names=("A", "B"),
flip_map=(("A", "B"),),
connection_rules=[
("A", "B", (102, 204, 255)),
],
)
image_dir, json_file = create_test_images_and_dataset_json(tmp_dir)
runner = Detectron2GoRunner()
cfg = runner.get_default_cfg()
cfg.merge_from_list(
[
str(x)
for x in [
"D2GO_DATA.DATASETS.COCO_INJECTION.NAMES",
["inj_ds"],
"D2GO_DATA.DATASETS.COCO_INJECTION.IM_DIRS",
[image_dir],
"D2GO_DATA.DATASETS.COCO_INJECTION.JSON_FILES",
[json_file],
"D2GO_DATA.DATASETS.COCO_INJECTION.KEYPOINT_METADATA",
["LineMetadata"],
]
]
)
runner.register(cfg)
inj_md = MetadataCatalog.get("inj_ds")
self.assertEqual(inj_md.keypoint_names[0], "A")
self.assertEqual(inj_md.keypoint_flip_map[0][0], "A")
self.assertEqual(inj_md.keypoint_connection_rules[0][0], "A")
@tempdir
def test_coco_create_adhoc_class_to_use_dataset(self, tmp_dir):
image_dir, json_file = create_test_images_and_dataset_json(
tmp_dir, num_classes=2
)
runner = Detectron2GoRunner()
cfg = runner.get_default_cfg()
cfg.merge_from_list(
[
str(x)
for x in [
"D2GO_DATA.DATASETS.COCO_INJECTION.NAMES",
["test_adhoc_ds", "test_adhoc_ds2"],
"D2GO_DATA.DATASETS.COCO_INJECTION.IM_DIRS",
[image_dir, image_dir],
"D2GO_DATA.DATASETS.COCO_INJECTION.JSON_FILES",
[json_file, json_file],
]
]
)
runner.register(cfg)
# Test adhoc classes to use
AdhocDatasetManager.add(COCOWithClassesToUse("test_adhoc_ds", ["class_0"]))
ds_list = DatasetCatalog.get("test_adhoc_ds@1classes")
self.assertEqual(len(ds_list), 5)
# Test adhoc classes to use with suffix removal
AdhocDatasetManager.add(
COCOWithClassesToUse("test_adhoc_ds2@1classes", ["class_0"])
)
ds_list = DatasetCatalog.get("test_adhoc_ds2@1classes")
self.assertEqual(len(ds_list), 5)
@tempdir
def test_register_coco_dataset_registry(self, tmp_dir):
dummy_buffer = []
@COCO_REGISTER_FUNCTION_REGISTRY.register()
def _register_dummy_function_coco(dataset_name, split_dict):
dummy_buffer.append((dataset_name, split_dict))
image_dir, json_file = create_test_images_and_dataset_json(tmp_dir)
runner = Detectron2GoRunner()
cfg = runner.get_default_cfg()
cfg.merge_from_list(
[
str(x)
for x in [
"D2GO_DATA.DATASETS.COCO_INJECTION.NAMES",
["inj_test_registry"],
"D2GO_DATA.DATASETS.COCO_INJECTION.IM_DIRS",
[image_dir],
"D2GO_DATA.DATASETS.COCO_INJECTION.JSON_FILES",
[json_file],
"D2GO_DATA.DATASETS.COCO_INJECTION.REGISTER_FUNCTION",
"_register_dummy_function_coco",
]
]
)
runner.register(cfg)
self.assertTrue(len(dummy_buffer) == 1)
@tempdir
def test_adhoc_register_coco_dataset_registry(self, tmp_dir):
dummy_buffer = []
def _dummy_load_func():
return []
@COCO_REGISTER_FUNCTION_REGISTRY.register()
def _register_dummy_function_coco_adhoc(dataset_name, split_dict):
json_file = split_dict[ANN_FN]
image_root = split_dict[IM_DIR]
DatasetCatalog.register(dataset_name, _dummy_load_func)
MetadataCatalog.get(dataset_name).set(
evaluator_type="coco",
json_file=json_file,
image_root=image_root,
)
dummy_buffer.append((dataset_name, split_dict))
image_dir, json_file = create_test_images_and_dataset_json(tmp_dir)
runner = Detectron2GoRunner()
cfg = runner.get_default_cfg()
cfg.merge_from_list(
[
str(x)
for x in [
"D2GO_DATA.DATASETS.COCO_INJECTION.NAMES",
["inj_test_registry_adhoc"],
"D2GO_DATA.DATASETS.COCO_INJECTION.IM_DIRS",
[image_dir],
"D2GO_DATA.DATASETS.COCO_INJECTION.JSON_FILES",
[json_file],
"D2GO_DATA.DATASETS.COCO_INJECTION.REGISTER_FUNCTION",
"_register_dummy_function_coco_adhoc",
]
]
)
runner.register(cfg)
self.assertTrue(len(dummy_buffer) == 1)
# Add adhoc class that uses only the first class
AdhocDatasetManager.add(
COCOWithClassesToUse("inj_test_registry_adhoc", ["class_0"])
)
# Check that the correct register function is used
self.assertTrue(len(dummy_buffer) == 2)
| d2go-main | tests/data/test_d2go_datasets.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import numpy as np
import torch
from d2go.data.transforms import tensor as tensor_aug
from detectron2.data.transforms.augmentation import AugmentationList
class TestDataTransformsTensor(unittest.TestCase):
def test_tensor_aug(self):
"""Data augmentation that that allows torch.Tensor as input"""
img = torch.ones(3, 8, 6)
augs = [tensor_aug.Tensor2Array(), tensor_aug.Array2Tensor()]
inputs = tensor_aug.AugInput(image=img)
transforms = AugmentationList(augs)(inputs)
self.assertArrayEqual(img, inputs.image)
# inverse is the same as itself
out_img = transforms.inverse().apply_image(img)
self.assertArrayEqual(img, out_img)
def assertArrayEqual(self, a1, a2):
self.assertTrue(np.array_equal(a1, a2))
| d2go-main | tests/data/test_data_transforms_tensor.py |
d2go-main | tests/skip_init/__init__.py |