# ViT-Adapter-main/wsdm2023/configs/_base_/datasets/coco_panoptic.py
# Copyright (c) OpenMMLab. All rights reserved.
# dataset settings
dataset_type = 'CocoPanopticDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadPanopticAnnotations',
         with_bbox=True, with_mask=True, with_seg=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='SegRescale', scale_factor=1 / 4),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect',
         keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks',
               'gt_semantic_seg']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='MultiScaleFlipAug',
         img_scale=(1333, 800),
         flip=False,
         transforms=[
             dict(type='Resize', keep_ratio=True),
             dict(type='RandomFlip'),
             dict(type='Normalize', **img_norm_cfg),
             dict(type='Pad', size_divisor=32),
             dict(type='ImageToTensor', keys=['img']),
             dict(type='Collect', keys=['img']),
         ])
]
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(type=dataset_type,
               ann_file=data_root + 'annotations/panoptic_train2017.json',
               img_prefix=data_root + 'train2017/',
               seg_prefix=data_root + 'annotations/panoptic_train2017/',
               pipeline=train_pipeline),
    val=dict(type=dataset_type,
             ann_file=data_root + 'annotations/panoptic_val2017.json',
             img_prefix=data_root + 'val2017/',
             seg_prefix=data_root + 'annotations/panoptic_val2017/',
             pipeline=test_pipeline),
    test=dict(type=dataset_type,
              ann_file=data_root + 'annotations/panoptic_val2017.json',
              img_prefix=data_root + 'val2017/',
              seg_prefix=data_root + 'annotations/panoptic_val2017/',
              pipeline=test_pipeline))
evaluation = dict(interval=1, metric=['PQ'])
# ViT-Adapter-main/wsdm2023/configs/_base_/datasets/refcoco.py
# Copyright (c) OpenMMLab. All rights reserved.
# dataset settings
dataset_type = 'VGDataset'
data_root = 'data/refcoco/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='LoadRefer'),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlipWithRefer', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='TokenizeRefer', max_sent_len=128),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect',
         keys=['img', 'refer', 'r_mask', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadRefer'),
    dict(type='TokenizeRefer', max_sent_len=128),
    dict(type='MultiScaleFlipAug',
         img_scale=(1333, 800),
         flip=False,
         transforms=[
             dict(type='Resize', keep_ratio=True),
             dict(type='RandomFlipWithRefer'),
             dict(type='Normalize', **img_norm_cfg),
             dict(type='Pad', size_divisor=32),
             dict(type='ImageToTensor', keys=['img']),
             dict(type='TokenizeRefer', max_sent_len=128),
             dict(type='Collect', keys=['img', 'refer', 'r_mask']),
         ])
]
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(type=dataset_type,
               ann_file=data_root + 'refcoco/refcoco_train.json',
               img_prefix=data_root + 'images',
               pipeline=train_pipeline),
    val=dict(type=dataset_type,
             ann_file=data_root + 'refcoco/refcoco_val.json',
             img_prefix=data_root + 'images',
             pipeline=test_pipeline),
    test=dict(type=dataset_type,
              ann_file=data_root + 'refcoco/refcoco_testA.json',
              img_prefix=data_root + 'images',
              pipeline=test_pipeline))
evaluation = dict(interval=1, metric=['IoU', 'Acc'])
# ViT-Adapter-main/wsdm2023/configs/_base_/datasets/voc0712.py
# Copyright (c) OpenMMLab. All rights reserved.
# dataset settings
dataset_type = 'VOCDataset'
data_root = 'data/VOCdevkit/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1000, 600), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='MultiScaleFlipAug',
         img_scale=(1000, 600),
         flip=False,
         transforms=[
             dict(type='Resize', keep_ratio=True),
             dict(type='RandomFlip'),
             dict(type='Normalize', **img_norm_cfg),
             dict(type='Pad', size_divisor=32),
             dict(type='ImageToTensor', keys=['img']),
             dict(type='Collect', keys=['img']),
         ])
]
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type='RepeatDataset',
        times=3,
        dataset=dict(
            type=dataset_type,
            ann_file=[
                data_root + 'VOC2007/ImageSets/Main/trainval.txt',
                data_root + 'VOC2012/ImageSets/Main/trainval.txt'
            ],
            img_prefix=[data_root + 'VOC2007/', data_root + 'VOC2012/'],
            pipeline=train_pipeline)),
    val=dict(type=dataset_type,
             ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt',
             img_prefix=data_root + 'VOC2007/',
             pipeline=test_pipeline),
    test=dict(type=dataset_type,
              ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt',
              img_prefix=data_root + 'VOC2007/',
              pipeline=test_pipeline))
evaluation = dict(interval=1, metric='mAP')
# ViT-Adapter-main/wsdm2023/configs/_base_/datasets/deepfashion.py
# Copyright (c) OpenMMLab. All rights reserved.
# dataset settings
dataset_type = 'DeepFashionDataset'
data_root = 'data/DeepFashion/In-shop/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='Resize', img_scale=(750, 1101), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='MultiScaleFlipAug',
         img_scale=(750, 1101),
         flip=False,
         transforms=[
             dict(type='Resize', keep_ratio=True),
             dict(type='RandomFlip'),
             dict(type='Normalize', **img_norm_cfg),
             dict(type='Pad', size_divisor=32),
             dict(type='ImageToTensor', keys=['img']),
             dict(type='Collect', keys=['img']),
         ])
]
data = dict(
    imgs_per_gpu=2,
    workers_per_gpu=1,
    train=dict(type=dataset_type,
               ann_file=data_root +
               'annotations/DeepFashion_segmentation_query.json',
               img_prefix=data_root + 'Img/',
               pipeline=train_pipeline,
               data_root=data_root),
    val=dict(type=dataset_type,
             ann_file=data_root +
             'annotations/DeepFashion_segmentation_query.json',
             img_prefix=data_root + 'Img/',
             pipeline=test_pipeline,
             data_root=data_root),
    test=dict(type=dataset_type,
              ann_file=data_root +
              'annotations/DeepFashion_segmentation_gallery.json',
              img_prefix=data_root + 'Img/',
              pipeline=test_pipeline,
              data_root=data_root))
evaluation = dict(interval=5, metric=['bbox', 'segm'])
# ViT-Adapter-main/wsdm2023/configs/_base_/models/faster_rcnn_r50_caffe_c4.py
# model settings
norm_cfg = dict(type='BN', requires_grad=False)
model = dict(
    type='FasterRCNN',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=3,
        strides=(1, 2, 2),
        dilations=(1, 1, 1),
        out_indices=(2, ),
        frozen_stages=1,
        norm_cfg=norm_cfg,
        norm_eval=True,
        style='caffe',
        init_cfg=dict(type='Pretrained',
                      checkpoint='open-mmlab://detectron2/resnet50_caffe')),
    rpn_head=dict(
        type='RPNHead',
        in_channels=1024,
        feat_channels=1024,
        anchor_generator=dict(type='AnchorGenerator',
                              scales=[2, 4, 8, 16, 32],
                              ratios=[0.5, 1.0, 2.0],
                              strides=[16]),
        bbox_coder=dict(type='DeltaXYWHBBoxCoder',
                        target_means=[.0, .0, .0, .0],
                        target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=True,
                      loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
    roi_head=dict(
        type='StandardRoIHead',
        shared_head=dict(type='ResLayer',
                         depth=50,
                         stage=3,
                         stride=2,
                         dilation=1,
                         style='caffe',
                         norm_cfg=norm_cfg,
                         norm_eval=True),
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
            out_channels=1024,
            featmap_strides=[16]),
        bbox_head=dict(
            type='BBoxHead',
            with_avg_pool=True,
            roi_feat_size=7,
            in_channels=2048,
            num_classes=80,
            bbox_coder=dict(type='DeltaXYWHBBoxCoder',
                            target_means=[0., 0., 0., 0.],
                            target_stds=[0.1, 0.1, 0.2, 0.2]),
            reg_class_agnostic=False,
            loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=False,
                          loss_weight=1.0),
            loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(type='MaxIoUAssigner', pos_iou_thr=0.7,
                          neg_iou_thr=0.3, min_pos_iou=0.3,
                          match_low_quality=True, ignore_iof_thr=-1),
            sampler=dict(type='RandomSampler', num=256, pos_fraction=0.5,
                         neg_pos_ub=-1, add_gt_as_proposals=False),
            allowed_border=0,
            pos_weight=-1,
            debug=False),
        rpn_proposal=dict(nms_pre=12000,
                          max_per_img=2000,
                          nms=dict(type='nms', iou_threshold=0.7),
                          min_bbox_size=0),
        rcnn=dict(
            assigner=dict(type='MaxIoUAssigner', pos_iou_thr=0.5,
                          neg_iou_thr=0.5, min_pos_iou=0.5,
                          match_low_quality=False, ignore_iof_thr=-1),
            sampler=dict(type='RandomSampler', num=512, pos_fraction=0.25,
                         neg_pos_ub=-1, add_gt_as_proposals=True),
            pos_weight=-1,
            debug=False)),
    test_cfg=dict(
        rpn=dict(nms_pre=6000,
                 max_per_img=1000,
                 nms=dict(type='nms', iou_threshold=0.7),
                 min_bbox_size=0),
        rcnn=dict(score_thr=0.05,
                  nms=dict(type='nms', iou_threshold=0.5),
                  max_per_img=100)))
# ViT-Adapter-main/wsdm2023/configs/_base_/models/mask_rcnn_r50_caffe_c4.py
# model settings
norm_cfg = dict(type='BN', requires_grad=False)
model = dict(
    type='MaskRCNN',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=3,
        strides=(1, 2, 2),
        dilations=(1, 1, 1),
        out_indices=(2, ),
        frozen_stages=1,
        norm_cfg=norm_cfg,
        norm_eval=True,
        style='caffe',
        init_cfg=dict(type='Pretrained',
                      checkpoint='open-mmlab://detectron2/resnet50_caffe')),
    rpn_head=dict(
        type='RPNHead',
        in_channels=1024,
        feat_channels=1024,
        anchor_generator=dict(type='AnchorGenerator',
                              scales=[2, 4, 8, 16, 32],
                              ratios=[0.5, 1.0, 2.0],
                              strides=[16]),
        bbox_coder=dict(type='DeltaXYWHBBoxCoder',
                        target_means=[.0, .0, .0, .0],
                        target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=True,
                      loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
    roi_head=dict(
        type='StandardRoIHead',
        shared_head=dict(type='ResLayer',
                         depth=50,
                         stage=3,
                         stride=2,
                         dilation=1,
                         style='caffe',
                         norm_cfg=norm_cfg,
                         norm_eval=True),
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
            out_channels=1024,
            featmap_strides=[16]),
        bbox_head=dict(
            type='BBoxHead',
            with_avg_pool=True,
            roi_feat_size=7,
            in_channels=2048,
            num_classes=80,
            bbox_coder=dict(type='DeltaXYWHBBoxCoder',
                            target_means=[0., 0., 0., 0.],
                            target_stds=[0.1, 0.1, 0.2, 0.2]),
            reg_class_agnostic=False,
            loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=False,
                          loss_weight=1.0),
            loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
        mask_roi_extractor=None,
        mask_head=dict(
            type='FCNMaskHead',
            num_convs=0,
            in_channels=2048,
            conv_out_channels=256,
            num_classes=80,
            loss_mask=dict(type='CrossEntropyLoss', use_mask=True,
                           loss_weight=1.0))),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(type='MaxIoUAssigner', pos_iou_thr=0.7,
                          neg_iou_thr=0.3, min_pos_iou=0.3,
                          match_low_quality=True, ignore_iof_thr=-1),
            sampler=dict(type='RandomSampler', num=256, pos_fraction=0.5,
                         neg_pos_ub=-1, add_gt_as_proposals=False),
            allowed_border=0,
            pos_weight=-1,
            debug=False),
        rpn_proposal=dict(nms_pre=12000,
                          max_per_img=2000,
                          nms=dict(type='nms', iou_threshold=0.7),
                          min_bbox_size=0),
        rcnn=dict(
            assigner=dict(type='MaxIoUAssigner', pos_iou_thr=0.5,
                          neg_iou_thr=0.5, min_pos_iou=0.5,
                          match_low_quality=False, ignore_iof_thr=-1),
            sampler=dict(type='RandomSampler', num=512, pos_fraction=0.25,
                         neg_pos_ub=-1, add_gt_as_proposals=True),
            mask_size=14,
            pos_weight=-1,
            debug=False)),
    test_cfg=dict(
        rpn=dict(nms_pre=6000,
                 nms=dict(type='nms', iou_threshold=0.7),
                 max_per_img=1000,
                 min_bbox_size=0),
        rcnn=dict(score_thr=0.05,
                  nms=dict(type='nms', iou_threshold=0.5),
                  max_per_img=100,
                  mask_thr_binary=0.5)))
# ViT-Adapter-main/wsdm2023/configs/_base_/models/ssd300.py
# model settings
input_size = 300
model = dict(
    type='SingleStageDetector',
    backbone=dict(
        type='SSDVGG',
        depth=16,
        with_last_pool=False,
        ceil_mode=True,
        out_indices=(3, 4),
        out_feature_indices=(22, 34),
        init_cfg=dict(type='Pretrained',
                      checkpoint='open-mmlab://vgg16_caffe')),
    neck=dict(
        type='SSDNeck',
        in_channels=(512, 1024),
        out_channels=(512, 1024, 512, 256, 256, 256),
        level_strides=(2, 2, 1, 1),
        level_paddings=(1, 1, 0, 0),
        l2_norm_scale=20),
    bbox_head=dict(
        type='SSDHead',
        in_channels=(512, 1024, 512, 256, 256, 256),
        num_classes=80,
        anchor_generator=dict(
            type='SSDAnchorGenerator',
            scale_major=False,
            input_size=input_size,
            basesize_ratio_range=(0.15, 0.9),
            strides=[8, 16, 32, 64, 100, 300],
            ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]),
        bbox_coder=dict(type='DeltaXYWHBBoxCoder',
                        target_means=[.0, .0, .0, .0],
                        target_stds=[0.1, 0.1, 0.2, 0.2])),
    # model training and testing settings
    train_cfg=dict(
        assigner=dict(type='MaxIoUAssigner', pos_iou_thr=0.5,
                      neg_iou_thr=0.5, min_pos_iou=0., ignore_iof_thr=-1,
                      gt_max_assign_all=False),
        smoothl1_beta=1.,
        allowed_border=-1,
        pos_weight=-1,
        neg_pos_ratio=3,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        nms=dict(type='nms', iou_threshold=0.45),
        min_bbox_size=0,
        score_thr=0.02,
        max_per_img=200))
cudnn_benchmark = True
# ViT-Adapter-main/wsdm2023/configs/_base_/models/cascade_rcnn_r50_fpn.py
# model settings
model = dict(
    type='CascadeRCNN',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet50')),
    neck=dict(type='FPN',
              in_channels=[256, 512, 1024, 2048],
              out_channels=256,
              num_outs=5),
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_generator=dict(type='AnchorGenerator',
                              scales=[8],
                              ratios=[0.5, 1.0, 2.0],
                              strides=[4, 8, 16, 32, 64]),
        bbox_coder=dict(type='DeltaXYWHBBoxCoder',
                        target_means=[.0, .0, .0, .0],
                        target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=True,
                      loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
    roi_head=dict(
        type='CascadeRoIHead',
        num_stages=3,
        stage_loss_weights=[1, 0.5, 0.25],
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        bbox_head=[
            dict(type='Shared2FCBBoxHead',
                 in_channels=256,
                 fc_out_channels=1024,
                 roi_feat_size=7,
                 num_classes=80,
                 bbox_coder=dict(type='DeltaXYWHBBoxCoder',
                                 target_means=[0., 0., 0., 0.],
                                 target_stds=[0.1, 0.1, 0.2, 0.2]),
                 reg_class_agnostic=True,
                 loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=False,
                               loss_weight=1.0),
                 loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                                loss_weight=1.0)),
            dict(type='Shared2FCBBoxHead',
                 in_channels=256,
                 fc_out_channels=1024,
                 roi_feat_size=7,
                 num_classes=80,
                 bbox_coder=dict(type='DeltaXYWHBBoxCoder',
                                 target_means=[0., 0., 0., 0.],
                                 target_stds=[0.05, 0.05, 0.1, 0.1]),
                 reg_class_agnostic=True,
                 loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=False,
                               loss_weight=1.0),
                 loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                                loss_weight=1.0)),
            dict(type='Shared2FCBBoxHead',
                 in_channels=256,
                 fc_out_channels=1024,
                 roi_feat_size=7,
                 num_classes=80,
                 bbox_coder=dict(type='DeltaXYWHBBoxCoder',
                                 target_means=[0., 0., 0., 0.],
                                 target_stds=[0.033, 0.033, 0.067, 0.067]),
                 reg_class_agnostic=True,
                 loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=False,
                               loss_weight=1.0),
                 loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                                loss_weight=1.0))
        ]),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(type='MaxIoUAssigner', pos_iou_thr=0.7,
                          neg_iou_thr=0.3, min_pos_iou=0.3,
                          match_low_quality=True, ignore_iof_thr=-1),
            sampler=dict(type='RandomSampler', num=256, pos_fraction=0.5,
                         neg_pos_ub=-1, add_gt_as_proposals=False),
            allowed_border=0,
            pos_weight=-1,
            debug=False),
        rpn_proposal=dict(nms_pre=2000,
                          max_per_img=2000,
                          nms=dict(type='nms', iou_threshold=0.7),
                          min_bbox_size=0),
        rcnn=[
            dict(assigner=dict(type='MaxIoUAssigner', pos_iou_thr=0.5,
                               neg_iou_thr=0.5, min_pos_iou=0.5,
                               match_low_quality=False, ignore_iof_thr=-1),
                 sampler=dict(type='RandomSampler', num=512,
                              pos_fraction=0.25, neg_pos_ub=-1,
                              add_gt_as_proposals=True),
                 pos_weight=-1,
                 debug=False),
            dict(assigner=dict(type='MaxIoUAssigner', pos_iou_thr=0.6,
                               neg_iou_thr=0.6, min_pos_iou=0.6,
                               match_low_quality=False, ignore_iof_thr=-1),
                 sampler=dict(type='RandomSampler', num=512,
                              pos_fraction=0.25, neg_pos_ub=-1,
                              add_gt_as_proposals=True),
                 pos_weight=-1,
                 debug=False),
            dict(assigner=dict(type='MaxIoUAssigner', pos_iou_thr=0.7,
                               neg_iou_thr=0.7, min_pos_iou=0.7,
                               match_low_quality=False, ignore_iof_thr=-1),
                 sampler=dict(type='RandomSampler', num=512,
                              pos_fraction=0.25, neg_pos_ub=-1,
                              add_gt_as_proposals=True),
                 pos_weight=-1,
                 debug=False)
        ]),
    test_cfg=dict(
        rpn=dict(nms_pre=1000,
                 max_per_img=1000,
                 nms=dict(type='nms', iou_threshold=0.7),
                 min_bbox_size=0),
        rcnn=dict(score_thr=0.05,
                  nms=dict(type='nms', iou_threshold=0.5),
                  max_per_img=100)))
# ViT-Adapter-main/wsdm2023/configs/_base_/models/fast_rcnn_r50_fpn.py
# model settings
model = dict(
    type='FastRCNN',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet50')),
    neck=dict(type='FPN',
              in_channels=[256, 512, 1024, 2048],
              out_channels=256,
              num_outs=5),
    roi_head=dict(
        type='StandardRoIHead',
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        bbox_head=dict(
            type='Shared2FCBBoxHead',
            in_channels=256,
            fc_out_channels=1024,
            roi_feat_size=7,
            num_classes=80,
            bbox_coder=dict(type='DeltaXYWHBBoxCoder',
                            target_means=[0., 0., 0., 0.],
                            target_stds=[0.1, 0.1, 0.2, 0.2]),
            reg_class_agnostic=False,
            loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=False,
                          loss_weight=1.0),
            loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
    # model training and testing settings
    train_cfg=dict(
        rcnn=dict(
            assigner=dict(type='MaxIoUAssigner', pos_iou_thr=0.5,
                          neg_iou_thr=0.5, min_pos_iou=0.5,
                          match_low_quality=False, ignore_iof_thr=-1),
            sampler=dict(type='RandomSampler', num=512, pos_fraction=0.25,
                         neg_pos_ub=-1, add_gt_as_proposals=True),
            pos_weight=-1,
            debug=False)),
    test_cfg=dict(
        rcnn=dict(score_thr=0.05,
                  nms=dict(type='nms', iou_threshold=0.5),
                  max_per_img=100)))
# ViT-Adapter-main/wsdm2023/configs/_base_/models/rpn_r50_fpn.py
# model settings
model = dict(
    type='RPN',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet50')),
    neck=dict(type='FPN',
              in_channels=[256, 512, 1024, 2048],
              out_channels=256,
              num_outs=5),
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_generator=dict(type='AnchorGenerator',
                              scales=[8],
                              ratios=[0.5, 1.0, 2.0],
                              strides=[4, 8, 16, 32, 64]),
        bbox_coder=dict(type='DeltaXYWHBBoxCoder',
                        target_means=[.0, .0, .0, .0],
                        target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=True,
                      loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(type='MaxIoUAssigner', pos_iou_thr=0.7,
                          neg_iou_thr=0.3, min_pos_iou=0.3,
                          ignore_iof_thr=-1),
            sampler=dict(type='RandomSampler', num=256, pos_fraction=0.5,
                         neg_pos_ub=-1, add_gt_as_proposals=False),
            allowed_border=0,
            pos_weight=-1,
            debug=False)),
    test_cfg=dict(
        rpn=dict(nms_pre=2000,
                 max_per_img=1000,
                 nms=dict(type='nms', iou_threshold=0.7),
                 min_bbox_size=0)))
# ViT-Adapter-main/wsdm2023/configs/_base_/models/mask_rcnn_convnext_fpn.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

# model settings
model = dict(
    type='MaskRCNN',
    pretrained=None,
    backbone=dict(
        type='ConvNeXt',
        in_chans=3,
        depths=[3, 3, 9, 3],
        dims=[96, 192, 384, 768],
        drop_path_rate=0.2,
        layer_scale_init_value=1e-6,
        out_indices=[0, 1, 2, 3],
    ),
    neck=dict(type='FPN',
              in_channels=[128, 256, 512, 1024],
              out_channels=256,
              num_outs=5),
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_generator=dict(type='AnchorGenerator',
                              scales=[8],
                              ratios=[0.5, 1.0, 2.0],
                              strides=[4, 8, 16, 32, 64]),
        bbox_coder=dict(type='DeltaXYWHBBoxCoder',
                        target_means=[.0, .0, .0, .0],
                        target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=True,
                      loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
    roi_head=dict(
        type='StandardRoIHead',
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        bbox_head=dict(
            type='Shared2FCBBoxHead',
            in_channels=256,
            fc_out_channels=1024,
            roi_feat_size=7,
            num_classes=80,
            bbox_coder=dict(type='DeltaXYWHBBoxCoder',
                            target_means=[0., 0., 0., 0.],
                            target_stds=[0.1, 0.1, 0.2, 0.2]),
            reg_class_agnostic=False,
            loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=False,
                          loss_weight=1.0),
            loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
        mask_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        mask_head=dict(
            type='FCNMaskHead',
            num_convs=4,
            in_channels=256,
            conv_out_channels=256,
            num_classes=80,
            loss_mask=dict(type='CrossEntropyLoss', use_mask=True,
                           loss_weight=1.0))),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(type='MaxIoUAssigner', pos_iou_thr=0.7,
                          neg_iou_thr=0.3, min_pos_iou=0.3,
                          match_low_quality=True, ignore_iof_thr=-1),
            sampler=dict(type='RandomSampler', num=256, pos_fraction=0.5,
                         neg_pos_ub=-1, add_gt_as_proposals=False),
            allowed_border=-1,
            pos_weight=-1,
            debug=False),
        rpn_proposal=dict(nms_pre=2000,
                          max_per_img=1000,
                          nms=dict(type='nms', iou_threshold=0.7),
                          min_bbox_size=0),
        rcnn=dict(
            assigner=dict(type='MaxIoUAssigner', pos_iou_thr=0.5,
                          neg_iou_thr=0.5, min_pos_iou=0.5,
                          match_low_quality=True, ignore_iof_thr=-1),
            sampler=dict(type='RandomSampler', num=512, pos_fraction=0.25,
                         neg_pos_ub=-1, add_gt_as_proposals=True),
            mask_size=28,
            pos_weight=-1,
            debug=False)),
    test_cfg=dict(
        rpn=dict(nms_pre=1000,
                 max_per_img=1000,
                 nms=dict(type='nms', iou_threshold=0.7),
                 min_bbox_size=0),
        rcnn=dict(score_thr=0.05,
                  nms=dict(type='nms', iou_threshold=0.5),
                  max_per_img=100,
                  mask_thr_binary=0.5)))
# ViT-Adapter-main/wsdm2023/configs/_base_/models/retinanet_r50_fpn.py
# model settings
model = dict(
    type='RetinaNet',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet50')),
    neck=dict(type='FPN',
              in_channels=[256, 512, 1024, 2048],
              out_channels=256,
              start_level=1,
              add_extra_convs='on_input',
              num_outs=5),
    bbox_head=dict(
        type='RetinaHead',
        num_classes=80,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        anchor_generator=dict(type='AnchorGenerator',
                              octave_base_scale=4,
                              scales_per_octave=3,
                              ratios=[0.5, 1.0, 2.0],
                              strides=[8, 16, 32, 64, 128]),
        bbox_coder=dict(type='DeltaXYWHBBoxCoder',
                        target_means=[.0, .0, .0, .0],
                        target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(type='FocalLoss',
                      use_sigmoid=True,
                      gamma=2.0,
                      alpha=0.25,
                      loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
    # model training and testing settings
    train_cfg=dict(
        assigner=dict(type='MaxIoUAssigner', pos_iou_thr=0.5,
                      neg_iou_thr=0.4, min_pos_iou=0, ignore_iof_thr=-1),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.5),
        max_per_img=100))
# ViT-Adapter-main/wsdm2023/configs/_base_/models/faster_rcnn_r50_fpn.py
# model settings
model = dict(
    type='FasterRCNN',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet50')),
    neck=dict(type='FPN',
              in_channels=[256, 512, 1024, 2048],
              out_channels=256,
              num_outs=5),
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_generator=dict(type='AnchorGenerator',
                              scales=[8],
                              ratios=[0.5, 1.0, 2.0],
                              strides=[4, 8, 16, 32, 64]),
        bbox_coder=dict(type='DeltaXYWHBBoxCoder',
                        target_means=[.0, .0, .0, .0],
                        target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=True,
                      loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
    roi_head=dict(
        type='StandardRoIHead',
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        bbox_head=dict(
            type='Shared2FCBBoxHead',
            in_channels=256,
            fc_out_channels=1024,
            roi_feat_size=7,
            num_classes=80,
            bbox_coder=dict(type='DeltaXYWHBBoxCoder',
                            target_means=[0., 0., 0., 0.],
                            target_stds=[0.1, 0.1, 0.2, 0.2]),
            reg_class_agnostic=False,
            loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=False,
                          loss_weight=1.0),
            loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(type='MaxIoUAssigner', pos_iou_thr=0.7,
                          neg_iou_thr=0.3, min_pos_iou=0.3,
                          match_low_quality=True, ignore_iof_thr=-1),
            sampler=dict(type='RandomSampler', num=256, pos_fraction=0.5,
                         neg_pos_ub=-1, add_gt_as_proposals=False),
            allowed_border=-1,
            pos_weight=-1,
            debug=False),
        rpn_proposal=dict(nms_pre=2000,
                          max_per_img=1000,
                          nms=dict(type='nms', iou_threshold=0.7),
                          min_bbox_size=0),
        rcnn=dict(
            assigner=dict(type='MaxIoUAssigner', pos_iou_thr=0.5,
                          neg_iou_thr=0.5, min_pos_iou=0.5,
                          match_low_quality=False, ignore_iof_thr=-1),
            sampler=dict(type='RandomSampler', num=512, pos_fraction=0.25,
                         neg_pos_ub=-1, add_gt_as_proposals=True),
            pos_weight=-1,
            debug=False)),
    test_cfg=dict(
        rpn=dict(nms_pre=1000,
                 max_per_img=1000,
                 nms=dict(type='nms', iou_threshold=0.7),
                 min_bbox_size=0),
        rcnn=dict(score_thr=0.05,
                  nms=dict(type='nms', iou_threshold=0.5),
                  max_per_img=100)
        # soft-nms is also supported for rcnn testing
        # e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05)
    ))
# ViT-Adapter-main/wsdm2023/configs/_base_/models/rpn_r50_caffe_c4.py
# model settings
model = dict(
    type='RPN',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=3,
        strides=(1, 2, 2),
        dilations=(1, 1, 1),
        out_indices=(2, ),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=False),
        norm_eval=True,
        style='caffe',
        init_cfg=dict(type='Pretrained',
                      checkpoint='open-mmlab://detectron2/resnet50_caffe')),
    neck=None,
    rpn_head=dict(
        type='RPNHead',
        in_channels=1024,
        feat_channels=1024,
        anchor_generator=dict(type='AnchorGenerator',
                              scales=[2, 4, 8, 16, 32],
                              ratios=[0.5, 1.0, 2.0],
                              strides=[16]),
        bbox_coder=dict(type='DeltaXYWHBBoxCoder',
                        target_means=[.0, .0, .0, .0],
                        target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=True,
                      loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(type='MaxIoUAssigner', pos_iou_thr=0.7,
                          neg_iou_thr=0.3, min_pos_iou=0.3,
                          ignore_iof_thr=-1),
            sampler=dict(type='RandomSampler', num=256, pos_fraction=0.5,
                         neg_pos_ub=-1, add_gt_as_proposals=False),
            allowed_border=0,
            pos_weight=-1,
            debug=False)),
    test_cfg=dict(
        rpn=dict(nms_pre=12000,
                 max_per_img=2000,
                 nms=dict(type='nms', iou_threshold=0.7),
                 min_bbox_size=0)))
# ViT-Adapter-main/wsdm2023/configs/_base_/models/faster_rcnn_r50_caffe_dc5.py
# model settings
norm_cfg = dict(type='BN', requires_grad=False)
model = dict(
    type='FasterRCNN',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        strides=(1, 2, 2, 1),
        dilations=(1, 1, 1, 2),
        out_indices=(3, ),
        frozen_stages=1,
        norm_cfg=norm_cfg,
        norm_eval=True,
        style='caffe',
        init_cfg=dict(type='Pretrained',
                      checkpoint='open-mmlab://detectron2/resnet50_caffe')),
    rpn_head=dict(
        type='RPNHead',
        in_channels=2048,
        feat_channels=2048,
        anchor_generator=dict(type='AnchorGenerator',
                              scales=[2, 4, 8, 16, 32],
                              ratios=[0.5, 1.0, 2.0],
                              strides=[16]),
        bbox_coder=dict(type='DeltaXYWHBBoxCoder',
                        target_means=[.0, .0, .0, .0],
                        target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=True,
                      loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
    roi_head=dict(
        type='StandardRoIHead',
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
            out_channels=2048,
            featmap_strides=[16]),
        bbox_head=dict(
            type='Shared2FCBBoxHead',
            in_channels=2048,
            fc_out_channels=1024,
            roi_feat_size=7,
            num_classes=80,
            bbox_coder=dict(type='DeltaXYWHBBoxCoder',
                            target_means=[0., 0., 0., 0.],
                            target_stds=[0.1, 0.1, 0.2, 0.2]),
            reg_class_agnostic=False,
            loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=False,
                          loss_weight=1.0),
            loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(type='MaxIoUAssigner', pos_iou_thr=0.7,
                          neg_iou_thr=0.3, min_pos_iou=0.3,
                          match_low_quality=True, ignore_iof_thr=-1),
            sampler=dict(type='RandomSampler', num=256, pos_fraction=0.5,
                         neg_pos_ub=-1, add_gt_as_proposals=False),
            allowed_border=0,
            pos_weight=-1,
            debug=False),
        rpn_proposal=dict(nms_pre=12000,
                          max_per_img=2000,
                          nms=dict(type='nms', iou_threshold=0.7),
                          min_bbox_size=0),
        rcnn=dict(
            assigner=dict(type='MaxIoUAssigner', pos_iou_thr=0.5,
                          neg_iou_thr=0.5, min_pos_iou=0.5,
                          match_low_quality=False, ignore_iof_thr=-1),
            sampler=dict(type='RandomSampler', num=512, pos_fraction=0.25,
                         neg_pos_ub=-1, add_gt_as_proposals=True),
            pos_weight=-1,
            debug=False)),
    test_cfg=dict(
        rpn=dict(nms=dict(type='nms', iou_threshold=0.7),
                 nms_pre=6000,
                 max_per_img=1000,
                 min_bbox_size=0),
        rcnn=dict(score_thr=0.05,
                  nms=dict(type='nms', iou_threshold=0.5),
                  max_per_img=100)))
# ViT-Adapter-main/wsdm2023/configs/_base_/models/cascade_mask_rcnn_r50_fpn.py
# model settings
model = dict(
    type='CascadeRCNN',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet50')),
    neck=dict(type='FPN',
              in_channels=[256, 512, 1024, 2048],
              out_channels=256,
              num_outs=5),
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_generator=dict(type='AnchorGenerator',
                              scales=[8],
                              ratios=[0.5, 1.0, 2.0],
                              strides=[4, 8, 16, 32, 64]),
        bbox_coder=dict(type='DeltaXYWHBBoxCoder',
                        target_means=[.0, .0, .0, .0],
                        target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=True,
                      loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
    roi_head=dict(
        type='CascadeRoIHead',
        num_stages=3,
        stage_loss_weights=[1, 0.5, 0.25],
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        bbox_head=[
            dict(type='Shared2FCBBoxHead',
                 in_channels=256,
                 fc_out_channels=1024,
                 roi_feat_size=7,
                 num_classes=80,
                 bbox_coder=dict(type='DeltaXYWHBBoxCoder',
                                 target_means=[0., 0., 0., 0.],
                                 target_stds=[0.1, 0.1, 0.2, 0.2]),
                 reg_class_agnostic=True,
                 loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=False,
                               loss_weight=1.0),
                 loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                                loss_weight=1.0)),
            dict(type='Shared2FCBBoxHead',
                 in_channels=256,
                 fc_out_channels=1024,
                 roi_feat_size=7,
                 num_classes=80,
                 bbox_coder=dict(type='DeltaXYWHBBoxCoder',
                                 target_means=[0., 0., 0., 0.],
                                 target_stds=[0.05, 0.05, 0.1, 0.1]),
                 reg_class_agnostic=True,
                 loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=False,
                               loss_weight=1.0),
                 loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                                loss_weight=1.0)),
            dict(type='Shared2FCBBoxHead',
                 in_channels=256,
                 fc_out_channels=1024,
                 roi_feat_size=7,
                 num_classes=80,
                 bbox_coder=dict(type='DeltaXYWHBBoxCoder',
                                 target_means=[0., 0., 0., 0.],
                                 target_stds=[0.033, 0.033, 0.067, 0.067]),
                 reg_class_agnostic=True,
                 loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=False,
                               loss_weight=1.0),
                 loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                                loss_weight=1.0))
        ],
        mask_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        mask_head=dict(
            type='FCNMaskHead',
            num_convs=4,
            in_channels=256,
            conv_out_channels=256,
            num_classes=80,
            loss_mask=dict(type='CrossEntropyLoss', use_mask=True,
                           loss_weight=1.0))),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(type='MaxIoUAssigner', pos_iou_thr=0.7,
                          neg_iou_thr=0.3, min_pos_iou=0.3,
                          match_low_quality=True, ignore_iof_thr=-1),
            sampler=dict(type='RandomSampler', num=256, pos_fraction=0.5,
                         neg_pos_ub=-1, add_gt_as_proposals=False),
            allowed_border=0,
            pos_weight=-1,
            debug=False),
        rpn_proposal=dict(nms_pre=2000,
                          max_per_img=2000,
                          nms=dict(type='nms', iou_threshold=0.7),
                          min_bbox_size=0),
        rcnn=[
            dict(assigner=dict(type='MaxIoUAssigner', pos_iou_thr=0.5,
                               neg_iou_thr=0.5, min_pos_iou=0.5,
                               match_low_quality=False, ignore_iof_thr=-1),
                 sampler=dict(type='RandomSampler', num=512,
                              pos_fraction=0.25, neg_pos_ub=-1,
                              add_gt_as_proposals=True),
                 mask_size=28,
                 pos_weight=-1,
                 debug=False),
            dict(assigner=dict(type='MaxIoUAssigner', pos_iou_thr=0.6,
                               neg_iou_thr=0.6, min_pos_iou=0.6,
                               match_low_quality=False, ignore_iof_thr=-1),
                 sampler=dict(type='RandomSampler', num=512,
                              pos_fraction=0.25, neg_pos_ub=-1,
                              add_gt_as_proposals=True),
                 mask_size=28,
                 pos_weight=-1,
                 debug=False),
            dict(assigner=dict(type='MaxIoUAssigner', pos_iou_thr=0.7,
                               neg_iou_thr=0.7, min_pos_iou=0.7,
                               match_low_quality=False, ignore_iof_thr=-1),
                 sampler=dict(type='RandomSampler', num=512,
                              pos_fraction=0.25, neg_pos_ub=-1,
                              add_gt_as_proposals=True),
                 mask_size=28,
                 pos_weight=-1,
                 debug=False)
        ]),
    test_cfg=dict(
        rpn=dict(nms_pre=1000,
                 max_per_img=1000,
                 nms=dict(type='nms', iou_threshold=0.7),
                 min_bbox_size=0),
        rcnn=dict(score_thr=0.05,
                  nms=dict(type='nms', iou_threshold=0.5),
                  max_per_img=100,
                  mask_thr_binary=0.5)))
# ViT-Adapter-main/wsdm2023/configs/_base_/models/mask_rcnn_r50_fpn.py
# model settings
model = dict(
    type='MaskRCNN',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet50')),
    neck=dict(type='FPN',
              in_channels=[256, 512, 1024, 2048],
              out_channels=256,
              num_outs=5),
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_generator=dict(type='AnchorGenerator',
                              scales=[8],
                              ratios=[0.5, 1.0, 2.0],
                              strides=[4, 8, 16, 32, 64]),
        bbox_coder=dict(type='DeltaXYWHBBoxCoder',
                        target_means=[.0, .0, .0, .0],
                        target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=True,
                      loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
    roi_head=dict(
        type='StandardRoIHead',
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        bbox_head=dict(
            type='Shared2FCBBoxHead',
            in_channels=256,
            fc_out_channels=1024,
            roi_feat_size=7,
            num_classes=80,
            bbox_coder=dict(type='DeltaXYWHBBoxCoder',
                            target_means=[0., 0., 0., 0.],
                            target_stds=[0.1, 0.1, 0.2, 0.2]),
            reg_class_agnostic=False,
            loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=False,
                          loss_weight=1.0),
            loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
        mask_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        mask_head=dict(
            type='FCNMaskHead',
            num_convs=4,
            in_channels=256,
            conv_out_channels=256,
            num_classes=80,
            loss_mask=dict(type='CrossEntropyLoss', use_mask=True,
                           loss_weight=1.0))),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(type='MaxIoUAssigner', pos_iou_thr=0.7,
                          neg_iou_thr=0.3, min_pos_iou=0.3,
                          match_low_quality=True, ignore_iof_thr=-1),
            sampler=dict(type='RandomSampler', num=256, pos_fraction=0.5,
                         neg_pos_ub=-1, add_gt_as_proposals=False),
            allowed_border=-1,
            pos_weight=-1,
            debug=False),
        rpn_proposal=dict(nms_pre=2000,
                          max_per_img=1000,
                          nms=dict(type='nms', iou_threshold=0.7),
                          min_bbox_size=0),
        rcnn=dict(
            assigner=dict(type='MaxIoUAssigner', pos_iou_thr=0.5,
                          neg_iou_thr=0.5, min_pos_iou=0.5,
                          match_low_quality=True, ignore_iof_thr=-1),
            sampler=dict(type='RandomSampler', num=512, pos_fraction=0.25,
                         neg_pos_ub=-1, add_gt_as_proposals=True),
            mask_size=28,
            pos_weight=-1,
            debug=False)),
    test_cfg=dict(
        rpn=dict(nms_pre=1000,
                 max_per_img=1000,
                 nms=dict(type='nms', iou_threshold=0.7),
                 min_bbox_size=0),
        rcnn=dict(score_thr=0.05,
                  nms=dict(type='nms', iou_threshold=0.5),
                  max_per_img=100,
                  mask_thr_binary=0.5)))
# ViT-Adapter-main/wsdm2023/configs/_base_/schedules/schedule_1x.py
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(policy='step',
                 warmup='linear',
                 warmup_iters=500,
                 warmup_ratio=0.001,
                 step=[8, 11])
runner = dict(type='EpochBasedRunner', max_epochs=12)
# ViT-Adapter-main/wsdm2023/configs/_base_/schedules/schedule_2x.py
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(policy='step',
                 warmup='linear',
                 warmup_iters=500,
                 warmup_ratio=0.001,
                 step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
# ViT-Adapter-main/wsdm2023/configs/_base_/schedules/schedule_3x.py
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(policy='step',
                 warmup='linear',
                 warmup_iters=500,
                 warmup_ratio=0.001,
                 step=[27, 33])
runner = dict(type='EpochBasedRunner', max_epochs=36)
# ViT-Adapter-main/wsdm2023/configs/_base_/schedules/schedule_6x.py
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(policy='step',
                 warmup='linear',
                 warmup_iters=2000,
                 warmup_ratio=0.001,
                 step=[62, 68])
runner = dict(type='EpochBasedRunner', max_epochs=72)
# ViT-Adapter-main/wsdm2023/configs/_base_/schedules/schedule_20e.py
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(policy='step',
                 warmup='linear',
                 warmup_iters=500,
                 warmup_ratio=0.001,
                 step=[16, 19])
runner = dict(type='EpochBasedRunner', max_epochs=20)
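# Note (added for clarity; a sketch of how mmcv's step LR policy behaves with
# the 20e settings above): the learning rate warms up linearly from
# 0.02 * 0.001 = 2e-5 to 0.02 over the first 500 iterations, then is
# multiplied by 0.1 at epoch 16 (lr = 0.002) and again at epoch 19
# (lr = 0.0002), with training ending after epoch 20.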
# ViT-Adapter-main/detection/image_demo.py
# Copyright (c) OpenMMLab. All rights reserved.
import asyncio
import os.path as osp
from argparse import ArgumentParser

import mmcv
import mmcv_custom  # noqa: F401,F403
import mmdet_custom  # noqa: F401,F403
from mmdet.apis import (async_inference_detector, inference_detector,
                        init_detector, show_result_pyplot)


def parse_args():
    parser = ArgumentParser()
    parser.add_argument('img', help='Image file')
    parser.add_argument('config', help='Config file')
    parser.add_argument('checkpoint', help='Checkpoint file')
    parser.add_argument('--out', type=str, default='demo', help='out dir')
    parser.add_argument('--device', default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--palette',
                        default='coco',
                        choices=['coco', 'voc', 'citys', 'random'],
                        help='Color palette used for visualization')
    parser.add_argument('--score-thr', type=float, default=0.3,
                        help='bbox score threshold')
    parser.add_argument(
        '--async-test',
        action='store_true',
        help='whether to set async options for async inference.')
    args = parser.parse_args()
    return args


def main(args):
    # build the model from a config file and a checkpoint file
    model = init_detector(args.config, args.checkpoint, device=args.device)
    # test a single image
    result = inference_detector(model, args.img)
    mmcv.mkdir_or_exist(args.out)
    out_file = osp.join(args.out, osp.basename(args.img))
    # show the results
    model.show_result(args.img,
                      result,
                      score_thr=args.score_thr,
                      show=False,
                      bbox_color=args.palette,
                      text_color=(200, 200, 200),
                      mask_color=args.palette,
                      out_file=out_file)


async def async_main(args):
    # build the model from a config file and a checkpoint file
    model = init_detector(args.config, args.checkpoint, device=args.device)
    # test a single image
    tasks = asyncio.create_task(async_inference_detector(model, args.img))
    result = await asyncio.gather(tasks)
    # show the results
    show_result_pyplot(model,
                       args.img,
                       result[0],
                       palette=args.palette,
                       score_thr=args.score_thr)


if __name__ == '__main__':
    args = parse_args()
    if args.async_test:
        asyncio.run(async_main(args))
    else:
        main(args)
# ViT-Adapter-main/detection/test.py
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import time
import warnings

import mmcv
import mmcv_custom  # noqa: F401,F403
import mmdet_custom  # noqa: F401,F403
import torch
from mmcv import Config, DictAction
from mmcv.cnn import fuse_conv_bn
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (get_dist_info, init_dist, load_checkpoint,
                         wrap_fp16_model)
from mmdet.apis import multi_gpu_test, single_gpu_test
from mmdet.datasets import (build_dataloader, build_dataset,
                            replace_ImageToTensor)
from mmdet.models import build_detector


def parse_args():
    parser = argparse.ArgumentParser(
        description='MMDet test (and eval) a model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument(
        '--work-dir',
        help='the directory to save the file containing evaluation metrics')
    parser.add_argument('--out', help='output result file in pickle format')
    parser.add_argument(
        '--fuse-conv-bn',
        action='store_true',
        help='Whether to fuse conv and bn, this will slightly increase '
        'the inference speed')
    parser.add_argument('--gpu-ids',
                        type=int,
                        nargs='+',
                        help='ids of gpus to use '
                        '(only applicable to non-distributed testing)')
    parser.add_argument(
        '--format-only',
        action='store_true',
        help='Format the output results without performing evaluation. It is '
        'useful when you want to format the result to a specific format and '
        'submit it to the test server')
    parser.add_argument(
        '--eval',
        type=str,
        nargs='+',
        help='evaluation metrics, which depends on the dataset, e.g., "bbox",'
        ' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
    parser.add_argument('--show', action='store_true', help='show results')
    parser.add_argument('--show-dir',
                        help='directory where painted images will be saved')
    parser.add_argument('--show-score-thr',
                        type=float,
                        default=0.3,
                        help='score threshold (default: 0.3)')
    parser.add_argument('--gpu-collect',
                        action='store_true',
                        help='whether to use gpu to collect results.')
    parser.add_argument(
        '--tmpdir',
        help='tmp directory used for collecting results from multiple '
        'workers, available when gpu-collect is not specified')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    parser.add_argument(
        '--options',
        nargs='+',
        action=DictAction,
        help='custom options for evaluation, the key-value pair in xxx=yyy '
        'format will be kwargs for dataset.evaluate() function (deprecated), '
        'change to --eval-options instead.')
    parser.add_argument(
        '--eval-options',
        nargs='+',
        action=DictAction,
        help='custom options for evaluation, the key-value pair in xxx=yyy '
        'format will be kwargs for dataset.evaluate() function')
    parser.add_argument('--launcher',
                        choices=['none', 'pytorch', 'slurm', 'mpi'],
                        default='none',
                        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)

    if args.options and args.eval_options:
        raise ValueError(
            '--options and --eval-options cannot be both '
            'specified, --options is deprecated in favor of --eval-options')
    if args.options:
        warnings.warn('--options is deprecated in favor of --eval-options')
        args.eval_options = args.options
    return args


def main():
    args = parse_args()

    assert args.out or args.eval or args.format_only or args.show \
        or args.show_dir, \
        ('Please specify at least one operation (save/eval/format/show the '
         'results / save the results) with the argument "--out", "--eval"'
         ', "--format-only", "--show" or "--show-dir"')

    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True

    cfg.model.pretrained = None
    if cfg.model.get('neck'):
        if isinstance(cfg.model.neck, list):
            for neck_cfg in cfg.model.neck:
                if neck_cfg.get('rfp_backbone'):
                    if neck_cfg.rfp_backbone.get('pretrained'):
                        neck_cfg.rfp_backbone.pretrained = None
        elif cfg.model.neck.get('rfp_backbone'):
            if cfg.model.neck.rfp_backbone.get('pretrained'):
                cfg.model.neck.rfp_backbone.pretrained = None

    # in case the test dataset is concatenated
    samples_per_gpu = 1
    if isinstance(cfg.data.test, dict):
        cfg.data.test.test_mode = True
        samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
        if samples_per_gpu > 1:
            # Replace 'ImageToTensor' to 'DefaultFormatBundle'
            cfg.data.test.pipeline = replace_ImageToTensor(
                cfg.data.test.pipeline)
    elif isinstance(cfg.data.test, list):
        for ds_cfg in cfg.data.test:
            ds_cfg.test_mode = True
        samples_per_gpu = max(
            [ds_cfg.pop('samples_per_gpu', 1) for ds_cfg in cfg.data.test])
        if samples_per_gpu > 1:
            for ds_cfg in cfg.data.test:
                ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline)

    if args.gpu_ids is not None:
        cfg.gpu_ids = args.gpu_ids
    else:
        cfg.gpu_ids = range(1)

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
        if len(cfg.gpu_ids) > 1:
            warnings.warn(
                f'We treat {cfg.gpu_ids} as gpu-ids, and reset to '
                f'{cfg.gpu_ids[0:1]} as gpu-ids to avoid potential error in '
                'non-distribute testing time.')
            cfg.gpu_ids = cfg.gpu_ids[0:1]
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    rank, _ = get_dist_info()
    # allows not to create the work_dir when it is not needed
    if args.work_dir is not None and rank == 0:
        mmcv.mkdir_or_exist(osp.abspath(args.work_dir))
        timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
        json_file = osp.join(args.work_dir, f'eval_{timestamp}.json')

    # build the dataloader
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset,
                                   samples_per_gpu=samples_per_gpu,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)

    # build the model and load checkpoint
    cfg.model.train_cfg = None
    model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.fuse_conv_bn:
        model = fuse_conv_bn(model)
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint.get('meta', {}):
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    if not distributed:
        model = MMDataParallel(model, device_ids=cfg.gpu_ids)
        outputs = single_gpu_test(model, data_loader, args.show,
                                  args.show_dir, args.show_score_thr)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect)

    rank, _ = get_dist_info()
    if rank == 0:
        if args.out:
            print(f'\nwriting results to {args.out}')
            mmcv.dump(outputs, args.out)
        kwargs = {} if args.eval_options is None else args.eval_options
        if args.format_only:
            dataset.format_results(outputs, **kwargs)
        if args.eval:
            eval_kwargs = cfg.get('evaluation', {}).copy()
            # hard-code way to remove EvalHook args
            for key in [
                    'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
                    'rule', 'dynamic_intervals'
            ]:
                eval_kwargs.pop(key, None)
            eval_kwargs.update(dict(metric=args.eval, **kwargs))
            metric = dataset.evaluate(outputs, **eval_kwargs)
            print(metric)
            metric_dict = dict(config=args.config, metric=metric)
            if args.work_dir is not None and rank == 0:
                mmcv.dump(metric_dict, json_file)


if __name__ == '__main__':
    main()
# ViT-Adapter-main/detection/train.py
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import copy
import os
import os.path as osp
import time
import warnings

import mmcv
import mmcv_custom  # noqa: F401,F403
import mmdet_custom  # noqa: F401,F403
import torch
from mmcv import Config, DictAction
from mmcv.runner import get_dist_info, init_dist
from mmcv.utils import get_git_hash
from mmdet import __version__
from mmdet.apis import init_random_seed, set_random_seed, train_detector
from mmdet.datasets import build_dataset
from mmdet.models import build_detector
from mmdet.utils import collect_env, get_root_logger


def parse_args():
    parser = argparse.ArgumentParser(description='Train a detector')
    parser.add_argument('config', help='train config file path')
    parser.add_argument('--work-dir', help='the dir to save logs and models')
    parser.add_argument('--resume-from',
                        help='the checkpoint file to resume from')
    parser.add_argument(
        '--auto-resume',
        action='store_true',
        help='resume from the latest checkpoint automatically')
    parser.add_argument(
        '--no-validate',
        action='store_true',
        help='whether not to evaluate the checkpoint during training')
    group_gpus = parser.add_mutually_exclusive_group()
    group_gpus.add_argument('--gpus',
                            type=int,
                            help='number of gpus to use '
                            '(only applicable to non-distributed training)')
    group_gpus.add_argument('--gpu-ids',
                            type=int,
                            nargs='+',
                            help='ids of gpus to use '
                            '(only applicable to non-distributed training)')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument(
        '--deterministic',
        action='store_true',
        help='whether to set deterministic options for CUDNN backend.')
    parser.add_argument(
        '--options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file (deprecated), '
        'change to --cfg-options instead.')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    parser.add_argument('--launcher',
                        choices=['none', 'pytorch', 'slurm', 'mpi'],
                        default='none',
                        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)

    if args.options and args.cfg_options:
        raise ValueError(
            '--options and --cfg-options cannot be both '
            'specified, --options is deprecated in favor of --cfg-options')
    if args.options:
        warnings.warn('--options is deprecated in favor of --cfg-options')
        args.cfg_options = args.options

    return args


def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True

    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = args.work_dir
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(args.config))[0])
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.auto_resume = args.auto_resume
    if args.gpu_ids is not None:
        cfg.gpu_ids = args.gpu_ids
    else:
        cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
        if len(cfg.gpu_ids) > 1:
            warnings.warn(
                f'We treat {cfg.gpu_ids} as gpu-ids, and reset to '
                f'{cfg.gpu_ids[0:1]} as gpu-ids to avoid potential error in '
                'non-distribute training time.')
            cfg.gpu_ids = cfg.gpu_ids[0:1]
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
        # re-set gpu_ids with distributed training mode
        _, world_size = get_dist_info()
        cfg.gpu_ids = range(world_size)
    cfg.device = 'cuda'  # fix 'ConfigDict' object has no attribute 'device'

    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # dump config
    cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
    # init the logger before other steps
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)

    # init the meta dict to record some important information such as
    # environment info and seed, which will be logged
    meta = dict()
    # log env info
    env_info_dict = collect_env()
    env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
    dash_line = '-' * 60 + '\n'
    logger.info('Environment info:\n' + dash_line + env_info + '\n' +
                dash_line)
    meta['env_info'] = env_info
    meta['config'] = cfg.pretty_text
    # log some basic info
    logger.info(f'Distributed training: {distributed}')
    logger.info(f'Config:\n{cfg.pretty_text}')

    # set random seeds
    seed = init_random_seed(args.seed)
    logger.info(f'Set random seed to {seed}, '
                f'deterministic: {args.deterministic}')
    set_random_seed(seed, deterministic=args.deterministic)
    cfg.seed = seed
    meta['seed'] = seed
    meta['exp_name'] = osp.basename(args.config)

    model = build_detector(cfg.model,
                           train_cfg=cfg.get('train_cfg'),
                           test_cfg=cfg.get('test_cfg'))
    model.init_weights()

    datasets = [build_dataset(cfg.data.train)]
    if len(cfg.workflow) == 2:
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_dataset(val_dataset))
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__ +
                                          get_git_hash()[:7],
                                          CLASSES=datasets[0].CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = datasets[0].CLASSES
    train_detector(model,
                   datasets,
                   cfg,
                   distributed=distributed,
                   validate=(not args.no_validate),
                   timestamp=timestamp,
                   meta=meta)


if __name__ == '__main__':
    main()
# ViT-Adapter-main/detection/video_demo.py
# Copyright (c) OpenMMLab. All rights reserved.
import argparse

import cv2
import mmcv
import mmcv_custom  # noqa: F401,F403
import mmdet_custom  # noqa: F401,F403
from mmdet.apis import inference_detector, init_detector


def parse_args():
    parser = argparse.ArgumentParser(description='MMDetection video demo')
    parser.add_argument('video', help='Video file')
    parser.add_argument('config', help='Config file')
    parser.add_argument('checkpoint', help='Checkpoint file')
    parser.add_argument('--device', default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--score-thr', type=float, default=0.3,
                        help='Bbox score threshold')
    parser.add_argument('--out', type=str, help='Output video file')
    parser.add_argument('--show', action='store_true', help='Show video')
    parser.add_argument('--wait-time', type=float, default=1,
                        help='The interval of show (s), 0 is block')
    args = parser.parse_args()
    return args


def main():
    args = parse_args()
    assert args.out or args.show, \
        ('Please specify at least one operation (save/show the '
         'video) with the argument "--out" or "--show"')

    model = init_detector(args.config, args.checkpoint, device=args.device)

    video_reader = mmcv.VideoReader(args.video)
    video_writer = None
    if args.out:
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        video_writer = cv2.VideoWriter(
            args.out, fourcc, video_reader.fps,
            (video_reader.width, video_reader.height))

    for frame in mmcv.track_iter_progress(video_reader):
        result = inference_detector(model, frame)
        frame = model.show_result(frame, result, score_thr=args.score_thr)
        if args.show:
            cv2.namedWindow('video', 0)
            mmcv.imshow(frame, 'video', args.wait_time)
        if args.out:
            video_writer.write(frame)

    if video_writer:
        video_writer.release()
    cv2.destroyAllWindows()


if __name__ == '__main__':
    main()
import argparse import torch import torch.nn.functional as F parser = argparse.ArgumentParser(description='Convert a ViT checkpoint with 14x14 patch embedding to 16x16') parser.add_argument('filename', type=str, help='path to the .pth checkpoint to convert') args = parser.parse_args() model = torch.load(args.filename, map_location=torch.device('cpu')) # resize patch embedding from 14x14 to 16x16 patch_embed = model['patch_embed.proj.weight'] patch_embed = F.interpolate(patch_embed, size=(16, 16), mode='bilinear', align_corners=False) model['patch_embed.proj.weight'] = patch_embed # rename layer-scale parameters and drop mask tokens new_model = {} for k, v in model.items(): if 'mask_token' in k: continue new_k = k.replace('ls1.gamma', 'gamma1') new_k = new_k.replace('ls2.gamma', 'gamma2') new_model[new_k] = v torch.save(new_model, args.filename.replace('.pth', '_14to16.pth'))
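# Example invocation (the checkpoint name is hypothetical):
#   python convert_14to16.py vit_base_patch14.pth
# The converted weights are written next to the input as
# vit_base_patch14_14to16.pth.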
ViT-Adapter-main
detection/convert_14to16.py
# Copyright (c) Shanghai AI Lab. All rights reserved. from .models import * # noqa: F401,F403
ViT-Adapter-main
detection/mmdet_custom/__init__.py
# Copyright (c) Shanghai AI Lab. All rights reserved. from .backbones import * # noqa: F401,F403 from .necks import * # noqa: F401,F403 from .detectors import * # noqa: F401,F403
ViT-Adapter-main
detection/mmdet_custom/models/__init__.py
# Copyright (c) Shanghai AI Lab. All rights reserved. import torch.nn as nn from mmcv.cnn import ConvModule from mmcv.runner import BaseModule from mmdet.models.builder import NECKS @NECKS.register_module() class ChannelMapperWithPooling(BaseModule): r"""Channel Mapper to reduce/increase channels of backbone features, with max-pooling used to create any extra output levels. Args: in_channels (List[int]): Number of input channels per scale. out_channels (int): Number of output channels (used at each scale). kernel_size (int, optional): Kernel size of the ConvModule used at each scale. Default: 1. conv_cfg (dict, optional): Config dict for convolution layer. Default: None. norm_cfg (dict, optional): Config dict for normalization layer. Default: None. act_cfg (dict, optional): Config dict for activation layer in ConvModule. Default: dict(type='ReLU'). num_outs (int, optional): Number of output feature maps. Extra max-pooling layers are appended when num_outs is larger than the length of in_channels. init_cfg (dict or list[dict], optional): Initialization config dict. Example: >>> import torch >>> in_channels = [2, 3, 5, 7] >>> scales = [340, 170, 84, 43] >>> inputs = [torch.rand(1, c, s, s) ... for c, s in zip(in_channels, scales)] >>> self = ChannelMapperWithPooling(in_channels, 11, 3).eval() >>> outputs = self.forward(inputs) >>> for i in range(len(outputs)): ... print(f'outputs[{i}].shape = {outputs[i].shape}') outputs[0].shape = torch.Size([1, 11, 340, 340]) outputs[1].shape = torch.Size([1, 11, 170, 170]) outputs[2].shape = torch.Size([1, 11, 84, 84]) outputs[3].shape = torch.Size([1, 11, 43, 43]) """ def __init__(self, in_channels, out_channels, kernel_size=1, conv_cfg=None, norm_cfg=None, act_cfg=dict(type='ReLU'), num_outs=None, init_cfg=dict(type='Xavier', layer='Conv2d', distribution='uniform')): super(ChannelMapperWithPooling, self).__init__(init_cfg) assert isinstance(in_channels, list) self.extra_convs = None if num_outs is None: num_outs = len(in_channels) self.convs = nn.ModuleList() for in_channel in in_channels: self.convs.append( ConvModule(in_channel, out_channels, kernel_size, padding=(kernel_size - 1) // 2, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)) if num_outs > len(in_channels): self.extra_convs = nn.ModuleList() for i in range(len(in_channels), num_outs): self.extra_convs.append(nn.MaxPool2d(kernel_size=2, stride=2)) def forward(self, inputs): """Forward function.""" assert len(inputs) == len(self.convs) outs = [self.convs[i](inputs[i]) for i in range(len(inputs))] if self.extra_convs: for i in range(len(self.extra_convs)): outs.append(self.extra_convs[i](outs[-1])) return tuple(outs)
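# Illustrative sketch of the behavior that distinguishes this neck: the extra
# max-pooled levels appended when num_outs exceeds len(in_channels). Channel
# counts and spatial sizes are arbitrary.
# >>> import torch
# >>> neck = ChannelMapperWithPooling([256, 512], 64, kernel_size=1, num_outs=4)
# >>> feats = [torch.rand(1, 256, 64, 64), torch.rand(1, 512, 32, 32)]
# >>> outs = neck(feats)
# >>> [o.shape[-1] for o in outs]
# [64, 32, 16, 8]  # two mapped levels + two extra max-pooled levels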
ViT-Adapter-main
detection/mmdet_custom/models/necks/channel_mapper.py
# Copyright (c) Shanghai AI Lab. All rights reserved. from .channel_mapper import ChannelMapperWithPooling from .extra_attention import ExtraAttention __all__ = ['ExtraAttention', 'ChannelMapperWithPooling']
ViT-Adapter-main
detection/mmdet_custom/models/necks/__init__.py
import torch.nn as nn from mmcv.runner import BaseModule, auto_fp16 from mmdet.models.builder import NECKS from timm.models.layers import trunc_normal_, DropPath import math import torch import torch.utils.checkpoint as cp class Mlp(nn.Module): """ MLP as used in Vision Transformer, MLP-Mixer and related networks """ def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.act = act_layer() self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x class Attention(nn.Module): def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim ** -0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple) attn = (q @ k.transpose(-2, -1)) * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x @NECKS.register_module() class ExtraAttention(BaseModule): def __init__(self, in_channels, num_head, with_ffn=True, ffn_ratio=4.0, norm_layer=nn.LayerNorm, drop_path=0., init_values=None, with_cp=False, use_final_norm=True): super(ExtraAttention, self).__init__() assert isinstance(in_channels, list) self.in_channels = in_channels self.norm1 = norm_layer(in_channels[-1]) self.attn = Attention(dim=in_channels[-1], num_heads=num_head, qkv_bias=True) self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() self.use_final_norm = use_final_norm self.with_cp = with_cp if with_ffn: self.norm2 = norm_layer(in_channels[-1]) hidden_features = int(in_channels[-1] * ffn_ratio) self.ffn = Mlp(in_features=in_channels[-1], hidden_features=hidden_features) else: self.ffn = None if init_values is not None: self.gamma_1 = nn.Parameter(init_values * torch.ones((in_channels[-1])), requires_grad=True) self.gamma_2 = nn.Parameter(init_values * torch.ones((in_channels[-1])), requires_grad=True) else: self.gamma_1, self.gamma_2 = None, None if self.use_final_norm: self.final_norm = norm_layer(in_channels[-1]) self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm) or isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) elif isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d): fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels fan_out //= m.groups m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) if m.bias is not None: m.bias.data.zero_() @auto_fp16() def forward(self, inputs): def _inner_forward(feat): b, c, h, w = feat.shape feat = feat.flatten(2).transpose(1, 2) # add layer scale if self.gamma_1 is not None: # self-attention feat = feat + self.gamma_1 * self.drop_path(self.attn(self.norm1(feat))) else: feat = feat + self.drop_path(self.attn(self.norm1(feat))) if self.ffn is not None: # ffn if self.gamma_2 is not None: feat = feat + self.gamma_2 * self.drop_path(self.ffn(self.norm2(feat))) else: feat = feat + self.drop_path(self.ffn(self.norm2(feat))) if self.use_final_norm: feat = self.final_norm(feat) feat = feat.transpose(1, 2).reshape(b, c, h, w).contiguous() return feat """Forward function.""" if isinstance(inputs, tuple): inputs = list(inputs) assert len(inputs) == len(self.in_channels) feat = inputs[-1] if self.with_cp and feat.requires_grad: feat = cp.checkpoint(_inner_forward, feat) else: feat = _inner_forward(feat) inputs[-1] = feat # replace original feature map if isinstance(inputs, list): inputs = tuple(inputs) return inputs
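# Minimal, illustrative usage sketch (shapes and channel counts are arbitrary).
# ExtraAttention refines only the last (coarsest) pyramid level with one global
# self-attention block and passes the other levels through unchanged.
if __name__ == '__main__':
    neck = ExtraAttention(in_channels=[256, 256, 256, 256], num_head=8)
    feats = [torch.rand(2, 256, s, s) for s in (64, 32, 16, 8)]
    outs = neck(feats)
    # spatial shapes are preserved; only the content of outs[-1] changes
    assert all(o.shape == f.shape for o, f in zip(outs, feats))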
ViT-Adapter-main
detection/mmdet_custom/models/necks/extra_attention.py
# Copyright (c) Shanghai AI Lab. All rights reserved. from .beit_adapter import BEiTAdapter from .uniperceiver_adapter import UniPerceiverAdapter from .vit_adapter import ViTAdapter from .vit_baseline import ViTBaseline __all__ = ['UniPerceiverAdapter', 'ViTAdapter', 'ViTBaseline', 'BEiTAdapter']
ViT-Adapter-main
detection/mmdet_custom/models/backbones/__init__.py
# Copyright (c) Shanghai AI Lab. All rights reserved. import logging import math import torch import torch.nn as nn import torch.nn.functional as F from mmdet.models.builder import BACKBONES from ops.modules import MSDeformAttn from timm.models.layers import DropPath, trunc_normal_ from torch.nn.init import normal_ from .base.vit import TIMMVisionTransformer from .adapter_modules import SpatialPriorModule, InteractionBlock, deform_inputs _logger = logging.getLogger(__name__) @BACKBONES.register_module() class ViTAdapter(TIMMVisionTransformer): def __init__(self, pretrain_size=224, num_heads=12, conv_inplane=64, n_points=4, deform_num_heads=6, init_values=0., interaction_indexes=None, with_cffn=True, cffn_ratio=0.25, deform_ratio=1.0, add_vit_feature=True, use_extra_extractor=True, *args, **kwargs): super().__init__(num_heads=num_heads, *args, **kwargs) # self.num_classes = 80 self.cls_token = None self.num_block = len(self.blocks) self.pretrain_size = (pretrain_size, pretrain_size) self.interaction_indexes = interaction_indexes self.add_vit_feature = add_vit_feature embed_dim = self.embed_dim self.level_embed = nn.Parameter(torch.zeros(3, embed_dim)) self.spm = SpatialPriorModule(inplanes=conv_inplane, embed_dim=embed_dim) self.interactions = nn.Sequential(*[ InteractionBlock(dim=embed_dim, num_heads=deform_num_heads, n_points=n_points, init_values=init_values, drop_path=self.drop_path_rate, norm_layer=self.norm_layer, with_cffn=with_cffn, cffn_ratio=cffn_ratio, deform_ratio=deform_ratio, extra_extractor=((True if i == len(interaction_indexes) - 1 else False) and use_extra_extractor)) for i in range(len(interaction_indexes)) ]) self.up = nn.ConvTranspose2d(embed_dim, embed_dim, 2, 2) self.norm1 = nn.SyncBatchNorm(embed_dim) self.norm2 = nn.SyncBatchNorm(embed_dim) self.norm3 = nn.SyncBatchNorm(embed_dim) self.norm4 = nn.SyncBatchNorm(embed_dim) self.up.apply(self._init_weights) self.spm.apply(self._init_weights) self.interactions.apply(self._init_weights) self.apply(self._init_deform_weights) normal_(self.level_embed) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm) or isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) elif isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d): fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels fan_out //= m.groups m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) if m.bias is not None: m.bias.data.zero_() def _get_pos_embed(self, pos_embed, H, W): pos_embed = pos_embed.reshape( 1, self.pretrain_size[0] // 16, self.pretrain_size[1] // 16, -1).permute(0, 3, 1, 2) pos_embed = F.interpolate(pos_embed, size=(H, W), mode='bicubic', align_corners=False).\ reshape(1, -1, H * W).permute(0, 2, 1) return pos_embed def _init_deform_weights(self, m): if isinstance(m, MSDeformAttn): m._reset_parameters() def _add_level_embed(self, c2, c3, c4): c2 = c2 + self.level_embed[0] c3 = c3 + self.level_embed[1] c4 = c4 + self.level_embed[2] return c2, c3, c4 def forward(self, x): deform_inputs1, deform_inputs2 = deform_inputs(x) # SPM forward c1, c2, c3, c4 = self.spm(x) c2, c3, c4 = self._add_level_embed(c2, c3, c4) c = torch.cat([c2, c3, c4], dim=1) # Patch Embedding forward x, H, W = self.patch_embed(x) bs, n, dim = x.shape pos_embed = self._get_pos_embed(self.pos_embed[:, 1:], H, W) x = self.pos_drop(x + pos_embed) # Interaction for i, layer in 
enumerate(self.interactions): indexes = self.interaction_indexes[i] x, c = layer(x, c, self.blocks[indexes[0]:indexes[-1] + 1], deform_inputs1, deform_inputs2, H, W) # Split & Reshape c2 = c[:, 0:c2.size(1), :] c3 = c[:, c2.size(1):c2.size(1) + c3.size(1), :] c4 = c[:, c2.size(1) + c3.size(1):, :] c2 = c2.transpose(1, 2).view(bs, dim, H * 2, W * 2).contiguous() c3 = c3.transpose(1, 2).view(bs, dim, H, W).contiguous() c4 = c4.transpose(1, 2).view(bs, dim, H // 2, W // 2).contiguous() c1 = self.up(c2) + c1 if self.add_vit_feature: x3 = x.transpose(1, 2).view(bs, dim, H, W).contiguous() x1 = F.interpolate(x3, scale_factor=4, mode='bilinear', align_corners=False) x2 = F.interpolate(x3, scale_factor=2, mode='bilinear', align_corners=False) x4 = F.interpolate(x3, scale_factor=0.5, mode='bilinear', align_corners=False) c1, c2, c3, c4 = c1 + x1, c2 + x2, c3 + x3, c4 + x4 # Final Norm f1 = self.norm1(c1) f2 = self.norm2(c2) f3 = self.norm3(c3) f4 = self.norm4(c4) return [f1, f2, f3, f4]
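# Config-style sketch of how this backbone is selected in an mmdet model
# config. Field names follow this repo's conventions, but the values are
# illustrative, not a tuned recipe; MSDeformAttn additionally requires the
# compiled CUDA op from the ops/ package.
# model = dict(
#     backbone=dict(
#         type='ViTAdapter',
#         patch_size=16, embed_dim=768, depth=12, num_heads=12,
#         conv_inplane=64, n_points=4, deform_num_heads=12,
#         cffn_ratio=0.25, deform_ratio=0.5,
#         interaction_indexes=[[0, 2], [3, 5], [6, 8], [9, 11]]),
#     ...)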
ViT-Adapter-main
detection/mmdet_custom/models/backbones/vit_adapter.py
# Copyright (c) Shanghai AI Lab. All rights reserved. import logging import math import torch import torch.nn as nn import torch.nn.functional as F from mmdet.models.builder import BACKBONES from ops.modules import MSDeformAttn from timm.models.layers import trunc_normal_ from torch.nn.init import normal_ from .base.beit import BEiT from .adapter_modules import SpatialPriorModule, InteractionBlock, deform_inputs _logger = logging.getLogger(__name__) @BACKBONES.register_module() class BEiTAdapter(BEiT): def __init__(self, pretrain_size=224, conv_inplane=64, n_points=4, deform_num_heads=6, init_values=0., cffn_ratio=0.25, deform_ratio=1.0, with_cffn=True, interaction_indexes=None, add_vit_feature=True, version='new', with_cp=False, *args, **kwargs): super().__init__(init_values=init_values, with_cp=with_cp, *args, **kwargs) # self.num_classes = 80 # self.cls_token = None self.version = version self.num_block = len(self.blocks) self.pretrain_size = (pretrain_size, pretrain_size) self.interaction_indexes = interaction_indexes self.add_vit_feature = add_vit_feature embed_dim = self.embed_dim self.level_embed = nn.Parameter(torch.zeros(3, embed_dim)) self.spm = SpatialPriorModule(inplanes=conv_inplane, embed_dim=embed_dim) self.interactions = nn.Sequential(*[ InteractionBlock(dim=embed_dim, num_heads=deform_num_heads, n_points=n_points, init_values=init_values, drop_path=self.drop_path_rate, norm_layer=self.norm_layer, with_cffn=with_cffn, cffn_ratio=cffn_ratio, deform_ratio=deform_ratio, extra_extractor=True if i == len(interaction_indexes) - 1 else False, with_cp=with_cp) for i in range(len(interaction_indexes)) ]) self.up = nn.ConvTranspose2d(embed_dim, embed_dim, 2, 2) self.norm1 = nn.SyncBatchNorm(embed_dim) self.norm2 = nn.SyncBatchNorm(embed_dim) self.norm3 = nn.SyncBatchNorm(embed_dim) self.norm4 = nn.SyncBatchNorm(embed_dim) self.up.apply(self._init_weights) self.spm.apply(self._init_weights) self.interactions.apply(self._init_weights) self.apply(self._init_deform_weights) normal_(self.level_embed) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm) or isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) elif isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d): fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels fan_out //= m.groups m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) if m.bias is not None: m.bias.data.zero_() def _get_pos_embed(self, pos_embed, H, W): pos_embed = pos_embed.reshape( 1, self.pretrain_size[0] // 16, self.pretrain_size[1] // 16, -1).permute(0, 3, 1, 2) pos_embed = F.interpolate(pos_embed, size=(H, W), mode='bicubic', align_corners=False). 
\ reshape(1, -1, H * W).permute(0, 2, 1) return pos_embed def _init_deform_weights(self, m): if isinstance(m, MSDeformAttn): m._reset_parameters() def _add_level_embed(self, c2, c3, c4): c2 = c2 + self.level_embed[0] c3 = c3 + self.level_embed[1] c4 = c4 + self.level_embed[2] return c2, c3, c4 def forward(self, x): deform_inputs1, deform_inputs2 = deform_inputs(x) # SPM forward c1, c2, c3, c4 = self.spm(x) c2, c3, c4 = self._add_level_embed(c2, c3, c4) c = torch.cat([c2, c3, c4], dim=1) # Patch Embedding forward x, H, W = self.patch_embed(x) bs, n, dim = x.shape if self.pos_embed is not None: pos_embed = self._get_pos_embed(self.pos_embed, H, W) x = x + pos_embed x = self.pos_drop(x) # Interaction outs = list() for i, layer in enumerate(self.interactions): indexes = self.interaction_indexes[i] x, c = layer(x, c, self.blocks[indexes[0]:indexes[-1] + 1], deform_inputs1, deform_inputs2, H, W) if self.version == 'old': outs.append(x.transpose(1, 2).view(bs, dim, H, W).contiguous()) # Split & Reshape c2 = c[:, 0:c2.size(1), :] c3 = c[:, c2.size(1):c2.size(1) + c3.size(1), :] c4 = c[:, c2.size(1) + c3.size(1):, :] c2 = c2.transpose(1, 2).view(bs, dim, H * 2, W * 2).contiguous() c3 = c3.transpose(1, 2).view(bs, dim, H, W).contiguous() c4 = c4.transpose(1, 2).view(bs, dim, H // 2, W // 2).contiguous() c1 = self.up(c2) + c1 if self.add_vit_feature: if self.version == 'old': x1, x2, x3, x4 = outs else: x = x.transpose(1, 2).view(bs, dim, H, W).contiguous() x1, x2, x3, x4 = x, x, x, x x1 = F.interpolate(x1, scale_factor=4, mode='bilinear', align_corners=False) x2 = F.interpolate(x2, scale_factor=2, mode='bilinear', align_corners=False) x4 = F.interpolate(x4, scale_factor=0.5, mode='bilinear', align_corners=False) c1, c2, c3, c4 = c1 + x1, c2 + x2, c3 + x3, c4 + x4 # Final Norm f1 = self.norm1(c1) f2 = self.norm2(c2) f3 = self.norm3(c3) f4 = self.norm4(c4) return [f1, f2, f3, f4]
ViT-Adapter-main
detection/mmdet_custom/models/backbones/beit_adapter.py
# Copyright (c) Shanghai AI Lab. All rights reserved. import logging import math import torch import torch.nn as nn import torch.nn.functional as F from mmdet.models.builder import BACKBONES from ops.modules import MSDeformAttn from timm.models.layers import DropPath, trunc_normal_ from torch.nn.init import normal_ from .base.uniperceiver import UnifiedBertEncoder from .adapter_modules import SpatialPriorModule, InteractionBlock, deform_inputs _logger = logging.getLogger(__name__) @BACKBONES.register_module() class UniPerceiverAdapter(UnifiedBertEncoder): def __init__(self, pretrain_size=224, num_heads=12, conv_inplane=64, n_points=4, deform_num_heads=6, init_values=0., with_cffn=True, cffn_ratio=0.25, deform_ratio=1.0, add_vit_feature=True, interaction_indexes=None, with_cp=False, *args, **kwargs): super().__init__(num_heads=num_heads, with_cp=with_cp, *args, **kwargs) self.num_classes = 80 self.cls_token = None self.num_block = len(self.layers) self.pretrain_size = (pretrain_size, pretrain_size) self.interaction_indexes = interaction_indexes self.add_vit_feature = add_vit_feature embed_dim = self.embed_dim self.level_embed = nn.Parameter(torch.zeros(3, embed_dim)) self.spm = SpatialPriorModule(inplanes=conv_inplane, embed_dim=embed_dim) self.interactions = nn.Sequential(*[ InteractionBlock(dim=embed_dim, num_heads=deform_num_heads, n_points=n_points, init_values=init_values, drop_path=self.drop_path_rate, norm_layer=self.norm_layer, with_cffn=with_cffn, cffn_ratio=cffn_ratio, deform_ratio=deform_ratio, extra_extractor=True if i == len(interaction_indexes) - 1 else False, with_cp=with_cp) for i in range(len(interaction_indexes)) ]) self.up = nn.ConvTranspose2d(embed_dim, embed_dim, 2, 2) self.norm1 = nn.SyncBatchNorm(embed_dim) self.norm2 = nn.SyncBatchNorm(embed_dim) self.norm3 = nn.SyncBatchNorm(embed_dim) self.norm4 = nn.SyncBatchNorm(embed_dim) self.up.apply(self._init_weights) self.spm.apply(self._init_weights) self.interactions.apply(self._init_weights) self.apply(self._init_deform_weights) normal_(self.level_embed) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm) or isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) elif isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d): fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels fan_out //= m.groups m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) if m.bias is not None: m.bias.data.zero_() def _init_deform_weights(self, m): if isinstance(m, MSDeformAttn): m._reset_parameters() def _add_level_embed(self, c2, c3, c4): c2 = c2 + self.level_embed[0] c3 = c3 + self.level_embed[1] c4 = c4 + self.level_embed[2] return c2, c3, c4 def forward(self, x): deform_inputs1, deform_inputs2 = deform_inputs(x) # SPM forward c1, c2, c3, c4 = self.spm(x) c2, c3, c4 = self._add_level_embed(c2, c3, c4) c = torch.cat([c2, c3, c4], dim=1) # Patch Embedding forward x, H, W = self.visual_embed(x) bs, n, dim = x.shape # Interaction for i, layer in enumerate(self.interactions): indexes = self.interaction_indexes[i] x, c = layer(x, c, self.layers[indexes[0]:indexes[-1] + 1], deform_inputs1, deform_inputs2, H, W) # Split & Reshape c2 = c[:, 0:c2.size(1), :] c3 = c[:, c2.size(1):c2.size(1) + c3.size(1), :] c4 = c[:, c2.size(1) + c3.size(1):, :] c2 = c2.transpose(1, 2).view(bs, dim, H * 2, W * 2).contiguous() c3 = c3.transpose(1, 2).view(bs, dim, H, 
W).contiguous() c4 = c4.transpose(1, 2).view(bs, dim, H // 2, W // 2).contiguous() c1 = self.up(c2) + c1 if self.add_vit_feature: x3 = x.transpose(1, 2).view(bs, dim, H, W).contiguous() x1 = F.interpolate(x3, scale_factor=4, mode='bilinear', align_corners=False) x2 = F.interpolate(x3, scale_factor=2, mode='bilinear', align_corners=False) x4 = F.interpolate(x3, scale_factor=0.5, mode='bilinear', align_corners=False) c1, c2, c3, c4 = c1 + x1, c2 + x2, c3 + x3, c4 + x4 # Final Norm f1 = self.norm1(c1) f2 = self.norm2(c2) f3 = self.norm3(c3) f4 = self.norm4(c4) return [f1, f2, f3, f4]
ViT-Adapter-main
detection/mmdet_custom/models/backbones/uniperceiver_adapter.py
import logging from functools import partial import torch import torch.nn as nn from ops.modules import MSDeformAttn from timm.models.layers import DropPath import torch.utils.checkpoint as cp _logger = logging.getLogger(__name__) def get_reference_points(spatial_shapes, device): reference_points_list = [] for lvl, (H_, W_) in enumerate(spatial_shapes): ref_y, ref_x = torch.meshgrid( torch.linspace(0.5, H_ - 0.5, H_, dtype=torch.float32, device=device), torch.linspace(0.5, W_ - 0.5, W_, dtype=torch.float32, device=device)) ref_y = ref_y.reshape(-1)[None] / H_ ref_x = ref_x.reshape(-1)[None] / W_ ref = torch.stack((ref_x, ref_y), -1) reference_points_list.append(ref) reference_points = torch.cat(reference_points_list, 1) reference_points = reference_points[:, :, None] return reference_points def deform_inputs(x): bs, c, h, w = x.shape spatial_shapes = torch.as_tensor([(h // 8, w // 8), (h // 16, w // 16), (h // 32, w // 32)], dtype=torch.long, device=x.device) level_start_index = torch.cat((spatial_shapes.new_zeros( (1,)), spatial_shapes.prod(1).cumsum(0)[:-1])) reference_points = get_reference_points([(h // 16, w // 16)], x.device) deform_inputs1 = [reference_points, spatial_shapes, level_start_index] spatial_shapes = torch.as_tensor([(h // 16, w // 16)], dtype=torch.long, device=x.device) level_start_index = torch.cat((spatial_shapes.new_zeros( (1,)), spatial_shapes.prod(1).cumsum(0)[:-1])) reference_points = get_reference_points([(h // 8, w // 8), (h // 16, w // 16), (h // 32, w // 32)], x.device) deform_inputs2 = [reference_points, spatial_shapes, level_start_index] return deform_inputs1, deform_inputs2 class ConvFFN(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.dwconv = DWConv(hidden_features) self.act = act_layer() self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, x, H, W): x = self.fc1(x) x = self.dwconv(x, H, W) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x class DWConv(nn.Module): def __init__(self, dim=768): super().__init__() self.dwconv = nn.Conv2d(dim, dim, 3, 1, 1, bias=True, groups=dim) def forward(self, x, H, W): B, N, C = x.shape n = N // 21 x1 = x[:, 0:16 * n, :].transpose(1, 2).view(B, C, H * 2, W * 2).contiguous() x2 = x[:, 16 * n:20 * n, :].transpose(1, 2).view(B, C, H, W).contiguous() x3 = x[:, 20 * n:, :].transpose(1, 2).view(B, C, H // 2, W // 2).contiguous() x1 = self.dwconv(x1).flatten(2).transpose(1, 2) x2 = self.dwconv(x2).flatten(2).transpose(1, 2) x3 = self.dwconv(x3).flatten(2).transpose(1, 2) x = torch.cat([x1, x2, x3], dim=1) return x class Extractor(nn.Module): def __init__(self, dim, num_heads=6, n_points=4, n_levels=1, deform_ratio=1.0, with_cffn=True, cffn_ratio=0.25, drop=0., drop_path=0., norm_layer=partial(nn.LayerNorm, eps=1e-6), with_cp=False): super().__init__() self.query_norm = norm_layer(dim) self.feat_norm = norm_layer(dim) self.attn = MSDeformAttn(d_model=dim, n_levels=n_levels, n_heads=num_heads, n_points=n_points, ratio=deform_ratio) self.with_cffn = with_cffn self.with_cp = with_cp if with_cffn: self.ffn = ConvFFN(in_features=dim, hidden_features=int(dim * cffn_ratio), drop=drop) self.ffn_norm = norm_layer(dim) self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() def forward(self, query, reference_points, feat, spatial_shapes, level_start_index, H, W): def _inner_forward(query, feat): attn = self.attn(self.query_norm(query), reference_points, self.feat_norm(feat), spatial_shapes, level_start_index, None) query = query + attn if self.with_cffn: query = query + self.drop_path(self.ffn(self.ffn_norm(query), H, W)) return query if self.with_cp and query.requires_grad: query = cp.checkpoint(_inner_forward, query, feat) else: query = _inner_forward(query, feat) return query class Injector(nn.Module): def __init__(self, dim, num_heads=6, n_points=4, n_levels=1, deform_ratio=1.0, norm_layer=partial(nn.LayerNorm, eps=1e-6), init_values=0., with_cp=False): super().__init__() self.with_cp = with_cp self.query_norm = norm_layer(dim) self.feat_norm = norm_layer(dim) self.attn = MSDeformAttn(d_model=dim, n_levels=n_levels, n_heads=num_heads, n_points=n_points, ratio=deform_ratio) self.gamma = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True) def forward(self, query, reference_points, feat, spatial_shapes, level_start_index): def _inner_forward(query, feat): attn = self.attn(self.query_norm(query), reference_points, self.feat_norm(feat), spatial_shapes, level_start_index, None) return query + self.gamma * attn if self.with_cp and query.requires_grad: query = cp.checkpoint(_inner_forward, query, feat) else: query = _inner_forward(query, feat) return query class InteractionBlock(nn.Module): def __init__(self, dim, num_heads=6, n_points=4, norm_layer=partial(nn.LayerNorm, eps=1e-6), drop=0., drop_path=0., with_cffn=True, cffn_ratio=0.25, init_values=0., deform_ratio=1.0, extra_extractor=False, with_cp=False): super().__init__() self.injector = Injector(dim=dim, n_levels=3, num_heads=num_heads, init_values=init_values, n_points=n_points, norm_layer=norm_layer, deform_ratio=deform_ratio, with_cp=with_cp) self.extractor = Extractor(dim=dim, n_levels=1, num_heads=num_heads, n_points=n_points, norm_layer=norm_layer, deform_ratio=deform_ratio, with_cffn=with_cffn, cffn_ratio=cffn_ratio, drop=drop, drop_path=drop_path, with_cp=with_cp) if extra_extractor: self.extra_extractors = nn.Sequential(*[ Extractor(dim=dim, num_heads=num_heads, n_points=n_points, norm_layer=norm_layer, with_cffn=with_cffn, cffn_ratio=cffn_ratio, deform_ratio=deform_ratio, drop=drop, drop_path=drop_path, with_cp=with_cp) for _ in range(2) ]) else: self.extra_extractors = None def forward(self, x, c, blocks, deform_inputs1, deform_inputs2, H, W): x = self.injector(query=x, reference_points=deform_inputs1[0], feat=c, spatial_shapes=deform_inputs1[1], level_start_index=deform_inputs1[2]) for idx, blk in enumerate(blocks): x = blk(x, H, W) c = self.extractor(query=c, reference_points=deform_inputs2[0], feat=x, spatial_shapes=deform_inputs2[1], level_start_index=deform_inputs2[2], H=H, W=W) if self.extra_extractors is not None: for extractor in self.extra_extractors: c = extractor(query=c, reference_points=deform_inputs2[0], feat=x, spatial_shapes=deform_inputs2[1], level_start_index=deform_inputs2[2], H=H, W=W) return x, c class SpatialPriorModule(nn.Module): def __init__(self, inplanes=64, embed_dim=384): super().__init__() self.stem = nn.Sequential(*[ nn.Conv2d(3, inplanes, kernel_size=3, stride=2, padding=1, bias=False), nn.SyncBatchNorm(inplanes), nn.ReLU(inplace=True), nn.Conv2d(inplanes, inplanes, kernel_size=3, stride=1, padding=1, bias=False), nn.SyncBatchNorm(inplanes), nn.ReLU(inplace=True), nn.Conv2d(inplanes, inplanes, kernel_size=3, stride=1, padding=1, 
bias=False), nn.SyncBatchNorm(inplanes), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=3, stride=2, padding=1) ]) self.conv2 = nn.Sequential(*[ nn.Conv2d(inplanes, 2 * inplanes, kernel_size=3, stride=2, padding=1, bias=False), nn.SyncBatchNorm(2 * inplanes), nn.ReLU(inplace=True) ]) self.conv3 = nn.Sequential(*[ nn.Conv2d(2 * inplanes, 4 * inplanes, kernel_size=3, stride=2, padding=1, bias=False), nn.SyncBatchNorm(4 * inplanes), nn.ReLU(inplace=True) ]) self.conv4 = nn.Sequential(*[ nn.Conv2d(4 * inplanes, 4 * inplanes, kernel_size=3, stride=2, padding=1, bias=False), nn.SyncBatchNorm(4 * inplanes), nn.ReLU(inplace=True) ]) self.fc1 = nn.Conv2d(inplanes, embed_dim, kernel_size=1, stride=1, padding=0, bias=True) self.fc2 = nn.Conv2d(2 * inplanes, embed_dim, kernel_size=1, stride=1, padding=0, bias=True) self.fc3 = nn.Conv2d(4 * inplanes, embed_dim, kernel_size=1, stride=1, padding=0, bias=True) self.fc4 = nn.Conv2d(4 * inplanes, embed_dim, kernel_size=1, stride=1, padding=0, bias=True) def forward(self, x): c1 = self.stem(x) c2 = self.conv2(c1) c3 = self.conv3(c2) c4 = self.conv4(c3) c1 = self.fc1(c1) c2 = self.fc2(c2) c3 = self.fc3(c3) c4 = self.fc4(c4) bs, dim, _, _ = c1.shape # c1 = c1.view(bs, dim, -1).transpose(1, 2) # 4s c2 = c2.view(bs, dim, -1).transpose(1, 2) # 8s c3 = c3.view(bs, dim, -1).transpose(1, 2) # 16s c4 = c4.view(bs, dim, -1).transpose(1, 2) # 32s return c1, c2, c3, c4
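# Quick, illustrative shape check for deform_inputs (pure torch, CPU-runnable):
# for a 512x512 image the adapter's c-branch holds tokens at strides 8/16/32,
# while the ViT branch lives at stride 16.
if __name__ == '__main__':
    x = torch.rand(1, 3, 512, 512)
    d1, d2 = deform_inputs(x)
    # injector: ViT tokens (stride 16) query the 3-level c tokens
    assert d1[0].shape == (1, 32 * 32, 1, 2)
    # extractor: 3-level c tokens query the single-level ViT feature map
    assert d2[0].shape == (1, 64 * 64 + 32 * 32 + 16 * 16, 1, 2)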
ViT-Adapter-main
detection/mmdet_custom/models/backbones/adapter_modules.py
# Copyright (c) Shanghai AI Lab. All rights reserved. import logging import math import torch.nn as nn import torch.nn.functional as F from mmdet.models.builder import BACKBONES from timm.models.layers import trunc_normal_ from .base.vit import TIMMVisionTransformer from .base.vit import ResBottleneckBlock _logger = logging.getLogger(__name__) @BACKBONES.register_module() class ViTBaseline(TIMMVisionTransformer): def __init__(self, pretrain_size=224, out_indices=None, *args, **kwargs): super().__init__(*args, **kwargs) self.cls_token = None self.num_block = len(self.blocks) self.pretrain_size = (pretrain_size, pretrain_size) self.out_indices = out_indices assert out_indices is not None embed_dim = self.embed_dim self.norm1 = self.norm_layer(embed_dim) self.norm2 = self.norm_layer(embed_dim) self.norm3 = self.norm_layer(embed_dim) self.norm4 = self.norm_layer(embed_dim) self.up1 = nn.Sequential(*[ nn.ConvTranspose2d(embed_dim, embed_dim, 2, 2), nn.GroupNorm(32, embed_dim), nn.GELU(), nn.ConvTranspose2d(embed_dim, embed_dim, 2, 2) ]) self.up2 = nn.ConvTranspose2d(embed_dim, embed_dim, 2, 2) self.up3 = nn.Identity() self.up4 = nn.MaxPool2d(kernel_size=2, stride=2) self.up1.apply(self._init_weights) self.up2.apply(self._init_weights) self.up3.apply(self._init_weights) self.up4.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm) or isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) elif isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d): fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels fan_out //= m.groups m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) if m.bias is not None: m.bias.data.zero_() elif isinstance(m, ResBottleneckBlock): m.norm3.weight.data.zero_() m.norm3.bias.data.zero_() def _get_pos_embed(self, pos_embed, H, W): pos_embed = pos_embed.reshape( 1, self.pretrain_size[0] // 16, self.pretrain_size[1] // 16, -1).permute(0, 3, 1, 2) pos_embed = F.interpolate(pos_embed, size=(H, W), mode='bicubic', align_corners=False).\ reshape(1, -1, H * W).permute(0, 2, 1) return pos_embed def forward_features(self, x): outs = [] x, H, W = self.patch_embed(x) pos_embed = self._get_pos_embed(self.pos_embed[:, 1:], H, W) x = self.pos_drop(x + pos_embed) for index, blk in enumerate(self.blocks): x = blk(x, H, W) if index in self.out_indices: outs.append(x) return outs, H, W def forward(self, x): outs, H, W = self.forward_features(x) if len(outs) == 1: # for ViTDet f1 = f2 = f3 = f4 = outs[0] else: # for ViT f1, f2, f3, f4 = outs bs, n, dim = f1.shape # Final Norm f1 = self.norm1(f1).transpose(1, 2).reshape(bs, dim, H, W) f2 = self.norm2(f2).transpose(1, 2).reshape(bs, dim, H, W) f3 = self.norm3(f3).transpose(1, 2).reshape(bs, dim, H, W) f4 = self.norm4(f4).transpose(1, 2).reshape(bs, dim, H, W) f1 = self.up1(f1).contiguous() f2 = self.up2(f2).contiguous() f3 = self.up3(f3).contiguous() f4 = self.up4(f4).contiguous() return [f1, f2, f3, f4]
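# Usage note (illustrative): with out_indices=[11] the same final-block feature
# is reused for all four levels (ViTDet-style), while e.g. out_indices=[2, 5, 8, 11]
# taps four intermediate blocks (plain-ViT style); up1..up4 then resample the
# stride-16 maps to strides 4, 8, 16 and 32 respectively.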
ViT-Adapter-main
detection/mmdet_custom/models/backbones/vit_baseline.py
# -------------------------------------------------------- # BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254) # Github source: https://github.com/microsoft/unilm/tree/master/beit # Copyright (c) 2021 Microsoft # Licensed under The MIT License [see LICENSE for details] # By Hangbo Bao # Based on timm, mmseg, setr, xcit and swin code bases # https://github.com/rwightman/pytorch-image-models/tree/master/timm # https://github.com/fudan-zvg/SETR # https://github.com/facebookresearch/xcit/ # https://github.com/microsoft/Swin-Transformer # --------------------------------------------------------' import logging import math from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint as cp from mmcv_custom import load_checkpoint from mmdet.models.builder import BACKBONES from mmdet.utils import get_root_logger from timm.models.layers import drop_path, to_2tuple, trunc_normal_ def window_partition(x, window_size): """ Args: x: (B, H, W, C) window_size (int): window size Returns: windows: (num_windows*B, window_size, window_size, C) """ B, H, W, C = x.shape x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) return windows def window_reverse(windows, window_size, H, W): """ Args: windows: (num_windows*B, window_size, window_size, C) window_size (int): Window size H (int): Height of image W (int): Width of image Returns: x: (B, H, W, C) """ B = int(windows.shape[0] / (H * W / window_size / window_size)) x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) return x class DropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" def __init__(self, drop_prob=None): super(DropPath, self).__init__() self.drop_prob = drop_prob def forward(self, x): return drop_path(x, self.drop_prob, self.training) def extra_repr(self) -> str: return 'p={}'.format(self.drop_prob) class Mlp(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.act = act_layer() self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, x): x = self.fc1(x) x = self.act(x) # x = self.drop(x) # commit this for the original BERT implement x = self.fc2(x) x = self.drop(x) return x class Attention(nn.Module): def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., window_size=None, attn_head_dim=None, windowed=False): super().__init__() self.num_heads = num_heads self.windowed = windowed head_dim = dim // num_heads if attn_head_dim is not None: head_dim = attn_head_dim all_head_dim = head_dim * self.num_heads # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights self.scale = qk_scale or head_dim**-0.5 self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False) if qkv_bias: self.q_bias = nn.Parameter(torch.zeros(all_head_dim)) self.v_bias = nn.Parameter(torch.zeros(all_head_dim)) else: self.q_bias = None self.v_bias = None self.window_size = window_size self.num_relative_distance = (2 * window_size - 1) * (2 * window_size - 1) 
self.relative_position_bias_table = nn.Parameter( torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH # cls to token & token 2 cls & cls to cls # get pair-wise relative position index for each token inside the window coords_h = torch.arange(window_size) coords_w = torch.arange(window_size) coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 relative_coords[:, :, 0] += window_size - 1 # shift to start from 0 relative_coords[:, :, 1] += window_size - 1 relative_coords[:, :, 0] *= 2 * window_size - 1 relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww self.register_buffer('relative_position_index', relative_position_index) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(all_head_dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x, H, W, rel_pos_bias=None): def _attn_forward(x): B, N, C = x.shape qkv_bias = None if self.q_bias is not None: qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias)) # qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias) qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) q = q * self.scale attn = (q @ k.transpose(-2, -1)) if self.relative_position_bias_table is not None: relative_position_bias = \ self.relative_position_bias_table[self.relative_position_index.view(-1)].view( self.window_size * self.window_size, self.window_size * self.window_size, -1) # Wh*Ww,Wh*Ww,nH relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww # relative_position_bias = relative_position_bias[:, 1:, 1:] attn = attn + relative_position_bias.unsqueeze(0) if rel_pos_bias is not None: attn = attn + rel_pos_bias attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, -1) x = self.proj(x) x = self.proj_drop(x) return x if not self.windowed: return _attn_forward(x) else: B, L, C = x.shape x = x.view(B, H, W, C) # N_ = self.window_size * self.window_size H_ = math.ceil(H / self.window_size) * self.window_size W_ = math.ceil(W / self.window_size) * self.window_size x = F.pad(x, [0, 0, 0, W_ - W, 0, H_ - H]) # partition windows x = window_partition( x, self.window_size) # nW*B, window_size, window_size, C x = x.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C x = _attn_forward(x) # merge windows x = x.view(-1, self.window_size, self.window_size, C) x = window_reverse(x, self.window_size, H_, W_) # B H' W' C x = x[:, :H, :W, :].reshape(B, H * W, C) return x class Block(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., drop_path=0., init_values=None, act_layer=nn.GELU, norm_layer=nn.LayerNorm, window_size=None, windowed=False, attn_head_dim=None, with_cp=False): super().__init__() self.with_cp = with_cp self.norm1 = norm_layer(dim) self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop, window_size=window_size, attn_head_dim=attn_head_dim, windowed=windowed) # NOTE: drop path for stochastic 
depth, we shall see if this is better than dropout here self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) if init_values is not None: self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True) self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True) else: self.gamma_1, self.gamma_2 = None, None def forward(self, x, H, W, rel_pos_bias=None): def _inner_forward(x): if self.gamma_1 is None: x = x + self.drop_path( self.attn(self.norm1(x), H, W, rel_pos_bias=rel_pos_bias)) x = x + self.drop_path(self.mlp(self.norm2(x))) else: x = x + self.drop_path(self.gamma_1 * self.attn( self.norm1(x), H, W, rel_pos_bias=rel_pos_bias)) x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x))) return x if self.with_cp and x.requires_grad: x = cp.checkpoint(_inner_forward, x) else: x = _inner_forward(x) return x class PatchEmbed(nn.Module): """Image to Patch Embedding.""" def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768): super().__init__() img_size = to_2tuple(img_size) patch_size = to_2tuple(patch_size) num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0]) self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) self.img_size = img_size self.patch_size = patch_size self.num_patches = num_patches self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) def forward(self, x, **kwargs): B, C, H, W = x.shape # FIXME look at relaxing size constraints # assert H == self.img_size[0] and W == self.img_size[1], \ # f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." x = self.proj(x) Hp, Wp = x.shape[2], x.shape[3] x = x.flatten(2).transpose(1, 2) return x, Hp, Wp class HybridEmbed(nn.Module): """CNN Feature Map Embedding Extract feature map from CNN, flatten, project to embedding dim.""" def __init__(self, backbone, img_size=224, feature_size=None, in_chans=3, embed_dim=768): super().__init__() assert isinstance(backbone, nn.Module) img_size = to_2tuple(img_size) self.img_size = img_size self.backbone = backbone if feature_size is None: with torch.no_grad(): # FIXME this is hacky, but most reliable way of determining the exact dim of the output feature # map for all networks, the feature metadata has reliable channel and stride info, but using # stride to calc feature dim requires info about padding of each stage that isn't captured. 
training = backbone.training if training: backbone.eval() o = self.backbone(torch.zeros(1, in_chans, img_size[0], img_size[1]))[-1] feature_size = o.shape[-2:] feature_dim = o.shape[1] backbone.train(training) else: feature_size = to_2tuple(feature_size) feature_dim = self.backbone.feature_info.channels()[-1] self.num_patches = feature_size[0] * feature_size[1] self.proj = nn.Linear(feature_dim, embed_dim) def forward(self, x): x = self.backbone(x)[-1] x = x.flatten(2).transpose(1, 2) x = self.proj(x) return x class RelativePositionBias(nn.Module): def __init__(self, window_size, num_heads): super().__init__() self.window_size = to_2tuple(window_size) self.num_relative_distance = (2 * window_size - 1) * (2 * window_size - 1) self.relative_position_bias_table = nn.Parameter( torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH (no cls-token entries in this detection variant) # get pair-wise relative position index for each token inside the window coords_h = torch.arange(window_size) coords_w = torch.arange(window_size) coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww relative_coords = coords_flatten[:, :, None] - \ coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 relative_coords[:, :, 0] += window_size - 1 # shift to start from 0 relative_coords[:, :, 1] += window_size - 1 relative_coords[:, :, 0] *= 2 * window_size - 1 relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww self.register_buffer('relative_position_index', relative_position_index) # trunc_normal_(self.relative_position_bias_table, std=.02) def forward(self): # self.window_size is a 2-tuple here, so index it rather than multiplying tuples relative_position_bias = \ self.relative_position_bias_table[self.relative_position_index.view(-1)].view( self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH return relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww @BACKBONES.register_module() class BEiT(nn.Module): """Vision Transformer with support for patch or hybrid CNN input stage.""" def __init__(self, img_size=512, patch_size=16, in_chans=3, num_classes=80, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., hybrid_backbone=None, norm_layer=None, init_values=None, use_checkpoint=False, use_abs_pos_emb=False, use_rel_pos_bias=True, use_shared_rel_pos_bias=False, pretrained=None, with_cp=False, window_attn=False, window_size=14, ): super().__init__() norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) self.norm_layer = norm_layer # self.num_classes = num_classes self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models self.drop_path_rate = drop_path_rate window_attn = [window_attn] * depth if not isinstance(window_attn, list) else window_attn window_size = [window_size] * depth if not isinstance(window_size, list) else window_size logging.info('window attention: %s', window_attn) # lazy %-formatting; logging does not join args like print logging.info('window size: %s', window_size) if hybrid_backbone is not None: self.patch_embed = HybridEmbed(hybrid_backbone, img_size=img_size, in_chans=in_chans, embed_dim=embed_dim) else: self.patch_embed = PatchEmbed(img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) num_patches = self.patch_embed.num_patches # self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) # self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if
use_abs_pos_emb: self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim)) else: self.pos_embed = None self.pos_drop = nn.Dropout(p=drop_rate) if use_shared_rel_pos_bias: self.rel_pos_bias = RelativePositionBias(window_size=window_size[0], num_heads=num_heads) else: self.rel_pos_bias = None dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth) ] # stochastic depth decay rule self.use_rel_pos_bias = use_rel_pos_bias self.use_checkpoint = use_checkpoint self.blocks = nn.ModuleList([ Block(dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, with_cp=with_cp, init_values=init_values, windowed=window_attn[i], window_size=window_size[i]) for i in range(depth) ]) if self.pos_embed is not None: trunc_normal_(self.pos_embed, std=.02) # trunc_normal_(self.cls_token, std=.02) self.apply(self._init_weights) self.init_weights(pretrained) # self.fix_init_weight() def init_weights(self, pretrained=None): """Initialize the weights in backbone. Args: pretrained (str, optional): Path to pre-trained weights. Defaults to None. """ # pretrained = 'pretrained/beit_large_patch16_512_pt22k_ft22kto1k.pth' if isinstance(pretrained, str): logger = get_root_logger() load_checkpoint(self, pretrained, strict=False, logger=logger) def fix_init_weight(self): def rescale(param, layer_id): param.div_(math.sqrt(2.0 * layer_id)) for layer_id, layer in enumerate(self.blocks): rescale(layer.attn.proj.weight.data, layer_id + 1) rescale(layer.mlp.fc2.weight.data, layer_id + 1) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) def get_num_layers(self): return len(self.blocks)
ViT-Adapter-main
detection/mmdet_custom/models/backbones/base/beit.py
import logging import math import torch import torch.nn.functional as F import torch.utils.checkpoint as cp from mmcv.runner import load_checkpoint from mmdet.utils import get_root_logger from timm.models.layers import DropPath from torch import nn def window_partition(x, window_size): """ Args: x: (B, H, W, C) window_size (int): window size Returns: windows: (num_windows*B, window_size, window_size, C) """ B, H, W, C = x.shape x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) return windows def window_reverse(windows, window_size, H, W): """ Args: windows: (num_windows*B, window_size, window_size, C) window_size (int): Window size H (int): Height of image W (int): Width of image Returns: x: (B, H, W, C) """ B = int(windows.shape[0] / (H * W / window_size / window_size)) x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) return x class Attention(nn.Module): def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim**-0.5 self.in_proj = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.out_proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x, H, W): B, N, C = x.shape qkv = self.in_proj(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple) attn = (q @ k.transpose(-2, -1)) * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.out_proj(x) x = self.proj_drop(x) return x class WindowedAttention(nn.Module): def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0., window_size=14): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim ** -0.5 self.in_proj = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.out_proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) self.window_size = window_size def forward(self, x, H, W): B, N, C = x.shape N_ = self.window_size * self.window_size H_ = math.ceil(H / self.window_size) * self.window_size W_ = math.ceil(W / self.window_size) * self.window_size x = x.view(B, H, W, C) x = F.pad(x, [0, 0, 0, W_ - W, 0, H_- H]) x = window_partition(x, window_size=self.window_size)# nW*B, window_size, window_size, C x = x.view(-1, N_, C) qkv = self.in_proj(x).view(-1, N_, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple) attn = (q @ k.transpose(-2, -1)) * self.scale # [B, L, num_head, N_, N_] attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) # [B, L, num_head, N_, N_] x = (attn @ v).transpose(1, 2).reshape(-1, self.window_size, self.window_size, C) x = window_reverse(x, self.window_size, H_, W_) x = x[:, :H, :W, :].reshape(B, N, C).contiguous() x = self.out_proj(x) x = self.proj_drop(x) return x class BertLayer(nn.Module): def __init__(self, hidden_size=768, intermediate_size=3072, num_attention_heads=12, drop_path_ratio=0.1, windowed=False, window_size=14, with_cp=False): super(BertLayer, self).__init__() self.with_cp = with_cp if windowed: self.self_attn = WindowedAttention(hidden_size, 
num_attention_heads, qkv_bias=True, attn_drop=0., proj_drop=0., window_size=window_size) else: self.self_attn = Attention(hidden_size, num_attention_heads, qkv_bias=True, attn_drop=0., proj_drop=0.) # self.intermediate = BertIntermediate(hidden_size, intermediate_size) self.linear1 = nn.Linear(hidden_size, intermediate_size) self.act_fn = nn.GELU() self.linear2 = nn.Linear(intermediate_size, hidden_size) self.drop_path = DropPath(drop_path_ratio) if drop_path_ratio > 0. else nn.Identity() self.norm1 = nn.LayerNorm(hidden_size) self.norm2 = nn.LayerNorm(hidden_size) self.gamma_1 = nn.Parameter(torch.zeros((hidden_size)), requires_grad=True) self.gamma_2 = nn.Parameter(torch.zeros((hidden_size)), requires_grad=True) def ffn_forward(self, x): x = self.linear1(x) x = self.act_fn(x) x = self.linear2(x) return x def forward(self, x, H, W): def _inner_forward(x): x = x + self.gamma_1 * self.drop_path(self.self_attn(self.norm1(x), H, W)) x = x + self.gamma_2 * self.drop_path(self.ffn_forward(self.norm2(x))) return x if self.with_cp and x.requires_grad: x = cp.checkpoint(_inner_forward, x) else: x = _inner_forward(x) return x class VisualPatchEmbedding(nn.Module): def __init__(self, in_dim=3, out_dim=768, patch_size=16, image_size=224, dropout=0.): super(VisualPatchEmbedding, self).__init__() self.embeddings_act = None self.embeddings_norm = nn.LayerNorm(out_dim) # self.embeddings_type = nn.Embedding(1, 768) self.embeddings_dropout = nn.Dropout(dropout) self.patch_embed = PatchEmbed( img_size=(image_size, image_size), patch_size=(patch_size, patch_size), in_chans=in_dim, embed_dim=out_dim, ) def forward(self, x): embeddings, H, W = self.patch_embed(x) # data_type = torch.zeros(1).long().cuda() # embeddings_type = self.embeddings_type(data_type).unsqueeze(1) # embeddings = embeddings + embeddings_type # embeddings = embeddings + self.embeddings_type.weight[0].unsqueeze(0).unsqueeze(1).to(embeddings.dtype) if self.embeddings_act is not None: embeddings = self.embeddings_act(embeddings) if self.embeddings_norm is not None: embeddings = self.embeddings_norm(embeddings) if self.embeddings_dropout is not None: embeddings = self.embeddings_dropout(embeddings) return embeddings, H, W class PatchEmbed(torch.nn.Module): """Image to Patch Embedding.""" def __init__(self, img_size=(224, 224), patch_size=(16, 16), in_chans=3, embed_dim=768): super().__init__() num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0]) self.img_size = img_size self.patch_size = patch_size self.num_patches = num_patches self.pretrain_size = img_size self.spatial_pos_embed = nn.Embedding(num_patches, embed_dim) self.temporal_pos_embed = nn.Embedding(8, embed_dim) self.proj = torch.nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) def _get_pos_embed(self, pos_embed, H, W): pos_embed = pos_embed.reshape( 1, self.pretrain_size[0] // 16, self.pretrain_size[1] // 16, -1).permute(0, 3, 1, 2) pos_embed = F.interpolate(pos_embed, size=(H, W), mode='bicubic', align_corners=False).\ reshape(1, -1, H * W).permute(0, 2, 1) return pos_embed def forward(self, x): B, C, H, W = x.shape x = self.proj(x).flatten(2).transpose(1, 2) # B, N, C temp_len = 1 pos_embed = self._get_pos_embed(self.spatial_pos_embed.weight.unsqueeze(0), H // 16, W // 16) temporal_pos_ids = torch.arange(temp_len, dtype=torch.long, device=x.device) temporal_pos_embed = self.temporal_pos_embed(temporal_pos_ids).unsqueeze(0) x = x + pos_embed + temporal_pos_embed return x, H // 16, W // 16 class UnifiedBertEncoder(nn.Module): def 
__init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., drop_path_rate=0., norm_layer=nn.LayerNorm, embed_layer=VisualPatchEmbedding, window_attn=False, window_size=14, with_cp=False, pretrained=None): super(UnifiedBertEncoder, self).__init__() self.embed_dim = embed_dim self.drop_path_rate = drop_path_rate self.norm_layer = norm_layer window_attn = [window_attn] * depth if not isinstance(window_attn, list) else window_attn window_size = [window_size] * depth if not isinstance(window_size, list) else window_size logging.info('window attention: %s', window_attn) # lazy %-formatting; logging does not join args like print logging.info('window size: %s', window_size) layers = [] dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule for i in range(depth): layers.append( BertLayer(hidden_size=embed_dim, intermediate_size=int(embed_dim * mlp_ratio), num_attention_heads=num_heads, drop_path_ratio=dpr[i], windowed=window_attn[i], window_size=window_size[i], with_cp=with_cp) ) self.layers = nn.ModuleList(layers) self.visual_embed = embed_layer(in_dim=in_chans, out_dim=embed_dim, patch_size=patch_size, image_size=img_size) self.init_weights(pretrained) def init_weights(self, pretrained=None): if isinstance(pretrained, str): logger = get_root_logger() load_checkpoint(self, pretrained, map_location='cpu', strict=False, logger=logger) def forward(self, x): x, H, W = self.visual_embed(x) for layer in self.layers: x = layer(x, H, W) return x
ViT-Adapter-main
detection/mmdet_custom/models/backbones/base/uniperceiver.py
"""Vision Transformer (ViT) in PyTorch. A PyTorch implement of Vision Transformers as described in: 'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale' - https://arxiv.org/abs/2010.11929 `How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers` - https://arxiv.org/abs/2106.10270 The official jax code is released and available at https://github.com/google-research/vision_transformer DeiT model defs and weights from https://github.com/facebookresearch/deit, paper `DeiT: Data-efficient Image Transformers` - https://arxiv.org/abs/2012.12877 Acknowledgments: * The paper authors for releasing code and weights, thanks! * I fixed my class token impl based on Phil Wang's https://github.com/lucidrains/vit-pytorch ... check it out for some einops/einsum fun * Simple transformer style inspired by Andrej Karpathy's https://github.com/karpathy/minGPT * Bert reference code checks against Huggingface Transformers and Tensorflow Bert Hacked together by / Copyright 2021 Ross Wightman """ import logging import math from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint as cp from mmcv.runner import BaseModule from mmcv_custom import my_load_checkpoint as load_checkpoint from mmdet.utils import get_root_logger from timm.models.layers import DropPath, Mlp, to_2tuple class PatchEmbed(nn.Module): """2D Image to Patch Embedding.""" def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None, flatten=True, bias=True): super().__init__() img_size = to_2tuple(img_size) patch_size = to_2tuple(patch_size) self.img_size = img_size self.patch_size = patch_size self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) self.num_patches = self.grid_size[0] * self.grid_size[1] self.flatten = flatten self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size, bias=bias) self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() def forward(self, x): x = self.proj(x) _, _, H, W = x.shape if self.flatten: x = x.flatten(2).transpose(1, 2) # BCHW -> BNC x = self.norm(x) return x, H, W class Attention(nn.Module): def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim**-0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x, H, W): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple) attn = (q @ k.transpose(-2, -1)) * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x def window_partition(x, window_size): """ Args: x: (B, H, W, C) window_size (int): window size Returns: windows: (num_windows*B, window_size, window_size, C) """ B, H, W, C = x.shape x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) return windows def window_reverse(windows, window_size, H, W): """ Args: windows: (num_windows*B, window_size, window_size, C) window_size (int): Window size H (int): Height of image W (int): Width of image Returns: x: 
(B, H, W, C) """ B = int(windows.shape[0] / (H * W / window_size / window_size)) x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) return x class WindowedAttention(nn.Module): def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0., window_size=14): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim**-0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) self.window_size = window_size def forward(self, x, H, W): B, N, C = x.shape N_ = self.window_size * self.window_size H_ = math.ceil(H / self.window_size) * self.window_size W_ = math.ceil(W / self.window_size) * self.window_size qkv = self.qkv(x) # [B, N, C] qkv = qkv.transpose(1, 2).reshape(B, C * 3, H, W) # [B, C, H, W] qkv = F.pad(qkv, [0, W_ - W, 0, H_ - H], mode='constant') qkv = F.unfold(qkv, kernel_size=(self.window_size, self.window_size), stride=(self.window_size, self.window_size)) B, C_kw_kw, L = qkv.shape # L - the num of windows qkv = qkv.reshape(B, C * 3, N_, L).permute(0, 3, 2, 1) # [B, L, N_, C] qkv = qkv.reshape(B, L, N_, 3, self.num_heads, C // self.num_heads).permute(3, 0, 1, 4, 2, 5) q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple) # q,k,v [B, L, num_head, N_, C/num_head] attn = (q @ k.transpose(-2, -1)) * self.scale # [B, L, num_head, N_, N_] attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) # [B, L, num_head, N_, N_] # attn @ v = [B, L, num_head, N_, C/num_head] x = (attn @ v).permute(0, 2, 4, 3, 1).reshape(B, C_kw_kw // 3, L) x = F.fold(x, output_size=(H_, W_), kernel_size=(self.window_size, self.window_size), stride=(self.window_size, self.window_size)) # [B, C, H_, W_] x = x[:, :, :H, :W].reshape(B, C, N).transpose(-1, -2) x = self.proj(x) x = self.proj_drop(x) return x # class WindowedAttention(nn.Module): # def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0., window_size=14, pad_mode="constant"): # super().__init__() # self.num_heads = num_heads # head_dim = dim // num_heads # self.scale = head_dim ** -0.5 # # self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) # self.attn_drop = nn.Dropout(attn_drop) # self.proj = nn.Linear(dim, dim) # self.proj_drop = nn.Dropout(proj_drop) # self.window_size = window_size # self.pad_mode = pad_mode # # def forward(self, x, H, W): # B, N, C = x.shape # # N_ = self.window_size * self.window_size # H_ = math.ceil(H / self.window_size) * self.window_size # W_ = math.ceil(W / self.window_size) * self.window_size # x = x.view(B, H, W, C) # x = F.pad(x, [0, 0, 0, W_ - W, 0, H_- H], mode=self.pad_mode) # # x = window_partition(x, window_size=self.window_size)# nW*B, window_size, window_size, C # x = x.view(-1, N_, C) # # qkv = self.qkv(x).view(-1, N_, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) # q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple) # attn = (q @ k.transpose(-2, -1)) * self.scale # [B, L, num_head, N_, N_] # attn = attn.softmax(dim=-1) # attn = self.attn_drop(attn) # [B, L, num_head, N_, N_] # x = (attn @ v).transpose(1, 2).reshape(-1, self.window_size, self.window_size, C) # # x = window_reverse(x, self.window_size, H_, W_) # x = x[:, :H, :W, :].reshape(B, N, C).contiguous() # x = self.proj(x) # x = self.proj_drop(x) # return x class LayerNorm(nn.Module): """ A LayerNorm variant, popularized 
by Transformers, that performs point-wise mean and variance normalization over the channel dimension for inputs that have shape (batch_size, channels, height, width). https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119 # noqa B950 """ def __init__(self, normalized_shape, eps=1e-6): super().__init__() self.weight = nn.Parameter(torch.ones(normalized_shape)) self.bias = nn.Parameter(torch.zeros(normalized_shape)) self.eps = eps self.normalized_shape = (normalized_shape,) def forward(self, x): u = x.mean(1, keepdim=True) s = (x - u).pow(2).mean(1, keepdim=True) x = (x - u) / torch.sqrt(s + self.eps) x = self.weight[:, None, None] * x + self.bias[:, None, None] return x class ResBottleneckBlock(nn.Module): """ The standard bottleneck residual block without the last activation layer. It contains 3 conv layers with kernels 1x1, 3x3, 1x1. """ def __init__( self, in_channels, out_channels, bottleneck_channels, norm=LayerNorm, act_layer=nn.GELU, ): """ Args: in_channels (int): Number of input channels. out_channels (int): Number of output channels. bottleneck_channels (int): number of output channels for the 3x3 "bottleneck" conv layers. norm (str or callable): normalization for all conv layers. See :func:`layers.get_norm` for supported format. act_layer (callable): activation for all conv layers. """ super().__init__() self.conv1 = nn.Conv2d(in_channels, bottleneck_channels, 1, bias=False) self.norm1 = norm(bottleneck_channels) self.act1 = act_layer() self.conv2 = nn.Conv2d(bottleneck_channels, bottleneck_channels, 3, padding=1, bias=False,) self.norm2 = norm(bottleneck_channels) self.act2 = act_layer() self.conv3 = nn.Conv2d(bottleneck_channels, out_channels, 1, bias=False) self.norm3 = norm(out_channels) for layer in [self.norm1, self.norm2]: layer.weight.data.fill_(1.0) layer.bias.data.zero_() # zero init last norm layer. self.norm3.weight.data.zero_() self.norm3.bias.data.zero_() def forward(self, x): out = x for layer in [self.conv1, self.norm1, self.act1, self.conv2, self.norm2, self.act2, self.conv3, self.norm3]: x = layer(x) out = x + out return out class Block(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., with_cp=False, attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, windowed=False, window_size=14, use_residual=False, layer_scale=False): super().__init__() self.with_cp = with_cp self.use_residual = use_residual self.norm1 = norm_layer(dim) if windowed: self.attn = WindowedAttention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop, window_size=window_size) else: self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) self.layer_scale = layer_scale if layer_scale: self.gamma1 = nn.Parameter(torch.ones((dim)), requires_grad=True) self.gamma2 = nn.Parameter(torch.ones((dim)), requires_grad=True) if self.use_residual: # Use a residual block with bottleneck channel as dim // 2 self.residual = ResBottleneckBlock( in_channels=dim, out_channels=dim, bottleneck_channels=dim // 2, norm=LayerNorm, act_layer=act_layer, ) def forward(self, x, H, W): def _inner_forward(x): if self.layer_scale: x = x + self.drop_path(self.gamma1 * self.attn(self.norm1(x), H, W)) x = x + self.drop_path(self.gamma2 * self.mlp(self.norm2(x))) else: x = x + self.drop_path(self.attn(self.norm1(x), H, W)) x = x + self.drop_path(self.mlp(self.norm2(x))) if self.use_residual: B, N, C = x.shape x = x.reshape(B, H, W, C).permute(0, 3, 1, 2) x = self.residual(x) x = x.permute(0, 2, 3, 1).reshape(B, N, C) return x if self.with_cp and x.requires_grad: x = cp.checkpoint(_inner_forward, x) else: x = _inner_forward(x) return x class TIMMVisionTransformer(BaseModule): """Vision Transformer. A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` - https://arxiv.org/abs/2010.11929 Includes distillation token & head support for `DeiT: Data-efficient Image Transformers` - https://arxiv.org/abs/2012.12877 """ def __init__(self, img_size=224, patch_size=16, in_chans=3, residual_indices=[], embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., layer_scale=True, embed_layer=PatchEmbed, norm_layer=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU, window_attn=False, window_size=14, with_cp=False, pretrained=None): """ Args: img_size (int, tuple): input image size patch_size (int, tuple): patch size in_chans (int): number of input channels num_classes (int): number of classes for classification head embed_dim (int): embedding dimension depth (int): depth of transformer num_heads (int): number of attention heads mlp_ratio (int): ratio of mlp hidden dim to embedding dim qkv_bias (bool): enable bias for qkv if True drop_rate (float): dropout rate attn_drop_rate (float): attention dropout rate drop_path_rate (float): stochastic depth rate embed_layer (nn.Module): patch embedding layer norm_layer: (nn.Module): normalization layer pretrained: (str): pretrained path with_cp: (bool): use checkpoint or not """ super().__init__() # self.num_classes = num_classes self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models self.num_tokens = 1 norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) act_layer = act_layer or nn.GELU self.norm_layer = norm_layer self.act_layer = act_layer self.pretrain_size = img_size self.drop_path_rate = drop_path_rate self.drop_rate = drop_rate window_attn = [window_attn] * depth if not isinstance(window_attn, list) else window_attn window_size = [window_size] * depth if not isinstance(window_size, list) else window_size logging.info('window attention:', window_attn) logging.info('window size:', window_size) logging.info('layer scale:', layer_scale) self.patch_embed = embed_layer( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, ) num_patches = self.patch_embed.num_patches self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim)) self.pos_drop = 
nn.Dropout(p=drop_rate) dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule self.blocks = nn.Sequential(*[ Block(dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer, windowed=window_attn[i], window_size=window_size[i], layer_scale=layer_scale, with_cp=with_cp, use_residual=True if i in residual_indices else False) for i in range(depth) ]) self.init_weights(pretrained) def init_weights(self, pretrained=None): if isinstance(pretrained, str): logger = get_root_logger() load_checkpoint(self, pretrained, map_location='cpu', strict=False, logger=logger) def forward_features(self, x): x, H, W = self.patch_embed(x) cls_token = self.cls_token.expand( x.shape[0], -1, -1) # stole cls_tokens impl from Phil Wang, thanks x = torch.cat((cls_token, x), dim=1) x = self.pos_drop(x + self.pos_embed) for blk in self.blocks: x = blk(x, H, W) x = self.norm(x) return x def forward(self, x): x = self.forward_features(x) return x
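# --- Editor's usage sketch (not part of the upstream file): a minimal forward
# pass. Note that WindowedAttention reshapes the sequence back to an H x W
# grid, so it assumes N == H * W; with the cls token prepended in
# forward_features() it only works in subclasses that drop the extra token
# (as the ViT-Adapter backbone does). The plain model below therefore keeps
# global attention; a 224x224 input yields 1 + 196 = 197 output tokens.
if __name__ == '__main__':
    vit = TIMMVisionTransformer(img_size=224, patch_size=16, embed_dim=768,
                                depth=2, num_heads=12)  # shallow depth, just for the test
    feats = vit(torch.randn(1, 3, 224, 224))
    assert feats.shape == (1, 197, 768)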
ViT-Adapter-main
detection/mmdet_custom/models/backbones/base/vit.py
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
import torch.nn.functional as F
from mmdet.core import (bbox2result, bbox2roi, bbox_mapping,
                        bbox_mapping_back, merge_aug_masks, multiclass_nms)
from mmdet.models.builder import DETECTORS
from mmdet.models.detectors.cascade_rcnn import CascadeRCNN


@DETECTORS.register_module()
class HybridTaskCascadeAug(CascadeRCNN):
    """Implementation of `HTC <https://arxiv.org/abs/1901.07518>`_ with
    multi-scale test-time augmentation and box voting."""

    def __init__(self, **kwargs):
        super(HybridTaskCascadeAug, self).__init__(**kwargs)

    @property
    def with_semantic(self):
        """bool: whether the detector has a semantic head"""
        return self.roi_head.with_semantic

    def aug_test(self, imgs, img_metas, rescale=False):
        return [self.aug_test_vote(imgs, img_metas, rescale)]

    def merge_aug_results(self, aug_bboxes, aug_scores, img_metas):
        recovered_bboxes = []
        for bboxes, img_info, scores in zip(aug_bboxes, img_metas, aug_scores):
            img_shape = img_info[0]['img_shape']
            scale_factor = img_info[0]['scale_factor']
            flip = img_info[0]['flip']
            flip_direction = img_info[0]['flip_direction']
            bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor,
                                       flip, flip_direction)
            recovered_bboxes.append(bboxes)
        bboxes = torch.cat(recovered_bboxes, dim=0)
        scores = torch.cat(aug_scores, dim=0)
        return bboxes, scores

    def remove_boxes(self, boxes, scales=['s', 'm', 'l']):
        # print(boxes.shape, min_scale * min_scale, max_scale * max_scale)
        areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
        flag = areas < 0.0
        if 's' in scales:
            flag = flag | (areas <= 32.0 * 32.0)
        if 'm' in scales:
            flag = flag | ((areas > 32.0 * 32.0) & (areas <= 96.0 * 96.0))
        if 'm-' in scales:
            flag = flag | ((areas > 32.0 * 32.0) & (areas <= 64.0 * 64.0))
        if 'm+' in scales:
            flag = flag | ((areas > 64.0 * 64.0) & (areas <= 96.0 * 96.0))
        if 'l' in scales:
            flag = flag | (areas > 96.0 * 96.0)
        if 'l-' in scales:
            flag = flag | ((areas > 96.0 * 96.0) & (areas < 512.0 * 512.0))
        if 'l+' in scales:
            flag = flag | (areas > 512.0 * 512.0)
        keep = torch.nonzero(flag, as_tuple=False).squeeze(1)
        return keep

    def aug_bbox_forward(self, x, proposal_list, img_metas, rescale=False):
        if self.roi_head.with_semantic:
            _, semantic_feat = self.roi_head.semantic_head(x)
        else:
            semantic_feat = None

        num_imgs = len(proposal_list)
        img_shapes = tuple(meta['img_shape'] for meta in img_metas)
        ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)
        scale_factors = tuple(meta['scale_factor'] for meta in img_metas)

        # "ms" in variable names means multi-stage
        ms_scores = []
        rcnn_test_cfg = self.roi_head.test_cfg

        rois = bbox2roi(proposal_list)
        if rois.shape[0] == 0:
            # There is no proposal in the whole batch
            bbox_results = [[
                np.zeros((0, 5), dtype=np.float32)
                for _ in range(self.roi_head.bbox_head[-1].num_classes)
            ]] * num_imgs
            if self.roi_head.with_mask:
                mask_classes = self.roi_head.mask_head[-1].num_classes
                segm_results = [[[] for _ in range(mask_classes)]
                                for _ in range(num_imgs)]
                results = list(zip(bbox_results, segm_results))
            else:
                results = bbox_results
            return results

        for i in range(self.roi_head.num_stages):
            bbox_head = self.roi_head.bbox_head[i]
            bbox_results = self.roi_head._bbox_forward(
                i, x, rois, semantic_feat=semantic_feat)
            # split batch bbox prediction back to each image
            cls_score = bbox_results['cls_score']
            bbox_pred = bbox_results['bbox_pred']
            num_proposals_per_img = tuple(len(p) for p in proposal_list)
            rois = rois.split(num_proposals_per_img, 0)
            cls_score = cls_score.split(num_proposals_per_img, 0)
            bbox_pred = bbox_pred.split(num_proposals_per_img, 0)
            ms_scores.append(cls_score)

            if i < self.roi_head.num_stages - 1:
                refine_rois_list = []
                for j in range(num_imgs):
                    if rois[j].shape[0] > 0:
                        bbox_label = cls_score[j][:, :-1].argmax(dim=1)
                        refine_rois = bbox_head.regress_by_class(
                            rois[j], bbox_label, bbox_pred[j], img_metas[j])
                        refine_rois_list.append(refine_rois)
                rois = torch.cat(refine_rois_list)

        # average scores of each image by stages
        cls_score = [
            sum([score[i] for score in ms_scores]) / float(len(ms_scores))
            for i in range(num_imgs)
        ]

        # apply bbox post-processing to each image individually
        det_bboxes = []
        det_labels = []
        for i in range(num_imgs):
            det_bbox, det_label = self.roi_head.bbox_head[-1].get_bboxes(
                rois[i], cls_score[i], bbox_pred[i], img_shapes[i],
                scale_factors[i], rescale=rescale, cfg=None)
            det_bboxes.append(det_bbox)
            det_labels.append(det_label)
        return det_bboxes[0], det_labels[0], semantic_feat

    def aug_segm_forward(self, img_feats, det_bboxes, det_labels,
                         semantic_feats, img_metas):
        rcnn_test_cfg = self.roi_head.test_cfg
        if det_bboxes.shape[0] == 0:
            segm_results = [[] for _ in range(self.roi_head.mask_head[-1].num_classes)]
        else:
            aug_masks = []
            aug_img_metas = []
            for x, img_meta, semantic in zip(img_feats, img_metas, semantic_feats):
                img_shape = img_meta[0]['img_shape']
                scale_factor = img_meta[0]['scale_factor']
                flip = img_meta[0]['flip']
                flip_direction = img_meta[0]['flip_direction']
                _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,
                                       scale_factor, flip, flip_direction)
                mask_rois = bbox2roi([_bboxes])
                mask_feats = self.roi_head.mask_roi_extractor[-1](
                    x[:len(self.roi_head.mask_roi_extractor[-1].featmap_strides)],
                    mask_rois)
                if self.roi_head.with_semantic:
                    semantic_feat = semantic
                    mask_semantic_feat = self.roi_head.semantic_roi_extractor(
                        [semantic_feat], mask_rois)
                    if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]:
                        mask_semantic_feat = F.adaptive_avg_pool2d(
                            mask_semantic_feat, mask_feats.shape[-2:])
                    mask_feats += mask_semantic_feat
                last_feat = None
                for i in range(self.roi_head.num_stages):
                    mask_head = self.roi_head.mask_head[i]
                    if self.roi_head.mask_info_flow:
                        mask_pred, last_feat = mask_head(mask_feats, last_feat)
                    else:
                        mask_pred = mask_head(mask_feats)
                    aug_masks.append(mask_pred.sigmoid().cpu().numpy())
                    aug_img_metas.append(img_meta)
            merged_masks = merge_aug_masks(aug_masks, aug_img_metas,
                                           self.roi_head.test_cfg)
            ori_shape = img_metas[0][0]['ori_shape']
            segm_results = self.roi_head.mask_head[-1].get_seg_masks(
                merged_masks, det_bboxes, det_labels, rcnn_test_cfg,
                ori_shape, scale_factor=1.0, rescale=False)
        return segm_results

    def aug_test_vote(self, imgs, img_metas, rescale=False):
        # recompute feats to save memory
        feats = self.extract_feats(imgs)

        aug_bboxes = []
        aug_scores = []
        semantic_feats = []
        for i, (x, img_meta) in enumerate(zip(feats, img_metas)):
            proposal_list = self.rpn_head.simple_test_rpn(x, img_meta)
            det_bboxes, det_scores, semantic_feat = self.aug_bbox_forward(
                x, proposal_list, img_meta, rescale=False)
            restored_bboxes, _ = self.merge_aug_results([det_bboxes],
                                                        [det_scores], [img_meta])
            kept = self.remove_boxes(restored_bboxes,
                                     self.test_cfg.aug.scale_ranges[i // 2])
            det_bboxes, det_scores = det_bboxes[kept, :], det_scores[kept, :]
            aug_bboxes.append(det_bboxes)
            aug_scores.append(det_scores)
            semantic_feats.append(semantic_feat)

        # after merging, bboxes will be rescaled to the original image size
        merged_bboxes, merged_scores = self.merge_aug_results(
            aug_bboxes, aug_scores, img_metas)
        det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,
                                                self.test_cfg.aug.score_thr,
                                                self.test_cfg.aug.nms,
                                                self.test_cfg.aug.max_per_img)

        if rescale:
            _det_bboxes = det_bboxes
        else:
            _det_bboxes = det_bboxes.clone()
            _det_bboxes[:, :4] *= img_metas[0][0]['scale_factor']

        bbox_results = bbox2result(_det_bboxes, det_labels,
                                   self.roi_head.bbox_head[-1].num_classes)
        if self.with_mask:
            segm_results = self.aug_segm_forward(feats, _det_bboxes, det_labels,
                                                 semantic_feats, img_metas)
            return bbox_results, segm_results
        else:
            return bbox_results
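# --- Editor's note (hypothetical values, not from the upstream repo):
# aug_test_vote() reads `self.test_cfg.aug`, so configs using this detector are
# expected to provide a block of roughly this shape, with one scale_ranges
# entry per pair of augmented views (original + horizontal flip):
# test_cfg = dict(
#     aug=dict(
#         score_thr=0.05,
#         nms=dict(type='soft_nms', iou_threshold=0.5),
#         max_per_img=100,
#         scale_ranges=[['l'], ['m', 'l'], ['s', 'm', 'l']]))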
ViT-Adapter-main
detection/mmdet_custom/models/detectors/htc_aug.py
from .htc_aug import HybridTaskCascadeAug

__all__ = ['HybridTaskCascadeAug']
ViT-Adapter-main
detection/mmdet_custom/models/detectors/__init__.py
# Copyright (c) ByteDance, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Mostly copy-paste from BEiT library:

https://github.com/microsoft/unilm/blob/master/beit/semantic_segmentation/mmcv_custom/layer_decay_optimizer_constructor.py
"""
import json

from mmcv.runner import (OPTIMIZER_BUILDERS, DefaultOptimizerConstructor,
                         get_dist_info)


def get_num_layer_for_vit(var_name, num_max_layer):
    if var_name in ('backbone.cls_token', 'backbone.mask_token',
                    'backbone.pos_embed', 'backbone.visual_embed'):
        return 0
    elif var_name.startswith('backbone.visual_embed'):
        return 0
    elif var_name.startswith('backbone.patch_embed'):
        return 0
    elif var_name.startswith('backbone.blocks') or var_name.startswith('backbone.layers'):
        layer_id = int(var_name.split('.')[2])
        return layer_id + 1
    else:
        return num_max_layer - 1


@OPTIMIZER_BUILDERS.register_module()
class LayerDecayOptimizerConstructor(DefaultOptimizerConstructor):
    def add_params(self, params, module, prefix='', is_dcn_module=None):
        """Add all parameters of module to the params list.

        The parameters of the given module will be added to the list of param
        groups, with specific rules defined by paramwise_cfg.

        Args:
            params (list[dict]): A list of param groups, it will be modified
                in place.
            module (nn.Module): The module to be added.
            prefix (str): The prefix of the module
            is_dcn_module (int|float|None): If the current module is a
                submodule of DCN, `is_dcn_module` will be passed to control
                conv_offset layer's learning rate. Defaults to None.
        """
        parameter_groups = {}
        print(self.paramwise_cfg)
        num_layers = self.paramwise_cfg.get('num_layers') + 2
        layer_decay_rate = self.paramwise_cfg.get('layer_decay_rate')
        print('Build LayerDecayOptimizerConstructor %f - %d' %
              (layer_decay_rate, num_layers))
        weight_decay = self.base_wd

        for name, param in module.named_parameters():
            if not param.requires_grad:
                continue  # frozen weights
            if len(param.shape) == 1 or name.endswith('.bias') or name in (
                    'pos_embed', 'cls_token', 'visual_embed'):
                group_name = 'no_decay'
                this_weight_decay = 0.
            else:
                group_name = 'decay'
                this_weight_decay = weight_decay

            layer_id = get_num_layer_for_vit(name, num_layers)
            group_name = 'layer_%d_%s' % (layer_id, group_name)

            if group_name not in parameter_groups:
                scale = layer_decay_rate**(num_layers - layer_id - 1)
                parameter_groups[group_name] = {
                    'weight_decay': this_weight_decay,
                    'params': [],
                    'param_names': [],
                    'lr_scale': scale,
                    'group_name': group_name,
                    'lr': scale * self.base_lr,
                }

            parameter_groups[group_name]['params'].append(param)
            parameter_groups[group_name]['param_names'].append(name)

        rank, _ = get_dist_info()
        if rank == 0:
            to_display = {}
            for key in parameter_groups:
                to_display[key] = {
                    'param_names': parameter_groups[key]['param_names'],
                    'lr_scale': parameter_groups[key]['lr_scale'],
                    'lr': parameter_groups[key]['lr'],
                    'weight_decay': parameter_groups[key]['weight_decay'],
                }
            print('Param groups = %s' % json.dumps(to_display, indent=2))

        # state_dict = module.state_dict()
        # for group_name in parameter_groups:
        #     group = parameter_groups[group_name]
        #     for name in group["param_names"]:
        #         group["params"].append(state_dict[name])

        params.extend(parameter_groups.values())
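# --- Editor's usage sketch (values are illustrative): the constructor is
# selected from an mmdet config by name, with `num_layers` equal to the
# backbone depth. Since add_params() uses num_layers + 2 levels, layer 0 (the
# patch embedding) ends up with lr * layer_decay_rate ** (num_layers + 1)
# while the head keeps the full base lr.
# optimizer = dict(
#     type='AdamW', lr=1e-4, weight_decay=0.05,
#     constructor='LayerDecayOptimizerConstructor',
#     paramwise_cfg=dict(num_layers=12, layer_decay_rate=0.9))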
ViT-Adapter-main
detection/mmcv_custom/layer_decay_optimizer_constructor.py
# Copyright (c) Open-MMLab. All rights reserved.
import io
import math
import os
import os.path as osp
import pkgutil
import time
import warnings
from collections import OrderedDict
from importlib import import_module
from tempfile import TemporaryDirectory

import mmcv
import numpy as np
import torch
import torchvision
from mmcv.fileio import FileClient
from mmcv.fileio import load as load_file
from mmcv.parallel import is_module_wrapper
from mmcv.runner import get_dist_info
from mmcv.utils import mkdir_or_exist
from scipy import interpolate
from torch.optim import Optimizer
from torch.utils import model_zoo

ENV_MMCV_HOME = 'MMCV_HOME'
ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
DEFAULT_CACHE_DIR = '~/.cache'


def _get_mmcv_home():
    mmcv_home = os.path.expanduser(
        os.getenv(
            ENV_MMCV_HOME,
            os.path.join(os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'mmcv')))
    mkdir_or_exist(mmcv_home)
    return mmcv_home


def load_state_dict(module, state_dict, strict=False, logger=None):
    """Load state_dict to a module.

    This method is modified from :meth:`torch.nn.Module.load_state_dict`.
    Default value for ``strict`` is set to ``False`` and the message for param
    mismatch will be shown even if strict is False.

    Args:
        module (Module): Module that receives the state_dict.
        state_dict (OrderedDict): Weights.
        strict (bool): whether to strictly enforce that the keys in
            :attr:`state_dict` match the keys returned by this module's
            :meth:`~torch.nn.Module.state_dict` function. Default: ``False``.
        logger (:obj:`logging.Logger`, optional): Logger to log the error
            message. If not specified, print function will be used.
    """
    unexpected_keys = []
    all_missing_keys = []
    err_msg = []

    metadata = getattr(state_dict, '_metadata', None)
    state_dict = state_dict.copy()
    if metadata is not None:
        state_dict._metadata = metadata

    # use _load_from_state_dict to enable checkpoint version control
    def load(module, prefix=''):
        # recursively check parallel module in case that the model has a
        # complicated structure, e.g., nn.Module(nn.Module(DDP))
        if is_module_wrapper(module):
            module = module.module
        local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
        module._load_from_state_dict(state_dict, prefix, local_metadata, True,
                                     all_missing_keys, unexpected_keys, err_msg)
        for name, child in module._modules.items():
            if child is not None:
                load(child, prefix + name + '.')

    load(module)
    load = None  # break load->load reference cycle

    # ignore "num_batches_tracked" of BN layers
    missing_keys = [
        key for key in all_missing_keys if 'num_batches_tracked' not in key
    ]

    if unexpected_keys:
        err_msg.append('unexpected key in source '
                       f'state_dict: {", ".join(unexpected_keys)}\n')
    if missing_keys:
        err_msg.append(
            f'missing keys in source state_dict: {", ".join(missing_keys)}\n')

    rank, _ = get_dist_info()
    if len(err_msg) > 0 and rank == 0:
        err_msg.insert(0, 'The model and loaded state dict do not match exactly\n')
        err_msg = '\n'.join(err_msg)
        if strict:
            raise RuntimeError(err_msg)
        elif logger is not None:
            logger.warning(err_msg)
        else:
            print(err_msg)


def load_url_dist(url, model_dir=None, map_location='cpu'):
    """In distributed setting, this function only download checkpoint at local
    rank 0."""
    rank, world_size = get_dist_info()
    rank = int(os.environ.get('LOCAL_RANK', rank))
    if rank == 0:
        checkpoint = model_zoo.load_url(url, model_dir=model_dir,
                                        map_location=map_location)
    if world_size > 1:
        torch.distributed.barrier()
        if rank > 0:
            checkpoint = model_zoo.load_url(url, model_dir=model_dir,
                                            map_location=map_location)
    return checkpoint


def load_pavimodel_dist(model_path, map_location=None):
    """In distributed setting, this function only download checkpoint at local
    rank 0."""
    try:
        from pavi import modelcloud
    except ImportError:
        raise ImportError('Please install pavi to load checkpoint from modelcloud.')
    rank, world_size = get_dist_info()
    rank = int(os.environ.get('LOCAL_RANK', rank))
    if rank == 0:
        model = modelcloud.get(model_path)
        with TemporaryDirectory() as tmp_dir:
            downloaded_file = osp.join(tmp_dir, model.name)
            model.download(downloaded_file)
            checkpoint = torch.load(downloaded_file, map_location=map_location)
    if world_size > 1:
        torch.distributed.barrier()
        if rank > 0:
            model = modelcloud.get(model_path)
            with TemporaryDirectory() as tmp_dir:
                downloaded_file = osp.join(tmp_dir, model.name)
                model.download(downloaded_file)
                checkpoint = torch.load(downloaded_file, map_location=map_location)
    return checkpoint


def load_fileclient_dist(filename, backend, map_location):
    """In distributed setting, this function only download checkpoint at local
    rank 0."""
    rank, world_size = get_dist_info()
    rank = int(os.environ.get('LOCAL_RANK', rank))
    allowed_backends = ['ceph']
    if backend not in allowed_backends:
        raise ValueError(f'Load from Backend {backend} is not supported.')
    if rank == 0:
        fileclient = FileClient(backend=backend)
        buffer = io.BytesIO(fileclient.get(filename))
        checkpoint = torch.load(buffer, map_location=map_location)
    if world_size > 1:
        torch.distributed.barrier()
        if rank > 0:
            fileclient = FileClient(backend=backend)
            buffer = io.BytesIO(fileclient.get(filename))
            checkpoint = torch.load(buffer, map_location=map_location)
    return checkpoint


def get_torchvision_models():
    model_urls = dict()
    for _, name, ispkg in pkgutil.walk_packages(torchvision.models.__path__):
        if ispkg:
            continue
        _zoo = import_module(f'torchvision.models.{name}')
        if hasattr(_zoo, 'model_urls'):
            _urls = getattr(_zoo, 'model_urls')
            model_urls.update(_urls)
    return model_urls


def get_external_models():
    mmcv_home = _get_mmcv_home()
    default_json_path = osp.join(mmcv.__path__[0], 'model_zoo/open_mmlab.json')
    default_urls = load_file(default_json_path)
    assert isinstance(default_urls, dict)
    external_json_path = osp.join(mmcv_home, 'open_mmlab.json')
    if osp.exists(external_json_path):
        external_urls = load_file(external_json_path)
        assert isinstance(external_urls, dict)
        default_urls.update(external_urls)
    return default_urls


def get_mmcls_models():
    mmcls_json_path = osp.join(mmcv.__path__[0], 'model_zoo/mmcls.json')
    mmcls_urls = load_file(mmcls_json_path)
    return mmcls_urls


def get_deprecated_model_names():
    deprecate_json_path = osp.join(mmcv.__path__[0], 'model_zoo/deprecated.json')
    deprecate_urls = load_file(deprecate_json_path)
    assert isinstance(deprecate_urls, dict)
    return deprecate_urls


def _process_mmcls_checkpoint(checkpoint):
    state_dict = checkpoint['state_dict']
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        if k.startswith('backbone.'):
            new_state_dict[k[9:]] = v
    new_checkpoint = dict(state_dict=new_state_dict)
    return new_checkpoint


def _load_checkpoint(filename, map_location=None):
    """Load checkpoint from somewhere (modelzoo, file, url).

    Args:
        filename (str): Accept local filepath, URL, ``torchvision://xxx``,
            ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
            details.
        map_location (str | None): Same as :func:`torch.load`. Default: None.

    Returns:
        dict | OrderedDict: The loaded checkpoint. It can be either an
            OrderedDict storing model weights or a dict containing other
            information, which depends on the checkpoint.
    """
    if filename.startswith('modelzoo://'):
        warnings.warn('The URL scheme of "modelzoo://" is deprecated, please '
                      'use "torchvision://" instead')
        model_urls = get_torchvision_models()
        model_name = filename[11:]
        checkpoint = load_url_dist(model_urls[model_name])
    elif filename.startswith('torchvision://'):
        model_urls = get_torchvision_models()
        model_name = filename[14:]
        checkpoint = load_url_dist(model_urls[model_name])
    elif filename.startswith('open-mmlab://'):
        model_urls = get_external_models()
        model_name = filename[13:]
        deprecated_urls = get_deprecated_model_names()
        if model_name in deprecated_urls:
            warnings.warn(f'open-mmlab://{model_name} is deprecated in favor '
                          f'of open-mmlab://{deprecated_urls[model_name]}')
            model_name = deprecated_urls[model_name]
        model_url = model_urls[model_name]
        # check if is url
        if model_url.startswith(('http://', 'https://')):
            checkpoint = load_url_dist(model_url)
        else:
            filename = osp.join(_get_mmcv_home(), model_url)
            if not osp.isfile(filename):
                raise IOError(f'{filename} is not a checkpoint file')
            checkpoint = torch.load(filename, map_location=map_location)
    elif filename.startswith('mmcls://'):
        model_urls = get_mmcls_models()
        model_name = filename[8:]
        checkpoint = load_url_dist(model_urls[model_name])
        checkpoint = _process_mmcls_checkpoint(checkpoint)
    elif filename.startswith(('http://', 'https://')):
        checkpoint = load_url_dist(filename)
    elif filename.startswith('pavi://'):
        model_path = filename[7:]
        checkpoint = load_pavimodel_dist(model_path, map_location=map_location)
    elif filename.startswith('s3://'):
        checkpoint = load_fileclient_dist(filename, backend='ceph',
                                          map_location=map_location)
    else:
        if not osp.isfile(filename):
            raise IOError(f'{filename} is not a checkpoint file')
        checkpoint = torch.load(filename, map_location=map_location)
    return checkpoint


def cosine_scheduler(base_value, final_value, epochs, niter_per_ep,
                     warmup_epochs=0, start_warmup_value=0, warmup_steps=-1):
    warmup_schedule = np.array([])
    warmup_iters = warmup_epochs * niter_per_ep
    if warmup_steps > 0:
        warmup_iters = warmup_steps
    print('Set warmup steps = %d' % warmup_iters)
    if warmup_epochs > 0:
        warmup_schedule = np.linspace(start_warmup_value, base_value, warmup_iters)

    iters = np.arange(epochs * niter_per_ep - warmup_iters)
    schedule = np.array([
        final_value + 0.5 * (base_value - final_value) *
        (1 + math.cos(math.pi * i / (len(iters)))) for i in iters
    ])
    schedule = np.concatenate((warmup_schedule, schedule))

    assert len(schedule) == epochs * niter_per_ep
    return schedule


def load_checkpoint(model, filename, map_location='cpu', strict=False, logger=None):
    """Load checkpoint from a file or URI.

    Args:
        model (Module): Module to load checkpoint.
        filename (str): Accept local filepath, URL, ``torchvision://xxx``,
            ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
            details.
        map_location (str): Same as :func:`torch.load`.
        strict (bool): Whether to allow different params for the model and
            checkpoint.
        logger (:mod:`logging.Logger` or None): The logger for error message.

    Returns:
        dict or OrderedDict: The loaded checkpoint.
    """
    checkpoint = _load_checkpoint(filename, map_location)
    # OrderedDict is a subclass of dict
    if not isinstance(checkpoint, dict):
        raise RuntimeError(f'No state_dict found in checkpoint file {filename}')
    # get state_dict from checkpoint
    if 'state_dict' in checkpoint:
        state_dict = checkpoint['state_dict']
    elif 'model' in checkpoint:
        state_dict = checkpoint['model']
    elif 'module' in checkpoint:
        state_dict = checkpoint['module']
    else:
        state_dict = checkpoint
    # strip prefix of state_dict
    if list(state_dict.keys())[0].startswith('module.'):
        state_dict = {k[7:]: v for k, v in state_dict.items()}

    # for MoBY, load model of online branch
    if sorted(list(state_dict.keys()))[0].startswith('encoder'):
        state_dict = {
            k.replace('encoder.', ''): v
            for k, v in state_dict.items() if k.startswith('encoder.')
        }

    # reshape absolute position embedding for Swin
    if state_dict.get('absolute_pos_embed') is not None:
        absolute_pos_embed = state_dict['absolute_pos_embed']
        N1, L, C1 = absolute_pos_embed.size()
        N2, C2, H, W = model.absolute_pos_embed.size()
        if N1 != N2 or C1 != C2 or L != H * W:
            logger.warning('Error in loading absolute_pos_embed, pass')
        else:
            state_dict['absolute_pos_embed'] = absolute_pos_embed.view(
                N2, H, W, C2).permute(0, 3, 1, 2)

    rank, _ = get_dist_info()
    if 'rel_pos_bias.relative_position_bias_table' in state_dict:
        if rank == 0:
            print('Expand the shared relative position embedding to each layers. ')
        num_layers = model.get_num_layers()
        rel_pos_bias = state_dict['rel_pos_bias.relative_position_bias_table']
        for i in range(num_layers):
            state_dict['blocks.%d.attn.relative_position_bias_table' % i] = rel_pos_bias.clone()
        state_dict.pop('rel_pos_bias.relative_position_bias_table')

    all_keys = list(state_dict.keys())
    for key in all_keys:
        if 'relative_position_index' in key:
            state_dict.pop(key)
        if 'relative_position_bias_table' in key:
            rel_pos_bias = state_dict[key]
            src_num_pos, num_attn_heads = rel_pos_bias.size()
            dst_num_pos, _ = model.state_dict()[key].size()
            dst_patch_shape = model.patch_embed.patch_shape
            if dst_patch_shape[0] != dst_patch_shape[1]:
                raise NotImplementedError()
            num_extra_tokens = 3
            src_size = int((src_num_pos - num_extra_tokens)**0.5)
            dst_size = int((dst_num_pos)**0.5)
            if src_size != dst_size:
                if rank == 0:
                    print('Position interpolate for %s from %dx%d to %dx%d' %
                          (key, src_size, src_size, dst_size, dst_size))
                extra_tokens = rel_pos_bias[-num_extra_tokens:, :]
                rel_pos_bias = rel_pos_bias[:-num_extra_tokens, :]

                def geometric_progression(a, r, n):
                    return a * (1.0 - r**n) / (1.0 - r)

                left, right = 1.01, 1.5
                while right - left > 1e-6:
                    q = (left + right) / 2.0
                    gp = geometric_progression(1, q, src_size // 2)
                    if gp > dst_size // 2:
                        right = q
                    else:
                        left = q

                # if q > 1.13492:
                #     q = 1.13492

                dis = []
                cur = 1
                for i in range(src_size // 2):
                    dis.append(cur)
                    cur += q**(i + 1)

                r_ids = [-_ for _ in reversed(dis)]
                x = r_ids + [0] + dis
                y = r_ids + [0] + dis

                t = dst_size // 2.0
                dx = np.arange(-t, t + 0.1, 1.0)
                dy = np.arange(-t, t + 0.1, 1.0)

                if rank == 0:
                    print('x = {}'.format(x))
                    print('dx = {}'.format(dx))

                all_rel_pos_bias = []
                for i in range(num_attn_heads):
                    z = rel_pos_bias[:, i].view(src_size, src_size).float().numpy()
                    f = interpolate.interp2d(x, y, z, kind='cubic')
                    all_rel_pos_bias.append(
                        torch.Tensor(f(dx, dy)).contiguous().view(-1, 1).to(rel_pos_bias.device))

                rel_pos_bias = torch.cat(all_rel_pos_bias, dim=-1)
                # new_rel_pos_bias = torch.cat((rel_pos_bias, extra_tokens), dim=0)
                # state_dict[key] = new_rel_pos_bias
                state_dict[key] = rel_pos_bias
            else:
                rel_pos_bias = rel_pos_bias[:-num_extra_tokens, :]
                state_dict[key] = rel_pos_bias

    if 'pos_embed' in state_dict:
        pos_embed_checkpoint = state_dict['pos_embed']
        embedding_size = pos_embed_checkpoint.shape[-1]
        num_patches = model.patch_embed.num_patches
        num_extra_tokens = model.pos_embed.shape[-2] - num_patches
        # height (== width) for the checkpoint position embedding
        orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens)**0.5)
        # height (== width) for the new position embedding
        new_size = int(num_patches**0.5)
        # class_token and dist_token are kept unchanged
        if orig_size != new_size:
            if rank == 0:
                print('Position interpolate from %dx%d to %dx%d' %
                      (orig_size, orig_size, new_size, new_size))
            extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
            # only the position tokens are interpolated
            pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
            pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size,
                                            embedding_size).permute(0, 3, 1, 2)
            pos_tokens = torch.nn.functional.interpolate(
                pos_tokens, size=(new_size, new_size), mode='bicubic',
                align_corners=False)
            pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
            new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
            state_dict['pos_embed'] = new_pos_embed

    # interpolate position bias table if needed
    # relative_position_bias_table_keys = [k for k in state_dict.keys() if "relative_position_bias_table" in k]
    # for table_key in relative_position_bias_table_keys:
    #     table_pretrained = state_dict[table_key]
    #     table_current = model.state_dict()[table_key]
    #     L1, nH1 = table_pretrained.size()
    #     L2, nH2 = table_current.size()
    #     if nH1 != nH2:
    #         logger.warning(f"Error in loading {table_key}, pass")
    #     else:
    #         if L1 != L2:
    #             S1 = int(L1 ** 0.5)
    #             S2 = int(L2 ** 0.5)
    #             table_pretrained_resized = F.interpolate(
    #                 table_pretrained.permute(1, 0).view(1, nH1, S1, S1),
    #                 size=(S2, S2), mode='bicubic')
    #             state_dict[table_key] = table_pretrained_resized.view(nH2, L2).permute(1, 0)

    # load state_dict
    load_state_dict(model, state_dict, strict, logger)
    return checkpoint


def weights_to_cpu(state_dict):
    """Copy a model state_dict to cpu.

    Args:
        state_dict (OrderedDict): Model weights on GPU.

    Returns:
        OrderedDict: Model weights on CPU.
    """
    state_dict_cpu = OrderedDict()
    for key, val in state_dict.items():
        state_dict_cpu[key] = val.cpu()
    return state_dict_cpu


def _save_to_state_dict(module, destination, prefix, keep_vars):
    """Saves module state to `destination` dictionary.

    This method is modified from :meth:`torch.nn.Module._save_to_state_dict`.

    Args:
        module (nn.Module): The module to generate state_dict.
        destination (dict): A dict where state will be stored.
        prefix (str): The prefix for parameters and buffers used in this
            module.
    """
    for name, param in module._parameters.items():
        if param is not None:
            destination[prefix + name] = param if keep_vars else param.detach()
    for name, buf in module._buffers.items():
        # remove check of _non_persistent_buffers_set to allow nn.BatchNorm2d
        if buf is not None:
            destination[prefix + name] = buf if keep_vars else buf.detach()


def get_state_dict(module, destination=None, prefix='', keep_vars=False):
    """Returns a dictionary containing a whole state of the module.

    Both parameters and persistent buffers (e.g. running averages) are
    included. Keys are corresponding parameter and buffer names.

    This method is modified from :meth:`torch.nn.Module.state_dict` to
    recursively check parallel module in case that the model has a
    complicated structure, e.g., nn.Module(nn.Module(DDP)).

    Args:
        module (nn.Module): The module to generate state_dict.
        destination (OrderedDict): Returned dict for the state of the module.
        prefix (str): Prefix of the key.
        keep_vars (bool): Whether to keep the variable property of the
            parameters. Default: False.

    Returns:
        dict: A dictionary containing a whole state of the module.
    """
    # recursively check parallel module in case that the model has a
    # complicated structure, e.g., nn.Module(nn.Module(DDP))
    if is_module_wrapper(module):
        module = module.module

    # below is the same as torch.nn.Module.state_dict()
    if destination is None:
        destination = OrderedDict()
        destination._metadata = OrderedDict()
    destination._metadata[prefix[:-1]] = local_metadata = dict(version=module._version)
    _save_to_state_dict(module, destination, prefix, keep_vars)
    for name, child in module._modules.items():
        if child is not None:
            get_state_dict(child, destination, prefix + name + '.', keep_vars=keep_vars)
    for hook in module._state_dict_hooks.values():
        hook_result = hook(module, destination, prefix, local_metadata)
        if hook_result is not None:
            destination = hook_result
    return destination


def save_checkpoint(model, filename, optimizer=None, meta=None):
    """Save checkpoint to file.

    The checkpoint will have 3 fields: ``meta``, ``state_dict`` and
    ``optimizer``. By default ``meta`` will contain version and time info.

    Args:
        model (Module): Module whose params are to be saved.
        filename (str): Checkpoint filename.
        optimizer (:obj:`Optimizer`, optional): Optimizer to be saved.
        meta (dict, optional): Metadata to be saved in checkpoint.
    """
    if meta is None:
        meta = {}
    elif not isinstance(meta, dict):
        raise TypeError(f'meta must be a dict or None, but got {type(meta)}')
    meta.update(mmcv_version=mmcv.__version__, time=time.asctime())

    if is_module_wrapper(model):
        model = model.module

    if hasattr(model, 'CLASSES') and model.CLASSES is not None:
        # save class name to the meta
        meta.update(CLASSES=model.CLASSES)

    checkpoint = {
        'meta': meta,
        'state_dict': weights_to_cpu(get_state_dict(model))
    }
    # save optimizer state dict in the checkpoint
    if isinstance(optimizer, Optimizer):
        checkpoint['optimizer'] = optimizer.state_dict()
    elif isinstance(optimizer, dict):
        checkpoint['optimizer'] = {}
        for name, optim in optimizer.items():
            checkpoint['optimizer'][name] = optim.state_dict()

    if filename.startswith('pavi://'):
        try:
            from pavi import modelcloud
            from pavi.exception import NodeNotFoundError
        except ImportError:
            raise ImportError('Please install pavi to load checkpoint from modelcloud.')
        model_path = filename[7:]
        root = modelcloud.Folder()
        model_dir, model_name = osp.split(model_path)
        try:
            model = modelcloud.get(model_dir)
        except NodeNotFoundError:
            model = root.create_training_model(model_dir)
        with TemporaryDirectory() as tmp_dir:
            checkpoint_file = osp.join(tmp_dir, model_name)
            with open(checkpoint_file, 'wb') as f:
                torch.save(checkpoint, f)
                f.flush()
            model.create_file(checkpoint_file, name=model_name)
    else:
        mmcv.mkdir_or_exist(osp.dirname(filename))
        # immediately flush buffer
        with open(filename, 'wb') as f:
            torch.save(checkpoint, f)
            f.flush()
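# --- Editor's usage sketch (not part of the upstream file): cosine_scheduler()
# above returns one value per training iteration, warmup included. For a
# 10-epoch run with 100 iters/epoch and 1 warmup epoch:
# schedule = cosine_scheduler(base_value=1e-4, final_value=1e-6, epochs=10,
#                             niter_per_ep=100, warmup_epochs=1)
# assert len(schedule) == 1000 and schedule[0] == 0  # start_warmup_value=0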
ViT-Adapter-main
detection/mmcv_custom/checkpoint.py
import os.path as osp
import pkgutil
import time
from collections import OrderedDict
from importlib import import_module

import mmcv
import torch
from torch.utils import model_zoo

open_mmlab_model_urls = {
    'vgg16_caffe': 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/pretrain/third_party/vgg16_caffe-292e1171.pth',  # noqa: E501
    'resnet50_caffe': 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/pretrain/third_party/resnet50_caffe-788b5fa3.pth',  # noqa: E501
    'resnet101_caffe': 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/pretrain/third_party/resnet101_caffe-3ad79236.pth',  # noqa: E501
    'resnext50_32x4d': 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/pretrain/third_party/resnext50-32x4d-0ab1a123.pth',  # noqa: E501
    'resnext101_32x4d': 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/pretrain/third_party/resnext101_32x4d-a5af3160.pth',  # noqa: E501
    'resnext101_64x4d': 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/pretrain/third_party/resnext101_64x4d-ee2c6f71.pth',  # noqa: E501
    'contrib/resnet50_gn': 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/pretrain/third_party/resnet50_gn_thangvubk-ad1730dd.pth',  # noqa: E501
    'detectron/resnet50_gn': 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/pretrain/third_party/resnet50_gn-9186a21c.pth',  # noqa: E501
    'detectron/resnet101_gn': 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/pretrain/third_party/resnet101_gn-cac0ab98.pth',  # noqa: E501
    'jhu/resnet50_gn_ws': 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/pretrain/third_party/resnet50_gn_ws-15beedd8.pth',  # noqa: E501
    'jhu/resnet101_gn_ws': 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/pretrain/third_party/resnet101_gn_ws-3e3c308c.pth',  # noqa: E501
    'jhu/resnext50_32x4d_gn_ws': 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/pretrain/third_party/resnext50_32x4d_gn_ws-0d87ac85.pth',  # noqa: E501
    'jhu/resnext101_32x4d_gn_ws': 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/pretrain/third_party/resnext101_32x4d_gn_ws-34ac1a9e.pth',  # noqa: E501
    'jhu/resnext50_32x4d_gn': 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/pretrain/third_party/resnext50_32x4d_gn-c7e8b754.pth',  # noqa: E501
    'jhu/resnext101_32x4d_gn': 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/pretrain/third_party/resnext101_32x4d_gn-ac3bb84e.pth',  # noqa: E501
    'msra/hrnetv2_w18': 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/pretrain/third_party/hrnetv2_w18-00eb2006.pth',  # noqa: E501
    'msra/hrnetv2_w32': 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/pretrain/third_party/hrnetv2_w32-dc9eeb4f.pth',  # noqa: E501
    'msra/hrnetv2_w40': 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/pretrain/third_party/hrnetv2_w40-ed0b031c.pth',  # noqa: E501
}  # yapf: disable


def load_state_dict(module, state_dict, strict=False, logger=None):
    """Load state_dict to a module.

    This method is modified from :meth:`torch.nn.Module.load_state_dict`.
    Default value for ``strict`` is set to ``False`` and the message for param
    mismatch will be shown even if strict is False.

    Args:
        module (Module): Module that receives the state_dict.
        state_dict (OrderedDict): Weights.
        strict (bool): whether to strictly enforce that the keys in
            :attr:`state_dict` match the keys returned by this module's
            :meth:`~torch.nn.Module.state_dict` function. Default: ``False``.
        logger (:obj:`logging.Logger`, optional): Logger to log the error
            message. If not specified, print function will be used.
    """
    unexpected_keys = []
    own_state = module.state_dict()
    for name, param in state_dict.items():
        if name not in own_state:
            unexpected_keys.append(name)
            continue
        if isinstance(param, torch.nn.Parameter):
            # backwards compatibility for serialized parameters
            param = param.data
        try:
            own_state[name].copy_(param)
        except Exception:
            raise RuntimeError(
                'While copying the parameter named {}, '
                'whose dimensions in the model are {} and '
                'whose dimensions in the checkpoint are {}.'.format(
                    name, own_state[name].size(), param.size()))
    missing_keys = set(own_state.keys()) - set(state_dict.keys())

    err_msg = []
    if unexpected_keys:
        err_msg.append('unexpected key in source state_dict: {}\n'.format(
            ', '.join(unexpected_keys)))
    if missing_keys:
        err_msg.append('missing keys in source state_dict: {}\n'.format(
            ', '.join(missing_keys)))
    err_msg = '\n'.join(err_msg)
    if err_msg:
        if strict:
            raise RuntimeError(err_msg)
        elif logger is not None:
            # logger.warn() is a deprecated alias of logger.warning()
            logger.warning(err_msg)
        else:
            print(err_msg)


def my_load_checkpoint(model, filename, map_location=None, strict=False, logger=None):
    """Load checkpoint from a file or URI.

    Args:
        model (Module): Module to load checkpoint.
        filename (str): Either a filepath or URL or modelzoo://xxxxxxx.
        map_location (str): Same as :func:`torch.load`.
        strict (bool): Whether to allow different params for the model and
            checkpoint.
        logger (:mod:`logging.Logger` or None): The logger for error message.

    Returns:
        dict or OrderedDict: The loaded checkpoint.
    """
    # load checkpoint from modelzoo or file or url
    if filename.startswith('modelzoo://'):
        import torchvision
        model_urls = dict()
        for _, name, ispkg in pkgutil.walk_packages(torchvision.models.__path__):
            if not ispkg:
                _zoo = import_module('torchvision.models.{}'.format(name))
                if hasattr(_zoo, 'model_urls'):
                    _urls = getattr(_zoo, 'model_urls')
                    model_urls.update(_urls)
        model_name = filename[11:]
        checkpoint = model_zoo.load_url(model_urls[model_name])
    elif filename.startswith('open-mmlab://'):
        model_name = filename[13:]
        checkpoint = model_zoo.load_url(open_mmlab_model_urls[model_name])
    elif filename.startswith(('http://', 'https://')):
        checkpoint = model_zoo.load_url(filename)
    else:
        if not osp.isfile(filename):
            raise IOError('{} is not a checkpoint file'.format(filename))
        checkpoint = torch.load(filename, map_location=map_location)
    # get state_dict from checkpoint
    if isinstance(checkpoint, OrderedDict):
        state_dict = checkpoint
    elif isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        state_dict = checkpoint['state_dict']
    elif isinstance(checkpoint, dict) and 'model' in checkpoint:
        state_dict = checkpoint['model']  # for classification weights
    else:
        state_dict = checkpoint
        # fix "No state_dict found in checkpoint file"
        # raise RuntimeError(
        #     'No state_dict found in checkpoint file {}'.format(filename))
    # strip prefix of state_dict; strip from the state_dict extracted above,
    # since the weights are not guaranteed to live under 'state_dict'
    if list(state_dict.keys())[0].startswith('module.'):
        state_dict = {k[7:]: v for k, v in state_dict.items()}
    # load state_dict
    if hasattr(model, 'module'):
        load_state_dict(model.module, state_dict, strict, logger)
    else:
        load_state_dict(model, state_dict, strict, logger)
    return checkpoint


def weights_to_cpu(state_dict):
    """Copy a model state_dict to cpu.

    Args:
        state_dict (OrderedDict): Model weights on GPU.

    Returns:
        OrderedDict: Model weights on CPU.
    """
    state_dict_cpu = OrderedDict()
    for key, val in state_dict.items():
        state_dict_cpu[key] = val.cpu()
    return state_dict_cpu


def save_checkpoint(model, filename, optimizer=None, meta=None):
    """Save checkpoint to file.

    The checkpoint will have 3 fields: ``meta``, ``state_dict`` and
    ``optimizer``. By default ``meta`` will contain version and time info.

    Args:
        model (Module): Module whose params are to be saved.
        filename (str): Checkpoint filename.
        optimizer (:obj:`Optimizer`, optional): Optimizer to be saved.
        meta (dict, optional): Metadata to be saved in checkpoint.
    """
    if meta is None:
        meta = {}
    elif not isinstance(meta, dict):
        raise TypeError('meta must be a dict or None, but got {}'.format(type(meta)))
    meta.update(mmcv_version=mmcv.__version__, time=time.asctime())

    mmcv.mkdir_or_exist(osp.dirname(filename))
    if hasattr(model, 'module'):
        model = model.module

    checkpoint = {
        'meta': meta,
        'state_dict': weights_to_cpu(model.state_dict())
    }
    if optimizer is not None:
        checkpoint['optimizer'] = optimizer.state_dict()

    torch.save(checkpoint, filename)
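# --- Editor's usage sketch (path and config are hypothetical):
# my_load_checkpoint() accepts local paths, URLs and the modelzoo:// /
# open-mmlab:// schemes, and tolerates checkpoints whose weights sit under
# 'state_dict', 'model' or at the top level:
# from mmdet.models import build_detector
# model = build_detector(cfg.model)
# my_load_checkpoint(model, 'pretrained/htc_vit_adapter.pth',
#                    map_location='cpu', strict=False)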
ViT-Adapter-main
detection/mmcv_custom/my_checkpoint.py
# Copyright (c) Shanghai AI Lab. All rights reserved.
from .checkpoint import load_checkpoint
from .customized_text import CustomizedTextLoggerHook
from .layer_decay_optimizer_constructor import LayerDecayOptimizerConstructor
from .my_checkpoint import my_load_checkpoint

__all__ = [
    'LayerDecayOptimizerConstructor', 'CustomizedTextLoggerHook',
    'load_checkpoint', 'my_load_checkpoint'
]
ViT-Adapter-main
detection/mmcv_custom/__init__.py
import torch

checkpoint = torch.load(
    "../pretrained/uni-perceiver-large-L24-H1024-224size-pretrained.pth",
    map_location=torch.device('cpu'))
checkpoint = checkpoint['model']

new_checkpoint = {}
for k, v in checkpoint.items():
    new_k = k.replace("fused_encoder.", "")
    new_k = new_k.replace("in_proj_", "in_proj.")
    new_k = new_k.replace("video_embed.", "visual_embed.")
    new_k = new_k.replace("visual_embed.embeddings.weight",
                          "visual_embed.patch_embed.proj.weight")
    new_k = new_k.replace("visual_embed.embeddings.bias",
                          "visual_embed.patch_embed.proj.bias")
    new_k = new_k.replace("visual_embed.embeddings_st_pos.spatial_pos_embed.weight",
                          "visual_embed.patch_embed.spatial_pos_embed.weight")
    new_k = new_k.replace("visual_embed.embeddings_st_pos.temporal_pos_embed.weight",
                          "visual_embed.patch_embed.temporal_pos_embed.weight")
    # drop the task-specific heads; only the backbone weights are kept
    if "loss_prepare" in new_k or "token_embed" in new_k:
        continue
    new_checkpoint[new_k] = v

for k, v in new_checkpoint.items():
    print(k, v.shape)

torch.save(new_checkpoint,
           "../pretrained/uni-perceiver-large-L24-H1024-224size-pretrained_converted.pth")
print("saved!")
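# --- Editor's sanity check (assumption, not part of the upstream script):
# after conversion the keys should match UnifiedBertEncoder, so no renamed
# prefixes should survive in the saved file:
# converted = torch.load(
#     "../pretrained/uni-perceiver-large-L24-H1024-224size-pretrained_converted.pth",
#     map_location="cpu")
# assert not any(k.startswith(("fused_encoder.", "video_embed.")) for k in converted)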
ViT-Adapter-main
detection/mmcv_custom/uniperceiver_converter.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import datetime
from collections import OrderedDict

import torch
from mmcv.runner import HOOKS, TextLoggerHook


@HOOKS.register_module()
class CustomizedTextLoggerHook(TextLoggerHook):
    """Customized Text Logger hook.

    This logger prints out both lr and layer_0_lr.
    """

    def _log_info(self, log_dict, runner):
        # print exp name for users to distinguish experiments
        # at every ``interval_exp_name`` iterations and the end of each epoch
        if runner.meta is not None and 'exp_name' in runner.meta:
            if (self.every_n_iters(runner, self.interval_exp_name)) or (
                    self.by_epoch and self.end_of_epoch(runner)):
                exp_info = f'Exp name: {runner.meta["exp_name"]}'
                runner.logger.info(exp_info)

        if log_dict['mode'] == 'train':
            lr_str = {}
            for lr_type in ['lr', 'layer_0_lr']:
                if isinstance(log_dict[lr_type], dict):
                    # collect the per-key pieces in a local list; appending to
                    # the outer dict (as before) raised an AttributeError
                    parts = []
                    for k, val in log_dict[lr_type].items():
                        parts.append(f'{lr_type}_{k}: {val:.3e}')
                    lr_str[lr_type] = ' '.join(parts)
                else:
                    lr_str[lr_type] = f'{lr_type}: {log_dict[lr_type]:.3e}'

            # by epoch: Epoch [4][100/1000]
            # by iter:  Iter [100/100000]
            if self.by_epoch:
                log_str = f'Epoch [{log_dict["epoch"]}]' \
                          f'[{log_dict["iter"]}/{len(runner.data_loader)}]\t'
            else:
                log_str = f'Iter [{log_dict["iter"]}/{runner.max_iters}]\t'
            log_str += f'{lr_str["lr"]}, {lr_str["layer_0_lr"]}, '

            if 'time' in log_dict.keys():
                self.time_sec_tot += (log_dict['time'] * self.interval)
                time_sec_avg = self.time_sec_tot / (runner.iter - self.start_iter + 1)
                eta_sec = time_sec_avg * (runner.max_iters - runner.iter - 1)
                eta_str = str(datetime.timedelta(seconds=int(eta_sec)))
                log_str += f'eta: {eta_str}, '
                log_str += f'time: {log_dict["time"]:.3f}, ' \
                           f'data_time: {log_dict["data_time"]:.3f}, '
                # statistic memory
                if torch.cuda.is_available():
                    log_str += f'memory: {log_dict["memory"]}, '
        else:
            # val/test time
            # here 1000 is the length of the val dataloader
            # by epoch: Epoch[val] [4][1000]
            # by iter:  Iter[val] [1000]
            if self.by_epoch:
                log_str = f'Epoch({log_dict["mode"]}) ' \
                          f'[{log_dict["epoch"]}][{log_dict["iter"]}]\t'
            else:
                log_str = f'Iter({log_dict["mode"]}) [{log_dict["iter"]}]\t'

        log_items = []
        for name, val in log_dict.items():
            # TODO: resolve this hack
            # these items have been in log_str
            if name in [
                    'mode', 'Epoch', 'iter', 'lr', 'layer_0_lr', 'time',
                    'data_time', 'memory', 'epoch'
            ]:
                continue
            if isinstance(val, float):
                val = f'{val:.4f}'
            log_items.append(f'{name}: {val}')
        log_str += ', '.join(log_items)

        runner.logger.info(log_str)

    def log(self, runner):
        if 'eval_iter_num' in runner.log_buffer.output:
            # this doesn't modify runner.iter and is regardless of by_epoch
            cur_iter = runner.log_buffer.output.pop('eval_iter_num')
        else:
            cur_iter = self.get_iter(runner, inner_iter=True)

        log_dict = OrderedDict(mode=self.get_mode(runner),
                               epoch=self.get_epoch(runner),
                               iter=cur_iter)

        # record lr and layer_0_lr
        cur_lr = runner.current_lr()
        if isinstance(cur_lr, list):
            log_dict['layer_0_lr'] = min(cur_lr)
            log_dict['lr'] = max(cur_lr)
        else:
            assert isinstance(cur_lr, dict)
            log_dict['lr'], log_dict['layer_0_lr'] = {}, {}
            for k, lr_ in cur_lr.items():
                assert isinstance(lr_, list)
                log_dict['layer_0_lr'].update({k: min(lr_)})
                log_dict['lr'].update({k: max(lr_)})

        if 'time' in runner.log_buffer.output:
            # statistic memory
            if torch.cuda.is_available():
                log_dict['memory'] = self._get_max_memory(runner)

        log_dict = dict(log_dict, **runner.log_buffer.output)

        self._log_info(log_dict, runner)
        self._dump_log(log_dict, runner)
        return log_dict
ViT-Adapter-main
detection/mmcv_custom/customized_text.py
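A minimal usage sketch (not part of the dump): the `@HOOKS.register_module()` call above makes the hook available by name once `mmcv_custom` is imported, so a config can swap it in for the stock text logger. Whether the training script honors `custom_imports` depends on the mmdet version; treat the exact snippet as an assumption.

# sketch: enable the customized logger so both `lr` and `layer_0_lr`
# appear in the training log; assumes the train script processes
# `custom_imports` (recent mmdet 2.x releases do).
custom_imports = dict(imports=['mmcv_custom'], allow_failed_imports=False)
log_config = dict(
    interval=50,
    hooks=[
        dict(type='CustomizedTextLoggerHook'),
    ])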
# Copyright (c) OpenMMLab. All rights reserved.
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
custom_hooks = [dict(type='NumClassCheckHook')]
# evaluation = dict(save_best='auto')
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
ViT-Adapter-main
detection/configs/_base_/default_runtime.py
# Copyright (c) OpenMMLab. All rights reserved. # dataset settings dataset_type = 'WIDERFaceDataset' data_root = 'data/WIDERFace/' img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile', to_float32=True), dict(type='LoadAnnotations', with_bbox=True), dict(type='PhotoMetricDistortion', brightness_delta=32, contrast_range=(0.5, 1.5), saturation_range=(0.5, 1.5), hue_delta=18), dict(type='Expand', mean=img_norm_cfg['mean'], to_rgb=img_norm_cfg['to_rgb'], ratio_range=(1, 4)), dict(type='MinIoURandomCrop', min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3), dict(type='Resize', img_scale=(300, 300), keep_ratio=False), dict(type='Normalize', **img_norm_cfg), dict(type='RandomFlip', flip_ratio=0.5), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict(type='MultiScaleFlipAug', img_scale=(300, 300), flip=False, transforms=[ dict(type='Resize', keep_ratio=False), dict(type='Normalize', **img_norm_cfg), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict(samples_per_gpu=60, workers_per_gpu=2, train=dict(type='RepeatDataset', times=2, dataset=dict(type=dataset_type, ann_file=data_root + 'train.txt', img_prefix=data_root + 'WIDER_train/', min_size=17, pipeline=train_pipeline)), val=dict(type=dataset_type, ann_file=data_root + 'val.txt', img_prefix=data_root + 'WIDER_val/', pipeline=test_pipeline), test=dict(type=dataset_type, ann_file=data_root + 'val.txt', img_prefix=data_root + 'WIDER_val/', pipeline=test_pipeline))
ViT-Adapter-main
detection/configs/_base_/datasets/wider_face.py
# Copyright (c) OpenMMLab. All rights reserved. # dataset settings dataset_type = 'CityscapesDataset' data_root = 'data/cityscapes/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict(type='Resize', img_scale=[(2048, 800), (2048, 1024)], keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict(type='MultiScaleFlipAug', img_scale=(2048, 1024), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( samples_per_gpu=1, workers_per_gpu=2, train=dict( type='RepeatDataset', times=8, dataset=dict(type=dataset_type, ann_file=data_root + 'annotations/instancesonly_filtered_gtFine_train.json', img_prefix=data_root + 'leftImg8bit/train/', pipeline=train_pipeline)), val=dict(type=dataset_type, ann_file=data_root + 'annotations/instancesonly_filtered_gtFine_val.json', img_prefix=data_root + 'leftImg8bit/val/', pipeline=test_pipeline), test=dict(type=dataset_type, ann_file=data_root + 'annotations/instancesonly_filtered_gtFine_test.json', img_prefix=data_root + 'leftImg8bit/test/', pipeline=test_pipeline)) evaluation = dict(metric=['bbox', 'segm'])
ViT-Adapter-main
detection/configs/_base_/datasets/cityscapes_instance.py
# Copyright (c) OpenMMLab. All rights reserved. # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict(type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict(type=dataset_type, ann_file=data_root + 'annotations/instances_train2017.json', img_prefix=data_root + 'train2017/', pipeline=train_pipeline), val=dict(type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline), test=dict(type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline)) evaluation = dict(interval=1, metric='bbox')
ViT-Adapter-main
detection/configs/_base_/datasets/coco_detection.py
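As a sketch of how a dataset config like the one above is consumed (standard mmdet 2.x APIs; assumes the COCO annotation files actually exist under `data/coco/`):

# sketch: materialize the train split described by this config (mmdet 2.x)
from mmcv import Config
from mmdet.datasets import build_dataset

cfg = Config.fromfile('detection/configs/_base_/datasets/coco_detection.py')
dataset = build_dataset(cfg.data.train)   # items run train_pipeline on access
print(len(dataset), dataset.CLASSES[:3])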
# Copyright (c) OpenMMLab. All rights reserved.
# dataset settings
_base_ = 'coco_instance.py'
dataset_type = 'LVISV05Dataset'
data_root = 'data/lvis_v0.5/'
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        _delete_=True,
        type='ClassBalancedDataset',
        oversample_thr=1e-3,
        dataset=dict(
            type=dataset_type,
            ann_file=data_root + 'annotations/lvis_v0.5_train.json',
            img_prefix=data_root + 'train2017/')),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/lvis_v0.5_val.json',
        img_prefix=data_root + 'val2017/'),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/lvis_v0.5_val.json',
        img_prefix=data_root + 'val2017/'))
evaluation = dict(metric=['bbox', 'segm'])
ViT-Adapter-main
detection/configs/_base_/datasets/lvis_v0.5_instance.py
# Copyright (c) OpenMMLab. All rights reserved.
# dataset settings
_base_ = 'coco_instance.py'
dataset_type = 'LVISV1Dataset'
data_root = 'data/lvis_v1/'
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        _delete_=True,
        type='ClassBalancedDataset',
        oversample_thr=1e-3,
        dataset=dict(
            type=dataset_type,
            ann_file=data_root + 'annotations/lvis_v1_train.json',
            img_prefix=data_root)),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/lvis_v1_val.json',
        img_prefix=data_root),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/lvis_v1_val.json',
        img_prefix=data_root))
evaluation = dict(metric=['bbox', 'segm'])
ViT-Adapter-main
detection/configs/_base_/datasets/lvis_v1_instance.py
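Both LVIS configs wrap the train set in `ClassBalancedDataset` with `oversample_thr=1e-3`. Following the LVIS repeat-factor scheme this wrapper implements, an image is repeated r(I) = max over its categories of max(1, sqrt(t / f(c))) times, where f(c) is the fraction of images containing category c. A toy sketch (the frequencies below are made up, not LVIS statistics):

# sketch of the repeat-factor sampling behind ClassBalancedDataset (thr=1e-3)
import math

def image_repeat_factor(image_cats, cat_freq, thr=1e-3):
    # per-category factor max(1, sqrt(thr / f(c))); the image takes the max
    return max(max(1.0, math.sqrt(thr / cat_freq[c])) for c in image_cats)

cat_freq = {'common': 0.5, 'rare': 1e-4}  # fraction of images per category
print(image_repeat_factor({'common'}, cat_freq))          # 1.0 -> kept once
print(image_repeat_factor({'common', 'rare'}, cat_freq))  # ~3.16 -> oversampled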
# Copyright (c) OpenMMLab. All rights reserved. # dataset settings dataset_type = 'CityscapesDataset' data_root = 'data/cityscapes/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=[(2048, 800), (2048, 1024)], keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict(type='MultiScaleFlipAug', img_scale=(2048, 1024), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( samples_per_gpu=1, workers_per_gpu=2, train=dict( type='RepeatDataset', times=8, dataset=dict(type=dataset_type, ann_file=data_root + 'annotations/instancesonly_filtered_gtFine_train.json', img_prefix=data_root + 'leftImg8bit/train/', pipeline=train_pipeline)), val=dict(type=dataset_type, ann_file=data_root + 'annotations/instancesonly_filtered_gtFine_val.json', img_prefix=data_root + 'leftImg8bit/val/', pipeline=test_pipeline), test=dict(type=dataset_type, ann_file=data_root + 'annotations/instancesonly_filtered_gtFine_test.json', img_prefix=data_root + 'leftImg8bit/test/', pipeline=test_pipeline)) evaluation = dict(interval=1, metric='bbox')
ViT-Adapter-main
detection/configs/_base_/datasets/cityscapes_detection.py
# Copyright (c) OpenMMLab. All rights reserved. # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict(type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict(type=dataset_type, ann_file=data_root + 'annotations/instances_train2017.json', img_prefix=data_root + 'train2017/', pipeline=train_pipeline), val=dict(type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline), test=dict(type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline)) evaluation = dict(metric=['bbox', 'segm'])
ViT-Adapter-main
detection/configs/_base_/datasets/coco_instance.py
# Copyright (c) OpenMMLab. All rights reserved. # dataset settings dataset_type = 'CocoPanopticDataset' data_root = 'data/coco/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadPanopticAnnotations', with_bbox=True, with_mask=True, with_seg=True), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='SegRescale', scale_factor=1 / 4), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict(type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict(samples_per_gpu=2, workers_per_gpu=2, train=dict( type=dataset_type, ann_file=data_root + 'annotations/panoptic_train2017.json', img_prefix=data_root + 'train2017/', seg_prefix=data_root + 'annotations/panoptic_train2017/', pipeline=train_pipeline), val=dict(type=dataset_type, ann_file=data_root + 'annotations/panoptic_val2017.json', img_prefix=data_root + 'val2017/', seg_prefix=data_root + 'annotations/panoptic_val2017/', pipeline=test_pipeline), test=dict(type=dataset_type, ann_file=data_root + 'annotations/panoptic_val2017.json', img_prefix=data_root + 'val2017/', seg_prefix=data_root + 'annotations/panoptic_val2017/', pipeline=test_pipeline)) evaluation = dict(interval=1, metric=['PQ'])
ViT-Adapter-main
detection/configs/_base_/datasets/coco_panoptic.py
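`metric=['PQ']` above requests panoptic quality: predicted and ground-truth segments are matched one-to-one at IoU > 0.5, and PQ = sum(IoU of matches) / (|TP| + |FP|/2 + |FN|/2). A toy computation, independent of the config:

# sketch: panoptic quality given matched-segment IoUs (matches need IoU > 0.5)
def panoptic_quality(matched_ious, num_fp, num_fn):
    tp = len(matched_ious)
    denom = tp + 0.5 * num_fp + 0.5 * num_fn
    return sum(matched_ious) / denom if denom else 0.0

print(panoptic_quality([0.9, 0.8, 0.7], num_fp=1, num_fn=1))  # 0.6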
# dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' img_norm_cfg = dict( mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_train2017.json', img_prefix=data_root + 'train2017/', pipeline=train_pipeline), val=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline), test=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline)) evaluation = dict(metric=['bbox', 'segm'])
ViT-Adapter-main
detection/configs/_base_/datasets/coco_instance_augreg.py
# dataset settings dataset_type = 'Objects365V2Dataset' data_root = 'data/Objects365/Obj365_v2/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict( type=dataset_type, ann_file=data_root + 'annotations/zhiyuan_objv2_train.json', img_prefix=data_root + 'train/', pipeline=train_pipeline), val=dict( type=dataset_type, ann_file=data_root + 'annotations/zhiyuan_objv2_val.json', img_prefix=data_root + 'val/', pipeline=test_pipeline), test=dict( type=dataset_type, ann_file=data_root + 'annotations/zhiyuan_objv2_val.json', img_prefix=data_root + 'val/', pipeline=test_pipeline)) evaluation = dict(interval=1, metric='bbox')
ViT-Adapter-main
detection/configs/_base_/datasets/obj365_detection.py
# Copyright (c) OpenMMLab. All rights reserved. # dataset settings dataset_type = 'VOCDataset' data_root = 'data/VOCdevkit/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(1000, 600), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict(type='MultiScaleFlipAug', img_scale=(1000, 600), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict(type='RepeatDataset', times=3, dataset=dict( type=dataset_type, ann_file=[ data_root + 'VOC2007/ImageSets/Main/trainval.txt', data_root + 'VOC2012/ImageSets/Main/trainval.txt' ], img_prefix=[data_root + 'VOC2007/', data_root + 'VOC2012/'], pipeline=train_pipeline)), val=dict(type=dataset_type, ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt', img_prefix=data_root + 'VOC2007/', pipeline=test_pipeline), test=dict(type=dataset_type, ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt', img_prefix=data_root + 'VOC2007/', pipeline=test_pipeline)) evaluation = dict(interval=1, metric='mAP')
ViT-Adapter-main
detection/configs/_base_/datasets/voc0712.py
# Copyright (c) OpenMMLab. All rights reserved. # dataset settings dataset_type = 'DeepFashionDataset' data_root = 'data/DeepFashion/In-shop/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict(type='Resize', img_scale=(750, 1101), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict(type='MultiScaleFlipAug', img_scale=(750, 1101), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict(imgs_per_gpu=2, workers_per_gpu=1, train=dict(type=dataset_type, ann_file=data_root + 'annotations/DeepFashion_segmentation_query.json', img_prefix=data_root + 'Img/', pipeline=train_pipeline, data_root=data_root), val=dict(type=dataset_type, ann_file=data_root + 'annotations/DeepFashion_segmentation_query.json', img_prefix=data_root + 'Img/', pipeline=test_pipeline, data_root=data_root), test=dict(type=dataset_type, ann_file=data_root + 'annotations/DeepFashion_segmentation_gallery.json', img_prefix=data_root + 'Img/', pipeline=test_pipeline, data_root=data_root)) evaluation = dict(interval=5, metric=['bbox', 'segm'])
ViT-Adapter-main
detection/configs/_base_/datasets/deepfashion.py
# model settings norm_cfg = dict(type='BN', requires_grad=False) model = dict( type='FasterRCNN', backbone=dict( type='ResNet', depth=50, num_stages=3, strides=(1, 2, 2), dilations=(1, 1, 1), out_indices=(2, ), frozen_stages=1, norm_cfg=norm_cfg, norm_eval=True, style='caffe', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet50_caffe')), rpn_head=dict( type='RPNHead', in_channels=1024, feat_channels=1024, anchor_generator=dict( type='AnchorGenerator', scales=[2, 4, 8, 16, 32], ratios=[0.5, 1.0, 2.0], strides=[16]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[1.0, 1.0, 1.0, 1.0]), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox=dict(type='L1Loss', loss_weight=1.0)), roi_head=dict( type='StandardRoIHead', shared_head=dict( type='ResLayer', depth=50, stage=3, stride=2, dilation=1, style='caffe', norm_cfg=norm_cfg, norm_eval=True), bbox_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), out_channels=1024, featmap_strides=[16]), bbox_head=dict( type='BBoxHead', with_avg_pool=True, roi_feat_size=7, in_channels=2048, num_classes=80, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.1, 0.1, 0.2, 0.2]), reg_class_agnostic=False, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='L1Loss', loss_weight=1.0))), # model training and testing settings train_cfg=dict( rpn=dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.7, neg_iou_thr=0.3, min_pos_iou=0.3, match_low_quality=True, ignore_iof_thr=-1), sampler=dict( type='RandomSampler', num=256, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=False), allowed_border=0, pos_weight=-1, debug=False), rpn_proposal=dict( nms_pre=12000, max_per_img=2000, nms=dict(type='nms', iou_threshold=0.7), min_bbox_size=0), rcnn=dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.5, min_pos_iou=0.5, match_low_quality=False, ignore_iof_thr=-1), sampler=dict( type='RandomSampler', num=512, pos_fraction=0.25, neg_pos_ub=-1, add_gt_as_proposals=True), pos_weight=-1, debug=False)), test_cfg=dict( rpn=dict( nms_pre=6000, max_per_img=1000, nms=dict(type='nms', iou_threshold=0.7), min_bbox_size=0), rcnn=dict( score_thr=0.05, nms=dict(type='nms', iou_threshold=0.5), max_per_img=100)))
ViT-Adapter-main
detection/configs/_base_/models/faster_rcnn_r50_caffe_c4.py
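`DeltaXYWHBBoxCoder`, used by this and the following detectors, encodes a ground-truth box against a proposal as (dx, dy, dw, dh) = ((gx-px)/pw, (gy-py)/ph, log(gw/pw), log(gh/ph)), then normalizes by `target_means` and `target_stds`. A minimal sketch for xyxy boxes:

# sketch of the delta encoding performed by DeltaXYWHBBoxCoder
import math

def encode(proposal, gt, means=(0., 0., 0., 0.), stds=(1., 1., 1., 1.)):
    px, py = (proposal[0] + proposal[2]) / 2, (proposal[1] + proposal[3]) / 2
    pw, ph = proposal[2] - proposal[0], proposal[3] - proposal[1]
    gx, gy = (gt[0] + gt[2]) / 2, (gt[1] + gt[3]) / 2
    gw, gh = gt[2] - gt[0], gt[3] - gt[1]
    deltas = ((gx - px) / pw, (gy - py) / ph,
              math.log(gw / pw), math.log(gh / ph))
    return [(d - m) / s for d, m, s in zip(deltas, means, stds)]

print(encode([0, 0, 10, 10], [1, 1, 11, 11]))  # [0.1, 0.1, 0.0, 0.0]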
# model settings norm_cfg = dict(type='BN', requires_grad=False) model = dict( type='MaskRCNN', backbone=dict( type='ResNet', depth=50, num_stages=3, strides=(1, 2, 2), dilations=(1, 1, 1), out_indices=(2, ), frozen_stages=1, norm_cfg=norm_cfg, norm_eval=True, style='caffe', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet50_caffe')), rpn_head=dict( type='RPNHead', in_channels=1024, feat_channels=1024, anchor_generator=dict( type='AnchorGenerator', scales=[2, 4, 8, 16, 32], ratios=[0.5, 1.0, 2.0], strides=[16]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[1.0, 1.0, 1.0, 1.0]), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox=dict(type='L1Loss', loss_weight=1.0)), roi_head=dict( type='StandardRoIHead', shared_head=dict( type='ResLayer', depth=50, stage=3, stride=2, dilation=1, style='caffe', norm_cfg=norm_cfg, norm_eval=True), bbox_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), out_channels=1024, featmap_strides=[16]), bbox_head=dict( type='BBoxHead', with_avg_pool=True, roi_feat_size=7, in_channels=2048, num_classes=80, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.1, 0.1, 0.2, 0.2]), reg_class_agnostic=False, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='L1Loss', loss_weight=1.0)), mask_roi_extractor=None, mask_head=dict( type='FCNMaskHead', num_convs=0, in_channels=2048, conv_out_channels=256, num_classes=80, loss_mask=dict( type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), # model training and testing settings train_cfg=dict( rpn=dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.7, neg_iou_thr=0.3, min_pos_iou=0.3, match_low_quality=True, ignore_iof_thr=-1), sampler=dict( type='RandomSampler', num=256, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=False), allowed_border=0, pos_weight=-1, debug=False), rpn_proposal=dict( nms_pre=12000, max_per_img=2000, nms=dict(type='nms', iou_threshold=0.7), min_bbox_size=0), rcnn=dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.5, min_pos_iou=0.5, match_low_quality=False, ignore_iof_thr=-1), sampler=dict( type='RandomSampler', num=512, pos_fraction=0.25, neg_pos_ub=-1, add_gt_as_proposals=True), mask_size=14, pos_weight=-1, debug=False)), test_cfg=dict( rpn=dict( nms_pre=6000, nms=dict(type='nms', iou_threshold=0.7), max_per_img=1000, min_bbox_size=0), rcnn=dict( score_thr=0.05, nms=dict(type='nms', iou_threshold=0.5), max_per_img=100, mask_thr_binary=0.5)))
ViT-Adapter-main
detection/configs/_base_/models/mask_rcnn_r50_caffe_c4.py
# model settings input_size = 300 model = dict( type='SingleStageDetector', backbone=dict( type='SSDVGG', depth=16, with_last_pool=False, ceil_mode=True, out_indices=(3, 4), out_feature_indices=(22, 34), init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://vgg16_caffe')), neck=dict( type='SSDNeck', in_channels=(512, 1024), out_channels=(512, 1024, 512, 256, 256, 256), level_strides=(2, 2, 1, 1), level_paddings=(1, 1, 0, 0), l2_norm_scale=20), bbox_head=dict( type='SSDHead', in_channels=(512, 1024, 512, 256, 256, 256), num_classes=80, anchor_generator=dict( type='SSDAnchorGenerator', scale_major=False, input_size=input_size, basesize_ratio_range=(0.15, 0.9), strides=[8, 16, 32, 64, 100, 300], ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[0.1, 0.1, 0.2, 0.2])), # model training and testing settings train_cfg=dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.5, min_pos_iou=0., ignore_iof_thr=-1, gt_max_assign_all=False), smoothl1_beta=1., allowed_border=-1, pos_weight=-1, neg_pos_ratio=3, debug=False), test_cfg=dict( nms_pre=1000, nms=dict(type='nms', iou_threshold=0.45), min_bbox_size=0, score_thr=0.02, max_per_img=200)) cudnn_benchmark = True
ViT-Adapter-main
detection/configs/_base_/models/ssd300.py
# model settings model = dict( type='CascadeRCNN', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), neck=dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5), rpn_head=dict( type='RPNHead', in_channels=256, feat_channels=256, anchor_generator=dict( type='AnchorGenerator', scales=[8], ratios=[0.5, 1.0, 2.0], strides=[4, 8, 16, 32, 64]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[1.0, 1.0, 1.0, 1.0]), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), roi_head=dict( type='CascadeRoIHead', num_stages=3, stage_loss_weights=[1, 0.5, 0.25], bbox_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), out_channels=256, featmap_strides=[4, 8, 16, 32]), bbox_head=[ dict( type='Shared2FCBBoxHead', in_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=80, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.1, 0.1, 0.2, 0.2]), reg_class_agnostic=True, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), dict( type='Shared2FCBBoxHead', in_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=80, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.05, 0.05, 0.1, 0.1]), reg_class_agnostic=True, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), dict( type='Shared2FCBBoxHead', in_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=80, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.033, 0.033, 0.067, 0.067]), reg_class_agnostic=True, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) ]), # model training and testing settings train_cfg=dict( rpn=dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.7, neg_iou_thr=0.3, min_pos_iou=0.3, match_low_quality=True, ignore_iof_thr=-1), sampler=dict( type='RandomSampler', num=256, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=False), allowed_border=0, pos_weight=-1, debug=False), rpn_proposal=dict( nms_pre=2000, max_per_img=2000, nms=dict(type='nms', iou_threshold=0.7), min_bbox_size=0), rcnn=[ dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.5, min_pos_iou=0.5, match_low_quality=False, ignore_iof_thr=-1), sampler=dict( type='RandomSampler', num=512, pos_fraction=0.25, neg_pos_ub=-1, add_gt_as_proposals=True), pos_weight=-1, debug=False), dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.6, neg_iou_thr=0.6, min_pos_iou=0.6, match_low_quality=False, ignore_iof_thr=-1), sampler=dict( type='RandomSampler', num=512, pos_fraction=0.25, neg_pos_ub=-1, add_gt_as_proposals=True), pos_weight=-1, debug=False), dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.7, neg_iou_thr=0.7, min_pos_iou=0.7, match_low_quality=False, ignore_iof_thr=-1), sampler=dict( type='RandomSampler', num=512, pos_fraction=0.25, neg_pos_ub=-1, add_gt_as_proposals=True), pos_weight=-1, debug=False) ]), 
test_cfg=dict( rpn=dict( nms_pre=1000, max_per_img=1000, nms=dict(type='nms', iou_threshold=0.7), min_bbox_size=0), rcnn=dict( score_thr=0.05, nms=dict(type='nms', iou_threshold=0.5), max_per_img=100)))
ViT-Adapter-main
detection/configs/_base_/models/cascade_rcnn_r50_fpn.py
# model settings model = dict( type='FastRCNN', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), neck=dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5), roi_head=dict( type='StandardRoIHead', bbox_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), out_channels=256, featmap_strides=[4, 8, 16, 32]), bbox_head=dict( type='Shared2FCBBoxHead', in_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=80, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.1, 0.1, 0.2, 0.2]), reg_class_agnostic=False, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='L1Loss', loss_weight=1.0))), # model training and testing settings train_cfg=dict( rcnn=dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.5, min_pos_iou=0.5, match_low_quality=False, ignore_iof_thr=-1), sampler=dict( type='RandomSampler', num=512, pos_fraction=0.25, neg_pos_ub=-1, add_gt_as_proposals=True), pos_weight=-1, debug=False)), test_cfg=dict( rcnn=dict( score_thr=0.05, nms=dict(type='nms', iou_threshold=0.5), max_per_img=100)))
ViT-Adapter-main
detection/configs/_base_/models/fast_rcnn_r50_fpn.py
# model settings model = dict( type='RPN', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), neck=dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5), rpn_head=dict( type='RPNHead', in_channels=256, feat_channels=256, anchor_generator=dict( type='AnchorGenerator', scales=[8], ratios=[0.5, 1.0, 2.0], strides=[4, 8, 16, 32, 64]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[1.0, 1.0, 1.0, 1.0]), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox=dict(type='L1Loss', loss_weight=1.0)), # model training and testing settings train_cfg=dict( rpn=dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.7, neg_iou_thr=0.3, min_pos_iou=0.3, ignore_iof_thr=-1), sampler=dict( type='RandomSampler', num=256, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=False), allowed_border=0, pos_weight=-1, debug=False)), test_cfg=dict( rpn=dict( nms_pre=2000, max_per_img=1000, nms=dict(type='nms', iou_threshold=0.7), min_bbox_size=0)))
ViT-Adapter-main
detection/configs/_base_/models/rpn_r50_fpn.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # model settings model = dict( type='MaskRCNN', pretrained=None, backbone=dict( type='ConvNeXt', in_chans=3, depths=[3, 3, 9, 3], dims=[96, 192, 384, 768], drop_path_rate=0.2, layer_scale_init_value=1e-6, out_indices=[0, 1, 2, 3], ), neck=dict( type='FPN', in_channels=[128, 256, 512, 1024], out_channels=256, num_outs=5), rpn_head=dict( type='RPNHead', in_channels=256, feat_channels=256, anchor_generator=dict( type='AnchorGenerator', scales=[8], ratios=[0.5, 1.0, 2.0], strides=[4, 8, 16, 32, 64]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[1.0, 1.0, 1.0, 1.0]), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox=dict(type='L1Loss', loss_weight=1.0)), roi_head=dict( type='StandardRoIHead', bbox_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), out_channels=256, featmap_strides=[4, 8, 16, 32]), bbox_head=dict( type='Shared2FCBBoxHead', in_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=80, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.1, 0.1, 0.2, 0.2]), reg_class_agnostic=False, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='L1Loss', loss_weight=1.0)), mask_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), out_channels=256, featmap_strides=[4, 8, 16, 32]), mask_head=dict( type='FCNMaskHead', num_convs=4, in_channels=256, conv_out_channels=256, num_classes=80, loss_mask=dict( type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), # model training and testing settings train_cfg=dict( rpn=dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.7, neg_iou_thr=0.3, min_pos_iou=0.3, match_low_quality=True, ignore_iof_thr=-1), sampler=dict( type='RandomSampler', num=256, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=False), allowed_border=-1, pos_weight=-1, debug=False), rpn_proposal=dict( nms_pre=2000, max_per_img=1000, nms=dict(type='nms', iou_threshold=0.7), min_bbox_size=0), rcnn=dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.5, min_pos_iou=0.5, match_low_quality=True, ignore_iof_thr=-1), sampler=dict( type='RandomSampler', num=512, pos_fraction=0.25, neg_pos_ub=-1, add_gt_as_proposals=True), mask_size=28, pos_weight=-1, debug=False)), test_cfg=dict( rpn=dict( nms_pre=1000, max_per_img=1000, nms=dict(type='nms', iou_threshold=0.7), min_bbox_size=0), rcnn=dict( score_thr=0.05, nms=dict(type='nms', iou_threshold=0.5), max_per_img=100, mask_thr_binary=0.5)))
ViT-Adapter-main
detection/configs/_base_/models/mask_rcnn_convnext_fpn.py
# model settings model = dict( type='RetinaNet', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), neck=dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, start_level=1, add_extra_convs='on_input', num_outs=5), bbox_head=dict( type='RetinaHead', num_classes=80, in_channels=256, stacked_convs=4, feat_channels=256, anchor_generator=dict( type='AnchorGenerator', octave_base_scale=4, scales_per_octave=3, ratios=[0.5, 1.0, 2.0], strides=[8, 16, 32, 64, 128]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[1.0, 1.0, 1.0, 1.0]), loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox=dict(type='L1Loss', loss_weight=1.0)), # model training and testing settings train_cfg=dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.4, min_pos_iou=0, ignore_iof_thr=-1), allowed_border=-1, pos_weight=-1, debug=False), test_cfg=dict( nms_pre=1000, min_bbox_size=0, score_thr=0.05, nms=dict(type='nms', iou_threshold=0.5), max_per_img=100))
ViT-Adapter-main
detection/configs/_base_/models/retinanet_r50_fpn.py
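The `FocalLoss(gamma=2.0, alpha=0.25)` above down-weights well-classified anchors: FL(p_t) = -alpha_t (1 - p_t)^gamma log(p_t). A self-contained PyTorch sketch of the sigmoid form used here:

# sketch: sigmoid focal loss with the config's gamma=2.0, alpha=0.25
import torch
import torch.nn.functional as F

def sigmoid_focal_loss(logits, targets, gamma=2.0, alpha=0.25):
    p = torch.sigmoid(logits)
    ce = F.binary_cross_entropy_with_logits(logits, targets, reduction='none')
    p_t = p * targets + (1 - p) * (1 - targets)        # prob of the true class
    alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
    return (alpha_t * (1 - p_t) ** gamma * ce).mean()

loss = sigmoid_focal_loss(torch.randn(8, 80), torch.zeros(8, 80))
print(loss.item())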
# model settings model = dict( type='FasterRCNN', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), neck=dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5), rpn_head=dict( type='RPNHead', in_channels=256, feat_channels=256, anchor_generator=dict( type='AnchorGenerator', scales=[8], ratios=[0.5, 1.0, 2.0], strides=[4, 8, 16, 32, 64]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[1.0, 1.0, 1.0, 1.0]), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox=dict(type='L1Loss', loss_weight=1.0)), roi_head=dict( type='StandardRoIHead', bbox_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), out_channels=256, featmap_strides=[4, 8, 16, 32]), bbox_head=dict( type='Shared2FCBBoxHead', in_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=80, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.1, 0.1, 0.2, 0.2]), reg_class_agnostic=False, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='L1Loss', loss_weight=1.0))), # model training and testing settings train_cfg=dict( rpn=dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.7, neg_iou_thr=0.3, min_pos_iou=0.3, match_low_quality=True, ignore_iof_thr=-1), sampler=dict( type='RandomSampler', num=256, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=False), allowed_border=-1, pos_weight=-1, debug=False), rpn_proposal=dict( nms_pre=2000, max_per_img=1000, nms=dict(type='nms', iou_threshold=0.7), min_bbox_size=0), rcnn=dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.5, min_pos_iou=0.5, match_low_quality=False, ignore_iof_thr=-1), sampler=dict( type='RandomSampler', num=512, pos_fraction=0.25, neg_pos_ub=-1, add_gt_as_proposals=True), pos_weight=-1, debug=False)), test_cfg=dict( rpn=dict( nms_pre=1000, max_per_img=1000, nms=dict(type='nms', iou_threshold=0.7), min_bbox_size=0), rcnn=dict( score_thr=0.05, nms=dict(type='nms', iou_threshold=0.5), max_per_img=100) # soft-nms is also supported for rcnn testing # e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05) ))
ViT-Adapter-main
detection/configs/_base_/models/faster_rcnn_r50_fpn.py
# model settings model = dict( type='RPN', backbone=dict( type='ResNet', depth=50, num_stages=3, strides=(1, 2, 2), dilations=(1, 1, 1), out_indices=(2, ), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=False), norm_eval=True, style='caffe', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet50_caffe')), neck=None, rpn_head=dict( type='RPNHead', in_channels=1024, feat_channels=1024, anchor_generator=dict( type='AnchorGenerator', scales=[2, 4, 8, 16, 32], ratios=[0.5, 1.0, 2.0], strides=[16]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[1.0, 1.0, 1.0, 1.0]), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox=dict(type='L1Loss', loss_weight=1.0)), # model training and testing settings train_cfg=dict( rpn=dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.7, neg_iou_thr=0.3, min_pos_iou=0.3, ignore_iof_thr=-1), sampler=dict( type='RandomSampler', num=256, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=False), allowed_border=0, pos_weight=-1, debug=False)), test_cfg=dict( rpn=dict( nms_pre=12000, max_per_img=2000, nms=dict(type='nms', iou_threshold=0.7), min_bbox_size=0)))
ViT-Adapter-main
detection/configs/_base_/models/rpn_r50_caffe_c4.py
# model settings norm_cfg = dict(type='BN', requires_grad=False) model = dict( type='FasterRCNN', backbone=dict( type='ResNet', depth=50, num_stages=4, strides=(1, 2, 2, 1), dilations=(1, 1, 1, 2), out_indices=(3, ), frozen_stages=1, norm_cfg=norm_cfg, norm_eval=True, style='caffe', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet50_caffe')), rpn_head=dict( type='RPNHead', in_channels=2048, feat_channels=2048, anchor_generator=dict( type='AnchorGenerator', scales=[2, 4, 8, 16, 32], ratios=[0.5, 1.0, 2.0], strides=[16]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[1.0, 1.0, 1.0, 1.0]), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox=dict(type='L1Loss', loss_weight=1.0)), roi_head=dict( type='StandardRoIHead', bbox_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), out_channels=2048, featmap_strides=[16]), bbox_head=dict( type='Shared2FCBBoxHead', in_channels=2048, fc_out_channels=1024, roi_feat_size=7, num_classes=80, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.1, 0.1, 0.2, 0.2]), reg_class_agnostic=False, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='L1Loss', loss_weight=1.0))), # model training and testing settings train_cfg=dict( rpn=dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.7, neg_iou_thr=0.3, min_pos_iou=0.3, match_low_quality=True, ignore_iof_thr=-1), sampler=dict( type='RandomSampler', num=256, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=False), allowed_border=0, pos_weight=-1, debug=False), rpn_proposal=dict( nms_pre=12000, max_per_img=2000, nms=dict(type='nms', iou_threshold=0.7), min_bbox_size=0), rcnn=dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.5, min_pos_iou=0.5, match_low_quality=False, ignore_iof_thr=-1), sampler=dict( type='RandomSampler', num=512, pos_fraction=0.25, neg_pos_ub=-1, add_gt_as_proposals=True), pos_weight=-1, debug=False)), test_cfg=dict( rpn=dict( nms=dict(type='nms', iou_threshold=0.7), nms_pre=6000, max_per_img=1000, min_bbox_size=0), rcnn=dict( score_thr=0.05, nms=dict(type='nms', iou_threshold=0.5), max_per_img=100)))
ViT-Adapter-main
detection/configs/_base_/models/faster_rcnn_r50_caffe_dc5.py
# model settings model = dict( type='CascadeRCNN', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), neck=dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5), rpn_head=dict( type='RPNHead', in_channels=256, feat_channels=256, anchor_generator=dict( type='AnchorGenerator', scales=[8], ratios=[0.5, 1.0, 2.0], strides=[4, 8, 16, 32, 64]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[1.0, 1.0, 1.0, 1.0]), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), roi_head=dict( type='CascadeRoIHead', num_stages=3, stage_loss_weights=[1, 0.5, 0.25], bbox_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), out_channels=256, featmap_strides=[4, 8, 16, 32]), bbox_head=[ dict( type='Shared2FCBBoxHead', in_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=80, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.1, 0.1, 0.2, 0.2]), reg_class_agnostic=True, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), dict( type='Shared2FCBBoxHead', in_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=80, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.05, 0.05, 0.1, 0.1]), reg_class_agnostic=True, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), dict( type='Shared2FCBBoxHead', in_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=80, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.033, 0.033, 0.067, 0.067]), reg_class_agnostic=True, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) ], mask_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), out_channels=256, featmap_strides=[4, 8, 16, 32]), mask_head=dict( type='FCNMaskHead', num_convs=4, in_channels=256, conv_out_channels=256, num_classes=80, loss_mask=dict( type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), # model training and testing settings train_cfg=dict( rpn=dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.7, neg_iou_thr=0.3, min_pos_iou=0.3, match_low_quality=True, ignore_iof_thr=-1), sampler=dict( type='RandomSampler', num=256, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=False), allowed_border=0, pos_weight=-1, debug=False), rpn_proposal=dict( nms_pre=2000, max_per_img=2000, nms=dict(type='nms', iou_threshold=0.7), min_bbox_size=0), rcnn=[ dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.5, min_pos_iou=0.5, match_low_quality=False, ignore_iof_thr=-1), sampler=dict( type='RandomSampler', num=512, pos_fraction=0.25, neg_pos_ub=-1, add_gt_as_proposals=True), mask_size=28, pos_weight=-1, debug=False), dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.6, neg_iou_thr=0.6, min_pos_iou=0.6, match_low_quality=False, ignore_iof_thr=-1), sampler=dict( type='RandomSampler', num=512, 
pos_fraction=0.25, neg_pos_ub=-1, add_gt_as_proposals=True), mask_size=28, pos_weight=-1, debug=False), dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.7, neg_iou_thr=0.7, min_pos_iou=0.7, match_low_quality=False, ignore_iof_thr=-1), sampler=dict( type='RandomSampler', num=512, pos_fraction=0.25, neg_pos_ub=-1, add_gt_as_proposals=True), mask_size=28, pos_weight=-1, debug=False) ]), test_cfg=dict( rpn=dict( nms_pre=1000, max_per_img=1000, nms=dict(type='nms', iou_threshold=0.7), min_bbox_size=0), rcnn=dict( score_thr=0.05, nms=dict(type='nms', iou_threshold=0.5), max_per_img=100, mask_thr_binary=0.5)))
ViT-Adapter-main
detection/configs/_base_/models/cascade_mask_rcnn_r50_fpn.py
# model settings model = dict( type='MaskRCNN', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), neck=dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5), rpn_head=dict( type='RPNHead', in_channels=256, feat_channels=256, anchor_generator=dict( type='AnchorGenerator', scales=[8], ratios=[0.5, 1.0, 2.0], strides=[4, 8, 16, 32, 64]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[1.0, 1.0, 1.0, 1.0]), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox=dict(type='L1Loss', loss_weight=1.0)), roi_head=dict( type='StandardRoIHead', bbox_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), out_channels=256, featmap_strides=[4, 8, 16, 32]), bbox_head=dict( type='Shared2FCBBoxHead', in_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=80, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.1, 0.1, 0.2, 0.2]), reg_class_agnostic=False, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='L1Loss', loss_weight=1.0)), mask_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), out_channels=256, featmap_strides=[4, 8, 16, 32]), mask_head=dict( type='FCNMaskHead', num_convs=4, in_channels=256, conv_out_channels=256, num_classes=80, loss_mask=dict( type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), # model training and testing settings train_cfg=dict( rpn=dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.7, neg_iou_thr=0.3, min_pos_iou=0.3, match_low_quality=True, ignore_iof_thr=-1), sampler=dict( type='RandomSampler', num=256, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=False), allowed_border=-1, pos_weight=-1, debug=False), rpn_proposal=dict( nms_pre=2000, max_per_img=1000, nms=dict(type='nms', iou_threshold=0.7), min_bbox_size=0), rcnn=dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.5, min_pos_iou=0.5, match_low_quality=True, ignore_iof_thr=-1), sampler=dict( type='RandomSampler', num=512, pos_fraction=0.25, neg_pos_ub=-1, add_gt_as_proposals=True), mask_size=28, pos_weight=-1, debug=False)), test_cfg=dict( rpn=dict( nms_pre=1000, max_per_img=1000, nms=dict(type='nms', iou_threshold=0.7), min_bbox_size=0), rcnn=dict( score_thr=0.05, nms=dict(type='nms', iou_threshold=0.5), max_per_img=100, mask_thr_binary=0.5)))
ViT-Adapter-main
detection/configs/_base_/models/mask_rcnn_r50_fpn.py
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[8, 11])
runner = dict(type='EpochBasedRunner', max_epochs=12)
ViT-Adapter-main
detection/configs/_base_/schedules/schedule_1x.py
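This and the following schedule files share one policy: linear warmup from `warmup_ratio * lr` over `warmup_iters` iterations, then 10x step decay at the listed epochs. A sketch of the effective learning rate, mirroring mmcv's linear-warmup rule:

# sketch: effective lr under schedule_1x (base 0.02, step=[8, 11], gamma=0.1)
def lr_at(epoch, cur_iter, base_lr=0.02, steps=(8, 11), gamma=0.1,
          warmup_iters=500, warmup_ratio=0.001):
    regular = base_lr * gamma ** sum(epoch >= s for s in steps)
    if cur_iter >= warmup_iters:
        return regular
    k = (1 - cur_iter / warmup_iters) * (1 - warmup_ratio)  # mmcv linear warmup
    return regular * (1 - k)

print(lr_at(0, 0))       # 2e-05 (warmup start)
print(lr_at(4, 5000))    # 0.02  (full lr)
print(lr_at(9, 90000))   # 0.002 (after the epoch-8 decay)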
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
ViT-Adapter-main
detection/configs/_base_/schedules/schedule_2x.py
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[27, 33])
runner = dict(type='EpochBasedRunner', max_epochs=36)
ViT-Adapter-main
detection/configs/_base_/schedules/schedule_3x.py
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=2000,
    warmup_ratio=0.001,
    step=[62, 68])
runner = dict(type='EpochBasedRunner', max_epochs=72)
ViT-Adapter-main
detection/configs/_base_/schedules/schedule_6x.py
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[16, 19])
runner = dict(type='EpochBasedRunner', max_epochs=20)
ViT-Adapter-main
detection/configs/_base_/schedules/schedule_20e.py
# Copyright (c) Shanghai AI Lab. All rights reserved. _base_ = [ '../_base_/models/cascade_mask_rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_3x.py', '../_base_/default_runtime.py' ] # pretrained = 'https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth' pretrained = 'pretrained/deit_small_patch16_224-cd65a155.pth' model = dict( backbone=dict( _delete_=True, type='ViTAdapter', patch_size=16, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4, drop_path_rate=0.2, conv_inplane=64, n_points=4, deform_num_heads=6, cffn_ratio=0.25, deform_ratio=1.0, interaction_indexes=[[0, 2], [3, 5], [6, 8], [9, 11]], window_attn=[True, True, False, True, True, False, True, True, False, True, True, False], window_size=[14, 14, None, 14, 14, None, 14, 14, None, 14, 14, None], pretrained=pretrained), neck=dict( type='FPN', in_channels=[384, 384, 384, 384], out_channels=256, num_outs=5), roi_head=dict( bbox_head=[ dict( type='ConvFCBBoxHead', num_shared_convs=4, num_shared_fcs=1, in_channels=256, conv_out_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=80, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.1, 0.1, 0.2, 0.2]), reg_class_agnostic=False, reg_decoded_bbox=True, norm_cfg=dict(type='SyncBN', requires_grad=True), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='GIoULoss', loss_weight=10.0)), dict( type='ConvFCBBoxHead', num_shared_convs=4, num_shared_fcs=1, in_channels=256, conv_out_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=80, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.05, 0.05, 0.1, 0.1]), reg_class_agnostic=False, reg_decoded_bbox=True, norm_cfg=dict(type='SyncBN', requires_grad=True), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='GIoULoss', loss_weight=10.0)), dict( type='ConvFCBBoxHead', num_shared_convs=4, num_shared_fcs=1, in_channels=256, conv_out_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=80, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.033, 0.033, 0.067, 0.067]), reg_class_agnostic=False, reg_decoded_bbox=True, norm_cfg=dict(type='SyncBN', requires_grad=True), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='GIoULoss', loss_weight=10.0)) ])) # optimizer img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) # augmentation strategy originates from DETR / Sparse RCNN train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='AutoAugment', policies=[ [ dict(type='Resize', img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), (608, 1333), (640, 1333), (672, 1333), (704, 1333), (736, 1333), (768, 1333), (800, 1333)], multiscale_mode='value', keep_ratio=True) ], [ dict(type='Resize', img_scale=[(400, 1333), (500, 1333), (600, 1333)], multiscale_mode='value', keep_ratio=True), dict(type='RandomCrop', crop_type='absolute_range', crop_size=(384, 600), allow_negative_crop=True), dict(type='Resize', img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), (608, 1333), (640, 1333), (672, 1333), (704, 1333), (736, 1333), (768, 1333), (800, 1333)], multiscale_mode='value', override=True, keep_ratio=True) ] ]), dict(type='RandomCrop', 
crop_type='absolute_range', crop_size=(1024, 1024), allow_negative_crop=True), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] data = dict(train=dict(pipeline=train_pipeline)) optimizer = dict( _delete_=True, type='AdamW', lr=0.0001, weight_decay=0.05, paramwise_cfg=dict( custom_keys={ 'level_embed': dict(decay_mult=0.), 'pos_embed': dict(decay_mult=0.), 'norm': dict(decay_mult=0.), 'bias': dict(decay_mult=0.) })) optimizer_config = dict(grad_clip=None) fp16 = dict(loss_scale=dict(init_scale=512)) checkpoint_config = dict( interval=1, max_keep_ckpts=3, save_last=True, )
ViT-Adapter-main
detection/configs/cascade_rcnn/cascade_mask_rcnn_deit_adapter_small_fpn_3x_coco.py
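The `paramwise_cfg.custom_keys` block in these adapter configs disables weight decay for any parameter whose name contains one of the listed substrings (mmcv's DefaultOptimizerConstructor does substring matching, trying longer keys first). A sketch of the effect:

# sketch: weight-decay multiplier per parameter name under custom_keys
custom_keys = {
    'level_embed': dict(decay_mult=0.),
    'pos_embed': dict(decay_mult=0.),
    'norm': dict(decay_mult=0.),
    'bias': dict(decay_mult=0.),
}

def decay_mult(param_name):
    # first substring match wins; longer keys are tried first
    for key in sorted(custom_keys, key=len, reverse=True):
        if key in param_name:
            return custom_keys[key]['decay_mult']
    return 1.0  # everything else keeps the full weight_decay (0.05 here)

print(decay_mult('backbone.blocks.0.norm1.weight'))     # 0.0
print(decay_mult('backbone.blocks.0.attn.qkv.weight'))  # 1.0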
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
    '../_base_/models/cascade_mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_3x.py',
    '../_base_/default_runtime.py'
]
# pretrained = 'https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth'
pretrained = 'pretrained/deit_base_patch16_224-b5f2ef4d.pth'
model = dict(
    backbone=dict(
        _delete_=True,
        type='ViTAdapter',
        patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
        drop_path_rate=0.3, conv_inplane=64, n_points=4, deform_num_heads=12,
        cffn_ratio=0.25, deform_ratio=0.5,
        interaction_indexes=[[0, 2], [3, 5], [6, 8], [9, 11]],
        window_attn=[True, True, False, True, True, False,
                     True, True, False, True, True, False],
        window_size=[14, 14, None, 14, 14, None,
                     14, 14, None, 14, 14, None],
        pretrained=pretrained),
    neck=dict(type='FPN', in_channels=[768, 768, 768, 768],
              out_channels=256, num_outs=5),
    roi_head=dict(bbox_head=[
        dict(
            type='ConvFCBBoxHead',
            num_shared_convs=4, num_shared_fcs=1, in_channels=256,
            conv_out_channels=256, fc_out_channels=1024, roi_feat_size=7,
            num_classes=80,
            bbox_coder=dict(type='DeltaXYWHBBoxCoder',
                            target_means=[0., 0., 0., 0.],
                            target_stds=[0.1, 0.1, 0.2, 0.2]),
            reg_class_agnostic=False, reg_decoded_bbox=True,
            norm_cfg=dict(type='SyncBN', requires_grad=True),
            loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=False,
                          loss_weight=1.0),
            loss_bbox=dict(type='GIoULoss', loss_weight=10.0)),
        dict(
            type='ConvFCBBoxHead',
            num_shared_convs=4, num_shared_fcs=1, in_channels=256,
            conv_out_channels=256, fc_out_channels=1024, roi_feat_size=7,
            num_classes=80,
            bbox_coder=dict(type='DeltaXYWHBBoxCoder',
                            target_means=[0., 0., 0., 0.],
                            target_stds=[0.05, 0.05, 0.1, 0.1]),
            reg_class_agnostic=False, reg_decoded_bbox=True,
            norm_cfg=dict(type='SyncBN', requires_grad=True),
            loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=False,
                          loss_weight=1.0),
            loss_bbox=dict(type='GIoULoss', loss_weight=10.0)),
        dict(
            type='ConvFCBBoxHead',
            num_shared_convs=4, num_shared_fcs=1, in_channels=256,
            conv_out_channels=256, fc_out_channels=1024, roi_feat_size=7,
            num_classes=80,
            bbox_coder=dict(type='DeltaXYWHBBoxCoder',
                            target_means=[0., 0., 0., 0.],
                            target_stds=[0.033, 0.033, 0.067, 0.067]),
            reg_class_agnostic=False, reg_decoded_bbox=True,
            norm_cfg=dict(type='SyncBN', requires_grad=True),
            loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=False,
                          loss_weight=1.0),
            loss_bbox=dict(type='GIoULoss', loss_weight=10.0))
    ]))
# optimizer
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='AutoAugment',
         policies=[
             [dict(type='Resize',
                   img_scale=[(480, 1333), (512, 1333), (544, 1333),
                              (576, 1333), (608, 1333), (640, 1333),
                              (672, 1333), (704, 1333), (736, 1333),
                              (768, 1333), (800, 1333)],
                   multiscale_mode='value', keep_ratio=True)],
             [dict(type='Resize',
                   img_scale=[(400, 1333), (500, 1333), (600, 1333)],
                   multiscale_mode='value', keep_ratio=True),
              dict(type='RandomCrop', crop_type='absolute_range',
                   crop_size=(384, 600), allow_negative_crop=True),
              dict(type='Resize',
                   img_scale=[(480, 1333), (512, 1333), (544, 1333),
                              (576, 1333), (608, 1333), (640, 1333),
                              (672, 1333), (704, 1333), (736, 1333),
                              (768, 1333), (800, 1333)],
                   multiscale_mode='value', override=True,
                   keep_ratio=True)]
         ]),
    dict(type='RandomCrop', crop_type='absolute_range',
         crop_size=(1024, 1024), allow_negative_crop=True),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(train=dict(pipeline=train_pipeline))
optimizer = dict(
    _delete_=True, type='AdamW', lr=0.0001, weight_decay=0.05,
    paramwise_cfg=dict(
        custom_keys={
            'level_embed': dict(decay_mult=0.),
            'pos_embed': dict(decay_mult=0.),
            'norm': dict(decay_mult=0.),
            'bias': dict(decay_mult=0.)
        }))
optimizer_config = dict(grad_clip=None)
fp16 = dict(loss_scale=dict(init_scale=512))
checkpoint_config = dict(interval=1, max_keep_ckpts=3, save_last=True)
ViT-Adapter-main
detection/configs/cascade_rcnn/cascade_mask_rcnn_deit_adapter_base_fpn_3x_coco.py
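These are plain MMDetection 2.x config files, so they can be loaded and inspected before training. A minimal sketch, assuming an mmcv 1.x environment and that the `_base_` files referenced above sit next to the config:

from mmcv import Config

# Load the config and resolve its _base_ inheritance chain.
cfg = Config.fromfile(
    'detection/configs/cascade_rcnn/'
    'cascade_mask_rcnn_deit_adapter_base_fpn_3x_coco.py')
print(cfg.model.backbone.type)  # -> 'ViTAdapter'
print(cfg.optimizer.type)       # -> 'AdamW'
# cfg.dump('merged_config.py')  # optionally write the merged config to disk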
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
    '../_base_/models/cascade_mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_3x.py',
    '../_base_/default_runtime.py'
]
# pretrained = 'https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth'
pretrained = 'pretrained/deit_base_patch16_224-b5f2ef4d.pth'
model = dict(
    backbone=dict(
        _delete_=True,
        type='ViTBaseline',
        patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
        drop_path_rate=0.2,
        out_indices=[2, 5, 8, 11],
        window_attn=[True, True, False, True, True, False,
                     True, True, False, True, True, False],
        window_size=[14, 14, None, 14, 14, None,
                     14, 14, None, 14, 14, None],
        pretrained=pretrained),
    neck=dict(type='FPN', in_channels=[768, 768, 768, 768],
              out_channels=256, num_outs=5),
    roi_head=dict(bbox_head=[
        dict(
            type='ConvFCBBoxHead',
            num_shared_convs=4, num_shared_fcs=1, in_channels=256,
            conv_out_channels=256, fc_out_channels=1024, roi_feat_size=7,
            num_classes=80,
            bbox_coder=dict(type='DeltaXYWHBBoxCoder',
                            target_means=[0., 0., 0., 0.],
                            target_stds=[0.1, 0.1, 0.2, 0.2]),
            reg_class_agnostic=False, reg_decoded_bbox=True,
            norm_cfg=dict(type='SyncBN', requires_grad=True),
            loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=False,
                          loss_weight=1.0),
            loss_bbox=dict(type='GIoULoss', loss_weight=10.0)),
        dict(
            type='ConvFCBBoxHead',
            num_shared_convs=4, num_shared_fcs=1, in_channels=256,
            conv_out_channels=256, fc_out_channels=1024, roi_feat_size=7,
            num_classes=80,
            bbox_coder=dict(type='DeltaXYWHBBoxCoder',
                            target_means=[0., 0., 0., 0.],
                            target_stds=[0.05, 0.05, 0.1, 0.1]),
            reg_class_agnostic=False, reg_decoded_bbox=True,
            norm_cfg=dict(type='SyncBN', requires_grad=True),
            loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=False,
                          loss_weight=1.0),
            loss_bbox=dict(type='GIoULoss', loss_weight=10.0)),
        dict(
            type='ConvFCBBoxHead',
            num_shared_convs=4, num_shared_fcs=1, in_channels=256,
            conv_out_channels=256, fc_out_channels=1024, roi_feat_size=7,
            num_classes=80,
            bbox_coder=dict(type='DeltaXYWHBBoxCoder',
                            target_means=[0., 0., 0., 0.],
                            target_stds=[0.033, 0.033, 0.067, 0.067]),
            reg_class_agnostic=False, reg_decoded_bbox=True,
            norm_cfg=dict(type='SyncBN', requires_grad=True),
            loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=False,
                          loss_weight=1.0),
            loss_bbox=dict(type='GIoULoss', loss_weight=10.0))
    ]))
# optimizer
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='AutoAugment',
         policies=[
             [dict(type='Resize',
                   img_scale=[(480, 1333), (512, 1333), (544, 1333),
                              (576, 1333), (608, 1333), (640, 1333),
                              (672, 1333), (704, 1333), (736, 1333),
                              (768, 1333), (800, 1333)],
                   multiscale_mode='value', keep_ratio=True)],
             [dict(type='Resize',
                   img_scale=[(400, 1333), (500, 1333), (600, 1333)],
                   multiscale_mode='value', keep_ratio=True),
              dict(type='RandomCrop', crop_type='absolute_range',
                   crop_size=(384, 600), allow_negative_crop=True),
              dict(type='Resize',
                   img_scale=[(480, 1333), (512, 1333), (544, 1333),
                              (576, 1333), (608, 1333), (640, 1333),
                              (672, 1333), (704, 1333), (736, 1333),
                              (768, 1333), (800, 1333)],
                   multiscale_mode='value', override=True,
                   keep_ratio=True)]
         ]),
    dict(type='RandomCrop', crop_type='absolute_range',
         crop_size=(1024, 1024), allow_negative_crop=True),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(
    samples_per_gpu=1,
    train=dict(pipeline=train_pipeline))
optimizer = dict(
    _delete_=True, type='AdamW', lr=0.0001, weight_decay=0.05,
    paramwise_cfg=dict(
        custom_keys={
            'level_embed': dict(decay_mult=0.),
            'pos_embed': dict(decay_mult=0.),
            'norm': dict(decay_mult=0.),
            'bias': dict(decay_mult=0.)
        }))
optimizer_config = dict(grad_clip=None)
fp16 = dict(loss_scale=dict(init_scale=512))
checkpoint_config = dict(interval=1, max_keep_ckpts=3, save_last=True)
ViT-Adapter-main
detection/configs/cascade_rcnn/cascade_mask_rcnn_deit_base_fpn_3x_coco.py
_base_ = [
    '../_base_/datasets/coco_panoptic.py',
    '../_base_/default_runtime.py'
]
num_things_classes = 80
num_stuff_classes = 53
num_classes = num_things_classes + num_stuff_classes
# pretrained = 'https://conversationhub.blob.core.windows.net/beit-share-public/beitv2/beitv2_large_patch16_224_pt1k_ft21k.pth'
pretrained = 'pretrained/beitv2_large_patch16_224_pt1k_ft21k.pth'
model = dict(
    type='Mask2Former',
    backbone=dict(
        type='BEiTAdapter',
        img_size=224, patch_size=16, embed_dim=1024, depth=24, num_heads=16,
        mlp_ratio=4, qkv_bias=True, use_abs_pos_emb=False,
        use_rel_pos_bias=True, init_values=1e-6, drop_path_rate=0.4,
        conv_inplane=64, n_points=4, deform_num_heads=16, cffn_ratio=0.25,
        deform_ratio=0.5,
        window_attn=[True, True, True, True, True, True,
                     True, True, True, True, True, True,
                     True, True, True, True, True, True,
                     True, True, True, True, True, True],
        window_size=[14, 14, 14, 14, 14, 56,
                     14, 14, 14, 14, 14, 56,
                     14, 14, 14, 14, 14, 56,
                     14, 14, 14, 14, 14, 56],
        interaction_indexes=[[0, 5], [6, 11], [12, 17], [18, 23]],
        pretrained=pretrained),
    panoptic_head=dict(
        type='Mask2FormerHead',
        in_channels=[1024, 1024, 1024, 1024],  # pass to pixel_decoder inside
        strides=[4, 8, 16, 32],
        feat_channels=256,
        out_channels=256,
        num_things_classes=num_things_classes,
        num_stuff_classes=num_stuff_classes,
        num_queries=100,
        num_transformer_feat_level=3,
        pixel_decoder=dict(
            type='MSDeformAttnPixelDecoder',
            num_outs=3,
            norm_cfg=dict(type='GN', num_groups=32),
            act_cfg=dict(type='ReLU'),
            encoder=dict(
                type='DetrTransformerEncoder',
                num_layers=6,
                transformerlayers=dict(
                    type='BaseTransformerLayer',
                    attn_cfgs=dict(
                        type='MultiScaleDeformableAttention',
                        embed_dims=256, num_heads=8, num_levels=3,
                        num_points=4, im2col_step=64, dropout=0.0,
                        batch_first=False, norm_cfg=None, init_cfg=None),
                    ffn_cfgs=dict(
                        type='FFN',
                        embed_dims=256, feedforward_channels=1024,
                        num_fcs=2, ffn_drop=0.0,
                        act_cfg=dict(type='ReLU', inplace=True)),
                    operation_order=('self_attn', 'norm', 'ffn', 'norm')),
                init_cfg=None),
            positional_encoding=dict(
                type='SinePositionalEncoding', num_feats=128, normalize=True),
            init_cfg=None),
        enforce_decoder_input_project=False,
        positional_encoding=dict(
            type='SinePositionalEncoding', num_feats=128, normalize=True),
        transformer_decoder=dict(
            type='DetrTransformerDecoder',
            return_intermediate=True,
            num_layers=9,
            transformerlayers=dict(
                type='DetrTransformerDecoderLayer',
                attn_cfgs=dict(
                    type='MultiheadAttention',
                    embed_dims=256, num_heads=8, attn_drop=0.0,
                    proj_drop=0.0, dropout_layer=None, batch_first=False),
                ffn_cfgs=dict(
                    embed_dims=256, feedforward_channels=2048, num_fcs=2,
                    act_cfg=dict(type='ReLU', inplace=True), ffn_drop=0.0,
                    dropout_layer=None, add_identity=True),
                feedforward_channels=2048,
                operation_order=('cross_attn', 'norm', 'self_attn', 'norm',
                                 'ffn', 'norm')),
            init_cfg=None),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=2.0,
            reduction='mean', class_weight=[1.0] * num_classes + [0.1]),
        loss_mask=dict(
            type='CrossEntropyLoss', use_sigmoid=True, reduction='mean',
            loss_weight=5.0),
        loss_dice=dict(
            type='DiceLoss', use_sigmoid=True, activate=True,
            reduction='mean', naive_dice=True, eps=1.0, loss_weight=5.0)),
    panoptic_fusion_head=dict(
        type='MaskFormerFusionHead',
        num_things_classes=num_things_classes,
        num_stuff_classes=num_stuff_classes,
        loss_panoptic=None,
        init_cfg=None),
    train_cfg=dict(
        num_points=12544,
        oversample_ratio=3.0,
        importance_sample_ratio=0.75,
        assigner=dict(
            type='MaskHungarianAssigner',
            cls_cost=dict(type='ClassificationCost', weight=2.0),
            mask_cost=dict(type='CrossEntropyLossCost', weight=5.0,
                           use_sigmoid=True),
            dice_cost=dict(type='DiceCost', weight=5.0, pred_act=True,
                           eps=1.0)),
        sampler=dict(type='MaskPseudoSampler')),
    test_cfg=dict(
        panoptic_on=True,
        # For now, the dataset does not support
        # evaluating the semantic segmentation metric.
        semantic_on=False,
        instance_on=True,
        # max_per_image is for instance segmentation.
        max_per_image=100,
        iou_thr=0.8,
        # In Mask2Former's panoptic postprocessing,
        # masks whose score is less than 0.5 are filtered out.
        filter_low_score=True),
    init_cfg=None)
# dataset settings
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile', to_float32=True),
    dict(type='LoadPanopticAnnotations',
         with_bbox=True, with_mask=True, with_seg=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='AutoAugment',
         policies=[
             [dict(type='Resize',
                   img_scale=[(480, 1333), (512, 1333), (544, 1333),
                              (576, 1333), (608, 1333), (640, 1333),
                              (672, 1333), (704, 1333), (736, 1333),
                              (768, 1333), (800, 1333)],
                   multiscale_mode='value', keep_ratio=True)],
             [dict(type='Resize',
                   img_scale=[(400, 1333), (500, 1333), (600, 1333)],
                   multiscale_mode='value', keep_ratio=True),
              dict(type='RandomCrop', crop_type='absolute_range',
                   crop_size=(384, 600), allow_negative_crop=True),
              dict(type='Resize',
                   img_scale=[(480, 1333), (512, 1333), (544, 1333),
                              (576, 1333), (608, 1333), (640, 1333),
                              (672, 1333), (704, 1333), (736, 1333),
                              (768, 1333), (800, 1333)],
                   multiscale_mode='value', override=True,
                   keep_ratio=True)]
         ]),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle', img_to_float=True),
    dict(type='Collect',
         keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks',
               'gt_semantic_seg']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='MultiScaleFlipAug',
         img_scale=(1333, 800),
         flip=False,
         transforms=[
             dict(type='Resize', keep_ratio=True),
             dict(type='RandomFlip'),
             dict(type='Normalize', **img_norm_cfg),
             dict(type='Pad', size_divisor=32),
             dict(type='ImageToTensor', keys=['img']),
             dict(type='Collect', keys=['img']),
         ])
]
data_root = 'data/coco/'
data = dict(
    samples_per_gpu=1,
    workers_per_gpu=2,
    train=dict(pipeline=train_pipeline),
    val=dict(
        pipeline=test_pipeline,
        ins_ann_file=data_root + 'annotations/instances_val2017.json',
    ),
    test=dict(
        pipeline=test_pipeline,
        ins_ann_file=data_root + 'annotations/instances_val2017.json',
    ))
# optimizer
optimizer = dict(
    type='AdamW', lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05,
    constructor='LayerDecayOptimizerConstructor',
    paramwise_cfg=dict(num_layers=24, layer_decay_rate=0.90))
optimizer_config = dict(grad_clip=dict(max_norm=0.01, norm_type=2))
# learning policy
lr_config = dict(
    policy='step',
    gamma=0.1,
    by_epoch=False,
    step=[191750, 236000],
    warmup='linear',
    warmup_by_epoch=False,
    warmup_ratio=1.0,  # no warmup
    warmup_iters=10)
max_iters = 265500
runner = dict(type='IterBasedRunner', max_iters=max_iters)
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook', by_epoch=False),
    ])
interval = 5000
workflow = [('train', interval)]
checkpoint_config = dict(
    by_epoch=False, interval=interval, save_last=True, max_keep_ckpts=3)
dynamic_intervals = [(max_iters // interval * interval + 1, max_iters)]
evaluation = dict(
    interval=interval,
    dynamic_intervals=dynamic_intervals,
    metric=['PQ', 'bbox', 'segm'])
custom_hooks = [
    dict(type='ExpMomentumEMAHook',
         resume_from=None,
         momentum=0.0001,
         priority=49)
]
ViT-Adapter-main
detection/configs/mask2former/mask2former_beitv2_adapter_large_16x1_3x_coco-panoptic.py
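A quick sanity check on the iteration-based schedule in the config above. Assuming the "16x1" in the file name means 16 GPUs with 1 image each, and that COCO train2017 has roughly 118,287 images (both assumptions, not stated in the file), 265,500 iterations works out to about a 36-epoch "3x" schedule, with lr drops at roughly 72% and 89% of training:

imgs_per_iter = 16 * 1                      # assumed from the '16x1' file name
epochs = 265500 * imgs_per_iter / 118287    # ~35.9 epochs, i.e. a "3x" schedule
print(191750 / 265500, 236000 / 265500)     # ~0.72 and ~0.89 of max_iters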
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_1x.py',
    '../_base_/default_runtime.py'
]
# pretrained = 'https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth'
pretrained = 'pretrained/deit_tiny_patch16_224-a1311bcf.pth'
model = dict(
    backbone=dict(
        _delete_=True,
        type='ViTAdapter',
        patch_size=16, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4,
        drop_path_rate=0.1, conv_inplane=64, n_points=4, deform_num_heads=6,
        cffn_ratio=0.25, deform_ratio=1.0,
        layer_scale=False,
        interaction_indexes=[[0, 2], [3, 5], [6, 8], [9, 11]],
        window_attn=[True, True, False, True, True, False,
                     True, True, False, True, True, False],
        window_size=[14, 14, None, 14, 14, None,
                     14, 14, None, 14, 14, None],
        pretrained=pretrained),
    neck=dict(type='FPN', in_channels=[192, 192, 192, 192],
              out_channels=256, num_outs=5))
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2)
optimizer = dict(
    _delete_=True, type='AdamW', lr=0.0002, weight_decay=0.01,
    paramwise_cfg=dict(
        custom_keys={
            'level_embed': dict(decay_mult=0.),
            'pos_embed': dict(decay_mult=0.),
            'norm': dict(decay_mult=0.),
            'bias': dict(decay_mult=0.)
        }))
optimizer_config = dict(grad_clip=None)
evaluation = dict(save_best='auto')
# fp16 = dict(loss_scale=dict(init_scale=512))
checkpoint_config = dict(interval=1, max_keep_ckpts=3, save_last=True)
ViT-Adapter-main
detection/configs/mask_rcnn/mask_rcnn_deit_adapter_tiny_fpn_1x_coco.py
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_3x.py',
    '../_base_/default_runtime.py'
]
# pretrained = 'https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth'
pretrained = 'pretrained/deit_base_patch16_224-b5f2ef4d.pth'
model = dict(
    backbone=dict(
        _delete_=True,
        type='ViTBaseline',
        patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
        drop_path_rate=0.1,
        out_indices=[2, 5, 8, 11],
        window_attn=[True, True, False, True, True, False,
                     True, True, False, True, True, False],
        window_size=[14, 14, None, 14, 14, None,
                     14, 14, None, 14, 14, None],
        pretrained=pretrained),
    neck=dict(type='FPN', in_channels=[768, 768, 768, 768],
              out_channels=256, num_outs=5))
# optimizer
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='AutoAugment',
         policies=[
             [dict(type='Resize',
                   img_scale=[(480, 1333), (512, 1333), (544, 1333),
                              (576, 1333), (608, 1333), (640, 1333),
                              (672, 1333), (704, 1333), (736, 1333),
                              (768, 1333), (800, 1333)],
                   multiscale_mode='value', keep_ratio=True)],
             [dict(type='Resize',
                   img_scale=[(400, 1333), (500, 1333), (600, 1333)],
                   multiscale_mode='value', keep_ratio=True),
              dict(type='RandomCrop', crop_type='absolute_range',
                   crop_size=(384, 600), allow_negative_crop=True),
              dict(type='Resize',
                   img_scale=[(480, 1333), (512, 1333), (544, 1333),
                              (576, 1333), (608, 1333), (640, 1333),
                              (672, 1333), (704, 1333), (736, 1333),
                              (768, 1333), (800, 1333)],
                   multiscale_mode='value', override=True,
                   keep_ratio=True)]
         ]),
    dict(type='RandomCrop', crop_type='absolute_range',
         crop_size=(1024, 1024), allow_negative_crop=True),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(train=dict(pipeline=train_pipeline))
optimizer = dict(
    _delete_=True, type='AdamW', lr=0.0001, weight_decay=0.05,
    paramwise_cfg=dict(
        custom_keys={
            'level_embed': dict(decay_mult=0.),
            'pos_embed': dict(decay_mult=0.),
            'norm': dict(decay_mult=0.),
            'bias': dict(decay_mult=0.)
        }))
optimizer_config = dict(grad_clip=None)
fp16 = dict(loss_scale=dict(init_scale=512))
checkpoint_config = dict(interval=1, max_keep_ckpts=3, save_last=True)
ViT-Adapter-main
detection/configs/mask_rcnn/mask_rcnn_deit_base_fpn_3x_coco.py
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_3x.py',
    '../_base_/default_runtime.py'
]
# pretrained = 'https://github.com/czczup/ViT-Adapter/releases/download/v0.3.1/' \
#     'uni-perceiver-base-L12-H768-224size-torch-pretrained_converted.pth'
pretrained = 'pretrained/uni-perceiver-base-L12-H768-224size-torch-pretrained_converted.pth'
model = dict(
    backbone=dict(
        _delete_=True,
        type='UniPerceiverAdapter',
        patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
        drop_path_rate=0.2, conv_inplane=64, n_points=4, deform_num_heads=12,
        cffn_ratio=0.25, deform_ratio=0.5,
        interaction_indexes=[[0, 2], [3, 5], [6, 8], [9, 11]],
        window_attn=[True, True, False, True, True, False,
                     True, True, False, True, True, False],
        window_size=[14, 14, None, 14, 14, None,
                     14, 14, None, 14, 14, None],
        pretrained=pretrained),
    neck=dict(type='FPN', in_channels=[768, 768, 768, 768],
              out_channels=256, num_outs=5))
# optimizer
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='AutoAugment',
         policies=[
             [dict(type='Resize',
                   img_scale=[(480, 1333), (512, 1333), (544, 1333),
                              (576, 1333), (608, 1333), (640, 1333),
                              (672, 1333), (704, 1333), (736, 1333),
                              (768, 1333), (800, 1333)],
                   multiscale_mode='value', keep_ratio=True)],
             [dict(type='Resize',
                   img_scale=[(400, 1333), (500, 1333), (600, 1333)],
                   multiscale_mode='value', keep_ratio=True),
              dict(type='RandomCrop', crop_type='absolute_range',
                   crop_size=(384, 600), allow_negative_crop=True),
              dict(type='Resize',
                   img_scale=[(480, 1333), (512, 1333), (544, 1333),
                              (576, 1333), (608, 1333), (640, 1333),
                              (672, 1333), (704, 1333), (736, 1333),
                              (768, 1333), (800, 1333)],
                   multiscale_mode='value', override=True,
                   keep_ratio=True)]
         ]),
    dict(type='RandomCrop', crop_type='absolute_range',
         crop_size=(1024, 1024), allow_negative_crop=True),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(train=dict(pipeline=train_pipeline))
optimizer = dict(
    _delete_=True, type='AdamW', lr=0.0001, weight_decay=0.05,
    constructor='LayerDecayOptimizerConstructor',
    paramwise_cfg=dict(num_layers=12, layer_decay_rate=0.65))
optimizer_config = dict(grad_clip=None)
evaluation = dict(save_best='auto')
fp16 = dict(loss_scale=dict(init_scale=512))
checkpoint_config = dict(interval=1, max_keep_ckpts=3, save_last=True)
ViT-Adapter-main
detection/configs/mask_rcnn/mask_rcnn_uniperceiver_adapter_base_fpn_3x_coco.py
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_3x.py',
    '../_base_/default_runtime.py'
]
# pretrained = 'https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth'
pretrained = 'pretrained/deit_small_patch16_224-cd65a155.pth'
model = dict(
    backbone=dict(
        _delete_=True,
        type='ViTAdapter',
        patch_size=16, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4,
        drop_path_rate=0.2, conv_inplane=64, n_points=4, deform_num_heads=6,
        cffn_ratio=0.25, deform_ratio=1.0,
        interaction_indexes=[[0, 2], [3, 5], [6, 8], [9, 11]],
        window_attn=[True, True, False, True, True, False,
                     True, True, False, True, True, False],
        window_size=[14, 14, None, 14, 14, None,
                     14, 14, None, 14, 14, None],
        pretrained=pretrained),
    neck=dict(type='ChannelMapperWithPooling',
              in_channels=[384, 384, 384, 384],
              out_channels=256, num_outs=5))
# optimizer
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='AutoAugment',
         policies=[
             [dict(type='Resize',
                   img_scale=[(480, 1333), (512, 1333), (544, 1333),
                              (576, 1333), (608, 1333), (640, 1333),
                              (672, 1333), (704, 1333), (736, 1333),
                              (768, 1333), (800, 1333)],
                   multiscale_mode='value', keep_ratio=True)],
             [dict(type='Resize',
                   img_scale=[(400, 1333), (500, 1333), (600, 1333)],
                   multiscale_mode='value', keep_ratio=True),
              dict(type='RandomCrop', crop_type='absolute_range',
                   crop_size=(384, 600), allow_negative_crop=True),
              dict(type='Resize',
                   img_scale=[(480, 1333), (512, 1333), (544, 1333),
                              (576, 1333), (608, 1333), (640, 1333),
                              (672, 1333), (704, 1333), (736, 1333),
                              (768, 1333), (800, 1333)],
                   multiscale_mode='value', override=True,
                   keep_ratio=True)]
         ]),
    dict(type='RandomCrop', crop_type='absolute_range',
         crop_size=(1024, 1024), allow_negative_crop=True),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(train=dict(pipeline=train_pipeline))
optimizer = dict(
    _delete_=True, type='AdamW', lr=0.0001, weight_decay=0.05,
    paramwise_cfg=dict(
        custom_keys={
            'level_embed': dict(decay_mult=0.),
            'pos_embed': dict(decay_mult=0.),
            'norm': dict(decay_mult=0.),
            'bias': dict(decay_mult=0.)
        }))
optimizer_config = dict(grad_clip=None)
fp16 = dict(loss_scale=dict(init_scale=512))
checkpoint_config = dict(interval=1, max_keep_ckpts=3, save_last=True)
ViT-Adapter-main
detection/configs/mask_rcnn/mask_rcnn_deit_adapter_small_3x_coco.py
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_3x.py',
    '../_base_/default_runtime.py'
]
# pretrained = 'https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth'
pretrained = 'pretrained/deit_small_patch16_224-cd65a155.pth'
model = dict(
    backbone=dict(
        _delete_=True,
        type='ViTBaseline',
        patch_size=16, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4,
        drop_path_rate=0.1,
        out_indices=[2, 5, 8, 11],
        window_attn=[True, True, False, True, True, False,
                     True, True, False, True, True, False],
        window_size=[14, 14, None, 14, 14, None,
                     14, 14, None, 14, 14, None],
        pretrained=pretrained),
    neck=dict(type='FPN', in_channels=[384, 384, 384, 384],
              out_channels=256, num_outs=5))
# optimizer
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='AutoAugment',
         policies=[
             [dict(type='Resize',
                   img_scale=[(480, 1333), (512, 1333), (544, 1333),
                              (576, 1333), (608, 1333), (640, 1333),
                              (672, 1333), (704, 1333), (736, 1333),
                              (768, 1333), (800, 1333)],
                   multiscale_mode='value', keep_ratio=True)],
             [dict(type='Resize',
                   img_scale=[(400, 1333), (500, 1333), (600, 1333)],
                   multiscale_mode='value', keep_ratio=True),
              dict(type='RandomCrop', crop_type='absolute_range',
                   crop_size=(384, 600), allow_negative_crop=True),
              dict(type='Resize',
                   img_scale=[(480, 1333), (512, 1333), (544, 1333),
                              (576, 1333), (608, 1333), (640, 1333),
                              (672, 1333), (704, 1333), (736, 1333),
                              (768, 1333), (800, 1333)],
                   multiscale_mode='value', override=True,
                   keep_ratio=True)]
         ]),
    dict(type='RandomCrop', crop_type='absolute_range',
         crop_size=(1024, 1024), allow_negative_crop=True),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(train=dict(pipeline=train_pipeline))
optimizer = dict(
    _delete_=True, type='AdamW', lr=0.0001, weight_decay=0.05,
    paramwise_cfg=dict(
        custom_keys={
            'level_embed': dict(decay_mult=0.),
            'pos_embed': dict(decay_mult=0.),
            'norm': dict(decay_mult=0.),
            'bias': dict(decay_mult=0.)
        }))
optimizer_config = dict(grad_clip=None)
fp16 = dict(loss_scale=dict(init_scale=512))
checkpoint_config = dict(interval=1, max_keep_ckpts=3, save_last=True)
ViT-Adapter-main
detection/configs/mask_rcnn/mask_rcnn_deit_small_fpn_3x_coco.py
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_3x.py',
    '../_base_/default_runtime.py'
]
# pretrained = 'https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth'
pretrained = 'pretrained/deit_base_patch16_224-b5f2ef4d.pth'
model = dict(
    backbone=dict(
        _delete_=True,
        type='ViTAdapter',
        patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
        drop_path_rate=0.3, conv_inplane=64, n_points=4, deform_num_heads=12,
        cffn_ratio=0.25, deform_ratio=0.5,
        interaction_indexes=[[0, 2], [3, 5], [6, 8], [9, 11]],
        window_attn=[True, True, False, True, True, False,
                     True, True, False, True, True, False],
        window_size=[14, 14, None, 14, 14, None,
                     14, 14, None, 14, 14, None],
        pretrained=pretrained),
    neck=dict(type='FPN', in_channels=[768, 768, 768, 768],
              out_channels=256, num_outs=5))
# optimizer
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='AutoAugment',
         policies=[
             [dict(type='Resize',
                   img_scale=[(480, 1333), (512, 1333), (544, 1333),
                              (576, 1333), (608, 1333), (640, 1333),
                              (672, 1333), (704, 1333), (736, 1333),
                              (768, 1333), (800, 1333)],
                   multiscale_mode='value', keep_ratio=True)],
             [dict(type='Resize',
                   img_scale=[(400, 1333), (500, 1333), (600, 1333)],
                   multiscale_mode='value', keep_ratio=True),
              dict(type='RandomCrop', crop_type='absolute_range',
                   crop_size=(384, 600), allow_negative_crop=True),
              dict(type='Resize',
                   img_scale=[(480, 1333), (512, 1333), (544, 1333),
                              (576, 1333), (608, 1333), (640, 1333),
                              (672, 1333), (704, 1333), (736, 1333),
                              (768, 1333), (800, 1333)],
                   multiscale_mode='value', override=True,
                   keep_ratio=True)]
         ]),
    dict(type='RandomCrop', crop_type='absolute_range',
         crop_size=(1024, 1024), allow_negative_crop=True),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(train=dict(pipeline=train_pipeline))
optimizer = dict(
    _delete_=True, type='AdamW', lr=0.0001, weight_decay=0.05,
    paramwise_cfg=dict(
        custom_keys={
            'level_embed': dict(decay_mult=0.),
            'pos_embed': dict(decay_mult=0.),
            'norm': dict(decay_mult=0.),
            'bias': dict(decay_mult=0.)
        }))
optimizer_config = dict(grad_clip=None)
fp16 = dict(loss_scale=dict(init_scale=512))
checkpoint_config = dict(interval=1, max_keep_ckpts=3, save_last=True)
ViT-Adapter-main
detection/configs/mask_rcnn/mask_rcnn_deit_adapter_base_fpn_3x_coco.py
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_3x.py',
    '../_base_/default_runtime.py'
]
# pretrained = 'https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth'
pretrained = 'pretrained/deit_small_patch16_224-cd65a155.pth'
model = dict(
    backbone=dict(
        _delete_=True,
        type='ViTAdapter',
        patch_size=16, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4,
        drop_path_rate=0.2, conv_inplane=64, n_points=4, deform_num_heads=6,
        cffn_ratio=0.25, deform_ratio=1.0,
        interaction_indexes=[[0, 2], [3, 5], [6, 8], [9, 11]],
        window_attn=[True, True, False, True, True, False,
                     True, True, False, True, True, False],
        window_size=[14, 14, None, 14, 14, None,
                     14, 14, None, 14, 14, None],
        pretrained=pretrained),
    neck=dict(type='FPN', in_channels=[384, 384, 384, 384],
              out_channels=256, num_outs=5))
# optimizer
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='AutoAugment',
         policies=[
             [dict(type='Resize',
                   img_scale=[(480, 1333), (512, 1333), (544, 1333),
                              (576, 1333), (608, 1333), (640, 1333),
                              (672, 1333), (704, 1333), (736, 1333),
                              (768, 1333), (800, 1333)],
                   multiscale_mode='value', keep_ratio=True)],
             [dict(type='Resize',
                   img_scale=[(400, 1333), (500, 1333), (600, 1333)],
                   multiscale_mode='value', keep_ratio=True),
              dict(type='RandomCrop', crop_type='absolute_range',
                   crop_size=(384, 600), allow_negative_crop=True),
              dict(type='Resize',
                   img_scale=[(480, 1333), (512, 1333), (544, 1333),
                              (576, 1333), (608, 1333), (640, 1333),
                              (672, 1333), (704, 1333), (736, 1333),
                              (768, 1333), (800, 1333)],
                   multiscale_mode='value', override=True,
                   keep_ratio=True)]
         ]),
    dict(type='RandomCrop', crop_type='absolute_range',
         crop_size=(1024, 1024), allow_negative_crop=True),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(train=dict(pipeline=train_pipeline))
optimizer = dict(
    _delete_=True, type='AdamW', lr=0.0001, weight_decay=0.05,
    paramwise_cfg=dict(
        custom_keys={
            'level_embed': dict(decay_mult=0.),
            'pos_embed': dict(decay_mult=0.),
            'norm': dict(decay_mult=0.),
            'bias': dict(decay_mult=0.)
        }))
optimizer_config = dict(grad_clip=None)
fp16 = dict(loss_scale=dict(init_scale=512))
checkpoint_config = dict(interval=1, max_keep_ckpts=3, save_last=True)
ViT-Adapter-main
detection/configs/mask_rcnn/mask_rcnn_deit_adapter_small_fpn_3x_coco.py
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance_augreg.py',
    '../_base_/schedules/schedule_3x.py',
    '../_base_/default_runtime.py'
]
# pretrained = 'https://storage.googleapis.com/vit_models/augreg/L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_384.npz'
# pretrained = 'https://github.com/czczup/ViT-Adapter/releases/download/v0.1.6/L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_384.pth'
pretrained = 'pretrained/L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_384.pth'
model = dict(
    backbone=dict(
        _delete_=True,
        type='ViTAdapter',
        img_size=384, pretrain_size=384, patch_size=16, embed_dim=1024,
        depth=24, num_heads=16, mlp_ratio=4, drop_path_rate=0.4,
        conv_inplane=64, n_points=4, deform_num_heads=16, cffn_ratio=0.25,
        deform_ratio=0.5,
        interaction_indexes=[[0, 5], [6, 11], [12, 17], [18, 23]],
        window_attn=[True, True, True, True, True, False,
                     True, True, True, True, True, False,
                     True, True, True, True, True, False,
                     True, True, True, True, True, False],
        window_size=[14, 14, 14, 14, 14, None,
                     14, 14, 14, 14, 14, None,
                     14, 14, 14, 14, 14, None,
                     14, 14, 14, 14, 14, None],
        pretrained=pretrained),
    neck=dict(type='FPN', in_channels=[1024, 1024, 1024, 1024],
              out_channels=256, num_outs=5)
)
# optimizer
img_norm_cfg = dict(
    mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True)
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='AutoAugment',
         policies=[
             [dict(type='Resize',
                   img_scale=[(480, 1333), (512, 1333), (544, 1333),
                              (576, 1333), (608, 1333), (640, 1333),
                              (672, 1333), (704, 1333), (736, 1333),
                              (768, 1333), (800, 1333)],
                   multiscale_mode='value', keep_ratio=True)],
             [dict(type='Resize',
                   img_scale=[(400, 1333), (500, 1333), (600, 1333)],
                   multiscale_mode='value', keep_ratio=True),
              dict(type='RandomCrop', crop_type='absolute_range',
                   crop_size=(384, 600), allow_negative_crop=True),
              dict(type='Resize',
                   img_scale=[(480, 1333), (512, 1333), (544, 1333),
                              (576, 1333), (608, 1333), (640, 1333),
                              (672, 1333), (704, 1333), (736, 1333),
                              (768, 1333), (800, 1333)],
                   multiscale_mode='value', override=True,
                   keep_ratio=True)]
         ]),
    dict(type='RandomCrop', crop_type='absolute_range',
         crop_size=(1024, 1024), allow_negative_crop=True),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(train=dict(pipeline=train_pipeline))
optimizer = dict(
    _delete_=True, type='AdamW', lr=0.0001, weight_decay=0.05,
    constructor='LayerDecayOptimizerConstructor',
    paramwise_cfg=dict(num_layers=24, layer_decay_rate=0.80))
optimizer_config = dict(grad_clip=None)
# fp16 = dict(loss_scale=dict(init_scale=512))
checkpoint_config = dict(interval=1, max_keep_ckpts=2, save_last=True)
ViT-Adapter-main
detection/configs/mask_rcnn/mask_rcnn_augreg_adapter_large_fpn_3x_coco.py
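The large configs above swap the per-key `custom_keys` multipliers for a `LayerDecayOptimizerConstructor`. As an illustration only (the exact layer indexing in this repo's constructor may differ), layer-wise decay typically scales the learning rate of transformer block `i` by `rate ** (num_layers + 1 - i)`, so early blocks stay close to their pretrained weights while late blocks adapt faster:

# Illustrative per-layer lr scales for the config above (assumed indexing:
# 0 = patch/pos embeddings, 1..24 = blocks, 25 = heads and everything else).
num_layers, rate = 24, 0.80
for layer_id in (0, 12, 24, 25):
    print(layer_id, round(rate ** (num_layers + 1 - layer_id), 5))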
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_3x.py',
    '../_base_/default_runtime.py'
]
# pretrained = 'https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth'
pretrained = 'pretrained/deit_tiny_patch16_224-a1311bcf.pth'
model = dict(
    backbone=dict(
        _delete_=True,
        type='ViTAdapter',
        patch_size=16, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4,
        drop_path_rate=0.1, conv_inplane=64, n_points=4, deform_num_heads=6,
        cffn_ratio=0.25, deform_ratio=1.0,
        interaction_indexes=[[0, 2], [3, 5], [6, 8], [9, 11]],
        window_attn=[True, True, False, True, True, False,
                     True, True, False, True, True, False],
        window_size=[14, 14, None, 14, 14, None,
                     14, 14, None, 14, 14, None],
        pretrained=pretrained),
    neck=dict(type='FPN', in_channels=[192, 192, 192, 192],
              out_channels=256, num_outs=5))
# optimizer
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='AutoAugment',
         policies=[
             [dict(type='Resize',
                   img_scale=[(480, 1333), (512, 1333), (544, 1333),
                              (576, 1333), (608, 1333), (640, 1333),
                              (672, 1333), (704, 1333), (736, 1333),
                              (768, 1333), (800, 1333)],
                   multiscale_mode='value', keep_ratio=True)],
             [dict(type='Resize',
                   img_scale=[(400, 1333), (500, 1333), (600, 1333)],
                   multiscale_mode='value', keep_ratio=True),
              dict(type='RandomCrop', crop_type='absolute_range',
                   crop_size=(384, 600), allow_negative_crop=True),
              dict(type='Resize',
                   img_scale=[(480, 1333), (512, 1333), (544, 1333),
                              (576, 1333), (608, 1333), (640, 1333),
                              (672, 1333), (704, 1333), (736, 1333),
                              (768, 1333), (800, 1333)],
                   multiscale_mode='value', override=True,
                   keep_ratio=True)]
         ]),
    dict(type='RandomCrop', crop_type='absolute_range',
         crop_size=(1024, 1024), allow_negative_crop=True),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(train=dict(pipeline=train_pipeline))
optimizer = dict(
    _delete_=True, type='AdamW', lr=0.0001, weight_decay=0.05,
    paramwise_cfg=dict(
        custom_keys={
            'level_embed': dict(decay_mult=0.),
            'pos_embed': dict(decay_mult=0.),
            'norm': dict(decay_mult=0.),
            'bias': dict(decay_mult=0.)
        }))
optimizer_config = dict(grad_clip=None)
fp16 = dict(loss_scale=dict(init_scale=512))
checkpoint_config = dict(interval=1, max_keep_ckpts=3, save_last=True)
ViT-Adapter-main
detection/configs/mask_rcnn/mask_rcnn_deit_adapter_tiny_fpn_3x_coco.py
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance_augreg.py',
    '../_base_/schedules/schedule_3x.py',
    '../_base_/default_runtime.py'
]
# pretrained = 'https://storage.googleapis.com/vit_models/augreg/L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_384.npz'
# pretrained = 'https://github.com/czczup/ViT-Adapter/releases/download/v0.1.6/L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_384.pth'
pretrained = 'pretrained/L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_384.pth'
model = dict(
    backbone=dict(
        _delete_=True,
        type='ViTBaseline',
        img_size=384, pretrain_size=384, patch_size=16, embed_dim=1024,
        depth=24, num_heads=16, mlp_ratio=4, drop_path_rate=0.4,
        out_indices=[5, 11, 17, 23],
        window_attn=[True, True, True, True, True, False,
                     True, True, True, True, True, False,
                     True, True, True, True, True, False,
                     True, True, True, True, True, False],
        window_size=[14, 14, 14, 14, 14, None,
                     14, 14, 14, 14, 14, None,
                     14, 14, 14, 14, 14, None,
                     14, 14, 14, 14, 14, None],
        pretrained=pretrained),
    neck=dict(type='FPN', in_channels=[1024, 1024, 1024, 1024],
              out_channels=256, num_outs=5),
)
# optimizer
img_norm_cfg = dict(
    mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True)
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='AutoAugment',
         policies=[
             [dict(type='Resize',
                   img_scale=[(480, 1333), (512, 1333), (544, 1333),
                              (576, 1333), (608, 1333), (640, 1333),
                              (672, 1333), (704, 1333), (736, 1333),
                              (768, 1333), (800, 1333)],
                   multiscale_mode='value', keep_ratio=True)],
             [dict(type='Resize',
                   img_scale=[(400, 1333), (500, 1333), (600, 1333)],
                   multiscale_mode='value', keep_ratio=True),
              dict(type='RandomCrop', crop_type='absolute_range',
                   crop_size=(384, 600), allow_negative_crop=True),
              dict(type='Resize',
                   img_scale=[(480, 1333), (512, 1333), (544, 1333),
                              (576, 1333), (608, 1333), (640, 1333),
                              (672, 1333), (704, 1333), (736, 1333),
                              (768, 1333), (800, 1333)],
                   multiscale_mode='value', override=True,
                   keep_ratio=True)]
         ]),
    dict(type='RandomCrop', crop_type='absolute_range',
         crop_size=(1024, 1024), allow_negative_crop=True),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(train=dict(pipeline=train_pipeline))
optimizer = dict(
    _delete_=True, type='AdamW', lr=0.0001, weight_decay=0.05,
    constructor='LayerDecayOptimizerConstructor',
    paramwise_cfg=dict(num_layers=24, layer_decay_rate=0.80))
optimizer_config = dict(grad_clip=None)
# fp16 = dict(loss_scale=dict(init_scale=512))
checkpoint_config = dict(interval=1, max_keep_ckpts=2, save_last=True)
ViT-Adapter-main
detection/configs/mask_rcnn/mask_rcnn_augreg_large_fpn_3x_coco.py
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_3x.py',
    '../_base_/default_runtime.py'
]
# pretrained = 'https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth'
pretrained = 'pretrained/deit_tiny_patch16_224-a1311bcf.pth'
model = dict(
    backbone=dict(
        _delete_=True,
        type='ViTBaseline',
        patch_size=16, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4,
        drop_path_rate=0.1,
        out_indices=[2, 5, 8, 11],
        window_attn=[True, True, False, True, True, False,
                     True, True, False, True, True, False],
        window_size=[14, 14, None, 14, 14, None,
                     14, 14, None, 14, 14, None],
        pretrained=pretrained),
    neck=dict(type='FPN', in_channels=[192, 192, 192, 192],
              out_channels=256, num_outs=5))
# optimizer
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='AutoAugment',
         policies=[
             [dict(type='Resize',
                   img_scale=[(480, 1333), (512, 1333), (544, 1333),
                              (576, 1333), (608, 1333), (640, 1333),
                              (672, 1333), (704, 1333), (736, 1333),
                              (768, 1333), (800, 1333)],
                   multiscale_mode='value', keep_ratio=True)],
             [dict(type='Resize',
                   img_scale=[(400, 1333), (500, 1333), (600, 1333)],
                   multiscale_mode='value', keep_ratio=True),
              dict(type='RandomCrop', crop_type='absolute_range',
                   crop_size=(384, 600), allow_negative_crop=True),
              dict(type='Resize',
                   img_scale=[(480, 1333), (512, 1333), (544, 1333),
                              (576, 1333), (608, 1333), (640, 1333),
                              (672, 1333), (704, 1333), (736, 1333),
                              (768, 1333), (800, 1333)],
                   multiscale_mode='value', override=True,
                   keep_ratio=True)]
         ]),
    dict(type='RandomCrop', crop_type='absolute_range',
         crop_size=(1024, 1024), allow_negative_crop=True),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(train=dict(pipeline=train_pipeline))
optimizer = dict(
    _delete_=True, type='AdamW', lr=0.0001, weight_decay=0.05,
    paramwise_cfg=dict(
        custom_keys={
            'level_embed': dict(decay_mult=0.),
            'pos_embed': dict(decay_mult=0.),
            'norm': dict(decay_mult=0.),
            'bias': dict(decay_mult=0.)
        }))
optimizer_config = dict(grad_clip=None)
fp16 = dict(loss_scale=dict(init_scale=512))
checkpoint_config = dict(interval=1, max_keep_ckpts=3, save_last=True)
ViT-Adapter-main
detection/configs/mask_rcnn/mask_rcnn_deit_tiny_fpn_3x_coco.py
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
    '../../_base_/models/mask_rcnn_r50_fpn.py',
    '../../_base_/datasets/coco_instance.py',
    '../../_base_/schedules/schedule_3x.py',
    '../../_base_/default_runtime.py'
]
# pretrained = 'https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_pretrain.pth'
# please download the pretrained weight to the `pretrained/` folder,
# then run: `python convert_14to16.py pretrained/dinov2_vits14_pretrain.pth`
pretrained = 'pretrained/dinov2_vits14_pretrain_14to16.pth'
model = dict(
    backbone=dict(
        _delete_=True,
        type='ViTAdapter',
        pretrain_size=592, img_size=592, patch_size=16, embed_dim=384,
        depth=12, num_heads=6, mlp_ratio=4, drop_path_rate=0.2,
        conv_inplane=64, n_points=4, deform_num_heads=6, cffn_ratio=0.25,
        deform_ratio=1.0,
        interaction_indexes=[[0, 2], [3, 5], [6, 8], [9, 11]],
        window_attn=[True, True, False, True, True, False,
                     True, True, False, True, True, False],
        window_size=[14, 14, None, 14, 14, None,
                     14, 14, None, 14, 14, None],
        pretrained=pretrained),
    neck=dict(type='FPN', in_channels=[384, 384, 384, 384],
              out_channels=256, num_outs=5))
# optimizer
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='AutoAugment',
         policies=[
             [dict(type='Resize',
                   img_scale=[(480, 1333), (512, 1333), (544, 1333),
                              (576, 1333), (608, 1333), (640, 1333),
                              (672, 1333), (704, 1333), (736, 1333),
                              (768, 1333), (800, 1333)],
                   multiscale_mode='value', keep_ratio=True)],
             [dict(type='Resize',
                   img_scale=[(400, 1333), (500, 1333), (600, 1333)],
                   multiscale_mode='value', keep_ratio=True),
              dict(type='RandomCrop', crop_type='absolute_range',
                   crop_size=(384, 600), allow_negative_crop=True),
              dict(type='Resize',
                   img_scale=[(480, 1333), (512, 1333), (544, 1333),
                              (576, 1333), (608, 1333), (640, 1333),
                              (672, 1333), (704, 1333), (736, 1333),
                              (768, 1333), (800, 1333)],
                   multiscale_mode='value', override=True,
                   keep_ratio=True)]
         ]),
    dict(type='RandomCrop', crop_type='absolute_range',
         crop_size=(1024, 1024), allow_negative_crop=True),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(train=dict(pipeline=train_pipeline))
optimizer = dict(
    _delete_=True, type='AdamW', lr=0.0001, weight_decay=0.05,
    constructor='LayerDecayOptimizerConstructor',
    paramwise_cfg=dict(num_layers=12, layer_decay_rate=0.70))
optimizer_config = dict(grad_clip=None)
# fp16 = dict(loss_scale=dict(init_scale=512))
checkpoint_config = dict(interval=1, max_keep_ckpts=3, save_last=True)
ViT-Adapter-main
detection/configs/mask_rcnn/dinov2/mask_rcnn_dinov2_adapter_small_fpn_3x_coco.py
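The `convert_14to16.py` script referenced in the DINOv2 configs is not included in this dump. A hypothetical sketch of what such a conversion can look like: resize the 14x14 patch-embedding kernels of a DINOv2 checkpoint to 16x16 so that a `patch_size=16` backbone can load them. The checkpoint key below is an assumption based on common ViT naming; positional embeddings are assumed to be interpolated at load time by the backbone itself:

import sys
import torch
import torch.nn.functional as F

# Usage (assumed): python convert_14to16.py pretrained/dinov2_vits14_pretrain.pth
ckpt = torch.load(sys.argv[1], map_location='cpu')
w = ckpt['patch_embed.proj.weight']       # assumed key; shape (dim, 3, 14, 14)
ckpt['patch_embed.proj.weight'] = F.interpolate(
    w, size=(16, 16), mode='bicubic', align_corners=False)
torch.save(ckpt, sys.argv[1].replace('.pth', '_14to16.pth'))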
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
    '../../_base_/models/mask_rcnn_r50_fpn.py',
    '../../_base_/datasets/coco_instance.py',
    '../../_base_/schedules/schedule_3x.py',
    '../../_base_/default_runtime.py'
]
# pretrained = 'https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_pretrain.pth'
# please download the pretrained weight to the `pretrained/` folder,
# then run: `python convert_14to16.py pretrained/dinov2_vitb14_pretrain.pth`
pretrained = 'pretrained/dinov2_vitb14_pretrain_14to16.pth'
model = dict(
    backbone=dict(
        _delete_=True,
        type='ViTAdapter',
        pretrain_size=592, img_size=592, patch_size=16, embed_dim=768,
        depth=12, num_heads=12, mlp_ratio=4, drop_path_rate=0.3,
        conv_inplane=64, n_points=4, deform_num_heads=12, cffn_ratio=0.25,
        deform_ratio=0.5,
        interaction_indexes=[[0, 2], [3, 5], [6, 8], [9, 11]],
        window_attn=[True, True, False, True, True, False,
                     True, True, False, True, True, False],
        window_size=[14, 14, None, 14, 14, None,
                     14, 14, None, 14, 14, None],
        pretrained=pretrained),
    neck=dict(type='FPN', in_channels=[768, 768, 768, 768],
              out_channels=256, num_outs=5))
# optimizer
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='AutoAugment',
         policies=[
             [dict(type='Resize',
                   img_scale=[(480, 1333), (512, 1333), (544, 1333),
                              (576, 1333), (608, 1333), (640, 1333),
                              (672, 1333), (704, 1333), (736, 1333),
                              (768, 1333), (800, 1333)],
                   multiscale_mode='value', keep_ratio=True)],
             [dict(type='Resize',
                   img_scale=[(400, 1333), (500, 1333), (600, 1333)],
                   multiscale_mode='value', keep_ratio=True),
              dict(type='RandomCrop', crop_type='absolute_range',
                   crop_size=(384, 600), allow_negative_crop=True),
              dict(type='Resize',
                   img_scale=[(480, 1333), (512, 1333), (544, 1333),
                              (576, 1333), (608, 1333), (640, 1333),
                              (672, 1333), (704, 1333), (736, 1333),
                              (768, 1333), (800, 1333)],
                   multiscale_mode='value', override=True,
                   keep_ratio=True)]
         ]),
    dict(type='RandomCrop', crop_type='absolute_range',
         crop_size=(1024, 1024), allow_negative_crop=True),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(train=dict(pipeline=train_pipeline))
optimizer = dict(
    _delete_=True, type='AdamW', lr=0.0001, weight_decay=0.05,
    constructor='LayerDecayOptimizerConstructor',
    paramwise_cfg=dict(num_layers=12, layer_decay_rate=0.60))
optimizer_config = dict(grad_clip=None)
# fp16 = dict(loss_scale=dict(init_scale=512))
checkpoint_config = dict(interval=1, max_keep_ckpts=3, save_last=True)
ViT-Adapter-main
detection/configs/mask_rcnn/dinov2/mask_rcnn_dinov2_adapter_base_fpn_3x_coco.py
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
    '../../_base_/models/mask_rcnn_r50_fpn.py',
    '../../_base_/datasets/coco_instance.py',
    '../../_base_/schedules/schedule_3x.py',
    '../../_base_/default_runtime.py'
]
# pretrained = 'https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_pretrain.pth'
# please download the pretrained weight to the `pretrained/` folder,
# then run: `python convert_14to16.py pretrained/dinov2_vitl14_pretrain.pth`
pretrained = 'pretrained/dinov2_vitl14_pretrain_14to16.pth'
model = dict(
    backbone=dict(
        _delete_=True,
        type='ViTAdapter',
        pretrain_size=592, img_size=592, patch_size=16, embed_dim=1024,
        depth=24, num_heads=16, mlp_ratio=4, drop_path_rate=0.4,
        conv_inplane=64, n_points=4, deform_num_heads=16, cffn_ratio=0.25,
        deform_ratio=0.5,
        interaction_indexes=[[0, 5], [6, 11], [12, 17], [18, 23]],
        window_attn=[True, True, True, True, True, False,
                     True, True, True, True, True, False,
                     True, True, True, True, True, False,
                     True, True, True, True, True, False],
        window_size=[14, 14, 14, 14, 14, None,
                     14, 14, 14, 14, 14, None,
                     14, 14, 14, 14, 14, None,
                     14, 14, 14, 14, 14, None],
        pretrained=pretrained),
    neck=dict(type='FPN', in_channels=[1024, 1024, 1024, 1024],
              out_channels=256, num_outs=5)
)
# optimizer
img_norm_cfg = dict(
    mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True)
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='AutoAugment',
         policies=[
             [dict(type='Resize',
                   img_scale=[(480, 1333), (512, 1333), (544, 1333),
                              (576, 1333), (608, 1333), (640, 1333),
                              (672, 1333), (704, 1333), (736, 1333),
                              (768, 1333), (800, 1333)],
                   multiscale_mode='value', keep_ratio=True)],
             [dict(type='Resize',
                   img_scale=[(400, 1333), (500, 1333), (600, 1333)],
                   multiscale_mode='value', keep_ratio=True),
              dict(type='RandomCrop', crop_type='absolute_range',
                   crop_size=(384, 600), allow_negative_crop=True),
              dict(type='Resize',
                   img_scale=[(480, 1333), (512, 1333), (544, 1333),
                              (576, 1333), (608, 1333), (640, 1333),
                              (672, 1333), (704, 1333), (736, 1333),
                              (768, 1333), (800, 1333)],
                   multiscale_mode='value', override=True,
                   keep_ratio=True)]
         ]),
    dict(type='RandomCrop', crop_type='absolute_range',
         crop_size=(1024, 1024), allow_negative_crop=True),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(train=dict(pipeline=train_pipeline))
optimizer = dict(
    _delete_=True, type='AdamW', lr=0.0001, weight_decay=0.05,
    constructor='LayerDecayOptimizerConstructor',
    paramwise_cfg=dict(num_layers=24, layer_decay_rate=0.65))
optimizer_config = dict(grad_clip=None)
# fp16 = dict(loss_scale=dict(init_scale=512))
checkpoint_config = dict(interval=1, max_keep_ckpts=3, save_last=True)
ViT-Adapter-main
detection/configs/mask_rcnn/dinov2/mask_rcnn_dinov2_adapter_large_fpn_3x_coco.py
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_3x.py',
    '../_base_/default_runtime.py'
]
# pretrained = 'https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth'
pretrained = 'pretrained/deit_small_patch16_224-cd65a155.pth'
num_stages = 6
num_proposals = 300
model = dict(
    type='SparseRCNN',
    backbone=dict(
        type='ViTAdapter',
        patch_size=16, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4,
        drop_path_rate=0.2, conv_inplane=64, n_points=4, deform_num_heads=6,
        cffn_ratio=0.25, deform_ratio=1.0,
        interaction_indexes=[[0, 2], [3, 5], [6, 8], [9, 11]],
        window_attn=[True, True, False, True, True, False,
                     True, True, False, True, True, False],
        window_size=[14, 14, None, 14, 14, None,
                     14, 14, None, 14, 14, None],
        pretrained=pretrained),
    neck=dict(
        type='FPN',
        in_channels=[384, 384, 384, 384],
        out_channels=256,
        start_level=0,
        add_extra_convs='on_output',
        num_outs=4),
    rpn_head=dict(
        type='EmbeddingRPNHead',
        num_proposals=num_proposals,
        proposal_feature_channel=256),
    roi_head=dict(
        type='SparseRoIHead',
        num_stages=num_stages,
        stage_loss_weights=[1] * num_stages,
        proposal_feature_channel=256,
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        bbox_head=[
            dict(
                type='DIIHead',
                num_classes=80,
                num_ffn_fcs=2, num_heads=8, num_cls_fcs=1, num_reg_fcs=3,
                feedforward_channels=2048, in_channels=256, dropout=0.0,
                ffn_act_cfg=dict(type='ReLU', inplace=True),
                dynamic_conv_cfg=dict(
                    type='DynamicConv',
                    in_channels=256, feat_channels=64, out_channels=256,
                    input_feat_shape=7,
                    act_cfg=dict(type='ReLU', inplace=True),
                    norm_cfg=dict(type='LN')),
                loss_bbox=dict(type='L1Loss', loss_weight=5.0),
                loss_iou=dict(type='GIoULoss', loss_weight=2.0),
                loss_cls=dict(
                    type='FocalLoss', use_sigmoid=True, gamma=2.0,
                    alpha=0.25, loss_weight=2.0),
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    clip_border=False,
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.5, 0.5, 1., 1.]))
            for _ in range(num_stages)
        ]),
    # training and testing settings
    train_cfg=dict(
        rpn=None,
        rcnn=[
            dict(
                assigner=dict(
                    type='HungarianAssigner',
                    cls_cost=dict(type='FocalLossCost', weight=2.0),
                    reg_cost=dict(type='BBoxL1Cost', weight=5.0),
                    iou_cost=dict(type='IoUCost', iou_mode='giou',
                                  weight=2.0)),
                sampler=dict(type='PseudoSampler'),
                pos_weight=1) for _ in range(num_stages)
        ]),
    test_cfg=dict(rpn=None, rcnn=dict(max_per_img=num_proposals)))
# optimizer
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='AutoAugment',
         policies=[
             [dict(type='Resize',
                   img_scale=[(480, 1333), (512, 1333), (544, 1333),
                              (576, 1333), (608, 1333), (640, 1333),
                              (672, 1333), (704, 1333), (736, 1333),
                              (768, 1333), (800, 1333)],
                   multiscale_mode='value', keep_ratio=True)],
             [dict(type='Resize',
                   img_scale=[(400, 1333), (500, 1333), (600, 1333)],
                   multiscale_mode='value', keep_ratio=True),
              dict(type='RandomCrop', crop_type='absolute_range',
                   crop_size=(384, 600), allow_negative_crop=True),
              dict(type='Resize',
                   img_scale=[(480, 1333), (512, 1333), (544, 1333),
                              (576, 1333), (608, 1333), (640, 1333),
                              (672, 1333), (704, 1333), (736, 1333),
                              (768, 1333), (800, 1333)],
                   multiscale_mode='value', override=True,
                   keep_ratio=True)]
         ]),
    dict(type='RandomCrop', crop_type='absolute_range',
         crop_size=(1024, 1024), allow_negative_crop=True),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
data = dict(train=dict(pipeline=train_pipeline))
optimizer = dict(
    _delete_=True, type='AdamW', lr=0.0001, weight_decay=0.05,
    paramwise_cfg=dict(
        custom_keys={
            'level_embed': dict(decay_mult=0.),
            'pos_embed': dict(decay_mult=0.),
            'norm': dict(decay_mult=0.),
            'bias': dict(decay_mult=0.),
            'rpn_head': dict(decay_mult=0.002, lr_mult=0.25),
        }))
optimizer_config = dict(grad_clip=None)
# the assignment below overrides the line above and enables gradient clipping
optimizer_config = dict(_delete_=True,
                        grad_clip=dict(max_norm=1, norm_type=2))
# fp16 = dict(loss_scale=dict(init_scale=512))
checkpoint_config = dict(interval=1, max_keep_ckpts=3, save_last=True)
ViT-Adapter-main
detection/configs/sparse_rcnn/sparse_rcnn_deit_adapter_small_fpn_3x_coco.py
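One detail worth noting in the Sparse R-CNN config above: `custom_keys` entries are matched as substrings of parameter names by mmcv's default optimizer constructor, so the `'rpn_head'` entry puts the learnable proposal embeddings on a much smaller learning rate and near-zero weight decay. Illustrative only (the parameter name below is hypothetical):

name = 'rpn_head.init_proposal_features.weight'  # hypothetical parameter name
base_lr, base_wd = 0.0001, 0.05
if 'rpn_head' in name:
    lr = base_lr * 0.25      # lr_mult=0.25
    wd = base_wd * 0.002     # decay_mult=0.002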
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_1x.py',
    '../_base_/default_runtime.py'
]
# pretrained = 'https://dl.fbaipublicfiles.com/mae/pretrain/mae_pretrain_vit_base.pth'
pretrained = 'pretrained/mae_pretrain_vit_base.pth'
model = dict(
    backbone=dict(
        _delete_=True,
        type='ViTAdapter',
        patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
        drop_path_rate=0.1, conv_inplane=64, n_points=4, deform_num_heads=12,
        cffn_ratio=0.25, deform_ratio=0.5,
        use_extra_extractor=False,
        layer_scale=False,
        interaction_indexes=[[0, 2], [3, 5], [6, 8], [9, 11]],
        window_attn=[True, True, False, True, True, False,
                     True, True, False, True, True, False],
        window_size=[14, 14, None, 14, 14, None,
                     14, 14, None, 14, 14, None],
        pretrained=pretrained),
    neck=dict(
        type='FPN',
        in_channels=[768, 768, 768, 768],
        out_channels=256,
        num_outs=5,
        norm_cfg=dict(type='MMSyncBN', requires_grad=True)),
    rpn_head=dict(num_convs=2),
    roi_head=dict(
        bbox_head=dict(type='Shared4Conv1FCBBoxHead',
                       norm_cfg=dict(type='MMSyncBN', requires_grad=True)),
        mask_head=dict(norm_cfg=dict(type='MMSyncBN', requires_grad=True)),
    ))
# optimizer
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# large-scale jitter (LSJ) augmentation on a fixed 1024x1024 canvas
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='Resize',
         img_scale=(1024, 1024),
         ratio_range=(0.1, 2.0),
         multiscale_mode='range',
         keep_ratio=True),
    dict(type='RandomCrop',
         crop_type='absolute_range',
         crop_size=(1024, 1024),
         recompute_bbox=True,
         allow_negative_crop=True),
    dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=(1024, 1024)),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(train=dict(pipeline=train_pipeline))
lr_config = dict(
    _delete_=True,
    policy='CosineAnnealing',
    min_lr_ratio=0.01,
    warmup='linear',
    warmup_iters=2000,
    warmup_ratio=0.001)
runner = dict(type='EpochBasedRunner', max_epochs=25)
optimizer = dict(
    _delete_=True, type='AdamW', lr=0.0001, weight_decay=0.05,
    paramwise_cfg=dict(
        custom_keys={
            'level_embed': dict(decay_mult=0.),
            'pos_embed': dict(decay_mult=0.),
            'norm': dict(decay_mult=0.),
            'bias': dict(decay_mult=0.)
        }))
optimizer_config = dict(grad_clip=None)
fp16 = dict(loss_scale=dict(init_scale=512))
checkpoint_config = dict(interval=1, max_keep_ckpts=3, save_last=True)
ViT-Adapter-main
detection/configs/upgraded_mask_rcnn/mask_rcnn_mae_adapter_base_lsj_fpn_25ep_coco.py
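Unlike the DETR-style multi-scale pipelines in the other configs, this last config trains with large-scale jitter. A rough sketch of its geometry, assuming mmdet 2.x `Resize` semantics (a ratio drawn from `ratio_range` scales `img_scale` before cropping and padding):

import random

ratio = random.uniform(0.1, 2.0)                 # ratio_range=(0.1, 2.0)
scale = (int(1024 * ratio), int(1024 * ratio))   # sampled resize target
# RandomCrop(crop_size=(1024, 1024)) followed by Pad(size=(1024, 1024))
# always yields a fixed 1024x1024 training canvas, whatever ratio was drawn.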