python_code | repo_name | file_path
---|---|---|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data source config class for DriveNet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.utils.data as data
import numpy as np
from nvidia_tao_pytorch.cv.deformable_detr.utils.data_source_config import build_data_source_lists_per_gpu, build_data_source_lists
from nvidia_tao_pytorch.cv.deformable_detr.dataloader.od_dataset import ODDataset, ConcateODDataset
class UniformSampler(object):
"""Uniform Sampler Class from multi-source data."""
def __init__(self,
data_sources,
transforms=None):
"""Initialize Uniform Sampler Class.
Only used in distributed training with sharded data; applies uniform distribution sampling across sources.
Args:
data_sources (dict): data source configuration.
transforms (dict): transforms.
"""
self.data_sources = data_sources
self.transforms = transforms
def build_data_source(self, global_rank, num_gpus):
""" Build the data source list from multi-source data.
Args:
global_rank (int): gpu global rank to load the subset of the data.
num_gpus (int): total number of gpus to be used.
Returns:
train_dataset (Dataset): training dataset.
dataset_length (list[int]): length of each dataset (to be used in uniform sampling).
total_images_per_gpu (int): total number of images per gpu (to be used in uniform sampling).
"""
# distribute json files to each GPU
data_source_list = build_data_source_lists_per_gpu(self.data_sources, global_rank, num_gpus)
# concatenate the json files per gpu, loading only the specific jsons assigned to each gpu
dataset_per_gpu = []
dataset_length = []
total_images_per_gpu = 0
for data_source in data_source_list:
image_dir = data_source.image_dir
for _json_file in data_source.dataset_files:
ds = ODDataset(dataset_dir=image_dir, json_file=_json_file, transforms=self.transforms)
dataset_per_gpu.append(ds)
dataset_length.append(len(ds))
total_images_per_gpu = total_images_per_gpu + len(ds)
if len(dataset_per_gpu) > 1:
train_dataset = ConcateODDataset(dataset_per_gpu)
else:
train_dataset = dataset_per_gpu[0]
return train_dataset, dataset_length, total_images_per_gpu
def get_sampler(self, global_rank, num_gpus):
""" Get uniform sampler from the data source list.
Args:
global_rank (int): gpu global rank to load the subset of the data.
num_gpus (int): total number of gpus to be used.
Returns:
train_dataset (Dataset): training dataset.
train_sampler (Sampler): training sampler.
"""
train_dataset, dataset_length, total_images_per_gpu = self.build_data_source(global_rank, num_gpus)
weights = np.concatenate([[(len(train_dataset) - d_len) / len(train_dataset)] * d_len for d_len in dataset_length])
num_samples = int(total_images_per_gpu)
train_sampler = data.WeightedRandomSampler(weights, num_samples, replacement=True)
return train_dataset, train_sampler
class NonUniformSampler(object):
"""Non-Uniform Sampler Class from multi-source data."""
def __init__(self,
data_sources,
transforms=None):
"""Initialize NonUniform Sampler Class.
Only used in distributed training with sharded data; does not apply uniform distribution sampling.
Args:
data_sources (dict): data source configuration.
transforms (dict): transforms.
"""
self.data_sources = data_sources
self.transforms = transforms
def build_data_source(self, global_rank, num_gpus):
""" Build the data source list from multi-source data.
Args:
global_rank (int): gpu global rank to load the subset of the data.
num_gpus (int): total number of gpus to be used.
Returns:
train_dataset (Dataset): training dataset.
dataset_length (list[int]): length of each dataset (to be used in uniform sampling).
total_images_per_gpu (int): total number of images per gpu (to be used in uniform sampling).
"""
# distribute json files to each GPU
data_source_list = build_data_source_lists_per_gpu(self.data_sources, global_rank, num_gpus)
# concatenate the json files per gpu, loading only the specific jsons assigned to each gpu
dataset_per_gpu = []
dataset_length = []
total_images_per_gpu = 0
for data_source in data_source_list:
image_dir = data_source.image_dir
for _json_file in data_source.dataset_files:
ds = ODDataset(dataset_dir=image_dir, json_file=_json_file, transforms=self.transforms)
dataset_per_gpu.append(ds)
dataset_length.append(len(ds))
total_images_per_gpu = total_images_per_gpu + len(ds)
if len(dataset_per_gpu) > 1:
train_dataset = ConcateODDataset(dataset_per_gpu)
else:
train_dataset = dataset_per_gpu[0]
return train_dataset, dataset_length, total_images_per_gpu
def get_sampler(self, global_rank, num_gpus):
""" Get Default sampler from the data source list.
Args:
global_rank (int): gpu global rank to load the subset of the data.
num_gpus (int): total number of gpus to be used.
Returns:
train_dataset (Dataset): training dataset.
train_sampler (Sampler): training sampler.
"""
train_dataset, _, _ = self.build_data_source(global_rank, num_gpus)
train_sampler = torch.utils.data.RandomSampler(train_dataset)
return train_dataset, train_sampler
class DefaultSampler(object):
"""Default Sampler Class from multi or single source data."""
def __init__(self,
data_sources,
is_distributed=False,
transforms=None):
"""Default Sampler Constructor.
Args:
data_sources (dict): data source configuration.
transforms (dict): transforms.
is_distributed (bool): flag indicating whether torch is using distributed learning or not.
"""
self.data_sources = data_sources
self.transforms = transforms
self.is_distributed = is_distributed
def build_data_source(self):
"""Build the data source list from multi-source data.
Returns:
train_dataset: training dataset.
"""
# grab all the json files and concatenate them into one single dataset
data_source_list = build_data_source_lists(self.data_sources)
dataset_list = []
for data_source in data_source_list:
image_dir = data_source.image_dir
for _json_file in data_source.dataset_files:
dataset_list.append(ODDataset(dataset_dir=image_dir, json_file=_json_file, transforms=self.transforms))
if len(dataset_list) > 1:
train_dataset = ConcateODDataset(dataset_list)
else:
train_dataset = dataset_list[0]
return train_dataset
def get_sampler(self):
"""Get Default sampler from the data source list.
Returns:
train_dataset (Dataset): training dataset
train_sampler (Sampler): training sampler
"""
train_dataset = self.build_data_source()
if self.is_distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset, shuffle=True)
else:
train_sampler = torch.utils.data.RandomSampler(train_dataset)
return train_dataset, train_sampler
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/deformable_detr/dataloader/sampler.py |
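A minimal usage sketch of the samplers above, assuming `data_sources` and `transforms` come from the experiment configuration and that `collate_fn` is a hypothetical collate function for detection targets; this is illustrative only, not part of the repository.

# Hypothetical usage sketch: build a uniform sampler for rank 0 of a 2-GPU job
# and feed the resulting dataset/sampler pair to a PyTorch DataLoader.
import torch.utils.data as data
from nvidia_tao_pytorch.cv.deformable_detr.dataloader.sampler import UniformSampler

sampler_builder = UniformSampler(data_sources, transforms=transforms)  # data_sources/transforms taken from the spec
train_dataset, train_sampler = sampler_builder.get_sampler(global_rank=0, num_gpus=2)
train_loader = data.DataLoader(
    train_dataset,
    batch_size=4,
    sampler=train_sampler,  # WeightedRandomSampler balancing the shards on this GPU
    num_workers=2,
    collate_fn=collate_fn,  # hypothetical collate function for object-detection targets
)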
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Init Module for SegFormer. """
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Init Module."""
from .evaluation import * # noqa: F401, F403
from .seg import * # noqa: F401, F403
from .utils import * # noqa: F401, F403
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/core/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmsegmentation
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Misc Module."""
def add_prefix(inputs, prefix):
"""Add prefix for dict.
Args:
inputs (dict): The input dict with str keys.
prefix (str): The prefix to add.
Returns:
dict: The dict with keys updated with ``prefix``.
"""
outputs = dict()
for name, value in inputs.items():
outputs[f'{prefix}.{name}'] = value
return outputs
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/core/utils/misc.py |
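For reference, a short illustration of what `add_prefix` produces; the loss values are made up.

from nvidia_tao_pytorch.cv.segformer.core.utils.misc import add_prefix

losses = {'loss_seg': 0.41, 'acc_seg': 87.2}
print(add_prefix(losses, 'decode'))
# -> {'decode.loss_seg': 0.41, 'decode.acc_seg': 87.2}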
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Segformer Utils module."""
from .misc import add_prefix
__all__ = ['add_prefix']
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/core/utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmskeleton
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metric Module."""
import mmcv
import numpy as np
def intersect_and_union(pred_label,
label,
num_classes,
ignore_index,
label_map=dict(),
reduce_zero_label=False):
"""Calculate intersection and Union.
Args:
pred_label (ndarray): Prediction segmentation map.
label (ndarray): Ground truth segmentation map.
num_classes (int): Number of categories.
ignore_index (int): Index that will be ignored in evaluation.
label_map (dict): Mapping old labels to new labels. The parameter will
work only when label is str. Default: dict().
reduce_zero_label (bool): Whether to ignore the zero label. The parameter will
work only when label is str. Default: False.
Returns:
ndarray: The intersection of prediction and ground truth histogram
on all classes.
ndarray: The union of prediction and ground truth histogram on all
classes.
ndarray: The prediction histogram on all classes.
ndarray: The ground truth histogram on all classes.
"""
if isinstance(pred_label, str):
pred_label = np.load(pred_label)
if isinstance(label, str):
label = mmcv.imread(label, flag='unchanged', backend='pillow')
# modify if custom classes
if label_map is not None:
for old_id, new_id in label_map.items():
label[label == old_id] = new_id
if reduce_zero_label:
# avoid using underflow conversion
label[label == 0] = 255
label = label - 1
label[label == 254] = 255
mask = (label != ignore_index)
pred_label = pred_label[mask]
label = label[mask]
intersect = pred_label[pred_label == label]
area_intersect, _ = np.histogram(
intersect, bins=np.arange(num_classes + 1))
area_pred_label, _ = np.histogram(
pred_label, bins=np.arange(num_classes + 1))
area_label, _ = np.histogram(label, bins=np.arange(num_classes + 1))
area_union = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(results,
gt_seg_maps,
num_classes,
ignore_index,
label_map=dict(),
reduce_zero_label=False):
"""Calculate Total Intersection and Union.
Args:
results (list[ndarray]): List of prediction segmentation maps.
gt_seg_maps (list[ndarray]): list of ground truth segmentation maps.
num_classes (int): Number of categories.
ignore_index (int): Index that will be ignored in evaluation.
label_map (dict): Mapping old labels to new labels. Default: dict().
reduce_zero_label (bool): Whether to ignore the zero label. Default: False.
Returns:
ndarray: The intersection of prediction and ground truth histogram
on all classes.
ndarray: The union of prediction and ground truth histogram on all
classes.
ndarray: The prediction histogram on all classes.
ndarray: The ground truth histogram on all classes.
"""
num_imgs = len(results)
assert len(gt_seg_maps) == num_imgs, "The number of labels:{} is not equal to number of input images:{}.".format(len(gt_seg_maps), num_imgs)
total_area_intersect = np.zeros((num_classes, ), dtype=np.float64)
total_area_union = np.zeros((num_classes, ), dtype=np.float64)
total_area_pred_label = np.zeros((num_classes, ), dtype=np.float64)
total_area_label = np.zeros((num_classes, ), dtype=np.float64)
for i in range(num_imgs):
area_intersect, area_union, area_pred_label, area_label = \
intersect_and_union(results[i], gt_seg_maps[i], num_classes,
ignore_index, label_map, reduce_zero_label)
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, \
total_area_pred_label, total_area_label
def mean_iou(results,
gt_seg_maps,
num_classes,
ignore_index,
nan_to_num=None,
label_map=dict(),
reduce_zero_label=False):
"""Calculate Mean Intersection and Union (mIoU)
Args:
results (list[ndarray]): List of prediction segmentation maps.
gt_seg_maps (list[ndarray]): list of ground truth segmentation maps.
num_classes (int): Number of categories.
ignore_index (int): Index that will be ignored in evaluation.
nan_to_num (int, optional): If specified, NaN values will be replaced
by the numbers defined by the user. Default: None.
label_map (dict): Mapping old labels to new labels. Default: dict().
reduce_zero_label (bool): Whether to ignore the zero label. Default: False.
Returns:
float: Overall accuracy on all images.
ndarray: Per category accuracy, shape (num_classes, ).
ndarray: Per category IoU, shape (num_classes, ).
"""
all_acc, acc, iou = eval_metrics(
results=results,
gt_seg_maps=gt_seg_maps,
num_classes=num_classes,
ignore_index=ignore_index,
metrics=['mIoU'],
nan_to_num=nan_to_num,
label_map=label_map,
reduce_zero_label=reduce_zero_label)
return all_acc, acc, iou
def mean_dice(results,
gt_seg_maps,
num_classes,
ignore_index,
nan_to_num=None,
label_map=dict(),
reduce_zero_label=False):
"""Calculate Mean Dice (mDice)
Args:
results (list[ndarray]): List of prediction segmentation maps.
gt_seg_maps (list[ndarray]): list of ground truth segmentation maps.
num_classes (int): Number of categories.
ignore_index (int): Index that will be ignored in evaluation.
nan_to_num (int, optional): If specified, NaN values will be replaced
by the numbers defined by the user. Default: None.
label_map (dict): Mapping old labels to new labels. Default: dict().
reduce_zero_label (bool): Whether to ignore the zero label. Default: False.
Returns:
float: Overall accuracy on all images.
ndarray: Per category accuracy, shape (num_classes, ).
ndarray: Per category dice, shape (num_classes, ).
"""
all_acc, acc, dice = eval_metrics(
results=results,
gt_seg_maps=gt_seg_maps,
num_classes=num_classes,
ignore_index=ignore_index,
metrics=['mDice'],
nan_to_num=nan_to_num,
label_map=label_map,
reduce_zero_label=reduce_zero_label)
return all_acc, acc, dice
def eval_metrics(results,
gt_seg_maps,
num_classes,
ignore_index,
metrics=['mIoU'],
nan_to_num=None,
label_map=dict(),
reduce_zero_label=False):
"""Calculate evaluation metrics
Args:
results (list[ndarray]): List of prediction segmentation maps.
gt_seg_maps (list[ndarray]): list of ground truth segmentation maps.
num_classes (int): Number of categories.
ignore_index (int): Index that will be ignored in evaluation.
metrics (list[str] | str): Metrics to be evaluated, 'mIoU' and 'mDice'.
nan_to_num (int, optional): If specified, NaN values will be replaced
by the numbers defined by the user. Default: None.
label_map (dict): Mapping old labels to new labels. Default: dict().
reduce_zero_label (bool): Whether to ignore the zero label. Default: False.
Returns:
float: Overall accuracy on all images.
ndarray: Per category accuracy, shape (num_classes, ).
ndarray: Per category evaluation metrics, shape (num_classes, ).
"""
if isinstance(metrics, str):
metrics = [metrics]
allowed_metrics = ['mIoU', 'mDice']
if not set(metrics).issubset(set(allowed_metrics)):
raise KeyError('metrics {} is not supported'.format(metrics))
total_area_intersect, total_area_union, total_area_pred_label, \
total_area_label = total_intersect_and_union(results, gt_seg_maps,
num_classes, ignore_index,
label_map,
reduce_zero_label)
all_acc = total_area_intersect.sum() / total_area_label.sum()
acc = total_area_intersect / total_area_label
ret_metrics = [all_acc, acc]
for metric in metrics:
if metric == 'mIoU':
iou = total_area_intersect / total_area_union
ret_metrics.append(iou)
elif metric == 'mDice':
dice = 2 * total_area_intersect / (
total_area_pred_label + total_area_label)
ret_metrics.append(dice)
if nan_to_num is not None:
ret_metrics = [
np.nan_to_num(metric, nan=nan_to_num) for metric in ret_metrics
]
return ret_metrics
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/core/evaluation/metrics.py |
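An illustrative sanity check of `eval_metrics` on a tiny two-class example; the arrays are made up and the expected numbers follow directly from the histogram logic above.

import numpy as np
from nvidia_tao_pytorch.cv.segformer.core.evaluation.metrics import eval_metrics

pred = [np.array([[0, 0], [1, 1]])]   # one 2x2 predicted segmentation map
gt = [np.array([[0, 1], [1, 1]])]     # matching ground truth map
all_acc, acc, iou = eval_metrics(pred, gt, num_classes=2, ignore_index=255)
# 3 of 4 pixels agree, so all_acc == 0.75
# class 0: intersection 1, union 2 -> IoU 0.5; class 1: intersection 2, union 3 -> IoU ~0.67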
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmskeleton
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Class Names Module."""
import mmcv
def cityscapes_classes():
"""Cityscapes class names for external use."""
return [
'road', 'sidewalk', 'building', 'wall', 'fence', 'pole',
'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky',
'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle',
'bicycle'
]
def ade_classes():
"""ADE20K class names for external use."""
return [
'wall', 'building', 'sky', 'floor', 'tree', 'ceiling', 'road', 'bed ',
'windowpane', 'grass', 'cabinet', 'sidewalk', 'person', 'earth',
'door', 'table', 'mountain', 'plant', 'curtain', 'chair', 'car',
'water', 'painting', 'sofa', 'shelf', 'house', 'sea', 'mirror', 'rug',
'field', 'armchair', 'seat', 'fence', 'desk', 'rock', 'wardrobe',
'lamp', 'bathtub', 'railing', 'cushion', 'base', 'box', 'column',
'signboard', 'chest of drawers', 'counter', 'sand', 'sink',
'skyscraper', 'fireplace', 'refrigerator', 'grandstand', 'path',
'stairs', 'runway', 'case', 'pool table', 'pillow', 'screen door',
'stairway', 'river', 'bridge', 'bookcase', 'blind', 'coffee table',
'toilet', 'flower', 'book', 'hill', 'bench', 'countertop', 'stove',
'palm', 'kitchen island', 'computer', 'swivel chair', 'boat', 'bar',
'arcade machine', 'hovel', 'bus', 'towel', 'light', 'truck', 'tower',
'chandelier', 'awning', 'streetlight', 'booth', 'television receiver',
'airplane', 'dirt track', 'apparel', 'pole', 'land', 'bannister',
'escalator', 'ottoman', 'bottle', 'buffet', 'poster', 'stage', 'van',
'ship', 'fountain', 'conveyer belt', 'canopy', 'washer', 'plaything',
'swimming pool', 'stool', 'barrel', 'basket', 'waterfall', 'tent',
'bag', 'minibike', 'cradle', 'oven', 'ball', 'food', 'step', 'tank',
'trade name', 'microwave', 'pot', 'animal', 'bicycle', 'lake',
'dishwasher', 'screen', 'blanket', 'sculpture', 'hood', 'sconce',
'vase', 'traffic light', 'tray', 'ashcan', 'fan', 'pier', 'crt screen',
'plate', 'monitor', 'bulletin board', 'shower', 'radiator', 'glass',
'clock', 'flag'
]
def voc_classes():
"""Pascal VOC class names for external use."""
return [
'background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus',
'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
'tvmonitor'
]
def cityscapes_palette():
"""Cityscapes palette for external use."""
return [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156],
[190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0],
[107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60],
[255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100], [0, 80, 100],
[0, 0, 230], [119, 11, 32]]
def ade_palette():
"""ADE20K palette for external use."""
return [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50],
[4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255],
[230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7],
[150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82],
[143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3],
[0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255],
[255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220],
[255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224],
[255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255],
[224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7],
[255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153],
[6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255],
[140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0],
[255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255],
[255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255],
[11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255],
[0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0],
[255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0],
[0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255],
[173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255],
[255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20],
[255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255],
[255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255],
[0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255],
[0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0],
[143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0],
[8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255],
[255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112],
[92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160],
[163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163],
[255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0],
[255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0],
[10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255],
[255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204],
[41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255],
[71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255],
[184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194],
[102, 255, 0], [92, 0, 255]]
def voc_palette():
"""Pascal VOC palette for external use."""
return [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128],
[128, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0],
[192, 0, 0], [64, 128, 0], [192, 128, 0], [64, 0, 128],
[192, 0, 128], [64, 128, 128], [192, 128, 128], [0, 64, 0],
[128, 64, 0], [0, 192, 0], [128, 192, 0], [0, 64, 128]]
dataset_aliases = {
'cityscapes': ['cityscapes'],
'ade': ['ade', 'ade20k'],
'voc': ['voc', 'pascal_voc', 'voc12', 'voc12aug']
}
def get_classes(dataset):
"""Get class names of a dataset."""
alias2name = {}
for name, aliases in dataset_aliases.items():
for alias in aliases:
alias2name[alias] = name
if mmcv.is_str(dataset):
if dataset in alias2name:
labels = eval(alias2name[dataset] + '_classes()')
else:
raise ValueError(f'Unrecognized dataset: {dataset}')
else:
raise TypeError(f'dataset must be a str, but got {type(dataset)}')
return labels
def get_palette(dataset):
"""Get class palette (RGB) of a dataset."""
alias2name = {}
for name, aliases in dataset_aliases.items():
for alias in aliases:
alias2name[alias] = name
if mmcv.is_str(dataset):
if dataset in alias2name:
labels = eval(alias2name[dataset] + '_palette()')
else:
raise ValueError(f'Unrecognized dataset: {dataset}')
else:
raise TypeError(f'dataset must be a str, but got {type(dataset)}')
return labels
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/core/evaluation/class_names.py |
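Example lookups against the helpers above, with the resulting values shown as comments.

from nvidia_tao_pytorch.cv.segformer.core.evaluation.class_names import get_classes, get_palette

print(get_classes('cityscapes')[:3])  # ['road', 'sidewalk', 'building']
print(get_palette('voc')[0])          # [0, 0, 0] -> colour of the 'background' class
print(get_classes('ade20k')[-1])      # 'flag' (the 'ade20k' alias resolves to 'ade')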
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmskeleton
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Eval Hooks Module."""
import os.path as osp
from mmcv.runner import Hook
from torch.utils.data import DataLoader
class EvalHook(Hook):
"""Evaluation hook.
Attributes:
dataloader (DataLoader): A PyTorch dataloader.
interval (int): Evaluation interval, in iterations or epochs depending on ``by_epoch``. Default: 1.
"""
def __init__(self, dataloader, interval=1, by_epoch=False, **eval_kwargs):
"""Init Module."""
if not isinstance(dataloader, DataLoader):
raise TypeError('dataloader must be a pytorch DataLoader, but got '
f'{type(dataloader)}')
self.dataloader = dataloader
self.interval = interval
self.by_epoch = by_epoch
self.eval_kwargs = eval_kwargs
def after_train_iter(self, runner):
"""After train epoch hook."""
if self.by_epoch or not self.every_n_iters(runner, self.interval):
return
from nvidia_tao_pytorch.cv.segformer.inference.inferencer import single_gpu_test
runner.log_buffer.clear()
results = single_gpu_test(runner.model, self.dataloader, show=False)
self.evaluate(runner, results)
def after_train_epoch(self, runner):
"""After train epoch hook."""
if not self.by_epoch or not self.every_n_epochs(runner, self.interval):
return
from nvidia_tao_pytorch.cv.segformer.inference.inferencer import single_gpu_test
runner.log_buffer.clear()
results = single_gpu_test(runner.model, self.dataloader, show=False)
self.evaluate(runner, results)
def evaluate(self, runner, results):
"""Call evaluate function of dataset."""
eval_res = self.dataloader.dataset.evaluate(
results, logger=runner.logger, **self.eval_kwargs)
for name, val in eval_res.items():
runner.log_buffer.output[name] = val
runner.log_buffer.ready = True
class DistEvalHook(EvalHook):
"""Distributed evaluation hook.
Attributes:
dataloader (DataLoader): A PyTorch dataloader.
interval (int): Evaluation interval, in iterations or epochs depending on ``by_epoch``. Default: 1.
tmpdir (str | None): Temporary directory to save the results of all
processes. Default: None.
gpu_collect (bool): Whether to use gpu or cpu to collect results.
Default: False.
"""
def __init__(self,
dataloader,
interval=1,
gpu_collect=False,
by_epoch=False,
**eval_kwargs):
"""Init Module."""
if not isinstance(dataloader, DataLoader):
raise TypeError(
'dataloader must be a pytorch DataLoader, but got {}'.format(
type(dataloader)))
self.dataloader = dataloader
self.interval = interval
self.gpu_collect = gpu_collect
self.by_epoch = by_epoch
self.eval_kwargs = eval_kwargs
def after_train_iter(self, runner):
"""After train epoch hook."""
if self.by_epoch or not self.every_n_iters(runner, self.interval):
return
from nvidia_tao_pytorch.cv.segformer.inference.inferencer import multi_gpu_test
runner.log_buffer.clear()
results = multi_gpu_test(
runner.model,
self.dataloader,
tmpdir=osp.join(runner.work_dir, '.eval_hook'),
gpu_collect=self.gpu_collect)
if runner.rank == 0:
print('\n')
self.evaluate(runner, results)
def after_train_epoch(self, runner):
"""After train epoch hook."""
if not self.by_epoch or not self.every_n_epochs(runner, self.interval):
return
from nvidia_tao_pytorch.cv.segformer.inference.inferencer import multi_gpu_test
runner.log_buffer.clear()
results = multi_gpu_test(
runner.model,
self.dataloader,
tmpdir=osp.join(runner.work_dir, '.eval_hook'),
gpu_collect=self.gpu_collect)
if runner.rank == 0:
print('\n')
self.evaluate(runner, results)
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/core/evaluation/eval_hooks.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Init Module."""
from .class_names import get_classes
from .eval_hooks import DistEvalHook, EvalHook
from .metrics import eval_metrics, mean_dice, mean_iou
__all__ = [
'EvalHook', 'DistEvalHook', 'mean_dice', 'mean_iou', 'eval_metrics',
'get_classes'
]
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/core/evaluation/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Init Module."""
from .builder import build_pixel_sampler
from .sampler import BasePixelSampler, OHEMPixelSampler
__all__ = ['build_pixel_sampler', 'BasePixelSampler', 'OHEMPixelSampler']
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/core/seg/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmsegmentation
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Builder Module."""
from mmcv.utils import Registry, build_from_cfg
PIXEL_SAMPLERS = Registry('pixel sampler', scope='xxx')
def build_pixel_sampler(cfg, **default_args):
"""Build pixel sampler for segmentation map."""
return build_from_cfg(cfg, PIXEL_SAMPLERS, default_args)
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/core/seg/builder.py |
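A hedged sketch of building a registered pixel sampler through this registry; the real `context` is a decode head, so a SimpleNamespace stand-in carrying only `ignore_index` is used here for illustration.

from types import SimpleNamespace
from nvidia_tao_pytorch.cv.segformer.core.seg import build_pixel_sampler

ctx = SimpleNamespace(ignore_index=255)  # stand-in for the decode head context
cfg = dict(type='OHEMPixelSampler', thresh=0.7, min_kept=100000)
sampler = build_pixel_sampler(cfg, context=ctx)  # looks up 'OHEMPixelSampler' in PIXEL_SAMPLERS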
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmskeleton
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Base Pixel Sampler."""
from abc import ABCMeta, abstractmethod
class BasePixelSampler(metaclass=ABCMeta):
"""Base class of pixel sampler."""
def __init__(self, **kwargs):
"""Init Function."""
pass
@abstractmethod
def sample(self, seg_logit, seg_label):
"""Placeholder for sample function."""
pass
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/core/seg/sampler/base_pixel_sampler.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Sampler Init Module."""
from .base_pixel_sampler import BasePixelSampler
from .ohem_pixel_sampler import OHEMPixelSampler
__all__ = ['BasePixelSampler', 'OHEMPixelSampler']
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/core/seg/sampler/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmskeleton
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OHEM pixel Sampler."""
import torch
import torch.nn.functional as F
from nvidia_tao_pytorch.cv.segformer.core.seg.builder import PIXEL_SAMPLERS
from nvidia_tao_pytorch.cv.segformer.core.seg.sampler.base_pixel_sampler import BasePixelSampler
@PIXEL_SAMPLERS.register_module()
class OHEMPixelSampler(BasePixelSampler):
"""Online Hard Example Mining Sampler for segmentation.
Args:
context (nn.Module): The context of sampler, subclass of
:obj:`BaseDecodeHead`.
thresh (float, optional): The threshold for hard example selection.
Predictions below it are considered low confidence. If not
specified, the hard examples will be the pixels with the top ``min_kept``
losses. Default: None.
min_kept (int, optional): The minimum number of predictions to keep.
Default: 100000.
"""
def __init__(self, context, thresh=None, min_kept=100000):
"""Init Module."""
super(OHEMPixelSampler, self).__init__()
self.context = context
assert min_kept > 1, "Min kept should be greater than 1."
self.thresh = thresh
self.min_kept = min_kept
def sample(self, seg_logit, seg_label):
"""Sample pixels that have high loss or with low prediction confidence.
Args:
seg_logit (torch.Tensor): segmentation logits, shape (N, C, H, W)
seg_label (torch.Tensor): segmentation label, shape (N, 1, H, W)
Returns:
torch.Tensor: segmentation weight, shape (N, H, W)
"""
with torch.no_grad():
assert seg_logit.shape[2:] == seg_label.shape[2:], "Seg label last shape should be equal to the seg logit."
assert seg_label.shape[1] == 1, "seg_label should have a channel shape of 1."
seg_label = seg_label.squeeze(1).long()
batch_kept = self.min_kept * seg_label.size(0)
valid_mask = seg_label != self.context.ignore_index
seg_weight = seg_logit.new_zeros(size=seg_label.size())
valid_seg_weight = seg_weight[valid_mask]
if self.thresh is not None:
seg_prob = F.softmax(seg_logit, dim=1)
tmp_seg_label = seg_label.clone().unsqueeze(1)
tmp_seg_label[tmp_seg_label == self.context.ignore_index] = 0
seg_prob = seg_prob.gather(1, tmp_seg_label).squeeze(1)
sort_prob, sort_indices = seg_prob[valid_mask].sort()
if sort_prob.numel() > 0:
min_threshold = sort_prob[min(batch_kept,
sort_prob.numel() - 1)]
else:
min_threshold = 0.0
threshold = max(min_threshold, self.thresh)
valid_seg_weight[seg_prob[valid_mask] < threshold] = 1.
else:
losses = self.context.loss_decode(
seg_logit,
seg_label,
weight=None,
ignore_index=self.context.ignore_index,
reduction_override='none')
# faster than topk according to https://github.com/pytorch/pytorch/issues/22812 # noqa
_, sort_indices = losses[valid_mask].sort(descending=True)
valid_seg_weight[sort_indices[:batch_kept]] = 1.
seg_weight[valid_mask] = valid_seg_weight
return seg_weight
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/core/seg/sampler/ohem_pixel_sampler.py |
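A minimal sketch of the threshold branch of `OHEMPixelSampler.sample` on random tensors; only `ignore_index` is needed from the context on this path, so a SimpleNamespace stand-in replaces the decode head.

import torch
from types import SimpleNamespace
from nvidia_tao_pytorch.cv.segformer.core.seg import OHEMPixelSampler

ctx = SimpleNamespace(ignore_index=255)
sampler = OHEMPixelSampler(context=ctx, thresh=0.7, min_kept=16)
seg_logit = torch.randn(2, 4, 8, 8)                # (N, C, H, W) logits for 4 classes
seg_label = torch.randint(0, 4, (2, 1, 8, 8))      # (N, 1, H, W) integer labels
seg_weight = sampler.sample(seg_logit, seg_label)  # (N, H, W); 1.0 marks the selected hard pixels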
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Init Module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/config/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default config file"""
from typing import Optional, List, Dict, Any
from dataclasses import dataclass, field
from omegaconf import MISSING
@dataclass
class NormConfig:
"""Configuration parameters for Normalization Preprocessing."""
type: str = "SyncBN" # Can be BN or SyncBN
requires_grad: bool = True # Whether to train the gamma beta parameters of BN
@dataclass
class TestModelConfig:
"""Configuration parameters for Inference."""
mode: str = "whole"
crop_size: Optional[List[int]] = None # Configurable
stride: Optional[List[int]] = None # Configurable
@dataclass
class LossDecodeConfig:
"""Configuration parameters for Loss."""
type: str = "CrossEntropyLoss"
use_sigmoid: bool = False
loss_weight: float = 1.0
@dataclass
class SegformerHeadConfig:
"""Configuration parameters for Segformer Head."""
# @subha TO DO: Look into align corners
in_channels: List[int] = field(default_factory=lambda: [64, 128, 320, 512]) # [64, 128, 320, 512], [32, 64, 160, 256]
in_index: List[int] = field(default_factory=lambda: [0, 1, 2, 3]) # No change
feature_strides: List[int] = field(default_factory=lambda: [4, 8, 16, 32]) # No change
channels: int = 128 # No change
dropout_ratio: float = 0.1
norm_cfg: NormConfig = NormConfig()
align_corners: bool = False
decoder_params: Dict[str, int] = field(default_factory=lambda: {"embed_dim": 768}) # 256, 512, 768 -> Configurable
loss_decode: LossDecodeConfig = LossDecodeConfig() # Non-configurable since there is only one loss
@dataclass
class MultiStepLRConfig:
"""Configuration parameters for Multi Step Optimizer."""
lr_steps: List[int] = field(default_factory=lambda: [15, 25])
lr_decay: float = 0.1
@dataclass
class PolyConfig:
"""Configuration parameters for Polynomial LR decay."""
# TODO: Check what _delete_ is used for
policy: str = "poly"
warmup: str = 'linear'
warmup_iters: int = 1500
warmup_ratio: float = 1e-6
power: float = 1.0
min_lr: float = 0.0
by_epoch: bool = False
@dataclass
class LRConfig:
"""Configuration parameters for LR Scheduler."""
# TODO: Check what _delete_ is used for
policy: str = "poly" # Non-configurable
warmup: str = 'linear' # Non-configurable
warmup_iters: int = 1500
warmup_ratio: float = 1e-6
power: float = 1.0
min_lr: float = 0.0
by_epoch: bool = False
@dataclass
class ParamwiseConfig:
"""Configuration parameters for Parameters."""
pos_block: Dict[str, float] = field(default_factory=lambda: {"decay_mult": 0.0})
norm: Dict[str, float] = field(default_factory=lambda: {"decay_mult": 0.0})
head: Dict[str, float] = field(default_factory=lambda: {"lr_mult": 10.0})
@dataclass
class SFOptimConfig:
"""Optimizer config."""
type: str = "AdamW"
lr: float = 0.00006
betas: List[float] = field(default_factory=lambda: [0.9, 0.999])
weight_decay: float = 0.01
paramwise_cfg: ParamwiseConfig = ParamwiseConfig()
@dataclass
class BackboneConfig:
"""Configuration parameters for Backbone."""
type: str = "mit_b5"
@dataclass
class SFModelConfig:
"""SF model config."""
pretrained_model_path: Optional[str] = None
backbone: BackboneConfig = BackboneConfig()
decode_head: SegformerHeadConfig = SegformerHeadConfig()
test_cfg: TestModelConfig = TestModelConfig()
input_width: int = 512
input_height: int = 512
# Use the field parameter in order to define as dictionaries
@dataclass
class RandomCropCfg:
"""Configuration parameters for Random Crop Aug."""
crop_size: List[int] = field(default_factory=lambda: [512, 512]) # Non - configurable
cat_max_ratio: float = 0.75
@dataclass
class ResizeCfg:
"""Configuration parameters for Resize Preprocessing."""
img_scale: Optional[List[int]] = None # configurable
ratio_range: List[float] = field(default_factory=lambda: [0.5, 2.0])
keep_ratio: bool = True
@dataclass
class SFAugmentationConfig:
"""Augmentation config."""
# @subha: TO Do: Add some more augmentation configurations which were not used in Segformer (later)
random_crop: RandomCropCfg = RandomCropCfg()
resize: ResizeCfg = ResizeCfg()
random_flip: Dict[str, float] = field(default_factory=lambda: {'prob': 0.5})
color_aug: Dict[str, str] = field(default_factory=lambda: {'type': 'PhotoMetricDistortion'})
@dataclass
class ImgNormConfig:
"""Configuration parameters for Img Normalization."""
mean: List[float] = field(default_factory=lambda: [123.675, 116.28, 103.53])
std: List[float] = field(default_factory=lambda: [58.395, 57.12, 57.375])
to_rgb: bool = True
@dataclass
class PipelineConfig:
"""Configuration parameters for Validation Pipe."""
img_norm_cfg: ImgNormConfig = ImgNormConfig()
multi_scale: Optional[List[int]] = None
augmentation_config: SFAugmentationConfig = SFAugmentationConfig()
Pad: Dict[str, int] = field(default_factory=lambda: {'size_ht': 1024, 'size_wd': 1024, 'pad_val': 0, 'seg_pad_val': 255}) # Non-configurable. Set based on model_input
CollectKeys: List[str] = field(default_factory=lambda: ['img', 'gt_semantic_seg'])
@dataclass
class seg_class:
"""Indiv color."""
seg_class: str = "background"
mapping_class: str = "background"
label_id: int = 0
rgb: List[int] = field(default_factory=lambda: [255, 255, 255])
@dataclass
class SFDatasetConfig:
"""Dataset Config."""
img_dir: Any = MISSING
ann_dir: Any = MISSING
pipeline: PipelineConfig = PipelineConfig()
@dataclass
class SFDatasetExpConfig:
"""Dataset config."""
data_root: str = MISSING
img_norm_cfg: ImgNormConfig = ImgNormConfig()
train_dataset: SFDatasetConfig = SFDatasetConfig()
val_dataset: SFDatasetConfig = SFDatasetConfig()
test_dataset: SFDatasetConfig = SFDatasetConfig()
palette: Optional[List[seg_class]] = None
seg_class_default: seg_class = seg_class()
dataloader: str = "Dataloader"
img_suffix: Optional[str] = None
seg_map_suffix: Optional[str] = None
repeat_data_times: int = 2
batch_size: int = 2
workers_per_gpu: int = 2
shuffle: bool = True
input_type: str = "rgb"
@dataclass
class SFExpConfig:
""" Overall Exp Config for Segformer. """
manual_seed: int = 47
distributed: bool = True
# If needed, the next line can be commented
gpu_ids: List[int] = field(default_factory=lambda: [0])
MASTER_ADDR: str = "127.0.0.1"
MASTER_PORT: int = 631
@dataclass
class TrainerConfig:
"""Train Config."""
sf_optim: SFOptimConfig = SFOptimConfig()
lr_config: LRConfig = LRConfig()
grad_clip: float = 0.0
find_unused_parameters: bool = True
@dataclass
class SFTrainExpConfig:
"""Train experiment config."""
results_dir: Optional[str] = None
encryption_key: str = MISSING
exp_config: SFExpConfig = SFExpConfig()
trainer: TrainerConfig = TrainerConfig()
num_gpus: int = 1 # non configurable here
max_iters: int = 10
logging_interval: int = 1
checkpoint_interval: int = 1
resume_training_checkpoint_path: Optional[str] = None
validation_interval: Optional[int] = 1
validate: bool = False
@dataclass
class SFInferenceExpConfig:
"""Inference experiment config."""
encryption_key: str = MISSING
results_dir: Optional[str] = None
gpu_id: int = 0
checkpoint: Optional[str] = None
exp_config: SFExpConfig = SFExpConfig()
num_gpus: int = 1 # non configurable here
trt_engine: Optional[str] = None
@dataclass
class SFEvalExpConfig:
"""Inference experiment config."""
results_dir: Optional[str] = None
encryption_key: str = MISSING
gpu_id: int = 0
checkpoint: Optional[str] = None
exp_config: SFExpConfig = SFExpConfig()
num_gpus: int = 1 # non configurable here
trt_engine: Optional[str] = None
@dataclass
class TrtConfig:
"""Trt config."""
data_type: str = "FP32"
workspace_size: int = 1024
min_batch_size: int = 1
opt_batch_size: int = 1
max_batch_size: int = 1
@dataclass
class SFExportExpConfig:
"""Export experiment config."""
results_dir: Optional[str] = None
encryption_key: str = MISSING
verify: bool = True
simplify: bool = False
batch_size: int = 1
opset_version: int = 11
trt_engine: Optional[str] = None
checkpoint: Optional[str] = None
onnx_file: Optional[str] = None
exp_config: SFExpConfig = SFExpConfig()
trt_config: TrtConfig = TrtConfig()
num_gpus: int = 1 # non configurable here
input_channel: int = 3
input_width: int = 1024
input_height: int = 1024
@dataclass
class GenTrtEngineExpConfig:
"""Gen TRT Engine experiment config."""
results_dir: Optional[str] = None
gpu_id: int = 0
onnx_file: Optional[str] = None
trt_engine: Optional[str] = None
input_channel: int = 3
input_width: int = 224
input_height: int = 224
opset_version: int = 12
batch_size: int = -1
verbose: bool = False
tensorrt: TrtConfig = TrtConfig()
@dataclass
class ExperimentConfig:
"""Experiment config."""
model: SFModelConfig = SFModelConfig()
dataset: SFDatasetExpConfig = SFDatasetExpConfig()
train: SFTrainExpConfig = SFTrainExpConfig()
evaluate: SFEvalExpConfig = SFEvalExpConfig()
inference: SFInferenceExpConfig = SFInferenceExpConfig()
gen_trt_engine: GenTrtEngineExpConfig = GenTrtEngineExpConfig()
export: SFExportExpConfig = SFExportExpConfig()
encryption_key: Optional[str] = None
results_dir: str = MISSING
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/config/default_config.py |
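A hedged sketch of materialising this dataclass tree with OmegaConf; the override values are illustrative only.

from omegaconf import OmegaConf
from nvidia_tao_pytorch.cv.segformer.config.default_config import ExperimentConfig

base = OmegaConf.structured(ExperimentConfig)
overrides = OmegaConf.create({
    "results_dir": "/results/segformer",  # illustrative path
    "dataset": {"data_root": "/data/cityscapes", "batch_size": 4},
    "model": {"backbone": {"type": "mit_b1"}},
})
cfg = OmegaConf.merge(base, overrides)  # merged and type-checked against the dataclass schema
print(cfg.model.backbone.type)          # mit_b1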
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmsegmentation
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collect Env Module."""
from mmcv.utils import collect_env as collect_base_env
def collect_env():
"""Collect the information of the running environments."""
env_info = collect_base_env()
# env_info['MMSegmentation'] = f'{mmseg.__version__}+{get_git_hash()[:7]}'
return env_info
if __name__ == '__main__':
"""Main Function."""
for name, val in collect_env().items():
print('{}: {}'.format(name, val))
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/utils/collect_env.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils Init Module."""
from .collect_env import collect_env
from .logger import get_root_logger, print_log
__all__ = ['get_root_logger', 'collect_env', 'print_log']
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmsegmentation
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logging Module."""
import logging
from mmcv.utils import get_logger
def get_root_logger(log_file=None, log_level=logging.INFO):
"""Get the root logger.
The logger will be initialized if it has not been initialized. By default a
StreamHandler will be added. If `log_file` is specified, a FileHandler will
also be added. The name of the root logger is the top-level package name,
e.g., "mmseg".
Args:
log_file (str | None): The log filename. If specified, a FileHandler
will be added to the root logger.
log_level (int): The root logger level. Note that only the process of
rank 0 is affected, while other processes will set the level to
"Error" and be silent most of the time.
Returns:
logging.Logger: The root logger.
"""
logger = get_logger(name='mmseg', log_file=log_file, log_level=log_level)
return logger
def print_log(msg, logger=None, level=logging.INFO):
"""Print a log message.
Args:
msg (str): The message to be logged.
logger (logging.Logger | str | None): The logger to be used. Some
special loggers are:
- "root": the root logger obtained with `get_root_logger()`.
- "silent": no message will be printed.
- None: The `print()` method will be used to print log messages.
level (int): Logging level. Only available when `logger` is a Logger
object or "root".
"""
if logger is None:
print(msg)
elif logger == 'root':
_logger = get_root_logger()
_logger.log(level, msg)
elif isinstance(logger, logging.Logger):
logger.log(level, msg)
elif logger != 'silent':
raise TypeError(
'logger should be either a logging.Logger object, "root", '
'"silent" or None, but got {}'.format(logger))
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/utils/logger.py |
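Brief usage of the logging helpers above; the log file path is illustrative.

import logging
from nvidia_tao_pytorch.cv.segformer.utils.logger import get_root_logger, print_log

logger = get_root_logger(log_file='/tmp/mmseg_train.log')             # illustrative path
print_log('starting evaluation', logger='root', level=logging.INFO)   # routes via the 'mmseg' root logger
print_log('plain message')                                            # logger=None falls back to print()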
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
"""Utils for Segformer Segmentation"""
import os
import struct
from eff.core.codec import encrypt_stream
import shutil
def check_and_create(d):
"""Create a directory."""
if not os.path.isdir(d):
os.makedirs(d, exist_ok=True)
def check_and_delete(d):
"""Delete a directory."""
if os.path.isdir(d):
shutil.rmtree(d)
def encrypt_onnx(tmp_file_name, output_file_name, key):
"""Encrypt the onnx model."""
with open(tmp_file_name, "rb") as open_temp_file, open(output_file_name,
"wb") as open_encoded_file:
# set the input name magic number
open_encoded_file.write(struct.pack("<i", 0))
encrypt_stream(
input_stream=open_temp_file, output_stream=open_encoded_file,
passphrase=key, encryption=True
)
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/utils/common_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmsegmentation
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tao status logger for segformer """
import os
from collections import OrderedDict
from typing import Dict
import torch
from mmcv.runner.hooks import HOOKS, TextLoggerHook
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
import json
from time import strftime, gmtime
STATUS_JSON_FILENAME = "status.json"
@HOOKS.register_module()
class TaoTextLoggerHook(TextLoggerHook):
""" Logger hook in text. """
def __init__(self, *args, **kwargs):
""" init """
self.s_logger = status_logging.get_status_logger()
super(TaoTextLoggerHook, self).__init__(*args, **kwargs)
def _status_log(self, log_dict: Dict, runner) -> None:
""" Status Logging
Args:
log_dict (Dict): Dictionary with logging values
runner (class): MMLab trainer instance
"""
monitor_data = {}
monitor_data["mode"] = log_dict["mode"]
if self.by_epoch:
monitor_data["cur_epoch"] = log_dict["epoch"]
else:
monitor_data["cur_iter"] = log_dict["iter"]
if log_dict['mode'] == 'val':
if int(os.environ['LOCAL_RANK']) == 0: # In multi-GPU setting
monitor_data["mIoU"] = log_dict["mIoU"]
monitor_data["mAcc"] = log_dict["mAcc"]
self.s_logger.kpi = {
"Mean IOU": log_dict["mIoU"],
"mAcc": log_dict["mAcc"]
}
if log_dict['mode'] == 'train':
running_avg_loss = log_dict["loss"]
monitor_data["loss"] = running_avg_loss
time_sec_avg = self.time_sec_tot / (runner.iter - self.start_iter + 1) # Per iter
monitor_data["time_per_iter"] = strftime("%H:%M:%S", gmtime(time_sec_avg))
monitor_data["train_accuracy"] = log_dict["decode.acc_seg"]
self.s_logger.graphical = {
"loss": running_avg_loss,
"train_accuracy": log_dict["decode.acc_seg"]
}
try:
self.s_logger.write(
data=monitor_data,
status_level=status_logging.Status.RUNNING)
except IOError:
# We let this pass because we do not want the json file writing to crash the whole job.
pass
# Save the json file.
filename = os.path.join(runner.work_dir, STATUS_JSON_FILENAME)
try:
with open(filename, "a+") as f:
json.dump(monitor_data, f)
f.write('\n')
except IOError:
# We let this pass because we do not want the json file writing to crash the whole job.
pass
def log(self, runner) -> OrderedDict:
""" log runner """
if 'eval_iter_num' in runner.log_buffer.output:
# this doesn't modify runner.iter and is regardless of by_epoch
cur_iter = runner.log_buffer.output.pop('eval_iter_num')
else:
cur_iter = self.get_iter(runner, inner_iter=True)
log_dict = OrderedDict(
mode=self.get_mode(runner),
epoch=self.get_epoch(runner),
iter=cur_iter)
# only record lr of the first param group
cur_lr = runner.current_lr()
if isinstance(cur_lr, list):
log_dict['lr'] = cur_lr[0]
else:
assert isinstance(cur_lr, dict)
log_dict['lr'] = {}
for k, lr_ in cur_lr.items():
assert isinstance(lr_, list)
log_dict['lr'].update({k: lr_[0]})
if 'time' in runner.log_buffer.output:
# statistic memory
if torch.cuda.is_available():
log_dict['memory'] = self._get_max_memory(runner)
log_dict = dict(log_dict, **runner.log_buffer.output) # type: ignore
self._log_info(log_dict, runner)
self._dump_log(log_dict, runner)
self._status_log(log_dict, runner)
return log_dict
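# Output format note: each call to _status_log appends one JSON line to status.json
# under runner.work_dir. A training-mode record looks roughly like
# {"mode": "train", "cur_iter": 100, "loss": 0.42, "time_per_iter": "00:00:01", "train_accuracy": 87.5}
# (all values illustrative only).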
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/hooks/tao_status_logger.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Init Module. """
from nvidia_tao_pytorch.cv.segformer.hooks.tao_status_logger import TaoTextLoggerHook
__all__ = ['TaoTextLoggerHook']
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/hooks/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Segformer Scripts File """
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/scripts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Export of Segformer model.
"""
import os
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
from nvidia_tao_pytorch.cv.segformer.config.default_config import ExperimentConfig
from nvidia_tao_pytorch.cv.segformer.utils.common_utils import check_and_create
from nvidia_tao_pytorch.cv.segformer.model.sf_model import SFModel
from nvidia_tao_pytorch.cv.segformer.dataloader.segformer_dm import SFDataModule
from nvidia_tao_pytorch.cv.segformer.model.builder import build_segmentor
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.pointcloud.pointpillars.pcdet.utils import common_utils
import datetime
import numpy as np
from functools import partial
import onnxruntime as rt
import onnx
import torch
import torch._C
import torch.serialization
from mmcv.runner.checkpoint import load_checkpoint
from mmcv.runner import get_dist_info, init_dist
from mmcv.onnx import register_extra_symbolics
def _demo_mm_inputs(input_shape, num_classes):
"""Create a superset of inputs needed to run test or train batches.
Args:
input_shape (tuple):
input batch dimensions
num_classes (int):
number of semantic classes
"""
(N, C, H, W) = input_shape
rng = np.random.RandomState(0)
imgs = rng.rand(*input_shape)
segs = rng.randint(
low=0, high=num_classes - 1, size=(N, 1, H, W)).astype(np.uint8)
img_metas = [{
'img_shape': (H, W, C),
'ori_shape': (H, W, C),
'pad_shape': (H, W, C),
'filename': '<demo>.png',
'scale_factor': 1.0,
'flip': False}]
mm_inputs = {
'imgs': torch.FloatTensor(imgs),
'img_metas': img_metas,
'gt_semantic_seg': torch.LongTensor(segs)
}
return mm_inputs
def pytorch2onnx(model,
input_shape,
num_classes,
opset_version=11,
show=False,
output_file='tmp.onnx',
verify=False,
test_cfg=None,
simplify=False,
logger=None):
"""Export Pytorch model to ONNX model and verify the outputs are same
between Pytorch and ONNX.
Args:
model (nn.Module): Pytorch model we want to export.
input_shape (tuple): Use this input shape to construct
the corresponding dummy input and execute the model.
opset_version (int): The onnx op version. Default: 11.
show (bool): Whether print the computation graph. Default: False.
output_file (string): The path to where we store the output ONNX model.
Default: `tmp.onnx`.
verify (bool): Whether compare the outputs between Pytorch and ONNX.
Default: False.
"""
model.cuda()
model.eval()
mm_inputs = _demo_mm_inputs(input_shape, num_classes)
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
# avoid input splits
img_list = [imgs]
img_meta_list = [img_metas]
# replace original forward function
origin_forward = model.forward_export
model.forward = partial(
model.forward_export, img_meta_list, False)
register_extra_symbolics(opset_version)
with torch.no_grad():
torch.onnx.export(model,
(img_list, ),
output_file,
export_params=True,
input_names=['input'],
output_names=["output"],
dynamic_axes={"input": {0: "batch_size"}, 'output': {0: 'batch_size'}},
verbose=show,
opset_version=opset_version)
logger.info('Successfully exported ONNX model.')
model.forward = origin_forward
final_tmp_onnx_path = output_file
if simplify:
logger.info('[INFO] Simplifying model...')
from onnxsim import simplify
onnx_model = onnx.load(output_file)
# simplifying dynamic model
_, C, H, W = imgs.shape
simplified_model, _ = simplify(onnx_model,
                                       input_shapes={'input': (1, C, H, W)},  # static check with batch size 1
dynamic_input_shape=True,
check_n=3)
simplified_path = output_file[:-5] + "_sim.onnx"
onnx.save(simplified_model, simplified_path)
final_tmp_onnx_path = simplified_path
if verify:
# check by onnx
onnx_model = onnx.load(final_tmp_onnx_path)
onnx.checker.check_model(onnx_model)
# check the numerical value
# get pytorch output
pytorch_result = model(img_meta_list, False, img_list)[0]
# get onnx output
input_all = [node.name for node in onnx_model.graph.input]
input_initializer = [
node.name for node in onnx_model.graph.initializer
]
net_feed_input = list(set(input_all) - set(input_initializer))
assert (len(net_feed_input) == 1), "Input dimensions do not match."
sess = rt.InferenceSession(output_file)
onnx_result = sess.run(None, {net_feed_input[0]: img_list[0].detach().numpy()})[0][0, :, :, 0]
if not np.allclose(pytorch_result, onnx_result):
raise ValueError(
'The outputs are different between Pytorch and ONNX')
        logger.info('The outputs are the same between PyTorch and ONNX')
def run_experiment(experiment_config, results_dir):
"""Start the Export.
Args:
        experiment_config (Dict): Config dictionary containing experiment parameters
results_dir (str): Results dir to save the exported ONNX.
"""
check_and_create(results_dir)
# Set the logger
    log_file = os.path.join(results_dir, 'log_export_{}.txt'.format(datetime.datetime.now().strftime('%Y%m%d-%H%M%S')))
logger = common_utils.create_logger(log_file, rank=0)
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(status_logging.StatusLogger(filename=status_file, append=True))
status_logger = status_logging.get_status_logger()
# log to file
logger.info('********************** Start logging for Export.**********************')
status_logger.write(message="**********************Start logging for Export**********************.")
num_gpus = 1
seed = experiment_config["export"]["exp_config"]["manual_seed"]
# Need to change this
rank, world_size = get_dist_info()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(0)
if "RANK" not in os.environ:
os.environ['RANK'] = str(rank)
if "WORLD_SIZE" not in os.environ:
os.environ['WORLD_SIZE'] = str(world_size)
if "MASTER_PORT" not in os.environ:
os.environ['MASTER_PORT'] = str(experiment_config["export"]["exp_config"]["MASTER_PORT"])
if "MASTER_ADDR" not in os.environ:
os.environ['MASTER_ADDR'] = experiment_config["export"]["exp_config"]["MASTER_ADDR"]
init_dist(launcher="pytorch", backend="nccl")
dm = SFDataModule(experiment_config["dataset"], num_gpus, seed, logger, "eval", experiment_config["model"]["input_height"], experiment_config["model"]["input_width"])
dm.setup()
sf_model = SFModel(experiment_config, phase="eval", num_classes=dm.num_classes)
CLASSES = dm.CLASSES
PALETTE = dm.PALETTE
model_path = experiment_config["export"]["checkpoint"]
if not model_path:
raise ValueError("You need to provide the model path for Export.")
model_to_test = build_segmentor(sf_model.model_cfg, test_cfg=None)
model_to_test = sf_model._convert_batchnorm(model_to_test)
_ = load_checkpoint(model_to_test, model_path, map_location='cpu')
model_to_test.CLASSES = CLASSES
model_to_test.PALETTE = PALETTE
output_file = experiment_config["export"]["onnx_file"]
if not output_file:
onnx_path = model_path.replace(".pth", ".onnx")
else:
onnx_path = output_file
input_channel = experiment_config["export"]["input_channel"]
input_height = experiment_config["export"]["input_height"]
input_width = experiment_config["export"]["input_width"]
input_shape = [1] + [input_channel, input_height, input_width]
pytorch2onnx(model_to_test,
input_shape,
opset_version=experiment_config["export"]["opset_version"],
show=False,
output_file=onnx_path,
verify=False,
num_classes=dm.num_classes,
test_cfg=sf_model.test_cfg,
simplify=experiment_config["export"]["simplify"],
logger=logger)
status_logger.write(message="Completed Export.", status_level=status_logging.Status.SUCCESS)
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load experiment specification, additionally using schema for validation/retrieving the default values.
# --config_path and --config_name will be provided by the entrypoint script.
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="export_fan", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""Run the Export."""
try:
if cfg.export.results_dir is not None:
results_dir = cfg.export.results_dir
else:
results_dir = os.path.join(cfg.results_dir, "export")
run_experiment(experiment_config=cfg,
results_dir=results_dir)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Export was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/scripts/export.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train Segformer model."""
import os
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
from nvidia_tao_pytorch.cv.segformer.config.default_config import ExperimentConfig
from nvidia_tao_pytorch.cv.segformer.utils.common_utils import check_and_create
from nvidia_tao_pytorch.cv.segformer.model.sf_model import SFModel
from nvidia_tao_pytorch.cv.segformer.dataloader.segformer_dm import SFDataModule
from nvidia_tao_pytorch.cv.segformer.utils import collect_env, get_root_logger
from nvidia_tao_pytorch.cv.segformer.trainer.trainer import train_segmentor, set_random_seed
from nvidia_tao_pytorch.cv.segformer.dataloader.data_utils import build_dataset
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.cv.segformer.model.builder import build_segmentor
from nvidia_tao_pytorch.cv.segformer.hooks.tao_status_logger import TaoTextLoggerHook # noqa pylint: disable=W0611
from mmcv.runner import init_dist, get_dist_info
from omegaconf import OmegaConf
import warnings
import datetime
import json
import time
def get_latest_pth_model(results_dir):
"""Utility function to return the latest pth model in a dir.
Args:
results_dir (str): Results dir to save the checkpoints.
"""
trainable_ckpts = [int(item.split('.')[0].split('_')[1]) for item in os.listdir(results_dir)
if item.endswith(".pth")]
num_ckpts = len(trainable_ckpts)
if num_ckpts == 0:
return None
latest_step = sorted(trainable_ckpts, reverse=True)[0]
latest_checkpoint = os.path.join(results_dir, "iter_{}.pth".format(latest_step))
if not os.path.isfile(latest_checkpoint):
        raise FileNotFoundError("Checkpoint file not found at {}".format(latest_checkpoint))
return latest_checkpoint
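# Minimal usage sketch for get_latest_pth_model (illustrative only; the scratch
# directory and dummy checkpoint names below are assumptions, not files produced
# by this script).
def _example_get_latest_pth_model(scratch_dir="/tmp/segformer_ckpts"):
    """Create two empty dummy checkpoints and return the path of the latest one."""
    os.makedirs(scratch_dir, exist_ok=True)
    for step in (500, 1000):
        open(os.path.join(scratch_dir, "iter_{}.pth".format(step)), "w").close()
    # Returns the path ending in "iter_1000.pth"; returns None if no ".pth" files exist.
    return get_latest_pth_model(scratch_dir)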
def run_experiment(experiment_config, results_dir):
"""Start the training.
Args:
        experiment_config (Dict): Config dictionary containing experiment parameters
results_dir (str): Results dir to save the trained checkpoints.
"""
check_and_create(results_dir)
# Set the logger
log_file = os.path.join(results_dir, 'log_train_{}.txt'.format(datetime.datetime.now().strftime('%Y%m%d-%H%M%S')))
logger = get_root_logger(log_file=log_file, log_level="INFO")
meta = dict()
# log env info
env_info_dict = collect_env()
env_info = '\n'.join([f'{k}: {v}' for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' +
dash_line)
meta['env_info'] = env_info
exp_params_file = os.path.join(results_dir, "experiment_params.json")
try:
with open(exp_params_file, 'w') as fp:
exp_cfg_dict = OmegaConf.to_container(experiment_config)
json.dump(exp_cfg_dict, fp)
except Exception as e:
warnings.warn("The expeirment spec paras could not be dumped into file due to {}.".format(e))
num_gpus = experiment_config["train"]["num_gpus"]
seed = experiment_config["train"]["exp_config"]["manual_seed"]
# Need to change this
rank, world_size = get_dist_info()
# If distributed these env variables are set by torchrun
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(0)
if "RANK" not in os.environ:
os.environ['RANK'] = str(rank)
if "WORLD_SIZE" not in os.environ:
os.environ['WORLD_SIZE'] = str(world_size)
if "MASTER_PORT" not in os.environ:
os.environ['MASTER_PORT'] = str(experiment_config["train"]["exp_config"]["MASTER_PORT"])
if "MASTER_ADDR" not in os.environ:
os.environ['MASTER_ADDR'] = experiment_config["train"]["exp_config"]["MASTER_ADDR"]
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(status_logging.StatusLogger(filename=status_file, append=True))
status_logger = status_logging.get_status_logger()
# log to file
logger.info('**********************Start logging for Training**********************')
status_logger.write(message="**********************Start logging for Training**********************.")
distributed = experiment_config["train"]["exp_config"]["distributed"]
max_iters = experiment_config["train"]['max_iters']
resume_ckpt = experiment_config["train"]['resume_training_checkpoint_path']
init_dist(launcher="pytorch", backend="nccl")
dm = SFDataModule(experiment_config["dataset"], num_gpus, seed, logger, "train", experiment_config["model"]["input_height"], experiment_config["model"]["input_width"])
set_random_seed(seed, deterministic=False)
with open(os.path.join(results_dir, 'target_class_id_mapping.json'), 'w') as fp:
json.dump(dm.target_classes_train_mapping, fp)
logger.info("Completed Data Module Construction")
status_logger.write(message="Completed Data Module Construction", status_level=status_logging.Status.RUNNING)
sf_model = SFModel(experiment_config, phase="train", num_classes=dm.num_classes)
dm.setup()
sf_model.max_iters = max_iters
if not resume_ckpt:
resume_ckpt = get_latest_pth_model(results_dir)
sf_model.resume_ckpt = resume_ckpt
sf_model.checkpoint_interval = experiment_config["train"]["checkpoint_interval"]
dm.log_interval = experiment_config["train"]["logging_interval"]
datasets = [build_dataset(dm.train_data, dm.default_args)]
model = build_segmentor(
sf_model.model_cfg,
train_cfg=None,
test_cfg=None)
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
model.CLASSES = dm.CLASSES
train_segmentor(
model,
datasets,
distributed=distributed,
validate=experiment_config["train"]["validate"],
timestamp=timestamp,
meta=meta,
result_dir=results_dir,
dm=dm,
sf_model=sf_model)
status_logger.write(message="Completed Training.", status_level=status_logging.Status.SUCCESS)
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load experiment specification, additionally using schema for validation/retrieving the default values.
# --config_path and --config_name will be provided by the entrypoint script.
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="train_isbi", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""Run the training process."""
try:
if cfg.train.results_dir is not None:
results_dir = cfg.train.results_dir
else:
results_dir = os.path.join(cfg.results_dir, "train")
run_experiment(experiment_config=cfg,
results_dir=results_dir)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Train was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/scripts/train.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Inference of Segformer model.
"""
import os
import torch
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
from nvidia_tao_pytorch.cv.segformer.config.default_config import ExperimentConfig
from nvidia_tao_pytorch.cv.segformer.utils.common_utils import check_and_create
from nvidia_tao_pytorch.cv.segformer.inference.inferencer import multi_gpu_test, single_gpu_test
from nvidia_tao_pytorch.cv.segformer.model.sf_model import SFModel
from nvidia_tao_pytorch.cv.segformer.dataloader.segformer_dm import SFDataModule
from nvidia_tao_pytorch.cv.segformer.dataloader.data_utils import build_dataloader
from nvidia_tao_pytorch.cv.segformer.model.builder import build_segmentor
from nvidia_tao_pytorch.cv.segformer.dataloader.data_utils import build_dataset
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.pointcloud.pointpillars.pcdet.utils import common_utils
from omegaconf import OmegaConf
from mmcv.runner import get_dist_info, init_dist
from mmcv.parallel import MMDistributedDataParallel, MMDataParallel
from mmcv.runner.checkpoint import load_checkpoint
import datetime
def run_experiment(experiment_config, results_dir):
"""Start the inference.
Args:
        experiment_config (Dict): Config dictionary containing experiment parameters
results_dir (str): Results dir to save the inference images
"""
check_and_create(results_dir)
# Set the logger
log_file = os.path.join(results_dir, 'log_inference_{}.txt'.format(datetime.datetime.now().strftime('%Y%m%d-%H%M%S')))
logger = common_utils.create_logger(log_file, rank=0)
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(status_logging.StatusLogger(filename=status_file, append=True))
status_logger = status_logging.get_status_logger()
# log to file
logger.info('********************** Start logging for Inference.**********************')
status_logger.write(message="**********************Start logging for Inference**********************.")
num_gpus = experiment_config["inference"]["num_gpus"]
seed = experiment_config["inference"]["exp_config"]["manual_seed"]
# Need to change this
rank, world_size = get_dist_info()
# If distributed these are set by torchrun
if "LOCAL_RANK" not in os.environ:
os.environ['LOCAL_RANK'] = str(0)
if "RANK" not in os.environ:
os.environ['RANK'] = str(rank)
if "WORLD_SIZE" not in os.environ:
os.environ['WORLD_SIZE'] = str(world_size)
if "MASTER_PORT" not in os.environ:
os.environ['MASTER_PORT'] = str(experiment_config["inference"]["exp_config"]["MASTER_PORT"])
if "MASTER_ADDR" not in os.environ:
os.environ['MASTER_ADDR'] = experiment_config["inference"]["exp_config"]["MASTER_ADDR"]
init_dist(launcher="pytorch", backend="nccl")
dm = SFDataModule(experiment_config["dataset"], num_gpus, seed, logger, "infer", experiment_config["model"]["input_height"], experiment_config["model"]["input_width"])
if experiment_config["dataset"]["palette"]:
pallete_colors = OmegaConf.to_container(experiment_config["dataset"]["palette"])
else:
        pallete_colors = None
dm.setup()
# test_dataset = dm.test_dataset
sf_model = SFModel(experiment_config, phase="infer", num_classes=dm.num_classes)
test_dataset = build_dataset(dm.test_data, dm.default_args)
data_loader = build_dataloader(
test_dataset,
samples_per_gpu=dm.samples_per_gpu,
workers_per_gpu=dm.workers_per_gpu,
dist=True,
shuffle=False)
CLASSES = dm.CLASSES
PALETTE = dm.PALETTE
model_path = experiment_config["inference"]["checkpoint"]
if not model_path:
raise ValueError("You need to provide the model path for Evaluation.")
model_to_test = build_segmentor(sf_model.model_cfg, test_cfg=None)
_ = load_checkpoint(model_to_test, model_path, map_location='cpu')
model_to_test.CLASSES = CLASSES
model_to_test.PALETTE = PALETTE
efficient_test = True # False
distributed = True
if not distributed:
model_to_test = MMDataParallel(model_to_test, device_ids=[0])
outputs = single_gpu_test(model_to_test, data_loader, False, results_dir,
efficient_test)
else:
model_to_test = MMDistributedDataParallel(
model_to_test.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False)
outputs = multi_gpu_test(model_to_test, data_loader, None,
gpu_collect=True, efficient_test=True)
rank, _ = get_dist_info()
kwargs = {}
kwargs["imgfile_prefix"] = results_dir
if rank == 0:
test_dataset.format_results(outputs, **kwargs)
status_logger.write(message="Completed Inference.", status_level=status_logging.Status.SUCCESS)
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load experiment specification, additionally using schema for validation/retrieving the default values.
# --config_path and --config_name will be provided by the entrypoint script.
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="test_isbi", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""Run the Inference process."""
try:
# Obfuscate logs.
if cfg.inference.results_dir is not None:
results_dir = cfg.inference.results_dir
else:
results_dir = os.path.join(cfg.results_dir, "inference")
run_experiment(experiment_config=cfg,
results_dir=results_dir)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Inference was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/scripts/inference.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Evaluation of Segformer model.
"""
import os
import torch
from nvidia_tao_pytorch.cv.segformer.config.default_config import ExperimentConfig
from nvidia_tao_pytorch.cv.segformer.utils.common_utils import check_and_create
from nvidia_tao_pytorch.cv.segformer.inference.inferencer import multi_gpu_test, single_gpu_test
from nvidia_tao_pytorch.cv.segformer.model.sf_model import SFModel
from nvidia_tao_pytorch.cv.segformer.dataloader.segformer_dm import SFDataModule
from nvidia_tao_pytorch.cv.segformer.dataloader.data_utils import build_dataloader
from nvidia_tao_pytorch.cv.segformer.model.builder import build_segmentor
from nvidia_tao_pytorch.cv.segformer.dataloader.data_utils import build_dataset
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
from nvidia_tao_pytorch.pointcloud.pointpillars.pcdet.utils import common_utils
from mmcv.runner.checkpoint import load_checkpoint
from mmcv.runner import get_dist_info, init_dist
from mmcv.parallel import MMDistributedDataParallel, MMDataParallel
import datetime
import json
def run_experiment(experiment_config, results_dir):
"""Start the evaluate.
Args:
        experiment_config (Dict): Config dictionary containing experiment parameters
results_dir (str): Results dir to save the evaluation result
"""
check_and_create(results_dir)
# Set the logger
log_file = os.path.join(results_dir, 'log_evaluate_{}.txt'.format(datetime.datetime.now().strftime('%Y%m%d-%H%M%S')))
logger = common_utils.create_logger(log_file, rank=0)
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(status_logging.StatusLogger(filename=status_file, append=True))
status_logger = status_logging.get_status_logger()
# log to file
logger.info('********************** Start logging for Evaluation.**********************')
status_logger.write(message="**********************Start logging for Evaluation**********************.")
num_gpus = experiment_config["evaluate"]["num_gpus"]
seed = experiment_config["evaluate"]["exp_config"]["manual_seed"]
# Need to change this
rank, world_size = get_dist_info()
# If distributed these are set by torchrun
if "LOCAL_RANK" not in os.environ:
os.environ['LOCAL_RANK'] = str(0)
if "RANK" not in os.environ:
os.environ['RANK'] = str(rank)
if "WORLD_SIZE" not in os.environ:
os.environ['WORLD_SIZE'] = str(world_size)
if "MASTER_PORT" not in os.environ:
os.environ['MASTER_PORT'] = str(experiment_config["evaluate"]["exp_config"]["MASTER_PORT"])
if "MASTER_ADDR" not in os.environ:
os.environ['MASTER_ADDR'] = experiment_config["evaluate"]["exp_config"]["MASTER_ADDR"]
init_dist(launcher="pytorch", backend="nccl")
dm = SFDataModule(experiment_config["dataset"], num_gpus, seed, logger, "eval", experiment_config["model"]["input_height"], experiment_config["model"]["input_width"])
dm.setup()
sf_model = SFModel(experiment_config, phase="eval", num_classes=dm.num_classes)
test_dataset = build_dataset(dm.test_data, dm.default_args)
data_loader = build_dataloader(
test_dataset,
samples_per_gpu=dm.samples_per_gpu,
workers_per_gpu=dm.workers_per_gpu,
dist=True,
shuffle=False)
CLASSES = dm.CLASSES
PALETTE = dm.PALETTE
model_path = experiment_config["evaluate"]["checkpoint"]
if not model_path:
raise ValueError("You need to provide the model path for Evaluation.")
model_to_test = build_segmentor(sf_model.model_cfg, test_cfg=None)
_ = load_checkpoint(model_to_test, model_path, map_location='cpu')
model_to_test.CLASSES = CLASSES
model_to_test.PALETTE = PALETTE
efficient_test = True # False
distributed = True
if not distributed:
model_to_test = MMDataParallel(model_to_test, device_ids=[0])
outputs = single_gpu_test(model_to_test, data_loader, False, results_dir,
efficient_test)
else:
model_to_test = MMDistributedDataParallel(
model_to_test.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False)
outputs = multi_gpu_test(model_to_test, data_loader, None,
gpu_collect=True, efficient_test=True)
rank, _ = get_dist_info()
kwargs = {}
kwargs["imgfile_prefix"] = results_dir
results_file = os.path.join(results_dir, "results_tao.json")
if rank == 0:
eval_results = test_dataset.evaluate(outputs, **kwargs)
with open(results_file, 'w') as fp:
json.dump(str(eval_results), fp)
status_logger.kpi = {"Mean IOU": eval_results["mIoU"],
"mAcc": eval_results["mAcc"],
"aAcc": eval_results["aAcc"]}
status_logger.write(status_level=status_logging.Status.RUNNING)
status_logger.write(message="Completed Evaluation.", status_level=status_logging.Status.SUCCESS)
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load experiment specification, additionally using schema for validation/retrieving the default values.
# --config_path and --config_name will be provided by the entrypoint script.
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="test_isbi", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""Run the training process."""
try:
# Obfuscate logs.
if cfg.evaluate.results_dir is not None:
results_dir = cfg.evaluate.results_dir
else:
results_dir = os.path.join(cfg.results_dir, "evaluate")
run_experiment(experiment_config=cfg,
results_dir=results_dir)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Evaluation was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/scripts/evaluate.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entrypoint script for the Segformer task."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/entrypoint/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmsegmentation
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""'Entry point script running subtasks related to Segformer."""
import importlib
import os
import pkgutil
import argparse
import subprocess
import sys
from time import time
import nvidia_tao_pytorch.cv.segformer.scripts as scripts
from nvidia_tao_pytorch.core.telemetry.nvml_utils import get_device_details
from nvidia_tao_pytorch.core.telemetry.telemetry import send_telemetry_data
def get_subtasks(package):
"""Get supported subtasks for a given task.
    This function lists the tasks in the .scripts folder.
Returns:
subtasks (dict): Dictionary of files.
"""
module_path = package.__path__
modules = {}
# Collect modules dynamically.
for _, task, is_package in pkgutil.walk_packages(module_path):
if is_package:
continue
module_name = package.__name__ + '.' + task
module_details = {
"module_name": module_name,
"runner_path": os.path.abspath(importlib.import_module(module_name).__file__),
}
modules[task] = module_details
return modules
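# Shape of the returned mapping for this package (paths illustrative; actual values
# depend on where the package is installed):
# {
#     "train":    {"module_name": "nvidia_tao_pytorch.cv.segformer.scripts.train",
#                  "runner_path": "/.../segformer/scripts/train.py"},
#     "evaluate": {"module_name": "nvidia_tao_pytorch.cv.segformer.scripts.evaluate",
#                  "runner_path": "/.../segformer/scripts/evaluate.py"},
#     ...
# }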
def launch(parser, subtasks, network=None):
"""CLI function that executes subtasks.
Args:
parser: Created parser object for a given task.
subtasks: list of subtasks for a given task.
network: Name of the network running training.
"""
if network is None:
network = "tao_pytorch"
# Subtasks for a given model.
parser.add_argument(
'subtask', default='train', choices=subtasks.keys(), help="Subtask for a given task/model.",
)
# Add standard TLT arguments.
parser.add_argument(
"-r",
"--results_dir",
help="Path to a folder where the experiment outputs should be written.",
default=None,
required=False,
)
parser.add_argument("-e", "--experiment_spec_file", help="Path to the experiment spec file.", default=None)
parser.add_argument("--gpus", "-g", type=int, default=1, help="Number of GPUs")
# Parse the arguments.
args, unknown_args = parser.parse_known_args()
script_args = ""
# Process spec file for all commands except the one for getting spec files ;)
# Make sure the user provides spec file.
if args.experiment_spec_file is None:
print("ERROR: The subtask `{}` requires the following argument: -e/--experiment_spec_file".format(args.subtask))
exit(1)
# Make sure the file exists!
if not os.path.exists(args.experiment_spec_file):
print("ERROR: The indicated experiment spec file `{}` doesn't exist!".format(args.experiment_spec_file))
exit(1)
# Split spec file_path into config path and config name.
path, name = os.path.split(args.experiment_spec_file)
if path != '':
script_args += " --config-path " + os.path.realpath(path)
script_args += " --config-name " + name
if args.gpus > 1:
if args.subtask == "export":
raise ValueError("Export does not support multi-gpu")
else:
script_args += f" {args.subtask}.num_gpus={args.gpus}"
# And add other params AFTERWARDS!
if args.results_dir:
script_args += " results_dir=" + args.results_dir
# Find relevant module and pass args.
script = subtasks[args.subtask]["runner_path"]
# Pass unknown args to call
unknown_args_as_str = " ".join(unknown_args)
# Create a system call.
call = (
f"torchrun --nproc_per_node={args.gpus} " + script + script_args + " " + unknown_args_as_str
)
process_passed = True
start = time()
try:
# Run the script.
subprocess.check_call(call, shell=True, stdout=sys.stdout, stderr=sys.stdout)
except (KeyboardInterrupt, SystemExit):
print("Command was interrupted.")
process_passed = True
except subprocess.CalledProcessError as e:
if e.output is not None:
print(e.output)
process_passed = False
end = time()
time_lapsed = int(end - start)
try:
gpu_data = list()
for device in get_device_details():
gpu_data.append(device.get_config())
send_telemetry_data(
network,
args.subtask,
gpu_data,
num_gpus=1,
time_lapsed=time_lapsed,
pass_status=process_passed
)
except Exception as e:
print("Telemetry data couldn't be sent, but the command ran successfully.")
print(f"[WARNING]: {e}")
pass
if not process_passed:
print("Execution status: FAIL")
exit(1) # returning non zero return code from the process.
print("Execution status: PASS")
def main():
"""Main entrypoint wrapper."""
# Create parser for a given task.
parser = argparse.ArgumentParser(
"segformer", add_help=True, description="Transfer Learning Toolkit"
)
# Build list of subtasks by inspecting the package.
subtasks = get_subtasks(scripts)
# Parse the arguments and launch the subtask.
launch(parser, subtasks, network="segformer")
if __name__ == '__main__':
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/entrypoint/segformer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model Init Module."""
from .backbones import * # noqa: F401,F403
from .builder import (BACKBONES, HEADS, LOSSES, SEGMENTORS, build_backbone,
build_head, build_loss, build_segmentor)
from .decode_heads import * # noqa: F401,F403
from .losses import * # noqa: F401,F403
from .segmentors import * # noqa: F401,F403
__all__ = [
'BACKBONES', 'HEADS', 'LOSSES', 'SEGMENTORS', 'build_backbone',
'build_head', 'build_loss', 'build_segmentor'
]
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/model/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmsegmentation
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model Builder Class."""
import warnings
from mmcv.utils import Registry, build_from_cfg
from torch import nn
BACKBONES = Registry('backbone', scope='xxx')
NECKS = Registry('neck', scope='xxx')
HEADS = Registry('head', scope='xxx')
LOSSES = Registry('loss', scope='xxx')
SEGMENTORS = Registry('segmentor', scope='xxx')
def build(cfg, registry, default_args=None):
"""Build a module.
Args:
        cfg (dict, list[dict]): The config of modules; it is either a dict
            or a list of configs.
registry (:obj:`Registry`): A registry the module belongs to.
default_args (dict, optional): Default arguments to build the module.
Defaults to None.
Returns:
nn.Module: A built nn module.
"""
if isinstance(cfg, list):
modules = [
build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg
]
return nn.Sequential(*modules)
return build_from_cfg(cfg, registry, default_args)
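# Minimal usage sketch for build() and Registry (illustrative; the toy registry and
# ToyLayer below are assumptions for demonstration, not part of this module's API).
def _example_build_usage():
    """Register a toy module in a scratch registry and build it from a config dict."""
    toy_registry = Registry('toy_layer', scope='xxx')

    @toy_registry.register_module()
    class ToyLayer(nn.Module):
        def __init__(self, channels=8):
            super().__init__()
            self.proj = nn.Linear(channels, channels)

    # A single dict builds one module; a list of dicts would be wrapped in nn.Sequential.
    return build(dict(type='ToyLayer', channels=16), toy_registry)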
def build_backbone(cfg):
"""Build backbone."""
return build(cfg, BACKBONES)
def build_neck(cfg):
"""Build neck."""
return build(cfg, NECKS)
def build_head(cfg):
"""Build head."""
return build(cfg, HEADS)
def build_loss(cfg):
"""Build loss."""
return build(cfg, LOSSES)
def build_segmentor(cfg, train_cfg=None, test_cfg=None):
"""Build segmentor."""
if train_cfg is not None or test_cfg is not None:
warnings.warn(
            'train_cfg and test_cfg are deprecated, '
'please specify them in model', UserWarning)
assert cfg.get('train_cfg') is None or train_cfg is None, \
'train_cfg specified in both outer field and model field '
assert cfg.get('test_cfg') is None or test_cfg is None, \
'test_cfg specified in both outer field and model field '
return build(cfg, SEGMENTORS, dict(train_cfg=train_cfg, test_cfg=test_cfg))
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/model/builder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Main model file for SegFormer. """
import torch
from omegaconf import OmegaConf
class SFModel(object): # pylint: disable=too-many-ancestors
"""Pytorch Module for SegFormer Model."""
def __init__(self, experiment_spec, phase=None, num_classes=None):
"""Init Segformer Model module.
Args:
experiment_spec (Dict): Dictionary of the spec parameters.
phase (str): Indicates train, val or test phase.
num_classes (int): Number of classes.
"""
super().__init__()
self.experiment_spec = experiment_spec
self.dataset_config = experiment_spec["dataset"]
self.sf_config = experiment_spec["model"]
self.phase = phase
if phase == "train":
self.sf_optim = experiment_spec["train"]["trainer"]["sf_optim"]
self.sf_optim_cfg = OmegaConf.to_container(self.sf_optim)
self.lr_config = OmegaConf.to_container(experiment_spec["train"]["trainer"]["lr_config"])
self.validation_interval = experiment_spec["train"]["validation_interval"]
self.find_unused_parameters = experiment_spec["train"]["trainer"]["find_unused_parameters"]
self.train_cfg = None
self.test_cfg = OmegaConf.to_container(self.sf_config["test_cfg"])
self.model_cfg = OmegaConf.to_container(self.sf_config)
self.model_cfg["pretrained"] = self.model_cfg["pretrained_model_path"]
self.model_cfg.pop("pretrained_model_path")
self.model_cfg["type"] = "EncoderDecoder"
self.model_cfg["decode_head"]["type"] = "SegFormerHead"
self.model_cfg["backbone"]["style"] = "pytorch"
self.model_cfg.pop("input_height")
self.model_cfg.pop("input_width")
self.backbone = self.model_cfg["backbone"]["type"]
self.channels_map = {"mit_b0": [32, 64, 160, 256],
"fan_tiny_8_p4_hybrid": [128, 256, 192, 192],
"fan_large_16_p4_hybrid": [128, 256, 480, 480],
"fan_small_12_p4_hybrid": [128, 256, 384, 384],
"fan_base_16_p4_hybrid": [128, 256, 448, 448], }
if self.backbone in self.channels_map:
self.model_cfg["decode_head"]["in_channels"] = self.channels_map[self.backbone]
if "fan" in self.backbone:
self.model_cfg["decode_head"]["channels"] = 256
self.export = False
self.model_cfg["backbone"]["export"] = self.export
self.test_cfg["export"] = self.export
self.model_cfg["train_cfg"] = self.train_cfg
if self.test_cfg["mode"] == "whole":
self.test_cfg.pop("crop_size")
self.test_cfg.pop("stride")
self.model_cfg["test_cfg"] = self.test_cfg
self.model_cfg["decode_head"]["export"] = self.export
self.model_cfg["decode_head"]["num_classes"] = num_classes
self.num_classes = num_classes
self.distributed = experiment_spec["train"]["exp_config"]["distributed"]
self.checkpoint_loaded = None
self.tmp_ckpt = None
self.max_iters = 1
self.resume_ckpt = None
self.checkpoint_interval = 1000
def _convert_batchnorm(self, module):
""" Convert Sync BN during Export."""
module_output = module
if isinstance(module, torch.nn.SyncBatchNorm):
module_output = torch.nn.BatchNorm2d(module.num_features, module.eps,
module.momentum, module.affine,
module.track_running_stats)
if module.affine:
module_output.weight.data = module.weight.data.clone().detach()
module_output.bias.data = module.bias.data.clone().detach()
# keep requires_grad unchanged
module_output.weight.requires_grad = module.weight.requires_grad
module_output.bias.requires_grad = module.bias.requires_grad
module_output.running_mean = module.running_mean
module_output.running_var = module.running_var
module_output.num_batches_tracked = module.num_batches_tracked
for name, child in module.named_children():
module_output.add_module(name, self._convert_batchnorm(child))
del module
return module_output
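# Minimal usage sketch for _convert_batchnorm (illustrative; the toy module below is
# an assumption used only for demonstration).
def _example_convert_batchnorm(sf_model):
    """Given an SFModel instance, convert SyncBatchNorm layers in a toy module."""
    toy = torch.nn.Sequential(
        torch.nn.Conv2d(3, 8, 3, padding=1),
        torch.nn.SyncBatchNorm(8),
        torch.nn.ReLU(),
    )
    converted = sf_model._convert_batchnorm(toy)
    # converted[1] is now a torch.nn.BatchNorm2d carrying the copied running statistics.
    return converted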
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/model/sf_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Init Module."""
from .accuracy import Accuracy, accuracy
from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy,
cross_entropy, mask_cross_entropy)
from .utils import reduce_loss, weight_reduce_loss, weighted_loss
__all__ = [
'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy',
'mask_cross_entropy', 'CrossEntropyLoss', 'reduce_loss',
'weight_reduce_loss', 'weighted_loss'
]
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/model/losses/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmsegmentation
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils Module."""
import functools
import torch.nn.functional as F
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Return:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
# none: 0, elementwise_mean:1, sum: 2
if reduction_enum == 0:
return loss
if reduction_enum == 1:
return loss.mean()
if reduction_enum == 2:
return loss.sum()
return None
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
# if weight is specified, apply element-wise weight
if weight is not None:
assert weight.dim() == loss.dim(), "The weights and loss dimensions are not equal."
if weight.dim() > 1:
assert weight.size(1) == 1 or weight.size(1) == loss.size(1), "Weight size and loss size should match."
loss = loss * weight
# if avg_factor is not specified, just reduce the loss
if avg_factor is None:
loss = reduce_loss(loss, reduction)
else:
# if reduction is mean, then average the loss by avg_factor
if reduction == 'mean':
loss = loss.sum() / avg_factor
# if reduction is 'none', then do nothing, otherwise raise an error
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
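# Minimal usage sketch for weight_reduce_loss (illustrative tensors only).
def _example_weight_reduce_loss():
    """Element-wise weighting followed by an avg_factor-normalised mean."""
    import torch
    loss = torch.tensor([1.0, 2.0, 3.0])
    weight = torch.tensor([1.0, 0.0, 1.0])
    # Weighted sum is 1*1 + 2*0 + 3*1 = 4; dividing by avg_factor=2 gives tensor(2.).
    return weight_reduce_loss(loss, weight=weight, reduction='mean', avg_factor=2)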
def weighted_loss(loss_func):
"""Create a weighted version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)`.
:Example:
>>> import torch
>>> @weighted_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, avg_factor=2)
tensor(1.5000)
"""
@functools.wraps(loss_func)
def wrapper(pred,
target,
weight=None,
reduction='mean',
avg_factor=None,
**kwargs):
# get element-wise loss
loss = loss_func(pred, target, **kwargs)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
return wrapper
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/model/losses/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmsegmentation
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cross Entropy Module."""
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weight_reduce_loss
def cross_entropy(pred,
label,
weight=None,
class_weight=None,
reduction='mean',
avg_factor=None,
ignore_index=-100):
"""The wrapper function for :func:`F.cross_entropy`"""
# class_weight is a manual rescaling weight given to each class.
# If given, has to be a Tensor of size C element-wise losses
loss = F.cross_entropy(
pred,
label,
weight=class_weight,
reduction='none',
ignore_index=ignore_index)
# apply weights and do the reduction
if weight is not None:
weight = weight.float()
loss = weight_reduce_loss(
loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
return loss
def _expand_onehot_labels(labels, label_weights, target_shape, ignore_index):
"""Expand onehot labels to match the size of prediction."""
bin_labels = labels.new_zeros(target_shape)
valid_mask = (labels >= 0) & (labels != ignore_index)
inds = torch.nonzero(valid_mask, as_tuple=True)
if inds[0].numel() > 0:
if labels.dim() == 3:
bin_labels[inds[0], labels[valid_mask], inds[1], inds[2]] = 1
else:
bin_labels[inds[0], labels[valid_mask]] = 1
valid_mask = valid_mask.unsqueeze(1).expand(target_shape).float()
if label_weights is None:
bin_label_weights = valid_mask
else:
bin_label_weights = label_weights.unsqueeze(1).expand(target_shape)
bin_label_weights *= valid_mask
return bin_labels, bin_label_weights
def binary_cross_entropy(pred,
label,
weight=None,
reduction='mean',
avg_factor=None,
class_weight=None,
ignore_index=255):
"""Calculate the binary CrossEntropy loss.
Args:
pred (torch.Tensor): The prediction with shape (N, 1).
label (torch.Tensor): The learning label of the prediction.
weight (torch.Tensor, optional): Sample-wise loss weight.
reduction (str, optional): The method used to reduce the loss.
Options are "none", "mean" and "sum".
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
class_weight (list[float], optional): The weight for each class.
ignore_index (int | None): The label index to be ignored. Default: 255
Returns:
torch.Tensor: The calculated loss
"""
if pred.dim() != label.dim():
assert (pred.dim() == 2 and label.dim() == 1) or (pred.dim() == 4 and label.dim() == 3), \
'Only pred shape [N, C], label shape [N] or pred shape [N, C, ' \
'H, W], label shape [N, H, W] are supported'
label, weight = _expand_onehot_labels(label, weight, pred.shape,
ignore_index)
# weighted element-wise losses
if weight is not None:
weight = weight.float()
loss = F.binary_cross_entropy_with_logits(
pred, label.float(), pos_weight=class_weight, reduction='none')
# do the reduction for the weighted loss
loss = weight_reduce_loss(
loss, weight, reduction=reduction, avg_factor=avg_factor)
return loss
def mask_cross_entropy(pred,
target,
label,
reduction='mean',
avg_factor=None,
class_weight=None,
ignore_index=None):
"""Calculate the CrossEntropy loss for masks.
Args:
pred (torch.Tensor): The prediction with shape (N, C), C is the number
of classes.
target (torch.Tensor): The learning label of the prediction.
        label (torch.Tensor): ``label`` indicates the class label of the mask's
            corresponding object. This will be used to select the mask in the
            prediction of the class which the object belongs to when the mask
            prediction is not class-agnostic.
reduction (str, optional): The method used to reduce the loss.
Options are "none", "mean" and "sum".
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
class_weight (list[float], optional): The weight for each class.
ignore_index (None): Placeholder, to be consistent with other loss.
Default: None.
Returns:
torch.Tensor: The calculated loss
"""
assert ignore_index is None, 'BCE loss does not support ignore_index'
# TODO: handle these two reserved arguments
assert reduction == 'mean' and avg_factor is None, "The average should be mean."
num_rois = pred.size()[0]
inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
pred_slice = pred[inds, label].squeeze(1)
return F.binary_cross_entropy_with_logits(
pred_slice, target, weight=class_weight, reduction='mean')[None]
@LOSSES.register_module()
class CrossEntropyLoss(nn.Module):
"""CrossEntropyLoss.
Args:
use_sigmoid (bool, optional): Whether the prediction uses sigmoid
of softmax. Defaults to False.
use_mask (bool, optional): Whether to use mask cross entropy loss.
Defaults to False.
reduction (str, optional): . Defaults to 'mean'.
Options are "none", "mean" and "sum".
class_weight (list[float], optional): Weight of each class.
Defaults to None.
loss_weight (float, optional): Weight of the loss. Defaults to 1.0.
"""
def __init__(self,
use_sigmoid=False,
use_mask=False,
reduction='mean',
class_weight=None,
loss_weight=1.0):
"""Init."""
super(CrossEntropyLoss, self).__init__()
        assert (use_sigmoid is False) or (use_mask is False), "use_sigmoid and use_mask cannot both be True."
self.use_sigmoid = use_sigmoid
self.use_mask = use_mask
self.reduction = reduction
self.loss_weight = loss_weight
self.class_weight = class_weight
if self.use_sigmoid:
self.cls_criterion = binary_cross_entropy
elif self.use_mask:
self.cls_criterion = mask_cross_entropy
else:
self.cls_criterion = cross_entropy
def forward(self,
cls_score,
label,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
"""Forward function."""
        assert reduction_override in (None, 'none', 'mean', 'sum'), "The provided reduction_override is not supported."
reduction = (
reduction_override if reduction_override else self.reduction)
if self.class_weight is not None:
class_weight = cls_score.new_tensor(self.class_weight)
else:
class_weight = None
loss_cls = self.loss_weight * self.cls_criterion(
cls_score,
label,
weight,
class_weight=class_weight,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss_cls
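# Minimal usage sketch for CrossEntropyLoss (illustrative shapes and values only).
def _example_cross_entropy_loss():
    """Softmax cross entropy on a toy (N, C, H, W) logit map with (N, H, W) labels."""
    criterion = CrossEntropyLoss(use_sigmoid=False, use_mask=False, loss_weight=1.0)
    logits = torch.randn(2, 3, 4, 4)          # 2 images, 3 classes, 4x4 pixels
    labels = torch.randint(0, 3, (2, 4, 4))   # per-pixel class indices
    return criterion(logits, labels)          # scalar mean loss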
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/model/losses/cross_entropy_loss.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmsegmentation
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accuracy Module."""
import torch.nn as nn
def accuracy(pred, target, topk=1, thresh=None):
"""Calculate accuracy according to the prediction and target.
Args:
pred (torch.Tensor): The model prediction, shape (N, num_class, ...)
        target (torch.Tensor): The target of each prediction, shape (N, ...)
topk (int | tuple[int], optional): If the predictions in ``topk``
matches the target, the predictions will be regarded as
correct ones. Defaults to 1.
thresh (float, optional): If not None, predictions with scores under
this threshold are considered incorrect. Default to None.
Returns:
float | tuple[float]: If the input ``topk`` is a single integer,
the function will return a single float as accuracy. If
``topk`` is a tuple containing multiple integers, the
function will return a tuple containing accuracies of
each ``topk`` number.
"""
assert isinstance(topk, (int, tuple)), "Topk predictions should be int or tuple."
if isinstance(topk, int):
topk = (topk, )
return_single = True
else:
return_single = False
maxk = max(topk)
if pred.size(0) == 0:
accu = [pred.new_tensor(0.) for i in range(len(topk))]
return accu[0] if return_single else accu
    assert pred.ndim == target.ndim + 1, "Prediction should have exactly one more dimension than target."
    assert pred.size(0) == target.size(0), "The prediction size: {} is different from the target size: {}.".format(pred.size(0), target.size(0))
assert maxk <= pred.size(1), \
f'maxk {maxk} exceeds pred dimension {pred.size(1)}'
pred_value, pred_label = pred.topk(maxk, dim=1)
# transpose to shape (maxk, N, ...)
pred_label = pred_label.transpose(0, 1)
correct = pred_label.eq(target.unsqueeze(0).expand_as(pred_label))
if thresh is not None:
# Only prediction values larger than thresh are counted as correct
correct = correct & (pred_value > thresh).t()
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / target.numel()))
return res[0] if return_single else res
class Accuracy(nn.Module):
"""Accuracy calculation module."""
def __init__(self, topk=(1, ), thresh=None):
"""Module to calculate the accuracy.
Args:
topk (tuple, optional): The criterion used to calculate the
accuracy. Defaults to (1,).
            thresh (float, optional): If not None, predictions with scores
                under this threshold are considered incorrect. Defaults to None.
"""
super().__init__()
self.topk = topk
self.thresh = thresh
def forward(self, pred, target):
"""Forward function to calculate accuracy.
Args:
pred (torch.Tensor): Prediction of models.
target (torch.Tensor): Target for each prediction.
Returns:
            tuple[float]: The accuracies under different topk criteria.
"""
return accuracy(pred, target, self.topk, self.thresh)
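# Hedged usage sketch (not part of the original file): top-1 / top-5 accuracy on
# random scores; the batch size and class count are illustrative assumptions.
if __name__ == "__main__":
    import torch
    scores = torch.randn(4, 10)                 # (N, num_class)
    labels = torch.randint(0, 10, (4,))         # (N,)
    top1, top5 = Accuracy(topk=(1, 5))(scores, labels)
    print(top1.item(), top5.item())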
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/model/losses/accuracy.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmskeleton
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Se Layer Module."""
import mmcv
from mmcv.cnn import ConvModule
import torch.nn as nn
from .make_divisible import make_divisible
class SELayer(nn.Module):
"""Squeeze-and-Excitation Module.
Args:
channels (int): The input (and output) channels of the SE layer.
ratio (int): Squeeze ratio in SELayer, the intermediate channel will be
``int(channels/ratio)``. Default: 16.
conv_cfg (None or dict): Config dict for convolution layer.
Default: None, which means using conv2d.
act_cfg (dict or Sequence[dict]): Config dict for activation layer.
            If act_cfg is a dict, two activation layers will be configured
            by this dict. If act_cfg is a sequence of dicts, the first
            activation layer will be configured by the first dict and the
            second activation layer will be configured by the second dict.
Default: (dict(type='ReLU'), dict(type='HSigmoid', bias=3.0,
divisor=6.0)).
"""
def __init__(self,
channels,
ratio=16,
conv_cfg=None,
act_cfg=(dict(type='ReLU'),
dict(type='HSigmoid', bias=3.0, divisor=6.0))):
"""Init Module."""
super(SELayer, self).__init__()
if isinstance(act_cfg, dict):
act_cfg = (act_cfg, act_cfg)
assert len(act_cfg) == 2
assert mmcv.is_tuple_of(act_cfg, dict)
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
self.conv1 = ConvModule(
in_channels=channels,
out_channels=make_divisible(channels // ratio, 8),
kernel_size=1,
stride=1,
conv_cfg=conv_cfg,
act_cfg=act_cfg[0])
self.conv2 = ConvModule(
in_channels=make_divisible(channels // ratio, 8),
out_channels=channels,
kernel_size=1,
stride=1,
conv_cfg=conv_cfg,
act_cfg=act_cfg[1])
def forward(self, x):
"""Forward Module."""
out = self.global_avgpool(x)
out = self.conv1(out)
out = self.conv2(out)
return x * out
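# Hedged usage sketch (not part of the original file): channel re-weighting on a
# random feature map; the channel count and ratio are illustrative assumptions.
if __name__ == "__main__":
    import torch
    se = SELayer(channels=64, ratio=16)
    feat = torch.randn(2, 64, 32, 32)
    print(se(feat).shape)                       # unchanged shape: (2, 64, 32, 32)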
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/model/utils/se_layer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmsegmentation
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inverted Resiual."""
from mmcv.cnn import ConvModule
from torch import nn
from torch.utils import checkpoint as cp
from .se_layer import SELayer
class InvertedResidual(nn.Module):
"""InvertedResidual block for MobileNetV2.
Args:
in_channels (int): The input channels of the InvertedResidual block.
out_channels (int): The output channels of the InvertedResidual block.
stride (int): Stride of the middle (first) 3x3 convolution.
expand_ratio (int): Adjusts number of channels of the hidden layer
in InvertedResidual by this amount.
dilation (int): Dilation rate of depthwise conv. Default: 1
conv_cfg (dict): Config dict for convolution layer.
Default: None, which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU6').
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
Returns:
Tensor: The output tensor.
"""
def __init__(self,
in_channels,
out_channels,
stride,
expand_ratio,
dilation=1,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU6'),
with_cp=False):
"""Init Module."""
super(InvertedResidual, self).__init__()
self.stride = stride
        assert stride in [1, 2], f'stride must be in [1, 2]. ' \
            f'But received {stride}.'
self.with_cp = with_cp
self.use_res_connect = self.stride == 1 and in_channels == out_channels
hidden_dim = int(round(in_channels * expand_ratio))
layers = []
if expand_ratio != 1:
layers.append(
ConvModule(
in_channels=in_channels,
out_channels=hidden_dim,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
layers.extend([
ConvModule(
in_channels=hidden_dim,
out_channels=hidden_dim,
kernel_size=3,
stride=stride,
padding=dilation,
dilation=dilation,
groups=hidden_dim,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg),
ConvModule(
in_channels=hidden_dim,
out_channels=out_channels,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
])
self.conv = nn.Sequential(*layers)
def forward(self, x):
"""Forward Module."""
def _inner_forward(x):
"""Inner forward."""
if self.use_res_connect:
return x + self.conv(x)
return self.conv(x)
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
return out
class InvertedResidualV3(nn.Module):
"""Inverted Residual Block for MobileNetV3.
Args:
in_channels (int): The input channels of this Module.
out_channels (int): The output channels of this Module.
mid_channels (int): The input channels of the depthwise convolution.
        kernel_size (int): The kernel size of the depthwise convolution.
Default: 3.
stride (int): The stride of the depthwise convolution. Default: 1.
        se_cfg (dict): Config dict for se layer. Default: None, which means no
se layer.
with_expand_conv (bool): Use expand conv or not. If set False,
            mid_channels must be the same as in_channels. Default: True.
conv_cfg (dict): Config dict for convolution layer. Default: None,
which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU').
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
Returns:
Tensor: The output tensor.
"""
def __init__(self,
in_channels,
out_channels,
mid_channels,
kernel_size=3,
stride=1,
se_cfg=None,
with_expand_conv=True,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'),
with_cp=False):
"""Init Module."""
super(InvertedResidualV3, self).__init__()
self.with_res_shortcut = (stride == 1 and in_channels == out_channels)
assert stride in [1, 2], "Stride should be 1 or 2."
self.with_cp = with_cp
self.with_se = se_cfg is not None
self.with_expand_conv = with_expand_conv
if self.with_se:
assert isinstance(se_cfg, dict), "se layer should be a dictionary."
if not self.with_expand_conv:
assert mid_channels == in_channels, "Mid channels should be equal to in channels."
if self.with_expand_conv:
self.expand_conv = ConvModule(
in_channels=in_channels,
out_channels=mid_channels,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.depthwise_conv = ConvModule(
in_channels=mid_channels,
out_channels=mid_channels,
kernel_size=kernel_size,
stride=stride,
padding=kernel_size // 2,
groups=mid_channels,
conv_cfg=dict(
type='Conv2dAdaptivePadding') if stride == 2 else conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
if self.with_se:
self.se = SELayer(**se_cfg)
self.linear_conv = ConvModule(
in_channels=mid_channels,
out_channels=out_channels,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
def forward(self, x):
"""Forward Module."""
def _inner_forward(x):
"""Inner forward."""
out = x
if self.with_expand_conv:
out = self.expand_conv(out)
out = self.depthwise_conv(out)
if self.with_se:
out = self.se(out)
out = self.linear_conv(out)
if self.with_res_shortcut:
return x + out
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
return out
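# Hedged usage sketch (not part of the original file): a MobileNetV2-style block
# with a residual connection; channel counts and expand ratio are illustrative
# assumptions.
if __name__ == "__main__":
    import torch
    block = InvertedResidual(in_channels=32, out_channels=32, stride=1, expand_ratio=6)
    x = torch.randn(1, 32, 56, 56)
    print(block(x).shape)                       # (1, 32, 56, 56)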
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/model/utils/inverted_residual.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmskeleton
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Res Layer Module."""
from mmcv.cnn import build_conv_layer, build_norm_layer
from torch import nn
class ResLayer(nn.Sequential):
"""ResLayer to build ResNet style backbone.
Args:
block (nn.Module): block used to build ResLayer.
inplanes (int): inplanes of block.
planes (int): planes of block.
num_blocks (int): number of blocks.
stride (int): stride of the first block. Default: 1
avg_down (bool): Use AvgPool instead of stride conv when
downsampling in the bottleneck. Default: False
conv_cfg (dict): dictionary to construct and config conv layer.
Default: None
norm_cfg (dict): dictionary to construct and config norm layer.
Default: dict(type='BN')
multi_grid (int | None): Multi grid dilation rates of last
stage. Default: None
        contract_dilation (bool): Whether to contract the first dilation of each
            layer. Default: False
"""
def __init__(self,
block,
inplanes,
planes,
num_blocks,
stride=1,
dilation=1,
avg_down=False,
conv_cfg=None,
norm_cfg=dict(type='BN'),
multi_grid=None,
contract_dilation=False,
**kwargs):
"""Init Module."""
self.block = block
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = []
conv_stride = stride
if avg_down:
conv_stride = 1
downsample.append(
nn.AvgPool2d(
kernel_size=stride,
stride=stride,
ceil_mode=True,
count_include_pad=False))
downsample.extend([
build_conv_layer(
conv_cfg,
inplanes,
planes * block.expansion,
kernel_size=1,
stride=conv_stride,
bias=False),
build_norm_layer(norm_cfg, planes * block.expansion)[1]
])
downsample = nn.Sequential(*downsample)
layers = []
if multi_grid is None:
if dilation > 1 and contract_dilation:
first_dilation = dilation // 2
else:
first_dilation = dilation
else:
first_dilation = multi_grid[0]
layers.append(
block(
inplanes=inplanes,
planes=planes,
stride=stride,
dilation=first_dilation,
downsample=downsample,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
**kwargs))
inplanes = planes * block.expansion
for i in range(1, num_blocks):
layers.append(
block(
inplanes=inplanes,
planes=planes,
stride=1,
dilation=dilation if multi_grid is None else multi_grid[i],
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
**kwargs))
super(ResLayer, self).__init__(*layers)
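# Hedged usage sketch (not part of the original file): ResLayer only needs a block
# class that exposes ``expansion`` and accepts the keyword arguments ResLayer
# forwards, so the toy block below exists purely for illustration; it is not a
# block defined anywhere in TAO.
if __name__ == "__main__":
    import torch
    class _ToyBasicBlock(nn.Module):
        expansion = 1
        def __init__(self, inplanes, planes, stride=1, dilation=1,
                     downsample=None, conv_cfg=None, norm_cfg=dict(type='BN')):
            super().__init__()
            self.conv = build_conv_layer(conv_cfg, inplanes, planes, 3, stride=stride,
                                         padding=dilation, dilation=dilation, bias=False)
            self.bn = build_norm_layer(norm_cfg, planes)[1]
            self.relu = nn.ReLU(inplace=True)
            self.downsample = downsample
        def forward(self, x):
            identity = self.downsample(x) if self.downsample is not None else x
            return self.relu(self.bn(self.conv(x)) + identity)
    layer = ResLayer(_ToyBasicBlock, inplanes=64, planes=128, num_blocks=2, stride=2)
    print(layer(torch.randn(1, 64, 56, 56)).shape)  # (1, 128, 28, 28)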
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/model/utils/res_layer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmskeleton
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Up Convolution Block."""
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, build_upsample_layer
class UpConvBlock(nn.Module):
"""Upsample convolution block in decoder for UNet.
This upsample convolution block consists of one upsample module
followed by one convolution block. The upsample module expands the
high-level low-resolution feature map and the convolution block fuses
the upsampled high-level low-resolution feature map and the low-level
high-resolution feature map from encoder.
Args:
conv_block (nn.Sequential): Sequential of convolutional layers.
        in_channels (int): Number of input channels of the high-level
            low-resolution feature map from the decoder.
        skip_channels (int): Number of input channels of the low-level
            high-resolution feature map from the encoder.
out_channels (int): Number of output channels.
num_convs (int): Number of convolutional layers in the conv_block.
Default: 2.
stride (int): Stride of convolutional layer in conv_block. Default: 1.
dilation (int): Dilation rate of convolutional layer in conv_block.
Default: 1.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
conv_cfg (dict | None): Config dict for convolution layer.
Default: None.
norm_cfg (dict | None): Config dict for normalization layer.
Default: dict(type='BN').
act_cfg (dict | None): Config dict for activation layer in ConvModule.
Default: dict(type='ReLU').
upsample_cfg (dict): The upsample config of the upsample module in
decoder. Default: dict(type='InterpConv'). If the size of
high-level feature map is the same as that of skip feature map
            (low-level feature map from encoder), the high-level feature map
            does not need to be upsampled and upsample_cfg should be None.
        dcn (bool): Use deformable convolution in convolutional layer or not.
Default: None.
plugins (dict): plugins for convolutional layers. Default: None.
"""
def __init__(self,
conv_block,
in_channels,
skip_channels,
out_channels,
num_convs=2,
stride=1,
dilation=1,
with_cp=False,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'),
upsample_cfg=dict(type='InterpConv'),
dcn=None,
plugins=None):
"""Init Module."""
super(UpConvBlock, self).__init__()
assert dcn is None, 'Not implemented yet.'
assert plugins is None, 'Not implemented yet.'
self.conv_block = conv_block(
in_channels=2 * skip_channels,
out_channels=out_channels,
num_convs=num_convs,
stride=stride,
dilation=dilation,
with_cp=with_cp,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
dcn=None,
plugins=None)
if upsample_cfg is not None:
self.upsample = build_upsample_layer(
cfg=upsample_cfg,
in_channels=in_channels,
out_channels=skip_channels,
with_cp=with_cp,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
else:
self.upsample = ConvModule(
in_channels,
skip_channels,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
def forward(self, skip, x):
"""Forward function."""
x = self.upsample(x)
out = torch.cat([skip, x], dim=1)
out = self.conv_block(out)
return out
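# Hedged usage sketch (not part of the original file): UpConvBlock accepts any
# conv_block factory with a compatible signature; a toy one-ConvModule factory is
# used here purely for illustration, and upsample_cfg=None keeps the spatial
# sizes equal so no real upsampling happens.
if __name__ == "__main__":
    def _toy_conv_block(in_channels, out_channels, **kwargs):
        return ConvModule(in_channels, out_channels, 3, padding=1)
    block = UpConvBlock(_toy_conv_block, in_channels=128, skip_channels=64,
                        out_channels=64, upsample_cfg=None)
    skip = torch.randn(1, 64, 32, 32)
    x = torch.randn(1, 128, 32, 32)
    print(block(skip, x).shape)                 # (1, 64, 32, 32)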
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/model/utils/up_conv_block.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmskeleton
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Norm Module."""
import torch
import math
import warnings
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
"""Trunc Normal."""
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
lower = norm_cdf((a - mean) / std)
upper = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * lower - 1, 2 * upper - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    # type: (Tensor, float, float, float, float) -> Tensor
r"""Fills the input Tensor with values drawn from a truncated
normal distribution. The values are effectively drawn from the
normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
with values outside :math:`[a, b]` redrawn until they are within
the bounds. The method used for generating the random values works
best when :math:`a \leq \text{mean} \leq b`.
Args:
tensor: an n-dimensional `torch.Tensor`
mean: the mean of the normal distribution
std: the standard deviation of the normal distribution
a: the minimum cutoff value
b: the maximum cutoff value
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.trunc_normal_(w)
"""
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/model/utils/norm.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Init Module."""
from .inverted_residual import InvertedResidual, InvertedResidualV3
from .make_divisible import make_divisible
from .res_layer import ResLayer
from .self_attention_block import SelfAttentionBlock
from .up_conv_block import UpConvBlock
__all__ = [
'ResLayer', 'SelfAttentionBlock', 'make_divisible', 'InvertedResidual',
'UpConvBlock', 'InvertedResidualV3'
]
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/model/utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmskeleton
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Self-attention Module."""
import torch
from torch import nn
from torch.nn import functional as F
from mmcv.cnn import ConvModule, constant_init
class SelfAttentionBlock(nn.Module):
"""General self-attention block/non-local block.
Please refer to https://arxiv.org/abs/1706.03762 for details about key,
query and value.
Args:
key_in_channels (int): Input channels of key feature.
query_in_channels (int): Input channels of query feature.
channels (int): Output channels of key/query transform.
out_channels (int): Output channels.
share_key_query (bool): Whether share projection weight between key
and query projection.
query_downsample (nn.Module): Query downsample module.
key_downsample (nn.Module): Key downsample module.
key_query_num_convs (int): Number of convs for key/query projection.
value_num_convs (int): Number of convs for value projection.
matmul_norm (bool): Whether normalize attention map with sqrt of
channels
with_out (bool): Whether use out projection.
conv_cfg (dict|None): Config of conv layers.
norm_cfg (dict|None): Config of norm layers.
act_cfg (dict|None): Config of activation layers.
"""
def __init__(self, key_in_channels, query_in_channels, channels,
out_channels, share_key_query, query_downsample,
key_downsample, key_query_num_convs, value_out_num_convs,
key_query_norm, value_out_norm, matmul_norm, with_out,
conv_cfg, norm_cfg, act_cfg):
"""Init Module."""
super(SelfAttentionBlock, self).__init__()
if share_key_query:
assert key_in_channels == query_in_channels
self.key_in_channels = key_in_channels
self.query_in_channels = query_in_channels
self.out_channels = out_channels
self.channels = channels
self.share_key_query = share_key_query
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.key_project = self.build_project(
key_in_channels,
channels,
num_convs=key_query_num_convs,
use_conv_module=key_query_norm,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
if share_key_query:
self.query_project = self.key_project
else:
self.query_project = self.build_project(
query_in_channels,
channels,
num_convs=key_query_num_convs,
use_conv_module=key_query_norm,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.value_project = self.build_project(
key_in_channels,
channels if with_out else out_channels,
num_convs=value_out_num_convs,
use_conv_module=value_out_norm,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
if with_out:
self.out_project = self.build_project(
channels,
out_channels,
num_convs=value_out_num_convs,
use_conv_module=value_out_norm,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
else:
self.out_project = None
self.query_downsample = query_downsample
self.key_downsample = key_downsample
self.matmul_norm = matmul_norm
self.init_weights()
def init_weights(self):
"""Initialize weight of later layer."""
if self.out_project is not None:
if not isinstance(self.out_project, ConvModule):
constant_init(self.out_project, 0)
def build_project(self, in_channels, channels, num_convs, use_conv_module,
conv_cfg, norm_cfg, act_cfg):
"""Build projection layer for key/query/value/out."""
if use_conv_module:
convs = [
ConvModule(
in_channels,
channels,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
]
for _ in range(num_convs - 1):
convs.append(
ConvModule(
channels,
channels,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
else:
convs = [nn.Conv2d(in_channels, channels, 1)]
for _ in range(num_convs - 1):
convs.append(nn.Conv2d(channels, channels, 1))
if len(convs) > 1:
convs = nn.Sequential(*convs)
else:
convs = convs[0]
return convs
def forward(self, query_feats, key_feats):
"""Forward function."""
batch_size = query_feats.size(0)
query = self.query_project(query_feats)
if self.query_downsample is not None:
query = self.query_downsample(query)
query = query.reshape(*query.shape[:2], -1)
query = query.permute(0, 2, 1).contiguous()
key = self.key_project(key_feats)
value = self.value_project(key_feats)
if self.key_downsample is not None:
key = self.key_downsample(key)
value = self.key_downsample(value)
key = key.reshape(*key.shape[:2], -1)
value = value.reshape(*value.shape[:2], -1)
value = value.permute(0, 2, 1).contiguous()
sim_map = torch.matmul(query, key)
if self.matmul_norm:
sim_map = (self.channels**-.5) * sim_map
sim_map = F.softmax(sim_map, dim=-1)
context = torch.matmul(sim_map, value)
context = context.permute(0, 2, 1).contiguous()
context = context.reshape(batch_size, -1, *query_feats.shape[2:])
if self.out_project is not None:
context = self.out_project(context)
return context
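# Hedged usage sketch (not part of the original file): a self-attention pass
# where query and key features come from the same map; every channel count below
# is an illustrative assumption.
if __name__ == "__main__":
    block = SelfAttentionBlock(
        key_in_channels=64, query_in_channels=64, channels=32, out_channels=64,
        share_key_query=False, query_downsample=None, key_downsample=None,
        key_query_num_convs=1, value_out_num_convs=1, key_query_norm=False,
        value_out_norm=False, matmul_norm=True, with_out=True,
        conv_cfg=None, norm_cfg=None, act_cfg=None)
    feats = torch.randn(1, 64, 16, 16)
    print(block(feats, feats).shape)            # (1, 64, 16, 16)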
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/model/utils/self_attention_block.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for Segformer"""
import os
import csv
import torch
import shutil
import struct
import tempfile
from eff.core.codec import encrypt_stream
from nvidia_tao_pytorch.core.connectors.checkpoint_connector import decrypt_checkpoint
def patch_decrypt_checkpoint(checkpoint, key):
"""Patch decrypt checkpoint.
    This makes decryption work when a model trained with multiple GPUs
    is loaded in a single-GPU environment.
"""
from functools import partial
legacy_load = torch.load
torch.load = partial(legacy_load, map_location="cpu")
checkpoint = decrypt_checkpoint(checkpoint, key)
torch.load = legacy_load
# set the encrypted status to be False when it is decrypted
checkpoint["state_dict_encrypted"] = False
return checkpoint
def encrypt_onnx(tmp_file_name, output_file_name, key):
"""Encrypt the onnx model"""
with open(tmp_file_name, "rb") as open_temp_file, open(output_file_name,
"wb") as open_encoded_file:
# set the input name magic number
open_encoded_file.write(struct.pack("<i", 0))
encrypt_stream(
input_stream=open_temp_file, output_stream=open_encoded_file,
passphrase=key, encryption_type=True
)
def check_and_create(d):
"""Create a directory."""
if not os.path.isdir(d):
os.makedirs(d)
def data_to_device(data):
"""Transfer data to GPU."""
if isinstance(data, list):
cuda_data = []
for item in data:
cuda_item = item.cuda(non_blocking=True)
cuda_data.append(cuda_item)
else:
cuda_data = data.cuda(non_blocking=True)
return cuda_data
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
"""Init"""
self.reset()
def reset(self):
"""reset parameters."""
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
"""update accuracy."""
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.reshape(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def save_checkpoint(state, is_best, checkpoint, model_best):
"""Naive checkpoint saver."""
torch.save(state, checkpoint)
if is_best:
shutil.copyfile(checkpoint, model_best)
def record_train_info(info, filename):
"""Naive log information."""
str_log = "train_loss: {} val_loss: {} train_acc@1: {} val_acc@1: {} lr: {}".format(
info['train_loss'],
info['val_loss'],
info['train_acc@1'],
info['val_acc@1'],
info['lr'])
print(str_log)
column_names = ['epoch', 'train_loss', 'val_loss', 'train_acc@1', 'val_acc@1', 'lr']
if not os.path.isfile(filename):
with open(filename, "w") as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=column_names)
writer.writeheader()
writer.writerow(info)
else: # else it exists so append without writing the header
with open(filename, "a") as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=column_names)
writer.writerow(info)
def load_model(model_path):
"""Load state dict from the model path accounting for mmcls trained models."""
temp = tempfile.NamedTemporaryFile(suffix='.pth', delete=False)
tmp_model_path = temp.name
new_state_dict = {}
checkpoint = torch.load(model_path)
for k, v in checkpoint["state_dict"].items():
if k.startswith("backbone."):
k = k[9:]
new_state_dict[k] = v
checkpoint['state_dict'] = new_state_dict
torch.save(checkpoint, tmp_model_path)
return tmp_model_path
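# Hedged usage sketch (not part of the original file): tracking a running top-1
# accuracy with AverageMeter over a few random batches; the batch size and class
# count are illustrative assumptions.
if __name__ == "__main__":
    meter = AverageMeter()
    for _ in range(3):
        output = torch.randn(8, 10)
        target = torch.randint(0, 10, (8,))
        top1, = accuracy(output, target, topk=(1,))
        meter.update(top1.item(), n=output.size(0))
    print(meter.avg)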
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/model/utils/common_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmsegmentation
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Make Divisible Operator."""
def make_divisible(value, divisor, min_value=None, min_ratio=0.9):
"""Make divisible function.
This function rounds the channel number to the nearest value that can be
divisible by the divisor. It is taken from the original tf repo. It ensures
that all layers have a channel number that is divisible by divisor. It can
be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py # noqa
Args:
value (int): The original channel number.
divisor (int): The divisor to fully divide the channel number.
min_value (int): The minimum value of the output channel.
Default: None, means that the minimum value equal to the divisor.
min_ratio (float): The minimum ratio of the rounded channel number to
the original channel number. Default: 0.9.
Returns:
int: The modified output channel number.
"""
if min_value is None:
min_value = divisor
new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than (1-min_ratio).
if new_value < min_ratio * value:
new_value += divisor
return new_value
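# Hedged usage sketch (not part of the original file): two worked values showing
# plain rounding to the nearest multiple, and the min_ratio guard that adds the
# divisor back when rounding down would lose more than 10% of the channels.
if __name__ == "__main__":
    print(make_divisible(30, 8))   # 32: nearest multiple of 8
    print(make_divisible(10, 8))   # 16: rounding gives 8, but 8 < 0.9 * 10, so one divisor is added back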
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/model/utils/make_divisible.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmsegmentation
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" DropBlock, DropPath
PyTorch implementations of DropBlock and DropPath (Stochastic Depth) regularization layers.
Papers:
DropBlock: A regularization method for convolutional networks (https://arxiv.org/abs/1810.12890)
Deep Networks with Stochastic Depth (https://arxiv.org/abs/1603.09382)
Code:
DropBlock impl inspired by two Tensorflow impl that I liked:
- https://github.com/tensorflow/tpu/blob/master/models/official/resnet/resnet_model.py#L74
- https://github.com/clovaai/assembled-cnn/blob/master/nets/blocks.py
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
def drop_block_2d(
x, drop_prob: float = 0.1, block_size: int = 7, gamma_scale: float = 1.0,
with_noise: bool = False, inplace: bool = False, batchwise: bool = False):
""" DropBlock. See https://arxiv.org/pdf/1810.12890.pdf
DropBlock with an experimental gaussian noise option. This layer has been tested on a few training
runs with success, but needs further validation and possibly optimization for lower runtime impact.
"""
_, C, H, W = x.shape
total_size = W * H
clipped_block_size = min(block_size, min(W, H))
# seed_drop_rate, the gamma parameter
gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / (
(W - block_size + 1) * (H - block_size + 1))
# Forces the block to be inside the feature map.
w_i, h_i = torch.meshgrid(torch.arange(W).to(x.device), torch.arange(H).to(x.device))
valid_block = ((w_i >= clipped_block_size // 2) & (w_i < W - (clipped_block_size - 1) // 2)) & \
((h_i >= clipped_block_size // 2) & (h_i < H - (clipped_block_size - 1) // 2))
valid_block = torch.reshape(valid_block, (1, 1, H, W)).to(dtype=x.dtype)
if batchwise:
# one mask for whole batch, quite a bit faster
uniform_noise = torch.rand((1, C, H, W), dtype=x.dtype, device=x.device)
else:
uniform_noise = torch.rand_like(x)
block_mask = ((2 - gamma - valid_block + uniform_noise) >= 1).to(dtype=x.dtype)
block_mask = -F.max_pool2d(
-block_mask,
kernel_size=clipped_block_size,
stride=1,
padding=clipped_block_size // 2)
if with_noise:
normal_noise = torch.randn((1, C, H, W), dtype=x.dtype, device=x.device) if batchwise else torch.randn_like(x)
if inplace:
x.mul_(block_mask).add_(normal_noise * (1 - block_mask))
else:
x = x * block_mask + normal_noise * (1 - block_mask)
else:
normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-7)).to(x.dtype)
if inplace:
x.mul_(block_mask * normalize_scale)
else:
x = x * block_mask * normalize_scale
return x
def drop_block_fast_2d(
x: torch.Tensor, drop_prob: float = 0.1, block_size: int = 7,
gamma_scale: float = 1.0, with_noise: bool = False, inplace: bool = False, batchwise: bool = False):
""" DropBlock. See https://arxiv.org/pdf/1810.12890.pdf
    DropBlock with an experimental gaussian noise option. Simplified from the version
    above, without concern for a valid block mask at the edges.
"""
_, C, H, W = x.shape
total_size = W * H
clipped_block_size = min(block_size, min(W, H))
gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / ((W - block_size + 1) * (H - block_size + 1))
if batchwise:
# one mask for whole batch, quite a bit faster
block_mask = torch.rand((1, C, H, W), dtype=x.dtype, device=x.device) < gamma
else:
# mask per batch element
block_mask = torch.rand_like(x) < gamma
block_mask = F.max_pool2d(
block_mask.to(x.dtype), kernel_size=clipped_block_size, stride=1, padding=clipped_block_size // 2)
if with_noise:
normal_noise = torch.randn((1, C, H, W), dtype=x.dtype, device=x.device) if batchwise else torch.randn_like(x)
if inplace:
x.mul_(1. - block_mask).add_(normal_noise * block_mask)
else:
x = x * (1. - block_mask) + normal_noise * block_mask
else:
block_mask = 1 - block_mask
normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-7)).to(dtype=x.dtype)
if inplace:
x.mul_(block_mask * normalize_scale)
else:
x = x * block_mask * normalize_scale
return x
class DropBlock2d(nn.Module):
""" DropBlock. See https://arxiv.org/pdf/1810.12890.pdf
"""
def __init__(self,
drop_prob=0.1,
block_size=7,
gamma_scale=1.0,
with_noise=False,
inplace=False,
batchwise=False,
fast=True):
"""Init Module."""
super(DropBlock2d, self).__init__()
self.drop_prob = drop_prob
self.gamma_scale = gamma_scale
self.block_size = block_size
self.with_noise = with_noise
self.inplace = inplace
self.batchwise = batchwise
self.fast = fast # FIXME finish comparisons of fast vs not
def forward(self, x):
"""Forward."""
if not self.training or not self.drop_prob:
return x
if self.fast:
return drop_block_fast_2d(
x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace, self.batchwise)
return drop_block_2d(
x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace, self.batchwise)
def drop_path(x, drop_prob: float = 0., training: bool = False):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
'survival rate' as the argument.
"""
if drop_prob == 0. or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_() # binarize
output = x.div(keep_prob) * random_tensor
return output
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob=None):
"""Init Module."""
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
"""Forward."""
return drop_path(x, self.drop_prob, self.training)
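# Hedged usage sketch (not part of the original file): DropPath is an identity in
# eval mode; in train mode it zeroes whole samples and rescales the survivors by
# 1 / (1 - drop_prob).
if __name__ == "__main__":
    dp = DropPath(drop_prob=0.2)
    x = torch.ones(4, 3, 8, 8)
    dp.eval()
    print(torch.equal(dp(x), x))                # True: no-op outside training
    dp.train()
    print(dp(x).reshape(4, -1)[:, 0])           # each entry is 0.0 or 1.25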
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/model/utils/drop.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://github.com/NVlabs/FAN/blob/main/LICENSE
# -------------------------------------------------------------------------
# MIT License
#
# Copyright (c) 2021 Facebook, Inc. and its affiliates.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Modified by Daquan Zhou
# -------------------------------------------------------------------------
""" ConvNeXt
Paper: `A ConvNet for the 2020s` - https://arxiv.org/pdf/2201.03545.pdf
Original code and weights from https://github.com/facebookresearch/ConvNeXt, original copyright below
Modifications and additions for timm hacked together by / Copyright 2022, Ross Wightman
"""
from collections import OrderedDict
from functools import partial
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import named_apply, build_model_with_cfg
from timm.models.layers import ClassifierHead, SelectAdaptivePool2d, DropPath
from nvidia_tao_pytorch.cv.backbone.convnext_utils import checkpoint_filter_fn, _init_weights, LayerNorm2d, ConvMlp
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.0', 'classifier': 'head.fc',
**kwargs
}
default_cfgs = dict(
convnext_tiny=_cfg(url="https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth"),
convnext_small=_cfg(url="https://dl.fbaipublicfiles.com/convnext/convnext_small_1k_224_ema.pth"),
convnext_base=_cfg(url="https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_224_ema.pth"),
convnext_large=_cfg(url="https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_224_ema.pth"),
convnext_tiny_hnf=_cfg(url=''),
convnext_base_in22k=_cfg(
# url="https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_224.pth", num_classes=21841),
url="pretrained/convnext_base_22k_224.pth", num_classes=21841),
convnext_large_in22k=_cfg(
url="https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_224.pth", num_classes=21841),
convnext_xlarge_in22k=_cfg(
url="https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_224.pth", num_classes=21841),
)
class Mlp(nn.Module):
"""MLP Module."""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
"""Init Function."""
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.fc2 = nn.Linear(hidden_features, out_features)
self.act = act_layer()
self.drop = nn.Dropout(drop)
def forward(self, x):
"""Forward Function."""
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class ConvNeXtBlock(nn.Module):
""" ConvNeXt Block
There are two equivalent implementations:
(1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W)
(2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back
Unlike the official impl, this one allows choice of 1 or 2, 1x1 conv can be faster with appropriate
choice of LayerNorm impl, however as model size increases the tradeoffs appear to change and nn.Linear
is a better choice. This was observed with PyTorch 1.10 on 3090 GPU, it could change over time & w/ different HW.
"""
def __init__(self, dim, drop_path=0., ls_init_value=1e-6, conv_mlp=True, mlp_ratio=4, norm_layer=None):
"""Initialize ConvNext Block.
Args:
dim (int): Number of input channels.
drop_path (float): Stochastic depth rate. Default: 0.0
ls_init_value (float): Init value for Layer Scale. Default: 1e-6.
"""
super().__init__()
if not norm_layer:
norm_layer = partial(LayerNorm2d, eps=1e-6) if conv_mlp else partial(nn.LayerNorm, eps=1e-6)
mlp_layer = ConvMlp if conv_mlp else Mlp
self.use_conv_mlp = conv_mlp
self.conv_dw = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim) # depthwise conv
self.norm = norm_layer(dim)
self.mlp = mlp_layer(dim, int(mlp_ratio * dim), act_layer=nn.GELU)
self.gamma = nn.Parameter(ls_init_value * torch.ones(dim)) if ls_init_value > 0 else None
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
"""Forward function."""
shortcut = x
x = self.conv_dw(x)
if self.use_conv_mlp:
x = self.norm(x)
x = self.mlp(x)
else:
x = x.permute(0, 2, 3, 1)
x = self.norm(x)
x = self.mlp(x)
x = x.permute(0, 3, 1, 2)
if self.gamma is not None:
x = x.mul(self.gamma.reshape(1, -1, 1, 1))
x = self.drop_path(x) + shortcut
return x
class ConvNeXtStage(nn.Module):
"""ConvNeXt Stage."""
def __init__(
self, in_chs, out_chs, stride=2, depth=2, dp_rates=None, ls_init_value=1.0, conv_mlp=True,
norm_layer=None, cl_norm_layer=None, cross_stage=False):
"""Initialize ConvNext Stage.
Args:
in_chs (int): Number of input channels.
out_chs (int): Number of output channels.
"""
super().__init__()
if in_chs != out_chs or stride > 1:
self.downsample = nn.Sequential(
norm_layer(in_chs),
nn.Conv2d(in_chs, out_chs, kernel_size=stride, stride=stride),
)
else:
self.downsample = nn.Identity()
dp_rates = dp_rates or [0.] * depth
self.blocks = nn.Sequential(*[ConvNeXtBlock(
dim=out_chs, drop_path=dp_rates[j], ls_init_value=ls_init_value, conv_mlp=conv_mlp,
norm_layer=norm_layer if conv_mlp else cl_norm_layer)
for j in range(depth)]
)
def forward(self, x):
"""Forward function."""
x = self.downsample(x)
x = self.blocks(x)
return x
class ConvNeXt(nn.Module):
""" ConvNeXt
A PyTorch impl of : `A ConvNet for the 2020s` - https://arxiv.org/pdf/2201.03545.pdf
"""
def __init__(
self, in_chans=3, num_classes=1000, global_pool='avg', output_stride=32, patch_size=4,
depths=(3, 3, 9, 3), dims=(96, 192, 384, 768), ls_init_value=1e-6, conv_mlp=True, use_head=True,
head_init_scale=1., head_norm_first=False, norm_layer=None, drop_rate=0., drop_path_rate=0.,
):
""" Initialize the ConvNext Class
Args:
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
depths (tuple(int)): Number of blocks at each stage. Default: [3, 3, 9, 3]
dims (tuple(int)): Feature dimension at each stage. Default: [96, 192, 384, 768]
drop_rate (float): Head dropout rate
drop_path_rate (float): Stochastic depth rate. Default: 0.
ls_init_value (float): Init value for Layer Scale. Default: 1e-6.
head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1.
"""
super().__init__()
assert output_stride == 32
if norm_layer is None:
norm_layer = partial(LayerNorm2d, eps=1e-6)
cl_norm_layer = norm_layer if conv_mlp else partial(nn.LayerNorm, eps=1e-6)
else:
assert conv_mlp,\
                'If a norm_layer is specified, conv MLP must be used so all norms expect rank-4, channels-first input'
cl_norm_layer = norm_layer
self.num_classes = num_classes
self.drop_rate = drop_rate
self.feature_info = []
# NOTE: this stem is a minimal form of ViT PatchEmbed, as used in SwinTransformer w/ patch_size = 4
self.stem = nn.Sequential(
nn.Conv2d(in_chans, dims[0], kernel_size=patch_size, stride=patch_size),
norm_layer(dims[0])
)
self.stages = nn.Sequential()
dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
curr_stride = patch_size
prev_chs = dims[0]
stages = []
# 4 feature resolution stages, each consisting of multiple residual blocks
for i in range(len(depths)):
stride = 2 if i > 0 else 1
# FIXME support dilation / output_stride
curr_stride *= stride
out_chs = dims[i]
stages.append(ConvNeXtStage(
prev_chs, out_chs, stride=stride,
depth=depths[i], dp_rates=dp_rates[i], ls_init_value=ls_init_value, conv_mlp=conv_mlp,
norm_layer=norm_layer, cl_norm_layer=cl_norm_layer)
)
prev_chs = out_chs
# NOTE feature_info use currently assumes stage 0 == stride 1, rest are stride 2
self.feature_info += [dict(num_chs=prev_chs, reduction=curr_stride, module=f'stages.{i}')]
self.stages = nn.Sequential(*stages)
self.num_features = prev_chs
if head_norm_first:
# norm -> global pool -> fc ordering, like most other nets (not compat with FB weights)
self.norm_pre = norm_layer(self.num_features) # final norm layer, before pooling
if use_head:
self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate)
else:
# pool -> norm -> fc, the default ConvNeXt ordering (pretrained FB weights)
self.norm_pre = nn.Identity()
if use_head:
self.head = nn.Sequential(OrderedDict([
('global_pool', SelectAdaptivePool2d(pool_type=global_pool)),
('norm', norm_layer(self.num_features)),
('flatten', nn.Flatten(1) if global_pool else nn.Identity()),
('drop', nn.Dropout(self.drop_rate)),
('fc', nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity())
]))
named_apply(partial(_init_weights, head_init_scale=head_init_scale), self)
def get_classifier(self):
"""Returns classifier of ConvNeXt."""
return self.head.fc
def reset_classifier(self, num_classes=0, global_pool='avg'):
"""Redefine the classification head"""
if isinstance(self.head, ClassifierHead):
# norm -> global pool -> fc
self.head = ClassifierHead(
self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate)
else:
# pool -> norm -> fc
self.head = nn.Sequential(OrderedDict([
('global_pool', SelectAdaptivePool2d(pool_type=global_pool)),
('norm', self.head.norm),
('flatten', nn.Flatten(1) if global_pool else nn.Identity()),
('drop', nn.Dropout(self.drop_rate)),
('fc', nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity())
]))
def forward_features(self, x, return_feat=False):
"""Forward Features."""
x = self.stem(x)
out_list = []
for i in range(len(self.stages)):
x = self.stages[i](x)
out_list.append(x)
x = self.norm_pre(x)
        return x, (out_list if return_feat else x)
def forward(self, x):
"""Forward Function."""
x = self.forward_features(x)
x = self.head(x)
return x
def _create_hybrid_backbone(variant='convnext_base_in22k', pretrained=False, **kwargs):
"""Helper function to create the hybrid model."""
model = build_model_with_cfg(
ConvNeXt, variant, pretrained,
pretrained_cfg=default_cfgs[variant],
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(out_indices=(0, 1, 2, 3), flatten_sequential=True),
**kwargs)
return model
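# Hedged usage sketch (not part of the original file): a truncated ConvNeXt
# configured the way the FAN hybrid stem uses it (a few stages, no classifier
# head); the depths/dims below are illustrative assumptions, not a released
# configuration.
if __name__ == "__main__":
    model = ConvNeXt(depths=(3, 3), dims=(128, 256), use_head=False)
    feats, stage_outs = model.forward_features(torch.zeros(1, 3, 224, 224), return_feat=True)
    print(feats.shape, [o.shape for o in stage_outs])   # (1, 256, 28, 28) plus per-stage maps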
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/model/backbones/convnext_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://github.com/NVlabs/FAN/blob/main/LICENSE
"""FAN Transformer Backbone Module for Segmentation."""
import math
from functools import partial
import warnings
import torch
import torch.nn as nn
import torch.utils.checkpoint as checkpoint
from mmcv.runner import load_checkpoint
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.layers import DropPath, trunc_normal_, to_2tuple
from nvidia_tao_pytorch.cv.segformer.model.utils.common_utils import load_model
from nvidia_tao_pytorch.cv.segformer.model.builder import BACKBONES
from nvidia_tao_pytorch.cv.segformer.utils import get_root_logger
from nvidia_tao_pytorch.cv.segformer.model.backbones.convnext_utils import _create_hybrid_backbone
from nvidia_tao_pytorch.cv.backbone.fan import (PositionalEncodingFourier, Mlp, ConvPatchEmbed,
ClassAttentionBlock, adaptive_avg_pool)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': 1.0, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.proj.0.0', 'classifier': 'head',
**kwargs
}
default_cfgs = {
'fan_tiny_12_p4_224': _cfg(url=''),
'fan_small_12_p16_224': _cfg(url=''),
'fan_base_12_p16_224': _cfg(url=''),
'fan_large_12_p16_224': _cfg(url=''),
}
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution + batch norm"""
return torch.nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False),
nn.SyncBatchNorm(out_planes)
)
class ClassAttn(nn.Module):
"""Class Attention"""
# taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
# with slight modifications to do CA
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
"""Init Function"""
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.k = nn.Linear(dim, dim, bias=qkv_bias)
self.v = nn.Linear(dim, dim, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
"""Forward Function"""
B, N, C = x.shape
q = self.q(x[:, 0]).unsqueeze(1).reshape(B, 1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
k = self.k(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
q = q * self.scale
v = self.v(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
attn = (q @ k.transpose(-2, -1))
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x_cls = (attn @ v).transpose(1, 2).reshape(B, 1, C)
x_cls = self.proj(x_cls)
x_cls = self.proj_drop(x_cls)
return x_cls
class DWConv(nn.Module):
"""Depth-wise convolution"""
def __init__(self, in_features, out_features=None, act_layer=nn.GELU, kernel_size=3):
"""Init Function"""
super().__init__()
out_features = out_features or in_features
padding = kernel_size // 2
self.conv1 = torch.nn.Conv2d(
in_features, in_features, kernel_size=kernel_size, padding=padding, groups=in_features)
self.act = act_layer()
self.bn = nn.SyncBatchNorm(in_features)
self.conv2 = torch.nn.Conv2d(
in_features, out_features, kernel_size=kernel_size, padding=padding, groups=out_features)
def forward(self, x, H: int, W: int):
"""Forward Function."""
B, N, C = x.shape
x = x.permute(0, 2, 1).reshape(B, C, H, W)
x = self.conv1(x)
x = self.act(x)
x = self.bn(x)
x = self.conv2(x)
x = x.reshape(B, C, N).permute(0, 2, 1)
return x
class TokenMixing(nn.Module):
"""Token Mixing"""
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
"""Init Function"""
super().__init__()
assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."
self.dim = dim
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
# config of mlp for v processing
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.apply(self._init_weights)
def _init_weights(self, m):
"""Init Weights"""
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x, H, W):
"""Forward Function"""
B, N, C = x.shape
q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
k, v = kv[0], kv[1]
attn = (q * self.scale @ k.transpose(-2, -1)) # * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
Bv, Hd, Nv, Cv = v.shape # noqa pylint: disable=W0612
# v = self.norm_v(self.mlp_v(v.transpose(1, 2).reshape(Bv, Nv, Hd*Cv), H//self.sr_ratio, W//self.sr_ratio)).reshape(Bv, Nv, Hd, Cv).transpose(1, 2)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x, attn
class HybridEmbed(nn.Module):
""" CNN Feature Map Embedding
Extract feature map from CNN, flatten, project to embedding dim.
"""
def __init__(self, backbone, img_size=224, patch_size=2, feature_size=None, in_chans=3, embed_dim=384):
"""Init Function"""
super().__init__()
assert isinstance(backbone, nn.Module), "Backbone is not of instance Module."
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.backbone = backbone
if feature_size is None:
with torch.no_grad():
# NOTE Most reliable way of determining output dims is to run forward pass
training = backbone.training
if training:
backbone.eval()
o = self.backbone.forward_features(torch.zeros(1, in_chans, img_size[0], img_size[1]))
if isinstance(o, (list, tuple)):
o = o[-1] # last feature if backbone outputs list/tuple of features
feature_size = o.shape[-2:]
feature_dim = o.shape[1]
backbone.train(training)
else:
feature_size = to_2tuple(feature_size)
if hasattr(self.backbone, 'feature_info'):
feature_dim = self.backbone.feature_info.channels()[-1]
else:
feature_dim = self.backbone.num_features
assert feature_size[0] % patch_size[0] == 0 and feature_size[1] % patch_size[1] == 0, "Feature size is not a multiple of patch size."
self.grid_size = (feature_size[0] // patch_size[0], feature_size[1] // patch_size[1])
self.num_patches = self.grid_size[0] * self.grid_size[1]
self.proj = nn.Conv2d(feature_dim, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x, return_feat=False):
"""Forward Function"""
x, out_list = self.backbone.forward_features(x, return_feat=return_feat)
_, _, H, W = x.shape
if isinstance(x, (list, tuple)):
x = x[-1] # last feature if backbone outputs list/tuple of features
x = self.proj(x).flatten(2).transpose(1, 2)
if return_feat:
return x, (H // self.patch_size[0], W // self.patch_size[1]), out_list
return x, (H // self.patch_size[0], W // self.patch_size[1])
class ChannelProcessing(nn.Module):
"""Channel Processing"""
def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0.,
drop_path=0., mlp_hidden_dim=None, act_layer=nn.GELU, drop=None, norm_layer=nn.LayerNorm):
"""Initialize ChannelProcessing class"""
super().__init__()
assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."
self.dim = dim
self.num_heads = num_heads
self.temperature = nn.Parameter(torch.ones(num_heads, 1, 1))
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.mlp_v = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
self.norm_v = norm_layer(dim)
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.apply(self._init_weights)
def _init_weights(self, m):
"""Initialize weights"""
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def _gen_attn(self, q, k, mode='none', shift_range=4, sampling_step=4):
"""Returns attention"""
_, _, N, _ = k.shape
if torch.onnx.is_in_onnx_export():
            # If softmax dim is not the last dimension, then PyTorch decomposes the softmax op into
# smaller ops like ReduceMax, ReduceSum, Sub, and Div.
# As a result, ONNX export fails for opset_version >= 12.
# Here, we rearrange the transpose so that softmax is done over the last dimension.
q = q.transpose(-1, -2).softmax(-1)
k = k.transpose(-1, -2).softmax(-1)
            warnings.warn("Replacing default adaptive_avg_pool2d with a custom implementation for ONNX export")
# adaptive_avg_pool2d is not supported for torch to onnx export
k = adaptive_avg_pool(k.transpose(-1, -2), (N, 1))
else:
q = q.softmax(-2).transpose(-1, -2)
k = torch.nn.functional.adaptive_avg_pool2d(k.softmax(-2), (N, 1))
attn = torch.nn.functional.sigmoid(q @ k)
return attn * self.temperature
def forward(self, x, H, W, atten=None):
"""Forward functions """
B, N, C = x.shape
v = x.reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
k = x.reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
attn = self._gen_attn(q, k)
attn = self.attn_drop(attn)
Bv, Hd, Nv, Cv = v.shape
v = self.norm_v(self.mlp_v(v.transpose(1, 2).reshape(Bv, Nv, Hd * Cv), H, W)).reshape(Bv, Nv, Hd, Cv).transpose(1, 2)
repeat_time = N // attn.shape[-1]
attn = attn.repeat_interleave(repeat_time, dim=-1) if attn.shape[-1] > 1 else attn
x = (attn * v.transpose(-1, -2)).permute(0, 3, 1, 2).reshape(B, N, C)
return x, attn * v.transpose(-1, -2)
@torch.jit.ignore
def no_weight_decay(self):
"""Ignore during weight decay"""
return {'temperature'}
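# Illustrative sketch (not part of the library): the channel attention above softmaxes the
# queries over the token axis, average-pools the keys down to a single column, and gates the
# product with a sigmoid, so the attention has shape (B, heads, C/heads, 1) instead of the
# usual (B, heads, N, N). Plain-tensor mock-up of the non-ONNX branch with hypothetical sizes:
def _channel_attn_math_sketch():
    """Rough re-derivation of ChannelProcessing._gen_attn (non-ONNX branch)."""
    B, heads, N, c = 2, 4, 64, 16
    q = torch.randn(B, heads, N, c)
    k = torch.randn(B, heads, N, c)
    q = q.softmax(-2).transpose(-1, -2)  # (B, heads, c, N)
    k = torch.nn.functional.adaptive_avg_pool2d(k.softmax(-2), (N, 1))  # (B, heads, N, 1)
    attn = torch.sigmoid(q @ k)  # (B, heads, c, 1), later scaled by the learnable temperature
    return attn.shape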
class FANBlock(nn.Module):
"""FAN block from https://arxiv.org/abs/2204.12451"""
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., drop_path=0., norm_layer=nn.LayerNorm, eta=1.):
"""Initialize FANBlock class"""
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = TokenMixing(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_block = ChannelProcessing
self.mlp = mlp_block(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop,
drop_path=drop_path, drop=drop, mlp_hidden_dim=int(dim * mlp_ratio))
self.gamma1 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)
self.gamma2 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)
self.H = None
self.W = None
def forward(self, x, attn=None, return_attention=False):
"""Forward Function"""
H, W = self.H, self.W
x_new, attn = self.attn(self.norm1(x), H, W)
x = x + self.drop_path(self.gamma1 * x_new)
x_new, attn = self.mlp(self.norm2(x), H, W, atten=attn)
x = x + self.drop_path(self.gamma2 * x_new)
self.H, self.W = H, W
if return_attention:
return attn
return x
class FAN(nn.Module):
"""Based on timm https://github.com/rwightman/pytorch-image-models/tree/master/timm"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., backbone=None, out_idx=-1,
act_layer=None, norm_layer=None, cls_attn_layers=2, use_pos_embed=True, eta=1., tokens_norm=False, feat_downsample=False, use_checkpoint=False,
default_cfg=None):
"""Init Function"""
super().__init__()
img_size = to_2tuple(img_size)
self.feat_downsample = feat_downsample
self.use_checkpoint = use_checkpoint
self.default_cfg = default_cfg
self.num_classes = num_classes
self.num_features = self.embed_dim = embed_dim
self.out_idx = out_idx
norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
act_layer = act_layer or nn.GELU
if backbone is None:
self.patch_embed = ConvPatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, act_layer=act_layer)
else:
self.patch_embed = HybridEmbed(backbone=backbone, patch_size=2, embed_dim=embed_dim)
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.use_pos_embed = use_pos_embed
if use_pos_embed:
self.pos_embed = PositionalEncodingFourier(dim=embed_dim)
self.pos_drop = nn.Dropout(p=drop_rate)
build_block = FANBlock
self.blocks = nn.ModuleList([
build_block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,
attn_drop=attn_drop_rate, drop_path=drop_path_rate, norm_layer=norm_layer, eta=eta)
for i in range(depth)])
self.cls_attn_blocks = nn.ModuleList([
ClassAttentionBlock(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,
attn_drop=attn_drop_rate, act_layer=act_layer, norm_layer=norm_layer, eta=eta, tokens_norm=tokens_norm)
for _ in range(cls_attn_layers)])
if isinstance(self.patch_embed, HybridEmbed) and feat_downsample:
self.learnable_downsample = nn.Conv2d(in_channels=embed_dim,
out_channels=768,
kernel_size=3,
stride=2,
padding=1,
dilation=1,
groups=1,
bias=True)
# Init weights
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
def init_weights(self, pretrained=None):
"""Init Weights"""
if isinstance(pretrained, str):
logger = get_root_logger()
update_pretrained = load_model(pretrained)
load_checkpoint(self, update_pretrained, map_location='cpu', strict=False, logger=logger)
def _init_weights(self, m):
"""Init Weights"""
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
"""Ignore jit compile"""
return {'pos_embed', 'cls_token'}
def get_classifier(self):
"""Function to get classifier head."""
return self.head
def reset_classifier(self, num_classes, global_pool=''):
"""Resets head of classifier with num_classes"""
self.num_classes = num_classes
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
"""Extract Features."""
outs = []
out_index = [4, 7, 11]
B = x.shape[0]
        # x is (B, N, C). (Hp, Wp) is (height in units of patches, width in units of patches)
if isinstance(self.patch_embed, HybridEmbed):
x, (Hp, Wp), out_list = self.patch_embed(x, return_feat=True)
outs = outs + out_list
out_index = [self.out_idx]
else:
x, (Hp, Wp) = self.patch_embed(x)
if self.use_pos_embed:
# `pos_embed` (B, C, Hp, Wp), reshape -> (B, C, N), permute -> (B, N, C)
pos_encoding = self.pos_embed(B, Hp, Wp).reshape(B, -1, x.shape[1]).permute(0, 2, 1)
x = x + pos_encoding
x = self.pos_drop(x)
for idx, blk in enumerate(self.blocks):
blk.H, blk.W = Hp, Wp
if self.use_checkpoint:
x = checkpoint.checkpoint(blk, x)
else:
x = blk(x)
Hp, Wp = blk.H, blk.W
if idx in out_index:
outs.append(x.reshape(B, Hp, Wp, -1).permute(0, 3, 1, 2).contiguous())
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
for blk in self.cls_attn_blocks:
x = blk(x)
tmp = x[:, 1:, :].reshape(B, Hp, Wp, -1).permute(0, 3, 1, 2).contiguous()
if isinstance(self.patch_embed, HybridEmbed) and self.feat_downsample:
tmp = self.learnable_downsample(tmp)
outs.append(tmp)
else:
outs.append(x[:, 1:, :].reshape(B, Hp, Wp, -1).permute(0, 3, 1, 2).contiguous())
return outs
def forward(self, x):
"""Forward Function"""
x = self.forward_features(x)
return x
def get_last_selfattention(self, x):
"""Returns last self-attention"""
B = x.shape[0]
        # x is (B, N, C). (Hp, Wp) is (height in units of patches, width in units of patches)
x, (Hp, Wp) = self.patch_embed(x)
if self.use_pos_embed:
# `pos_embed` (B, C, Hp, Wp), reshape -> (B, C, N), permute -> (B, N, C)
pos_encoding = self.pos_embed(B, Hp, Wp).reshape(B, -1, x.shape[1]).permute(0, 2, 1)
x = x + pos_encoding
x = self.pos_drop(x)
        for blk in self.blocks:
            blk.H, blk.W = Hp, Wp
            x = blk(x)
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
for i, blk in enumerate(self.cls_attn_blocks):
            if i < len(self.cls_attn_blocks) - 1:
x = blk(x)
else:
return blk(x, return_attention=True)
return None
# FAN-Hybrid models
@BACKBONES.register_module()
class fan_tiny_8_p4_hybrid(FAN):
"""FAN Hybrid Tiny"""
def __init__(self, **kwargs):
"""Init Function"""
depth = 8
model_args = dict(depths=[3, 3], dims=[128, 256, 512, 1024], use_head=False)
backbone = _create_hybrid_backbone(pretrained=False, pretrained_strict=False, **model_args)
super(fan_tiny_8_p4_hybrid, self).__init__(patch_size=16, in_chans=3, num_classes=1000, embed_dim=192, depth=depth, backbone=backbone, out_idx=7,
num_heads=8, mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.,
act_layer=None, norm_layer=None, cls_attn_layers=2, use_pos_embed=True, eta=1., tokens_norm=True)
@BACKBONES.register_module()
class fan_small_12_p4_hybrid(FAN):
"""FAN Hybrid Small"""
def __init__(self, **kwargs):
"""Init Function"""
depth = 10
model_args = dict(depths=[3, 3], dims=[128, 256, 512, 1024], use_head=False)
backbone = _create_hybrid_backbone(pretrained=False, pretrained_strict=False, **model_args)
super(fan_small_12_p4_hybrid, self).__init__(patch_size=16, in_chans=3, num_classes=1000, embed_dim=384, depth=depth, backbone=backbone, out_idx=9,
num_heads=8, mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.,
act_layer=None, norm_layer=None, cls_attn_layers=2, use_pos_embed=True, eta=1., tokens_norm=True, feat_downsample=False)
@BACKBONES.register_module()
class fan_base_16_p4_hybrid(FAN):
"""FAN Hybrid Base"""
def __init__(self, **kwargs):
"""Init Function"""
depth = 16
model_args = dict(depths=[3, 3], dims=[128, 256, 512, 1024], use_head=False)
backbone = _create_hybrid_backbone(pretrained=False, pretrained_strict=False, **model_args)
super(fan_base_16_p4_hybrid, self).__init__(patch_size=16, in_chans=3, num_classes=1000, embed_dim=448, depth=depth, backbone=backbone, out_idx=15, feat_downsample=False,
num_heads=8, mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.,
act_layer=None, norm_layer=None, cls_attn_layers=2, use_pos_embed=True, eta=1., tokens_norm=True, default_cfg=None)
@BACKBONES.register_module()
class fan_large_16_p4_hybrid(FAN):
"""FAN Hybrid Large"""
def __init__(self, **kwargs):
"""Init Function"""
depth = 22
model_args = dict(depths=[3, 5], dims=[128, 256, 512, 1024], use_head=False)
backbone = _create_hybrid_backbone(pretrained=False, pretrained_strict=False, **model_args)
super(fan_large_16_p4_hybrid, self).__init__(patch_size=16, in_chans=3, num_classes=1000, embed_dim=480, depth=depth, backbone=backbone, out_idx=18,
num_heads=10, mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.,
act_layer=None, norm_layer=None, cls_attn_layers=2, use_pos_embed=True, eta=1., tokens_norm=True, use_checkpoint=False)
# FAN-ViT Models
@BACKBONES.register_module()
class fan_small_12_p16_224(FAN):
"""FAN ViT Small"""
def __init__(self, **kwargs):
"""Init Function"""
depth = 12
super(fan_small_12_p16_224, self).__init__(patch_size=16, in_chans=3, num_classes=1000, embed_dim=384, depth=depth,
num_heads=8, mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.,
act_layer=None, norm_layer=None, cls_attn_layers=2, use_pos_embed=True, eta=1., tokens_norm=True)
@BACKBONES.register_module()
class fan_base_18_p16_224(FAN):
"""FAN ViT Base"""
def __init__(self, **kwargs):
"""Init Function"""
depth = 18
super(fan_base_18_p16_224, self).__init__(patch_size=16, in_chans=3, num_classes=1000, embed_dim=448, depth=depth,
                                                   num_heads=8, mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., out_idx=16,
act_layer=None, norm_layer=None, cls_attn_layers=2, use_pos_embed=True, eta=1., tokens_norm=True, use_checkpoint=False)
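# Illustrative sketch (not part of the library): the registered classes above are thin wrappers
# that pin depth, embed_dim, and num_heads for each FAN variant. Construction-only example
# (no forward pass here, since the depth-wise conv MLP and the hybrid stem rely on
# nn.SyncBatchNorm, which expects a distributed process group):
def _fan_backbone_construction_sketch():
    """Rough example of instantiating one of the FAN-ViT variants registered above."""
    model = fan_small_12_p16_224()
    # forward_features() would return a list of multi-scale feature maps taken at the block
    # indices in `out_index`, plus the final class-attention output.
    return sum(p.numel() for p in model.parameters())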
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/model/backbones/fan.py |
# ---------------------------------------------------------------
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the NVIDIA Source Code License
# ---------------------------------------------------------------
"""Mix Transformer Module."""
import torch
import torch.nn as nn
from functools import partial
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from nvidia_tao_pytorch.cv.segformer.model.builder import BACKBONES
from nvidia_tao_pytorch.cv.segformer.utils import get_root_logger
from nvidia_tao_pytorch.cv.segformer.model.utils.common_utils import load_model
from mmcv.runner import load_checkpoint
import math
class Mlp(nn.Module):
"""MLP Class."""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0., export=False):
"""Init Function."""
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.dwconv = DWConv(hidden_features, export)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
self.apply(self._init_weights)
def _init_weights(self, m):
"""Init Function."""
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x, H, W):
"""Forward."""
x = self.fc1(x)
x = self.dwconv(x, H, W)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
"""Attention Class."""
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., sr_ratio=1):
"""Init Module."""
super().__init__()
assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."
self.dim = dim
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.sr_ratio = sr_ratio
if sr_ratio > 1:
self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio)
self.norm = nn.LayerNorm(dim)
self.apply(self._init_weights)
def _init_weights(self, m):
"""Init Weights."""
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x, H, W):
"""Forward."""
B, N, C = x.shape
q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
if self.sr_ratio > 1:
x_ = x.permute(0, 2, 1).reshape(B, C, H, W)
x_ = self.sr(x_).reshape(B, C, -1).permute(0, 2, 1)
x_ = self.norm(x_)
kv = self.kv(x_).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
else:
kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
k, v = kv[0], kv[1]
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
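# Illustrative sketch (not part of the library): with sr_ratio > 1 the Attention module above
# spatially downsamples the keys/values, so N queries attend over only N / sr_ratio**2
# key/value tokens. Minimal usage with hypothetical sizes:
def _sr_attention_usage_sketch():
    """Rough usage example for the spatial-reduction Attention defined above."""
    attn = Attention(dim=64, num_heads=2, sr_ratio=2)
    H, W = 8, 8
    x = torch.randn(1, H * W, 64)  # 64 query tokens
    out = attn(x, H, W)  # keys/values are reduced to (H // 2) * (W // 2) = 16 tokens
    return out.shape  # torch.Size([1, 64, 64])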
class Block(nn.Module):
"""Block Class."""
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1, export=False):
"""Init Module."""
super().__init__()
self.export = export
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
attn_drop=attn_drop, proj_drop=drop, sr_ratio=sr_ratio)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop, export=self.export)
self.apply(self._init_weights)
def _init_weights(self, m):
"""Init Weights."""
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x, H, W):
""" Forward Module."""
x = x + self.drop_path(self.attn(self.norm1(x), H, W))
x = x + self.drop_path(self.mlp(self.norm2(x), H, W))
return x
class OverlapPatchEmbed(nn.Module):
""" Image to Patch Embedding."""
def __init__(self, img_size=224, patch_size=7, stride=4, in_chans=3, embed_dim=768, export=False):
"""Init Function."""
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.H, self.W = img_size[0] // patch_size[0], img_size[1] // patch_size[1]
self.num_patches = self.H * self.W
self.export = export
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=stride,
padding=(patch_size[0] // 2, patch_size[1] // 2))
self.norm = nn.LayerNorm(embed_dim)
self.apply(self._init_weights)
def _init_weights(self, m):
"""Init Weights Function."""
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x):
"""Forward Function."""
x = self.proj(x)
_, _, H, W = x.shape
if self.export:
_, C, H, W = x.shape
x = x.reshape(-1, C, H * W).transpose(1, 2)
else:
x = x.flatten(2).transpose(1, 2)
x = self.norm(x)
return x, H, W
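# Illustrative sketch (not part of the library): the overlapping patch embedding above is a
# strided convolution with padding, so a 224x224 image with patch_size=7 and stride=4 becomes
# a 56x56 grid of tokens. Minimal usage with hypothetical sizes:
def _overlap_patch_embed_usage_sketch():
    """Rough usage example for OverlapPatchEmbed defined above."""
    embed = OverlapPatchEmbed(img_size=224, patch_size=7, stride=4, in_chans=3, embed_dim=64)
    x = torch.randn(1, 3, 224, 224)
    tokens, H, W = embed(x)  # tokens: (1, 56 * 56, 64), H = W = 56
    return tokens.shape, H, W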
class MixVisionTransformer(nn.Module):
"""Mix ViT Class."""
def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dims=[64, 128, 256, 512],
num_heads=[1, 2, 4, 8], mlp_ratios=[4, 4, 4, 4], qkv_bias=False, qk_scale=None, drop_rate=0.,
attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm,
depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1], export=False):
"""Init Function."""
super().__init__()
self.num_classes = num_classes
self.depths = depths
self.export = export
# patch_embed
self.patch_embed1 = OverlapPatchEmbed(img_size=img_size, patch_size=7, stride=4, in_chans=in_chans,
embed_dim=embed_dims[0], export=self.export)
self.patch_embed2 = OverlapPatchEmbed(img_size=img_size // 4, patch_size=3, stride=2, in_chans=embed_dims[0],
embed_dim=embed_dims[1], export=self.export)
self.patch_embed3 = OverlapPatchEmbed(img_size=img_size // 8, patch_size=3, stride=2, in_chans=embed_dims[1],
embed_dim=embed_dims[2], export=self.export)
self.patch_embed4 = OverlapPatchEmbed(img_size=img_size // 16, patch_size=3, stride=2, in_chans=embed_dims[2],
embed_dim=embed_dims[3], export=self.export)
# transformer encoder
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
cur = 0
self.block1 = nn.ModuleList([Block(
dim=embed_dims[0], num_heads=num_heads[0], mlp_ratio=mlp_ratios[0], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[0], export=self.export)
for i in range(depths[0])])
self.norm1 = norm_layer(embed_dims[0])
cur += depths[0]
self.block2 = nn.ModuleList([Block(
dim=embed_dims[1], num_heads=num_heads[1], mlp_ratio=mlp_ratios[1], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[1], export=self.export)
for i in range(depths[1])])
self.norm2 = norm_layer(embed_dims[1])
cur += depths[1]
self.block3 = nn.ModuleList([Block(
dim=embed_dims[2], num_heads=num_heads[2], mlp_ratio=mlp_ratios[2], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[2], export=self.export)
for i in range(depths[2])])
self.norm3 = norm_layer(embed_dims[2])
cur += depths[2]
self.block4 = nn.ModuleList([Block(
dim=embed_dims[3], num_heads=num_heads[3], mlp_ratio=mlp_ratios[3], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[3], export=self.export)
for i in range(depths[3])])
self.norm4 = norm_layer(embed_dims[3])
# classification head
# self.head = nn.Linear(embed_dims[3], num_classes) if num_classes > 0 else nn.Identity()
self.apply(self._init_weights)
def _init_weights(self, m):
"""Init Weights."""
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def init_weights(self, pretrained=None):
"""Init Weights."""
if isinstance(pretrained, str):
logger = get_root_logger()
update_pretrained = load_model(pretrained)
load_checkpoint(self, update_pretrained, map_location='cpu', strict=False, logger=logger)
def reset_drop_path(self, drop_path_rate):
"""Reset Drop Path."""
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(self.depths))]
cur = 0
for i in range(self.depths[0]):
self.block1[i].drop_path.drop_prob = dpr[cur + i]
cur += self.depths[0]
for i in range(self.depths[1]):
self.block2[i].drop_path.drop_prob = dpr[cur + i]
cur += self.depths[1]
for i in range(self.depths[2]):
self.block3[i].drop_path.drop_prob = dpr[cur + i]
cur += self.depths[2]
for i in range(self.depths[3]):
self.block4[i].drop_path.drop_prob = dpr[cur + i]
def freeze_patch_emb(self):
"""Freeze Patch Emb."""
self.patch_embed1.requires_grad = False
@torch.jit.ignore
def no_weight_decay(self):
""" No Weight Decay."""
return {'pos_embed1', 'pos_embed2', 'pos_embed3', 'pos_embed4', 'cls_token'} # has pos_embed may be better
def get_classifier(self):
""" Get Classifier."""
return self.head
def reset_classifier(self, num_classes, global_pool=''):
"""Reset Classifier."""
self.num_classes = num_classes
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
"""Forward Features."""
B = x.shape[0]
outs = []
# stage 1
x, H, W = self.patch_embed1(x)
for _, blk in enumerate(self.block1):
x = blk(x, H, W)
x = self.norm1(x)
x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
outs.append(x)
# stage 2
x, H, W = self.patch_embed2(x)
for _, blk in enumerate(self.block2):
x = blk(x, H, W)
x = self.norm2(x)
x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
outs.append(x)
# stage 3
x, H, W = self.patch_embed3(x)
for _, blk in enumerate(self.block3):
x = blk(x, H, W)
x = self.norm3(x)
x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
outs.append(x)
# stage 4
x, H, W = self.patch_embed4(x)
for _, blk in enumerate(self.block4):
x = blk(x, H, W)
x = self.norm4(x)
x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
outs.append(x)
return outs
def forward(self, x):
"""Forward."""
x = self.forward_features(x)
# x = self.head(x)
return x
class DWConv(nn.Module):
""" Model Class."""
def __init__(self, dim=768, export=False):
""" Init Function. """
super(DWConv, self).__init__()
self.dwconv = nn.Conv2d(dim, dim, 3, 1, 1, bias=True, groups=dim)
self.export = export
def forward(self, x, H, W):
""" Forward."""
B, _, C = x.shape
x = x.transpose(1, 2).view(B, C, H, W)
x = self.dwconv(x)
if self.export:
x = x.view(-1, C, H * W).transpose(1, 2)
else:
x = x.flatten(2).transpose(1, 2)
return x
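# Illustrative sketch (not part of the library): the Mlp defined earlier in this file routes
# its hidden activations through the DWConv above, which is why its forward needs the spatial
# size (H, W). Minimal usage with hypothetical sizes:
def _mlp_dwconv_usage_sketch():
    """Rough usage example for the Mlp + DWConv pair defined in this module."""
    mlp = Mlp(in_features=64, hidden_features=128)
    H, W = 8, 8
    x = torch.randn(2, H * W, 64)  # (B, N, C) with N = H * W
    out = mlp(x, H, W)  # (2, 64, 64): same token layout, channel-mixed features
    return out.shape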
@BACKBONES.register_module()
class mit_b0(MixVisionTransformer):
""" Model Class."""
def __init__(self, **kwargs):
""" Init Function. """
super(mit_b0, self).__init__(
patch_size=4, embed_dims=[32, 64, 160, 256], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4],
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1],
            drop_rate=0.0, drop_path_rate=0.1, export=kwargs["export"])
@BACKBONES.register_module()
class mit_b1(MixVisionTransformer):
""" Model Class."""
def __init__(self, **kwargs):
""" Init Function. """
super(mit_b1, self).__init__(
patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4],
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1],
drop_rate=0.0, drop_path_rate=0.1, export=kwargs["export"])
@BACKBONES.register_module()
class mit_b2(MixVisionTransformer):
""" Model Class."""
def __init__(self, **kwargs):
""" Init Function. """
super(mit_b2, self).__init__(
patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4],
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1],
drop_rate=0.0, drop_path_rate=0.1, export=kwargs["export"])
@BACKBONES.register_module()
class mit_b3(MixVisionTransformer):
""" Model Class."""
def __init__(self, **kwargs):
""" Init Function. """
super(mit_b3, self).__init__(
patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4],
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 4, 18, 3], sr_ratios=[8, 4, 2, 1],
drop_rate=0.0, drop_path_rate=0.1, export=kwargs["export"])
@BACKBONES.register_module()
class mit_b4(MixVisionTransformer):
""" Model Class."""
def __init__(self, **kwargs):
""" Init Function. """
super(mit_b4, self).__init__(
patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4],
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 8, 27, 3], sr_ratios=[8, 4, 2, 1],
drop_rate=0.0, drop_path_rate=0.1, export=kwargs["export"])
@BACKBONES.register_module()
class mit_b5(MixVisionTransformer):
""" Model Class."""
def __init__(self, **kwargs):
""" Init Function. """
super(mit_b5, self).__init__(
patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4],
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 6, 40, 3], sr_ratios=[8, 4, 2, 1],
drop_rate=0.0, drop_path_rate=0.1, export=kwargs["export"])
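# Illustrative sketch (not part of the library): each mit_b* class above only fixes the
# embedding dims, depths, and head counts of MixVisionTransformer. Minimal end-to-end example
# (runs on CPU, since this backbone uses LayerNorm rather than SyncBatchNorm):
def _mit_b0_usage_sketch():
    """Rough usage example for the smallest MiT variant registered above."""
    backbone = mit_b0(export=False)
    x = torch.randn(1, 3, 224, 224)
    outs = backbone(x)  # four feature maps at strides 4, 8, 16, and 32
    # expected shapes: (1, 32, 56, 56), (1, 64, 28, 28), (1, 160, 14, 14), (1, 256, 7, 7)
    return [o.shape for o in outs]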
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/model/backbones/mix_transformer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Init Module."""
from .mix_transformer import * # noqa: F403, F401
from .fan import * # noqa: F403, F401
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/model/backbones/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmsegmentation
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encoder Decoder Module."""
import torch
import torch.nn as nn
import torch.nn.functional as F
from nvidia_tao_pytorch.cv.segformer.core import add_prefix
from nvidia_tao_pytorch.cv.segformer.ops import resize
from nvidia_tao_pytorch.cv.segformer.model import builder
from nvidia_tao_pytorch.cv.segformer.model.builder import SEGMENTORS
from nvidia_tao_pytorch.cv.segformer.model.segmentors.base import BaseSegmentor
@SEGMENTORS.register_module()
class EncoderDecoder(BaseSegmentor):
"""Encoder Decoder segmentors.
EncoderDecoder typically consists of backbone, decode_head, auxiliary_head.
Note that auxiliary_head is only used for deep supervision during training,
which could be dumped during inference.
"""
def __init__(self,
backbone,
decode_head,
neck=None,
auxiliary_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None,
export=False):
"""Init Module."""
super(EncoderDecoder, self).__init__()
self.backbone = builder.build_backbone(backbone)
if neck is not None:
self.neck = builder.build_neck(neck)
self._init_decode_head(decode_head)
self._init_auxiliary_head(auxiliary_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.export = export
self.init_weights(pretrained=pretrained)
assert self.with_decode_head, "The decode head was not built."
def _init_decode_head(self, decode_head):
"""Initialize ``decode_head``"""
self.decode_head = builder.build_head(decode_head)
self.align_corners = self.decode_head.align_corners
self.num_classes = self.decode_head.num_classes
def _init_auxiliary_head(self, auxiliary_head):
"""Initialize ``auxiliary_head``"""
if auxiliary_head is not None:
if isinstance(auxiliary_head, list):
self.auxiliary_head = nn.ModuleList()
for head_cfg in auxiliary_head:
self.auxiliary_head.append(builder.build_head(head_cfg))
else:
self.auxiliary_head = builder.build_head(auxiliary_head)
def init_weights(self, pretrained=None):
"""Initialize the weights in backbone and heads.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None.
"""
super(EncoderDecoder, self).init_weights(pretrained)
self.backbone.init_weights(pretrained=pretrained)
self.decode_head.init_weights()
if self.with_auxiliary_head:
if isinstance(self.auxiliary_head, nn.ModuleList):
for aux_head in self.auxiliary_head:
aux_head.init_weights()
else:
self.auxiliary_head.init_weights()
def extract_feat(self, img):
"""Extract features from images."""
x = self.backbone(img)
if self.with_neck:
x = self.neck(x)
return x
def encode_decode(self, img, img_metas):
"""Encode images with backbone and decode into a semantic segmentation of the same size as input."""
x = self.extract_feat(img)
out = self._decode_head_forward_test(x, img_metas)
out = resize(
input=out,
size=img.shape[2:],
mode='bilinear',
align_corners=self.align_corners)
return out
def _decode_head_forward_train(self, x, img_metas, gt_semantic_seg):
"""Run forward function and calculate loss for decode head in training."""
losses = dict()
loss_decode = self.decode_head.forward_train(x, img_metas,
gt_semantic_seg,
self.train_cfg)
losses.update(add_prefix(loss_decode, 'decode'))
return losses
def _decode_head_forward_test(self, x, img_metas):
"""Run forward function and calculate loss for decode head in inference."""
seg_logits = self.decode_head.forward_test(x, img_metas, self.test_cfg)
return seg_logits
def _auxiliary_head_forward_train(self, x, img_metas, gt_semantic_seg):
"""Run forward function and calculate loss for auxiliary head in training."""
losses = dict()
if isinstance(self.auxiliary_head, nn.ModuleList):
for idx, aux_head in enumerate(self.auxiliary_head):
loss_aux = aux_head.forward_train(x, img_metas,
gt_semantic_seg,
self.train_cfg)
losses.update(add_prefix(loss_aux, f'aux_{idx}'))
else:
loss_aux = self.auxiliary_head.forward_train(
x, img_metas, gt_semantic_seg, self.train_cfg)
losses.update(add_prefix(loss_aux, 'aux'))
return losses
def forward_dummy(self, img):
"""Dummy forward function."""
seg_logit = self.encode_decode(img, None)
return seg_logit
def forward_train(self, img, img_metas, gt_semantic_seg):
"""Forward function for training.
Args:
img (Tensor): Input images.
img_metas (list[dict]): List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmseg/datasets/pipelines/formatting.py:Collect`.
gt_semantic_seg (Tensor): Semantic segmentation masks
used if the architecture supports semantic segmentation task.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
x = self.extract_feat(img)
losses = dict()
loss_decode = self._decode_head_forward_train(x, img_metas,
gt_semantic_seg)
losses.update(loss_decode)
if self.with_auxiliary_head:
loss_aux = self._auxiliary_head_forward_train(
x, img_metas, gt_semantic_seg)
losses.update(loss_aux)
return losses
# TODO refactor
def slide_inference(self, img, img_meta, rescale):
"""Inference by sliding-window with overlap.
If h_crop > h_img or w_crop > w_img, the small patch will be used to
decode without padding.
"""
h_stride, w_stride = self.test_cfg["stride"]
h_crop, w_crop = self.test_cfg["crop_size"]
batch_size, _, h_img, w_img = img.size()
num_classes = self.num_classes
h_grids = max(h_img - h_crop + h_stride - 1, 0) // h_stride + 1
w_grids = max(w_img - w_crop + w_stride - 1, 0) // w_stride + 1
preds = img.new_zeros((batch_size, num_classes, h_img, w_img))
count_mat = img.new_zeros((batch_size, 1, h_img, w_img))
for h_idx in range(h_grids):
for w_idx in range(w_grids):
y1 = h_idx * h_stride
x1 = w_idx * w_stride
y2 = min(y1 + h_crop, h_img)
x2 = min(x1 + w_crop, w_img)
y1 = max(y2 - h_crop, 0)
x1 = max(x2 - w_crop, 0)
crop_img = img[:, :, y1:y2, x1:x2]
crop_seg_logit = self.encode_decode(crop_img, img_meta)
preds += F.pad(crop_seg_logit,
(int(x1), int(preds.shape[3] - x2), int(y1),
int(preds.shape[2] - y2)))
count_mat[:, :, y1:y2, x1:x2] += 1
        assert (count_mat == 0).sum() == 0, "count_mat should not contain any zero entries (every pixel must be covered by at least one window)."
if torch.onnx.is_in_onnx_export():
# cast count_mat to constant while exporting to ONNX
count_mat = torch.from_numpy(
count_mat.cpu().detach().numpy()).to(device=img.device)
preds = preds / count_mat
if rescale:
preds = resize(
preds,
size=img_meta[0]['ori_shape'][:2],
mode='bilinear',
align_corners=self.align_corners,
warning=False)
return preds
def whole_inference(self, img, img_meta, rescale):
"""Inference with full image."""
seg_logit = self.encode_decode(img, img_meta)
if rescale:
seg_logit = resize(
seg_logit,
size=img_meta[0]['ori_shape'][:2],
mode='bilinear',
align_corners=self.align_corners,
warning=False)
return seg_logit
def inference(self, img, img_meta, rescale):
"""Inference with slide/whole style.
Args:
img (Tensor): The input image of shape (N, 3, H, W).
img_meta (dict): Image info dict where each dict has: 'img_shape',
'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmseg/datasets/pipelines/formatting.py:Collect`.
rescale (bool): Whether rescale back to original shape.
Returns:
Tensor: The output segmentation map.
"""
assert self.test_cfg["mode"] in ['slide', 'whole'], "Test mode should be slide or whole."
ori_shape = img_meta[0]['ori_shape']
        assert all(_['ori_shape'] == ori_shape for _ in img_meta), "Shapes in img_meta do not match the original shape."
if self.test_cfg["mode"] == 'slide':
seg_logit = self.slide_inference(img, img_meta, rescale)
else:
seg_logit = self.whole_inference(img, img_meta, rescale)
if torch.onnx.is_in_onnx_export():
output = seg_logit.transpose(1, -1)
output = output.transpose(1, 2)
output = F.softmax(output, dim=-1)
output = output.transpose(1, -1)
output = output.transpose(-2, -1)
else:
output = F.softmax(seg_logit, dim=1)
flip = img_meta[0]['flip']
if flip:
flip_direction = img_meta[0]['flip_direction']
assert flip_direction in ['horizontal', 'vertical'], "flip_direction should be horizontal or vertical."
if flip_direction == 'horizontal':
output = output.flip(dims=(3, ))
elif flip_direction == 'vertical':
output = output.flip(dims=(2, ))
return output
def simple_test(self, img, img_meta, rescale=True):
"""Simple test with single image."""
seg_logit = self.inference(img, img_meta, rescale)
seg_pred = seg_logit.argmax(dim=1)
if torch.onnx.is_in_onnx_export():
# our inference backend only support 4D output
if self.export:
seg_pred = seg_pred.unsqueeze(3)
else:
seg_pred = seg_pred.unsqueeze(0)
return seg_pred
seg_pred = seg_pred.cpu().numpy()
# unravel batch dim
seg_pred = list(seg_pred)
return seg_pred
def aug_test(self, imgs, img_metas, rescale=True):
"""Test with augmentations.
Only rescale=True is supported.
"""
# aug_test rescale all imgs back to ori_shape for now
        assert rescale, "Only rescale=True is supported in aug_test."
# to save memory, we get augmented seg logit inplace
seg_logit = self.inference(imgs[0], img_metas[0], rescale)
for i in range(1, len(imgs)):
cur_seg_logit = self.inference(imgs[i], img_metas[i], rescale)
seg_logit += cur_seg_logit
seg_logit /= len(imgs)
seg_pred = seg_logit.argmax(dim=1)
seg_pred = seg_pred.cpu().numpy()
# unravel batch dim
seg_pred = list(seg_pred)
return seg_pred
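# Illustrative sketch (not part of the library): the window arithmetic used by slide_inference
# above, reproduced along one axis with plain Python for a hypothetical 512-pixel image,
# 256-pixel crop, and 171-pixel stride. Windows are clamped to the image border, so the last
# window overlaps its neighbour instead of being padded.
def _slide_window_sketch(h_img=512, h_crop=256, h_stride=171):
    """Rough walk-through of the sliding-window coordinates along one axis."""
    h_grids = max(h_img - h_crop + h_stride - 1, 0) // h_stride + 1  # 3 for the defaults
    windows = []
    for h_idx in range(h_grids):
        y1 = h_idx * h_stride
        y2 = min(y1 + h_crop, h_img)
        y1 = max(y2 - h_crop, 0)
        windows.append((y1, y2))  # [(0, 256), (171, 427), (256, 512)] for the defaults
    return windows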
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/model/segmentors/encoder_decoder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Init Module."""
from .encoder_decoder import EncoderDecoder
__all__ = ['EncoderDecoder']
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/model/segmentors/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmsegmentation
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base Model Module."""
import logging
import warnings
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
import mmcv
from mmcv.runner import auto_fp16
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
class BaseSegmentor(nn.Module):
"""Base class for segmentors."""
__metaclass__ = ABCMeta
def __init__(self):
"""Init Module."""
super(BaseSegmentor, self).__init__()
self.fp16_enabled = False
@property
def with_neck(self):
"""bool: whether the segmentor has neck"""
return hasattr(self, 'neck') and self.neck is not None
@property
def with_auxiliary_head(self):
"""bool: whether the segmentor has auxiliary head"""
return hasattr(self,
'auxiliary_head') and self.auxiliary_head is not None
@property
def with_decode_head(self):
"""bool: whether the segmentor has decode head"""
return hasattr(self, 'decode_head') and self.decode_head is not None
@abstractmethod
def extract_feat(self, imgs):
"""Placeholder for extract features from images."""
pass
@abstractmethod
def encode_decode(self, img, img_metas):
"""Placeholder for encode images with backbone and decode into a
semantic segmentation map of the same size as input.
"""
pass
@abstractmethod
def forward_train(self, imgs, img_metas, **kwargs):
"""Placeholder for Forward function for training."""
pass
@abstractmethod
def simple_test(self, img, img_meta, **kwargs):
"""Placeholder for single image test."""
pass
@abstractmethod
def aug_test(self, imgs, img_metas, **kwargs):
"""Placeholder for augmentation test."""
pass
def init_weights(self, pretrained=None):
"""Initialize the weights in segmentor.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None.
"""
if pretrained is not None:
logger = logging.getLogger()
logger.info(f'load model from: {pretrained}')
def forward_test(self, imgs, img_metas, **kwargs):
"""
Args:
imgs (List[Tensor]): the outer list indicates test-time
augmentations and inner Tensor should have a shape NxCxHxW,
which contains all images in the batch.
img_metas (List[List[dict]]): the outer list indicates test-time
augs (multiscale, flip, etc.) and the inner list indicates
images in a batch.
"""
for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
if not isinstance(var, list):
raise TypeError(f'{name} must be a list, but got '
f'{type(var)}')
num_augs = len(imgs)
if num_augs != len(img_metas):
raise ValueError(f'num of augmentations ({len(imgs)}) != '
f'num of image meta ({len(img_metas)})')
        # all images in the same aug batch should have the same ori_shape and
        # pad_shape
for img_meta in img_metas:
ori_shapes = [_['ori_shape'] for _ in img_meta]
assert all(shape == ori_shapes[0] for shape in ori_shapes), "Shape should be equal to original img shape."
img_shapes = [_['img_shape'] for _ in img_meta]
assert all(shape == img_shapes[0] for shape in img_shapes), "Image shapes are not equal to {}.".format(img_shapes)
pad_shapes = [_['pad_shape'] for _ in img_meta]
assert all(shape == pad_shapes[0] for shape in pad_shapes), "Pad shapes are not equal to {}.".format(pad_shapes)
if torch.cuda.is_available():
input_img = imgs[0].cuda()
return self.simple_test(input_img, img_metas[0], **kwargs)
@auto_fp16(apply_to=('img', ))
def forward(self, img, img_metas, return_loss=True, **kwargs):
"""Calls either :func:`forward_train` or :func:`forward_test` depending
on whether ``return_loss`` is ``True``.
Note this setting will change the expected inputs. When
``return_loss=True``, img and img_meta are single-nested (i.e. Tensor
        and List[dict]), and when ``return_loss=False``, img and img_meta
should be double nested (i.e. List[Tensor], List[List[dict]]), with
the outer list indicating test time augmentations.
"""
if return_loss:
return self.forward_train(img, img_metas, **kwargs)
return self.forward_test(img, img_metas, **kwargs)
@auto_fp16(apply_to=('img', ))
def forward_export(self, img_metas, return_loss, img, **kwargs):
"""Calls either :func:`forward_train` or :func:`forward_test` depending
on whether ``return_loss`` is ``True``.
Note this setting will change the expected inputs. When
``return_loss=True``, img and img_meta are single-nested (i.e. Tensor
        and List[dict]), and when ``return_loss=False``, img and img_meta
should be double nested (i.e. List[Tensor], List[List[dict]]), with
the outer list indicating test time augmentations.
"""
return self.forward_test(img, img_metas, **kwargs)
def train_step(self, data_batch, optimizer, **kwargs):
"""The iteration step during training.
This method defines an iteration step during training, except for the
back propagation and optimizer updating, which are done in an optimizer
hook. Note that in some complicated cases or models, the whole process
including back propagation and optimizer updating is also defined in
this method, such as GAN.
Args:
data (dict): The output of dataloader.
optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of
runner is passed to ``train_step()``. This argument is unused
and reserved.
Returns:
dict: It should contain at least 3 keys: ``loss``, ``log_vars``,
``num_samples``.
``loss`` is a tensor for back propagation, which can be a
weighted sum of multiple losses.
``log_vars`` contains all the variables to be sent to the
logger.
``num_samples`` indicates the batch size (when the model is
DDP, it means the batch size on each GPU), which is used for
averaging the logs.
"""
losses = self(**data_batch)
loss, log_vars = self._parse_losses(losses)
outputs = dict(
loss=loss,
log_vars=log_vars,
num_samples=len(data_batch['img'].data))
return outputs
def val_step(self, data_batch, **kwargs):
"""The iteration step during validation.
This method shares the same signature as :func:`train_step`, but used
during val epochs. Note that the evaluation after training epochs is
not implemented with this method, but an evaluation hook.
"""
output = self(**data_batch, **kwargs)
return output
@staticmethod
def _parse_losses(losses):
"""Parse the raw outputs (losses) of the network.
Args:
losses (dict): Raw output of the network, which usually contain
losses and other necessary information.
Returns:
tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor
which may be a weighted sum of all losses, log_vars contains
all the variables to be sent to the logger.
"""
log_vars = OrderedDict()
for loss_name, loss_value in losses.items():
if isinstance(loss_value, torch.Tensor):
log_vars[loss_name] = loss_value.mean()
elif isinstance(loss_value, list):
log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
else:
raise TypeError(
f'{loss_name} is not a tensor or list of tensors')
loss = sum(_value for _key, _value in log_vars.items()
if 'loss' in _key)
log_vars['loss'] = loss
for loss_name, loss_value in log_vars.items():
# reduce loss when distributed training
if dist.is_available() and dist.is_initialized():
loss_value = loss_value.data.clone()
dist.all_reduce(loss_value.div_(dist.get_world_size()))
log_vars[loss_name] = loss_value.item()
return loss, log_vars
def show_result(self,
img,
result,
palette=None,
win_name='',
show=False,
wait_time=0,
out_file=None):
"""Draw `result` over `img`.
Args:
img (str or Tensor): The image to be displayed.
result (Tensor): The semantic segmentation results to draw over
`img`.
palette (list[list[int]]] | np.ndarray | None): The palette of
segmentation map. If None is given, random palette will be
generated. Default: None
win_name (str): The window name.
wait_time (int): Value of waitKey param.
Default: 0.
show (bool): Whether to show the image.
Default: False.
out_file (str or None): The filename to write the image.
Default: None.
Returns:
img (Tensor): Only if not `show` or `out_file`
"""
img = mmcv.imread(img)
img = img.copy()
seg = result[0]
if palette is None:
if self.PALETTE is None:
palette = np.random.randint(
0, 255, size=(len(self.CLASSES), 3))
else:
palette = self.PALETTE
palette = np.array(palette)
        assert palette.shape[0] == len(self.CLASSES), "The number of palette colors should equal the number of classes."
        assert palette.shape[1] == 3, "Each palette color should have 3 (RGB) channels."
        assert len(palette.shape) == 2, "The palette should be a 2D array of colors."
color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8)
for label, color in enumerate(palette):
color_seg[seg == label, :] = color
# convert to BGR
color_seg = color_seg[..., ::-1]
# from IPython import embed; embed(header='debug vis')
img = img * 0.5 + color_seg * 0.5
img = img.astype(np.uint8)
# if out_file specified, do not show image in window
if out_file is not None:
show = False
if show:
mmcv.imshow(img, win_name, wait_time)
if out_file is not None:
mmcv.imwrite(img, out_file)
if not (show or out_file):
            warnings.warn('show==False and out_file is not specified, so only '
                          'the result image will be returned')
return img
return None
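# Illustrative sketch (not part of the library): _parse_losses above sums every entry whose key
# contains 'loss' into the total loss and turns everything else into scalar log variables.
# Minimal usage with hypothetical loss names:
def _parse_losses_usage_sketch():
    """Rough usage example for the static BaseSegmentor._parse_losses helper."""
    raw = {
        'decode.loss_seg': torch.tensor(0.7),  # contributes to the total loss
        'decode.acc_seg': torch.tensor(91.0),  # logged only
    }
    loss, log_vars = BaseSegmentor._parse_losses(raw)
    return loss, log_vars  # loss == 0.7; log_vars holds both entries plus the summed 'loss'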
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/model/segmentors/base.py |
# ---------------------------------------------------------------
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the NVIDIA Source Code License
# ---------------------------------------------------------------
"""MLP Segformer Head."""
import torch.nn as nn
import torch
from mmcv.cnn import ConvModule
from nvidia_tao_pytorch.cv.segformer.ops import resize
from nvidia_tao_pytorch.cv.segformer.model.utils import * # noqa pylint: disable=W0401, W0614
from nvidia_tao_pytorch.cv.segformer.model.builder import HEADS
from nvidia_tao_pytorch.cv.segformer.model.decode_heads.decode_head import BaseDecodeHead
class MLP(nn.Module):
"""Linear Embedding."""
def __init__(self, input_dim=2048, embed_dim=768, export=False):
"""Init."""
super().__init__()
self.proj = nn.Linear(input_dim, embed_dim)
self.export = export
def forward(self, x):
"""Forward."""
if self.export:
_, C, H, W = x.shape
x = x.view(-1, C, H * W).transpose(1, 2)
else:
x = x.flatten(2).transpose(1, 2)
x = self.proj(x)
return x
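# Illustrative sketch (not part of the library): the MLP above is a per-token linear projection
# that first flattens a (B, C, H, W) feature map into (B, H*W, C) tokens. Minimal usage with
# hypothetical sizes:
def _mlp_embedding_usage_sketch():
    """Rough usage example for the linear embedding defined above."""
    proj = MLP(input_dim=256, embed_dim=64)
    feat = torch.randn(2, 256, 16, 16)
    tokens = proj(feat)  # (2, 16 * 16, 64)
    return tokens.shape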
@HEADS.register_module()
class SegFormerHead(BaseDecodeHead):
"""
SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers
"""
def __init__(self, feature_strides, **kwargs):
"""Init Module."""
super(SegFormerHead, self).__init__(input_transform='multiple_select', **kwargs)
        assert len(feature_strides) == len(self.in_channels), "The number of feature strides: {} should equal the number of input channels: {}".format(len(feature_strides), len(self.in_channels))
        assert min(feature_strides) == feature_strides[0], "The first feature stride should be the smallest."
self.feature_strides = feature_strides
self.export = kwargs["export"]
c1_in_channels, c2_in_channels, c3_in_channels, c4_in_channels = self.in_channels
decoder_params = kwargs['decoder_params']
embedding_dim = decoder_params['embed_dim']
self.linear_c4 = MLP(input_dim=c4_in_channels, embed_dim=embedding_dim, export=self.export)
self.linear_c3 = MLP(input_dim=c3_in_channels, embed_dim=embedding_dim, export=self.export)
self.linear_c2 = MLP(input_dim=c2_in_channels, embed_dim=embedding_dim, export=self.export)
self.linear_c1 = MLP(input_dim=c1_in_channels, embed_dim=embedding_dim, export=self.export)
self.linear_fuse = ConvModule(
in_channels=embedding_dim * 4,
out_channels=embedding_dim,
kernel_size=1,
norm_cfg=dict(type='SyncBN', requires_grad=True)
)
self.linear_pred = nn.Conv2d(embedding_dim, self.num_classes, kernel_size=1)
def forward(self, inputs):
"""Forward."""
x = self._transform_inputs(inputs) # len=4, 1/4,1/8,1/16,1/32
c1, c2, c3, c4 = x
# MLP decoder on C1-C4 #
n, _, _, _ = c4.shape
_c4 = self.linear_c4(c4).permute(0, 2, 1).reshape(n, -1, c4.shape[2], c4.shape[3])
_c4 = resize(_c4, size=c1.size()[2:], mode='bilinear', align_corners=False)
_c3 = self.linear_c3(c3).permute(0, 2, 1).reshape(n, -1, c3.shape[2], c3.shape[3])
_c3 = resize(_c3, size=c1.size()[2:], mode='bilinear', align_corners=False)
_c2 = self.linear_c2(c2).permute(0, 2, 1).reshape(n, -1, c2.shape[2], c2.shape[3])
_c2 = resize(_c2, size=c1.size()[2:], mode='bilinear', align_corners=False)
_c1 = self.linear_c1(c1).permute(0, 2, 1).reshape(n, -1, c1.shape[2], c1.shape[3])
_c = self.linear_fuse(torch.cat([_c4, _c3, _c2, _c1], dim=1))
x = self.dropout(_c)
x = self.linear_pred(x)
return x
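# Illustrative sketch (not part of the library): the decoder above projects each backbone scale
# to a common embedding dim, upsamples everything to the 1/4-scale grid, and fuses the result
# with a 1x1 conv. The tensor-only mock-up below reproduces just the resize-and-concat step
# with hypothetical shapes (embedding dim 64, 1/4-scale grid of 64x64).
def _multi_scale_fusion_sketch():
    """Rough shape walk-through of the resize-and-concat step in SegFormerHead.forward."""
    _c1 = torch.randn(1, 64, 64, 64)  # 1/4 scale, already projected to the embedding dim
    _c2 = torch.randn(1, 64, 32, 32)  # 1/8 scale
    _c3 = torch.randn(1, 64, 16, 16)  # 1/16 scale
    _c4 = torch.randn(1, 64, 8, 8)    # 1/32 scale
    upsampled = [resize(c, size=_c1.size()[2:], mode='bilinear', align_corners=False)
                 for c in (_c4, _c3, _c2)]
    fused = torch.cat(upsampled + [_c1], dim=1)  # (1, 256, 64, 64), fed to the 1x1 fuse conv
    return fused.shape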
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/model/decode_heads/segformer_head.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Init module."""
from .segformer_head import SegFormerHead
__all__ = [
'SegFormerHead'
]
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/model/decode_heads/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmskeleton
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decode Head."""
from abc import ABCMeta, abstractmethod
import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from mmcv.runner import auto_fp16, force_fp32
from nvidia_tao_pytorch.cv.segformer.core import build_pixel_sampler
from nvidia_tao_pytorch.cv.segformer.ops import resize
from ..builder import build_loss
from ..losses import accuracy
class BaseDecodeHead(nn.Module, metaclass=ABCMeta):
"""Base class for BaseDecodeHead.
Args:
in_channels (int|Sequence[int]): Input channels.
channels (int): Channels after modules, before conv_seg.
num_classes (int): Number of classes.
dropout_ratio (float): Ratio of dropout layer. Default: 0.1.
conv_cfg (dict|None): Config of conv layers. Default: None.
norm_cfg (dict|None): Config of norm layers. Default: None.
act_cfg (dict): Config of activation layers.
Default: dict(type='ReLU')
in_index (int|Sequence[int]): Input feature index. Default: -1
input_transform (str|None): Transformation type of input features.
Options: 'resize_concat', 'multiple_select', None.
            'resize_concat': Multiple feature maps will be resized to the
                same size as the first one and then concatenated together.
                Usually used in FCN head of HRNet.
            'multiple_select': Multiple feature maps will be bundled into
                a list and passed into decode head.
None: Only one select feature map is allowed.
Default: None.
loss_decode (dict): Config of decode loss.
Default: dict(type='CrossEntropyLoss').
ignore_index (int | None): The label index to be ignored. When using
masked BCE loss, ignore_index should be set to None. Default: 255
sampler (dict|None): The config of segmentation map sampler.
Default: None.
align_corners (bool): align_corners argument of F.interpolate.
Default: False.
"""
def __init__(self,
in_channels,
channels,
*,
num_classes,
dropout_ratio=0.1,
conv_cfg=None,
norm_cfg=None,
act_cfg=dict(type='ReLU'),
in_index=-1,
input_transform=None,
loss_decode=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
decoder_params=None,
ignore_index=255,
sampler=None,
align_corners=False,
export=False):
"""Init Module."""
super(BaseDecodeHead, self).__init__()
self._init_inputs(in_channels, in_index, input_transform)
self.channels = channels
self.num_classes = num_classes
self.dropout_ratio = dropout_ratio
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.in_index = in_index
self.loss_decode = build_loss(loss_decode)
self.ignore_index = ignore_index
self.align_corners = align_corners
self.export = export
if sampler is not None:
self.sampler = build_pixel_sampler(sampler, context=self)
else:
self.sampler = None
self.conv_seg = nn.Conv2d(channels, num_classes, kernel_size=1)
if dropout_ratio > 0:
self.dropout = nn.Dropout2d(dropout_ratio)
else:
self.dropout = None
self.fp16_enabled = False
def extra_repr(self):
"""Extra repr."""
s = f'input_transform={self.input_transform}, ' \
f'ignore_index={self.ignore_index}, ' \
f'align_corners={self.align_corners}'
return s
def _init_inputs(self, in_channels, in_index, input_transform):
"""Check and initialize input transforms.
The in_channels, in_index and input_transform must match.
Specifically, when input_transform is None, only single feature map
will be selected. So in_channels and in_index must be of type int.
        When input_transform is not None, in_channels and in_index must be
        a list or tuple, with the same length.
Args:
in_channels (int|Sequence[int]): Input channels.
in_index (int|Sequence[int]): Input feature index.
input_transform (str|None): Transformation type of input features.
Options: 'resize_concat', 'multiple_select', None.
                'resize_concat': Multiple feature maps will be resized to the
                    same size as the first one and then concatenated together.
                    Usually used in FCN head of HRNet.
                'multiple_select': Multiple feature maps will be bundled into
                    a list and passed into decode head.
None: Only one select feature map is allowed.
"""
if input_transform is not None:
            assert input_transform in ['resize_concat', 'multiple_select'], "Input transform: {} is not supported.".format(input_transform)
self.input_transform = input_transform
self.in_index = in_index
if input_transform is not None:
assert isinstance(in_channels, (list, tuple)), "The input channels should be list or tuple."
assert isinstance(in_index, (list, tuple)), "The input index should be a list or tuple."
            assert len(in_channels) == len(in_index), "The number of input channels: {} should be equal to the number of input indices: {}.".format(len(in_channels), len(in_index))
if input_transform == 'resize_concat':
self.in_channels = sum(in_channels)
else:
self.in_channels = in_channels
else:
assert isinstance(in_channels, int), "The input channels should be of type int."
assert isinstance(in_index, int), "The input index should be integer."
self.in_channels = in_channels
def init_weights(self):
"""Initialize weights of classification layer."""
normal_init(self.conv_seg, mean=0, std=0.01)
def _transform_inputs(self, inputs):
"""Transform inputs for decoder.
Args:
inputs (list[Tensor]): List of multi-level img features.
Returns:
Tensor: The transformed inputs
"""
if self.input_transform == 'resize_concat':
inputs = [inputs[i] for i in self.in_index]
upsampled_inputs = [
resize(
input=x,
size=inputs[0].shape[2:],
mode='bilinear',
align_corners=self.align_corners) for x in inputs
]
inputs = torch.cat(upsampled_inputs, dim=1)
elif self.input_transform == 'multiple_select':
inputs = [inputs[i] for i in self.in_index]
else:
inputs = inputs[self.in_index]
return inputs
@auto_fp16()
@abstractmethod
def forward(self, inputs):
"""Placeholder of forward function."""
pass
def forward_train(self, inputs, img_metas, gt_semantic_seg, train_cfg):
"""Forward function for training.
Args:
inputs (list[Tensor]): List of multi-level img features.
img_metas (list[dict]): List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmseg/datasets/pipelines/formatting.py:Collect`.
gt_semantic_seg (Tensor): Semantic segmentation masks
used if the architecture supports semantic segmentation task.
train_cfg (dict): The training config.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
seg_logits = self.forward(inputs)
losses = self.losses(seg_logits, gt_semantic_seg)
return losses
def forward_test(self, inputs, img_metas, test_cfg):
"""Forward function for testing.
Args:
inputs (list[Tensor]): List of multi-level img features.
img_metas (list[dict]): List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmseg/datasets/pipelines/formatting.py:Collect`.
test_cfg (dict): The testing config.
Returns:
Tensor: Output segmentation map.
"""
return self.forward(inputs)
def cls_seg(self, feat):
"""Classify each pixel."""
if self.dropout is not None:
feat = self.dropout(feat)
output = self.conv_seg(feat)
return output
@force_fp32(apply_to=('seg_logit', ))
def losses(self, seg_logit, seg_label):
"""Compute segmentation loss."""
loss = dict()
if len(seg_label.shape) == 3:
            # BS is 1, hence unsqueeze
seg_label = torch.unsqueeze(seg_label, 0)
seg_logit = resize(
input=seg_logit,
size=seg_label.shape[2:],
mode='bilinear',
align_corners=self.align_corners)
if self.sampler is not None:
seg_weight = self.sampler.sample(seg_logit, seg_label)
else:
seg_weight = None
seg_label = seg_label.squeeze(1)
loss['loss_seg'] = self.loss_decode(
seg_logit,
seg_label,
weight=seg_weight,
ignore_index=self.ignore_index)
loss['acc_seg'] = accuracy(seg_logit, seg_label)
return loss
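# --- Editor's note: hedged usage sketch, not part of the original module ---
# Illustrates how `_init_inputs` resolves `in_channels` for the three
# `input_transform` modes. `_IdentityHead` and `_input_transform_example` are
# hypothetical names added only for this illustration; constructing the head
# assumes the default 'CrossEntropyLoss' is registered in the loss registry.
class _IdentityHead(BaseDecodeHead):
    """Minimal concrete subclass so the abstract base can be instantiated."""
    def forward(self, inputs):
        """Return the selected/concatenated inputs unchanged."""
        return self._transform_inputs(inputs)
def _input_transform_example():
    """Show the resolved `in_channels` for each input_transform mode."""
    # 'resize_concat' sums the per-level channels: 64 + 128 -> 192
    concat = _IdentityHead(in_channels=[64, 128], channels=32, num_classes=2,
                           in_index=[0, 1], input_transform='resize_concat')
    # 'multiple_select' keeps the list of channels as-is
    select = _IdentityHead(in_channels=[64, 128], channels=32, num_classes=2,
                           in_index=[0, 1], input_transform='multiple_select')
    # None selects a single feature map, so ints are required
    single = _IdentityHead(in_channels=256, channels=32, num_classes=2, in_index=-1)
    return concat.in_channels, select.in_channels, single.in_channels  # 192, [64, 128], 256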
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/model/decode_heads/decode_head.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmsegmentation
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inferencer Module"""
import os.path as osp
import pickle
import shutil
import tempfile
import numpy as np
import torch
import torch.distributed as dist
import mmcv
from mmcv.image import tensor2imgs
from mmcv.runner import get_dist_info
def np2tmp(array, temp_file_name=None):
"""Save ndarray to local numpy file.
Args:
array (ndarray): Ndarray to save.
temp_file_name (str): Numpy file name. If 'temp_file_name=None', this
function will generate a file name with tempfile.NamedTemporaryFile
to save ndarray. Default: None.
Returns:
str: The numpy file name.
"""
if temp_file_name is None:
temp_file_name = tempfile.NamedTemporaryFile(
suffix='.npy', delete=False).name
np.save(temp_file_name, array)
return temp_file_name
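# --- Editor's note: hedged usage sketch, not part of the original module ---
# np2tmp is what the `efficient_test` paths below use to spill per-image results
# to disk instead of keeping them in memory. `_np2tmp_example` is an illustrative
# name only; minimal round trip:
def _np2tmp_example():
    """Save a small array via np2tmp and load it back."""
    arr = np.arange(6).reshape(2, 3)
    path = np2tmp(arr)
    restored = np.load(path)
    assert (restored == arr).all()
    return path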
def single_gpu_test(model,
data_loader,
export=False,
show=False,
out_dir=None,
efficient_test=False):
"""Test with single GPU.
Args:
model (nn.Module): Model to be tested.
data_loader (utils.data.Dataloader): Pytorch data loader.
        show (bool): Whether to show results during inference. Default: False.
        out_dir (str, optional): If specified, the output results will be dumped
            into this directory.
        efficient_test (bool): Whether to save the results as local numpy files
            to save CPU memory during evaluation. Default: False.
Returns:
list: The prediction results.
"""
model.eval()
results = []
dataset = data_loader.dataset
prog_bar = mmcv.ProgressBar(len(dataset))
for _, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
if show or out_dir:
img_tensor = data['img'][0]
img_metas = data['img_metas'][0].data[0]
imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
assert len(imgs) == len(img_metas), "The number of images: {} should be equal to number of image metas: {}".format(len(imgs), len(img_metas))
for img, img_meta in zip(imgs, img_metas):
h, w, _ = img_meta['img_shape']
img_show = img[:h, :w, :]
ori_h, ori_w = img_meta['ori_shape'][:-1]
img_show = mmcv.imresize(img_show, (ori_w, ori_h))
if isinstance(result, list):
if efficient_test:
result = [np2tmp(_) for _ in result]
results.extend(result)
else:
if efficient_test:
result = np2tmp(result)
results.append(result)
batch_size = data['img'][0].size(0)
for _ in range(batch_size):
prog_bar.update()
return results
def multi_gpu_test(model,
data_loader,
tmpdir=None,
gpu_collect=False,
efficient_test=False):
"""Test model with multiple gpus.
    This method tests the model with multiple gpus and collects the results
    under two different modes: gpu and cpu. By setting 'gpu_collect=True',
    it encodes results to gpu tensors and uses gpu communication to collect
    them. In cpu mode it saves the results from different gpus to 'tmpdir'
    and the rank 0 worker collects them.
Args:
model (nn.Module): Model to be tested.
data_loader (utils.data.Dataloader): Pytorch data loader.
tmpdir (str): Path of directory to save the temporary results from
different gpus under cpu mode.
gpu_collect (bool): Option to use either gpu or cpu to collect results.
        efficient_test (bool): Whether to save the results as local numpy files
            to save CPU memory during evaluation. Default: False.
Returns:
list: The prediction results.
"""
model.eval()
results = []
dataset = data_loader.dataset
rank, world_size = get_dist_info()
if rank == 0:
prog_bar = mmcv.ProgressBar(len(dataset))
for _, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
if isinstance(result, list):
if efficient_test:
result = [np2tmp(_) for _ in result]
results.extend(result)
else:
if efficient_test:
result = np2tmp(result)
results.append(result)
if rank == 0:
batch_size = data['img'][0].size(0)
for _ in range(batch_size * world_size):
prog_bar.update()
# collect results from all ranks
if gpu_collect:
results = collect_results_gpu(results, len(dataset))
else:
results = collect_results_cpu(results, len(dataset), tmpdir)
return results
def collect_results_cpu(result_part, size, tmpdir=None):
"""Collect results with CPU."""
rank, world_size = get_dist_info()
# create a tmp dir if it is not specified
if tmpdir is None:
MAX_LEN = 512
# 32 is whitespace
dir_tensor = torch.full((MAX_LEN, ),
32,
dtype=torch.uint8,
device='cuda')
if rank == 0:
tmpdir = tempfile.mkdtemp()
tmpdir = torch.tensor(
bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
dir_tensor[:len(tmpdir)] = tmpdir
dist.broadcast(dir_tensor, 0)
tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
else:
mmcv.mkdir_or_exist(tmpdir)
# dump the part result to the dir
mmcv.dump(result_part, osp.join(tmpdir, 'part_{}.pkl'.format(rank)))
dist.barrier()
# collect all parts
if rank != 0:
return None
# load results of all parts from tmp dir
part_list = []
for i in range(world_size):
part_file = osp.join(tmpdir, 'part_{}.pkl'.format(i))
part_list.append(mmcv.load(part_file))
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
# remove tmp dir
shutil.rmtree(tmpdir)
return ordered_results
def collect_results_gpu(result_part, size):
"""Collect results with GPU."""
rank, world_size = get_dist_info()
# dump result part to tensor with pickle
part_tensor = torch.tensor(
bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda')
# gather all result part tensor shape
shape_tensor = torch.tensor(part_tensor.shape, device='cuda')
shape_list = [shape_tensor.clone() for _ in range(world_size)]
dist.all_gather(shape_list, shape_tensor)
# padding result part tensor to max length
shape_max = torch.tensor(shape_list).max()
part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')
part_send[:shape_tensor[0]] = part_tensor
part_recv_list = [
part_tensor.new_zeros(shape_max) for _ in range(world_size)
]
# gather all result part
dist.all_gather(part_recv_list, part_send)
if rank == 0:
part_list = []
for recv, shape in zip(part_recv_list, shape_list):
part_list.append(
pickle.loads(recv[:shape[0]].cpu().numpy().tobytes()))
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
return ordered_results
return None
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/inference/inferencer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Segformer inference module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/inference/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmsegmentation
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encoding Module."""
import torch
from torch import nn
from torch.nn import functional as F
class Encoding(nn.Module):
"""Encoding Layer: a learnable residual encoder.
Input is of shape (batch_size, channels, height, width).
Output is of shape (batch_size, num_codes, channels).
Args:
channels: dimension of the features or feature channels
num_codes: number of code words
"""
def __init__(self, channels, num_codes):
""" Init Module."""
super(Encoding, self).__init__()
# init codewords and smoothing factor
self.channels, self.num_codes = channels, num_codes
std = 1. / ((num_codes * channels)**0.5)
# [num_codes, channels]
self.codewords = nn.Parameter(
torch.empty(num_codes, channels,
dtype=torch.float).uniform_(-std, std),
requires_grad=True)
# [num_codes]
self.scale = nn.Parameter(
torch.empty(num_codes, dtype=torch.float).uniform_(-1, 0),
requires_grad=True)
@staticmethod
def scaled_l2(x, codewords, scale):
"""Scale Function."""
num_codes, channels = codewords.size()
batch_size = x.size(0)
reshaped_scale = scale.view((1, 1, num_codes))
expanded_x = x.unsqueeze(2).expand(
(batch_size, x.size(1), num_codes, channels))
reshaped_codewords = codewords.view((1, 1, num_codes, channels))
scaled_l2_norm = reshaped_scale * (
expanded_x - reshaped_codewords).pow(2).sum(dim=3)
return scaled_l2_norm
@staticmethod
    def aggregate(assignment_weights, x, codewords):
""" Aggregate function."""
num_codes, channels = codewords.size()
reshaped_codewords = codewords.view((1, 1, num_codes, channels))
batch_size = x.size(0)
expanded_x = x.unsqueeze(2).expand(
(batch_size, x.size(1), num_codes, channels))
        encoded_feat = (assignment_weights.unsqueeze(3) *
                        (expanded_x - reshaped_codewords)).sum(dim=1)
return encoded_feat
def forward(self, x):
"""Forward Function."""
        assert x.dim() == 4 and x.size(1) == self.channels, "Expected a 4D input with {} channels, got shape {}.".format(self.channels, tuple(x.shape))
# [batch_size, channels, height, width]
batch_size = x.size(0)
# [batch_size, height x width, channels]
x = x.view(batch_size, self.channels, -1).transpose(1, 2).contiguous()
        # assignment_weights: [batch_size, height x width, num_codes]
        assignment_weights = F.softmax(
            self.scaled_l2(x, self.codewords, self.scale), dim=2)
        # aggregate
        encoded_feat = self.aggregate(assignment_weights, x, self.codewords)
return encoded_feat
def __repr__(self):
"""Repr Function."""
repr_str = self.__class__.__name__
repr_str += f'(Nx{self.channels}xHxW =>Nx{self.num_codes}' \
f'x{self.channels})'
return repr_str
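# --- Editor's note: hedged usage sketch, not part of the original module ---
# Exercises the documented shapes: (B, C, H, W) in, (B, num_codes, C) out.
# `_encoding_shape_example` and the tensor sizes are illustrative only.
def _encoding_shape_example():
    """Run a random feature map through Encoding and check the output shape."""
    enc = Encoding(channels=16, num_codes=32)
    feats = torch.randn(2, 16, 8, 8)  # (batch, channels, height, width)
    out = enc(feats)                  # (batch, num_codes, channels)
    assert out.shape == (2, 32, 16)
    return out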
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/ops/encoding.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Ops Init Module."""
from nvidia_tao_pytorch.cv.segformer.ops.encoding import Encoding
from nvidia_tao_pytorch.cv.segformer.ops.wrappers import Upsample, resize
__all__ = ['Upsample', 'resize', 'Encoding']
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/ops/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmsegmentation
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrappers Module."""
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
def resize(input, # pylint: disable=W0622
size=None,
scale_factor=None,
mode='nearest',
align_corners=None,
warning=True):
""" Resize Function."""
if warning:
if size is not None and align_corners:
input_h, input_w = tuple(int(x) for x in input.shape[2:])
output_h, output_w = tuple(int(x) for x in size)
            if output_h > input_h or output_w > input_w:  # pylint: disable=R0916
if ((output_h > 1 and output_w > 1 and input_h > 1 and # pylint: disable=R0916
input_w > 1) and (output_h - 1) % (input_h - 1) and (output_w - 1) % (input_w - 1)): # pylint: disable=R0916
warnings.warn(
f'When align_corners={align_corners}, '
                        'the output would be more aligned if '
f'input size {(input_h, input_w)} is `x+1` and '
f'out size {(output_h, output_w)} is `nx+1`')
if isinstance(size, torch.Size):
size = tuple(int(x) for x in size)
return F.interpolate(input, size, scale_factor, mode, align_corners)
class Upsample(nn.Module):
"""Upsampling Layer Class."""
def __init__(self,
size=None,
scale_factor=None,
mode='nearest',
align_corners=None):
"""Init Module."""
super(Upsample, self).__init__()
self.size = size
if isinstance(scale_factor, tuple):
self.scale_factor = tuple(float(factor) for factor in scale_factor)
else:
self.scale_factor = float(scale_factor) if scale_factor else None
self.mode = mode
self.align_corners = align_corners
def forward(self, x):
"""Forward Class."""
if not self.size:
size = [int(t * self.scale_factor) for t in x.shape[-2:]]
else:
size = self.size
return resize(x, size, None, self.mode, self.align_corners)
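# --- Editor's note: hedged usage sketch, not part of the original module ---
# Shows the two call styles used in this repo: resize() to an explicit spatial
# size (as the decode heads do) and the Upsample module with a scale factor.
# `_resize_example` and the tensor sizes are illustrative only.
def _resize_example():
    """Upsample a feature map by target size and by scale factor."""
    x = torch.randn(1, 8, 16, 16)
    by_size = resize(x, size=(64, 64), mode='bilinear', align_corners=False)
    by_scale = Upsample(scale_factor=2.0, mode='bilinear', align_corners=False)(x)
    return by_size.shape, by_scale.shape  # (1, 8, 64, 64) and (1, 8, 32, 32)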
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/ops/wrappers.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Trainer Module. """
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/trainer/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train Segformer model."""
import random
import warnings
import numpy as np
import torch
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import build_optimizer, build_runner
from mmcv.runner.builder import RUNNERS
from mmcv.runner.iter_based_runner import IterBasedRunner
from mmcv.runner.checkpoint import save_checkpoint
from nvidia_tao_pytorch.cv.segformer.core import DistEvalHook, EvalHook
from nvidia_tao_pytorch.cv.segformer.dataloader.data_utils import build_dataloader, build_dataset
from nvidia_tao_pytorch.cv.segformer.utils import get_root_logger
import os.path as osp
from typing import Optional, Dict
def set_random_seed(seed, deterministic=False):
"""Set random seed.
Args:
seed (int): Seed to be used.
deterministic (bool): Whether to set the deterministic option for
CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
to True and `torch.backends.cudnn.benchmark` to False.
Default: False.
"""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
if deterministic:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
@RUNNERS.register_module()
class TAOIterBasedRunner(IterBasedRunner):
"""TAO Epoch based runner.
Overrides mmcv.runner.epoch_based_runner.EpochBaseRunner to save checkpoints
without symlinks which requires root access.
"""
def save_checkpoint( # type: ignore
self,
out_dir: str,
filename_tmpl: str = 'iter_{}.pth',
meta: Optional[Dict] = None,
save_optimizer: bool = True,
create_symlink: bool = True) -> None:
"""Save checkpoint to file.
Args:
out_dir (str): Directory to save checkpoint files.
filename_tmpl (str, optional): Checkpoint file template.
Defaults to 'iter_{}.pth'.
meta (dict, optional): Metadata to be saved in checkpoint.
Defaults to None.
save_optimizer (bool, optional): Whether save optimizer.
Defaults to True.
create_symlink (bool, optional): Whether create symlink to the
latest checkpoint file. Defaults to True.
"""
if meta is None:
meta = {}
elif not isinstance(meta, dict):
raise TypeError(
f'meta should be a dict or None, but got {type(meta)}')
if self.meta is not None:
meta.update(self.meta)
# Note: meta.update(self.meta) should be done before
# meta.update(epoch=self.epoch + 1, iter=self.iter) otherwise
# there will be problems with resumed checkpoints.
# More details in https://github.com/open-mmlab/mmcv/pull/1108
meta.update(epoch=self.epoch + 1, iter=self.iter)
filename = filename_tmpl.format(self.iter + 1)
filepath = osp.join(out_dir, filename)
optimizer = self.optimizer if save_optimizer else None
save_checkpoint(self.model, filepath, optimizer=optimizer, meta=meta)
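    # Editor's note: unlike mmcv's IterBasedRunner, this override intentionally
    # ignores `create_symlink`, so no `latest.pth` symlink is produced (creating
    # symlinks can fail in containers without the required permissions).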
def train_segmentor(model,
dataset,
distributed=True,
validate=False,
timestamp=None,
meta=None,
result_dir=None,
dm=None,
sf_model=None):
"""Launch segmentor training.
    Args:
        model (nn.Module): Model instance.
        dataset (Dataset | list[Dataset]): Training dataset(s).
        distributed (bool): Flag to enable distributed training.
        validate (bool): Flag to enable validation during training.
        timestamp (str): Timestamp used for the log file names.
        meta (dict): Meta data like environment variables.
        result_dir (str): Directory where checkpoints and logs are written.
        dm (Class instance): Dataloader parameters class object.
        sf_model (Model instance): Segformer parameters class object.
    """
logger = get_root_logger("INFO")
# prepare data loaders
dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
data_loaders = [
build_dataloader(
ds,
dm.samples_per_gpu,
dm.workers_per_gpu,
dm.num_gpus,
dist=True,
seed=dm.seed,
drop_last=True) for ds in dataset
]
# put model on gpus
if distributed:
find_unused_parameters = sf_model.find_unused_parameters
# Sets the `find_unused_parameters` parameter in
# torch.nn.parallel.DistributedDataParallel
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False,
find_unused_parameters=find_unused_parameters)
else:
model = MMDataParallel(
model.cuda(0), device_ids=[0])
# build runner
cfg_optimizer = sf_model.sf_optim_cfg
lr_config = sf_model.lr_config
param_wise = cfg_optimizer["paramwise_cfg"]
cfg_optimizer["paramwise_cfg"] = dict(custom_keys=param_wise)
optimizer = build_optimizer(model, cfg_optimizer)
tao_runner = {'type': 'TAOIterBasedRunner', 'max_iters': sf_model.max_iters, "work_dir": result_dir}
warnings.warn(
'config is now expected to have a `runner` section, '
'please set `runner` in your config.', UserWarning)
runner = build_runner(
tao_runner,
default_args=dict(
model=model,
batch_processor=None,
optimizer=optimizer,
work_dir=result_dir,
logger=logger,
meta=meta))
log_config = dict(interval=dm.log_interval, hooks=[dict(type='TaoTextLoggerHook', by_epoch=False)])
checkpoint_config = dict(by_epoch=False, interval=sf_model.checkpoint_interval)
# register hooks
checkpoint_config["meta"] = dict(CLASSES=dm.CLASSES, PALETTE=dm.PALETTE)
runner.register_training_hooks(lr_config=lr_config, optimizer_config={},
checkpoint_config=checkpoint_config, log_config=log_config,
momentum_config=None)
    # a workaround to make the .log and .log.json filenames the same
    runner.timestamp = timestamp
    # register eval hooks
if validate:
val_dataset = build_dataset(dm.val_data, dm.default_args)
val_dataloader = build_dataloader(
val_dataset,
samples_per_gpu=1,
workers_per_gpu=1,
dist=distributed,
shuffle=False)
eval_cfg = dict(interval=sf_model.validation_interval, metric='mIoU')
eval_cfg['by_epoch'] = False
eval_hook = DistEvalHook if distributed else EvalHook
runner.register_hook(eval_hook(val_dataloader, **eval_cfg), priority='LOW')
resume_from = sf_model.resume_ckpt
load_from = None
workflow = [('train', 1)]
if resume_from:
runner.resume(resume_from)
elif load_from:
runner.load_checkpoint(load_from)
runner.run(data_loaders, workflow)
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/trainer/trainer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmskeleton
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metrics Module."""
import mmcv
import numpy as np
def intersect_and_union(pred_label,
label,
num_classes,
ignore_index,
label_map=dict(),
reduce_zero_label=False):
"""Calculate intersection and Union.
Args:
pred_label (ndarray): Prediction segmentation map.
label (ndarray): Ground truth segmentation map.
num_classes (int): Number of categories.
ignore_index (int): Index that will be ignored in evaluation.
label_map (dict): Mapping old labels to new labels. The parameter will
work only when label is str. Default: dict().
        reduce_zero_label (bool): Whether to ignore the zero label. The parameter will
work only when label is str. Default: False.
Returns:
ndarray: The intersection of prediction and ground truth histogram
on all classes.
ndarray: The union of prediction and ground truth histogram on all classes.
ndarray: The prediction histogram on all classes.
ndarray: The ground truth histogram on all classes.
"""
if isinstance(pred_label, str):
pred_label = np.load(pred_label)
if isinstance(label, str):
label = mmcv.imread(label, flag='unchanged', backend='pillow')
# modify if custom classes
if label_map is not None:
for old_id, new_id in label_map.items():
label[label == old_id] = new_id
if reduce_zero_label:
# avoid using underflow conversion
label[label == 0] = 255
label = label - 1
label[label == 254] = 255
mask = (label != ignore_index)
pred_label = pred_label[mask]
label = label[mask]
intersect = pred_label[pred_label == label]
area_intersect, _ = np.histogram(
intersect, bins=np.arange(num_classes + 1))
area_pred_label, _ = np.histogram(
pred_label, bins=np.arange(num_classes + 1))
area_label, _ = np.histogram(label, bins=np.arange(num_classes + 1))
area_union = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(results,
gt_seg_maps,
num_classes,
ignore_index,
label_map=dict(),
reduce_zero_label=False):
"""Calculate Total Intersection and Union.
Args:
results (list[ndarray]): List of prediction segmentation maps.
gt_seg_maps (list[ndarray]): list of ground truth segmentation maps.
num_classes (int): Number of categories.
ignore_index (int): Index that will be ignored in evaluation.
label_map (dict): Mapping old labels to new labels. Default: dict().
        reduce_zero_label (bool): Whether to ignore the zero label. Default: False.
Returns:
ndarray: The intersection of prediction and ground truth histogram
on all classes.
ndarray: The union of prediction and ground truth histogram on all classes.
ndarray: The prediction histogram on all classes.
ndarray: The ground truth histogram on all classes.
"""
num_imgs = len(results)
assert len(gt_seg_maps) == num_imgs
    total_area_intersect = np.zeros((num_classes, ), dtype=np.float64)
    total_area_union = np.zeros((num_classes, ), dtype=np.float64)
    total_area_pred_label = np.zeros((num_classes, ), dtype=np.float64)
    total_area_label = np.zeros((num_classes, ), dtype=np.float64)
for i in range(num_imgs):
area_intersect, area_union, area_pred_label, area_label = \
intersect_and_union(results[i], gt_seg_maps[i], num_classes,
ignore_index, label_map, reduce_zero_label)
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, \
total_area_pred_label, total_area_label
def mean_iou(results,
gt_seg_maps,
num_classes,
ignore_index,
nan_to_num=None,
label_map=dict(),
reduce_zero_label=False):
"""Calculate Mean Intersection and Union (mIoU)
Args:
results (list[ndarray]): List of prediction segmentation maps.
gt_seg_maps (list[ndarray]): list of ground truth segmentation maps.
num_classes (int): Number of categories.
ignore_index (int): Index that will be ignored in evaluation.
nan_to_num (int, optional): If specified, NaN values will be replaced
by the numbers defined by the user. Default: None.
label_map (dict): Mapping old labels to new labels. Default: dict().
        reduce_zero_label (bool): Whether to ignore the zero label. Default: False.
Returns:
float: Overall accuracy on all images.
ndarray: Per category accuracy, shape (num_classes, ).
ndarray: Per category IoU, shape (num_classes, ).
"""
all_acc, acc, iou = eval_metrics(
results=results,
gt_seg_maps=gt_seg_maps,
num_classes=num_classes,
ignore_index=ignore_index,
metrics=['mIoU'],
nan_to_num=nan_to_num,
label_map=label_map,
reduce_zero_label=reduce_zero_label)
return all_acc, acc, iou
def mean_dice(results,
gt_seg_maps,
num_classes,
ignore_index,
nan_to_num=None,
label_map=dict(),
reduce_zero_label=False):
"""Calculate Mean Dice (mDice)
Args:
results (list[ndarray]): List of prediction segmentation maps.
gt_seg_maps (list[ndarray]): list of ground truth segmentation maps.
num_classes (int): Number of categories.
ignore_index (int): Index that will be ignored in evaluation.
nan_to_num (int, optional): If specified, NaN values will be replaced
by the numbers defined by the user. Default: None.
label_map (dict): Mapping old labels to new labels. Default: dict().
        reduce_zero_label (bool): Whether to ignore the zero label. Default: False.
Returns:
float: Overall accuracy on all images.
ndarray: Per category accuracy, shape (num_classes, ).
ndarray: Per category dice, shape (num_classes, ).
"""
all_acc, acc, dice = eval_metrics(
results=results,
gt_seg_maps=gt_seg_maps,
num_classes=num_classes,
ignore_index=ignore_index,
metrics=['mDice'],
nan_to_num=nan_to_num,
label_map=label_map,
reduce_zero_label=reduce_zero_label)
return all_acc, acc, dice
def eval_metrics(results,
gt_seg_maps,
num_classes,
ignore_index,
metrics=['mIoU'],
nan_to_num=None,
label_map=dict(),
reduce_zero_label=False):
"""Calculate evaluation metrics
Args:
results (list[ndarray]): List of prediction segmentation maps.
gt_seg_maps (list[ndarray]): list of ground truth segmentation maps.
num_classes (int): Number of categories.
ignore_index (int): Index that will be ignored in evaluation.
metrics (list[str] | str): Metrics to be evaluated, 'mIoU' and 'mDice'.
nan_to_num (int, optional): If specified, NaN values will be replaced
by the numbers defined by the user. Default: None.
label_map (dict): Mapping old labels to new labels. Default: dict().
        reduce_zero_label (bool): Whether to ignore the zero label. Default: False.
Returns:
float: Overall accuracy on all images.
ndarray: Per category accuracy, shape (num_classes, ).
        ndarray: Per category evaluation metrics, shape (num_classes, ).
"""
if isinstance(metrics, str):
metrics = [metrics]
allowed_metrics = ['mIoU', 'mDice']
if not set(metrics).issubset(set(allowed_metrics)):
raise KeyError('metrics {} is not supported'.format(metrics))
total_area_intersect, total_area_union, total_area_pred_label, \
total_area_label = total_intersect_and_union(results, gt_seg_maps,
num_classes, ignore_index,
label_map,
reduce_zero_label)
all_acc = total_area_intersect.sum() / total_area_label.sum()
acc = total_area_intersect / total_area_label
ret_metrics = [all_acc, acc]
for metric in metrics:
if metric == 'mIoU':
iou = total_area_intersect / total_area_union
ret_metrics.append(iou)
elif metric == 'mDice':
dice = 2 * total_area_intersect / (
total_area_pred_label + total_area_label)
ret_metrics.append(dice)
if nan_to_num is not None:
ret_metrics = [
np.nan_to_num(metric, nan=nan_to_num) for metric in ret_metrics
]
return ret_metrics
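# --- Editor's note: hedged usage sketch, not part of the original module ---
# Tiny end-to-end call of eval_metrics on 2x2 maps with two classes; the values
# are chosen so the per-class IoU is easy to verify by hand.
# `_eval_metrics_example` is an illustrative name only.
def _eval_metrics_example():
    """Compute mIoU for a single hand-checkable prediction/label pair."""
    pred = np.array([[0, 0], [1, 1]])
    label = np.array([[0, 1], [1, 1]])
    all_acc, acc, iou = eval_metrics([pred], [label], num_classes=2,
                                     ignore_index=255, metrics=['mIoU'])
    # class 0: intersection 1, union 2 -> IoU 0.5
    # class 1: intersection 2, union 3 -> IoU ~0.667
    return all_acc, acc, iou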
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/dataloader/metrics.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmsegmentation
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Base Dataset Module."""
import os
import os.path as osp
from functools import reduce
import mmcv
from mmcv.utils import print_log
import numpy as np
from terminaltables import AsciiTable
from torch.utils.data import Dataset
from .metrics import eval_metrics
from .data_utils import Compose, get_root_logger
from .builder import DATASETS
@DATASETS.register_module()
class BaseDataset(Dataset):
"""Custom dataset for semantic segmentation. An example of file structure
is as followed.
.. code-block:: none
├── data
│ ├── my_dataset
│ │ ├── img_dir
│ │ │ ├── train
│ │ │ │ ├── xxx{img_suffix}
│ │ │ │ ├── yyy{img_suffix}
│ │ │ │ ├── zzz{img_suffix}
│ │ │ ├── val
│ │ ├── ann_dir
│ │ │ ├── train
│ │ │ │ ├── xxx{seg_map_suffix}
│ │ │ │ ├── yyy{seg_map_suffix}
│ │ │ │ ├── zzz{seg_map_suffix}
│ │ │ ├── val
    The img/gt_semantic_seg pairs of BaseDataset should be the same except for
    the suffix. A valid img/gt_semantic_seg filename pair should be like
    ``xxx{img_suffix}`` and ``xxx{seg_map_suffix}`` (extension is also included
    in the suffix). If split is given, then ``xxx`` is specified in the txt file.
    Otherwise, all files in ``img_dir/`` and ``ann_dir`` will be loaded.
Please refer to ``docs/tutorials/new_dataset.md`` for more details.
Args:
pipeline (list[dict]): Processing pipeline
img_dir (str): Path to image directory
img_suffix (str): Suffix of images. Default: '.jpg'
ann_dir (str, optional): Path to annotation directory. Default: None
seg_map_suffix (str): Suffix of segmentation maps. Default: '.png'
split (str, optional): Split txt file. If split is specified, only
file with suffix in the splits will be loaded. Otherwise, all
images in img_dir/ann_dir will be loaded. Default: None
data_root (str, optional): Data root for img_dir/ann_dir. Default:
None.
test_mode (bool): If test_mode=True, gt wouldn't be loaded.
ignore_index (int): The label index to be ignored. Default: 255
reduce_zero_label (bool): Whether to mark label zero as ignored.
Default: False
classes (str | Sequence[str], optional): Specify classes to load.
If is None, ``cls.CLASSES`` will be used. Default: None.
palette (Sequence[Sequence[int]]] | np.ndarray | None):
The palette of segmentation map. If None is given, and
self.PALETTE is None, random palette will be generated.
Default: None
"""
CLASSES = None
PALETTE = None
def __init__(self,
pipeline,
img_dir,
img_suffix='.jpg',
ann_dir=None,
seg_map_suffix='.png',
split=None,
data_root=None,
test_mode=False,
ignore_index=255,
reduce_zero_label=False,
classes=None,
palette=None):
"""Init Module."""
self.pipeline = Compose(pipeline)
self.img_dir = img_dir
self.img_suffix = img_suffix
self.ann_dir = ann_dir
self.seg_map_suffix = seg_map_suffix
self.split = split
self.data_root = data_root
self.test_mode = test_mode
self.ignore_index = ignore_index
self.reduce_zero_label = reduce_zero_label
self.label_map = None
self.CLASSES, self.PALETTE = self.get_classes_and_palette(
classes, palette)
# join paths if data_root is specified
if self.data_root is not None:
if not osp.isabs(self.img_dir):
self.img_dir = osp.join(self.data_root, self.img_dir)
if not (self.ann_dir is None or osp.isabs(self.ann_dir)):
self.ann_dir = osp.join(self.data_root, self.ann_dir)
if not (self.split is None or osp.isabs(self.split)):
self.split = osp.join(self.data_root, self.split)
# load annotations
self.img_infos = self.load_annotations(self.img_dir, self.img_suffix,
self.ann_dir,
self.seg_map_suffix, self.split)
def __len__(self):
"""Total number of samples of data."""
return len(self.img_infos)
def load_annotations(self, img_dir, img_suffix, ann_dir, seg_map_suffix,
split):
"""Load annotation from directory.
Args:
img_dir (str): Path to image directory
img_suffix (str): Suffix of images.
ann_dir (str|None): Path to annotation directory.
seg_map_suffix (str|None): Suffix of segmentation maps.
split (str|None): Split txt file. If split is specified, only file
with suffix in the splits will be loaded. Otherwise, all images
in img_dir/ann_dir will be loaded. Default: None
Returns:
list[dict]: All image info of dataset.
"""
img_infos = []
if split is not None:
with open(split) as f:
for line in f:
img_name = line.strip()
img_info = dict(filename=img_name + img_suffix)
if ann_dir is not None:
seg_map = img_name + seg_map_suffix
img_info['ann'] = dict(seg_map=seg_map)
img_infos.append(img_info)
else:
for img in mmcv.scandir(img_dir, img_suffix, recursive=True):
img_info = dict(filename=img)
if ann_dir is not None:
seg_map = img.replace(img_suffix, seg_map_suffix)
img_info['ann'] = dict(seg_map=seg_map)
img_infos.append(img_info)
print_log(f'Loaded {len(img_infos)} images', logger=get_root_logger())
return img_infos
def get_ann_info(self, idx):
"""Get annotation by index.
Args:
idx (int): Index of data.
Returns:
dict: Annotation info of specified index.
"""
return self.img_infos[idx]['ann']
def pre_pipeline(self, results):
"""Prepare results dict for pipeline."""
results['seg_fields'] = []
results['img_prefix'] = self.img_dir
results['seg_prefix'] = self.ann_dir
if self.custom_classes:
results['label_map'] = self.label_map
def __getitem__(self, idx):
"""Get training/test data after pipeline.
Args:
idx (int): Index of data.
Returns:
dict: Training/test data (with annotation if `test_mode` is set
False).
"""
if self.test_mode:
return self.prepare_test_img(idx)
return self.prepare_train_img(idx)
def prepare_train_img(self, idx):
"""Get training data and annotations after pipeline.
Args:
idx (int): Index of data.
Returns:
dict: Training data and annotation after pipeline with new keys
introduced by pipeline.
"""
img_info = self.img_infos[idx]
ann_info = self.get_ann_info(idx)
results = dict(img_info=img_info, ann_info=ann_info)
self.pre_pipeline(results)
return self.pipeline(results)
def prepare_test_img(self, idx):
"""Get testing data after pipeline.
Args:
idx (int): Index of data.
Returns:
            dict: Testing data after pipeline with new keys introduced by
                pipeline.
"""
img_info = self.img_infos[idx]
results = dict(img_info=img_info)
self.pre_pipeline(results)
return self.pipeline(results)
def format_results(self, results, **kwargs):
"""Place holder to format result to dataset specific output."""
pass
def get_gt_seg_maps(self, efficient_test=False):
"""Get ground truth segmentation maps for evaluation."""
gt_seg_maps = []
for img_info in self.img_infos:
seg_map = osp.join(self.ann_dir, img_info['ann']['seg_map'])
if efficient_test:
gt_seg_map = seg_map
else:
gt_seg_map = mmcv.imread(
seg_map, flag='unchanged', backend='pillow')
gt_seg_maps.append(gt_seg_map)
return gt_seg_maps
def get_classes_and_palette(self, classes=None, palette=None):
"""Get class names of current dataset.
Args:
classes (Sequence[str] | str | None): If classes is None, use
default CLASSES defined by builtin dataset. If classes is a
string, take it as a file name. The file contains the name of
classes where each line contains one class name. If classes is
a tuple or list, override the CLASSES defined by the dataset.
palette (Sequence[Sequence[int]]] | np.ndarray | None):
The palette of segmentation map. If None is given, random
palette will be generated. Default: None
"""
if classes is None:
self.custom_classes = False
return self.CLASSES, self.PALETTE
self.custom_classes = True
if isinstance(classes, str):
# take it as a file path
class_names = mmcv.list_from_file(classes)
elif isinstance(classes, (tuple, list)):
class_names = classes
else:
raise ValueError(f'Unsupported type {type(classes)} of classes.')
if self.CLASSES:
if not set(classes).issubset(self.CLASSES):
raise ValueError('classes is not a subset of CLASSES.')
# dictionary, its keys are the old label ids and its values
# are the new label ids.
# used for changing pixel labels in load_annotations.
self.label_map = {}
for i, c in enumerate(self.CLASSES):
if c not in class_names:
self.label_map[i] = -1
else:
self.label_map[i] = classes.index(c)
palette = self.get_palette_for_custom_classes(class_names, palette)
return class_names, palette
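    # Editor's note (hedged illustration): with cls.CLASSES = ('road', 'car', 'sky')
    # and classes = ('car', 'sky'), the mapping built above is
    #     label_map = {0: -1, 1: 0, 2: 1}
    # i.e. 'road' pixels are dropped (-1) and the remaining label ids are
    # re-indexed in the order they appear in `classes`.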
def get_palette_for_custom_classes(self, class_names, palette=None):
""" Get Palette for Custom Classes. """
if self.label_map is not None:
# return subset of palette
palette = []
for old_id, new_id in sorted(
self.label_map.items(), key=lambda x: x[1]):
if new_id != -1:
palette.append(self.PALETTE[old_id])
palette = type(self.PALETTE)(palette)
elif palette is None:
if self.PALETTE is None:
palette = np.random.randint(0, 255, size=(len(class_names), 3))
else:
palette = self.PALETTE
return palette
def evaluate(self,
results,
metric='mIoU',
logger=None,
efficient_test=False,
**kwargs):
"""Evaluate the dataset.
Args:
results (list): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated. 'mIoU' and
'mDice' are supported.
logger (logging.Logger | None | str): Logger used for printing
related information during evaluation. Default: None.
Returns:
dict[str, float]: Default metrics.
"""
if isinstance(metric, str):
metric = [metric]
allowed_metrics = ['mIoU', 'mDice']
if not set(metric).issubset(set(allowed_metrics)):
raise KeyError('metric {} is not supported'.format(metric))
eval_results = {}
gt_seg_maps = self.get_gt_seg_maps(efficient_test)
if self.CLASSES is None:
num_classes = len(
reduce(np.union1d, [np.unique(_) for _ in gt_seg_maps]))
else:
num_classes = len(self.CLASSES)
ret_metrics = eval_metrics(
results,
gt_seg_maps,
num_classes,
self.ignore_index,
metric,
label_map=self.label_map,
reduce_zero_label=self.reduce_zero_label)
class_table_data = [['Class'] + [m[1:] for m in metric] + ['Acc']]
if self.CLASSES is None:
class_names = tuple(range(num_classes))
else:
class_names = self.CLASSES
ret_metrics_round = [
np.round(ret_metric * 100, 2) for ret_metric in ret_metrics
]
for i in range(num_classes):
class_table_data.append([class_names[i]] +
[m[i] for m in ret_metrics_round[2:]] +
[ret_metrics_round[1][i]])
summary_table_data = [['Scope'] +
['m' + head
for head in class_table_data[0][1:]] + ['aAcc']]
ret_metrics_mean = [
np.round(np.nanmean(ret_metric) * 100, 2)
for ret_metric in ret_metrics
]
summary_table_data.append(['global'] + ret_metrics_mean[2:] +
[ret_metrics_mean[1]] +
[ret_metrics_mean[0]])
print_log('per class results:', logger)
table = AsciiTable(class_table_data)
print_log('\n' + table.table, logger=logger)
print_log('Summary:', logger)
table = AsciiTable(summary_table_data)
print_log('\n' + table.table, logger=logger)
for i in range(1, len(summary_table_data[0])):
eval_results[summary_table_data[0]
[i]] = summary_table_data[1][i] / 100.0
if mmcv.is_list_of(results, str):
for file_name in results:
os.remove(file_name)
return eval_results
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/dataloader/base_dataset.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmsegmentation
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PNG mask dataset."""
import os
from nvidia_tao_pytorch.cv.segformer.dataloader.data_utils import TargetClass, get_train_class_mapping
from omegaconf import OmegaConf
from omegaconf.listconfig import ListConfig
import numpy as np
class SFDataModule(object):
""" DataModule for Segformer."""
def __init__(self, dataset_config, num_gpus, seed, logger, phase="train", input_height=512, input_width=512):
""" DataModule Initialization
Args:
dataset_config (OmegaConfig): dataset configuration
num_gpus (int): num of GPUs
seed (int): random init seed
logger (class): logger object
phase (str): phase of task
input_height (int): Input height of the model
input_width (int): Input width of the model
"""
self.phase = phase
self.input_height = input_height
self.input_width = input_width
self.dataset_config = dataset_config
self.samples_per_gpu = self.dataset_config["batch_size"]
self.dataloader = self.dataset_config["dataloader"]
self.shuffle = self.dataset_config["shuffle"]
self.num_gpus = num_gpus
self.seed = seed
self.input_type = self.dataset_config["input_type"]
# Paths to the dataset
self.data_root = self.dataset_config["data_root"]
if self.phase == "train":
self.repeat_times = self.dataset_config["repeat_data_times"]
self.train_img_dirs = self.dataset_config["train_dataset"]["img_dir"]
self.train_ann_dirs = self.dataset_config["train_dataset"]["ann_dir"]
self.val_img_dir = self.dataset_config["val_dataset"]["img_dir"]
self.val_ann_dir = self.dataset_config["val_dataset"]["ann_dir"]
assert (type(self.train_img_dirs) == ListConfig), "Image Directories should be list of directories."
assert (type(self.train_ann_dirs) == ListConfig), "Image annotation directories should be a list."
assert (type(self.val_img_dir) == str), "Currently Segformer supports only 1 validation directory."
assert (type(self.val_ann_dir) == str), "Currently Segformer supports only 1 validation directory."
# Setting up pipeline
self.train_pipeline = self.dataset_config["train_dataset"]["pipeline"]
self.train_pipeline["CollectKeys"] = ["img", "gt_semantic_seg"]
self.val_pipeline = self.dataset_config["val_dataset"]["pipeline"]
self.train_pipeline["img_norm_cfg"] = self.dataset_config["img_norm_cfg"]
self.val_pipeline["img_norm_cfg"] = self.dataset_config["img_norm_cfg"]
if self.dataset_config["seg_map_suffix"] and self.dataset_config["img_suffix"]:
self.img_suffix = self.dataset_config["img_suffix"]
            # This allows providing suffixes that are not .png, e.g., for cityscapes
self.seg_map_suffix = self.dataset_config["seg_map_suffix"]
else:
self.img_suffix, self.seg_map_suffix = self.get_extensions(self.train_img_dirs[0], self.train_ann_dirs[0])
else:
# Eval / Inference
self.test_img_dir = self.dataset_config["test_dataset"]["img_dir"]
assert (type(self.test_img_dir) == str), "Currently Segformer supports only 1 test directory."
# It is not mandatory to provide the mask path for inference
if self.phase == "eval":
try:
self.test_ann_dir = self.dataset_config["test_dataset"]["ann_dir"]
except Exception as e:
raise ValueError("Test Annotation dir should be provided for evaluation {}".format(e))
else:
self.test_ann_dir = self.test_img_dir
self.test_pipeline = self.dataset_config["test_dataset"]["pipeline"]
self.test_pipeline["img_norm_cfg"] = self.dataset_config["img_norm_cfg"]
self.img_suffix, self.seg_map_suffix = self.get_extensions(self.test_img_dir, self.test_ann_dir)
self.train_dataset = None
self.val_dataset = None
self.workers_per_gpu = self.dataset_config["workers_per_gpu"]
self.num_workers = None
self.sampler_train = None
self.sampler_test = None
self.sampler_val = None
self.batch_size = self.num_gpus * self.samples_per_gpu
self.num_workers = self.num_gpus * self.workers_per_gpu
self.logger = logger
self.target_classes = self.build_target_class_list()
self.train_data = None
self.val_data = None
self.test_data = None
self.log_interval = 50
# This needs to be implemented
self.PALETTE, self.CLASSES, self.label_map, self.id_color_map = self.build_palette()
self.default_args = {"classes": tuple(self.CLASSES), "palette": self.PALETTE, "label_map": self.label_map,
"img_suffix": self.img_suffix, "seg_map_suffix": self.seg_map_suffix, "id_color_map": self.id_color_map,
"input_type": self.input_type, "logger": self.logger}
self.target_classes_train_mapping = get_train_class_mapping(self.target_classes)
self.num_classes = self.get_num_unique_train_ids()
def setup(self):
""" Function to initlaize the samplers and datasets. """
if self.phase == "train":
train_dic = {}
train_dic["type"] = "SFDataset"
train_dic["data_root"] = self.data_root
train_dic["img_dir"] = OmegaConf.to_container(self.train_img_dirs)
train_dic["ann_dir"] = OmegaConf.to_container(self.train_ann_dirs)
updated_train_pipeline = self.build_train_pipeline(self.train_pipeline)
train_dic["pipeline"] = updated_train_pipeline
train_data = {}
train_data["type"] = "RepeatDataset"
train_data["times"] = self.repeat_times
train_data["dataset"] = train_dic
train_data["img_suffix"] = self.img_suffix
train_data["seg_map_suffix"] = self.seg_map_suffix
# Val Dictionary
val_dic = {}
val_dic["type"] = "SFDataset"
val_dic["data_root"] = self.data_root
val_dic["img_dir"] = self.val_img_dir
val_dic["ann_dir"] = self.val_ann_dir
updated_val_pipeline = self.build_test_pipeline(self.val_pipeline)
val_dic["pipeline"] = updated_val_pipeline
val_data = val_dic
self.train_data = train_data
self.val_data = val_data
else:
# Test Dictionary
test_dic = {}
test_dic["type"] = "SFDataset"
test_dic["data_root"] = self.data_root
test_dic["img_dir"] = self.test_img_dir
test_dic["ann_dir"] = self.test_ann_dir
updated_test_pipeline = self.build_test_pipeline(self.test_pipeline)
test_dic["pipeline"] = updated_test_pipeline
test_data = test_dic
self.test_data = test_data
def get_extensions(self, img_dir, ann_dir):
""" Function to automatically get the image and mask extensions. """
img_suffix = os.listdir(img_dir)[0].split(".")[-1]
seg_map_suffix = os.listdir(ann_dir)[0].split(".")[-1]
return img_suffix, seg_map_suffix
def build_target_class_list(self):
"""Build a list of TargetClasses based on proto."""
target_classes = []
orig_class_label_id_map = {}
color_mapping = {}
for target_class in self.dataset_config.palette:
orig_class_label_id_map[target_class.seg_class] = target_class.label_id
color_mapping[target_class.seg_class] = target_class.rgb
class_label_id_calibrated_map = orig_class_label_id_map.copy()
for target_class in self.dataset_config.palette:
label_name = target_class.seg_class
train_name = target_class.mapping_class
class_label_id_calibrated_map[label_name] = orig_class_label_id_map[train_name]
train_ids = sorted(list(set(class_label_id_calibrated_map.values())))
train_id_calibrated_map = {}
for idx, tr_id in enumerate(train_ids):
train_id_calibrated_map[tr_id] = idx
class_train_id_calibrated_map = {}
for label_name, train_id in class_label_id_calibrated_map.items():
class_train_id_calibrated_map[label_name] = train_id_calibrated_map[train_id]
for target_class in self.dataset_config.palette:
target_classes.append(
TargetClass(target_class.seg_class, label_id=target_class.label_id,
train_id=class_train_id_calibrated_map[target_class.seg_class],
color=color_mapping[target_class.mapping_class],
train_name=target_class.mapping_class
))
for target_class in target_classes:
self.logger.info("Label Id {}: Train Id {}".format(target_class.label_id, target_class.train_id))
return target_classes
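    # Editor's note (hedged illustration): each dataset_config.palette entry is
    # expected to carry roughly
    #     seg_class: 'road', mapping_class: 'road', label_id: 0, rgb: [128, 64, 128]
    # Classes that share a mapping_class collapse onto the same train_id, and the
    # surviving ids are re-indexed into a contiguous 0..N-1 range above.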
def build_palette(self):
"""Build palette, classes and label_map."""
label_map = {}
classes_color = {}
id_color_map = {}
classes = []
palette = []
for target_class in self.target_classes:
label_map[target_class.label_id] = target_class.train_id
if target_class.train_name not in classes_color.keys():
classes_color[target_class.train_id] = (target_class.train_name, target_class.color)
id_color_map[target_class.train_id] = target_class.color
keylist = list(classes_color.keys())
keylist.sort()
for train_id in keylist:
classes.append(classes_color[train_id][0])
palette.append(classes_color[train_id][1])
return palette, classes, label_map, id_color_map
def get_num_unique_train_ids(self):
"""Return the final number classes used for training.
Arguments:
target_classes: The target classes object that contain the train_id and
label_id.
Returns:
Number of classes to be segmented.
"""
train_ids = [target.train_id for target in self.target_classes]
train_ids = np.array(train_ids)
train_ids_unique = np.unique(train_ids)
return len(train_ids_unique)
def build_train_pipeline(self, train_pipeline):
""" Function to Build Train Pipeline.
Args:
train_pipeline (Dict): dictionary having the parameters for training augmentation
"""
augmentation_config = train_pipeline["augmentation_config"]
if not augmentation_config["resize"]["img_scale"]:
img_scale_min = min(self.input_height, self.input_width)
img_scale_max = 1024 if img_scale_min < 1024 else 2048
augmentation_config["resize"]["img_scale"] = [img_scale_min, img_scale_max]
updated_train_pipeline = [dict(type="LoadImageFromFile"),
dict(type="LoadAnnotations", input_type=self.input_type),
dict(type="Resize", img_scale=tuple(augmentation_config["resize"]["img_scale"]), ratio_range=tuple(augmentation_config["resize"]["ratio_range"])),
dict(type="RandomCrop", crop_size=tuple([self.input_height, self.input_width]), cat_max_ratio=augmentation_config["random_crop"]["cat_max_ratio"]),
dict(type="RandomFlip", prob=augmentation_config["random_flip"]["prob"]),
dict(type='PhotoMetricDistortion'),
dict(type="Normalize", mean=train_pipeline["img_norm_cfg"]["mean"],
std=train_pipeline["img_norm_cfg"]["std"],
to_rgb=train_pipeline["img_norm_cfg"]["to_rgb"]),
dict(type="Pad", size=(self.input_height, self.input_width), pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=train_pipeline["CollectKeys"]),
]
return updated_train_pipeline
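# Hedged illustration (not part of the original source): how the img_scale
# fallback above behaves when no scale is configured. The input size below is a
# hypothetical example.
def _example_default_img_scale(input_height=544, input_width=960):
    """Sketch of the default img_scale chosen by build_train_pipeline()."""
    img_scale_min = min(input_height, input_width)          # 544
    img_scale_max = 1024 if img_scale_min < 1024 else 2048  # 1024
    return [img_scale_min, img_scale_max]                   # [544, 1024]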
def build_test_pipeline(self, test_pipeline):
""" Function to Build Test Pipeline.
Args:
test_pipeline (Dict): dictionary having the parameters for the test-time pipeline
"""
augmentation_config = test_pipeline["augmentation_config"]
keep_ar = augmentation_config["resize"]["keep_ratio"]
if not test_pipeline["multi_scale"]:
test_pipeline["multi_scale"] = [self.input_height, 2048]
transforms = [
dict(type='Resize', keep_ratio=keep_ar),
dict(type='RandomFlip'),
dict(type="Normalize", mean=test_pipeline["img_norm_cfg"]["mean"],
std=test_pipeline["img_norm_cfg"]["std"], to_rgb=test_pipeline["img_norm_cfg"]["to_rgb"]),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'])
]
updated_test_pipeline = [dict(type="LoadImageFromFile"),
dict(type='MultiScaleFlipAug', img_scale=tuple(test_pipeline["multi_scale"]),
flip=False,
transforms=transforms)]
return updated_test_pipeline
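# Hedged illustration (not part of the original source): the nesting produced by
# build_test_pipeline(). The normalization statistics below are hypothetical
# placeholders, not values read from any config.
def _example_test_pipeline_shape(input_height=544):
    """Sketch of the test pipeline structure: a single MultiScaleFlipAug wrapper."""
    return [dict(type="LoadImageFromFile"),
            dict(type="MultiScaleFlipAug",
                 img_scale=(input_height, 2048),  # default multi_scale fallback
                 flip=False,
                 transforms=[dict(type="Resize", keep_ratio=True),
                             dict(type="RandomFlip"),
                             dict(type="Normalize", mean=[123.675, 116.28, 103.53],
                                  std=[58.395, 57.12, 57.375], to_rgb=True),
                             dict(type="ImageToTensor", keys=["img"]),
                             dict(type="Collect", keys=["img"])])]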
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/dataloader/segformer_dm.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmsegmentation
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom Dataset Module."""
import os
import os.path as osp
from functools import reduce
import mmcv
from mmcv.utils import print_log
import numpy as np
from terminaltables import AsciiTable
from torch.utils.data import Dataset
from nvidia_tao_pytorch.cv.segformer.core import eval_metrics
from nvidia_tao_pytorch.cv.segformer.utils import get_root_logger
from nvidia_tao_pytorch.cv.segformer.dataloader.builder import DATASETS
from nvidia_tao_pytorch.cv.segformer.dataloader.pipelines import Compose
@DATASETS.register_module()
class CustomDataset(Dataset):
"""Custom dataset for semantic segmentation. An example of file structure
is as follows.
.. code-block:: none
├── data
│ ├── my_dataset
│ │ ├── img_dir
│ │ │ ├── train
│ │ │ │ ├── xxx{img_suffix}
│ │ │ │ ├── yyy{img_suffix}
│ │ │ │ ├── zzz{img_suffix}
│ │ │ ├── val
│ │ ├── ann_dir
│ │ │ ├── train
│ │ │ │ ├── xxx{seg_map_suffix}
│ │ │ │ ├── yyy{seg_map_suffix}
│ │ │ │ ├── zzz{seg_map_suffix}
│ │ │ ├── val
The img/gt_semantic_seg pairs of CustomDataset should share the same
prefix and differ only in suffix. A valid img/gt_semantic_seg filename pair should be like
``xxx{img_suffix}`` and ``xxx{seg_map_suffix}`` (extension is also included
in the suffix). If split is given, then ``xxx`` is specified in the txt file.
Otherwise, all files in ``img_dir/`` and ``ann_dir`` will be loaded.
Please refer to ``docs/tutorials/new_dataset.md`` for more details.
Args:
pipeline (list[dict]): Processing pipeline
img_dir (str): Path to image directory
img_suffix (str): Suffix of images. Default: '.png'
ann_dir (str, optional): Path to annotation directory. Default: None
seg_map_suffix (str): Suffix of segmentation maps. Default: '.png'
split (str, optional): Split txt file. If split is specified, only
file with suffix in the splits will be loaded. Otherwise, all
images in img_dir/ann_dir will be loaded. Default: None
data_root (str, optional): Data root for img_dir/ann_dir. Default:
None.
test_mode (bool): If test_mode=True, gt wouldn't be loaded.
ignore_index (int): The label index to be ignored. Default: 255
reduce_zero_label (bool): Whether to mark label zero as ignored.
Default: False
classes (str | Sequence[str], optional): Specify classes to load.
If is None, ``cls.CLASSES`` will be used. Default: None.
palette (Sequence[Sequence[int]]] | np.ndarray | None):
The palette of segmentation map. If None is given, and
self.PALETTE is None, random palette will be generated.
Default: None
"""
CLASSES = None
PALETTE = None
def __init__(self,
pipeline,
img_dir,
img_suffix='.png',
ann_dir=None,
seg_map_suffix='.png',
split=None,
data_root=None,
test_mode=False,
ignore_index=255,
reduce_zero_label=False,
classes=None,
palette=None,
label_map=None,
id_color_map=None,
input_type="rgb",
logger=None):
"""Initialize."""
self.pipeline = Compose(pipeline)
self.img_dir = img_dir
self.img_suffix = img_suffix
self.ann_dir = ann_dir
self.seg_map_suffix = seg_map_suffix
self.split = split
self.data_root = data_root
self.test_mode = test_mode
self.ignore_index = ignore_index
self.reduce_zero_label = reduce_zero_label
self.label_map = label_map
self.CLASSES, self.PALETTE = classes, palette
self.id_color_map = id_color_map
self.input_type = input_type
self.logger = logger
# join paths if data_root is specified
if self.data_root is not None:
if not osp.isabs(self.img_dir):
self.img_dir = osp.join(self.data_root, self.img_dir)
if not (self.ann_dir is None or osp.isabs(self.ann_dir)):
self.ann_dir = osp.join(self.data_root, self.ann_dir)
if not (self.split is None or osp.isabs(self.split)):
self.split = osp.join(self.data_root, self.split)
# load annotations
self.img_infos = self.load_annotations(self.img_dir, self.img_suffix,
self.ann_dir,
self.seg_map_suffix, self.split)
def __len__(self):
"""Total number of samples of data."""
return len(self.img_infos)
def load_annotations(self, img_dir, img_suffix, ann_dir, seg_map_suffix,
split):
"""Load annotation from directory.
Args:
img_dir (str): Path to image directory
img_suffix (str): Suffix of images.
ann_dir (str|None): Path to annotation directory.
seg_map_suffix (str|None): Suffix of segmentation maps.
split (str|None): Split txt file. If split is specified, only file
with suffix in the splits will be loaded. Otherwise, all images
in img_dir/ann_dir will be loaded. Default: None
Returns:
list[dict]: All image info of dataset.
"""
img_infos = []
if split is not None:
with open(split) as f:
for line in f:
img_name = line.strip()
img_info = dict(filename=img_name + img_suffix)
if ann_dir is not None:
seg_map = img_name + seg_map_suffix
img_info['ann'] = dict(seg_map=seg_map)
img_infos.append(img_info)
else:
for img in mmcv.scandir(img_dir, img_suffix, recursive=True):
img_info = dict(filename=img)
if ann_dir is not None:
seg_map = img.replace(img_suffix, seg_map_suffix)
img_info['ann'] = dict(seg_map=seg_map)
img_infos.append(img_info)
print_log(f'Loaded {len(img_infos)} images', logger=get_root_logger())
return img_infos
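# Hedged illustration (not part of the original source): the structure of one
# entry in the list returned by load_annotations(), for a hypothetical image
# "0001.png" with a mask of the same basename under ann_dir.
_example_img_info = dict(filename="0001.png", ann=dict(seg_map="0001.png"))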
def get_ann_info(self, idx):
"""Get annotation by index.
Args:
idx (int): Index of data.
Returns:
dict: Annotation info of specified index.
"""
return self.img_infos[idx]['ann']
def pre_pipeline(self, results):
"""Prepare results dict for pipeline."""
results['seg_fields'] = []
results['img_prefix'] = self.img_dir
results['seg_prefix'] = self.ann_dir
results['label_map'] = self.label_map
def __getitem__(self, idx):
"""Get training/test data after pipeline.
Args:
idx (int): Index of data.
Returns:
dict: Training/test data (with annotation if `test_mode` is set
False).
"""
if self.test_mode:
return self.prepare_test_img(idx)
return self.prepare_train_img(idx)
def prepare_train_img(self, idx):
"""Get training data and annotations after pipeline.
Args:
idx (int): Index of data.
Returns:
dict: Training data and annotation after pipeline with new keys
introduced by pipeline.
"""
img_info = self.img_infos[idx]
ann_info = self.get_ann_info(idx)
results = dict(img_info=img_info, ann_info=ann_info)
self.pre_pipeline(results)
return self.pipeline(results)
def prepare_test_img(self, idx):
"""Get testing data after pipeline.
Args:
idx (int): Index of data.
Returns:
dict: Testing data after pipeline with new keys introduced by
pipeline.
"""
img_info = self.img_infos[idx]
results = dict(img_info=img_info)
self.pre_pipeline(results)
return self.pipeline(results)
def format_results(self, results, **kwargs):
"""Place holder to format result to dataset specific output."""
pass
def get_gt_seg_maps(self, efficient_test=False):
"""Get ground truth segmentation maps for evaluation."""
gt_seg_maps = []
for img_info in self.img_infos:
seg_map = osp.join(self.ann_dir, img_info['ann']['seg_map'])
if efficient_test:
gt_seg_map = seg_map
else:
gt_seg_map = mmcv.imread(
seg_map, flag='unchanged', backend='pillow')
if self.input_type == "grayscale":
gt_seg_map = gt_seg_map / 255
gt_seg_map = np.where(gt_seg_map > 0.5, 1, 0)
gt_seg_maps.append(gt_seg_map)
return gt_seg_maps
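# Hedged illustration (not part of the original source) of the grayscale branch
# above: mask pixels are rescaled to [0, 1] and thresholded at 0.5.
def _example_binarise_grayscale_mask():
    """Sketch of binarising a tiny hypothetical grayscale mask."""
    raw = np.array([[0, 128, 255]], dtype=np.uint8)
    return np.where(raw / 255 > 0.5, 1, 0)  # -> [[0, 1, 1]]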
# We are not using this function
def get_classes_and_palette(self, classes=None, palette=None):
"""Get class names of current dataset.
Args:
classes (Sequence[str] | str | None): If classes is None, use
default CLASSES defined by builtin dataset. If classes is a
string, take it as a file name. The file contains the name of
classes where each line contains one class name. If classes is
a tuple or list, override the CLASSES defined by the dataset.
palette (Sequence[Sequence[int]]] | np.ndarray | None):
The palette of segmentation map. If None is given, random
palette will be generated. Default: None
"""
if classes is None:
self.custom_classes = False
return self.CLASSES, self.PALETTE
self.custom_classes = True
if isinstance(classes, str):
# take it as a file path
class_names = mmcv.list_from_file(classes)
elif isinstance(classes, (tuple, list)):
class_names = classes
else:
raise ValueError(f'Unsupported type {type(classes)} of classes.')
if self.CLASSES:
if not set(classes).issubset(self.CLASSES):
raise ValueError('classes is not a subset of CLASSES.')
# dictionary, its keys are the old label ids and its values
# are the new label ids.
# used for changing pixel labels in load_annotations.
self.label_map = {}
for i, c in enumerate(self.CLASSES):
if c not in class_names:
self.label_map[i] = -1
else:
self.label_map[i] = classes.index(c)
palette = self.get_palette_for_custom_classes(class_names, palette)
return class_names, palette
def get_palette_for_custom_classes(self, class_names, palette=None):
"""Function To Get Custom Classes."""
if self.label_map is not None:
# return subset of palette
palette = []
for old_id, new_id in sorted(
self.label_map.items(), key=lambda x: x[1]):
if new_id != -1:
palette.append(self.PALETTE[old_id])
palette = type(self.PALETTE)(palette)
elif palette is None:
if self.PALETTE is None:
palette = np.random.randint(0, 255, size=(len(class_names), 3))
else:
palette = self.PALETTE
return palette
def evaluate(self,
results,
metric='mIoU',
logger=None,
efficient_test=False,
**kwargs):
"""Evaluate the dataset.
Args:
results (list): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated. 'mIoU' and
'mDice' are supported.
logger (logging.Logger | None | str): Logger used for printing
related information during evaluation. Default: None.
Returns:
dict[str, float]: Default metrics.
"""
if isinstance(metric, str):
metric = [metric]
allowed_metrics = ['mIoU', 'mDice']
if not set(metric).issubset(set(allowed_metrics)):
raise KeyError('metric {} is not supported'.format(metric))
eval_results = {}
gt_seg_maps = self.get_gt_seg_maps(efficient_test)
if self.CLASSES is None:
num_classes = len(
reduce(np.union1d, [np.unique(_) for _ in gt_seg_maps]))
else:
num_classes = len(self.CLASSES)
ret_metrics = eval_metrics(
results,
gt_seg_maps,
num_classes,
self.ignore_index,
metric,
label_map=self.label_map,
reduce_zero_label=self.reduce_zero_label)
class_table_data = [['Class'] + [m[1:] for m in metric] + ['Acc']]
if self.CLASSES is None:
class_names = tuple(range(num_classes))
else:
class_names = self.CLASSES
ret_metrics_round = [
np.round(ret_metric * 100, 2) for ret_metric in ret_metrics
]
for i in range(num_classes):
class_table_data.append([class_names[i]] +
[m[i] for m in ret_metrics_round[2:]] +
[ret_metrics_round[1][i]])
summary_table_data = [['Scope'] +
['m' + head
for head in class_table_data[0][1:]] + ['aAcc']]
ret_metrics_mean = [
np.round(np.nanmean(ret_metric) * 100, 2)
for ret_metric in ret_metrics
]
summary_table_data.append(['global'] + ret_metrics_mean[2:] +
[ret_metrics_mean[1]] +
[ret_metrics_mean[0]])
print_log('per class results:', logger)
table = AsciiTable(class_table_data)
print_log('\n' + table.table, logger=logger)
print_log('Summary:', logger)
table = AsciiTable(summary_table_data)
print_log('\n' + table.table, logger=logger)
for i in range(1, len(summary_table_data[0])):
eval_results[summary_table_data[0]
[i]] = summary_table_data[1][i] / 100.0
if mmcv.is_list_of(results, str):
for file_name in results:
os.remove(file_name)
return eval_results
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/dataloader/custom_dataset.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Segformer dataloader module."""
from .segformer_dataset import SFDataset
from .custom_dataset import CustomDataset
from .data_utils import ConcatDataset, RepeatDataset
__all__ = ["SFDataset", "CustomDataset", "ConcatDataset", "RepeatDataset"]
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/dataloader/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmskeleton
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Builder Module."""
import copy
import platform
import random
import numpy as np
from mmcv.utils import Registry, build_from_cfg
if platform.system() != 'Windows':
# https://github.com/pytorch/pytorch/issues/973
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
hard_limit = rlimit[1]
soft_limit = min(4096, hard_limit)
resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
DATASETS = Registry('dataset', scope='xxx')
PIPELINES = Registry('pipeline', scope='xxx')
def _concat_dataset(cfg, default_args=None):
"""Build :obj:`ConcatDataset by."""
from .dataset_wrappers import ConcatDataset
img_dir = cfg['img_dir']
ann_dir = cfg.get('ann_dir', None)
split = cfg.get('split', None)
num_img_dir = len(img_dir) if isinstance(img_dir, (list, tuple)) else 1
if ann_dir is not None:
num_ann_dir = len(ann_dir) if isinstance(ann_dir, (list, tuple)) else 1
else:
num_ann_dir = 0
if split is not None:
num_split = len(split) if isinstance(split, (list, tuple)) else 1
else:
num_split = 0
if num_img_dir > 1:
assert num_ann_dir in (num_img_dir, 0)
assert num_split in (num_img_dir, 0)
else:
assert num_split == num_ann_dir or num_ann_dir <= 1
num_dset = max(num_split, num_img_dir)
datasets = []
for i in range(num_dset):
data_cfg = copy.deepcopy(cfg)
if isinstance(img_dir, (list, tuple)):
data_cfg['img_dir'] = img_dir[i]
if isinstance(ann_dir, (list, tuple)):
data_cfg['ann_dir'] = ann_dir[i]
if isinstance(split, (list, tuple)):
data_cfg['split'] = split[i]
datasets.append(build_dataset(data_cfg, default_args))
return ConcatDataset(datasets)
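# Hedged illustration (not part of the original source): a config whose img_dir
# and ann_dir are lists is expanded by _concat_dataset() into one dataset per
# entry and wrapped in ConcatDataset. All paths below are hypothetical.
_example_concat_cfg = dict(
    type="SFDataset",
    data_root="/data",
    img_dir=["images/camera_a", "images/camera_b"],
    ann_dir=["masks/camera_a", "masks/camera_b"],
    pipeline=[dict(type="LoadImageFromFile")],
)
# build_dataset(_example_concat_cfg) would build two SFDataset instances and concatenate them.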
def build_dataset(cfg, default_args=None):
"""Build datasets."""
from .dataset_wrappers import ConcatDataset, RepeatDataset
if isinstance(cfg, (list, tuple)):
dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])
elif cfg['type'] == 'RepeatDataset':
dataset = RepeatDataset(
build_dataset(cfg['dataset'], default_args), cfg['times'])
elif isinstance(cfg.get('img_dir'), (list, tuple)) or isinstance(
cfg.get('split', None), (list, tuple)):
print("Concatenating datasets")
dataset = _concat_dataset(cfg, default_args)
else:
dataset = build_from_cfg(cfg, DATASETS, default_args)
return dataset
def worker_init_fn(worker_id, num_workers, rank, seed):
"""Worker init func for dataloader.
The seed of each worker equals to num_worker * rank + worker_id + user_seed
Args:
worker_id (int): Worker id.
num_workers (int): Number of workers.
rank (int): The rank of current process.
seed (int): The random seed to use.
"""
worker_seed = num_workers * rank + worker_id + seed
np.random.seed(worker_seed)
random.seed(worker_seed)
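# Hedged worked example (not part of the original source) of the seed formula in
# worker_init_fn(): with num_workers=2 and user seed 42, rank 0 gets worker
# seeds [42, 43] and rank 1 gets [44, 45], so no two workers share a seed.
def _example_worker_seeds(num_workers=2, ranks=(0, 1), seed=42):
    """Hypothetical helper enumerating the per-worker seeds."""
    return {rank: [num_workers * rank + worker_id + seed for worker_id in range(num_workers)]
            for rank in ranks}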
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/dataloader/builder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmskeleton
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data Utils Module."""
from torch.utils.data.dataset import ConcatDataset as _ConcatDataset
import torch.distributed as dist
from torch.utils.data import DistributedSampler
from nvidia_tao_pytorch.cv.segformer.dataloader.builder import DATASETS
import copy
import random
from functools import partial
import numpy as np
from mmcv.parallel import collate
from mmcv.runner import get_dist_info
from mmcv.utils import build_from_cfg
from mmcv.utils.parrots_wrapper import DataLoader, PoolDataLoader
def _concat_dataset(cfg, default_args=None):
"""Build :obj:`ConcatDataset by."""
img_dir = cfg['img_dir']
ann_dir = cfg.get('ann_dir', None)
split = cfg.get('split', None)
num_img_dir = len(img_dir) if isinstance(img_dir, (list, tuple)) else 1
if ann_dir is not None:
num_ann_dir = len(ann_dir) if isinstance(ann_dir, (list, tuple)) else 1
else:
num_ann_dir = 0
if split is not None:
num_split = len(split) if isinstance(split, (list, tuple)) else 1
else:
num_split = 0
if num_img_dir > 1:
assert num_ann_dir in (num_img_dir, 0)
assert num_split in (num_img_dir, 0)
else:
assert num_split == num_ann_dir or num_ann_dir <= 1
num_dset = max(num_split, num_img_dir)
datasets = []
for i in range(num_dset):
data_cfg = copy.deepcopy(cfg)
if isinstance(img_dir, (list, tuple)):
data_cfg['img_dir'] = img_dir[i]
if isinstance(ann_dir, (list, tuple)):
data_cfg['ann_dir'] = ann_dir[i]
if isinstance(split, (list, tuple)):
data_cfg['split'] = split[i]
datasets.append(build_dataset(data_cfg, default_args))
return ConcatDataset(datasets)
class TargetClass(object):
"""Target class parameters."""
def __init__(self, name, label_id, train_id=None, color=None, train_name=None):
"""Constructor.
Args:
name (str): Name of the target class.
label_id (int): original label id of every pixel of the mask
train_id (int): the mapped train id of every pixel in the mask
Raises:
ValueError: On invalid input args.
"""
self.name = name
self.train_id = train_id
self.label_id = label_id
self.color = color
self.train_name = train_name
def get_train_class_mapping(target_classes):
"""Utility function that returns the mapping of the train id to orig class."""
train_id_name_mapping = {}
for target_class in target_classes:
if target_class.train_id not in train_id_name_mapping.keys():
train_id_name_mapping[target_class.train_id] = [target_class.name]
else:
train_id_name_mapping[target_class.train_id].append(target_class.name)
return train_id_name_mapping
@DATASETS.register_module()
class ConcatDataset(_ConcatDataset):
"""A wrapper of concatenated dataset.
Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but
concat the group flag for image aspect ratio.
Args:
datasets (list[:obj:`Dataset`]): A list of datasets.
"""
def __init__(self, datasets):
"""Init Module."""
super(ConcatDataset, self).__init__(datasets)
self.CLASSES = datasets[0].CLASSES
self.PALETTE = datasets[0].PALETTE
@DATASETS.register_module()
class RepeatDataset(object):
"""A wrapper of repeated dataset.
The length of repeated dataset will be `times` larger than the original
dataset. This is useful when the data loading time is long but the dataset
is small. Using RepeatDataset can reduce the data loading time between
epochs.
Args:
dataset (:obj:`Dataset`): The dataset to be repeated.
times (int): Repeat times.
"""
def __init__(self, dataset, times):
"""Init Module."""
self.dataset = dataset
self.times = times
self.CLASSES = dataset.CLASSES
self.PALETTE = dataset.PALETTE
self._ori_len = len(self.dataset)
def __getitem__(self, idx):
"""Get item from original dataset."""
return self.dataset[idx % self._ori_len]
def __len__(self):
"""The length is multiplied by ``times``"""
return self.times * self._ori_len
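# Hedged worked example (not part of the original source): repeating a
# hypothetical 100-sample dataset with times=8 gives a length of 800, and
# __getitem__(250) is served from original index 250 % 100 == 50.
_example_repeat_len = 8 * 100
_example_repeat_src_idx = 250 % 100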
def is_dist_avail_and_initialized():
"""is dist initialized"""
is_dist = True
if not dist.is_available():
is_dist = False
else:
is_dist = dist.is_initialized() or False
return is_dist
def build_dataset(cfg, default_args=None):
"""Build datasets."""
if isinstance(cfg, (list, tuple)):
dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])
elif cfg['type'] == 'RepeatDataset':
dataset = RepeatDataset(
build_dataset(cfg['dataset'], default_args), cfg['times'])
elif isinstance(cfg.get('img_dir'), (list, tuple)) or isinstance(
cfg.get('split', None), (list, tuple)):
print("Concatenating datasets")
dataset = _concat_dataset(cfg, default_args)
else:
dataset = build_from_cfg(cfg, DATASETS, default_args)
return dataset
def build_dataloader(dataset,
samples_per_gpu,
workers_per_gpu,
num_gpus=1,
dist=True,
shuffle=True,
seed=None,
drop_last=False,
pin_memory=True,
dataloader_type='PoolDataLoader',
**kwargs):
"""Build PyTorch DataLoader.
In distributed training, each GPU/process has a dataloader.
In non-distributed training, there is only one dataloader for all GPUs.
Args:
dataset (Dataset): A PyTorch dataset.
samples_per_gpu (int): Number of training samples on each GPU, i.e.,
batch size of each GPU.
workers_per_gpu (int): How many subprocesses to use for data loading
for each GPU.
num_gpus (int): Number of GPUs. Only used in non-distributed training.
dist (bool): Distributed training/test or not. Default: True.
shuffle (bool): Whether to shuffle the data at every epoch.
Default: True.
seed (int | None): Seed to be used. Default: None.
drop_last (bool): Whether to drop the last incomplete batch in epoch.
Default: False
pin_memory (bool): Whether to use pin_memory in DataLoader.
Default: True
dataloader_type (str): Type of dataloader. Default: 'PoolDataLoader'
kwargs: any keyword argument to be used to initialize DataLoader
Returns:
DataLoader: A PyTorch dataloader.
"""
rank, world_size = get_dist_info()
if dist:
sampler = DistributedSampler(
dataset, world_size, rank, shuffle=shuffle)
shuffle = False
batch_size = samples_per_gpu
num_workers = workers_per_gpu
else:
sampler = None
batch_size = num_gpus * samples_per_gpu
num_workers = num_gpus * workers_per_gpu
init_fn = partial(
worker_init_fn, num_workers=num_workers, rank=rank,
seed=seed) if seed is not None else None
assert dataloader_type in (
'DataLoader',
'PoolDataLoader'), f'unsupported dataloader {dataloader_type}'
if dataloader_type == 'PoolDataLoader':
dataloader = PoolDataLoader
elif dataloader_type == 'DataLoader':
dataloader = DataLoader
data_loader = dataloader(
dataset,
batch_size=batch_size,
sampler=sampler,
num_workers=num_workers,
collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),
pin_memory=pin_memory,
shuffle=shuffle,
worker_init_fn=init_fn,
drop_last=drop_last,
**kwargs)
return data_loader
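# Hedged usage sketch (not part of the original source); `my_dataset` is assumed
# to be any torch-compatible dataset and the values are arbitrary. In
# non-distributed mode the effective batch size is num_gpus * samples_per_gpu.
def _example_build_dataloader(my_dataset):
    """Hypothetical single-GPU, non-distributed usage of build_dataloader()."""
    return build_dataloader(my_dataset, samples_per_gpu=2, workers_per_gpu=2,
                            num_gpus=1, dist=False, shuffle=True, seed=42,
                            dataloader_type="DataLoader")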
def worker_init_fn(worker_id, num_workers, rank, seed):
"""Worker init func for dataloader.
The seed of each worker equals to num_worker * rank + worker_id + user_seed
Args:
worker_id (int): Worker id.
num_workers (int): Number of workers.
rank (int): The rank of current process.
seed (int): The random seed to use.
"""
worker_seed = num_workers * rank + worker_id + seed
np.random.seed(worker_seed)
random.seed(worker_seed)
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/dataloader/data_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmsegmentation
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Segformer dataset module."""
import os
import os.path as osp
import tempfile
import mmcv
from mmcv.utils import print_log
import numpy as np
from PIL import Image
from nvidia_tao_pytorch.cv.segformer.dataloader.builder import DATASETS
from nvidia_tao_pytorch.cv.segformer.dataloader.custom_dataset import CustomDataset
from nvidia_tao_pytorch.cv.segformer.utils.common_utils import check_and_create
@DATASETS.register_module()
class SFDataset(CustomDataset):
"""SF dataset.
Custom SegFormer Dataset
"""
# The suffix to be used can be made configurable
def __init__(self, **kwargs):
"""Initialize."""
super(SFDataset, self).__init__(
**kwargs)
self.img_suffix = kwargs["img_suffix"]
self.seg_map_suffix = kwargs["seg_map_suffix"]
@staticmethod
def _convert_to_label_id(result):
"""Convert trainId to id for cityscapes."""
if isinstance(result, str):
result = np.load(result)
import cityscapesscripts.helpers.labels as CSLabels
result_copy = result.copy()
for trainId, label in CSLabels.trainId2label.items():
result_copy[result == trainId] = label.id
return result_copy
def results2img(self, results, imgfile_prefix, to_label_id):
"""Write the segmentation results to images.
Args:
results (list[list | tuple | ndarray]): Testing results of the
dataset.
imgfile_prefix (str): The filename prefix of the png files.
If the prefix is "somepath/xxx",
the png files will be named "somepath/xxx.png".
to_label_id (bool): whether to convert output to label_id for
submission
Returns:
list[str: str]: result txt files which contains corresponding
semantic segmentation images.
"""
mmcv.mkdir_or_exist(imgfile_prefix)
result_files = []
prog_bar = mmcv.ProgressBar(len(self))
for idx in range(len(self)):
result = results[idx]
if isinstance(result, str):
result = np.load(result)
filename = os.path.join(self.img_dir, self.img_infos[idx]['filename'])
input_img = Image.open(filename).convert('RGB')
# For now, the visualized inference image will have the train id's
filename = self.img_infos[idx]['filename']
basename = osp.splitext(osp.basename(filename))[0]
vis_dir = osp.join(imgfile_prefix, "vis_tao")
mask_dir = osp.join(imgfile_prefix, "mask_tao")
check_and_create(vis_dir)
check_and_create(mask_dir)
png_filename = osp.join(mask_dir, f'{basename}.png')
overlay_fn = osp.join(vis_dir, f'{basename}.jpg')
output = Image.fromarray(result.astype(np.uint8)).convert('P')
output.save(png_filename)
output_palette = np.zeros((len(self.CLASSES), 3), dtype=np.uint8)
for id, color in self.id_color_map.items(): # noqa pylint: disable=W0622
output_palette[id] = color
output.putpalette(output_palette)
output = output.convert("RGB")
if self.input_type == "grayscale":
output = Image.fromarray(np.asarray(output).astype('uint8'))
else:
overlay_img = (np.asarray(input_img) / 2 + np.asarray(output) / 2).astype('uint8')
output = Image.fromarray(overlay_img)
output.save(overlay_fn)
result_files.append(png_filename)
prog_bar.update()
return result_files
def format_results(self, results, imgfile_prefix="/results", to_label_id=True):
"""Format the results into dir (standard format for Cityscapes
evaluation).
Args:
results (list): Testing results of the dataset.
imgfile_prefix (str | None): The prefix of image files. It
includes the file path and the prefix of filename, e.g.,
"a/b/prefix". If set to None, a temp directory will be created.
Default: "/results".
to_label_id (bool): whether to convert output to label_id for
submission. Default: True
Returns:
tuple: (result_files, tmp_dir), result_files is a list containing
the image paths, tmp_dir is the temporary directory created
for saving json/png files when img_prefix is not specified.
"""
assert isinstance(results, list), 'results must be a list'
assert len(results) == len(self), (
'The length of results is not equal to the dataset len: '
f'{len(results)} != {len(self)}')
if imgfile_prefix is None:
tmp_dir = tempfile.TemporaryDirectory()
imgfile_prefix = tmp_dir.name
else:
tmp_dir = None
result_files = self.results2img(results, imgfile_prefix, to_label_id)
return result_files, tmp_dir
def evaluate(self,
results,
metric='mIoU',
logger=None,
imgfile_prefix=None,
efficient_test=False):
"""Evaluation in Cityscapes/default protocol.
Args:
results (list): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated.
logger (logging.Logger | None | str): Logger used for printing
related information during evaluation. Default: None.
imgfile_prefix (str | None): The prefix of output image file,
for cityscapes evaluation only. It includes the file path and
the prefix of filename, e.g., "a/b/prefix".
If results are evaluated with cityscapes protocol, it would be
the prefix of output png files. The output files would be
png images under folder "a/b/prefix/xxx.png", where "xxx" is
the image name of cityscapes. If not specified, a temp file
will be created for evaluation.
Default: None.
Returns:
dict[str, float]: Cityscapes/default metrics.
"""
eval_results = dict()
metrics = metric.copy() if isinstance(metric, list) else [metric]
if 'cityscapes' in metrics:
eval_results.update(
self._evaluate_cityscapes(results, logger, imgfile_prefix))
metrics.remove('cityscapes')
if len(metrics) > 0:
eval_results.update(
super(SFDataset,
self).evaluate(results, metrics, logger, efficient_test))
return eval_results
def _evaluate_cityscapes(self, results, logger, imgfile_prefix):
"""Evaluation in Cityscapes protocol.
Args:
results (list): Testing results of the dataset.
logger (logging.Logger | str | None): Logger used for printing
related information during evaluation. Default: None.
imgfile_prefix (str | None): The prefix of output image file
Returns:
dict[str: float]: Cityscapes evaluation results.
"""
try:
import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as CSEval # noqa
except ImportError:
raise ImportError('Please run "pip install cityscapesscripts" to '
'install cityscapesscripts first.')
msg = 'Evaluating in Cityscapes style'
if logger is None:
msg = '\n' + msg
print_log(msg, logger=logger)
_, tmp_dir = self.format_results(results, imgfile_prefix)
if tmp_dir is None:
result_dir = imgfile_prefix
else:
result_dir = tmp_dir.name
eval_results = dict()
print_log(f'Evaluating results under {result_dir} ...', logger=logger)
CSEval.args.evalInstLevelScore = True
CSEval.args.predictionPath = osp.abspath(result_dir)
CSEval.args.evalPixelAccuracy = True
CSEval.args.JSONOutput = False
seg_map_list = []
pred_list = []
# when evaluating with official cityscapesscripts,
# **_gtFine_labelIds.png is used
for seg_map in mmcv.scandir(
self.ann_dir, 'gtFine_labelIds.png', recursive=True):
seg_map_list.append(osp.join(self.ann_dir, seg_map))
pred_list.append(CSEval.getPrediction(CSEval.args, seg_map))
eval_results.update(
CSEval.evaluateImgLists(pred_list, seg_map_list, CSEval.args))
if tmp_dir is not None:
tmp_dir.cleanup()
return eval_results
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/dataloader/segformer_dataset.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmskeleton
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: skip-file
# pylint: disable-all
"""Transforms Module."""
import mmcv
from mmcv.utils import deprecated_api_warning, is_tuple_of
from numpy import random
import numpy as np
from nvidia_tao_pytorch.cv.segformer.dataloader.builder import PIPELINES
@PIPELINES.register_module()
class AlignedResize(object):
"""Resize images & seg. Align
"""
def __init__(self,
img_scale=None,
multiscale_mode='range',
ratio_range=None,
keep_ratio=True,
size_divisor=32):
""" Init Module."""
if img_scale is None:
self.img_scale = None
else:
if isinstance(img_scale, list):
self.img_scale = img_scale
else:
self.img_scale = [img_scale]
assert mmcv.is_list_of(self.img_scale, tuple), "Image scale should be a tuple."
if ratio_range is not None:
# mode 1: given img_scale=None and a range of image ratio
# mode 2: given a scale and a range of image ratio
assert self.img_scale is None or len(self.img_scale) == 1, "length of img_scale should be 1."
else:
# mode 3 and 4: given multiple scales or a range of scales
assert multiscale_mode in ['value', 'range'], "Provided multiscale_mode: {} not in [value, range]".format(multiscale_mode)
self.multiscale_mode = multiscale_mode
self.ratio_range = ratio_range
self.keep_ratio = keep_ratio
self.size_divisor = size_divisor
@staticmethod
def random_select(img_scales):
"""Randomly select an img_scale from given candidates.
Args:
img_scales (list[tuple]): Images scales for selection.
Returns:
(tuple, int): Returns a tuple ``(img_scale, scale_idx)``,
where ``img_scale`` is the selected image scale and
``scale_idx`` is the selected index in the given candidates.
"""
assert mmcv.is_list_of(img_scales, tuple), "Image scales are not list or tuple."
scale_idx = np.random.randint(len(img_scales))
img_scale = img_scales[scale_idx]
return img_scale, scale_idx
@staticmethod
def random_sample(img_scales):
"""Randomly sample an img_scale when ``multiscale_mode=='range'``.
Args:
img_scales (list[tuple]): Images scale range for sampling.
There must be two tuples in img_scales, which specify the lower
and upper bound of image scales.
Returns:
(tuple, None): Returns a tuple ``(img_scale, None)``, where
``img_scale`` is sampled scale and None is just a placeholder
to be consistent with :func:`random_select`.
"""
assert mmcv.is_list_of(img_scales, tuple) and len(img_scales) == 2, "Length of Image scales is less than 2."
img_scale_long = [max(s) for s in img_scales]
img_scale_short = [min(s) for s in img_scales]
long_edge = np.random.randint(
min(img_scale_long),
max(img_scale_long) + 1)
short_edge = np.random.randint(
min(img_scale_short),
max(img_scale_short) + 1)
img_scale = (long_edge, short_edge)
return img_scale, None
@staticmethod
def random_sample_ratio(img_scale, ratio_range):
"""Randomly sample an img_scale when ``ratio_range`` is specified.
A ratio will be randomly sampled from the range specified by
``ratio_range``. Then it would be multiplied with ``img_scale`` to
generate sampled scale.
Args:
img_scale (tuple): Images scale base to multiply with ratio.
ratio_range (tuple[float]): The minimum and maximum ratio to scale
the ``img_scale``.
Returns:
(tuple, None): Returns a tuple ``(scale, None)``, where
``scale`` is sampled ratio multiplied with ``img_scale`` and
None is just a placeholder to be consistent with
:func:`random_select`.
"""
assert isinstance(img_scale, tuple) and len(img_scale) == 2, "img_scale is not tuple/ length of img_scale should be 2."
min_ratio, max_ratio = ratio_range
assert min_ratio <= max_ratio, "Max ratio for image scale should not be greater than Min ratio."
ratio = np.random.random_sample() * (max_ratio - min_ratio) + min_ratio
scale = int(img_scale[0] * ratio), int(img_scale[1] * ratio)
return scale, None
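# Hedged worked example (not part of the original source): with
# img_scale=(1024, 512) and ratio_range=(0.5, 2.0), a sampled ratio of 1.5
# yields the scale below.
_example_ratio_scale = (int(1024 * 1.5), int(512 * 1.5))  # -> (1536, 768)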
def _random_scale(self, results):
"""Randomly sample an img_scale according to ``ratio_range`` and
``multiscale_mode``.
If ``ratio_range`` is specified, a ratio will be sampled and be
multiplied with ``img_scale``.
If multiple scales are specified by ``img_scale``, a scale will be
sampled according to ``multiscale_mode``.
Otherwise, single scale will be used.
Args:
results (dict): Result dict from :obj:`dataset`.
Returns:
dict: Two new keys 'scale` and 'scale_idx` are added into
``results``, which would be used by subsequent pipelines.
"""
if self.ratio_range is not None:
if self.img_scale is None:
h, w = results['img'].shape[:2]
scale, scale_idx = self.random_sample_ratio((w, h),
self.ratio_range)
else:
scale, scale_idx = self.random_sample_ratio(
self.img_scale[0], self.ratio_range)
elif len(self.img_scale) == 1:
scale, scale_idx = self.img_scale[0], 0
elif self.multiscale_mode == 'range':
scale, scale_idx = self.random_sample(self.img_scale)
elif self.multiscale_mode == 'value':
scale, scale_idx = self.random_select(self.img_scale)
else:
raise NotImplementedError
results['scale'] = scale
results['scale_idx'] = scale_idx
def _align(self, img, size_divisor, interpolation=None):
"""Align."""
align_h = int(np.ceil(img.shape[0] / size_divisor)) * size_divisor
align_w = int(np.ceil(img.shape[1] / size_divisor)) * size_divisor
if interpolation == None: # noqa pylint: disable=C0121
img = mmcv.imresize(img, (align_w, align_h))
else:
img = mmcv.imresize(img, (align_w, align_h), interpolation=interpolation)
return img
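# Hedged worked example (not part of the original source): _align() rounds each
# side up to the nearest multiple of size_divisor, e.g. a 600x801 image with
# size_divisor=32 is resized to 608x832.
def _example_aligned_shape(h=600, w=801, size_divisor=32):
    """Hypothetical helper mirroring the rounding performed by _align()."""
    return (int(np.ceil(h / size_divisor)) * size_divisor,
            int(np.ceil(w / size_divisor)) * size_divisor)  # -> (608, 832)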
def _resize_img(self, results):
"""Resize images with ``results['scale']``."""
if self.keep_ratio:
img, scale_factor = mmcv.imrescale(
results['img'], results['scale'], return_scale=True)
# align #
img = self._align(img, self.size_divisor)
# the w_scale and h_scale have a minor difference
# a real fix should be done in mmcv.imrescale in the future
new_h, new_w = img.shape[:2]
h, w = results['img'].shape[:2]
w_scale = new_w / w
h_scale = new_h / h
else:
img, w_scale, h_scale = mmcv.imresize(
results['img'], results['scale'], return_scale=True)
h, w = img.shape[:2]
assert int(np.ceil(h / self.size_divisor)) * self.size_divisor == h and \
int(np.ceil(w / self.size_divisor)) * self.size_divisor == w, "img size not align. h:{} w:{}".format(h, w)
scale_factor = np.array([w_scale, h_scale, w_scale, h_scale],
dtype=np.float32)
results['img'] = img
results['img_shape'] = img.shape
results['pad_shape'] = img.shape # in case that there is no padding
results['scale_factor'] = scale_factor
results['keep_ratio'] = self.keep_ratio
def _resize_seg(self, results):
"""Resize semantic segmentation map with ``results['scale']``."""
for key in results.get('seg_fields', []):
if self.keep_ratio:
gt_seg = mmcv.imrescale(
results[key], results['scale'], interpolation='nearest')
gt_seg = self._align(gt_seg, self.size_divisor, interpolation='nearest')
else:
gt_seg = mmcv.imresize(
results[key], results['scale'], interpolation='nearest')
h, w = gt_seg.shape[:2]
assert int(np.ceil(h / self.size_divisor)) * self.size_divisor == h and \
int(np.ceil(w / self.size_divisor)) * self.size_divisor == w, "gt_seg size not align. h:{} w:{}".format(h, w)
results[key] = gt_seg
def __call__(self, results):
"""Call function to resize images, bounding boxes, masks, semantic
segmentation map.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Resized results, 'img_shape', 'pad_shape', 'scale_factor',
'keep_ratio' keys are added into result dict.
"""
if 'scale' not in results:
self._random_scale(results)
self._resize_img(results)
self._resize_seg(results)
return results
def __repr__(self):
"""Reproduce str."""
repr_str = self.__class__.__name__
repr_str += (f'(img_scale={self.img_scale}, '
f'multiscale_mode={self.multiscale_mode}, '
f'ratio_range={self.ratio_range}, '
f'keep_ratio={self.keep_ratio})')
return repr_str
@PIPELINES.register_module()
class Resize(object):
"""Resize images & seg.
This transform resizes the input image to some scale. If the input dict
contains the key "scale", then the scale in the input dict is used,
otherwise the specified scale in the init method is used.
``img_scale`` can be None, a tuple (single-scale) or a list of tuples
(multi-scale). There are 4 multiscale modes:
- ``ratio_range is not None``:
1. When img_scale is None, img_scale is the shape of image in results
(img_scale = results['img'].shape[:2]) and the image is resized based
on the original size. (mode 1)
2. When img_scale is a tuple (single-scale), randomly sample a ratio from
the ratio range and multiply it with the image scale. (mode 2)
- ``ratio_range is None and multiscale_mode == "range"``: randomly sample a
scale from a range. (mode 3)
- ``ratio_range is None and multiscale_mode == "value"``: randomly sample a
scale from multiple scales. (mode 4)
Args:
img_scale (tuple or list[tuple]): Images scales for resizing.
multiscale_mode (str): Either "range" or "value".
ratio_range (tuple[float]): (min_ratio, max_ratio)
keep_ratio (bool): Whether to keep the aspect ratio when resizing the
image.
"""
def __init__(self,
img_scale=None,
multiscale_mode='range',
ratio_range=None,
keep_ratio=True):
"""Init Module."""
if img_scale is None:
self.img_scale = None
else:
if isinstance(img_scale, list):
self.img_scale = img_scale
else:
self.img_scale = [img_scale]
assert mmcv.is_list_of(self.img_scale, tuple), "Image scale should be a tuple."
if ratio_range is not None:
# mode 1: given img_scale=None and a range of image ratio
# mode 2: given a scale and a range of image ratio
assert self.img_scale is None or len(self.img_scale) == 1, "Length of img_scale should be 1.Img scale is {}".format(len(self.img_scale))
else:
# mode 3 and 4: given multiple scales or a range of scales
assert multiscale_mode in ['value', 'range'], "Provided multiscale_mode: {} not in [value, range]".format(multiscale_mode)
self.multiscale_mode = multiscale_mode
self.ratio_range = ratio_range
self.keep_ratio = keep_ratio
@staticmethod
def random_select(img_scales):
"""Randomly select an img_scale from given candidates.
Args:
img_scales (list[tuple]): Images scales for selection.
Returns:
(tuple, int): Returns a tuple ``(img_scale, scale_idx)``,
where ``img_scale`` is the selected image scale and
``scale_idx`` is the selected index in the given candidates.
"""
assert mmcv.is_list_of(img_scales, tuple), "Image scale should be tuple."
scale_idx = np.random.randint(len(img_scales))
img_scale = img_scales[scale_idx]
return img_scale, scale_idx
@staticmethod
def random_sample(img_scales):
"""Randomly sample an img_scale when ``multiscale_mode=='range'``.
Args:
img_scales (list[tuple]): Images scale range for sampling.
There must be two tuples in img_scales, which specify the lower
and upper bound of image scales.
Returns:
(tuple, None): Returns a tuple ``(img_scale, None)``, where
``img_scale`` is sampled scale and None is just a placeholder
to be consistent with :func:`random_select`.
"""
assert mmcv.is_list_of(img_scales, tuple) and len(img_scales) == 2, "Image scales should be tuple."
img_scale_long = [max(s) for s in img_scales]
img_scale_short = [min(s) for s in img_scales]
long_edge = np.random.randint(
min(img_scale_long),
max(img_scale_long) + 1)
short_edge = np.random.randint(
min(img_scale_short),
max(img_scale_short) + 1)
img_scale = (long_edge, short_edge)
return img_scale, None
@staticmethod
def random_sample_ratio(img_scale, ratio_range):
"""Randomly sample an img_scale when ``ratio_range`` is specified.
A ratio will be randomly sampled from the range specified by
``ratio_range``. Then it would be multiplied with ``img_scale`` to
generate sampled scale.
Args:
img_scale (tuple): Images scale base to multiply with ratio.
ratio_range (tuple[float]): The minimum and maximum ratio to scale
the ``img_scale``.
Returns:
(tuple, None): Returns a tuple ``(scale, None)``, where
``scale`` is sampled ratio multiplied with ``img_scale`` and
None is just a placeholder to be consistent with
:func:`random_select`.
"""
assert isinstance(img_scale, tuple) and len(img_scale) == 2, "Image scale should be a tuple."
min_ratio, max_ratio = ratio_range
assert min_ratio <= max_ratio, "Max ration should not be lesser than min_ratio."
ratio = np.random.random_sample() * (max_ratio - min_ratio) + min_ratio
scale = int(img_scale[0] * ratio), int(img_scale[1] * ratio)
return scale, None
def _random_scale(self, results):
"""Randomly sample an img_scale according to ``ratio_range`` and
``multiscale_mode``.
If ``ratio_range`` is specified, a ratio will be sampled and be
multiplied with ``img_scale``.
If multiple scales are specified by ``img_scale``, a scale will be
sampled according to ``multiscale_mode``.
Otherwise, single scale will be used.
Args:
results (dict): Result dict from :obj:`dataset`.
Returns:
dict: Two new keys 'scale` and 'scale_idx` are added into
``results``, which would be used by subsequent pipelines.
"""
if self.ratio_range is not None:
if self.img_scale is None:
h, w = results['img'].shape[:2]
scale, scale_idx = self.random_sample_ratio((w, h),
self.ratio_range)
else:
scale, scale_idx = self.random_sample_ratio(
self.img_scale[0], self.ratio_range)
elif len(self.img_scale) == 1:
scale, scale_idx = self.img_scale[0], 0
elif self.multiscale_mode == 'range':
scale, scale_idx = self.random_sample(self.img_scale)
elif self.multiscale_mode == 'value':
scale, scale_idx = self.random_select(self.img_scale)
else:
raise NotImplementedError
results['scale'] = scale
results['scale_idx'] = scale_idx
def _resize_img(self, results):
"""Resize images with ``results['scale']``."""
if self.keep_ratio:
img, scale_factor = mmcv.imrescale(
results['img'], results['scale'], return_scale=True)
# the w_scale and h_scale have a minor difference
# a real fix should be done in mmcv.imrescale in the future
new_h, new_w = img.shape[:2]
h, w = results['img'].shape[:2]
w_scale = new_w / w
h_scale = new_h / h
else:
img, w_scale, h_scale = mmcv.imresize(
results['img'], results['scale'], return_scale=True)
scale_factor = np.array([w_scale, h_scale, w_scale, h_scale],
dtype=np.float32)
results['img'] = img
results['img_shape'] = img.shape
results['pad_shape'] = img.shape # in case that there is no padding
results['scale_factor'] = scale_factor
results['keep_ratio'] = self.keep_ratio
def _resize_seg(self, results):
"""Resize semantic segmentation map with ``results['scale']``."""
for key in results.get('seg_fields', []):
if self.keep_ratio:
gt_seg = mmcv.imrescale(
results[key], results['scale'], interpolation='nearest')
else:
gt_seg = mmcv.imresize(
results[key], results['scale'], interpolation='nearest')
results[key] = gt_seg
def __call__(self, results):
"""Call function to resize images, bounding boxes, masks, semantic
segmentation map.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Resized results, 'img_shape', 'pad_shape', 'scale_factor',
'keep_ratio' keys are added into result dict.
"""
if 'scale' not in results:
self._random_scale(results)
self._resize_img(results)
self._resize_seg(results)
return results
def __repr__(self):
"""Reproduce."""
repr_str = self.__class__.__name__
repr_str += (f'(img_scale={self.img_scale}, '
f'multiscale_mode={self.multiscale_mode}, '
f'ratio_range={self.ratio_range}, '
f'keep_ratio={self.keep_ratio})')
return repr_str
@PIPELINES.register_module()
class RandomFlip(object):
"""Flip the image & seg.
If the input dict contains the key "flip", then the flag will be used,
otherwise it will be randomly decided by a ratio specified in the init
method.
Args:
prob (float, optional): The flipping probability. Default: None.
direction(str, optional): The flipping direction. Options are
'horizontal' and 'vertical'. Default: 'horizontal'.
"""
@deprecated_api_warning({'flip_ratio': 'prob'}, cls_name='RandomFlip')
def __init__(self, prob=None, direction='horizontal'):
""" Init Module."""
self.prob = prob
self.direction = direction
if prob is not None:
assert prob >= 0 and prob <= 1 # noqa pylint: disable=R1716
assert direction in ['horizontal', 'vertical'], "Direction should be horizontal or vertical."
def __call__(self, results):
"""Call function to flip bounding boxes, masks, semantic segmentation
maps.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Flipped results, 'flip', 'flip_direction' keys are added into
result dict.
"""
if 'flip' not in results:
flip = True if np.random.rand() < self.prob else False # noqa pylint: disable=R1719
results['flip'] = flip
if 'flip_direction' not in results:
results['flip_direction'] = self.direction
if results['flip']:
# flip image
results['img'] = mmcv.imflip(
results['img'], direction=results['flip_direction'])
# flip segs
for key in results.get('seg_fields', []):
# use copy() to make numpy stride positive
results[key] = mmcv.imflip(
results[key], direction=results['flip_direction']).copy()
return results
def __repr__(self):
"""Reproduce string."""
return self.__class__.__name__ + f'(prob={self.prob})'
@PIPELINES.register_module()
class Pad(object):
"""Pad the image & mask.
There are two padding modes: (1) pad to a fixed size and (2) pad to the
minimum size that is divisible by some number.
Added keys are "pad_shape", "pad_fixed_size", "pad_size_divisor",
Args:
size (tuple, optional): Fixed padding size.
size_divisor (int, optional): The divisor of padded size.
pad_val (float, optional): Padding value. Default: 0.
seg_pad_val (float, optional): Padding value of segmentation map.
Default: 255.
"""
def __init__(self,
size=None,
size_divisor=None,
pad_val=0,
seg_pad_val=255):
"""Init Module."""
self.size = size
self.size_divisor = size_divisor
self.pad_val = pad_val
self.seg_pad_val = seg_pad_val
# only one of size and size_divisor should be valid
assert size is not None or size_divisor is not None, "Size and size divisor both cannot be None."
assert size is None or size_divisor is None, "Size and size divisor both cannot be None."
def _pad_img(self, results):
"""Pad images according to ``self.size``."""
if self.size is not None:
padded_img = mmcv.impad(
results['img'], shape=self.size, pad_val=self.pad_val)
elif self.size_divisor is not None:
padded_img = mmcv.impad_to_multiple(
results['img'], self.size_divisor, pad_val=self.pad_val)
results['img'] = padded_img
results['pad_shape'] = padded_img.shape
results['pad_fixed_size'] = self.size
results['pad_size_divisor'] = self.size_divisor
def _pad_seg(self, results):
"""Pad masks according to ``results['pad_shape']``."""
for key in results.get('seg_fields', []):
results[key] = mmcv.impad(
results[key],
shape=results['pad_shape'][:2],
pad_val=self.seg_pad_val)
def __call__(self, results):
"""Call function to pad images, masks, semantic segmentation maps.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Updated result dict.
"""
self._pad_img(results)
self._pad_seg(results)
return results
def __repr__(self):
""" Reproduce class."""
repr_str = self.__class__.__name__
repr_str += f'(size={self.size}, size_divisor={self.size_divisor}, ' \
f'pad_val={self.pad_val})'
return repr_str
@PIPELINES.register_module()
class Normalize(object):
"""Normalize the image.
Added key is "img_norm_cfg".
Args:
mean (sequence): Mean values of 3 channels.
std (sequence): Std values of 3 channels.
to_rgb (bool): Whether to convert the image from BGR to RGB,
default is true.
"""
def __init__(self, mean, std, to_rgb=True):
"""Init Module."""
self.mean = np.array(mean, dtype=np.float32)
self.std = np.array(std, dtype=np.float32)
self.to_rgb = to_rgb
def __call__(self, results):
"""Call function to normalize images.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Normalized results, 'img_norm_cfg' key is added into
result dict.
"""
results['img'] = mmcv.imnormalize(results['img'], self.mean, self.std,
self.to_rgb)
results['img_norm_cfg'] = dict(
mean=self.mean, std=self.std, to_rgb=self.to_rgb)
return results
def __repr__(self):
"""Reproduce string."""
repr_str = self.__class__.__name__
repr_str += f'(mean={self.mean}, std={self.std}, to_rgb=' \
f'{self.to_rgb})'
return repr_str
@PIPELINES.register_module()
class Rerange(object):
"""Rerange the image pixel value.
Args:
min_value (float or int): Minimum value of the reranged image.
Default: 0.
max_value (float or int): Maximum value of the reranged image.
Default: 255.
"""
def __init__(self, min_value=0, max_value=255):
""" Init Module."""
assert isinstance(min_value, (float, int)), "min_value of images should be float or int."
assert isinstance(max_value, (float, int)), "max_value of images should be float or int."
assert min_value < max_value, "max_value of the pixels should not be lesser than the min_value."
self.min_value = min_value
self.max_value = max_value
def __call__(self, results):
"""Call function to rerange images.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Reranged results.
"""
img = results['img']
img_min_value = np.min(img)
img_max_value = np.max(img)
assert img_min_value < img_max_value, "max_value of the pixels should not be lesser than the min_value."
# rerange to [0, 1]
img = (img - img_min_value) / (img_max_value - img_min_value)
# rerange to [min_value, max_value]
img = img * (self.max_value - self.min_value) + self.min_value
results['img'] = img
return results
def __repr__(self):
"""Reproduce string."""
repr_str = self.__class__.__name__
repr_str += f'(min_value={self.min_value}, max_value={self.max_value})'
return repr_str
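# Hedged worked example (not part of the original source): for an image whose
# pixels span [10, 60], Rerange(min_value=0, max_value=255) maps a pixel of 35
# to ((35 - 10) / (60 - 10)) * (255 - 0) + 0 == 127.5.
_example_reranged_pixel = ((35 - 10) / (60 - 10)) * (255 - 0) + 0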
@PIPELINES.register_module()
class CLAHE(object):
"""Use CLAHE method to process the image.
See `ZUIDERVELD,K. Contrast Limited Adaptive Histogram Equalization[J].
Graphics Gems, 1994:474-485.` for more information.
Args:
clip_limit (float): Threshold for contrast limiting. Default: 40.0.
tile_grid_size (tuple[int]): Size of grid for histogram equalization.
Input image will be divided into equally sized rectangular tiles.
It defines the number of tiles in row and column. Default: (8, 8).
"""
def __init__(self, clip_limit=40.0, tile_grid_size=(8, 8)):
""" Init Module."""
assert isinstance(clip_limit, (float, int)), "Clip Limit should be int or float."
self.clip_limit = clip_limit
assert is_tuple_of(tile_grid_size, int), "Tile Grid size should be of int type."
assert len(tile_grid_size) == 2, "Tile Grid size should be 2."
self.tile_grid_size = tile_grid_size
def __call__(self, results):
"""Call function to Use CLAHE method process images.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Processed results.
"""
for i in range(results['img'].shape[2]):
results['img'][:, :, i] = mmcv.clahe(
np.array(results['img'][:, :, i], dtype=np.uint8),
self.clip_limit, self.tile_grid_size)
return results
def __repr__(self):
"""Reproduce string."""
repr_str = self.__class__.__name__
repr_str += f'(clip_limit={self.clip_limit}, '\
f'tile_grid_size={self.tile_grid_size})'
return repr_str
@PIPELINES.register_module()
class RandomCrop(object):
"""Random crop the image & seg.
Args:
crop_size (tuple): Expected size after cropping, (h, w).
        cat_max_ratio (float): The maximum ratio that a single category could
            occupy. Default: 1.
        ignore_index (int): The label index to be ignored when computing the
            category ratio. Default: 255.
"""
def __init__(self, crop_size, cat_max_ratio=1., ignore_index=255):
"""Init Module."""
assert crop_size[0] > 0 and crop_size[1] > 0., "Crop size values should be greater than 0."
self.crop_size = crop_size
self.cat_max_ratio = cat_max_ratio
self.ignore_index = ignore_index
def get_crop_bbox(self, img):
"""Randomly get a crop bounding box."""
margin_h = max(img.shape[0] - self.crop_size[0], 0)
margin_w = max(img.shape[1] - self.crop_size[1], 0)
offset_h = np.random.randint(0, margin_h + 1)
offset_w = np.random.randint(0, margin_w + 1)
crop_y1, crop_y2 = offset_h, offset_h + self.crop_size[0]
crop_x1, crop_x2 = offset_w, offset_w + self.crop_size[1]
return crop_y1, crop_y2, crop_x1, crop_x2
def crop(self, img, crop_bbox):
"""Crop from ``img``"""
crop_y1, crop_y2, crop_x1, crop_x2 = crop_bbox
img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...]
return img
def __call__(self, results):
"""Call function to randomly crop images, semantic segmentation maps.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Randomly cropped results, 'img_shape' key in result dict is
updated according to crop size.
"""
img = results['img']
crop_bbox = self.get_crop_bbox(img)
if self.cat_max_ratio < 1.:
# Repeat 10 times
for _ in range(10):
seg_temp = self.crop(results['gt_semantic_seg'], crop_bbox)
labels, cnt = np.unique(seg_temp, return_counts=True)
cnt = cnt[labels != self.ignore_index]
if len(cnt) > 1 and np.max(cnt) / np.sum(
cnt) < self.cat_max_ratio:
break
crop_bbox = self.get_crop_bbox(img)
# crop the image
img = self.crop(img, crop_bbox)
img_shape = img.shape
results['img'] = img
results['img_shape'] = img_shape
# crop semantic seg
for key in results.get('seg_fields', []):
results[key] = self.crop(results[key], crop_bbox)
return results
def __repr__(self):
"""Reproduce string."""
return self.__class__.__name__ + f'(crop_size={self.crop_size})'
@PIPELINES.register_module()
class CenterCrop(object):
"""Center crop the image & seg.
Args:
crop_size (tuple): Expected size after cropping, (h, w).
"""
def __init__(self, crop_size, ignore_index=255):
"""Init Module."""
assert crop_size[0] > 0 and crop_size[1] > 0, "Crop size should be greater than 0."
self.crop_size = crop_size
self.ignore_index = ignore_index
def get_crop_bbox(self, img):
"""Randomly get a crop bounding box."""
margin_h = max(img.shape[0] - self.crop_size[0], 0)
margin_w = max(img.shape[1] - self.crop_size[1], 0)
offset_h = margin_h // 2 # np.random.randint(0, margin_h + 1)
offset_w = margin_w // 2 # np.random.randint(0, margin_w + 1)
crop_y1, crop_y2 = offset_h, offset_h + self.crop_size[0]
crop_x1, crop_x2 = offset_w, offset_w + self.crop_size[1]
return crop_y1, crop_y2, crop_x1, crop_x2
def crop(self, img, crop_bbox):
"""Crop from ``img``"""
crop_y1, crop_y2, crop_x1, crop_x2 = crop_bbox
img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...]
return img
def __call__(self, results):
"""Call function to randomly crop images, semantic segmentation maps.
Args:
results (dict): Result dict from loading pipeline.
Returns:
            dict: Center-cropped results, 'img_shape' key in result dict is
                updated according to crop size.
"""
img = results['img']
crop_bbox = self.get_crop_bbox(img)
# crop the image
img = self.crop(img, crop_bbox)
img_shape = img.shape
results['img'] = img
results['img_shape'] = img_shape
# crop semantic seg
for key in results.get('seg_fields', []):
results[key] = self.crop(results[key], crop_bbox)
return results
def __repr__(self):
"""Reproduce string."""
return self.__class__.__name__ + f'(crop_size={self.crop_size})'
@PIPELINES.register_module()
class RandomRotate(object):
"""Rotate the image & seg.
Args:
prob (float): The rotation probability.
degree (float, tuple[float]): Range of degrees to select from. If
degree is a number instead of tuple like (min, max),
the range of degree will be (``-degree``, ``+degree``)
pad_val (float, optional): Padding value of image. Default: 0.
seg_pad_val (float, optional): Padding value of segmentation map.
Default: 255.
center (tuple[float], optional): Center point (w, h) of the rotation in
the source image. If not specified, the center of the image will be
used. Default: None.
auto_bound (bool): Whether to adjust the image size to cover the whole
rotated image. Default: False
"""
def __init__(self,
prob,
degree,
pad_val=0,
seg_pad_val=255,
center=None,
auto_bound=False):
"""Init Module."""
self.prob = prob
assert prob >= 0 and prob <= 1 # noqa pylint: disable=R1716
if isinstance(degree, (float, int)):
assert degree > 0, f'degree {degree} should be positive'
self.degree = (-degree, degree)
else:
self.degree = degree
assert len(self.degree) == 2, f'degree {self.degree} should be a ' \
f'tuple of (min, max)'
self.pal_val = pad_val
self.seg_pad_val = seg_pad_val
self.center = center
self.auto_bound = auto_bound
def __call__(self, results):
"""Call function to rotate image, semantic segmentation maps.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Rotated results.
"""
rotate = True if np.random.rand() < self.prob else False # noqa pylint: disable=R1719
degree = np.random.uniform(min(*self.degree), max(*self.degree))
if rotate:
# rotate image
results['img'] = mmcv.imrotate(
results['img'],
angle=degree,
border_value=self.pal_val,
center=self.center,
auto_bound=self.auto_bound)
# rotate segs
for key in results.get('seg_fields', []):
results[key] = mmcv.imrotate(
results[key],
angle=degree,
border_value=self.seg_pad_val,
center=self.center,
auto_bound=self.auto_bound,
interpolation='nearest')
return results
def __repr__(self):
"""Reproduce string."""
repr_str = self.__class__.__name__
repr_str += f'(prob={self.prob}, ' \
f'degree={self.degree}, ' \
f'pad_val={self.pal_val}, ' \
f'seg_pad_val={self.seg_pad_val}, ' \
f'center={self.center}, ' \
f'auto_bound={self.auto_bound})'
return repr_str
@PIPELINES.register_module()
class RGB2Gray(object):
"""Convert RGB image to grayscale image.
    This transform calculates the weighted mean of the input image channels with
    ``weights`` and then expands the channels to ``out_channels``. When
``out_channels`` is None, the number of output channels is the same as
input channels.
Args:
out_channels (int): Expected number of output channels after
transforming. Default: None.
weights (tuple[float]): The weights to calculate the weighted mean.
Default: (0.299, 0.587, 0.114).
"""
def __init__(self, out_channels=None, weights=(0.299, 0.587, 0.114)):
""" Init Module."""
assert out_channels is None or out_channels > 0, "Out Channels should be greater than 0."
self.out_channels = out_channels
        assert isinstance(weights, tuple), "Weights should be a tuple."
for item in weights:
assert isinstance(item, (float, int)), "Item should be a float or int."
self.weights = weights
def __call__(self, results):
"""Call function to convert RGB image to grayscale image.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Result dict with grayscale image.
"""
img = results['img']
        assert len(img.shape) == 3, "Image should have 3 dimensions (H, W, C)."
        assert img.shape[2] == len(self.weights), "Number of image channels should match the number of weights."
weights = np.array(self.weights).reshape((1, 1, -1))
img = (img * weights).sum(2, keepdims=True)
if self.out_channels is None:
img = img.repeat(weights.shape[2], axis=2)
else:
img = img.repeat(self.out_channels, axis=2)
results['img'] = img
results['img_shape'] = img.shape
return results
def __repr__(self):
"""Reproduce string."""
repr_str = self.__class__.__name__
repr_str += f'(out_channels={self.out_channels}, ' \
f'weights={self.weights})'
return repr_str
@PIPELINES.register_module()
class AdjustGamma(object):
"""Using gamma correction to process the image.
Args:
gamma (float or int): Gamma value used in gamma correction.
Default: 1.0.
"""
def __init__(self, gamma=1.0):
"""Init Module."""
        assert isinstance(gamma, (float, int)), "Gamma should be a float or int."
assert gamma > 0, "Gamma should be greater than 0."
self.gamma = gamma
inv_gamma = 1.0 / gamma
self.table = np.array([(i / 255.0)**inv_gamma * 255
for i in np.arange(256)]).astype('uint8')
def __call__(self, results):
"""Call function to process the image with gamma correction.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Processed results.
"""
results['img'] = mmcv.lut_transform(
np.array(results['img'], dtype=np.uint8), self.table)
return results
def __repr__(self):
"""Reproduce string."""
return self.__class__.__name__ + f'(gamma={self.gamma})'
@PIPELINES.register_module()
class MaillaryHack(object):
""" map MV 65 class to 19 class like Cityscapes
"""
def __init__(self):
"""Init module."""
self.map = [[13, 24, 41], [2, 15], [17], [6], [3], [45, 47], [48], [50], [30], [29],
[27], [19], [20, 21, 22], [55], [61], [54], [58], [57], [52]]
self.others = [i for i in range(66)]
for i in self.map:
for j in i:
if j in self.others:
self.others.remove(j)
def __call__(self, results):
"""Call function to process the image with gamma correction.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Processed results.
"""
gt_map = results['gt_semantic_seg']
# others -> 255
for value in self.others:
gt_map[gt_map == value] = 255
for index, map in enumerate(self.map): # noqa pylint: disable=W0622
for value in map: # noqa pylint: disable=W0622
gt_map[gt_map == value] = index
results['gt_semantic_seg'] = gt_map
return results
def __repr__(self):
"""Reproduce string."""
return 'MaillaryHack'
@PIPELINES.register_module()
class SegRescale(object):
"""Rescale semantic segmentation maps.
Args:
scale_factor (float): The scale factor of the final output.
"""
def __init__(self, scale_factor=1):
""" Init Module."""
self.scale_factor = scale_factor
def __call__(self, results):
"""Call function to scale the semantic segmentation map.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Result dict with semantic segmentation map scaled.
"""
for key in results.get('seg_fields', []):
if self.scale_factor != 1:
results[key] = mmcv.imrescale(
results[key], self.scale_factor, interpolation='nearest')
return results
def __repr__(self):
"""Reproduce string."""
return self.__class__.__name__ + f'(scale_factor={self.scale_factor})'
@PIPELINES.register_module()
class PhotoMetricDistortion(object):
"""Apply photometric distortion to image sequentially, every transformation
is applied with a probability of 0.5. The position of random contrast is in
second or second to last.
1. random brightness
2. random contrast (mode 0)
3. convert color from BGR to HSV
4. random saturation
5. random hue
6. convert color from HSV to BGR
7. random contrast (mode 1)
8. randomly swap channels
Args:
brightness_delta (int): delta of brightness.
contrast_range (tuple): range of contrast.
saturation_range (tuple): range of saturation.
hue_delta (int): delta of hue.
"""
def __init__(self,
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18):
"""Init Module."""
self.brightness_delta = brightness_delta
self.contrast_lower, self.contrast_upper = contrast_range
self.saturation_lower, self.saturation_upper = saturation_range
self.hue_delta = hue_delta
def convert(self, img, alpha=1, beta=0):
"""Multiple with alpha and add beat with clip."""
img = img.astype(np.float32) * alpha + beta
img = np.clip(img, 0, 255)
return img.astype(np.uint8)
def brightness(self, img):
"""Brightness distortion."""
if random.randint(2):
return self.convert(
img,
beta=random.uniform(-self.brightness_delta,
self.brightness_delta))
return img
def contrast(self, img):
"""Contrast distortion."""
if random.randint(2):
return self.convert(
img,
alpha=random.uniform(self.contrast_lower, self.contrast_upper))
return img
def saturation(self, img):
"""Saturation distortion."""
if random.randint(2):
img = mmcv.bgr2hsv(img)
img[:, :, 1] = self.convert(
img[:, :, 1],
alpha=random.uniform(self.saturation_lower,
self.saturation_upper))
img = mmcv.hsv2bgr(img)
return img
def hue(self, img):
"""Hue distortion."""
if random.randint(2):
img = mmcv.bgr2hsv(img)
img[:, :,
0] = (img[:, :, 0].astype(int) +
random.randint(-self.hue_delta, self.hue_delta)) % 180
img = mmcv.hsv2bgr(img)
return img
def __call__(self, results):
"""Call function to perform photometric distortion on images.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Result dict with images distorted.
"""
img = results['img']
# random brightness
img = self.brightness(img)
# mode == 0 --> do random contrast first
# mode == 1 --> do random contrast last
mode = random.randint(2)
if mode == 1:
img = self.contrast(img)
# random saturation
img = self.saturation(img)
# random hue
img = self.hue(img)
# random contrast
if mode == 0:
img = self.contrast(img)
results['img'] = img
return results
def __repr__(self):
"""Reproduce string."""
repr_str = self.__class__.__name__
repr_str += (f'(brightness_delta={self.brightness_delta}, '
f'contrast_range=({self.contrast_lower}, '
f'{self.contrast_upper}), '
f'saturation_range=({self.saturation_lower}, '
f'{self.saturation_upper}), '
f'hue_delta={self.hue_delta})')
return repr_str
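

# --- Minimal usage sketch (not part of the original file) ---------------------
# Illustrates how two of the pipeline transforms defined above could be chained
# on a dummy image; the mean/std values are placeholders, not project defaults.
if __name__ == "__main__":
    dummy_img = (np.arange(32 * 32 * 3) % 256).reshape(32, 32, 3).astype(np.uint8)
    results = {'img': dummy_img, 'seg_fields': []}
    # Rerange stretches pixel values to [0, 255]; Normalize standardizes channels.
    results = Rerange(min_value=0, max_value=255)(results)
    results = Normalize(mean=[123.675, 116.28, 103.53],
                        std=[58.395, 57.12, 57.375], to_rgb=True)(results)
    print(results['img'].dtype, results['img_norm_cfg'])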
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/dataloader/pipelines/transforms.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmskeleton
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import mmcv
from ..builder import PIPELINES
from .compose import Compose
@PIPELINES.register_module()
class MultiScaleFlipAug(object):
"""Test-time augmentation with multiple scales and flipping.
An example configuration is as followed:
.. code-block::
img_scale=(2048, 1024),
img_ratios=[0.5, 1.0],
flip=True,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
]
After MultiScaleFLipAug with above configuration, the results are wrapped
into lists of the same length as followed:
.. code-block::
dict(
img=[...],
img_shape=[...],
scale=[(1024, 512), (1024, 512), (2048, 1024), (2048, 1024)]
flip=[False, True, False, True]
...
)
Args:
transforms (list[dict]): Transforms to apply in each augmentation.
img_scale (None | tuple | list[tuple]): Images scales for resizing.
img_ratios (float | list[float]): Image ratios for resizing
flip (bool): Whether apply flip augmentation. Default: False.
flip_direction (str | list[str]): Flip augmentation directions,
options are "horizontal" and "vertical". If flip_direction is list,
multiple flip augmentations will be applied.
It has no effect when flip == False. Default: "horizontal".
"""
def __init__(self,
transforms,
img_scale,
img_ratios=None,
flip=False,
flip_direction='horizontal'):
self.transforms = Compose(transforms)
if img_ratios is not None:
img_ratios = img_ratios if isinstance(img_ratios,
list) else [img_ratios]
assert mmcv.is_list_of(img_ratios, float), "Image ratios should be float."
if img_scale is None:
# mode 1: given img_scale=None and a range of image ratio
self.img_scale = None
assert mmcv.is_list_of(img_ratios, float), "Image ratios should be float."
elif isinstance(img_scale, tuple) and mmcv.is_list_of(
img_ratios, float):
assert len(img_scale) == 2, "Image scale length should be 2."
# mode 2: given a scale and a range of image ratio
self.img_scale = [(int(img_scale[0] * ratio),
int(img_scale[1] * ratio))
for ratio in img_ratios]
else:
# mode 3: given multiple scales
self.img_scale = img_scale if isinstance(img_scale,
list) else [img_scale]
assert mmcv.is_list_of(self.img_scale, tuple) or self.img_scale is None, "Image scale should be a tuple."
self.flip = flip
self.img_ratios = img_ratios
self.flip_direction = flip_direction if isinstance(
flip_direction, list) else [flip_direction]
assert mmcv.is_list_of(self.flip_direction, str), "Flip direction should be a string."
if not self.flip and self.flip_direction != ['horizontal']:
warnings.warn(
'flip_direction has no effect when flip is set to False')
if (self.flip and not any([t['type'] == 'RandomFlip' for t in transforms])):
warnings.warn(
'flip has no effect when RandomFlip is not in transforms')
def __call__(self, results):
"""Call function to apply test time augment transforms on results.
Args:
results (dict): Result dict contains the data to transform.
Returns:
dict[str: list]: The augmented data, where each value is wrapped
into a list.
"""
aug_data = []
if self.img_scale is None and mmcv.is_list_of(self.img_ratios, float):
h, w = results['img'].shape[:2]
img_scale = [(int(w * ratio), int(h * ratio))
for ratio in self.img_ratios]
else:
img_scale = self.img_scale
flip_aug = [False, True] if self.flip else [False]
for scale in img_scale:
for flip in flip_aug:
for direction in self.flip_direction:
_results = results.copy()
_results['scale'] = scale
_results['flip'] = flip
_results['flip_direction'] = direction
data = self.transforms(_results)
aug_data.append(data)
# list of dict to dict of list
aug_data_dict = {key: [] for key in aug_data[0]}
for data in aug_data:
for key, val in data.items():
aug_data_dict[key].append(val)
return aug_data_dict
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(transforms={self.transforms}, '
repr_str += f'img_scale={self.img_scale}, flip={self.flip})'
repr_str += f'flip_direction={self.flip_direction}'
return repr_str
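

# --- Minimal usage sketch (not part of the original file) ---------------------
# Shows how MultiScaleFlipAug derives its scale list from an img_scale and a list
# of ratios. The empty transform list is for demonstration only; a real pipeline
# would include Resize, RandomFlip, Normalize, etc.
if __name__ == "__main__":
    tta = MultiScaleFlipAug(transforms=[], img_scale=(2048, 1024),
                            img_ratios=[0.5, 1.0], flip=True)
    print(tta.img_scale)        # [(1024, 512), (2048, 1024)]
    print(tta.flip_direction)   # ['horizontal']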
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/dataloader/pipelines/test_time_aug.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmskeleton
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pipeline Loading Pipeline."""
import os.path as osp
import mmcv
import numpy as np
from ..builder import PIPELINES
@PIPELINES.register_module()
class LoadImageFromFile(object):
"""Load an image from file.
Required keys are "img_prefix" and "img_info" (a dict that must contain the
key "filename"). Added or updated keys are "filename", "img", "img_shape",
"ori_shape" (same as `img_shape`), "pad_shape" (same as `img_shape`),
"scale_factor" (1.0) and "img_norm_cfg" (means=0 and stds=1).
Args:
to_float32 (bool): Whether to convert the loaded image to a float32
numpy array. If set to False, the loaded image is an uint8 array.
Defaults to False.
color_type (str): The flag argument for :func:`mmcv.imfrombytes`.
Defaults to 'color'.
file_client_args (dict): Arguments to instantiate a FileClient.
See :class:`mmcv.fileio.FileClient` for details.
Defaults to ``dict(backend='disk')``.
imdecode_backend (str): Backend for :func:`mmcv.imdecode`. Default:
'cv2'
"""
def __init__(self,
to_float32=False,
color_type='color',
file_client_args=dict(backend='disk'),
imdecode_backend='cv2'):
"""Init Module."""
self.to_float32 = to_float32
self.color_type = color_type
self.file_client_args = file_client_args.copy()
self.file_client = None
self.imdecode_backend = "cv2"
def __call__(self, results):
"""Call functions to load image and get image meta information.
Args:
results (dict): Result dict from :obj:`mmseg.CustomDataset`.
Returns:
dict: The dict contains loaded image and meta information.
"""
if self.file_client is None:
self.file_client = mmcv.FileClient(**self.file_client_args)
if results.get('img_prefix') is not None:
filename = osp.join(results['img_prefix'],
results['img_info']['filename'])
else:
filename = results['img_info']['filename']
img_bytes = self.file_client.get(filename)
img = mmcv.imfrombytes(
img_bytes, flag=self.color_type, backend=self.imdecode_backend)
if self.to_float32:
img = img.astype(np.float32)
results['filename'] = filename
results['ori_filename'] = results['img_info']['filename']
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
num_channels = 1 if len(img.shape) < 3 else img.shape[2]
results['img_norm_cfg'] = dict(
mean=np.zeros(num_channels, dtype=np.float32),
std=np.ones(num_channels, dtype=np.float32),
to_rgb=False)
return results
def __repr__(self):
"""Reproduce Function."""
repr_str = self.__class__.__name__
repr_str += f'(to_float32={self.to_float32},'
repr_str += f"color_type='{self.color_type}',"
repr_str += f"imdecode_backend='{self.imdecode_backend}')"
return repr_str
@PIPELINES.register_module()
class LoadAnnotations(object):
"""Load annotations for semantic segmentation.
Args:
reduce_zero_label (bool): Whether reduce all label value by 1.
Usually used for datasets where 0 is background label.
Default: False.
file_client_args (dict): Arguments to instantiate a FileClient.
See :class:`mmcv.fileio.FileClient` for details.
Defaults to ``dict(backend='disk')``.
imdecode_backend (str): Backend for :func:`mmcv.imdecode`. Default:
            'pillow'.
        input_type (str): Type of the input ground-truth mask. If set to
            "grayscale", the loaded mask is divided by 255 and thresholded at 0.5
            to produce a binary {0, 1} map. Default: "rgb".
    """
def __init__(self,
reduce_zero_label=False,
file_client_args=dict(backend='disk'),
imdecode_backend='pillow',
input_type="rgb"):
"""Init Module."""
self.reduce_zero_label = reduce_zero_label
self.file_client_args = file_client_args.copy()
self.file_client = None
self.imdecode_backend = imdecode_backend
self.input_type = input_type
def __call__(self, results):
"""Call function to load multiple types annotations.
Args:
results (dict): Result dict from :obj:`mmseg.CustomDataset`.
Returns:
dict: The dict contains loaded semantic segmentation annotations.
"""
if self.file_client is None:
self.file_client = mmcv.FileClient(**self.file_client_args)
if results.get('seg_prefix', None) is not None:
filename = osp.join(results['seg_prefix'],
results['ann_info']['seg_map'])
else:
filename = results['ann_info']['seg_map']
img_bytes = self.file_client.get(filename)
gt_semantic_seg = mmcv.imfrombytes(
img_bytes, flag='unchanged',
backend=self.imdecode_backend).squeeze().astype(np.uint8)
if self.input_type == "grayscale":
gt_semantic_seg = gt_semantic_seg / 255
gt_semantic_seg = np.where(gt_semantic_seg > 0.5, 1, 0)
# modify if custom classes
if results.get('label_map', None) is not None:
for old_id, new_id in results['label_map'].items():
gt_semantic_seg[gt_semantic_seg == old_id] = new_id
# reduce zero_label
if self.reduce_zero_label:
# avoid using underflow conversion
gt_semantic_seg[gt_semantic_seg == 0] = 255
gt_semantic_seg = gt_semantic_seg - 1
gt_semantic_seg[gt_semantic_seg == 254] = 255
# converting labelId to trainId
# check if the filename has autolabel `refinement_final` - then use the following
# Convert the labels to train id
# We need to change this to do generic mapping
# Find a way to map for cityscapes
# Uncomment if needed especially for cityscapes
results['gt_semantic_seg'] = gt_semantic_seg
results['seg_fields'].append('gt_semantic_seg')
return results
def __repr__(self):
"""Reproduce Function."""
repr_str = self.__class__.__name__
repr_str += f'(reduce_zero_label={self.reduce_zero_label},'
repr_str += f"imdecode_backend='{self.imdecode_backend}')"
return repr_str
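

# --- Minimal usage sketch (not part of the original file) ---------------------
# Round-trips a tiny synthetic image through LoadImageFromFile; the temporary
# PNG file below is created only for this demonstration.
if __name__ == "__main__":
    import tempfile
    tmp = tempfile.NamedTemporaryFile(suffix='.png', delete=False)
    mmcv.imwrite(np.zeros((8, 8, 3), dtype=np.uint8), tmp.name)
    results = {'img_prefix': None, 'img_info': {'filename': tmp.name}}
    results = LoadImageFromFile(to_float32=True)(results)
    print(results['img'].shape, results['scale_factor'])  # (8, 8, 3) 1.0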
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/dataloader/pipelines/loading.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmsegmentation
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Compose module for transforms."""
import collections
from mmcv.utils import build_from_cfg
from ..builder import PIPELINES
@PIPELINES.register_module()
class Compose(object):
"""Compose multiple transforms sequentially.
Args:
transforms (Sequence[dict | callable]): Sequence of transform object or
config dict to be composed.
"""
def __init__(self, transforms):
"""Init Module."""
assert isinstance(transforms, collections.abc.Sequence), "Transforms should be of type Sequence."
self.transforms = []
for transform in transforms:
if isinstance(transform, dict):
transform = build_from_cfg(transform, PIPELINES)
self.transforms.append(transform)
elif callable(transform):
self.transforms.append(transform)
else:
raise TypeError('transform must be callable or a dict')
def __call__(self, data):
"""Call function to apply transforms sequentially.
Args:
data (dict): A result dict contains the data to transform.
Returns:
dict: Transformed data.
"""
for t in self.transforms:
data = t(data)
if data is None:
return None
return data
def __repr__(self):
"""Reproduce string."""
format_string = self.__class__.__name__ + '('
for t in self.transforms:
format_string += '\n'
format_string += f' {t}'
format_string += '\n)'
return format_string
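

# --- Minimal usage sketch (not part of the original file) ---------------------
# Compose accepts plain callables as well as registry config dicts; the function
# below is a stand-in transform used purely for illustration.
if __name__ == "__main__":
    def add_flag(results):
        results['flag'] = True
        return results

    pipeline = Compose([add_flag])
    print(pipeline({'img': None}))  # {'img': None, 'flag': True}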
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/dataloader/pipelines/compose.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Dataloader Pipeline."""
from .compose import Compose
from .formating import (Collect, ImageToTensor, ToDataContainer, ToTensor,
Transpose, to_tensor)
from .loading import LoadAnnotations, LoadImageFromFile
from .test_time_aug import MultiScaleFlipAug
from .transforms import (AlignedResize, CLAHE, AdjustGamma, Normalize, Pad,
PhotoMetricDistortion, RandomCrop, RandomFlip,
RandomRotate, Rerange, Resize, RGB2Gray, SegRescale)
__all__ = [
'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
'Transpose', 'Collect', 'LoadAnnotations', 'LoadImageFromFile',
'MultiScaleFlipAug', 'AlignedResize', 'Resize', 'RandomFlip', 'Pad', 'RandomCrop',
'Normalize', 'SegRescale', 'PhotoMetricDistortion', 'RandomRotate',
'AdjustGamma', 'CLAHE', 'Rerange', 'RGB2Gray'
]
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/dataloader/pipelines/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmsegmentation
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: skip-file
"""Formatting Module."""
from collections.abc import Sequence
import mmcv
from mmcv.parallel import DataContainer as DC
import numpy as np
import torch
from nvidia_tao_pytorch.cv.segformer.dataloader.builder import PIPELINES
def to_tensor(data):
"""Convert objects of various python types to :obj:`torch.Tensor`.
Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
:class:`Sequence`, :class:`int` and :class:`float`.
Args:
data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data to
be converted.
"""
if isinstance(data, torch.Tensor):
return data
if isinstance(data, np.ndarray):
return torch.from_numpy(data)
if isinstance(data, Sequence) and not mmcv.is_str(data):
return torch.tensor(data)
if isinstance(data, int):
return torch.LongTensor([data])
if isinstance(data, float):
return torch.FloatTensor([data])
raise TypeError(f'type {type(data)} cannot be converted to tensor.')
@PIPELINES.register_module()
class ToTensor(object):
"""Convert some results to :obj:`torch.Tensor` by given keys.
Args:
keys (Sequence[str]): Keys that need to be converted to Tensor.
"""
def __init__(self, keys):
"""Init Module."""
self.keys = keys
def __call__(self, results):
"""Call function to convert data in results to :obj:`torch.Tensor`.
Args:
results (dict): Result dict contains the data to convert.
Returns:
dict: The result dict contains the data converted
to :obj:`torch.Tensor`.
"""
for key in self.keys:
results[key] = to_tensor(results[key])
return results
def __repr__(self):
"""Repr Function."""
return self.__class__.__name__ + f'(keys={self.keys})'
@PIPELINES.register_module()
class ImageToTensor(object):
"""Convert image to :obj:`torch.Tensor` by given keys.
The dimension order of input image is (H, W, C). The pipeline will convert
it to (C, H, W). If only 2 dimension (H, W) is given, the output would be
(1, H, W).
Args:
keys (Sequence[str]): Key of images to be converted to Tensor.
"""
def __init__(self, keys):
"""Init Module."""
self.keys = keys
def __call__(self, results):
"""Call function to convert image in results to :obj:`torch.Tensor` and
transpose the channel order.
Args:
results (dict): Result dict contains the image data to convert.
Returns:
dict: The result dict contains the image converted
to :obj:`torch.Tensor` and transposed to (C, H, W) order.
"""
for key in self.keys:
img = results[key]
if len(img.shape) < 3:
img = np.expand_dims(img, -1)
results[key] = to_tensor(img.transpose(2, 0, 1))
return results
def __repr__(self):
"""Reproduce string."""
return self.__class__.__name__ + f'(keys={self.keys})'
@PIPELINES.register_module()
class Transpose(object):
"""Transpose some results by given keys.
Args:
keys (Sequence[str]): Keys of results to be transposed.
order (Sequence[int]): Order of transpose.
"""
def __init__(self, keys, order):
"""Init Module."""
self.keys = keys
self.order = order
def __call__(self, results):
"""Call function to convert image in results to :obj:`torch.Tensor` and
transpose the channel order.
Args:
results (dict): Result dict contains the image data to convert.
Returns:
dict: The result dict contains the image converted
to :obj:`torch.Tensor` and transposed to (C, H, W) order.
"""
for key in self.keys:
results[key] = results[key].transpose(self.order)
return results
def __repr__(self):
"""Reproduce string."""
return self.__class__.__name__ + \
f'(keys={self.keys}, order={self.order})'
@PIPELINES.register_module()
class ToDataContainer(object):
"""Convert results to :obj:`mmcv.DataContainer` by given fields.
Args:
fields (Sequence[dict]): Each field is a dict like
``dict(key='xxx', **kwargs)``. The ``key`` in result will
be converted to :obj:`mmcv.DataContainer` with ``**kwargs``.
Default: ``(dict(key='img', stack=True),
dict(key='gt_semantic_seg'))``.
"""
def __init__(self,
fields=(dict(key='img',
stack=True), dict(key='gt_semantic_seg'))):
"""Init module."""
self.fields = fields
def __call__(self, results):
"""Call function to convert data in results to
:obj:`mmcv.DataContainer`.
Args:
results (dict): Result dict contains the data to convert.
Returns:
dict: The result dict contains the data converted to
:obj:`mmcv.DataContainer`.
"""
for field in self.fields:
field = field.copy()
key = field.pop('key')
results[key] = DC(results[key], **field)
return results
def __repr__(self):
"""Reproduce string."""
return self.__class__.__name__ + f'(fields={self.fields})'
@PIPELINES.register_module()
class DefaultFormatBundle(object):
"""Default formatting bundle.
It simplifies the pipeline of formatting common fields, including "img"
and "gt_semantic_seg". These fields are formatted as follows.
- img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True)
- gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor,
(3)to DataContainer (stack=True)
"""
def __call__(self, results):
"""Call function to transform and format common fields in results.
Args:
results (dict): Result dict contains the data to convert.
Returns:
dict: The result dict contains the data that is formatted with
default bundle.
"""
if 'img' in results:
img = results['img']
if len(img.shape) < 3:
img = np.expand_dims(img, -1)
img = np.ascontiguousarray(img.transpose(2, 0, 1))
results['img'] = DC(to_tensor(img), stack=True)
if 'gt_semantic_seg' in results:
# convert to long
results['gt_semantic_seg'] = DC(
to_tensor(results['gt_semantic_seg'][None,
...].astype(np.int64)),
stack=True)
return results
def __repr__(self):
"""Reproduce string."""
return self.__class__.__name__
@PIPELINES.register_module()
class Collect(object):
"""Collect data from the loader relevant to the specific task.
This is usually the last stage of the data loader pipeline. Typically keys
is set to some subset of "img", "gt_semantic_seg".
The "img_meta" item is always populated. The contents of the "img_meta"
dictionary depends on "meta_keys". By default this includes:
- "img_shape": shape of the image input to the network as a tuple
(h, w, c). Note that images may be zero padded on the bottom/right
if the batch tensor is larger than this shape.
- "scale_factor": a float indicating the preprocessing scale
- "flip": a boolean indicating if image flip transform was used
- "filename": path to the image file
- "ori_shape": original shape of the image as a tuple (h, w, c)
- "pad_shape": image shape after padding
- "img_norm_cfg": a dict of normalization information:
- mean - per channel mean subtraction
- std - per channel std divisor
- to_rgb - bool indicating if bgr was converted to rgb
Args:
keys (Sequence[str]): Keys of results to be collected in ``data``.
meta_keys (Sequence[str], optional): Meta keys to be converted to
``mmcv.DataContainer`` and collected in ``data[img_metas]``.
Default: ``('filename', 'ori_filename', 'ori_shape', 'img_shape',
'pad_shape', 'scale_factor', 'flip', 'flip_direction',
'img_norm_cfg')``
"""
def __init__(self,
keys,
meta_keys=('filename', 'ori_filename', 'ori_shape',
'img_shape', 'pad_shape', 'scale_factor', 'flip',
'flip_direction', 'img_norm_cfg')):
"""Init Module."""
self.keys = keys
self.meta_keys = meta_keys
def __call__(self, results):
"""Call function to collect keys in results. The keys in ``meta_keys``
will be converted to :obj:mmcv.DataContainer.
Args:
results (dict): Result dict contains the data to collect.
Returns:
dict: The result dict contains the following keys
- keys in``self.keys``
- ``img_metas``
"""
data = {}
img_meta = {}
for key in self.meta_keys:
img_meta[key] = results[key]
data['img_metas'] = DC(img_meta, cpu_only=True)
for key in self.keys:
data[key] = results[key]
return data
def __repr__(self):
"""Repr Function."""
return self.__class__.__name__ + \
f'(keys={self.keys}, meta_keys={self.meta_keys})'
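

# --- Minimal usage sketch (not part of the original file) ---------------------
# Converts a dummy (H, W, C) image into a (C, H, W) tensor with ImageToTensor.
if __name__ == "__main__":
    results = {'img': np.zeros((4, 4, 3), dtype=np.float32)}
    results = ImageToTensor(keys=['img'])(results)
    print(results['img'].shape)  # torch.Size([3, 4, 4])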
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/segformer/dataloader/pipelines/formating.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OCDnet root module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# **************************************************************************
# Modified from github (https://github.com/WenmuZhou/DBNet.pytorch)
# Copyright (c) WenmuZhou
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# https://github.com/WenmuZhou/DBNet.pytorch/blob/master/LICENSE.md
# **************************************************************************
"""Segmentation Detector Representer."""
import cv2
import numpy as np
import pyclipper
from shapely.geometry import Polygon
def get_post_processing(config):
"""Run post processing."""
try:
cls = globals()[config['type']](**config['args'])
return cls
except Exception:
return None
class SegDetectorRepresenter():
"""SegDetector Representer class."""
def __init__(self, thresh=0.3, box_thresh=0.7, max_candidates=1000, unclip_ratio=1.5):
"""Initialize.
Args:
thresh (float): The threshold for binarization, which is used in generating an approximate binary map.
box_thresh (float): The bounding box threshold. If the effective area is lower than this threshold, the
prediction will be ignored, which means no text is detected.
max_candidates (int): The maximum candidate output.
            unclip_ratio (float): The ratio used by the Vatti clipping algorithm to expand (unclip) the detected text regions from the probability map.
"""
self.min_size = 3
self.thresh = thresh
self.box_thresh = box_thresh
self.max_candidates = max_candidates
self.unclip_ratio = unclip_ratio
def __call__(self, batch, pred, is_output_polygon=False):
"""Generate final bbox and score.
Args:
batch (dict): Produced by dataloaders. It is a dict contains:
- image, a tensor of shape (N, C, H, W).
- polygons, the polygons of objective regions, tensor of shape (N, K, 4, 2)
- ignore_tags: tensor of shape (N, K), indicates whether a region is ignorable or not.
- shape: the original shape of images.
- filename: the original filenames of images.
pred (list): Prediction result.
"""
pred = pred[:, 0, :, :]
segmentation = self.binarize(pred)
boxes_batch = []
scores_batch = []
for batch_index in range(pred.size(0)):
height = batch['img'].shape[2]
width = batch['img'].shape[3]
if is_output_polygon:
boxes, scores = self.polygons_from_bitmap(pred[batch_index], segmentation[batch_index], width, height)
else:
boxes, scores = self.boxes_from_bitmap(pred[batch_index], segmentation[batch_index], width, height)
boxes_batch.append(boxes)
scores_batch.append(scores)
return boxes_batch, scores_batch
def binarize(self, pred):
"""Binarize."""
return pred > self.thresh
def polygons_from_bitmap(self, pred, _bitmap, dest_width, dest_height):
"""Generate the polygon according to the approximate binary map."""
assert len(_bitmap.shape) == 2
bitmap = _bitmap.cpu().numpy()
pred = pred.cpu().detach().numpy()
height, width = bitmap.shape
boxes = []
scores = []
contours, _ = cv2.findContours((bitmap * 255).astype(np.uint8), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
for contour in contours[:self.max_candidates]:
epsilon = 0.0001 * cv2.arcLength(contour, True)
approx = cv2.approxPolyDP(contour, epsilon, True)
points = approx.reshape((-1, 2))
if points.shape[0] < 4:
continue
score = self.box_score_fast(pred, contour.squeeze(1))
if self.box_thresh > score:
continue
if points.shape[0] > 2:
box = self.unclip(points, unclip_ratio=self.unclip_ratio)
if len(box) > 1:
continue
else:
continue
box = box.reshape(-1, 2)
_, sside = self.get_mini_boxes(box.reshape((-1, 1, 2)))
if sside < self.min_size + 2:
continue
if not isinstance(dest_width, int):
dest_width = dest_width.item()
dest_height = dest_height.item()
box[:, 0] = np.clip(np.round(box[:, 0] / width * dest_width), 0, dest_width)
box[:, 1] = np.clip(np.round(box[:, 1] / height * dest_height), 0, dest_height)
boxes.append(box)
scores.append(score)
return boxes, scores
def boxes_from_bitmap(self, pred, _bitmap, dest_width, dest_height):
"""Generate the bbox according to the approximate binary map."""
assert len(_bitmap.shape) == 2
bitmap = _bitmap.cpu().numpy()
pred = pred.cpu().detach().numpy()
height, width = bitmap.shape
# Find contours from the approximate binary map
contours, _ = cv2.findContours(
(bitmap * 255).astype(np.uint8),
cv2.RETR_LIST,
cv2.CHAIN_APPROX_SIMPLE)
        # keep contours less than max candidates
num_contours = min(len(contours), self.max_candidates)
boxes = np.zeros((num_contours, 4, 2), dtype=np.int16)
scores = np.zeros((num_contours,), dtype=np.float32)
for index in range(num_contours):
contour = contours[index].squeeze(1)
points, sside = self.get_mini_boxes(contour)
# Ignore the bbox which is too small
if sside < self.min_size:
continue
points = np.array(points)
# Calculate the score
score = self.box_score_fast(pred, points)
if self.box_thresh > score:
continue
box = self.unclip(points, unclip_ratio=self.unclip_ratio).reshape(-1, 1, 2)
box, sside = self.get_mini_boxes(box)
if sside < self.min_size + 2:
continue
box = np.array(box)
if not isinstance(dest_width, int):
dest_width = dest_width.item()
dest_height = dest_height.item()
box[:, 0] = np.clip(np.round(box[:, 0] / width * dest_width), 0, dest_width)
box[:, 1] = np.clip(np.round(box[:, 1] / height * dest_height), 0, dest_height)
boxes[index, :, :] = box.astype(np.int16)
scores[index] = score
return boxes, scores
def unclip(self, box, unclip_ratio=1.5):
"""Expand the bbox."""
poly = Polygon(box)
distance = poly.area * unclip_ratio / poly.length
offset = pyclipper.PyclipperOffset() # pylint: disable=I1101
offset.AddPath(box, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON) # pylint: disable=I1101
expanded = np.array(offset.Execute(distance))
return expanded
def get_mini_boxes(self, contour):
"""Generate the bbox with minimum area, get its coordinates and the short size of height/width."""
bounding_box = cv2.minAreaRect(contour)
points = sorted(list(cv2.boxPoints(bounding_box)), key=lambda x: x[0])
index_1, index_2, index_3, index_4 = 0, 1, 2, 3
if points[1][1] > points[0][1]:
index_1 = 0
index_4 = 1
else:
index_1 = 1
index_4 = 0
if points[3][1] > points[2][1]:
index_2 = 2
index_3 = 3
else:
index_2 = 3
index_3 = 2
box = [points[index_1], points[index_2], points[index_3], points[index_4]]
return box, min(bounding_box[1])
def box_score_fast(self, bitmap, _box):
"""Calculate the bbox score according to bbox's coordinate and bitmap."""
h, w = bitmap.shape[:2]
box = _box.copy()
        xmin = np.clip(np.floor(box[:, 0].min()).astype(int), 0, w - 1)
        xmax = np.clip(np.ceil(box[:, 0].max()).astype(int), 0, w - 1)
        ymin = np.clip(np.floor(box[:, 1].min()).astype(int), 0, h - 1)
        ymax = np.clip(np.ceil(box[:, 1].max()).astype(int), 0, h - 1)
mask = np.zeros((ymax - ymin + 1, xmax - xmin + 1), dtype=np.uint8)
box[:, 0] = box[:, 0] - xmin
box[:, 1] = box[:, 1] - ymin
cv2.fillPoly(mask, box.reshape(1, -1, 2).astype(np.int32), 1)
return cv2.mean(bitmap[ymin:ymax + 1, xmin:xmax + 1], mask)[0]
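

# --- Minimal usage sketch (not part of the original file) ---------------------
# Computes the minimum-area box of a toy rectangular contour and expands it with
# unclip(); all coordinates and thresholds are illustrative only.
if __name__ == "__main__":
    representer = SegDetectorRepresenter(thresh=0.3, box_thresh=0.7)
    contour = np.array([[0, 0], [40, 0], [40, 10], [0, 10]], dtype=np.float32)
    box, short_side = representer.get_mini_boxes(contour.reshape(-1, 1, 2))
    expanded = representer.unclip(np.array(box), unclip_ratio=1.5)
    print(short_side, expanded.shape)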
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/post_processing/seg_detector_representer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Init."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/post_processing/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OCDnet config module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/config/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default config file"""
from typing import Optional, List
from dataclasses import dataclass, field
from omegaconf import MISSING
@dataclass
class ModelConfig:
"""Model config."""
backbone: str = "deformable_resnet18"
pretrained: bool = False
in_channels: int = 3
neck: str = "FPN"
inner_channels: int = 256
head: str = "DBHead"
out_channels: int = 2
k: int = 50
load_pruned_graph: bool = MISSING
pruned_graph_path: Optional[str] = None
pretrained_model_path: Optional[str] = None
@dataclass
class Optimargs:
"""Optimargs config."""
lr: float = 0.001
weight_decay: float = 0.0
amsgrad: bool = True
@dataclass
class Optimizer:
"""Optimizer config."""
type: str = "Adam"
args: Optimargs = Optimargs()
@dataclass
class Loss:
"""Loss config."""
type: str = "DBLoss"
alpha: int = 5
beta: int = 10
ohem_ratio: int = 3
@dataclass
class Postprocessingargs:
"""Postprocessingargs config."""
thresh: float = MISSING
box_thresh: float = MISSING
max_candidates: int = MISSING
unclip_ratio: float = MISSING
@dataclass
class Postprocessing:
"""Postprocessing config."""
type: str = "SegDetectorRepresenter"
args: Postprocessingargs = Postprocessingargs()
@dataclass
class Metricargs:
"""Metricargs config."""
is_output_polygon: bool = MISSING
@dataclass
class Metric:
"""Metric config."""
type: str = "QuadMetric"
args: Metricargs = Metricargs()
@dataclass
class LRSchedulerargs:
"""LRSchedulerargs config."""
warmup_epoch: int = MISSING
@dataclass
class LRScheduler:
"""LRScheduler config."""
type: str = "WarmupPolyLR"
args: LRSchedulerargs = LRSchedulerargs()
@dataclass
class Trainer:
"""Trainer config."""
is_output_polygon: bool = False
warmup_epoch: int = 3
seed: int = 2
log_iter: int = 10
clip_grad_norm: float = 5.0
show_images_iter: int = 50
tensorboard: bool = False
@dataclass
class Trainargs:
"""Train args config."""
img_mode: str = "BGR"
filter_keys: List[str] = field(default_factory=lambda: ['img_path', 'img_name', 'text_polys', 'texts', 'ignore_tags', 'shape'])
ignore_tags: List[str] = field(default_factory=lambda: ['*', '###'])
pre_processes: Optional[List[str]] = None
@dataclass
class Dataloader:
"""Train args config."""
batch_size: int = 32
shuffle: bool = True
pin_memory: bool = False
num_workers: int = 0
collate_fn: Optional[str] = ""
@dataclass
class TrainDataset:
"""Train Dataset config."""
data_name: str = "ICDAR2015Dataset"
data_path: List[str] = MISSING
args: Trainargs = Trainargs()
loader: Dataloader = Dataloader()
@dataclass
class Validateargs:
"""Validate args config."""
img_mode: str = "BGR"
filter_keys: List[str] = field(default_factory=lambda: [''])
ignore_tags: List[str] = field(default_factory=lambda: ['*', '###'])
pre_processes: Optional[List[str]] = None
@dataclass
class Validateloader:
"""Validate args config."""
batch_size: int = 1
shuffle: bool = False
pin_memory: bool = False
num_workers: int = 0
collate_fn: Optional[str] = "ICDARCollateFN"
@dataclass
class ValidateDataset:
"""Validate Dataset config."""
data_name: str = "ICDAR2015Dataset"
data_path: List[str] = MISSING
args: Validateargs = Validateargs()
loader: Validateloader = Validateloader()
@dataclass
class DataConfig:
"""Dataset config."""
train_dataset: TrainDataset = TrainDataset()
validate_dataset: ValidateDataset = ValidateDataset()
@dataclass
class TrainConfig:
"""Train experiment config."""
results_dir: Optional[str] = None
resume_training_checkpoint_path: Optional[str] = None
num_epochs: int = 50
checkpoint_interval: int = 1
validation_interval: int = 1
gpu_id: List[int] = field(default_factory=lambda: [0])
post_processing: Postprocessing = Postprocessing()
metric: Metric = Metric()
trainer: Trainer = Trainer()
loss: Loss = Loss()
optimizer: Optimizer = Optimizer()
lr_scheduler: LRScheduler = LRScheduler()
@dataclass
class InferenceConfig:
"""Inference experiment config."""
results_dir: Optional[str] = None
checkpoint: str = MISSING
trt_engine: Optional[str] = None
input_folder: str = MISSING
width: int = MISSING
height: int = MISSING
img_mode: str = MISSING
polygon: bool = True
show: bool = False
gpu_id: int = 0
post_processing: Postprocessing = Postprocessing()
@dataclass
class EvalConfig:
"""Evaluation experiment config."""
results_dir: Optional[str] = None
checkpoint: str = MISSING
trt_engine: Optional[str] = None
gpu_id: int = 0
batch_size: int = 1
post_processing: Postprocessing = Postprocessing()
metric: Metric = Metric()
@dataclass
class PruneConfig:
"""Prune experiment config."""
results_dir: Optional[str] = None
checkpoint: str = MISSING
gpu_id: int = 0
batch_size: int = 1
pruning_thresh: float = MISSING
@dataclass
class ExportConfig:
"""Export experiment config."""
results_dir: Optional[str] = None
checkpoint: str = MISSING
onnx_file: Optional[str] = None
gpu_id: int = 0
width: int = MISSING
height: int = MISSING
opset_version: int = 11
@dataclass
class CalibrationConfig:
"""Calibration config."""
cal_image_dir: str = MISSING
cal_cache_file: str = MISSING
cal_batch_size: int = 1
cal_num_batches: int = 1
@dataclass
class TrtConfig:
"""Trt config."""
data_type: str = "FP32"
workspace_size: int = 1024
min_batch_size: int = 1
opt_batch_size: int = 1
max_batch_size: int = 1
calibration: CalibrationConfig = CalibrationConfig()
@dataclass
class GenTrtEngineExpConfig:
"""Gen TRT Engine experiment config."""
results_dir: Optional[str] = None
gpu_id: int = 0
onnx_file: str = MISSING
trt_engine: str = MISSING
width: int = MISSING
height: int = MISSING
img_mode: str = "BGR"
tensorrt: TrtConfig = TrtConfig()
@dataclass
class ExperimentConfig:
"""Experiment config."""
train: TrainConfig = TrainConfig()
model: ModelConfig = ModelConfig()
evaluate: EvalConfig = EvalConfig()
dataset: DataConfig = DataConfig()
export: ExportConfig = ExportConfig()
gen_trt_engine: GenTrtEngineExpConfig = GenTrtEngineExpConfig()
inference: InferenceConfig = InferenceConfig()
prune: PruneConfig = PruneConfig()
name: str = MISSING
num_gpus: int = 1
results_dir: str = MISSING
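

# --- Minimal usage sketch (not part of the original file) ---------------------
# Materializes the structured config with OmegaConf and fills in two of the
# MISSING fields; the experiment name and results directory below are examples.
if __name__ == "__main__":
    from omegaconf import OmegaConf
    cfg = OmegaConf.structured(ExperimentConfig)
    cfg.name = "ocdnet_experiment"
    cfg.results_dir = "/tmp/ocdnet"
    print(cfg.model.backbone, cfg.train.num_epochs)  # deformable_resnet18 50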
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/config/default_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""learning rate schedulers module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/lr_schedulers/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# **************************************************************************
# Modified from github (https://github.com/WenmuZhou/DBNet.pytorch)
# Copyright (c) WenmuZhou
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# https://github.com/WenmuZhou/DBNet.pytorch/blob/master/LICENSE.md
# **************************************************************************
"""Popular Learning Rate Schedulers"""
from __future__ import division
import torch
class WarmupPolyLR(torch.optim.lr_scheduler._LRScheduler):
"""WarmupPolyLR.
Decays the learning rate via a polynomial function. The learning rate increases to initial value
during warmup stage and is reduced from the initial value to zero during the training stage.
"""
def __init__(self, optimizer, target_lr=0, max_iters=0, power=0.9, warmup_factor=1.0 / 3, warmup_iters=500,
warmup_method='linear', last_epoch=-1, warmup_epochs=0, epochs=0, **kwargs):
"""Initialize."""
if warmup_method not in ("constant", "linear"):
raise ValueError(
"Only 'constant' or 'linear' warmup_method accepted "
"got {}".format(warmup_method))
self.target_lr = target_lr
self.max_iters = max_iters
self.power = power
self.warmup_factor = warmup_factor
self.warmup_iters = warmup_iters
self.warmup_method = warmup_method
self.warmup_epochs = warmup_epochs
self.total_epochs = epochs
super(WarmupPolyLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
"""Calculate learning rate."""
N = self.max_iters - self.warmup_iters
T = self.last_epoch * self.max_iters / self.total_epochs - self.warmup_iters
if self.last_epoch < self.warmup_epochs:
if self.warmup_method == 'constant':
warmup_factor = self.warmup_factor
elif self.warmup_method == 'linear':
alpha = float(self.last_epoch) / self.warmup_epochs
warmup_factor = self.warmup_factor * (1 - alpha) + alpha
else:
raise ValueError("Unknown warmup type.")
return [self.target_lr + (base_lr - self.target_lr) * warmup_factor for base_lr in self.base_lrs]
factor = pow(1 - T / N, self.power)
return [self.target_lr + (base_lr - self.target_lr) * factor for base_lr in self.base_lrs]
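

# --- Minimal usage sketch (not part of the original file) ---------------------
# Drives WarmupPolyLR with a dummy optimizer, stepping once per epoch; the model
# and all hyper-parameter values are placeholders for illustration only.
if __name__ == "__main__":
    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    epochs, iters_per_epoch, warmup_epochs = 10, 100, 3
    scheduler = WarmupPolyLR(optimizer,
                             max_iters=epochs * iters_per_epoch,
                             warmup_iters=warmup_epochs * iters_per_epoch,
                             warmup_epochs=warmup_epochs,
                             epochs=epochs)
    for _ in range(epochs):
        optimizer.step()
        scheduler.step()
        print(optimizer.param_groups[0]['lr'])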
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/lr_schedulers/schedulers.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# **************************************************************************
# Modified from github (https://github.com/WenmuZhou/DBNet.pytorch)
# Copyright (c) WenmuZhou
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# https://github.com/WenmuZhou/DBNet.pytorch/blob/master/LICENSE.md
# **************************************************************************
"""Metrics module."""
import numpy as np
class RunningScore(object):
"""Calcuate accuracy score."""
def __init__(self, n_classes):
"""Initialize."""
self.n_classes = n_classes
self.confusion_matrix = np.zeros((n_classes, n_classes))
def _fast_hist(self, label_true, label_pred, n_class):
mask = (label_true >= 0) & (label_true < n_class)
if np.sum((label_pred[mask] < 0)) > 0:
print(label_pred[label_pred < 0])
hist = np.bincount(n_class * label_true[mask].astype(int) +
label_pred[mask], minlength=n_class ** 2).reshape(n_class, n_class)
return hist
def update(self, label_trues, label_preds):
"""Print label_trues.dtype, label_preds.dtype."""
for lt, lp in zip(label_trues, label_preds):
try:
self.confusion_matrix += self._fast_hist(lt.flatten(), lp.flatten(), self.n_classes)
except Exception:
pass
def get_scores(self):
"""Returns accuracy score evaluation result."""
hist = self.confusion_matrix
acc = np.diag(hist).sum() / (hist.sum() + 0.0001)
acc_cls = np.diag(hist) / (hist.sum(axis=1) + 0.0001)
acc_cls = np.nanmean(acc_cls)
iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist) + 0.0001)
mean_iu = np.nanmean(iu)
freq = hist.sum(axis=1) / (hist.sum() + 0.0001)
fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
cls_iu = dict(zip(range(self.n_classes), iu))
return {'Overall Acc': acc,
'Mean Acc': acc_cls,
'FreqW Acc': fwavacc,
'Mean IoU': mean_iu, }, cls_iu
def reset(self):
"""Reset confusion matrix."""
self.confusion_matrix = np.zeros((self.n_classes, self.n_classes))
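
# --- Hedged usage sketch (editorial addition, not part of the original file) ---
# Accumulates a 2-class confusion matrix over one fake 4x4 mask pair; the labels
# below are fabricated purely for illustration.
if __name__ == "__main__":
    _metric = RunningScore(n_classes=2)
    _gt = np.array([[0, 0, 1, 1]] * 4)
    _pred = np.array([[0, 1, 1, 1]] * 4)
    _metric.update(_gt[None, ...], _pred[None, ...])
    _scores, _cls_iou = _metric.get_scores()
    print(_scores['Mean IoU'], _cls_iou)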
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/utils/metrics.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# **************************************************************************
# Modified from github (https://github.com/WenmuZhou/DBNet.pytorch)
# Copyright (c) WenmuZhou
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# https://github.com/WenmuZhou/DBNet.pytorch/blob/master/LICENSE.md
# **************************************************************************
"""utility module."""
import json
import logging
import pathlib
import time
import tempfile
import os
import glob
from natsort import natsorted
import cv2
import matplotlib.pyplot as plt
import numpy as np
from eff.codec import encrypt_stream, decrypt_stream
import torch
from nvidia_tao_pytorch.core.path_utils import expand_path
def get_file_list(folder_path: str, p_postfix: list = None, sub_dir: bool = True) -> list:
"""Get file list
Args:
folder_path: the folder path
p_postfix: postfix
sub_dir: check the subfolder
Returns:
Return file list
"""
assert os.path.exists(folder_path) and os.path.isdir(folder_path), "Please set valid input_folder in yaml file."
if p_postfix is None:
p_postfix = ['.jpg']
if isinstance(p_postfix, str):
p_postfix = [p_postfix]
file_list = [x for x in glob.glob(folder_path + '/*.*') if
os.path.splitext(x)[-1] in p_postfix or '.*' in p_postfix]
return natsorted(file_list)
def setup_logger(log_file_path: str = None):
"""setup logger."""
logging._warn_preinit_stderr = 0
logger = logging.getLogger('OCDNet.pytorch')
formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s: %(message)s')
ch = logging.StreamHandler()
ch.setFormatter(formatter)
logger.addHandler(ch)
if log_file_path is not None:
file_handle = logging.FileHandler(log_file_path)
file_handle.setFormatter(formatter)
logger.addHandler(file_handle)
logger.setLevel(logging.INFO)
return logger
def exe_time(func):
"""exe time."""
def newFunc(*args, **args2):
t0 = time.time()
back = func(*args, **args2)
print("{} cost {:.3f}s".format(func.__name__, time.time() - t0))
return back
return newFunc
def load(file_path: str):
"""load file."""
file_path = pathlib.Path(file_path)
func_dict = {'.txt': _load_txt, '.json': _load_json, '.list': _load_txt}
assert file_path.suffix in func_dict
return func_dict[file_path.suffix](file_path)
def _load_txt(file_path: str):
with open(file_path, 'r', encoding='utf8') as f:
content = [x.strip().strip('\ufeff').strip('\xef\xbb\xbf') for x in f.readlines()]
return content
def _load_json(file_path: str):
with open(file_path, 'r', encoding='utf8') as f:
content = json.load(f)
return content
def save(data, file_path):
"""save file."""
file_path = pathlib.Path(file_path)
func_dict = {'.txt': _save_txt, '.json': _save_json}
assert file_path.suffix in func_dict
return func_dict[file_path.suffix](data, file_path)
def _save_txt(data, file_path):
"""Write the list into a txt file"""
if not isinstance(data, list):
data = [data]
with open(file_path, mode='w', encoding='utf8') as f:
f.write('\n'.join(data))
def _save_json(data, file_path):
with open(file_path, 'w', encoding='utf-8') as json_file:
json.dump(data, json_file, ensure_ascii=False, indent=4)
def show_img(imgs: np.ndarray, title='img'):
"""show img."""
color = (len(imgs.shape) == 3 and imgs.shape[-1] == 3)
imgs = np.expand_dims(imgs, axis=0)
for i, img in enumerate(imgs):
plt.figure()
plt.title('{}_{}'.format(title, i))
plt.imshow(img, cmap=None if color else 'gray')
plt.show()
def draw_bbox(img_path, result, color=(255, 0, 0), thickness=2):
"""draw bbox."""
if isinstance(img_path, str):
img_path = cv2.imread(img_path)
img_path = img_path.copy()
for point in result:
point = point.astype(int)
cv2.polylines(img_path, [point], True, color, thickness)
return img_path
def cal_text_score(texts, gt_texts, training_masks, running_metric_text, thred=0.5):
"""cal text score."""
training_masks = training_masks.data.cpu().numpy()
pred_text = texts.data.cpu().numpy() * training_masks
pred_text[pred_text <= thred] = 0
pred_text[pred_text > thred] = 1
pred_text = pred_text.astype(np.int32)
gt_text = gt_texts.data.cpu().numpy() * training_masks
gt_text = gt_text.astype(np.int32)
running_metric_text.update(gt_text, pred_text)
score_text, _ = running_metric_text.get_scores()
return score_text
def order_points_clockwise(pts):
"""order points clockwise."""
rect = np.zeros((4, 2), dtype="float32")
s = pts.sum(axis=1)
rect[0] = pts[np.argmin(s)]
rect[2] = pts[np.argmax(s)]
diff = np.diff(pts, axis=1)
rect[1] = pts[np.argmin(diff)]
rect[3] = pts[np.argmax(diff)]
return rect
def order_points_clockwise_list(pts):
"""order points clockwise list."""
pts = pts.tolist()
pts.sort(key=lambda x: (x[1], x[0]))
pts[:2] = sorted(pts[:2], key=lambda x: x[0])
pts[2:] = sorted(pts[2:], key=lambda x: -x[0])
pts = np.array(pts)
return pts
def get_datalist(train_data_path):
"""Get train data list and val data list"""
train_data = []
for p in train_data_path:
if os.path.isfile(p):
with open(p, 'r', encoding='utf-8') as f:
for line in f.readlines():
line = line.strip('\n').replace('.jpg ', '.jpg\t').split('\t')
if len(line) > 1:
img_path = pathlib.Path(expand_path(line[0].strip(' ')))
label_path = pathlib.Path(expand_path(line[1].strip(' ')))
if img_path.exists() and img_path.stat().st_size > 0 and label_path.exists() and label_path.stat().st_size > 0:
train_data.append((str(img_path), str(label_path)))
else:
img_dir = os.path.join(p, "img")
label_dir = os.path.join(p, "gt")
for img in os.listdir(img_dir):
img_file = os.path.join(img_dir, img)
label = "gt_" + img.split('.')[0] + ".txt"
label_file = os.path.join(label_dir, label)
assert os.path.exists(label_file), (
f"Cannot find label file for image: {img_file}"
)
train_data.append((img_file, label_file))
return sorted(train_data)
def get_datalist_uber(train_data_path):
"""Get uber train data list and val data list"""
train_data = []
for p in train_data_path:
if os.path.isfile(p):
with open(p, 'r', encoding='utf-8') as f:
for line in f.readlines():
line = line.strip('\n').replace('.jpg ', '.jpg\t').split('\t')
if len(line) > 1:
img_path = pathlib.Path(expand_path(line[0].strip(' ')))
label_path = pathlib.Path(expand_path(line[1].strip(' ')))
if img_path.exists() and img_path.stat().st_size > 0 and label_path.exists() and label_path.stat().st_size > 0:
train_data.append((str(img_path), str(label_path)))
else:
img_dir = os.path.join(p, "img")
label_dir = os.path.join(p, "gt")
for img in os.listdir(img_dir):
img_file = os.path.join(img_dir, img)
label = "truth_" + img.split('.')[0] + ".txt"
label_file = os.path.join(label_dir, label)
assert os.path.exists(label_file), (
f"Cannot find label file for image: {img_file}"
)
train_data.append((img_file, label_file))
return sorted(train_data)
def parse_config(config: dict) -> dict:
"""parse config."""
import anyconfig
base_file_list = config.pop('base')
base_config = {}
for base_file in base_file_list:
tmp_config = anyconfig.load(open(base_file, 'rb'))
if 'base' in tmp_config:
tmp_config = parse_config(tmp_config)
anyconfig.merge(tmp_config, base_config)
base_config = tmp_config
anyconfig.merge(base_config, config)
return base_config
def save_result(result_path, box_list, score_list, is_output_polygon):
"""save result."""
if is_output_polygon:
with open(result_path, 'wt') as res:
for i, box in enumerate(box_list):
box = box.reshape(-1).tolist()
result = ",".join([str(int(x)) for x in box])
score = score_list[i]
res.write(result + ',' + str(score) + "\n")
else:
with open(result_path, 'wt') as res:
for i, box in enumerate(box_list):
score = score_list[i]
box = box.reshape(-1).tolist()
result = ",".join([str(int(x)) for x in box])
res.write(result + ',' + str(score) + "\n")
def expand_polygon(polygon):
"""Expand bbox which has only one character."""
(x, y), (w, h), angle = cv2.minAreaRect(np.float32(polygon))
if angle < -45:
w, h = h, w
angle += 90
new_w = w + h
box = ((x, y), (new_w, h), angle)
points = cv2.boxPoints(box)
return order_points_clockwise(points)
def mkdir(path):
""" make directory if not existing """
if not os.path.exists(path):
os.makedirs(path)
def encrypt_pytorch(tmp_file_name, output_file_name, key):
"""Encrypt the pytorch model"""
with open(tmp_file_name, "rb") as open_temp_file, open(output_file_name,
"wb") as open_encoded_file:
encrypt_stream(
input_stream=open_temp_file, output_stream=open_encoded_file,
passphrase=key, encryption=True
)
def save_checkpoint(state, filename, key):
"""Save the checkpoint."""
handle, temp_name = tempfile.mkstemp(".tlt")
os.close(handle)
torch.save(state, temp_name)
encrypt_pytorch(temp_name, filename, key)
os.remove(temp_name)
def decrypt_pytorch(input_file_name, output_file_name, key):
"""Decrypt the TLT model to Pytorch model"""
with open(input_file_name, "rb") as open_temp_file, open(output_file_name,
"wb") as open_encoded_file:
decrypt_stream(
input_stream=open_temp_file, output_stream=open_encoded_file,
passphrase=key, encryption=True
)
def load_checkpoint(model_path, to_cpu=False):
"""Helper function to load a saved checkpoint."""
loc_type = torch.device('cpu') if to_cpu else None
loaded_state = torch.load(model_path, map_location=loc_type)
return loaded_state
def create_logger(log_file=None, rank=0, log_level=logging.INFO):
"""Create logger."""
logger = logging.getLogger(__name__)
logger.setLevel(log_level if rank == 0 else 'ERROR')
formatter = logging.Formatter('%(asctime)s %(levelname)5s %(message)s')
if log_file is not None:
file_handler = logging.FileHandler(filename=log_file)
file_handler.setLevel(log_level if rank == 0 else 'ERROR')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
return logger
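
# --- Hedged usage sketch (editorial addition, not part of the original file) ---
# Reorders the corners of a synthetic quadrilateral with the two point-ordering
# helpers above; the coordinates are made-up values chosen only to show the
# expected clockwise ordering.
if __name__ == "__main__":
    _pts = np.array([[10, 40], [10, 10], [40, 10], [40, 40]], dtype="float32")
    print(order_points_clockwise(_pts))        # top-left, top-right, bottom-right, bottom-left
    print(order_points_clockwise_list(_pts))   # same ordering via the list-based variant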
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/utils/util.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Initialize."""
# flake8: noqa: F401, F403
from .util import *
from .metrics import *
from .common_utils import *
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for ocdnet."""
import os
import torch
import struct
import json
import numpy as np
import tempfile
from eff.codec import encrypt_stream
def encrypt_onnx(tmp_file_name, output_file_name, key):
"""Encrypt the onnx model."""
with open(tmp_file_name, "rb") as open_temp_file, open(output_file_name,
"wb") as open_encoded_file:
# set the input name magic number
open_encoded_file.write(struct.pack("<i", 0))
encrypt_stream(
input_stream=open_temp_file, output_stream=open_encoded_file,
passphrase=key, encryption=True
)
def encrypt_pytorch(tmp_file_name, output_file_name, key):
"""Encrypt the pytorch model"""
with open(tmp_file_name, "rb") as open_temp_file, open(output_file_name,
"wb") as open_encoded_file:
encrypt_stream(
input_stream=open_temp_file, output_stream=open_encoded_file,
passphrase=key, encryption=True
)
def save_checkpoint(state, filename, key):
"""Save the checkpoint."""
if False and 'optimizer_state' in state:
optimizer_state = state['optimizer_state']
state.pop('optimizer_state', None)
optimizer_filename = '{}_optim.pth'.format(filename)
torch.save({'optimizer_state': optimizer_state}, optimizer_filename)
handle, temp_name = tempfile.mkstemp(".tlt")
os.close(handle)
torch.save(state, temp_name)
encrypt_pytorch(temp_name, filename, key)
os.remove(temp_name)
def check_and_create(d):
"""Create a directory."""
if not os.path.isdir(d):
os.makedirs(d)
def load_json_from_file(file_path):
"""Load data from a JSON file."""
with open(file_path, 'r') as f:
data = json.load(f)
return data
def write_np_to_file(file_path, data):
"""Write Numpy array to file."""
np.save(file=file_path, arr=data, allow_pickle=False)
def data_to_device(data):
"""Transfer data to GPU."""
if isinstance(data, list):
cuda_data = []
for item in data:
cuda_item = item.cuda(non_blocking=True)
cuda_data.append(cuda_item)
else:
cuda_data = data.cuda(non_blocking=True)
return cuda_data
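
# --- Hedged usage sketch (editorial addition, not part of the original file) ---
# Round-trips a small array and a JSON blob through the helpers above inside a
# temporary directory; all paths and values are illustrative assumptions.
if __name__ == "__main__":
    _tmp_dir = tempfile.mkdtemp()
    check_and_create(os.path.join(_tmp_dir, "sub"))
    _arr_path = os.path.join(_tmp_dir, "sample.npy")
    write_np_to_file(_arr_path, np.arange(6).reshape(2, 3))
    print(np.load(_arr_path))
    _json_path = os.path.join(_tmp_dir, "sample.json")
    with open(_json_path, "w") as _f:
        json.dump({"classes": 3}, _f)
    print(load_json_from_file(_json_path))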
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/utils/common_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""get_metric module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/utils/ocr_metric/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""icdar2015 module."""
# flake8: noqa: F401
from .quad_metric import QuadMetric
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/utils/ocr_metric/icdar2015/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# **************************************************************************
# Modified from github (https://github.com/WenmuZhou/DBNet.pytorch)
# Copyright (c) WenmuZhou
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# https://github.com/WenmuZhou/DBNet.pytorch/blob/master/LICENSE.md
# **************************************************************************
"""quad_metric module."""
import numpy as np
from .detection.iou import DetectionIoUEvaluator
def get_metric(config):
"""get metric."""
try:
if 'args' not in config:
args = {}
else:
args = config['args']
if isinstance(args, dict):
cls = globals()[config['type']](**args)
else:
cls = globals()[config['type']](args)
return cls
except Exception:
return None
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
"""Initialize."""
self.reset()
def reset(self):
"""reset."""
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
"""update."""
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
return self
class QuadMetric():
"""QuadMetric class."""
def __init__(self, is_output_polygon=False):
"""Initialize."""
self.is_output_polygon = is_output_polygon
self.evaluator = DetectionIoUEvaluator(is_output_polygon=is_output_polygon)
def measure(self, batch, output, box_thresh=0.6):
"""Measure the quad metric
Args:
batch (dict): Produced by dataloaders. It is a dict of image, polygons and ignore_tags.
The image is a tensor with shape (N, C, H, W). The polygons is a tensor of
shape (N, K, 4, 2). The ignore_tags is a tensor of shape (N, K), indicates
whether a region is ignorable or not. The shape is the original shape of images.
The filename is the original filenames of images.
output: The prediction polygons and scores.
"""
results = []
gt_polyons_batch = batch['text_polys']
ignore_tags_batch = batch['ignore_tags']
pred_polygons_batch = np.array(output[0])
pred_scores_batch = np.array(output[1])
for polygons, pred_polygons, pred_scores, ignore_tags in zip(gt_polyons_batch, pred_polygons_batch, pred_scores_batch, ignore_tags_batch):
gt = [dict(points=np.int64(polygons[i]), ignore=ignore_tags[i]) for i in range(len(polygons))]
if self.is_output_polygon:
pred = [dict(points=pred_polygons[i]) for i in range(len(pred_polygons))]
else:
pred = []
for i in range(pred_polygons.shape[0]):
if pred_scores[i] >= box_thresh:
                        pred.append(dict(points=pred_polygons[i, :, :].astype(np.int32)))
res = self.evaluator.evaluate_image(gt, pred)
results.append(res)
return results
def validate_measure(self, batch, output, box_thresh=0.6):
"""validate measure."""
return self.measure(batch, output, box_thresh)
def evaluate_measure(self, batch, output):
"""evaluate measure."""
return self.measure(batch, output), np.linspace(0, batch['image'].shape[0]).tolist()
def gather_measure(self, raw_metrics):
"""gather measure."""
raw_metrics = [image_metrics
for batch_metrics in raw_metrics
for image_metrics in batch_metrics]
result = self.evaluator.combine_results(raw_metrics)
precision = AverageMeter()
recall = AverageMeter()
hmean = AverageMeter()
precision.update(result['precision'], n=len(raw_metrics))
recall.update(result['recall'], n=len(raw_metrics))
hmean_score = 2 * precision.val * recall.val / (precision.val + recall.val + 1e-8)
hmean.update(hmean_score)
return {
'precision': precision,
'recall': recall,
'hmean': hmean
}
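
# --- Hedged usage sketch (editorial addition, not part of the original file) ---
# Scores one synthetic image whose single prediction exactly matches the single
# ground-truth quad, so hmean should come out as ~1.0. The polygon coordinates
# and the confidence score are fabricated for illustration.
if __name__ == "__main__":
    _metric = QuadMetric(is_output_polygon=True)
    _quad = [[0, 0], [10, 0], [10, 10], [0, 10]]
    _batch = {'text_polys': [[_quad]], 'ignore_tags': [[False]]}
    _pred_boxes = np.array([[_quad]], dtype=np.float32)  # (batch=1, boxes=1, 4, 2)
    _pred_scores = np.array([[0.9]])
    _raw = _metric.validate_measure(_batch, (_pred_boxes, _pred_scores))
    print(_metric.gather_measure([_raw])['hmean'].avg)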
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/utils/ocr_metric/icdar2015/quad_metric.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""icdar2015 detection module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/utils/ocr_metric/icdar2015/detection/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# **************************************************************************
# Modified from github (https://github.com/WenmuZhou/DBNet.pytorch)
# Copyright (c) WenmuZhou
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# https://github.com/WenmuZhou/DBNet.pytorch/blob/master/LICENSE.md
# **************************************************************************
"""Detection IOU module."""
# pylint: disable=W0612,R1705
import numpy as np
from shapely.geometry import Polygon
import cv2
def iou_rotate(box_a, box_b, method='union'):
"""iou rotate."""
rect_a = cv2.minAreaRect(box_a)
rect_b = cv2.minAreaRect(box_b)
r1 = cv2.rotatedRectangleIntersection(rect_a, rect_b)
if r1[0] == 0:
return 0
else:
inter_area = cv2.contourArea(r1[1])
area_a = cv2.contourArea(box_a)
area_b = cv2.contourArea(box_b)
union_area = area_a + area_b - inter_area
if union_area == 0 or inter_area == 0:
return 0
if method == 'union':
iou = inter_area / union_area
elif method == 'intersection':
iou = inter_area / min(area_a, area_b)
else:
raise NotImplementedError
return iou
class DetectionIoUEvaluator(object):
"""Define the evaluator.
The evaluator will
- Iterate through the ground truth, save the ones which are valid and not_care.
- Iterate through the predicted polygon, save the ones which are valid and not_care.
- Calculate the number of valid ground truth and predicted polygons
- Calculate the number when ground truth and predicted polygons are matched
- Calculate recall, precision and hmean
"""
def __init__(self, is_output_polygon=False, iou_constraint=0.5, area_precision_constraint=0.5):
"""Initialize."""
self.is_output_polygon = is_output_polygon
self.iou_constraint = iou_constraint
self.area_precision_constraint = area_precision_constraint
def evaluate_image(self, gt, pred):
"""evaluate image."""
def get_union(pD, pG):
return Polygon(pD).union(Polygon(pG)).area
def get_intersection_over_union(pD, pG):
return get_intersection(pD, pG) / get_union(pD, pG)
def get_intersection(pD, pG):
return Polygon(pD).intersection(Polygon(pG)).area
def compute_ap(conf_list, match_list, num_gt_care):
correct = 0
AP = 0
if len(conf_list) > 0:
conf_list = np.array(conf_list)
match_list = np.array(match_list)
sorted_ind = np.argsort(-conf_list)
conf_list = conf_list[sorted_ind]
match_list = match_list[sorted_ind]
for n in range(len(conf_list)):
match = match_list[n]
if match:
correct += 1
AP += float(correct) / (n + 1)
if num_gt_care > 0:
AP /= num_gt_care
return AP
per_sample_metrics = {}
matched_sum = 0
num_globalcare_gt = 0
num_globalcare_det = 0
recall = 0
precision = 0
hmean = 0
det_matched = 0
iou_mat = np.empty([1, 1])
gt_pols = []
det_pols = []
gt_pol_points = []
det_pol_points = []
# Array of Ground Truth Polygons' keys marked as don't Care
gt_dontcare_pols_mum = []
# Array of Detected Polygons' matched with a don't Care GT
det_dontcare_pols_num = []
pairs = []
det_matched_nums = []
evaluation_log = ""
# Iterate through the ground truth
for n in range(len(gt)):
points = gt[n]['points']
dont_care = gt[n]['ignore']
if not Polygon(points).is_valid or not Polygon(points).is_simple:
continue
# Save the valid polygon
gt_pol = points
gt_pols.append(gt_pol)
gt_pol_points.append(points)
# Save the dont_care polygon
if dont_care:
gt_dontcare_pols_mum.append(len(gt_pols) - 1)
evaluation_log += "GT polygons: " + str(len(gt_pols)) + (" (" + str(len(
gt_dontcare_pols_mum)) + " don't care)\n" if len(gt_dontcare_pols_mum) > 0 else "\n")
# Iterate through the predicted polygons
for n in range(len(pred)):
points = pred[n]['points']
if not Polygon(points).is_valid or not Polygon(points).is_simple:
continue
# Save the valid polygon
det_pol = points
det_pols.append(det_pol)
det_pol_points.append(points)
if len(gt_dontcare_pols_mum) > 0:
# Iterate through the dont_care polygons, calculate the intersection against predicted polygon
for dontcare_pol in gt_dontcare_pols_mum:
# Find the dont_care polygon
dontcare_pol = gt_pols[dontcare_pol]
# Calculate the intersection between dont_care polygon and predicted polygon
intersected_area = get_intersection(dontcare_pol, det_pol)
# Calculate the area of predicted polygon
pd_dimensions = Polygon(det_pol).area
                    # Calculate precision
precision = 0 if pd_dimensions == 0 else intersected_area / pd_dimensions
# Save the polygon number if precision is higher than the constraint
if (precision > self.area_precision_constraint):
det_dontcare_pols_num.append(len(det_pols) - 1)
break
evaluation_log += "DET polygons: " + str(len(det_pols)) + (" (" + str(len(
det_dontcare_pols_num)) + " don't care)\n" if len(det_dontcare_pols_num) > 0 else "\n")
        # If both ground truth and predicted polygons are valid and available
if len(gt_pols) > 0 and len(det_pols) > 0:
            # Calculate IoU and precision matrices
output_shape = [len(gt_pols), len(det_pols)]
iou_mat = np.empty(output_shape)
gt_rect_mat = np.zeros(len(gt_pols), np.int8)
det_rect_mat = np.zeros(len(det_pols), np.int8)
# Iterate through the ground truth and the predicted polygons, then calculate the IOU
for gt_num in range(len(gt_pols)):
for det_num in range(len(det_pols)):
pG = gt_pols[gt_num]
pD = det_pols[det_num]
iou_mat[gt_num, det_num] = get_intersection_over_union(pD, pG)
for gt_num in range(len(gt_pols)):
for det_num in range(len(det_pols)):
if gt_rect_mat[gt_num] == 0 and det_rect_mat[det_num] == 0 and gt_num not in gt_dontcare_pols_mum and det_num not in det_dontcare_pols_num:
# Check if ground truth and predicted polygons are matched, and save the number
if iou_mat[gt_num, det_num] > self.iou_constraint:
gt_rect_mat[gt_num] = 1
det_rect_mat[det_num] = 1
det_matched += 1
pairs.append({'gt': gt_num, 'det': det_num})
det_matched_nums.append(det_num)
evaluation_log += "Match GT #" + \
str(gt_num) + " with Det #" + str(det_num) + "\n"
        # Calculate the number of valid ground truth and predicted polygons
num_gt_care = (len(gt_pols) - len(gt_dontcare_pols_mum))
num_det_care = (len(det_pols) - len(det_dontcare_pols_num))
        # Calculate recall, precision and hmean
if num_gt_care == 0:
recall = float(1)
precision = float(0) if num_det_care > 0 else float(1)
else:
recall = float(det_matched) / num_gt_care
precision = 0 if num_det_care == 0 else float(
det_matched) / num_det_care
hmean = 0 if (precision + recall) == 0 else 2.0 * \
precision * recall / (precision + recall)
matched_sum += det_matched
num_globalcare_gt += num_gt_care
num_globalcare_det += num_det_care
per_sample_metrics = {
'precision': precision,
'recall': recall,
'hmean': hmean,
'pairs': pairs,
'iou_mat': [] if len(det_pols) > 100 else iou_mat.tolist(),
'gt_pol_points': gt_pol_points,
'det_pol_points': det_pol_points,
'gt_care': num_gt_care,
'det_care': num_det_care,
'gt_dontcare': gt_dontcare_pols_mum,
'det_dontcare': det_dontcare_pols_num,
'det_matched': det_matched,
'evaluation_log': evaluation_log
}
return per_sample_metrics
def combine_results(self, results):
"""combine results."""
num_globalcare_gt = 0
num_globalcare_det = 0
matched_sum = 0
for result in results:
num_globalcare_gt += result['gt_care']
num_globalcare_det += result['det_care']
matched_sum += result['det_matched']
method_recall = 0 if num_globalcare_gt == 0 else float(
matched_sum) / num_globalcare_gt
method_precision = 0 if num_globalcare_det == 0 else float(
matched_sum) / num_globalcare_det
method_hmean = 0 if method_recall + method_precision == 0 else 2 * \
method_recall * method_precision / (method_recall + method_precision)
method_metrics = {'precision': method_precision,
'recall': method_recall, 'hmean': method_hmean}
return method_metrics
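
# --- Hedged usage sketch (editorial addition, not part of the original file) ---
# Evaluates one synthetic image where the (made-up) prediction coincides with the
# ground-truth box, so precision, recall and hmean should all be 1.0.
if __name__ == "__main__":
    _evaluator = DetectionIoUEvaluator()
    _gt = [{'points': [(0, 0), (10, 0), (10, 10), (0, 10)], 'ignore': False}]
    _pred = [{'points': [(0, 0), (10, 0), (10, 10), (0, 10)]}]
    _per_image = _evaluator.evaluate_image(_gt, _pred)
    print(_evaluator.combine_results([_per_image]))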
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/utils/ocr_metric/icdar2015/detection/iou.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Version string for the PyTorch pruning."""
__version__ = "0.2.7"
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/pruning/version.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Root module for PyTorch model pruning."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/pruning/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Importance module."""
# pylint: disable=R1710
import torch
import torch.nn as nn
from . import functional
class Importance:
"""Importance class."""
pass
def rescale(x):
"""Rescale."""
return (x - x.min()) / (x.max() - x.min())
class MagnitudeImportance(Importance):
"""Magnitude Importance."""
def __init__(self, p=1, local=False, reduction="mean"):
"""Initialize."""
self.p = p
self.local = local
self.reduction = reduction
@torch.no_grad()
def __call__(self, plan):
"""Call function."""
importance_mat = []
non_importance = True
for dep, idxs in plan:
layer = dep.target.module
prune_fn = dep.handler
if prune_fn in [
functional.prune_conv_out_channel,
functional.prune_linear_out_channel,
]:
w = (layer.weight)[idxs]
this_importance = torch.norm(torch.flatten(w, 1), dim=1, p=self.p)
importance_mat.append(this_importance)
non_importance = False
elif prune_fn in [
functional.prune_conv_in_channel,
functional.prune_linear_in_channel,
]:
w = (layer.weight)[:, idxs].transpose(0, 1)
w = torch.flatten(w, 1)
if w.shape[0] != importance_mat[0].shape[0]: # for conv-flatten-linear
if (
w.shape[0] % importance_mat[0].shape[0] != 0
): # TODO: support Group Convs
continue
w = w.view(
importance_mat[0].shape[0],
w.shape[0] // importance_mat[0].shape[0],
w.shape[1],
)
this_importance = torch.norm(torch.flatten(w, 1), dim=1, p=self.p)
importance_mat.append(this_importance)
non_importance = False
elif prune_fn == functional.prune_batchnorm:
                if layer.affine:
w = (layer.weight)[idxs].view(-1, 1)
this_importance = torch.norm(w, dim=1, p=self.p)
importance_mat.append(this_importance)
if self.local:
break
importance_mat = torch.stack(importance_mat, dim=0)
if non_importance:
return None
if self.reduction == "sum":
return importance_mat.sum(dim=0)
if self.reduction == "mean":
return importance_mat.mean(dim=0)
if self.reduction == "max":
return importance_mat.max(dim=0)[0]
if self.reduction == "min":
return importance_mat.min(dim=0)[0]
if self.reduction == "prod":
return importance_mat.prod(dim=0)
class RandomImportance(Importance):
"""Random Importance."""
@torch.no_grad()
def __call__(self, plan):
"""Call function."""
_, idxs = plan[0]
return torch.randn((len(idxs), ))
class SensitivityImportance(Importance):
"""Sensitivity Importance."""
def __init__(self, local=False, reduction="mean") -> None:
"""Initialize."""
self.local = local
self.reduction = reduction
def __call__(self, loss, plan):
"""Call function."""
loss.backward()
with torch.no_grad():
importance = 0
n_layers = 0
for dep, idxs in plan:
layer = dep.target.module
prune_fn = dep.handler
n_layers += 1
if prune_fn in [
functional.prune_conv_out_channel,
functional.prune_linear_in_channel,
]:
w_dw = (layer.weight * layer.weight.grad)[idxs]
importance += torch.norm(torch.flatten(w_dw, 1), dim=1)
if layer.bias:
w_dw = (layer.bias * layer.bias.grad)[idxs].view(-1, 1)
importance += torch.norm(w_dw, dim=1)
elif prune_fn in [
functional.prune_conv_in_channel,
functional.prune_linear_in_channel,
]:
w_dw = (layer.weight * layer.weight.grad)[:, idxs].transpose(0, 1)
importance += torch.norm(torch.flatten(w_dw, 1), dim=1)
elif prune_fn == functional.prune_batchnorm:
if layer.affine:
w_dw = (layer.weight * layer.weight.grad)[idxs].view(-1, 1)
importance += torch.norm(w_dw, dim=1)
w_dw = (layer.bias * layer.bias.grad)[idxs].view(-1, 1)
importance += torch.norm(w_dw, dim=1)
else:
n_layers -= 1
if self.local:
break
if self.reduction == "sum":
return importance
if self.reduction == "mean":
return importance / n_layers
class HessianImportance(Importance):
"""Hessian Importance."""
def __init__(self) -> None:
"""Initialize."""
pass
class BNScaleImportance(Importance):
"""BNScale Importance."""
def __init__(self, group_level=False, reduction='mean'):
"""Initialize."""
self.group_level = group_level
self.reduction = reduction
def __call__(self, plan):
"""Call function."""
importance_mat = []
for dep, _ in plan:
# Conv-BN
module = dep.target.module
if isinstance(module, nn.BatchNorm2d) and module.affine:
imp = torch.abs(module.weight.data)
importance_mat.append(imp)
if not self.group_level:
return imp
importance_mat = torch.stack(importance_mat, dim=0)
if self.reduction == "sum":
return importance_mat.sum(dim=0)
if self.reduction == "mean":
return importance_mat.mean(dim=0)
if self.reduction == "max":
return importance_mat.max(dim=0)[0]
if self.reduction == "min":
return importance_mat.min(dim=0)[0]
class StrcuturalImportance(Importance):
"""Strcutural Importance."""
def __init__(self, p=1, local=False, reduction="mean"):
"""Initialize."""
self.p = p
self.local = local
self.reduction = reduction
@torch.no_grad()
def __call__(self, plan):
"""Call function."""
importance_mat = []
non_importance = True
for dep, idxs in plan:
layer = dep.target.module
prune_fn = dep.handler
if prune_fn in [
functional.prune_conv_out_channel,
functional.prune_linear_out_channel,
]:
w = (layer.weight)[idxs]
this_importance = torch.norm(torch.flatten(w, 1), dim=1, p=self.p)
importance_mat.append(rescale(this_importance))
non_importance = False
elif prune_fn in [
functional.prune_conv_in_channel,
functional.prune_linear_in_channel,
]:
w = (layer.weight)[:, idxs].transpose(0, 1)
w = torch.flatten(w, 1)
if w.shape[0] != importance_mat[0].shape[0]: # for conv-flatten-linear
if (
w.shape[0] % importance_mat[0].shape[0] != 0
): # TODO: support Group Convs
continue
w = w.view(
importance_mat[0].shape[0],
w.shape[0] // importance_mat[0].shape[0],
w.shape[1],
)
this_importance = torch.norm(torch.flatten(w, 1), dim=1, p=self.p)
importance_mat.append(rescale(this_importance))
non_importance = False
elif prune_fn == functional.prune_batchnorm:
                if layer.affine:
w = (layer.weight)[idxs].view(-1, 1)
this_importance = torch.norm(w, dim=1, p=self.p)
importance_mat.append(rescale(this_importance))
if self.local:
break
importance_mat = torch.stack(importance_mat, dim=0)
if non_importance:
return None
if self.reduction == "sum":
return importance_mat.sum(dim=0)
if self.reduction == "mean":
return importance_mat.mean(dim=0)
if self.reduction == "max":
return importance_mat.max(dim=0)[0]
if self.reduction == "min":
return importance_mat.min(dim=0)[0]
if self.reduction == "prod":
return importance_mat.prod(dim=0)
class LAMPImportance(Importance):
"""LAMP Importance."""
def __init__(self, p=2, local=False, reduction="mean"):
"""Initialize."""
self.p = p
self.local = local
self.reduction = reduction
@torch.no_grad()
def __call__(self, plan):
"""Call function."""
importance = 0
n_layers = 0
non_importance = True
for dep, idxs in plan:
layer = dep.target.module
prune_fn = dep.handler
n_layers += 1
if prune_fn in [
functional.prune_conv_out_channel,
functional.prune_linear_out_channel,
]:
w = (layer.weight)[idxs]
this_importance = torch.norm(torch.flatten(w, 1), dim=1, p=self.p)
this_importance = rescale(this_importance)
importance += this_importance
non_importance = False
elif prune_fn in [
functional.prune_conv_in_channel,
functional.prune_linear_in_channel,
]:
w = (layer.weight)[:, idxs].transpose(0, 1)
w = torch.flatten(w, 1)
if w.shape[0] != importance.shape[0]: # for conv-flatten-linear
if (
w.shape[0] % importance.shape[0] != 0
): # TODO: support Group Convs
continue
w = w.view(
importance.shape[0],
w.shape[0] // importance.shape[0],
w.shape[1],
)
this_importance = torch.norm(torch.flatten(w, 1), dim=1, p=self.p)
this_importance = rescale(this_importance)
importance += this_importance
non_importance = False
elif prune_fn == functional.prune_batchnorm:
continue
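                # NOTE (editorial): the `continue` above skips BatchNorm layers
                # entirely, so the affine-scale block below is unreachable here.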
if layer.affine is not None:
w = (layer.weight)[idxs].view(-1, 1)
importance += rescale(torch.norm(w, dim=1, p=self.p))
else:
n_layers -= 1
if self.local:
break
argsort_idx = torch.argsort(importance).tolist()[::-1] # [7, 5, 2, 3, 1, ...]
sorted_importance = importance[argsort_idx]
cumsum_importance = torch.cumsum(sorted_importance, dim=0)
sorted_importance = sorted_importance / cumsum_importance
inversed_idx = torch.arange(len(sorted_importance))[argsort_idx].tolist() # [0, 1, 2, 3, ..., ]
importance = sorted_importance[inversed_idx]
if non_importance:
return None
if self.reduction == "sum":
return importance
if self.reduction == "mean":
return importance / n_layers
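
# --- Hedged usage sketch (editorial addition, not part of the original file) ---
# The importance classes consume a pruning "plan": a sequence of (dependency,
# channel-index) pairs produced by the dependency graph in this package.
# RandomImportance only reads the index list, so it can be exercised with a stub
# plan; the (None, indices) pair below is a fabricated stand-in, not a real
# dependency object.
if __name__ == "__main__":
    _stub_plan = [(None, list(range(8)))]
    print(RandomImportance()(_stub_plan).shape)  # torch.Size([8])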
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/pruning/torch_pruning/importance.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Init module."""
# flake8: noqa: F401, F403
from .dependency import *
from .functional import *
from . import metric, utils, strategy, helpers, importance, pruner
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/pruning/torch_pruning/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Strategy module."""
import torch
from abc import abstractclassmethod, ABC
from typing import Sequence
import random
def round_pruning_amount(total_parameters, n_to_prune, round_to):
"""round the parameter amount after pruning to an integer multiple of `round_to`."""
n_remain = round_to * max(int(total_parameters - n_to_prune) // round_to, 1)
return max(total_parameters - n_remain, 0)
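
# Hedged worked example (editorial addition): round_pruning_amount(10, 3, 4)
# computes n_remain = 4 * max((10 - 3) // 4, 1) = 4 and returns 10 - 4 = 6,
# i.e. the prune count is rounded up so that the surviving channels stay a
# multiple of `round_to`.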
class BaseStrategy(ABC):
"""Base Strategy class."""
def __call__(self, *args, **kwargs):
"""Use Base Strategy."""
return self.apply(*args, **kwargs)
@abstractclassmethod
def apply(cls, weights, amount=0.0, round_to=1) -> Sequence[int]:
""" Apply the strategy on weights with user specified pruning percentage.
Args:
amount (float): the percentage of weights to be pruned (amount<1.0) or the amount of weights to be pruned (amount>=1.0)
round_to (int): the number to which the number of pruned channels is rounded.
"""
raise NotImplementedError
class RandomStrategy(BaseStrategy):
"""Random Strategy class."""
def apply(self, weights, amount=0.0, round_to=1) -> Sequence[int]:
"""Return indices."""
if amount <= 0:
return []
n = len(weights)
n_to_prune = int(amount * n) if amount < 1.0 else amount
n_to_prune = round_pruning_amount(n, n_to_prune, round_to)
if n_to_prune == 0:
return []
indices = random.sample(list(range(n)), k=n_to_prune)
return indices
class LNStrategy(BaseStrategy):
"""LNStrategy class."""
def __init__(self, p):
"""Initialize."""
self.p = p
def apply(self, weights, amount=0.0, round_to=1) -> Sequence[int]:
"""Return indices."""
if amount <= 0:
return []
n = len(weights)
l1_norm = torch.norm(weights.view(n, -1), p=self.p, dim=1)
n_to_prune = int(amount * n) if amount < 1.0 else amount
n_to_prune = round_pruning_amount(n, n_to_prune, round_to)
if n_to_prune == 0:
return []
threshold = torch.kthvalue(l1_norm, k=n_to_prune).values
indices = torch.nonzero(l1_norm <= threshold).view(-1).tolist()
return indices
class L1Strategy(LNStrategy):
"""L1Strategy class."""
def __init__(self):
"""Initialize."""
super(L1Strategy, self).__init__(p=1)
class L2Strategy(LNStrategy):
"""L2Strategy class."""
def __init__(self):
"""Initialize."""
super(L2Strategy, self).__init__(p=2)
class GroupLNStrategy(ABC):
"""GroupLNStrategy class."""
def __call__(self, *args, **kwargs):
"""Call function."""
return self.apply(*args, **kwargs)
@abstractclassmethod
def apply(cls, group, amount=0.0, round_to=1) -> Sequence[int]:
""" Apply the strategy on weights with user specified pruning percentage.
Args:
amount (float): the percentage of weights to be pruned (amount<1.0) or the amount of weights to be pruned (amount>=1.0)
round_to (int): the number to which the number of pruned channels is rounded.
"""
for dep, idxs in cls._plans:
_, __ = dep.handler(dep.target.module, idxs, dry_run=True)
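
# --- Hedged usage sketch (editorial addition, not part of the original file) ---
# Picks the two lowest-L1-norm filters of a random 8-filter conv weight; the
# tensor shape and pruning amount are illustrative assumptions.
if __name__ == "__main__":
    _weights = torch.randn(8, 3, 3, 3)
    print(L1Strategy()(_weights, amount=0.25))  # indices of the 2 filters to prune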
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/pruning/torch_pruning/strategy.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metric module."""
# pylint: disable=R1705
import torch
from .dependency import TORCH_CONV, TORCH_BATCHNORM, TORCH_PRELU, TORCH_LINEAR
def norm(weights, p=1, norm_dim=0, idxs=None, reduction='sum'):
"""Norm."""
l1_norm = torch.norm(weights.transpose(0, norm_dim).flatten(1), p=p, dim=1)
if idxs is not None:
l1_norm = l1_norm[idxs]
if reduction == 'sum':
return l1_norm.sum()
return l1_norm
class NormMetric:
"""Norm Metric class."""
def __init__(self, p, reduction='sum'):
"""Initilize."""
self.p = p
self.reduction = reduction
@torch.no_grad()
def __call__(self, layer, idxs):
"""Call function."""
if isinstance(layer, (TORCH_CONV, TORCH_LINEAR)):
weight_norm = norm(layer.weight, p=self.p, norm_dim=0, idxs=idxs, reduction=self.reduction)
if layer.bias is not None:
weight_norm += norm(layer.bias.unsqueeze(-1), p=self.p, norm_dim=0, idxs=idxs, reduction=self.reduction)
return weight_norm
elif isinstance(layer, TORCH_BATCHNORM):
if layer.weight is not None:
weight_norm = norm(layer.weight.unsqueeze(-1), p=self.p, norm_dim=0, idxs=idxs, reduction=self.reduction) \
+ norm(layer.bias.unsqueeze(-1), p=self.p, norm_dim=0, idxs=idxs, reduction=self.reduction)
else:
weight_norm = 0
return weight_norm
elif isinstance(layer, TORCH_PRELU):
if layer.num_parameters == 1:
return 0
else:
return norm(layer.weight.unsqueeze(-1), p=self.p, norm_dim=0, idxs=idxs, reduction=self.reduction)
else:
raise NotImplementedError()
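
# --- Hedged usage sketch (editorial addition, not part of the original file) ---
# Sums the L1 norm (weights plus bias) of two selected filters of a conv layer;
# the layer sizes and the index list are made-up values for illustration.
if __name__ == "__main__":
    _conv = torch.nn.Conv2d(3, 8, kernel_size=3)
    print(NormMetric(p=1)(_conv, idxs=[0, 1]))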
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/pruning/torch_pruning/metric.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility module."""
# pylint: disable=R1705
from .dependency import TORCH_CONV, TORCH_BATCHNORM, TORCH_PRELU, TORCH_LINEAR
import torch
import thop
def count_prunable_params_of_modules(module):
"""Count prunable params of moduls."""
if isinstance(module, (TORCH_CONV, TORCH_LINEAR)):
num_params = module.weight.numel()
if module.bias is not None:
num_params += module.bias.numel()
return num_params
elif isinstance(module, TORCH_BATCHNORM):
num_params = module.running_mean.numel() + module.running_var.numel()
if module.affine:
num_params += module.weight.numel() + module.bias.numel()
return num_params
elif isinstance(module, TORCH_PRELU):
if len(module.weight) == 1:
return 0
else:
            return module.weight.numel()
else:
return 0
def count_prunable_in_channels(module):
"""Count prunable in-channels."""
if isinstance(module, TORCH_CONV):
return module.weight.shape[1]
elif isinstance(module, TORCH_LINEAR):
return module.in_features
elif isinstance(module, TORCH_BATCHNORM):
return module.num_features
elif isinstance(module, TORCH_PRELU):
if len(module.weight) == 1:
return 0
else:
return len(module.weight)
else:
return 0
def count_prunable_out_channels(module):
"""Count prunable out-channels."""
if isinstance(module, TORCH_CONV):
return module.weight.shape[0]
elif isinstance(module, TORCH_LINEAR):
return module.out_features
elif isinstance(module, TORCH_BATCHNORM):
return module.num_features
elif isinstance(module, TORCH_PRELU):
if len(module.weight) == 1:
return 0
else:
return len(module.weight)
else:
return 0
def count_params(module):
"""Count params"""
return sum([p.numel() for p in module.parameters()])
def count_macs_and_params(model, input_size, example_inputs=None):
"""Count macs and params."""
if example_inputs is None:
example_inputs = torch.randn(*input_size)
macs, params = thop.profile(model, inputs=(example_inputs, ), verbose=False)
return macs, params
def count_total_prunable_channels(model):
"""Count total prunable channels."""
in_ch = 0
out_ch = 0
for m in model.modules():
out_ch += count_prunable_out_channels(m)
in_ch += count_prunable_in_channels(m)
return out_ch, in_ch
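
# --- Hedged usage sketch (editorial addition, not part of the original file) ---
# Counts parameters, prunable channels and MACs of a tiny conv-bn-linear stack;
# the architecture and the 1x3x8x8 input size are arbitrary illustrative choices.
if __name__ == "__main__":
    _model = torch.nn.Sequential(
        torch.nn.Conv2d(3, 4, 3, padding=1),
        torch.nn.BatchNorm2d(4),
        torch.nn.Flatten(),
        torch.nn.Linear(4 * 8 * 8, 10),
    )
    print(count_params(_model))
    print(count_total_prunable_channels(_model))
    print(count_macs_and_params(_model, input_size=(1, 3, 8, 8)))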
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/pruning/torch_pruning/utils.py |