python_code | repo_name | file_path |
---|---|---|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entrypoint script for the Metric Learning Recognition task."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/metric_learning_recognition/entrypoint/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metric-learning recognition Model Module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/metric_learning_recognition/model/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main PTL model file for Metric Learning Recognition."""
from collections import defaultdict
import os
import pandas as pd
import numpy as np
import torch
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_only
from pytorch_metric_learning import losses, miners, testers
from pytorch_metric_learning.utils.accuracy_calculator import AccuracyCalculator
from nvidia_tao_pytorch.cv.metric_learning_recognition.model.build_nn_model import build_model
from nvidia_tao_pytorch.cv.metric_learning_recognition.dataloader.build_data_loader import build_dataloader
from nvidia_tao_pytorch.cv.re_identification.utils.scheduler import WarmupMultiStepLR
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
class MLRecogModel(pl.LightningModule):
"""PTL module for single stream Metric Learning Recognition. The training
process is to minimize the distances between the embeddings of the same class
and maximize the distances between the embeddings of different classes. The
validation process is to evaluate the similarity search performance of the
model on the validation reference and query datasets. The validation process
only supports running on a single GPU.
"""
def __init__(self, experiment_spec, results_dir, subtask="train"):
"""Initializes training for Metric Learning Recognition model.
Args:
experiment_spec (DictConfig): Configuration File
results_dir (String): Path to save results
subtask (String): The purpose of the model. Can be "train", "evaluate", "export", "inference" only
"""
super().__init__()
self.experiment_spec = experiment_spec
self.results_dir = results_dir
self.subtask = subtask
self.status_logging_dict = {"train_loss": 0.0,
"val Precision at Rank 1": 0.0}
if subtask == "train":
checkpoint = self.experiment_spec["train"]["resume_training_checkpoint_path"]
elif subtask in ("evaluate", "export", "inference"):
checkpoint = self.experiment_spec[subtask]["checkpoint"]
if checkpoint:
# Failure should always be caught before or after this warning
if not os.path.exists(checkpoint):
checkpoint_to_load = False
else:
checkpoint_to_load = True
else:
checkpoint_to_load = False
self._build_model(experiment_spec, checkpoint_to_load=checkpoint_to_load)
# Activates manual optimization
self.automatic_optimization = False
if self.subtask == "train":
status_logging.get_status_logger().write(
message="Preparing for training",
status_level=status_logging.Status.RUNNING)
self.my_loss_func = self.__make_loss(experiment_spec)
(self.train_loader, self.query_loader, self.gallery_loader,
self.dataset_dict) = build_dataloader(
cfg=self.experiment_spec,
mode="train")
self.class_dict = self.dataset_dict["query"].class_dict
self.load_tester()
def load_tester(self):
"""Loads a `pytorch_metric_learning.testers.GlobalTwoStreamEmbeddingSpaceTester` to prepare for gallery-query similarity search evaluation."""
# suppress end test print results
def end_test_hook(tester):
pass
self.tester = testers.GlobalEmbeddingSpaceTester(
batch_size=self.experiment_spec["train"]["val_batch_size"],
end_of_testing_hook=end_test_hook,
dataloader_num_workers=self.experiment_spec["dataset"]["workers"],
accuracy_calculator=AccuracyCalculator(
k="max_bin_count",
return_per_class=self.experiment_spec[self.subtask]["report_accuracy_per_class"]),
)
def _build_model(self, experiment_spec, checkpoint_to_load=False):
self.model = build_model(
experiment_spec,
checkpoint_to_load=checkpoint_to_load)
if self.subtask != "train":
self.model.eval()
def train_dataloader(self):
"""Builds the dataloader for training.
Returns:
train_loader (torch.utils.data.DataLoader): Training data.
"""
return self.train_loader
def configure_optimizers(self):
"""Configure optimizers for training.
Returns:
optim_dict1 (Dict[String, Object]): a map of the trunk's optimizer, monitor and lr scheduler
optim_dict2 (Dict[String, Object]): a map of the embedder's optimizer, monitor and lr scheduler
"""
self.train_config = self.experiment_spec["train"]
self.optim_config = self.train_config["optim"]
optimizers = self.__make_optimizer()
self.schedulers = {}
for k, opt in optimizers.items():
sch = WarmupMultiStepLR(
opt, self.optim_config["steps"],
gamma=self.optim_config["gamma"],
warmup_factor=self.optim_config["warmup_factor"],
warmup_iters=self.optim_config["warmup_iters"],
warmup_method=self.optim_config["warmup_method"],
last_epoch=self.current_epoch - 1,
)
self.schedulers[k] = sch
optim_dict1 = {
"optimizer": optimizers["trunk"],
'monitor': None,
"lr_scheduler": self.schedulers["trunk"]
}
optim_dict2 = {
"optimizer": optimizers["embedder"],
'monitor': None,
"lr_scheduler": self.schedulers["embedder"]
}
return (optim_dict1, optim_dict2)
def __make_module_optimizer(self, model_name):
if model_name == "embedder":
model = self.model.embedder
elif model_name == "trunk":
model = self.model.trunk
params = []
for key, value in model.named_parameters():
if not value.requires_grad:
continue
lr = self.optim_config[model_name]["base_lr"]
weight_decay = self.optim_config[model_name]["weight_decay"]
if "bias" in key:
lr = self.optim_config[model_name]["base_lr"] * self.optim_config[model_name]["bias_lr_factor"]
weight_decay = self.optim_config[model_name]["weight_decay_bias"]
params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}]
if self.optim_config["name"] == 'SGD':
optimizer = getattr(torch.optim, self.optim_config["name"])(params, momentum=self.optim_config[model_name]["momentum"])
else:
optimizer = getattr(torch.optim, self.optim_config["name"])(params)
return optimizer
def __make_optimizer(self):
embedder_optimizer = self.__make_module_optimizer("embedder")
trunk_optimizer = self.__make_module_optimizer("trunk")
optimizers = {
"embedder": embedder_optimizer,
"trunk": trunk_optimizer
}
return optimizers
def training_step(self, batch):
"""Training step.
Args:
batch (torch.Tensor): Batch of data
Returns:
loss (torch.float32): Loss value for each step in training
"""
data, labels = batch
data = data.float()
opt1, opt2 = self.optimizers()
self.optimizer_dict = {'trunk': opt1, "embedder": opt2}
self._zero_grad()
outputs = self.model(data)
loss = self.my_loss_func(outputs, labels)
self.log("train_loss", loss, on_step=True, on_epoch=True, prog_bar=True)
self.log("trunk_base_lr", self.schedulers["trunk"].get_lr()[0],
on_step=False, on_epoch=True, prog_bar=True, sync_dist=True)
self.log("embedder_base_lr", self.schedulers["embedder"].get_lr()[0],
on_step=False, on_epoch=True, prog_bar=True, sync_dist=True)
self.manual_backward(loss)
# clip gradients
for opt in self.optimizer_dict.values():
self.clip_gradients(
opt,
gradient_clip_val=self.experiment_spec['train']['clip_grad_norm'],
gradient_clip_algorithm="norm"
)
self._step_optimizers()
self.current_loss = loss
return loss
def get_query_accuracy(self):
"""Obtains the metric results of gallery-query similarity search evaluation.
Returns:
all_accuracies (Dict[str, float]): a map of the default accuracy metrics from
`pytorch_metric_learning.utils.accuracy_calculator.AccuracyCalculator`.
For explanations of the metrics, see
https://kevinmusgrave.github.io/pytorch-metric-learning/accuracy_calculation/#explanations-of-the-default-accuracy-metrics
"""
if self.subtask == "train":
epoch_log = self.current_epoch
else:
epoch_log = "eval mode"
all_accuracies = self.tester.test(self.dataset_dict,
epoch_log, # used for logging
self.model, # your model
splits_to_eval=[('query', ['gallery'])]
)
return all_accuracies
def on_train_epoch_end(self):
"""Action on training epoch end."""
self._step_schedulers()
@rank_zero_only
def update_validation_metrics(self):
"""Updates the validation metrics at the end of each validation epoch."""
all_accuracies = self.get_query_accuracy()
ami = all_accuracies['query']['AMI_level0']
nmi = all_accuracies['query']['NMI_level0']
mean_avg_prec = all_accuracies['query']['mean_average_precision_level0']
mean_reciprocal_rank = all_accuracies['query']['mean_reciprocal_rank_level0']
mean_r_precision = all_accuracies['query']['r_precision_level0']
val_accuracy = all_accuracies['query']['precision_at_1_level0']
self.status_logging_dict['val_AMI'] = ami
self.status_logging_dict['val_NMI'] = nmi
if self.experiment_spec['train']['report_accuracy_per_class']:
self.status_logging_dict['val Mean Average Precision'] = sum(mean_avg_prec) / len(mean_avg_prec)
self.status_logging_dict['val Mean Reciprocal Rank'] = sum(mean_reciprocal_rank) / len(mean_reciprocal_rank)
self.status_logging_dict['val r-Precision'] = sum(mean_r_precision) / len(mean_r_precision)
self.status_logging_dict['val Precision at Rank 1'] = sum(val_accuracy) / len(val_accuracy)
else:
self.status_logging_dict['val Mean Average Precision'] = mean_avg_prec
self.status_logging_dict['val Mean Reciprocal Rank'] = mean_reciprocal_rank
self.status_logging_dict['val r-Precision'] = mean_r_precision
self.status_logging_dict['val Precision at Rank 1'] = val_accuracy
# print out validation results
print("============================================")
print(f"Validation results at epoch {self.current_epoch}:")
for k, v in self.status_logging_dict.items():
if "val" in k:
print(f"{k}: {v:.4f}")
if self.experiment_spec['train']['report_accuracy_per_class']:
print("\nValidation results per class:")
for k, v in all_accuracies['query'].items():
if "level" in k:
if isinstance(v, list):
print(f" {k[:-7]}:")
for i, vv in enumerate(v):
print(f" {self.class_dict[i]}: {vv:.4f}")
print("============================================")
def training_epoch_end(self, training_step_outputs):
"""Generates train and validation metrics and the end of training epoch.
training_step_outputs (List[Dict[str, torch.Tensor]]): List of outputs from training_step.
"""
average_train_loss = 0.0
for out in training_step_outputs:
average_train_loss += out['loss'].item()
average_train_loss /= len(training_step_outputs)
report_loss = np.around(average_train_loss, decimals=4)
report_trunk_lr = '{:.4e}'.format(self.schedulers['trunk'].get_lr()[0])
report_embedder_lr = '{:.4e}'.format(self.schedulers['embedder'].get_lr()[0])
self.status_logging_dict['train_loss'] = report_loss
self.status_logging_dict['trunk_base_lr'] = report_trunk_lr
self.status_logging_dict['embedder_base_lr'] = report_embedder_lr
# validation_epoch_end does not work here. Manually set it up.
# add one to match the checkpoint saving epochs
if (self.current_epoch + 1) % self.experiment_spec['train']['checkpoint_interval'] == 0:
self.update_validation_metrics()
# status loggings are rank zero only
status_logging.get_status_logger().kpi = self.status_logging_dict
status_logging.get_status_logger().write(
message="Train and eval metrics generated.",
status_level=status_logging.Status.RUNNING
)
def _step_optimizers(self):
for v in self.optimizer_dict.values():
v.step()
def _step_schedulers(self):
for v in self.schedulers.values():
v.step()
def _zero_grad(self):
self.model.zero_grad()
for v in self.optimizer_dict.values():
v.zero_grad()
def on_train_epoch_start(self):
"""Perform on start of every epoch."""
print('\n')
def forward(self, x):
"""Forward of the Metric Learning Recognition model.
Args:
x (torch.Tensor): Batch of data
Returns:
output (torch.Tensor): Output of the model (class score, feats)
"""
output = self.model(x)
return output
def __make_loss(self, cfg):
self.optim_config = cfg["train"]["optim"]
loss_func = losses.TripletMarginLoss(
margin=self.optim_config["triplet_loss_margin"],
smooth_loss=self.experiment_spec["train"]["smooth_loss"])
mining_func = miners.MultiSimilarityMiner(
epsilon=self.optim_config["miner_function_margin"])
def calculate_loss(embeddings, labels):
indices_tuple = mining_func(embeddings, labels)
metric_loss = loss_func(embeddings, labels, indices_tuple)
return metric_loss
return calculate_loss
def report_accuracies(self, acc_dict, save_results=False):
"""Converts the metrics results map to a pd.DataFrame table to display
top1 precisions of all classes.
Args:
acc_dict (Dict[str, float]): A map of metrics and results obtained from
`self.get_query_accuracies()`
save_results (Boolean): If True, the derived dataframe is saved
to a csv file at results_dir/accuracy_per_class.csv
Returns:
df (pd.DataFrame): A table of top1 precision of all classes.
"""
output = defaultdict(dict)
count = 0
for idx in self.class_dict:
class_name = self.class_dict[idx]
if class_name in self.dataset_dict["query"].empty_classes:
output[class_name]['top1_acc'] = None
count += 1
else:
output[class_name]['top1_acc'] = \
acc_dict['query']['precision_at_1_level0'][idx - count]
df = pd.DataFrame.from_dict(output, orient='index')
if save_results:
df.to_csv(os.path.join(
self.results_dir,
"accuracy_per_class.csv"))
return df
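# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal, self-contained example of the miner + triplet-loss pairing that
# __make_loss wires together above. The margin and epsilon values below are
# hypothetical placeholders rather than values taken from an experiment spec.
if __name__ == "__main__":
    example_embeddings = torch.randn(8, 256)                 # a batch of embeddings
    example_labels = torch.tensor([0, 0, 1, 1, 2, 2, 3, 3])  # class id per embedding
    example_loss_func = losses.TripletMarginLoss(margin=0.05, smooth_loss=True)
    example_mining_func = miners.MultiSimilarityMiner(epsilon=0.1)
    # Mine informative (anchor, positive, negative) tuples, then compute the
    # triplet margin loss only on those tuples, mirroring calculate_loss().
    example_indices = example_mining_func(example_embeddings, example_labels)
    print(example_loss_func(example_embeddings, example_labels, example_indices).item())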
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/metric_learning_recognition/model/pl_ml_recog_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The top model builder interface."""
import os
import torch
import torchvision.models as torch_model
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
class MLPSeq(torch.nn.Module):
"""This block implements a series of MLP layers given the input sizes."""
def __init__(self, layer_sizes, final_relu=False):
"""Initiates the sequential module of MLP layers.
Args:
layer_sizes (List[List]): a nested list of MLP layer sizes
final_relu (Boolean): if True, a ReLU activation also precedes the final linear layer; otherwise ReLU only precedes the intermediate linear layers.
"""
super().__init__()
layer_list = []
layer_sizes = [int(x) for x in layer_sizes]
num_layers = len(layer_sizes) - 1
final_relu_layer = num_layers if final_relu else num_layers - 1
for i in range(len(layer_sizes) - 1):
input_size = layer_sizes[i]
curr_size = layer_sizes[i + 1]
if i < final_relu_layer:
layer_list.append(torch.nn.ReLU(inplace=True))
layer_list.append(torch.nn.Linear(input_size, curr_size))
self.net = torch.nn.Sequential(*layer_list)
self.last_linear = self.net[-1]
def forward(self, x):
"""Sequential MLP forward."""
return self.net(x)
class Baseline(torch.nn.Module):
"""Base model for Metric Learning Recognition model. The model consists of a
backbone (trunk) and a feature extractor (embedder). The backbone has a softmax
layer and it would be replaced by an identity layer.
"""
def __init__(self, trunk, embedder):
"""Initiates the joint modules of the backbone and feature extractors.
Args:
embedder (torch.Module): The MLP layers with embedding vector outputs
trunk (torch.Module): the backbone with fc layer removed
"""
super().__init__()
self.embedder = embedder
self.trunk = trunk
def forward(self, x):
"""Joint forward function for the backbone and the feature extractor."""
features_extracted = self.trunk(x)
output_embeds = self.embedder(features_extracted)
return output_embeds
def load_param(self, model_path):
"""Load paramaters for the model from a .pth format pretrained weights.
Args:
model_path (str): Model path.
"""
param_dict = torch.load(model_path)
if "state_dict" in param_dict:
param_dict = param_dict["state_dict"]
for i in param_dict:
if 'fc' in i:
continue
if ("net" in i) or ("bias" in i):
j = "embedder." + i
else:
j = "trunk." + i
if j in self.state_dict(destination=None).keys():
self.state_dict(destination=None)[j].copy_(param_dict[i])
def build_model(cfg, checkpoint_to_load=False):
"""Builds metric learning recognition model according to config. If
`checkpoint_to_load` is True, nothing would be returned as the model is already
loaded somewhere else. If `checkpoint_to_load` is False, the function would
do following things: first the model trunk and embedder would be initialized randomly.
if `model.pretrain_choice` is `imagenet`, the pretrained weights from
`Torch IMAGENET1K_V2` would be loaded to the trunk. If `model.pretrained_model_path`
is specified, the pretrained weights from the weights file would be loaded to
the trunk. If `model.pretrain_choice` is empty and `model.pretrained_model_path`
is not specified, the trunk would keep its random weights. Notice that the
embedder would not be loaded with pretrained weights in any case.
In the end, the embedder and trunk would be combined to a Baseline model and
the model would be returned.
Args:
cfg (DictConfig): Hydra config object.
checkpoint_to_load (Bool): If True, a checkpoint would be loaded after building the model so the pretrained weights should not be loaded.
Returns:
model (torch.Module): the Baseline torch module.
"""
status_logging.get_status_logger().write(
message="Constructing model graph...",
status_level=status_logging.Status.RUNNING)
model_configs = cfg["model"]
trunk_model = model_configs["backbone"]
embed_dim = model_configs["feat_dim"]
# torchvision model
load_weights = None
if not checkpoint_to_load and model_configs["pretrain_choice"] == "imagenet":
status_logging.get_status_logger().write(
message="Loading ImageNet pretrained weights to trunk...",
status_level=status_logging.Status.RUNNING)
if trunk_model == "resnet_50":
load_weights = torch_model.ResNet50_Weights.IMAGENET1K_V2
elif trunk_model == "resnet_101":
load_weights = torch_model.ResNet101_Weights.IMAGENET1K_V2
else:
error_mesage = "`model.backbone` only supports resnet_50 and resnet_101 at this moment."
status_logging.get_status_logger().write(
message=error_mesage,
status_level=status_logging.Status.FAILURE)
raise ValueError(error_mesage)
trunk = torch_model.__dict__[trunk_model.replace("_", "")](
weights=load_weights,
progress=False)
trunk_output_size = trunk.fc.in_features
trunk.fc = torch.nn.Identity()
embedder = MLPSeq([trunk_output_size, embed_dim])
model = Baseline(trunk=trunk, embedder=embedder)
if checkpoint_to_load:
status_logging.get_status_logger().write(
message="Skipped loading pretrained model as checkpoint is to load.",
status_level=status_logging.Status.SKIPPED)
else:
status_logging.get_status_logger().write(
message=f"Loading pretrained model to trunk: {model_configs['pretrained_model_path']}. Embedder pretrain weights loading is not supported now.",
status_level=status_logging.Status.RUNNING)
resume_ckpt = model_configs["pretrained_model_path"]
if resume_ckpt:
if not os.path.exists(resume_ckpt):
error_mesage = "`model.pretrained_model_path` file does not exist."
status_logging.get_status_logger().write(
message=error_mesage,
status_level=status_logging.Status.FAILURE)
raise ValueError(error_mesage)
model.load_param(resume_ckpt)
return model
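# --- Usage sketch (added for illustration; not part of the original module) ---
# Shows how a trunk and an MLPSeq embedder are combined into the Baseline
# module above, bypassing the Hydra config. The 256-dim embedding size is a
# hypothetical placeholder.
if __name__ == "__main__":
    example_trunk = torch_model.resnet50(weights=None, progress=False)
    example_output_size = example_trunk.fc.in_features  # 2048 for resnet50
    example_trunk.fc = torch.nn.Identity()               # drop the classification head
    example_embedder = MLPSeq([example_output_size, 256])
    example_model = Baseline(trunk=example_trunk, embedder=example_embedder)
    dummy_batch = torch.randn(2, 3, 224, 224)
    print(example_model(dummy_batch).shape)               # torch.Size([2, 256])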
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/metric_learning_recognition/model/build_nn_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inferencer."""
import os
import pandas as pd
import torch
import numpy as np
from tqdm import tqdm
from pytorch_metric_learning.utils.inference import InferenceModel
from nvidia_tao_pytorch.cv.metric_learning_recognition.utils.match_finder import EmbeddingKNN
from nvidia_tao_pytorch.cv.metric_learning_recognition.model.build_nn_model import build_model
from nvidia_tao_pytorch.cv.metric_learning_recognition.model.pl_ml_recog_model import MLRecogModel
from nvidia_tao_pytorch.cv.metric_learning_recognition.dataloader.transforms import build_transforms
from nvidia_tao_pytorch.cv.metric_learning_recognition.dataloader.build_data_loader import build_inference_dataloader
from nvidia_tao_pytorch.cv.metric_learning_recognition.dataloader.build_data_loader import build_dataloader
from nvidia_tao_pytorch.cv.metric_learning_recognition.utils.common_utils import no_folders_in
from nvidia_tao_pytorch.cv.re_identification.utils.common_utils import read_image
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
class Inferencer():
"""Pytorch model inferencer."""
def __init__(self, cfg, results_dir):
"""Inferencer for Metric Learning Recognition model. The inferencer would
load the model and process the dataset, and run inference on the inputs.
Three formats of inputs are supported: a single image, a folder of images,
a folder of classification dataset. The inferencer would return the predicted
class if the input is a single image, and return a csv table of predicted
classes if the input is a folder of images or a folder of classification
dataset.
During inference, the model would be loaded from the checkpoint specified
in the config. If no checkpoint is specified, the model would be initialized
randomly. The model would be loaded to the device specified in the config.
The model would be set to eval mode during inference. The reference dataset
would be loaded from the `dataset.val_dataset.reference` specified in the
config. The query dataset would be loaded from `inference.input_path` specified
in the config. The dataset would be processed with the non-train mode
transforms returned from the
`cv.metric_learning_recognition.dataloader.transforms.build_transforms`
function.
The reference and query embeddings are generated from the model. The
K nearest neighbors of each query embedding are found among the reference
embeddings, and the classes of those K nearest neighbors are returned.
If the input is a folder of images or a folder of classification dataset,
a csv table would be generated and it would also include the distances
of the query embeddings from the reference neighbors.
Args:
cfg (DictConfig): Hydra config object for inference task
results_dir (String): path to save the results
"""
device = "cuda" if torch.cuda.is_available() else "cpu"
if device == "cuda":
gpu_id = cfg["inference"]["gpu_id"]
torch.cuda.set_device(gpu_id)
self.experiment_spec = cfg
self.results_dir = results_dir
self.__load_model()
self.model.to(device)
_, _, _, self.dataset_dict = build_dataloader(cfg, mode="inference")
self.transforms = build_transforms(cfg, is_train=False)
self.__load_inferencer()
self.class_dict = self.dataset_dict["gallery"].class_dict
self.topk = cfg["inference"]["topk"]
def __load_model(self):
if not self.experiment_spec["inference"]["checkpoint"]:
self.model = build_model(self.experiment_spec, checkpoint_to_load=False)
status_logging.get_status_logger().write(
message="No weights loaded, model initialized randomly.",
status_level=status_logging.Status.SKIPPED)
else:
status_logging.get_status_logger().write(
message=f"Loading checkpoint: {self.experiment_spec['inference']['checkpoint']}",
status_level=status_logging.Status.STARTED
)
self.model = MLRecogModel.load_from_checkpoint(
self.experiment_spec["inference"]["checkpoint"],
map_location="cpu",
experiment_spec=self.experiment_spec,
results_dir=self.results_dir,
subtask="inference")
self.model.eval()
def __load_inferencer(self):
# TODO: reset before and after for better mem control?
infernce_knn_func = EmbeddingKNN(reset_before=False,
reset_after=False)
self.inference_model = InferenceModel(self.model,
knn_func=infernce_knn_func)
self.inference_model.train_knn(self.dataset_dict["gallery"])
def preprocess(self, image_path):
"""Preprocesses a single image file to inferencer.
Args:
image_path (str): path of an image file.
Returns:
image_tensor (torch.Tensor): image tensor with shape (1, C, W, H).
"""
image = read_image(image_path)
image_tensor = self.transforms(image)
image_tensor = image_tensor.unsqueeze(0)
return image_tensor
def infer_image(self):
"""Infers the class of a single image tensor.
Returns:
class_idx (int): the index of predicted class
"""
image = self.experiment_spec['inference']['input_path']
device = "cuda" if torch.cuda.is_available() else "cpu"
img = self.preprocess(image).to(device)
_, indices = self.inference_model.get_nearest_neighbors(img, k=self.topk)
class_indices = [self.dataset_dict["gallery"][i][1] for i in indices[0]]
# class_idx = Counter(class_indices).most_common(1)[0][0]
class_ids = [self.class_dict[i] for i in class_indices]
status_logging.get_status_logger().write(
message=f"input image: {image}, predicted top {self.topk} class names: {class_ids}",
status_level=status_logging.Status.SUCCESS)
return class_ids
def infer_image_dir(self):
"""Infers all images in an image folder or a classification folder.
Returns:
final_df (pd.DataFrame): a table displaying image file path,
top k predicted classes, topk distances
"""
inference_dataloader = build_inference_dataloader(
self.experiment_spec)
dfs = []
for img_batch in tqdm(inference_dataloader):
distances, indices = self.inference_model.get_nearest_neighbors(
img_batch[0], k=self.topk)
class_indices = [self.dataset_dict["gallery"][i][1] for i in indices.flatten()]
class_labels = np.array([self.class_dict[idx] for idx in
class_indices]).reshape(len(img_batch[0]), -1)
df = pd.DataFrame(zip(list(img_batch[1]), class_labels.tolist(), distances.tolist()))
dfs.append(df)
csv_f = os.path.join(self.results_dir, 'result.csv')
final_df = pd.concat(dfs)
final_df.to_csv(csv_f, header=False, index=False)
status_logging.get_status_logger().write(
message=f"result saved at {csv_f}",
status_level=status_logging.Status.SUCCESS)
return final_df
def infer(self):
"""Runs inference for files at `cfg.inference.input_path`.
Returns:
output (List[str] / pd.DataFrame): If self.experiment_spec.inference.inference_input_type
is `image`, the output is the list of top k predicted class names.
If self.experiment_spec.inference.inference_input_type is `xx_folder`,
the output is a table displaying `file_name, topk predicted
classes, topk distances` of the examples in the folder line by line
"""
# check input
input_file = self.experiment_spec["inference"]["input_path"]
inference_input_type = self.experiment_spec["inference"]["inference_input_type"]
if os.path.isdir(input_file):
if (not no_folders_in(input_file)) and inference_input_type != "classification_folder":
raise ValueError("Folders detected in inference.input_path; the inference_input_type should be classification_folder")
if no_folders_in(input_file) and inference_input_type != "image_folder":
raise ValueError("No folders detected in inference.input_path; the inference_input_type should be image_folder")
elif inference_input_type != "image":
raise ValueError("The input is not a folder; try 'image' as the inference_input_type")
if inference_input_type == "image":
output = self.infer_image()
else:
output = self.infer_image_dir()
return output
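# --- Usage sketch (added for illustration; not part of the original module) ---
# A torch-only illustration of the gallery/query nearest-neighbour lookup that
# InferenceModel.get_nearest_neighbors performs in infer_image above. The
# gallery embeddings, labels and k value are hypothetical placeholders.
if __name__ == "__main__":
    gallery_embeddings = torch.randn(100, 256)      # embedded reference images
    gallery_labels = torch.randint(0, 10, (100,))   # class index per reference
    query_embedding = torch.randn(1, 256)           # one embedded query image
    k = 4
    # Euclidean distance from the query to every gallery embedding.
    distances = torch.cdist(query_embedding, gallery_embeddings)
    top_dists, top_idx = torch.topk(distances, k, largest=False)
    print(top_dists[0].tolist(), gallery_labels[top_idx[0]].tolist())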
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/metric_learning_recognition/inference/inferencer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metric-learning recognition inference module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/metric_learning_recognition/inference/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Original source taken from https://github.com/michuanhaohao/reid-strong-baseline
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Builds Transforms based on training and validation."""
import torchvision.transforms as T
from nvidia_tao_pytorch.cv.re_identification.dataloader.transforms import RandomErasing
def build_transforms(model_config, is_train=True):
"""Returns training or validation dataloader transforms. The transforms include
transferring the Image file to torch.Tensor, random crop, random horizontal flip,
color jitter, gaussian blur, normalization and random erasing.
Whether to use random rotation, color jitter and gaussian blur is specified
in the config. The mean and std of normalization is specified in the config.
The size of random crop is specified in the config. The probability of random
horizontal flip is specified in the config. The kernel size and sigma of
gaussian blur is specified in the config. The brightness, contrast, saturation
and hue of color jitter is specified in the config. The probability of random
erasing is specified in the config.
Args:
model_config (DictConfig): Configuration file
is_train (Boolean): True for training, False for Testing & Validation
Returns:
transform (torchvision.transforms.Compose): Image transform for traning, testing & validation data
"""
normalize_transform = T.Normalize(mean=model_config['dataset']['pixel_mean'],
std=model_config['dataset']['pixel_std'])
input_size = (model_config['model']['input_width'], model_config['model']['input_height'])
if is_train:
transforms = [
T.RandomResizedCrop(input_size),
T.RandomHorizontalFlip(p=model_config['dataset']['prob']),
]
if model_config['dataset']['random_rotation']:
transforms.append(T.RandomRotation(degrees=(0, 180), expand=False))
if model_config['dataset']['color_augmentation']['enabled'] is True:
color_aug_params = model_config['dataset']['color_augmentation']
transforms.append(T.ColorJitter(brightness=color_aug_params['brightness'],
contrast=color_aug_params['contrast'],
saturation=color_aug_params['saturation'],
hue=color_aug_params['hue']))
if model_config['dataset']['gaussian_blur']['enabled'] is True:
gauss_params = model_config['dataset']['gaussian_blur']
transforms.append(T.GaussianBlur(kernel_size=list(gauss_params['kernel']),
sigma=gauss_params['sigma']))
transforms += [
T.ToTensor(),
normalize_transform,
RandomErasing(probability=model_config['dataset']['re_prob'],
mean=model_config['dataset']['pixel_mean'])
]
transform = T.Compose(transforms)
else:
w, h = input_size
# resize to 1.14x the target size so the center crop keeps ~1/1.14 of the resized image
init_resize = (int(w * 1.14), int(h * 1.14))
transform = T.Compose([
T.Resize(init_resize),
T.CenterCrop(input_size),
T.ToTensor(),
normalize_transform
])
return transform
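# --- Usage sketch (added for illustration; not part of the original module) ---
# Applies the validation/test pipeline built above to a synthetic PIL image.
# The nested dict stands in for the Hydra config and only carries the keys the
# is_train=False branch reads; the sizes and statistics are placeholders.
if __name__ == "__main__":
    import numpy as np
    from PIL import Image
    dummy_cfg = {
        "dataset": {"pixel_mean": [0.485, 0.456, 0.406],
                    "pixel_std": [0.229, 0.224, 0.225]},
        "model": {"input_width": 224, "input_height": 224},
    }
    val_transform = build_transforms(dummy_cfg, is_train=False)
    image = Image.fromarray(np.zeros((300, 400, 3), dtype=np.uint8))
    tensor = val_transform(image)   # resize -> center crop -> tensor -> normalize
    print(tensor.shape)             # torch.Size([3, 224, 224])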
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/metric_learning_recognition/dataloader/transforms.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metric-learning recognition dataloader module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/metric_learning_recognition/dataloader/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build torch data loader."""
import yaml
from torch.utils.data import DataLoader
from pytorch_metric_learning import samplers
from nvidia_tao_pytorch.cv.metric_learning_recognition.dataloader.transforms import build_transforms
from nvidia_tao_pytorch.cv.metric_learning_recognition.dataloader.datasets.inference_datasets import InferenceImageFolder, InferenceImageDataset
from nvidia_tao_pytorch.cv.metric_learning_recognition.dataloader.datasets.image_datasets import MetricLearnImageFolder
def build_dataloader(cfg, mode="train"):
"""Build torch dataloader.
Args:
cfg (DictConfig): Hydra config object
mode (String): One of "train", "eval" or "inference"; selects which loaders
and batch sizes are built.
Returns:
train_loader (Dataloader): Train dataloader
query_loader (Dataloader): Val dataloader, used for query jobs in validation or test
gallery_loader (Dataloader): Val dataloader, used for reference job in validation or test
dataset_dict (Dict): a dictionary of train, query and gallery datasets
"""
assert mode in ["train", "eval", "inference"], "mode can only be train, eval or inference"
dataset_configs = cfg["dataset"]
val_transforms = build_transforms(cfg, is_train=False)
num_workers = dataset_configs["workers"]
train_loader, query_loader, gallery_loader = None, None, None
train_dataset = None
query_dataset = None
class_mapping = None
if dataset_configs["class_map"]:
with open(dataset_configs["class_map"], "r") as f:
class_mapping = yaml.load(f, Loader=yaml.FullLoader)
if mode == "train":
train_transforms = build_transforms(cfg, is_train=True)
train_dataset = MetricLearnImageFolder(dataset_configs['train_dataset'],
transform=train_transforms,
class_mapping=class_mapping)
sampler = samplers.MPerClassSampler(train_dataset.targets,
m=cfg["dataset"]["num_instance"],
batch_size=cfg["train"]["batch_size"],
length_before_new_iter=len(train_dataset))
train_loader = DataLoader(
train_dataset, batch_size=cfg["train"]["batch_size"],
sampler=sampler,
num_workers=num_workers
)
val_batch_size = cfg["train"]["val_batch_size"]
elif mode == "inference":
val_batch_size = cfg["inference"]["batch_size"]
elif mode == "eval":
val_batch_size = cfg["evaluate"]["batch_size"]
gallery_dataset = MetricLearnImageFolder(dataset_configs["val_dataset"]["reference"],
transform=val_transforms,
class_mapping=class_mapping)
gallery_loader = DataLoader(
gallery_dataset, batch_size=val_batch_size,
shuffle=False, num_workers=num_workers
)
if mode in ("eval", "train"):
# inference mode has query folder as inference.input_path
query_dataset = MetricLearnImageFolder(dataset_configs["val_dataset"]["query"],
transform=val_transforms,
class_to_idx=gallery_dataset.class_to_idx,
classes=gallery_dataset.classes,
class_mapping=class_mapping)
query_loader = DataLoader(
query_dataset, batch_size=val_batch_size,
shuffle=False, num_workers=num_workers
)
dataset_dict = {
"train": train_dataset,
"query": query_dataset,
"gallery": gallery_dataset
}
return train_loader, query_loader, gallery_loader, dataset_dict
def build_inference_dataloader(cfg):
"""Create a dataloader for inference task.
Args:
cfg (DictConfig): Hydra config object
Returns:
dataloader (DataLoader): A dataloader built on an InferenceImageFolder if
cfg.inference.input_path is a classification folder and
cfg.inference.inference_input_type is correctly marked as
`classification_folder`, or on an InferenceImageDataset if
cfg.inference.input_path is a folder of images and
cfg.inference.inference_input_type is correctly marked as `image_folder`.
"""
image_folder_type = cfg["inference"]["inference_input_type"]
if image_folder_type == "classification_folder":
dataset_builder = InferenceImageFolder
elif image_folder_type == "image_folder":
dataset_builder = InferenceImageDataset
val_transform = build_transforms(cfg, is_train=False)
inference_dataset = dataset_builder(
cfg['inference']['input_path'],
transform=val_transform)
dataloader = DataLoader(inference_dataset,
cfg['inference']['batch_size'],
shuffle=False)
return dataloader
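# --- Usage sketch (added for illustration; not part of the original module) ---
# Illustrates the MPerClassSampler used for the training loader above: every
# batch it draws contains m samples per class, which is what the triplet
# mining in the PTL module relies on. The toy labels are placeholders.
if __name__ == "__main__":
    toy_labels = [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2]
    toy_sampler = samplers.MPerClassSampler(toy_labels, m=2, batch_size=4,
                                            length_before_new_iter=len(toy_labels))
    toy_loader = DataLoader(range(len(toy_labels)), batch_size=4, sampler=toy_sampler)
    for batch in toy_loader:
        # each batch holds indices whose labels come in groups of m=2 per class
        print([toy_labels[i] for i in batch.tolist()])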
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/metric_learning_recognition/dataloader/build_data_loader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset for Metric Learning Recognition inference."""
import os
import torch
from torchvision import transforms
from nvidia_tao_pytorch.cv.metric_learning_recognition.dataloader.datasets.image_datasets import MetricLearnImageFolder
from nvidia_tao_pytorch.cv.re_identification.utils.common_utils import read_image
VALID_IMAGE_EXT = ['.jpg', '.jpeg', '.png']
class InferenceImageFolder(MetricLearnImageFolder):
"""This class inherits from :class:`MetricLearnImageFolder`. It prepares the
data loader from the a classification dataset folder.
In __getitem__ instead of returning image tensor and the target, it returns
image tensor and the image path.
"""
def __getitem__(self, index):
"""Retrieves the (image, image path) from data index.
Args:
index (int): Index of the data to retrieve
Returns:
sample (torch.Tensor): the image tensor
path (String): the path of the image file
"""
path, _ = self.samples[index]
sample = self.loader(path)
if self.transform:
sample = self.transform(sample)
return sample, path
class InferenceImageDataset(torch.utils.data.Dataset):
"""This class inherits from :class:`torch.utils.data.Dataset`. It prepares
data loader from an image folder.
In __getitem__, it returns image tensor and the image path.
"""
def __init__(self, image_folder, transform=None):
"""Initiates Dataset for inference image folder input.
Args:
image_folder (String): path of the image folder
transform (torchvision.transforms.Compose): the composed transforms
"""
self.paths = [os.path.join(image_folder, imgname)
for imgname in sorted(os.listdir(image_folder))
if os.path.splitext(imgname)[1].lower()
in VALID_IMAGE_EXT]
self.transform = transform
def __len__(self):
"""Gets the length of datasets"""
return len(self.paths)
def __getitem__(self, index):
"""Retrieves the (image, image path) from data index."""
image = read_image(self.paths[index])
if self.transform is not None:
image = self.transform(image)
else:
image = transforms.ToTensor()(image)
return image, self.paths[index]
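# --- Usage sketch (added for illustration; not part of the original module) ---
# Demonstrates the (tensor, path) pairs returned by InferenceImageDataset using
# a throwaway directory with one synthetic image. Names and sizes are
# placeholders.
if __name__ == "__main__":
    import tempfile
    import numpy as np
    from PIL import Image
    with tempfile.TemporaryDirectory() as tmp_dir:
        Image.fromarray(np.zeros((64, 64, 3), dtype=np.uint8)).save(
            os.path.join(tmp_dir, "sample.jpg"))
        example_dataset = InferenceImageDataset(tmp_dir)  # no transform -> ToTensor fallback
        image_tensor, image_path = example_dataset[0]
        print(image_tensor.shape, image_path)             # e.g. torch.Size([3, 64, 64]) .../sample.jpg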
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/metric_learning_recognition/dataloader/datasets/inference_datasets.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Original source of `_make_dataset` function taken from https://github.com/pytorch/vision/blob/31a4ef9f815a86a924d0faa7709e091b5118f00d/torchvision/datasets/folder.py#L48
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image Dataset for Metric Learning Recognition model training."""
import os
import logging
from typing import Callable, cast, Dict, List, Optional, Tuple
from torchvision.datasets.folder import (has_file_allowed_extension, find_classes,
default_loader, IMG_EXTENSIONS, Any)
from torchvision.datasets import DatasetFolder
def _make_dataset(
directory: str,
class_to_idx: Optional[Dict[str, int]] = None,
extensions: Optional[Tuple[str, ...]] = None,
is_valid_file: Optional[Callable[[str], bool]] = None,
return_empty_class: Optional[bool] = False) -> List[Tuple[str, int]]:
directory = os.path.expanduser(directory)
if class_to_idx is None:
_, class_to_idx = find_classes(directory)
elif not class_to_idx:
raise ValueError("'class_to_index' must have at least one entry to collect any samples.")
both_none = extensions is None and is_valid_file is None
both_something = extensions is not None and is_valid_file is not None
if both_none or both_something:
raise ValueError("Both extensions and is_valid_file cannot be None or not None at the same time")
if extensions is not None:
def is_valid_file_func(x: str) -> bool:
return has_file_allowed_extension(x, cast(Tuple[str, ...], extensions))
is_valid_file = is_valid_file_func
is_valid_file = cast(Callable[[str], bool], is_valid_file)
instances = []
available_classes = set()
for target_class in sorted(class_to_idx.keys()):
class_index = class_to_idx[target_class]
target_dir = os.path.join(directory, target_class)
if not os.path.isdir(target_dir):
continue
for root, _, fnames in sorted(os.walk(target_dir, followlinks=True)):
for fname in sorted(fnames):
path = os.path.join(root, fname)
if is_valid_file(path):
item = path, class_index
instances.append(item)
if target_class not in available_classes:
available_classes.add(target_class)
# replace empty class error with warning
empty_classes = set(class_to_idx.keys()) - available_classes
if empty_classes:
empty_class_report = ""
for clas in empty_classes:
empty_class_report += clas + " "
logging.warning(f"Empty classes detected in {directory}: {empty_class_report}")
if return_empty_class:
return instances, empty_classes
return instances
class MetricLearnImageFolder(DatasetFolder):
"""This class inherits from :class:`torchvision.datasets.DatasetFolder`.
It behaves like `torchvision.datasets.ImageFolder`, creating a dataset from a
classification folder layout, except that it allows the existence of empty
class folders. Users can also assign custom `classes` and `class_to_idx` to
the class.
"""
def __init__(
self,
root: str,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
loader: Callable[[str], Any] = default_loader,
is_valid_file: Optional[Callable[[str], bool]] = None,
class_to_idx=None,
classes=None,
class_mapping=None,
):
"""Intiates Dataset for image folder input.
Args:
root (String): Root directory path.
transform (Callable, Optional): A function/transform that takes in an PIL image and returns a transformed version. E.g, transforms.RandomCrop
target_transform (Callable, Optional): A function/transform that takes in the target and transforms it.
loader (Callable, Optional): A function to load an image given its path.
is_valid_file (Callable, Optional): A function that takes the path of an image file and checks whether it is a valid file (used to check for corrupt files)
class_to_idx (Dict[str, int], Optional): Dictionary mapping each class to an index.
classes (List[str], Optional): List of all classes.
class_mapping (Dict[str, str], Optional): Dictionary mapping each class to a new class name.
"""
super(DatasetFolder, self).__init__(root=root,
transform=transform,
target_transform=target_transform)
self.loader = loader
self.extensions = IMG_EXTENSIONS if is_valid_file is None else None
default_classes, default_class_to_idx = self.find_classes(self.root)
# check class assigned
if classes:
class_set = set(classes)
for clas in default_classes:
if clas not in class_set:
raise ValueError("The image folder classes should be a subset of the assigned classes.")
if class_to_idx:
for clas in default_class_to_idx:
if clas not in class_to_idx:
raise ValueError("The image folder classes should be a subset of the assigned classes.")
else:
classes = default_classes
class_to_idx = default_class_to_idx
samples, empty_classes = self.make_dataset(self.root, class_to_idx,
self.extensions, is_valid_file)
self.empty_classes = empty_classes
self.classes = classes
self.samples = samples
self.class_to_idx = class_to_idx
self.targets = [s[1] for s in self.samples]
self.imgs = self.samples
if class_mapping:
# check class mapping dict first:
for class_name in class_mapping:
if class_name not in self.class_to_idx:
raise ValueError(f"Class {class_name} is not in the dataset.")
for class_name in self.class_to_idx:
if class_name not in class_mapping:
raise ValueError(f"Class {class_name} is not in the class mapping dict.")
self.class_dict = {self.class_to_idx[k]: class_mapping[k] for k in self.class_to_idx}
else:
self.class_dict = {self.class_to_idx[k]: k for k in self.class_to_idx}
@staticmethod
def make_dataset(
directory: str,
class_to_idx: Dict[str, int],
extensions: Optional[Tuple[str, ...]] = None,
is_valid_file: Optional[Callable[[str], bool]] = None,
) -> List[Tuple[str, int]]:
"""Generates a list of samples of a form (path_to_sample, class).
This can be overridden to e.g. read files from a compressed zip file instead of from the disk.
Args:
directory (String): root dataset directory, corresponding to ``self.root``.
class_to_idx (Dict[str, int]): Dictionary mapping class name to class index.
extensions (Optional): A list of allowed extensions.
Either extensions or is_valid_file should be passed. Defaults to None.
is_valid_file (Optional): A function that takes the path of a file
and checks if the file is a valid file
(used to check for corrupt files). Both extensions and
is_valid_file should not be passed. Defaults to None.
Raises:
ValueError: In case ``class_to_idx`` is empty.
ValueError: In case ``extensions`` and ``is_valid_file`` are None or both are not None.
FileNotFoundError: In case no valid file was found for any class.
Returns:
List[Tuple[str, int]]: samples of a form (path_to_sample, class)
"""
if class_to_idx is None:
# prevent potential bug since make_dataset() would use the class_to_idx logic of the
# find_classes() function, instead of using that of the find_classes() method, which
# is potentially overridden and thus could have a different logic.
raise ValueError(
"The class_to_idx parameter cannot be None."
)
return _make_dataset(directory, class_to_idx,
extensions=extensions,
is_valid_file=is_valid_file,
return_empty_class=True)
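# --- Usage sketch (added for illustration; not part of the original module) ---
# Shows the behaviour that distinguishes MetricLearnImageFolder from
# torchvision's ImageFolder: class folders without images are kept and reported
# via `empty_classes` instead of raising an error. The directory layout and the
# synthetic image are placeholders.
if __name__ == "__main__":
    import tempfile
    import numpy as np
    from PIL import Image
    with tempfile.TemporaryDirectory() as root:
        os.makedirs(os.path.join(root, "cat"))
        os.makedirs(os.path.join(root, "dog"))  # intentionally left empty
        Image.fromarray(np.zeros((32, 32, 3), dtype=np.uint8)).save(
            os.path.join(root, "cat", "0.jpg"))
        example_dataset = MetricLearnImageFolder(root)
        print(example_dataset.classes)        # ['cat', 'dog']
        print(example_dataset.empty_classes)  # {'dog'} (reported as a warning)
        print(example_dataset.class_dict)     # {0: 'cat', 1: 'dog'}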
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/metric_learning_recognition/dataloader/datasets/image_datasets.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metric-learning recognition datasets module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/metric_learning_recognition/dataloader/datasets/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DINO module."""
# Temporarily override torch versioning from DLFW so that we disable warning from fairscale
# about torch version during ddp_sharded training. Fairscale doesn't handle commit versions well
# E.g. 1.13.0a0+d0d6b1f
import torch
import re
numbering = re.search(r"^(\d+).(\d+).(\d+)([^\+]*)(\+\S*)?$", torch.__version__)
torch.__version__ = ".".join([str(numbering.group(n)) for n in range(1, 4)])
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/dino/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DINO config module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/dino/config/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default config file."""
from typing import Optional, List, Dict
from dataclasses import dataclass, field
from omegaconf import MISSING
@dataclass
class DINODatasetConvertConfig:
"""Dataset Convert config."""
input_source: Optional[str] = None
data_root: Optional[str] = None
results_dir: str = MISSING
image_dir_name: Optional[str] = None
label_dir_name: Optional[str] = None
val_split: int = 0
num_shards: int = 20
num_partitions: int = 1
partition_mode: Optional[str] = None
image_extension: str = ".jpg"
mapping_path: Optional[str] = None
@dataclass
class DINOAugmentationConfig:
"""Augmentation config."""
scales: List[int] = field(default_factory=lambda: [480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800],
metadata={"description": "Random Scales for Augmentation"})
input_mean: List[float] = field(default_factory=lambda: [0.485, 0.456, 0.406],
metadata={"description": "Pixel mean value"})
input_std: List[float] = field(default_factory=lambda: [0.229, 0.224, 0.225],
metadata={"description": "Pixel Standard deviation value"})
train_random_resize: List[int] = field(default_factory=lambda: [400, 500, 600],
metadata={"description": "Training Randome Resize"})
horizontal_flip_prob: float = 0.5
train_random_crop_min: int = 384
train_random_crop_max: int = 600
random_resize_max_size: int = 1333
test_random_resize: int = 800
fixed_padding: bool = True
@dataclass
class DINODatasetConfig:
"""Dataset config."""
train_sampler: str = "default_sampler"
train_data_sources: Optional[List[Dict[str, str]]] = None
val_data_sources: Optional[List[Dict[str, str]]] = None
test_data_sources: Optional[Dict[str, str]] = None
infer_data_sources: Optional[Dict[str, str]] = None
batch_size: int = 4
workers: int = 8
pin_memory: bool = True
dataset_type: str = "serialized"
num_classes: int = 91
eval_class_ids: Optional[List[int]] = None
augmentation: DINOAugmentationConfig = DINOAugmentationConfig()
@dataclass
class DINOModelConfig:
"""DINO model config."""
pretrained_backbone_path: Optional[str] = None
backbone: str = "resnet_50"
num_queries: int = 300
num_feature_levels: int = 4
cls_loss_coef: float = 2.0
bbox_loss_coef: float = 5.0
giou_loss_coef: float = 2.0
# DINO training specific
interm_loss_coef: float = 1.0
num_select: int = 300
no_interm_box_loss: bool = False
# DINO model arch specific
pre_norm: bool = False # Add layer norm in encoder or not
two_stage_type: str = 'standard'
decoder_sa_type: str = 'sa'
embed_init_tgt: bool = True
fix_refpoints_hw: int = -1
pe_temperatureH: int = 20
pe_temperatureW: int = 20
return_interm_indices: List[int] = field(default_factory=lambda: [1, 2, 3, 4],
metadata={"description": "Indices to return from backbone"})
# for DN
use_dn: bool = True
dn_number: int = 100
dn_box_noise_scale: float = 1.0
dn_label_noise_ratio: float = 0.5
focal_alpha: float = 0.25
clip_max_norm: float = 0.1
dropout_ratio: float = 0.0
hidden_dim: int = 256
nheads: int = 8
enc_layers: int = 6
dec_layers: int = 6
dim_feedforward: int = 2048
dec_n_points: int = 4
enc_n_points: int = 4
aux_loss: bool = True
dilation: bool = False
train_backbone: bool = True
loss_types: List[str] = field(default_factory=lambda: ['labels', 'boxes'],
metadata={"description": "Losses to be used during training"})
backbone_names: List[str] = field(default_factory=lambda: ["backbone.0"],
metadata={"description": "Backbone name"})
linear_proj_names: List[str] = field(default_factory=lambda: ['reference_points', 'sampling_offsets'],
metadata={"description": "Linear Projection names"})
@dataclass
class OptimConfig:
"""Optimizer config."""
optimizer: str = "AdamW"
monitor_name: str = "val_loss" # {val_loss, train_loss}
lr: float = 2e-4
lr_backbone: float = 2e-5
lr_linear_proj_mult: float = 0.1
momentum: float = 0.9
weight_decay: float = 1e-4
lr_scheduler: str = "MultiStep"
lr_steps: List[int] = field(default_factory=lambda: [11], # 11, 20, 30
metadata={"description": "learning rate decay steps"})
lr_step_size: int = 11
lr_decay: float = 0.1
@dataclass
class DINOTrainExpConfig:
"""Train experiment config."""
num_gpus: int = 1
num_nodes: int = 1
resume_training_checkpoint_path: Optional[str] = None
pretrained_model_path: Optional[str] = None
validation_interval: int = 1
clip_grad_norm: float = 0.1
is_dry_run: bool = False
conf_threshold: float = 0.0
results_dir: Optional[str] = None
num_epochs: int = 12 # 12, 24, 36
checkpoint_interval: int = 1
optim: OptimConfig = OptimConfig()
precision: str = "fp32"
distributed_strategy: str = "ddp"
activation_checkpoint: bool = True
@dataclass
class DINOInferenceExpConfig:
"""Inference experiment config."""
num_gpus: int = 1
results_dir: Optional[str] = None
checkpoint: Optional[str] = None
trt_engine: Optional[str] = None
color_map: Dict[str, str] = MISSING
conf_threshold: float = 0.5
is_internal: bool = False
input_width: Optional[int] = None
input_height: Optional[int] = None
@dataclass
class DINOEvalExpConfig:
"""Evaluation experiment config."""
num_gpus: int = 1
results_dir: Optional[str] = None
input_width: Optional[int] = None
input_height: Optional[int] = None
checkpoint: Optional[str] = None
trt_engine: Optional[str] = None
conf_threshold: float = 0.0
@dataclass
class CalibrationConfig:
"""Calibration config."""
cal_image_dir: List[str] = MISSING
cal_cache_file: str = MISSING
cal_batch_size: int = 1
cal_batches: int = 1
@dataclass
class TrtConfig:
"""Trt config."""
data_type: str = "FP32"
workspace_size: int = 1024
min_batch_size: int = 1
opt_batch_size: int = 1
max_batch_size: int = 1
calibration: CalibrationConfig = CalibrationConfig()
@dataclass
class DINOExportExpConfig:
"""Export experiment config."""
results_dir: Optional[str] = None
gpu_id: int = 0
checkpoint: str = MISSING
onnx_file: str = MISSING
on_cpu: bool = False
input_channel: int = 3
input_width: int = 960
input_height: int = 544
opset_version: int = 12
batch_size: int = -1
verbose: bool = False
@dataclass
class DINOGenTrtEngineExpConfig:
"""Gen TRT Engine experiment config."""
results_dir: Optional[str] = None
gpu_id: int = 0
onnx_file: str = MISSING
trt_engine: Optional[str] = None
input_channel: int = 3
input_width: int = 960
input_height: int = 544
opset_version: int = 12
batch_size: int = -1
verbose: bool = False
tensorrt: TrtConfig = TrtConfig()
@dataclass
class ExperimentConfig:
"""Experiment config."""
model: DINOModelConfig = DINOModelConfig()
dataset: DINODatasetConfig = DINODatasetConfig()
train: DINOTrainExpConfig = DINOTrainExpConfig()
evaluate: DINOEvalExpConfig = DINOEvalExpConfig()
inference: DINOInferenceExpConfig = DINOInferenceExpConfig()
export: DINOExportExpConfig = DINOExportExpConfig()
gen_trt_engine: DINOGenTrtEngineExpConfig = DINOGenTrtEngineExpConfig()
encryption_key: Optional[str] = None
results_dir: str = MISSING
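# Illustrative sketch, not part of the original schema: the dataclasses above can be
# instantiated directly when experimenting outside of the hydra entrypoints. The field
# values below are placeholders, not recommendations.
#
#     from nvidia_tao_pytorch.cv.dino.config.default_config import ExperimentConfig
#
#     cfg = ExperimentConfig(results_dir="/tmp/dino_results")
#     cfg.train.num_epochs = 24
#     cfg.dataset.batch_size = 2
#     cfg.model.backbone = "resnet_50"
#
# In normal TAO usage these values are populated from a YAML experiment spec that the
# hydra_runner decorator validates against this schema.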
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/dino/config/default_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Generates TRT compatible DDETR onnx model. """
import torch
from torch.onnx import register_custom_op_symbolic
import onnx
import numpy as np
import onnx_graphsurgeon as gs
# register plugin
def nvidia_msda(g, value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights):
"""Returns nvidia_msda."""
return g.op("nvidia::MultiscaleDeformableAttnPlugin_TRT", value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights)
class ONNXExporter(object):
"""Onnx Exporter"""
@classmethod
def setUpClass(cls):
"""SetUpclass to set the manual seed for reproduceability"""
torch.manual_seed(123)
def export_model(self, model, batch_size, onnx_file, dummy_input, do_constant_folding=False, opset_version=12,
output_names=None, input_names=None, verbose=False):
""" Export_model.
        Setting do_constant_folding=False avoids a MultiscaleDeformableAttnPlugin_TRT error (tensors on 2 devices) when torch > 1.9.0.
        However, it causes tensorrt 8.0.3.4 (nvcr.io/nvidia/pytorch:21.11-py3 env) to report a Clip node error.
        This error is fixed in tensorrt >= 8.2.1.8 (nvcr.io/nvidia/tensorrt:22.01-py3).
Args:
model (nn.Module): torch model to export.
batch_size (int): batch size of the ONNX model. -1 means dynamic batch size.
onnx_file (str): output path of the onnx file.
dummy_input (torch.Tensor): input tensor.
do_constant_folding (bool): flag to indicate whether to fold constants in the ONNX model.
opset_version (int): opset_version of the ONNX file.
output_names (str): output names of the ONNX file.
input_names (str): input names of the ONNX file.
verbose (bool): verbosity level.
"""
if batch_size is None or batch_size == -1:
dynamic_axes = {"inputs": {0: "batch"}, "pred_logits": {0: "batch"}, "pred_boxes": {0: "batch"}}
else:
dynamic_axes = None
# CPU version requires opset_version > 16
if not next(model.parameters()).is_cuda and opset_version < 16:
print(f"CPU version of Deformable MHA requires opset version larger than 16. Overriding provided opset {opset_version} to 16.")
opset_version = 16
register_custom_op_symbolic('nvidia::MultiscaleDeformableAttnPlugin_TRT', nvidia_msda, opset_version)
torch.onnx.export(model, dummy_input, onnx_file,
input_names=input_names, output_names=output_names, export_params=True,
training=torch.onnx.TrainingMode.EVAL, opset_version=opset_version, do_constant_folding=do_constant_folding,
custom_opsets={"nvidia": 1}, verbose=verbose, dynamic_axes=dynamic_axes)
@staticmethod
def check_onnx(onnx_file):
"""Check onnx file.
Args:
onnx_file (str): path to ONNX file.
"""
model = onnx.load(onnx_file)
onnx.checker.check_model(model)
@staticmethod
def onnx_change(onnx_file):
"""Make dino onnx compatible with TRT. Additionally, fold constants.
Args:
onnx_file (str): path to ONNX file.
"""
graph = gs.import_onnx(onnx.load(onnx_file))
for node in graph.nodes:
if node.op == "MultiscaleDeformableAttnPlugin_TRT":
node.attrs = {"name": "MultiscaleDeformableAttnPlugin_TRT", "version": "1", "namespace": ""}
new_inputs = []
for i, inp in enumerate(node.inputs):
if i in (1, 2) and hasattr(inp, "values"):
new_inp = gs.Constant(name=inp.name, values=inp.values.astype(np.int32))
new_inputs.append(new_inp)
else:
new_inputs.append(inp)
node.inputs = new_inputs
        # Setting constant folding in torch results in an error because some layers are still on CPU.
        # Constant folding is required to replace the K value in TopK, which does not support a dynamic K.
        # Limit workspace size to 1GB to disable folding for MatMul.
graph.fold_constants(size_threshold=1024 * 1024 * 1024)
graph.cleanup().toposort()
onnx.save(gs.export_onnx(graph), onnx_file)
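# Illustrative usage sketch; the model object and file paths below are placeholders and not
# part of this module:
#
#     exporter = ONNXExporter()
#     dummy_input = torch.ones(1, 3, 544, 960, device="cuda")
#     exporter.export_model(model, batch_size=-1, onnx_file="/tmp/dino.onnx",
#                           dummy_input=dummy_input,
#                           input_names=["inputs"],
#                           output_names=["pred_logits", "pred_boxes"],
#                           do_constant_folding=False, opset_version=12)
#     exporter.check_onnx("/tmp/dino.onnx")
#     exporter.onnx_change("/tmp/dino.onnx")
#
# This mirrors how the DINO export script drives the class: export first, then validate the
# graph, then patch the MultiscaleDeformableAttnPlugin_TRT nodes and fold constants for TRT.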
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/dino/utils/onnx_export.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DINO utils module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/dino/utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert ODDataset to sharded json format."""
import os
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
from nvidia_tao_pytorch.cv.dino.config.default_config import DINODatasetConvertConfig
from nvidia_tao_pytorch.cv.deformable_detr.scripts.convert import run_experiment
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load experiment specification, additionally using schema for validation/retrieving the default values.
# --config_path and --config_name will be provided by the entrypoint script.
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="convert", schema=DINODatasetConvertConfig
)
def main(cfg: DINODatasetConvertConfig) -> None:
"""Run the convert dataset process."""
try:
run_experiment(experiment_config=cfg,
results_dir=cfg.results_dir)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Dataset convert finished successfully"
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Dataset convert was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/dino/scripts/convert.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DINO scripts module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/dino/scripts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Export DINO model to ONNX."""
import os
import torch
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
from nvidia_tao_pytorch.core.utilities import update_results_dir
from nvidia_tao_pytorch.cv.action_recognition.utils.common_utils import check_and_create, encrypt_onnx
from nvidia_tao_pytorch.core.cookbooks.tlt_pytorch_cookbook import TLTPyTorchCookbook
from nvidia_tao_pytorch.cv.dino.utils.onnx_export import ONNXExporter
from nvidia_tao_pytorch.cv.dino.config.default_config import ExperimentConfig
from nvidia_tao_pytorch.cv.dino.model.pl_dino_model import DINOPlModel
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load experiment specification, additionally using schema for validation/retrieving the default values.
# --config_path and --config_name will be provided by the entrypoint script.
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="export", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""CLI wrapper to run export.
    This function parses the command line interface for tlt-export, instantiates the respective
    exporter, and serializes the trained model to an etlt file. The tool also runs optimizations
    for the int8 backend.
    Args:
        cfg (ExperimentConfig): Hydra-parsed experiment configuration.
Returns:
No explicit returns.
"""
try:
torch.backends.cudnn.allow_tf32 = False
torch.backends.cuda.matmul.allow_tf32 = False
cfg = update_results_dir(cfg, task="export")
run_export(cfg, cfg.results_dir)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Export finished successfully"
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Export was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
def run_export(experiment_config, results_dir):
"""Wrapper to run export of tlt models.
Args:
        experiment_config (ExperimentConfig): Hydra-parsed experiment configuration.
        results_dir (str): Directory where the exported model and status logs are written.
Returns:
No explicit returns.
"""
check_and_create(results_dir)
# Set status logging
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
append=True
)
)
status_logging.get_status_logger().write(
status_level=status_logging.Status.STARTED,
message="Starting DINO export"
)
gpu_id = experiment_config.export.gpu_id
torch.cuda.set_device(gpu_id)
# Parsing command line arguments.
model_path = experiment_config.export.checkpoint
key = experiment_config.encryption_key
# set the encryption key:
TLTPyTorchCookbook.set_passphrase(key)
output_file = experiment_config.export.onnx_file
input_channel = experiment_config.export.input_channel
input_width = experiment_config.export.input_width
input_height = experiment_config.export.input_height
opset_version = experiment_config.export.opset_version
batch_size = experiment_config.export.batch_size
on_cpu = experiment_config.export.on_cpu
if batch_size is None or batch_size == -1:
input_batch_size = 1
else:
input_batch_size = batch_size
# Set default output filename if the filename
# isn't provided over the command line.
if output_file is None:
split_name = os.path.splitext(model_path)[0]
output_file = "{}.onnx".format(split_name)
    # Fail early if the target onnx file already exists.
assert not os.path.exists(output_file), "Default onnx file {} already "\
"exists".format(output_file)
# Make an output directory if necessary.
output_root = os.path.dirname(os.path.realpath(output_file))
if not os.path.exists(output_root):
os.makedirs(output_root)
# load model
pl_model = DINOPlModel.load_from_checkpoint(model_path,
map_location='cpu' if on_cpu else 'cuda',
experiment_spec=experiment_config,
export=True)
model = pl_model.model
model.eval()
if not on_cpu:
model.cuda()
input_names = ['inputs']
output_names = ["pred_logits", "pred_boxes"]
# create dummy input
if on_cpu:
dummy_input = torch.ones(input_batch_size, input_channel, input_height, input_width, device='cpu')
else:
dummy_input = torch.ones(input_batch_size, input_channel, input_height, input_width, device='cuda')
if output_file.endswith('.etlt'):
tmp_onnx_file = output_file.replace('.etlt', '.onnx')
else:
tmp_onnx_file = output_file
onnx_export = ONNXExporter()
onnx_export.export_model(model, batch_size,
tmp_onnx_file,
dummy_input,
input_names=input_names,
opset_version=opset_version,
output_names=output_names,
do_constant_folding=False,
verbose=experiment_config.export.verbose)
onnx_export.check_onnx(tmp_onnx_file)
onnx_export.onnx_change(tmp_onnx_file)
if output_file.endswith('.etlt') and key:
# encrypt the onnx if and only if key is provided and output file name ends with .etlt
encrypt_onnx(tmp_file_name=tmp_onnx_file,
output_file_name=output_file,
key=key)
os.remove(tmp_onnx_file)
print(f"ONNX file stored at {output_file}")
if __name__ == "__main__":
main()
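# Illustrative command line, assuming the standard TAO "dino" entrypoint; all paths and the
# checkpoint name are placeholders:
#
#     dino export -e /workspace/specs/export.yaml \
#                 export.checkpoint=/workspace/results/train/dino_model_epoch_011.pth \
#                 export.onnx_file=/workspace/results/export/dino_model.onnx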
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/dino/scripts/export.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train DINO model."""
import os
import re
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.strategies import DDPStrategy
from nvidia_tao_pytorch.core.callbacks.loggers import TAOStatusLogger
from nvidia_tao_pytorch.core.connectors.checkpoint_connector import TLTCheckpointConnector
from nvidia_tao_pytorch.core.cookbooks.tlt_pytorch_cookbook import TLTPyTorchCookbook
from nvidia_tao_pytorch.core.utilities import update_results_dir
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
from nvidia_tao_pytorch.cv.dino.config.default_config import ExperimentConfig
from nvidia_tao_pytorch.cv.dino.model.pl_dino_model import DINOPlModel
from nvidia_tao_pytorch.cv.deformable_detr.dataloader.od_data_module import ODDataModule
from nvidia_tao_pytorch.cv.deformable_detr.utils.misc import check_and_create
def run_experiment(experiment_config,
results_dir,
key):
"""Start the training."""
# set the encryption key:
TLTPyTorchCookbook.set_passphrase(key)
dm = ODDataModule(experiment_config.dataset)
    # find_unused_parameters=False and activation_checkpoint combination
# requires every output in forward function to participate in
# loss calculation. When return_interm_indices < 4, we must disable
# activation checkpointing
if experiment_config.train.activation_checkpoint and \
len(experiment_config.model.return_interm_indices) < 4 and \
experiment_config.train.num_gpus > 1:
experiment_config.train.activation_checkpoint = False
print("Disabling activation checkpointing since model is smaller")
activation_checkpoint = experiment_config.train.activation_checkpoint
# Load pretrained model as starting point if pretrained path is provided,
pretrained_path = experiment_config.train.pretrained_model_path
if pretrained_path is not None:
pt_model = DINOPlModel.load_from_checkpoint(pretrained_path,
map_location="cpu",
experiment_spec=experiment_config)
else:
pt_model = DINOPlModel(experiment_config)
total_epochs = experiment_config.train.num_epochs
check_and_create(results_dir)
status_logger_callback = TAOStatusLogger(
results_dir,
append=True,
num_epochs=total_epochs
)
status_logging.set_status_logger(status_logger_callback.logger)
num_gpus = experiment_config.train.num_gpus
num_nodes = experiment_config.train.num_nodes
validation_interval = experiment_config.train.validation_interval
ckpt_inter = experiment_config.train.checkpoint_interval
assert ckpt_inter <= total_epochs, (
f"Checkpoint interval {ckpt_inter} > Number of epochs {total_epochs}."
f"Please set experiment_config.train.checkpoint_interval < {total_epochs}"
)
assert validation_interval <= total_epochs, (
f"Validation interval {validation_interval} > Number of epochs {total_epochs}."
f"Please set experiment_config.train.validation_interval < {total_epochs}"
)
clip_grad_norm = experiment_config.train.clip_grad_norm
is_dry_run = experiment_config.train.is_dry_run
distributed_strategy = experiment_config.train.distributed_strategy
if experiment_config.train.precision.lower() in ["fp16", "fp32"]:
precision = int(experiment_config.train.precision.replace("fp", ""))
else:
raise NotImplementedError(f"{experiment_config.train.precision} is not supported. Only fp32 and fp16 are supported")
sync_batchnorm = False
strategy = None
if num_gpus > 1:
# By default find_unused_parameters is set to True in Lightning for backward compatibility
# This introduces extra overhead and can't work with activation checkpointing
# Ref: https://pytorch-lightning.readthedocs.io/en/1.8.5/advanced/model_parallel.html#when-using-ddp-strategies-set-find-unused-parameters-false
        # TODO: Starting from PTL 2.0, find_unused_parameters is set to False by default
if distributed_strategy.lower() == "ddp" and activation_checkpoint:
strategy = DDPStrategy(find_unused_parameters=False)
elif distributed_strategy.lower() == "ddp" and not activation_checkpoint:
strategy = 'ddp'
elif distributed_strategy.lower() == "ddp_sharded":
strategy = 'ddp_sharded'
# Override to FP16 for ddp_sharded as there's an error with FP32 during Positional Embedding forward pass
print("Overriding Precision to FP16 for ddp_sharded")
precision = 16
else:
raise NotImplementedError(f"{distributed_strategy} is not implemented. Only ddp and ddp_sharded are supported")
if "fan" in experiment_config.model.backbone:
print("Setting sync batch norm")
sync_batchnorm = True
trainer = Trainer(devices=num_gpus,
num_nodes=num_nodes,
max_epochs=total_epochs,
check_val_every_n_epoch=validation_interval,
default_root_dir=results_dir,
accelerator='gpu',
strategy=strategy,
precision=precision,
gradient_clip_val=clip_grad_norm,
replace_sampler_ddp=False,
sync_batchnorm=sync_batchnorm,
fast_dev_run=is_dry_run)
# Overload connector to enable intermediate ckpt encryption & decryption.
resume_ckpt = experiment_config.train.resume_training_checkpoint_path
if resume_ckpt and resume_ckpt.endswith('.tlt'):
if resume_ckpt is not None:
trainer._checkpoint_connector = TLTCheckpointConnector(trainer, resume_from_checkpoint=resume_ckpt)
else:
trainer._checkpoint_connector = TLTCheckpointConnector(trainer)
resume_ckpt = None
# setup checkpointer:
ModelCheckpoint.FILE_EXTENSION = ".pth"
checkpoint_callback = ModelCheckpoint(every_n_epochs=ckpt_inter,
dirpath=results_dir,
save_on_train_epoch_end=True,
monitor=None,
save_top_k=-1,
filename='dino_model_{epoch:03d}')
if resume_ckpt:
status_logging.get_status_logger().write(
message=f"Resuming training from checkpoint: {resume_ckpt}",
status_level=status_logging.Status.STARTED
)
resumed_epoch = re.search('epoch=(\\d+)', resume_ckpt)
if resumed_epoch:
resumed_epoch = int(resumed_epoch.group(1))
else:
resumed_epoch = 0
status_logger_callback.epoch_counter = resumed_epoch + 1 # make sure callback epoch matches resumed epoch
trainer.callbacks.append(status_logger_callback)
trainer.callbacks.append(checkpoint_callback)
trainer.fit(pt_model, dm, ckpt_path=resume_ckpt or None)
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load experiment specification, additionally using schema for validation/retrieving the default values.
# --config_path and --config_name will be provided by the entrypoint script.
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="train", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""Run the training process."""
try:
cfg = update_results_dir(cfg, task="train")
run_experiment(experiment_config=cfg,
key=cfg.encryption_key,
results_dir=cfg.results_dir)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Training finished successfully"
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Training was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
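# Illustrative command line, assuming the standard TAO "dino" entrypoint; paths are placeholders:
#
#     dino train -e /workspace/specs/train.yaml -r /workspace/results --gpus 2
#
# The entrypoint forwards -r/--gpus/--num_nodes as hydra overrides (results_dir, train.num_gpus,
# train.num_nodes) to this script.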
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/dino/scripts/train.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Inference on single patch. """
import os
from pytorch_lightning import Trainer
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.core.utilities import update_results_dir
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
from nvidia_tao_pytorch.core.cookbooks.tlt_pytorch_cookbook import TLTPyTorchCookbook
from nvidia_tao_pytorch.cv.deformable_detr.dataloader.od_data_module import ODDataModule
from nvidia_tao_pytorch.cv.deformable_detr.utils.misc import check_and_create
from nvidia_tao_pytorch.cv.dino.config.default_config import ExperimentConfig
from nvidia_tao_pytorch.cv.dino.model.pl_dino_model import DINOPlModel
def run_experiment(experiment_config, model_path, key, results_dir=None):
"""Start the inference."""
if not model_path:
raise FileNotFoundError("inference.checkpoint is not set!")
# set the encryption key:
TLTPyTorchCookbook.set_passphrase(key)
check_and_create(results_dir)
# Set status logging
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
append=True
)
)
status_logging.get_status_logger().write(
status_level=status_logging.Status.STARTED,
message="Starting DINO inference"
)
# tlt inference
if model_path.endswith('.tlt') or model_path.endswith('.pth'):
num_gpus = experiment_config.inference.num_gpus
# build data module
dm = ODDataModule(experiment_config.dataset)
dm.setup(stage="predict")
# Run inference using tlt model
acc_flag = None
if num_gpus > 1:
acc_flag = "ddp"
model = DINOPlModel.load_from_checkpoint(model_path,
map_location="cpu",
experiment_spec=experiment_config)
trainer = Trainer(devices=num_gpus,
default_root_dir=results_dir,
accelerator='gpu',
strategy=acc_flag)
trainer.predict(model, datamodule=dm)
elif model_path.endswith('.engine'):
raise NotImplementedError("TensorRT inference is supported through tao-deploy. "
"Please use tao-deploy to generate TensorRT enigne and run inference.")
else:
raise NotImplementedError("Model path format is only supported for .tlt or .pth")
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load experiment specification, additionally using schema for validation/retrieving the default values.
# --config_path and --config_name will be provided by the entrypoint script.
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="infer", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""Run the inference process."""
try:
cfg = update_results_dir(cfg, task="inference")
run_experiment(experiment_config=cfg,
key=cfg.encryption_key,
model_path=cfg.inference.checkpoint,
results_dir=cfg.results_dir)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Inference finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Inference was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/dino/scripts/inference.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluate a trained DINO model."""
import os
from pytorch_lightning import Trainer
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
from nvidia_tao_pytorch.core.utilities import update_results_dir
from nvidia_tao_pytorch.core.cookbooks.tlt_pytorch_cookbook import TLTPyTorchCookbook
from nvidia_tao_pytorch.cv.deformable_detr.dataloader.od_data_module import ODDataModule
from nvidia_tao_pytorch.cv.deformable_detr.utils.misc import check_and_create
from nvidia_tao_pytorch.cv.dino.config.default_config import ExperimentConfig
from nvidia_tao_pytorch.cv.dino.model.pl_dino_model import DINOPlModel
def run_experiment(experiment_config, model_path, key, results_dir=None):
"""Run experiment."""
if not model_path:
raise FileNotFoundError("evaluate.checkpoint is not set!")
# set the encryption key:
TLTPyTorchCookbook.set_passphrase(key)
check_and_create(results_dir)
# Set status logging
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
append=True
)
)
status_logging.get_status_logger().write(
status_level=status_logging.Status.STARTED,
message="Starting DINO evaluation"
)
# tlt inference
if model_path.endswith('.tlt') or model_path.endswith('.pth'):
# build dataloader
dm = ODDataModule(experiment_config.dataset)
dm.setup(stage="test")
# build model and load from the given checkpoint
model = DINOPlModel.load_from_checkpoint(model_path,
map_location="cpu",
experiment_spec=experiment_config)
num_gpus = experiment_config.evaluate.num_gpus
acc_flag = None
if num_gpus > 1:
acc_flag = "ddp"
trainer = Trainer(devices=num_gpus,
default_root_dir=results_dir,
accelerator='gpu',
strategy=acc_flag)
trainer.test(model, datamodule=dm)
elif model_path.endswith('.engine'):
raise NotImplementedError("TensorRT evaluation is supported through tao-deploy. "
"Please use tao-deploy to generate TensorRT enigne and run evaluation.")
else:
raise NotImplementedError("Model path format is only supported for .tlt or .pth")
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load experiment specification, additionally using schema for validation/retrieving the default values.
# --config_path and --config_name will be provided by the entrypoint script.
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="evaluate", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""Run the evaluate process."""
try:
cfg = update_results_dir(cfg, task="evaluate")
run_experiment(experiment_config=cfg,
key=cfg.encryption_key,
model_path=cfg.evaluate.checkpoint,
results_dir=cfg.results_dir)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Evaluation finished successfully"
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Evaluation was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/dino/scripts/evaluate.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entrypoint script for the DINO task."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/dino/entrypoint/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""'Entry point' script running subtasks related to DINO."""
import importlib
import os
import pkgutil
import argparse
import subprocess
import sys
from time import time
import nvidia_tao_pytorch.cv.dino.scripts as scripts
from nvidia_tao_pytorch.core.telemetry.nvml_utils import get_device_details
from nvidia_tao_pytorch.core.telemetry.telemetry import send_telemetry_data
def get_subtasks(package):
"""Get supported subtasks for a given task.
    This function lists the tasks in the .scripts folder.
    Returns:
        subtasks (dict): Dictionary mapping each subtask name to its module name and runner path.
"""
module_path = package.__path__
modules = {}
# Collect modules dynamically.
for _, task, is_package in pkgutil.walk_packages(module_path):
if is_package:
continue
module_name = package.__name__ + '.' + task
module_details = {
"module_name": module_name,
"runner_path": os.path.abspath(importlib.import_module(module_name).__file__),
}
modules[task] = module_details
return modules
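# Illustrative sketch of the structure returned by get_subtasks; the absolute path below is a
# placeholder:
#
#     {
#         "train": {
#             "module_name": "nvidia_tao_pytorch.cv.dino.scripts.train",
#             "runner_path": "/opt/nvidia_tao_pytorch/cv/dino/scripts/train.py",
#         },
#         "evaluate": {...},
#         "inference": {...},
#     }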
def launch(parser, subtasks, network=None):
"""CLI function that executes subtasks.
Args:
parser: Created parser object for a given task.
subtasks: list of subtasks for a given task.
network (str): Name of the network.
"""
# Subtasks for a given model.
parser.add_argument(
'subtask', default='train', choices=subtasks.keys(), help="Subtask for a given task/model.",
)
# Add standard TLT arguments.
parser.add_argument(
"-r",
"--results_dir",
help="Path to a folder where the experiment outputs should be written.",
default=None,
required=False,
)
parser.add_argument(
"--gpus",
help="Number of GPUs to run the train subtask.",
default=None,
type=int,
)
parser.add_argument(
"--num_nodes",
help="Number of nodes to run the train subtask.",
default=None,
type=int
)
parser.add_argument("-k", "--key", help="User specific encoding key to save or load a .tlt model.")
parser.add_argument("-e", "--experiment_spec_file", help="Path to the experiment spec file.", default=None)
# Parse the arguments.
args, unknown_args = parser.parse_known_args()
script_args = ""
# Process spec file for all commands except the one for getting spec files ;)
# Make sure the user provides spec file.
if args.experiment_spec_file is None:
print("ERROR: The subtask `{}` requires the following argument: -e/--experiment_spec_file".format(args.subtask))
exit(1)
# Make sure the file exists!
if not os.path.exists(args.experiment_spec_file):
print("ERROR: The indicated experiment spec file `{}` doesn't exist!".format(args.experiment_spec_file))
exit(1)
# Split spec file_path into config path and config name.
path, name = os.path.split(args.experiment_spec_file)
if path != '':
script_args += " --config-path " + os.path.realpath(path)
script_args += " --config-name " + name
# And add other params AFTERWARDS!
if args.results_dir:
script_args += " results_dir=" + args.results_dir
if args.subtask in ["train", "evaluate", "inference"]:
if args.gpus:
script_args += f" {args.subtask}.num_gpus={args.gpus}"
if args.subtask in ["train"]:
if args.num_nodes:
script_args += f" {args.subtask}.num_nodes={args.num_nodes}"
# Add encryption key.
if args.subtask in ["train", "evaluate", "inference", "export"]:
if args.key is not None:
script_args += " encryption_key=" + args.key
# Find relevant module and pass args.
script = subtasks[args.subtask]["runner_path"]
# Pass unknown args to call
unknown_args_as_str = " ".join(unknown_args)
# Create a system call.
call = "python " + script + script_args + " " + unknown_args_as_str
process_passed = True
start = time()
try:
# Run the script.
subprocess.check_call(call, shell=True, stdout=sys.stdout, stderr=sys.stdout)
except (KeyboardInterrupt, SystemExit):
print("Command was interrupted.")
process_passed = True
except subprocess.CalledProcessError as e:
if e.output is not None:
print(e.output)
process_passed = False
end = time()
time_lapsed = int(end - start)
try:
gpu_data = list()
for device in get_device_details():
gpu_data.append(device.get_config())
send_telemetry_data(
network,
args.subtask,
gpu_data,
num_gpus=args.gpus,
time_lapsed=time_lapsed,
pass_status=process_passed
)
except Exception as e:
print("Telemetry data couldn't be sent, but the command ran successfully.")
print(f"[Error]: {e}")
pass
if not process_passed:
print("Execution status: FAIL")
exit(1) # returning non zero return code from the process.
print("Execution status: PASS")
def main():
"""Main entrypoint wrapper."""
# Create parser for a given task.
parser = argparse.ArgumentParser(
"dino", add_help=True, description="TAO Toolkit"
)
# Build list of subtasks by inspecting the package.
subtasks = get_subtasks(scripts)
# Parse the arguments and launch the subtask.
launch(parser, subtasks, network="dino")
if __name__ == '__main__':
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/dino/entrypoint/dino.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Model functions. """
import torch
from torch import nn, Tensor
import math
import torch.nn.functional as F
def gen_encoder_output_proposals(memory: Tensor, memory_padding_mask: Tensor, spatial_shapes: Tensor, learnedwh: Tensor = None, export: bool = False):
"""Generate proposals from the output of the encoder.
Args:
        memory (torch.Tensor): bs, sum(hw), d_model
        memory_padding_mask (torch.Tensor): bs, sum(hw)
spatial_shapes (torch.Tensor): nlevel, 2
learnedwh (torch.Tensor): 2
export (bool): whether the model is in export stage.
Returns:
        output_memory (torch.Tensor): bs, sum(hw), d_model
        output_proposals (torch.Tensor): bs, sum(hw), 4
"""
N_, _, _ = memory.shape
proposals = []
_cur = 0
for lvl, (H_, W_) in enumerate(spatial_shapes):
if export: # Fixed dimensions for export in onnx
H_, W_ = int(H_), int(W_)
else:
H_, W_ = spatial_shapes[lvl, 0], spatial_shapes[lvl, 1]
mask_flatten_ = memory_padding_mask[:, _cur:(_cur + H_ * W_)].view(N_, H_, W_, 1)
valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1)
valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1)
grid_y, grid_x = torch.meshgrid(torch.linspace(0, H_ - 1, H_, dtype=torch.float32, device=memory.device),
torch.linspace(0, W_ - 1, W_, dtype=torch.float32, device=memory.device))
grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1) # H_, W_, 2
scale = torch.cat([valid_W.unsqueeze(-1), valid_H.unsqueeze(-1)], 1).view(N_, 1, 1, 2)
grid = (grid.unsqueeze(0).expand(N_, -1, -1, -1) + 0.5) / scale
if learnedwh is not None:
wh = torch.ones_like(grid) * learnedwh.sigmoid() * (2.0 ** lvl)
else:
wh = torch.ones_like(grid) * 0.05 * (2.0 ** lvl)
proposal = torch.cat((grid, wh), -1).view(N_, -1, 4)
proposals.append(proposal)
_cur += (H_ * W_)
output_proposals = torch.cat(proposals, 1)
output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(-1, keepdim=True)
output_proposals = torch.log(output_proposals / (1 - output_proposals)) # unsigmoid
output_proposals = output_proposals.masked_fill(memory_padding_mask.unsqueeze(-1), float('inf'))
output_proposals = output_proposals.masked_fill(~output_proposals_valid, float('inf'))
output_memory = memory
output_memory = output_memory.masked_fill(memory_padding_mask.unsqueeze(-1), float(0))
output_memory = output_memory.masked_fill(~output_proposals_valid, float(0))
return output_memory, output_proposals
class RandomBoxPerturber():
"""Random Box Perturber Class."""
def __init__(self, x_noise_scale=0.2, y_noise_scale=0.2, w_noise_scale=0.2, h_noise_scale=0.2) -> None:
"""Initialize RandomBoxPerturber Class.
Args:
x_noise_scale (float): scale of noise applied to x dimension
y_noise_scale (float): scale of noise applied to y dimension
w_noise_scale (float): scale of noise applied to w dimension
h_noise_scale (float): scale of noise applied to h dimension
"""
self.noise_scale = torch.Tensor([x_noise_scale, y_noise_scale, w_noise_scale, h_noise_scale])
def __call__(self, refanchors: Tensor) -> Tensor:
"""Call function."""
_, _, query_dim = refanchors.shape
device = refanchors.device
noise_raw = torch.rand_like(refanchors)
noise_scale = self.noise_scale.to(device)[:query_dim]
new_refanchors = refanchors * (1 + (noise_raw - 0.5) * noise_scale)
return new_refanchors.clamp_(0, 1)
class MLP(nn.Module):
"""Simple multi-layer perceptron (FFN)."""
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
"""FFN Initialization.
Args:
input_dim (int): input dimension.
hidden_dim (int): hidden dimension.
output_dim (int): output dimension.
num_layers (int): number of layers.
"""
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
def forward(self, x):
"""Forward function."""
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
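# Illustrative usage sketch; the shapes below are assumptions for this example. DINO-style
# detection heads typically regress 4 box coordinates from the transformer hidden dimension:
#
#     bbox_head = MLP(input_dim=256, hidden_dim=256, output_dim=4, num_layers=3)
#     boxes = bbox_head(torch.rand(2, 300, 256))   # -> (2, 300, 4)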
def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2):
"""Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.
Args:
inputs (torch.Tensor): A float tensor of arbitrary shape.
The predictions for each example.
targets (torch.Tensor): A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
        alpha (float): (optional) Weighting factor in range (0, 1) to balance
            positive vs negative examples. Default = 0.25.
gamma (float): Exponent of the modulating factor (1 - p_t) to
balance easy vs hard examples.
Returns:
Loss tensor
"""
prob = inputs.sigmoid()
ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
p_t = prob * targets + (1 - prob) * (1 - targets)
loss = ce_loss * ((1 - p_t) ** gamma)
if alpha >= 0:
alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
loss = alpha_t * loss
return loss.mean(1).sum() / num_boxes
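# Illustrative usage sketch; the tensor contents are arbitrary. Inputs are raw per-query logits
# and binary targets of the same shape, normalized by the number of ground-truth boxes:
#
#     logits = torch.randn(2, 300, 91)          # (batch, queries, classes)
#     targets = torch.zeros_like(logits)
#     targets[0, 0, 5] = 1.0                    # query 0 of image 0 matched to class 5
#     loss = sigmoid_focal_loss(logits, targets, num_boxes=1)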
def _get_activation_fn(activation):
"""Return an activation function given a string.
Args:
activation (str): type of activation function.
Returns:
PyTorch activation layer.
Raises:
RuntimeError: if unsupported activation type is provided.
"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
if activation == "prelu":
return nn.PReLU()
if activation == "selu":
return F.selu
raise RuntimeError(F"activation should be relu/gelu/glu/prelu/selu, not {activation}.")
def gen_sineembed_for_position(pos_tensor):
"""Generate sine embedding for position encoding.
Args:
pos_tensor (torch.Tensor): Positional Encoding.
Returns:
pos (torch.Tensor): Sine Embedding.
"""
# n_query, bs, _ = pos_tensor.size()
# sineembed_tensor = torch.zeros(n_query, bs, 256)
scale = 2 * math.pi
dim_t = torch.arange(128, dtype=torch.float32, device=pos_tensor.device)
dim_t = 10000 ** (2 * (dim_t // 2) / 128)
x_embed = pos_tensor[:, :, 0] * scale
y_embed = pos_tensor[:, :, 1] * scale
pos_x = x_embed[:, :, None] / dim_t
pos_y = y_embed[:, :, None] / dim_t
pos_x = torch.stack((pos_x[:, :, 0::2].sin(), pos_x[:, :, 1::2].cos()), dim=3).flatten(2)
pos_y = torch.stack((pos_y[:, :, 0::2].sin(), pos_y[:, :, 1::2].cos()), dim=3).flatten(2)
if pos_tensor.shape[-1] == 2:
pos = torch.cat((pos_y, pos_x), dim=2)
elif pos_tensor.shape[-1] == 4:
w_embed = pos_tensor[:, :, 2] * scale
pos_w = w_embed[:, :, None] / dim_t
pos_w = torch.stack((pos_w[:, :, 0::2].sin(), pos_w[:, :, 1::2].cos()), dim=3).flatten(2)
h_embed = pos_tensor[:, :, 3] * scale
pos_h = h_embed[:, :, None] / dim_t
pos_h = torch.stack((pos_h[:, :, 0::2].sin(), pos_h[:, :, 1::2].cos()), dim=3).flatten(2)
pos = torch.cat((pos_y, pos_x, pos_w, pos_h), dim=2)
else:
raise ValueError("Unknown pos_tensor shape(-1):{}".format(pos_tensor.size(-1)))
return pos
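# Illustrative usage sketch; the shapes are assumptions for this example. pos_tensor holds
# normalized (cx, cy) or (cx, cy, w, h) reference points laid out as (num_queries, batch, dim):
#
#     ref_points = torch.rand(300, 2, 4)
#     pos_embed = gen_sineembed_for_position(ref_points)   # -> (300, 2, 512)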
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/dino/model/model_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Matcher module. """
import torch
from torch import nn
from scipy.optimize import linear_sum_assignment
from nvidia_tao_pytorch.cv.deformable_detr.utils.box_ops import box_cxcywh_to_xyxy, generalized_box_iou
class HungarianMatcher(nn.Module):
"""This class computes an assignment between the targets and the predictions of the network.
For efficiency reasons, the targets don't include the no_object. Because of this, in general,
there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,
while the others are un-matched (and thus treated as non-objects).
"""
def __init__(self, cost_class: float = 1, cost_bbox: float = 1, cost_giou: float = 1, focal_alpha: float = 0.25):
"""Creates the matcher.
Args:
            cost_class (float): This is the relative weight of the classification error in the matching cost.
cost_bbox (float): This is the relative weight of the L1 error of the bounding box coordinates in the matching cost.
cost_giou (float): This is the relative weight of the giou loss of the bounding box in the matching cost.
"""
super().__init__()
self.cost_class = cost_class
self.cost_bbox = cost_bbox
self.cost_giou = cost_giou
        assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, "all costs can't be 0"
self.focal_alpha = focal_alpha
@torch.no_grad()
def forward(self, outputs, targets):
""" Performs the matching
Args:
outputs (dict): This is a dict that contains at least these entries:
"pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits
"pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates
            targets (list[dict]): This is a list of targets (len(targets) = batch_size), where each target is a dict containing:
"labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth
objects in the target) containing the class labels
"boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates
Returns:
A list of size batch_size, containing tuples of (index_i, index_j) where:
- index_i is the indices of the selected predictions (in order)
- index_j is the indices of the corresponding selected targets (in order)
For each batch element, it holds:
len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
"""
bs, num_queries = outputs["pred_logits"].shape[:2]
# We flatten to compute the cost matrices in a batch
out_prob = outputs["pred_logits"].flatten(0, 1).sigmoid() # [batch_size * num_queries, num_classes]
out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4]
# Also concat the target labels and boxes
tgt_ids = torch.cat([v["labels"] for v in targets])
tgt_bbox = torch.cat([v["boxes"] for v in targets])
# Compute the classification cost.
alpha = self.focal_alpha
gamma = 2.0
neg_cost_class = (1 - alpha) * (out_prob ** gamma) * (-(1 - out_prob + 1e-8).log())
pos_cost_class = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log())
cost_class = pos_cost_class[:, tgt_ids] - neg_cost_class[:, tgt_ids]
# Compute the L1 cost between boxes
cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1)
        # Compute the giou cost between boxes
cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox))
# Final cost matrix
C = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou
C = C.view(bs, num_queries, -1).cpu()
sizes = [len(v["boxes"]) for v in targets]
indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))]
return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
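# Illustrative usage sketch; the tensor contents are placeholders and the cost weights are chosen
# to mirror the default DINO loss coefficients:
#
#     matcher = HungarianMatcher(cost_class=2.0, cost_bbox=5.0, cost_giou=2.0)
#     outputs = {"pred_logits": torch.randn(2, 300, 91), "pred_boxes": torch.rand(2, 300, 4)}
#     targets = [{"labels": torch.tensor([3]), "boxes": torch.rand(1, 4)},
#                {"labels": torch.tensor([1, 7]), "boxes": torch.rand(2, 4)}]
#     indices = matcher(outputs, targets)   # one (pred_idx, tgt_idx) pair of tensors per image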
class SimpleMinsumMatcher(nn.Module):
"""This class computes an assignment between the targets and the predictions of the network.
For efficiency reasons, the targets don't include the no_object. Because of this, in general,
there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,
while the others are un-matched (and thus treated as non-objects).
"""
def __init__(self, cost_class: float = 1, cost_bbox: float = 1, cost_giou: float = 1, focal_alpha: float = 0.25):
"""Creates the matcher.
Args:
cost_class (float): This is the relative weight of the classification error in the matching cost
cost_bbox (float): This is the relative weight of the L1 error of the bounding box coordinates in the matching cost
cost_giou (float): This is the relative weight of the giou loss of the bounding box in the matching cost
"""
super().__init__()
self.cost_class = cost_class
self.cost_bbox = cost_bbox
self.cost_giou = cost_giou
        assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, "all costs can't be 0"
self.focal_alpha = focal_alpha
@torch.no_grad()
def forward(self, outputs, targets):
""" Performs the matching.
Args:
outputs (dict): This is a dict that contains at least these entries:
"pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits
"pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates
            targets (list[dict]): This is a list of targets (len(targets) = batch_size), where each target is a dict containing:
"labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth
objects in the target) containing the class labels
"boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates
Returns:
A list of size batch_size, containing tuples of (index_i, index_j) where:
- index_i is the indices of the selected predictions (in order)
- index_j is the indices of the corresponding selected targets (in order)
For each batch element, it holds:
len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
"""
bs, num_queries = outputs["pred_logits"].shape[:2]
# We flatten to compute the cost matrices in a batch
out_prob = outputs["pred_logits"].flatten(0, 1).sigmoid() # [batch_size * num_queries, num_classes]
out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4]
# Also concat the target labels and boxes
tgt_ids = torch.cat([v["labels"] for v in targets])
tgt_bbox = torch.cat([v["boxes"] for v in targets])
# Compute the classification cost.
alpha = self.focal_alpha
gamma = 2.0
neg_cost_class = (1 - alpha) * (out_prob ** gamma) * (-(1 - out_prob + 1e-8).log())
pos_cost_class = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log())
cost_class = pos_cost_class[:, tgt_ids] - neg_cost_class[:, tgt_ids]
# Compute the L1 cost between boxes
cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1)
# Compute the giou cost betwen boxes
cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox))
# Final cost matrix
C = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou
C = C.view(bs, num_queries, -1)
sizes = [len(v["boxes"]) for v in targets]
indices = []
device = C.device
for i, (c, _size) in enumerate(zip(C.split(sizes, -1), sizes)):
weight_mat = c[i]
idx_i = weight_mat.min(0)[1]
idx_j = torch.arange(_size).to(device)
indices.append((idx_i, idx_j))
return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/dino/model/matcher.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FAN Backbone for DINO"""
import math
from functools import partial
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, trunc_normal_, to_2tuple
from nvidia_tao_pytorch.cv.backbone.convnext_utils import _create_hybrid_backbone
from nvidia_tao_pytorch.cv.backbone.fan import (PositionalEncodingFourier, Mlp, ConvPatchEmbed,
ClassAttentionBlock, adaptive_avg_pool)
class HybridEmbed(nn.Module):
""" CNN Feature Map Embedding
Extract feature map from CNN, flatten, project to embedding dim.
"""
def __init__(self, backbone, img_size=224, patch_size=2, feature_size=None, in_chans=3, embed_dim=384):
"""Initialize HybridEmbedding class"""
super().__init__()
assert isinstance(backbone, nn.Module)
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.backbone = backbone
if feature_size is None:
with torch.no_grad():
# NOTE Most reliable way of determining output dims is to run forward pass
training = backbone.training
if training:
backbone.eval()
o = self.backbone.forward_features(torch.zeros(1, in_chans, img_size[0], img_size[1]))
if isinstance(o, (list, tuple)):
o = o[-1] # last feature if backbone outputs list/tuple of features
feature_size = o.shape[-2:]
feature_dim = o.shape[1]
backbone.train(training)
else:
feature_size = to_2tuple(feature_size)
if hasattr(self.backbone, 'feature_info'):
feature_dim = self.backbone.feature_info[-1]['num_chs']
else:
feature_dim = self.backbone.num_features
assert feature_size[0] % patch_size[0] == 0 and feature_size[1] % patch_size[1] == 0
self.grid_size = (feature_size[0] // patch_size[0], feature_size[1] // patch_size[1])
self.num_patches = self.grid_size[0] * self.grid_size[1]
self.proj = nn.Conv2d(feature_dim, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x, return_feat=False):
"""Forward function"""
x, out_list = self.backbone.forward_features(x, return_feat=return_feat)
_, _, H, W = x.shape
if isinstance(x, (list, tuple)):
x = x[-1] # last feature if backbone outputs list/tuple of features
x = self.proj(x).flatten(2).transpose(1, 2)
if return_feat:
return x, (H // self.patch_size[0], W // self.patch_size[1]), out_list
return x, (H // self.patch_size[0], W // self.patch_size[1])
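# Illustrative usage sketch; the backbone construction below is an assumption for this example
# and not necessarily the exact call used by the FAN-hybrid builders:
#
#     backbone = _create_hybrid_backbone(pretrained=False)   # hypothetical arguments
#     embed = HybridEmbed(backbone, img_size=224, patch_size=2, embed_dim=384)
#     tokens, (Hp, Wp) = embed(torch.randn(1, 3, 224, 224))  # tokens: (1, Hp * Wp, 384)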
class ChannelProcessing(nn.Module):
"""Channel Processing"""
def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0.,
mlp_hidden_dim=None, act_layer=nn.GELU, drop=None, norm_layer=nn.LayerNorm, cha_sr_ratio=1):
"""Initialize ChannelProcessing class"""
super().__init__()
assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."
self.dim = dim
self.num_heads = num_heads
self.temperature = nn.Parameter(torch.ones(num_heads, 1, 1))
self.cha_sr_ratio = cha_sr_ratio if num_heads > 1 else 1
self.mlp_v = Mlp(in_features=dim // self.cha_sr_ratio, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
self.norm_v = norm_layer(dim // self.cha_sr_ratio)
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.apply(self._init_weights)
def _init_weights(self, m):
"""Initialize weights"""
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def _gen_attn(self, q, k):
"""Returns attention"""
_, _, N, _ = k.shape
if torch.onnx.is_in_onnx_export():
            # If softmax dim is not the last dimension, then PyTorch decomposes the softmax ops into
# smaller ops like ReduceMax, ReduceSum, Sub, and Div.
# As a result, ONNX export fails for opset_version >= 12.
# Here, we rearrange the transpose so that softmax is done over the last dimension.
q = q.transpose(-1, -2).softmax(-1)
k = k.transpose(-1, -2).softmax(-1)
warnings.warn("Replacing default adatpive_avg_pool2d to custom implementation for ONNX export")
# adaptive_avg_pool2d is not supported for torch to onnx export
k = adaptive_avg_pool(k.transpose(-1, -2), (N, 1))
else:
q = q.softmax(-2).transpose(-1, -2)
k = torch.nn.functional.adaptive_avg_pool2d(k.softmax(-2), (N, 1))
attn = torch.sigmoid(q @ k)
return attn * self.temperature
def forward(self, x, H, W):
"""Forward functions """
_, N, C = x.shape
v = x.reshape(-1, N, self.num_heads, C // self.num_heads // self.cha_sr_ratio).permute(0, 2, 1, 3)
q = self.q(x).reshape(-1, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
k = x.reshape(-1, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
attn = self._gen_attn(q, k)
attn = self.attn_drop(attn)
Bv, Hd, Nv, Cv = v.shape
v = self.norm_v(self.mlp_v(v.transpose(1, 2).reshape(Bv, Nv, Hd * Cv), H, W)).reshape(Bv, Nv, Hd, Cv).transpose(1, 2)
repeat_time = N // attn.shape[-1]
attn = attn.repeat_interleave(repeat_time, dim=-1) if attn.shape[-1] > 1 else attn
x = (attn * v.transpose(-1, -2)).permute(0, 3, 1, 2).reshape(-1, N, C)
return x, attn * v.transpose(-1, -2)
@torch.jit.ignore
def no_weight_decay(self):
"""Ignore during weight decay"""
return {'temperature'}
class TokenMixing(nn.Module):
"""Token Mixing"""
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
"""Initialize TokenMixing class"""
super().__init__()
assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."
self.dim = dim
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.fast_attn = hasattr(torch._C._nn, '_scaled_dot_product_attention') # pylint:disable=I1101
cha_sr = 1
self.q = nn.Linear(dim, dim // cha_sr, bias=qkv_bias)
self.kv = nn.Linear(dim, dim * 2 // cha_sr, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.apply(self._init_weights)
def _init_weights(self, m):
"""Initialize weights"""
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x):
"""Forward function"""
B, N, C = x.shape
q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
k, v = kv[0], kv[1]
if torch.onnx.is_in_onnx_export() or not self.fast_attn:
attn = (q * self.scale @ k.transpose(-2, -1))
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
else:
# Since Torch 1.14, scaled_dot_product_attention has been optimized for performance
x, attn = F._scaled_dot_product_attention(
q, k, v,
dropout_p=self.attn_drop.p,
)
x = x.transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x, attn
class FANBlock(nn.Module):
"""FAN block from https://arxiv.org/abs/2204.12451"""
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.,
drop_path=0., norm_layer=nn.LayerNorm, eta=1.):
"""Initialize FANBlock class"""
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = TokenMixing(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = ChannelProcessing(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop,
drop=drop, mlp_hidden_dim=int(dim * mlp_ratio))
self.gamma1 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)
self.gamma2 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)
self.H = None
self.W = None
def forward(self, x, attn=None, return_attention=False):
"""Forward function"""
H, W = self.H, self.W
x_new, attn = self.attn(self.norm1(x))
x = x + self.drop_path(self.gamma1 * x_new)
x_new, attn = self.mlp(self.norm2(x), H, W)
x = x + self.drop_path(self.gamma2 * x_new)
self.H, self.W = H, W
if return_attention:
return attn
return x
class FAN(nn.Module):
"""FAN implementation from https://arxiv.org/abs/2204.12451
Based on timm https://github.com/rwightman/pytorch-image-models/tree/master/timm
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., backbone=None,
act_layer=None, norm_layer=None, cls_attn_layers=2, use_pos_embed=True, eta=1., tokens_norm=False,
out_index=-1, out_channels=None, out_indices=[0, 1, 2, 3], patch_embed="ConvNext", activation_checkpoint=True):
"""Initialize FAN class"""
super(FAN, self).__init__()
img_size = to_2tuple(img_size)
self.activation_checkpoint = activation_checkpoint
self.out_index = out_index
self.num_classes = num_classes
self.num_features = self.embed_dim = embed_dim
self.out_channels = out_channels
norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
act_layer = act_layer or nn.GELU
if patch_embed == "ConvNext":
self.patch_embed = HybridEmbed(backbone=backbone, patch_size=2, embed_dim=embed_dim)
else:
self.patch_embed = ConvPatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, act_layer=act_layer)
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.use_pos_embed = use_pos_embed
if use_pos_embed:
self.pos_embed = PositionalEncodingFourier(dim=embed_dim)
self.pos_drop = nn.Dropout(p=drop_rate)
build_block = FANBlock
self.blocks = nn.ModuleList([
build_block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,
attn_drop=attn_drop_rate, drop_path=drop_path_rate, norm_layer=norm_layer, eta=eta)
for _ in range(depth)])
self.cls_attn_blocks = nn.ModuleList([
ClassAttentionBlock(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,
attn_drop=attn_drop_rate, act_layer=act_layer, norm_layer=norm_layer, eta=eta, tokens_norm=tokens_norm)
for _ in range(cls_attn_layers)])
self.out_indices = out_indices
for i_layer in self.out_indices:
layer = nn.LayerNorm(self.out_channels[i_layer])
layer_name = f'out_norm{i_layer}'
self.add_module(layer_name, layer)
self.learnable_downsample = nn.Conv2d(in_channels=embed_dim,
out_channels=768,
kernel_size=3,
stride=2,
padding=1,
dilation=1,
groups=1,
bias=True)
# Init weights
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
"""Initialize weights"""
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
"""layers to ignore for weight decay"""
return {'pos_embed', 'cls_token'}
def get_classifier(self):
"""Returns classifier"""
return self.head
def reset_classifier(self, num_classes, global_pool=''):
"""Redefine classifier of FAN"""
self.num_classes = num_classes
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
"""Extract features
Args:
x: tensor
Returns:
            final_outs: dictionary containing the feature level name as key and the corresponding feature map as value
"""
outs = []
B = x.shape[0]
        out_index = [self.out_index]
        if isinstance(self.patch_embed, HybridEmbed):
            x, (Hp, Wp), out_list = self.patch_embed(x, return_feat=True)
            outs = outs + out_list
        else:
            x, (Hp, Wp) = self.patch_embed(x)
if self.use_pos_embed:
pos_encoding = self.pos_embed(B, Hp, Wp).reshape(B, -1, x.shape[1]).permute(0, 2, 1)
x = x + pos_encoding
x = self.pos_drop(x)
for idx, blk in enumerate(self.blocks):
blk.H, blk.W = Hp, Wp
# Disable activation checkpointing during ONNX export
if torch.onnx.is_in_onnx_export() or not self.activation_checkpoint:
x = blk(x)
else:
x = checkpoint.checkpoint(blk, x)
Hp, Wp = blk.H, blk.W
if idx in out_index:
outs.append(x.reshape(B, Hp, Wp, -1).permute(0, 3, 1, 2).contiguous())
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
for blk in self.cls_attn_blocks:
x = blk(x)
x = x[:, 1:, :].reshape(B, Hp, Wp, -1).permute(0, 3, 1, 2).contiguous()
x = self.learnable_downsample(x)
outs.append(x)
final_outs = {}
for i, out in enumerate(outs):
if i in self.out_indices:
out = out.permute(0, 2, 3, 1).contiguous()
norm_layer = getattr(self, f'out_norm{i}')
out = norm_layer(out)
final_outs[f'p{i}'] = out.permute(0, 3, 1, 2).contiguous()
del outs
return final_outs
def forward(self, x):
"""Forward functions"""
outs = self.forward_features(x)
return outs
def get_last_selfattention(self, x):
"""Returns last self-attention"""
B = x.shape[0]
        # x is (B, N, C). (Hp, Wp) is (height in units of patches, width in units of patches)
x, (Hp, Wp) = self.patch_embed(x)
if self.use_pos_embed:
# `pos_embed` (B, C, Hp, Wp), reshape -> (B, C, N), permute -> (B, N, C)
pos_encoding = self.pos_embed(B, Hp, Wp).reshape(B, -1, x.shape[1]).permute(0, 2, 1)
x = x + pos_encoding
x = self.pos_drop(x)
for blk in self.blocks:
x = blk(x, Hp, Wp)
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
attn = None
for i, blk in enumerate(self.cls_attn_blocks):
if i < len(self.blocks) - 1:
x = blk(x)
else:
# return attention of the last block
attn = blk(x, return_attention=True)
return attn
return attn
def checkpoint_filter_fn(state_dict, model):
"""Filter loaded checkpoints"""
if 'model' in state_dict:
state_dict = state_dict['model']
use_pos_embed = getattr(model, 'pos_embed', None) is not None
pos_embed_keys = [k for k in state_dict if k.startswith('pos_embed')]
for k in pos_embed_keys:
if use_pos_embed:
state_dict[k.replace('pos_embeder.', 'pos_embed.')] = state_dict.pop(k)
else:
del state_dict[k]
if 'cls_attn_blocks.0.attn.qkv.weight' in state_dict and 'cls_attn_blocks.0.attn.q.weight' in model.state_dict():
num_ca_blocks = len(model.cls_attn_blocks)
for i in range(num_ca_blocks):
qkv_weight = state_dict.pop(f'cls_attn_blocks.{i}.attn.qkv.weight')
qkv_weight = qkv_weight.reshape(3, -1, qkv_weight.shape[-1])
for j, subscript in enumerate('qkv'):
state_dict[f'cls_attn_blocks.{i}.attn.{subscript}.weight'] = qkv_weight[j]
qkv_bias = state_dict.pop(f'cls_attn_blocks.{i}.attn.qkv.bias', None)
if qkv_bias is not None:
qkv_bias = qkv_bias.reshape(3, -1)
for j, subscript in enumerate('qkv'):
state_dict[f'cls_attn_blocks.{i}.attn.{subscript}.bias'] = qkv_bias[j]
return state_dict
def fan_tiny_8_p4_hybrid(out_indices=[0, 1, 2, 3], activation_checkpoint=True, **kwargs):
"""FAN Hybrid Tiny
Args:
out_indices (list): List of block indices to return as feature
"""
depth = 8
model_args = dict(depths=[3, 3], dims=[128, 256, 512, 1024], use_head=False)
backbone = _create_hybrid_backbone(pretrained=False, pretrained_strict=False, **model_args)
model = FAN(patch_size=16, in_chans=3, num_classes=80, embed_dim=192, depth=depth, backbone=backbone,
num_heads=8, mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.3,
act_layer=None, norm_layer=None, cls_attn_layers=2, use_pos_embed=True, eta=1., tokens_norm=True,
out_index=7, out_channels=[128, 256, 192, 768], out_indices=out_indices,
activation_checkpoint=activation_checkpoint, **kwargs)
return model
def fan_small_12_p4_hybrid(out_indices=[0, 1, 2, 3], activation_checkpoint=True, **kwargs):
"""FAN Hybrid Small
Args:
out_indices (list): List of block indices to return as feature
"""
depth = 10
model_args = dict(depths=[3, 3], dims=[128, 256, 512, 1024], use_head=False)
backbone = _create_hybrid_backbone(pretrained=False, pretrained_strict=False, **model_args)
model = FAN(patch_size=16, in_chans=3, num_classes=80, embed_dim=384, depth=depth, backbone=backbone,
num_heads=8, mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.3,
act_layer=None, norm_layer=None, cls_attn_layers=2, use_pos_embed=True, eta=1., tokens_norm=True,
out_index=9, out_channels=[128, 256, 384, 768], out_indices=out_indices,
activation_checkpoint=activation_checkpoint, **kwargs)
return model
def fan_base_12_p4_hybrid(out_indices=[0, 1, 2, 3], activation_checkpoint=True, **kwargs):
"""FAN Hybrid Base
Args:
out_indices (list): List of block indices to return as feature
"""
depth = 16
model_args = dict(depths=[3, 3], dims=[128, 256, 512, 1024], use_head=False)
backbone = _create_hybrid_backbone(pretrained=False, pretrained_strict=False, **model_args)
model = FAN(patch_size=16, in_chans=3, num_classes=80, embed_dim=448, depth=depth, backbone=backbone,
num_heads=8, mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.3,
act_layer=None, norm_layer=None, cls_attn_layers=2, use_pos_embed=True, eta=1., tokens_norm=True,
out_index=15, out_channels=[128, 256, 448, 768], out_indices=out_indices,
activation_checkpoint=activation_checkpoint, **kwargs)
return model
def fan_large_12_p4_hybrid(out_indices=[0, 1, 2, 3], activation_checkpoint=True, **kwargs):
"""FAN Hybrid Large
Args:
out_indices (list): List of block indices to return as feature
"""
depth = 22
model_args = dict(depths=[3, 5], dims=[128, 256, 512, 1024], use_head=False)
backbone = _create_hybrid_backbone(pretrained=False, pretrained_strict=False, **model_args)
model = FAN(patch_size=16, in_chans=3, num_classes=80, embed_dim=480, depth=depth, backbone=backbone,
num_heads=10, mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.3,
act_layer=None, norm_layer=None, cls_attn_layers=2, use_pos_embed=True, eta=1., tokens_norm=True,
out_index=18, out_channels=[128, 256, 480, 768], out_indices=out_indices,
activation_checkpoint=activation_checkpoint, **kwargs)
return model
fan_model_dict = {
'fan_tiny': fan_tiny_8_p4_hybrid,
'fan_small': fan_small_12_p4_hybrid,
'fan_base': fan_base_12_p4_hybrid,
'fan_large': fan_large_12_p4_hybrid
}
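if __name__ == "__main__":
    # Illustrative sketch (not part of the original module): build one of the FAN hybrid
    # backbones from the registry above and run a dummy forward pass. Assumes the ConvNeXt
    # hybrid backbone utilities are available and that a 224x224 RGB input is valid for
    # the default configuration.
    model = fan_model_dict['fan_tiny'](out_indices=[0, 1, 2, 3], activation_checkpoint=False)
    model.eval()
    with torch.no_grad():
        features = model(torch.zeros(1, 3, 224, 224))
    for name, feat in features.items():
        print(name, feat.shape)  # multi-scale feature maps keyed as 'p0'..'p3'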
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/dino/model/fan.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Backbone modules. """
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from torchvision.models._utils import IntermediateLayerGetter
from typing import Optional
from typing import Dict, List
from nvidia_tao_pytorch.cv.deformable_detr.utils.misc import get_global_rank, load_pretrained_weights
from nvidia_tao_pytorch.cv.deformable_detr.model.resnet import resnet50
from nvidia_tao_pytorch.cv.deformable_detr.model.backbone import FrozenBatchNorm2d
from nvidia_tao_pytorch.cv.deformable_detr.model.gc_vit import gc_vit_model_dict
from nvidia_tao_pytorch.cv.dino.model.fan import fan_model_dict
class BackboneBase(nn.Module):
"""BackboneBase class."""
def __init__(self, model_name, backbone: nn.Module, train_backbone: bool, num_channels: int, return_interm_indices: list, export: bool):
"""Initialize the Backbone Base Class.
Args:
model_name (str): backbone model name.
backbone (nn.Module): backbone torch module.
train_backbone (bool): flag whether we want to train the backbone or not.
num_channels (int): channel size.
            return_interm_indices (list): list of layer indices to return as backbone features.
            export (bool): flag to indicate whether exporting to onnx or not.
"""
super().__init__()
self.export = export
self.model_name = model_name
if 'resnet' in model_name:
for name, parameter in backbone.named_parameters():
if not train_backbone or 'layer2' not in name and 'layer3' not in name and 'layer4' not in name:
parameter.requires_grad_(False)
return_layers = {}
# 4 scale: {'layer2': '1', 'layer3': '2', 'layer4': '3'}
# 5 scale: {'layer1': '0', 'layer2': '1', 'layer3': '2', 'layer4': '3'}
for layer_index in return_interm_indices:
return_layers.update({"layer{}".format(layer_index + 1): "{}".format(layer_index)})
self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
elif 'fan' in model_name or 'gc_vit' in model_name:
for name, parameter in backbone.named_parameters():
if not train_backbone:
parameter.requires_grad_(False)
# FAN Small case
# 4 scale: {'patch_embed.backbone.stages.1': 'p1', 'blocks.9': 'p2', 'learnable_downsample': 'p4'}
# 5 scale: {'patch_embed.backbone.stages.0': 'p0', 'patch_embed.backbone.stages.1': 'p1', 'blocks.9': 'p2', 'learnable_downsample': 'p4'}
self.body = backbone
self.num_channels = num_channels
self.return_interm_indices = return_interm_indices
def forward(self, input_tensors):
"""Forward function for Backboone base.
Args:
input_tensors (torch.Tensor): input tensor.
Returns:
            out (Dict[str, tuple]): dictionary mapping feature level names to (feature, mask) tuples.
"""
if self.export:
batch_shape = input_tensors.shape
dtype = input_tensors.dtype
device = input_tensors.device
# when exporting, the input shape is fixed and no padding mask is needed.
masks = torch.zeros((batch_shape[0], 1, batch_shape[2], batch_shape[3]), dtype=dtype, device=device)
input_tensor = input_tensors
else:
masks = input_tensors[:, 3:4]
input_tensor = input_tensors[:, :3]
xs = self.body(input_tensor)
out: Dict[str, torch.Tensor] = {}
for name, x in xs.items():
mask = F.interpolate(masks.float(), size=x.shape[-2:])
mask = mask.to(torch.bool)
out[name] = (x, mask)
return out
class Backbone(BackboneBase):
"""Backbone for DINO."""
def __init__(self, name: str,
pretrained_backbone_path: Optional[str],
train_backbone: bool,
return_interm_indices: list,
dilation: bool,
export: bool,
activation_checkpoint: bool):
"""Initialize the Backbone Class.
Args:
            name (str): backbone model name.
            pretrained_backbone_path (str): optional path to the pretrained backbone.
            train_backbone (bool): flag whether we want to train the backbone or not.
            return_interm_indices (list): list of layer indices to return as backbone features.
            dilation (bool): flag whether we want to use dilation or not.
            export (bool): flag to indicate whether exporting to onnx or not.
activation_checkpoint (bool): flag to indicate whether to run activation checkpointing during training.
Raises:
ValueError: If return_interm_indices does not have valid range or has duplicate index.
NotImplementedError: If invalid backbone name was provided.
"""
return_interm_indices = np.array(return_interm_indices)
if not np.logical_and(return_interm_indices >= 0, return_interm_indices <= 4).all():
raise ValueError(f"Invalid range for return_interm_indices. "
f"Provided return_interm_indices is {return_interm_indices}.")
if len(np.unique(return_interm_indices)) != len(return_interm_indices):
raise ValueError(f"Duplicate index in the provided return_interm_indices: {return_interm_indices}")
if name == 'resnet_50':
if export:
_norm_layer = nn.BatchNorm2d
else:
_norm_layer = FrozenBatchNorm2d
backbone = resnet50(norm_layer=_norm_layer,
replace_stride_with_dilation=[False, False, dilation])
num_channels_all = np.array([256, 512, 1024, 2048])
num_channels = num_channels_all[return_interm_indices]
elif 'fan' in name:
if name not in fan_model_dict:
raise NotImplementedError(f"{name} is not supported FAN backbone. "
f"Supported architecutres: {fan_model_dict.keys()}")
backbone = fan_model_dict[name](out_indices=return_interm_indices,
activation_checkpoint=activation_checkpoint)
num_channels_all = np.array(backbone.out_channels)
num_channels = num_channels_all[return_interm_indices]
elif 'gc_vit' in name:
if name not in gc_vit_model_dict:
raise NotImplementedError(f"{name} is not supported GCViT backbone. "
f"Supported architecutres: {gc_vit_model_dict.keys()}")
backbone = gc_vit_model_dict[name](out_indices=return_interm_indices,
activation_checkpoint=activation_checkpoint)
num_channels_all = np.array(backbone.num_features)
num_channels = num_channels_all[return_interm_indices]
else:
supported_arch = list(fan_model_dict.keys()) + list(gc_vit_model_dict.keys()) + ["resnet_50"]
raise NotImplementedError(f"Backbone {name} is not implemented. Supported architectures {supported_arch}")
if pretrained_backbone_path:
checkpoint = load_pretrained_weights(pretrained_backbone_path)
_tmp_st_output = backbone.load_state_dict(checkpoint, strict=False)
if get_global_rank() == 0:
print(f"Loaded pretrained weights from {pretrained_backbone_path}")
print(f"{_tmp_st_output}")
super().__init__(name, backbone, train_backbone, num_channels, return_interm_indices, export)
class Joiner(nn.Sequential):
"""Joiner Class."""
def __init__(self, backbone):
"""Initialize the Joiner Class.
Args:
backbone (nn.Module): backbone module.
"""
super().__init__(backbone)
self.num_channels = backbone.num_channels
def forward(self, input_tensors):
"""Forward function for Joiner to prepare the backbone output into transformer input format.
Args:
input_tensors (torch.Tensor): input tensor.
Returns:
            out (List): list of (feature, mask) tuples from the backbone feature levels.
"""
xs = self[0](input_tensors)
out: List[torch.Tensor] = []
for _, x in sorted(xs.items()):
out.append(x)
return out
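if __name__ == "__main__":
    # Illustrative sketch (not part of the original module): wrap a ResNet-50 backbone in a
    # Joiner and run a dummy forward pass. When export=False the 4th input channel carries
    # the padding mask consumed by BackboneBase.forward.
    backbone = Backbone('resnet_50', pretrained_backbone_path=None, train_backbone=False,
                        return_interm_indices=[1, 2, 3], dilation=False,
                        export=False, activation_checkpoint=False)
    model = Joiner(backbone)
    dummy_input = torch.zeros(1, 4, 544, 960)  # 3 image channels + 1 mask channel
    with torch.no_grad():
        features = model(dummy_input)
    for feat, mask in features:
        print(feat.shape, mask.shape)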
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/dino/model/backbone.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Main PTL model file for DINO. """
import copy
import datetime
import os
import json
from typing import Any, Dict
import torch
from torch.optim.lr_scheduler import MultiStepLR, StepLR
from fairscale.optim import OSS
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_only
from nvidia_tao_pytorch.core.cookbooks.tlt_pytorch_cookbook import TLTPyTorchCookbook
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.cv.action_recognition.utils.common_utils import patch_decrypt_checkpoint
from nvidia_tao_pytorch.pointcloud.pointpillars.pcdet.utils import common_utils
from nvidia_tao_pytorch.cv.deformable_detr.dataloader.od_dataset import CoCoDataMerge
from nvidia_tao_pytorch.cv.deformable_detr.utils.coco import COCO
from nvidia_tao_pytorch.cv.deformable_detr.utils.coco_eval import CocoEvaluator
from nvidia_tao_pytorch.cv.dino.model.build_nn_model import build_model
from nvidia_tao_pytorch.cv.dino.model.matcher import HungarianMatcher
from nvidia_tao_pytorch.cv.dino.model.criterion import SetCriterion
from nvidia_tao_pytorch.cv.deformable_detr.model.post_process import PostProcess, save_inference_prediction, threshold_predictions
# pylint:disable=too-many-ancestors
class DINOPlModel(pl.LightningModule):
"""PTL module for DINO Object Detection Model."""
def __init__(self, experiment_spec, export=False):
"""Init training for DINO Model."""
super().__init__()
self.experiment_spec = experiment_spec
self.dataset_config = experiment_spec.dataset
self.model_config = experiment_spec.model
self.eval_class_ids = self.dataset_config["eval_class_ids"]
self.dataset_type = self.dataset_config["dataset_type"]
if self.dataset_type not in ("serialized", "default"):
raise ValueError(f"{self.dataset_type} is not supported. Only serialized and default are supported.")
# init the model
self._build_model(export)
self._build_criterion()
self.status_logging_dict = {}
def _build_model(self, export):
"""Internal function to build the model."""
self.model = build_model(experiment_config=self.experiment_spec, export=export)
def _build_criterion(self):
"""Internal function to build the loss function."""
self.matcher = HungarianMatcher(cost_class=self.model_config["cls_loss_coef"],
cost_bbox=self.model_config["bbox_loss_coef"],
cost_giou=self.model_config["giou_loss_coef"])
weight_dict = {'loss_ce': self.model_config["cls_loss_coef"],
'loss_bbox': self.model_config["bbox_loss_coef"],
'loss_giou': self.model_config["giou_loss_coef"]}
clean_weight_dict_wo_dn = copy.deepcopy(weight_dict)
# for de-noising training
if self.model_config['use_dn']:
weight_dict['loss_ce_dn'] = self.model_config["cls_loss_coef"]
weight_dict['loss_bbox_dn'] = self.model_config["bbox_loss_coef"]
weight_dict['loss_giou_dn'] = self.model_config["giou_loss_coef"]
clean_weight_dict = copy.deepcopy(weight_dict)
if self.model_config["aux_loss"]:
aux_weight_dict = {}
for i in range(self.model_config["dec_layers"] - 1):
aux_weight_dict.update({k + f'_{i}': v for k, v in clean_weight_dict.items()})
weight_dict.update(aux_weight_dict)
if self.model_config['two_stage_type'] != 'no':
interm_weight_dict = {}
_coeff_weight_dict = {
'loss_ce': 1.0,
'loss_bbox': 1.0 if not self.model_config['no_interm_box_loss'] else 0.0,
'loss_giou': 1.0 if not self.model_config['no_interm_box_loss'] else 0.0,
}
interm_weight_dict.update({f'{k}_interm': v * self.model_config['interm_loss_coef'] * _coeff_weight_dict[k] for k, v in clean_weight_dict_wo_dn.items()})
weight_dict.update(interm_weight_dict)
self.weight_dict = copy.deepcopy(weight_dict)
self.criterion = SetCriterion(self.dataset_config["num_classes"], matcher=self.matcher,
losses=self.model_config['loss_types'], focal_alpha=self.model_config["focal_alpha"])
# nms_iou_threshold is always 0 in original DINO
self.box_processors = PostProcess(num_select=self.model_config['num_select'])
def configure_optimizers(self):
"""Configure optimizers for training."""
self.train_config = self.experiment_spec.train
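        # Two parameter groups: transformer/head parameters use the base learning rate,
        # while backbone parameters get their own (typically smaller) lr_backbone.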
param_dicts = [
{"params": [p for n, p in self.model.named_parameters() if "backbone" not in n and p.requires_grad]},
{
"params": [p for n, p in self.model.named_parameters() if "backbone" in n and p.requires_grad],
"lr": self.train_config['optim']['lr_backbone'],
}
]
if self.train_config.optim.optimizer == 'SGD':
base_optimizer = torch.optim.SGD(params=param_dicts,
lr=self.train_config.optim.lr,
momentum=self.train_config.optim.momentum,
weight_decay=self.train_config.optim.weight_decay)
elif self.train_config.optim.optimizer == 'AdamW':
base_optimizer = torch.optim.AdamW(params=param_dicts,
lr=self.train_config.optim.lr,
weight_decay=self.train_config.optim.weight_decay)
else:
raise NotImplementedError(f"Optimizer {self.train_config.optim.optimizer} is not implemented")
if self.train_config.distributed_strategy == "ddp_sharded":
# Override force_broadcast_object=False in PTL
optim = OSS(params=base_optimizer.param_groups, optim=type(base_optimizer), force_broadcast_object=True, **base_optimizer.defaults)
else:
optim = base_optimizer
optim_dict = {}
optim_dict["optimizer"] = optim
scheduler_type = self.train_config.optim.lr_scheduler
if scheduler_type == "MultiStep":
lr_scheduler = MultiStepLR(optimizer=optim,
milestones=self.train_config.optim.lr_steps,
gamma=self.train_config.optim.lr_decay,
verbose=True)
elif scheduler_type == "StepLR":
lr_scheduler = StepLR(optimizer=optim,
step_size=self.train_config.optim.lr_step_size,
gamma=self.train_config.optim.lr_decay,
verbose=True)
else:
raise NotImplementedError("LR Scheduler {} is not implemented".format(scheduler_type))
optim_dict["lr_scheduler"] = lr_scheduler
optim_dict['monitor'] = self.train_config.optim.monitor_name
return optim_dict
def training_step(self, batch, batch_idx):
"""Training step."""
data, targets, _ = batch
batch_size = data.shape[0]
if self.model_config['use_dn']:
outputs = self.model(data, targets)
else:
outputs = self.model(data)
# loss
loss_dict = self.criterion(outputs, targets)
losses = sum(loss_dict[k] * self.weight_dict[k] for k in loss_dict.keys() if k in self.weight_dict)
self.log("train_loss", losses, on_step=False, on_epoch=True, prog_bar=True, sync_dist=True, batch_size=batch_size)
self.log("train_class_error", loss_dict['class_error'], on_step=False, on_epoch=True, prog_bar=False, sync_dist=True, batch_size=batch_size)
self.log("train_loss_ce", loss_dict['loss_ce'], on_step=False, on_epoch=True, prog_bar=False, sync_dist=True, batch_size=batch_size)
self.log("train_loss_bbox", loss_dict['loss_bbox'], on_step=False, on_epoch=True, prog_bar=False, sync_dist=True, batch_size=batch_size)
self.log("train_loss_giou", loss_dict['loss_giou'], on_step=False, on_epoch=True, prog_bar=False, sync_dist=True, batch_size=batch_size)
return {'loss': losses}
def training_epoch_end(self, training_step_outputs):
"""Log Training metrics to status.json"""
average_train_loss = 0.0
for out in training_step_outputs:
average_train_loss += out['loss'].item()
average_train_loss /= len(training_step_outputs)
self.status_logging_dict["train_loss"] = average_train_loss
status_logging.get_status_logger().kpi = self.status_logging_dict
status_logging.get_status_logger().write(
message="Train and Val metrics generated.",
status_level=status_logging.Status.RUNNING
)
training_step_outputs.clear()
def on_validation_epoch_start(self) -> None:
"""
Validation epoch start.
Reset coco evaluator for each epoch.
"""
if self.dataset_type == "serialized":
# Load from scratch since COCO object is not instantiated for SerializedDatasetFromList
coco_lists = []
for source in self.dataset_config["val_data_sources"]:
with open(source["json_file"], "r") as f:
tmp = json.load(f)
coco_lists.append(COCO(tmp))
coco = COCO(CoCoDataMerge(coco_lists))
self.val_coco_evaluator = CocoEvaluator(coco, iou_types=['bbox'], eval_class_ids=self.eval_class_ids)
else:
self.val_coco_evaluator = CocoEvaluator(self.trainer.datamodule.val_dataset.coco, iou_types=['bbox'], eval_class_ids=self.eval_class_ids)
def validation_step(self, batch, batch_idx):
"""Validation step."""
data, targets, image_names = batch
batch_size = data.shape[0]
if self.model_config['use_dn']:
outputs = self.model(data, targets)
else:
outputs = self.model(data)
loss_dict = self.criterion(outputs, targets)
losses = sum(loss_dict[k] * self.weight_dict[k] for k in loss_dict.keys() if k in self.weight_dict)
orig_target_sizes = torch.stack([t["orig_size"] for t in targets], dim=0)
results = self.box_processors(outputs, orig_target_sizes, image_names)
res = {target['image_id'].item(): output for target, output in zip(targets, results)}
self.val_coco_evaluator.update(res)
self.log("val_loss", losses, on_step=False, on_epoch=True, prog_bar=True, sync_dist=True, batch_size=batch_size)
self.log("val_class_error", loss_dict['class_error'], on_step=False, on_epoch=True, prog_bar=False, sync_dist=True, batch_size=batch_size)
self.log("val_loss_ce", loss_dict['loss_ce'], on_step=False, on_epoch=True, prog_bar=False, sync_dist=True, batch_size=batch_size)
self.log("val_loss_bbox", loss_dict['loss_bbox'], on_step=False, on_epoch=True, prog_bar=False, sync_dist=True, batch_size=batch_size)
self.log("val_loss_giou", loss_dict['loss_giou'], on_step=False, on_epoch=True, prog_bar=False, sync_dist=True, batch_size=batch_size)
return losses
def validation_epoch_end(self, outputs):
"""
Validation epoch end.
Compute mAP at the end of epoch.
"""
self.val_coco_evaluator.synchronize_between_processes()
self.val_coco_evaluator.overall_accumulate()
self.val_coco_evaluator.overall_summarize(is_print=False)
mAP = self.val_coco_evaluator.coco_eval['bbox'].stats[0]
mAP50 = self.val_coco_evaluator.coco_eval['bbox'].stats[1]
if self.trainer.is_global_zero:
print("\n Validation mAP : {}\n".format(mAP))
print("\n Validation mAP50 : {}\n".format(mAP50))
self.log("val_mAP", mAP, rank_zero_only=True, sync_dist=True)
self.log("val_mAP50", mAP50, rank_zero_only=True, sync_dist=True)
self.status_logging_dict["val_mAP"] = str(mAP)
self.status_logging_dict["val_mAP50"] = str(mAP50)
average_val_loss = 0.0
for out in outputs:
average_val_loss += out.item()
average_val_loss /= len(outputs)
self.status_logging_dict["val_loss"] = average_val_loss
outputs.clear()
def on_test_epoch_start(self) -> None:
"""
Test epoch start.
Reset coco evaluator at start.
"""
if self.dataset_type == "serialized":
# Load from scratch since COCO object is not instantiated for SerializedDatasetFromList
with open(self.dataset_config["test_data_sources"]["json_file"], "r") as f:
tmp = json.load(f)
coco = COCO(tmp)
self.test_coco_evaluator = CocoEvaluator(coco, iou_types=['bbox'], eval_class_ids=self.eval_class_ids)
else:
self.test_coco_evaluator = CocoEvaluator(self.trainer.datamodule.test_dataset.coco, iou_types=['bbox'], eval_class_ids=self.eval_class_ids)
def test_step(self, batch, batch_idx):
"""Test step. Evaluate."""
data, targets, image_names = batch
outputs = self.model(data)
batch_size = data.shape[0]
loss_dict = self.criterion(outputs, targets)
losses = sum(loss_dict[k] * self.weight_dict[k] for k in loss_dict.keys() if k in self.weight_dict)
orig_target_sizes = torch.stack([t["orig_size"] for t in targets], dim=0)
results = self.box_processors(outputs, orig_target_sizes, image_names)
if self.experiment_spec.evaluate.conf_threshold > 0:
filtered_res = threshold_predictions(results, self.experiment_spec.evaluate.conf_threshold)
else:
filtered_res = results
res = {target['image_id'].item(): output for target, output in zip(targets, filtered_res)}
self.test_coco_evaluator.update(res)
self.log("test_loss", losses, on_step=False, on_epoch=True, prog_bar=False, sync_dist=True, batch_size=batch_size)
self.log("test_class_error", loss_dict['class_error'], on_step=False, on_epoch=True, prog_bar=False, sync_dist=True, batch_size=batch_size)
self.log("test_loss_ce", loss_dict['loss_ce'], on_step=False, on_epoch=True, prog_bar=False, sync_dist=True, batch_size=batch_size)
self.log("test_loss_bbox", loss_dict['loss_bbox'], on_step=False, on_epoch=True, prog_bar=False, sync_dist=True, batch_size=batch_size)
self.log("test_loss_giou", loss_dict['loss_giou'], on_step=False, on_epoch=True, prog_bar=False, sync_dist=True, batch_size=batch_size)
def test_epoch_end(self, outputs):
"""
Test epoch end.
Compute mAP at the end of epoch.
"""
self.test_coco_evaluator.synchronize_between_processes()
self.test_coco_evaluator.overall_accumulate()
self.test_coco_evaluator.overall_summarize(is_print=True)
mAP = self.test_coco_evaluator.coco_eval['bbox'].stats[0]
mAP50 = self.test_coco_evaluator.coco_eval['bbox'].stats[1]
self.log("test_mAP", mAP, rank_zero_only=True)
self.log("test_mAP50", mAP50, rank_zero_only=True)
# Log the evaluation results to a file
log_file = os.path.join(self.experiment_spec.results_dir, 'log_eval_{}.txt'.format(datetime.datetime.now().strftime('%Y%m%d-%H%M%S')))
logger = common_utils.create_logger(log_file, rank=0)
if self.trainer.is_global_zero:
logger.info('**********************Start logging Evaluation Results **********************')
logger.info('*************** mAP *****************')
logger.info('mAP : %2.2f' % mAP)
logger.info('*************** mAP50 *****************')
logger.info('mAP50 : %2.2f' % mAP50)
self.status_logging_dict["test_mAP"] = str(mAP)
self.status_logging_dict["test_mAP50"] = str(mAP50)
status_logging.get_status_logger().kpi = self.status_logging_dict
status_logging.get_status_logger().write(
message="Evaluation metrics generated.",
status_level=status_logging.Status.RUNNING
)
outputs.clear()
def predict_step(self, batch, batch_idx):
"""Predict step. Inference."""
data, targets, image_names = batch
outputs = self.model(data)
orig_target_sizes = torch.stack([t["orig_size"] for t in targets], dim=0)
pred_results = self.box_processors(outputs, orig_target_sizes, image_names)
return pred_results
@rank_zero_only
def on_predict_batch_end(self, outputs, batch, batch_idx, dataloader_idx):
"""
Predict batch end.
Save the result inferences at the end of batch.
"""
output_dir = self.experiment_spec.results_dir
label_map = self.trainer.datamodule.pred_dataset.label_map
color_map = self.experiment_spec.inference.color_map
conf_threshold = self.experiment_spec.inference.conf_threshold
is_internal = self.experiment_spec.inference.is_internal
save_inference_prediction(outputs, output_dir, conf_threshold, label_map, color_map, is_internal)
def forward(self, x):
"""Forward of the dino model."""
outputs = self.model(x)
return outputs
def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
"""Encrpyt the checkpoint. The encryption is done in TLTCheckpointConnector."""
pass
def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
"""Decrpyt the checkpoint."""
if checkpoint.get("state_dict_encrypted", False):
# Retrieve encryption key from TLTPyTorchCookbook.
key = TLTPyTorchCookbook.get_passphrase()
if key is None:
raise PermissionError("Cannot access model state dict without the encryption key")
checkpoint = patch_decrypt_checkpoint(checkpoint, key)
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/dino/model/pl_dino_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" DeNoising components. """
import torch
from nvidia_tao_pytorch.cv.deformable_detr.utils.misc import inverse_sigmoid
def prepare_for_cdn(dn_args, training, num_queries, num_classes, hidden_dim, label_enc):
"""A major difference of DINO from DN-DETR is that the author process pattern embedding pattern embedding in its detector
forward function and use learnable tgt embedding, so we change this function a little bit.
Args:
dn_args (tuple): targets, dn_number, label_noise_ratio, box_noise_scale.
training (bool): if it is training or inference.
        num_queries (int): number of queries.
num_classes (int): number of classes.
hidden_dim (int): transformer hidden dim.
label_enc (nn.Module): encode labels in dn.
Returns:
input_query_label (torch.Tensor): input query for label. None if not training.
input_query_bbox (torch.Tensor): input query for bounding box. None if not training.
attn_mask (torch.Tensor): attention mask. None if not training.
        dn_meta (dict): meta information of de-noising. None if not training.
"""
if training:
targets, dn_number, label_noise_ratio, box_noise_scale = dn_args
# positive and negative dn queries
dn_number = dn_number * 2
known = [(torch.ones_like(t['labels'])).cuda() for t in targets]
batch_size = len(known)
known_num = [sum(k) for k in known]
if int(max(known_num)) == 0:
dn_number = 1
else:
if dn_number >= 100:
dn_number = dn_number // (int(max(known_num) * 2))
elif dn_number < 1:
dn_number = 1
if dn_number == 0:
dn_number = 1
unmask_bbox = unmask_label = torch.cat(known)
labels = torch.cat([t['labels'] for t in targets])
boxes = torch.cat([t['boxes'] for t in targets])
batch_idx = torch.cat([torch.full_like(t['labels'].long(), i) for i, t in enumerate(targets)])
known_indice = torch.nonzero(unmask_label + unmask_bbox)
known_indice = known_indice.view(-1)
known_indice = known_indice.repeat(2 * dn_number, 1).view(-1)
known_labels = labels.repeat(2 * dn_number, 1).view(-1)
known_bid = batch_idx.repeat(2 * dn_number, 1).view(-1)
known_bboxs = boxes.repeat(2 * dn_number, 1)
known_labels_expaned = known_labels.clone()
known_bbox_expand = known_bboxs.clone()
if label_noise_ratio > 0:
p = torch.rand_like(known_labels_expaned.float())
chosen_indice = torch.nonzero(p < (label_noise_ratio * 0.5)).view(-1) # half of bbox prob
new_label = torch.randint_like(chosen_indice, 0, num_classes) # randomly put a new one here
known_labels_expaned.scatter_(0, chosen_indice, new_label)
single_pad = int(max(known_num))
pad_size = int(single_pad * 2 * dn_number)
positive_idx = torch.tensor(range(len(boxes))).long().cuda().unsqueeze(0).repeat(dn_number, 1)
positive_idx += (torch.tensor(range(dn_number)) * len(boxes) * 2).long().cuda().unsqueeze(1)
positive_idx = positive_idx.flatten()
negative_idx = positive_idx + len(boxes)
if box_noise_scale > 0:
known_bbox_ = torch.zeros_like(known_bboxs)
known_bbox_[:, :2] = known_bboxs[:, :2] - known_bboxs[:, 2:] / 2
known_bbox_[:, 2:] = known_bboxs[:, :2] + known_bboxs[:, 2:] / 2
diff = torch.zeros_like(known_bboxs)
diff[:, :2] = known_bboxs[:, 2:] / 2
diff[:, 2:] = known_bboxs[:, 2:] / 2
rand_sign = torch.randint_like(known_bboxs, low=0, high=2, dtype=torch.float32) * 2.0 - 1.0
rand_part = torch.rand_like(known_bboxs)
rand_part[negative_idx] += 1.0
rand_part *= rand_sign
known_bbox_ = known_bbox_ + torch.mul(rand_part,
diff).cuda() * box_noise_scale
known_bbox_ = known_bbox_.clamp(min=0.0, max=1.0)
known_bbox_expand[:, :2] = (known_bbox_[:, :2] + known_bbox_[:, 2:]) / 2
known_bbox_expand[:, 2:] = known_bbox_[:, 2:] - known_bbox_[:, :2]
m = known_labels_expaned.long().to('cuda')
input_label_embed = label_enc(m)
input_bbox_embed = inverse_sigmoid(known_bbox_expand)
padding_label = torch.zeros(pad_size, hidden_dim).cuda()
padding_bbox = torch.zeros(pad_size, 4).cuda()
input_query_label = padding_label.repeat(batch_size, 1, 1)
input_query_bbox = padding_bbox.repeat(batch_size, 1, 1)
map_known_indice = torch.tensor([]).to('cuda')
if len(known_num):
map_known_indice = torch.cat([torch.tensor(range(num)) for num in known_num]) # [1,2, 1,2,3]
map_known_indice = torch.cat([map_known_indice + single_pad * i for i in range(2 * dn_number)]).long()
if len(known_bid):
input_query_label[(known_bid.long(), map_known_indice)] = input_label_embed
input_query_bbox[(known_bid.long(), map_known_indice)] = input_bbox_embed
tgt_size = pad_size + num_queries
attn_mask = torch.ones(tgt_size, tgt_size).to('cuda') < 0
# match query cannot see the reconstruct
attn_mask[pad_size:, :pad_size] = True
# reconstruct cannot see each other
for i in range(dn_number):
if i == 0:
attn_mask[single_pad * 2 * i:single_pad * 2 * (i + 1), single_pad * 2 * (i + 1):pad_size] = True
if i == dn_number - 1:
attn_mask[single_pad * 2 * i:single_pad * 2 * (i + 1), :single_pad * i * 2] = True
else:
attn_mask[single_pad * 2 * i:single_pad * 2 * (i + 1), single_pad * 2 * (i + 1):pad_size] = True
attn_mask[single_pad * 2 * i:single_pad * 2 * (i + 1), :single_pad * 2 * i] = True
dn_meta = {
'pad_size': pad_size,
'num_dn_group': dn_number,
}
else:
input_query_label = None
input_query_bbox = None
attn_mask = None
dn_meta = None
return input_query_label, input_query_bbox, attn_mask, dn_meta
def dn_post_process(outputs_class, outputs_coord, dn_meta, aux_loss, _set_aux_loss):
"""Post process of dn after output from the transformer.
Put the dn part in the dn_meta.
"""
if dn_meta and dn_meta['pad_size'] > 0:
output_known_class = outputs_class[:, :, :dn_meta['pad_size'], :]
output_known_coord = outputs_coord[:, :, :dn_meta['pad_size'], :]
outputs_class = outputs_class[:, :, dn_meta['pad_size']:, :]
outputs_coord = outputs_coord[:, :, dn_meta['pad_size']:, :]
out = {'pred_logits': output_known_class[-1], 'pred_boxes': output_known_coord[-1]}
if aux_loss:
out['aux_outputs'] = _set_aux_loss(output_known_class, output_known_coord)
dn_meta['output_known_lbs_bboxes'] = out
return outputs_class, outputs_coord
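if __name__ == "__main__":
    # Illustrative sketch (not part of the original module): run prepare_for_cdn on a toy
    # batch to inspect the de-noising query shapes. Requires a CUDA device because the
    # function moves tensors to the GPU internally; the target values below are made up.
    num_classes, hidden_dim, num_queries = 91, 256, 900
    label_enc = torch.nn.Embedding(num_classes + 1, hidden_dim).cuda()
    targets = [{'labels': torch.tensor([3, 17]).cuda(),
                'boxes': torch.tensor([[0.5, 0.5, 0.2, 0.3],
                                       [0.3, 0.6, 0.1, 0.1]]).cuda()}]
    dn_args = (targets, 100, 0.5, 1.0)  # (targets, dn_number, label_noise_ratio, box_noise_scale)
    query_label, query_bbox, attn_mask, dn_meta = prepare_for_cdn(
        dn_args, training=True, num_queries=num_queries,
        num_classes=num_classes, hidden_dim=hidden_dim, label_enc=label_enc)
    print(query_label.shape, query_bbox.shape, attn_mask.shape, dn_meta['pad_size'])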
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/dino/model/dn_components.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Various positional encodings for the transformer."""
import math
import torch
from torch import nn
class PositionEmbeddingSineHW(nn.Module):
"""For DINO, different temperature values (usually 20) are applied along width and height respectively."""
def __init__(self, num_pos_feats=64, temperatureH=10000, temperatureW=10000, normalize=False, scale=None):
"""Initialize PositionEmbeddingSineHW Class"""
super().__init__()
self.num_pos_feats = num_pos_feats
self.temperatureH = temperatureH
self.temperatureW = temperatureW
self.normalize = normalize
if scale is not None and normalize is False:
raise ValueError("normalize should be True if scale is passed")
if scale is None:
scale = 2 * math.pi
self.scale = scale
def forward(self, not_mask, device):
"""Forward"""
y_embed = not_mask.cumsum(1, dtype=torch.float32)
x_embed = not_mask.cumsum(2, dtype=torch.float32)
if self.normalize:
eps = 1e-6
y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
dim_tx = torch.arange(self.num_pos_feats, dtype=torch.float32, device=device)
dim_tx = self.temperatureW ** (2 * (dim_tx // 2) / self.num_pos_feats)
pos_x = x_embed[:, :, :, None] / dim_tx
dim_ty = torch.arange(self.num_pos_feats, dtype=torch.float32, device=device)
dim_ty = self.temperatureH ** (2 * (dim_ty // 2) / self.num_pos_feats)
pos_y = y_embed[:, :, :, None] / dim_ty
pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
return pos
class PositionEmbeddingSineHWExport(nn.Module):
"""ONNX/TRT compatible PositionEmbedding"""
def __init__(self, num_pos_feats=64, temperatureH=10000, temperatureW=10000, normalize=False, scale=None):
"""Initialize PositionEmbeddingSineHWExport Class"""
super().__init__()
self.num_pos_feats = num_pos_feats
self.temperatureH = temperatureH
self.temperatureW = temperatureW
self.normalize = normalize
if scale is not None and normalize is False:
raise ValueError("normalize should be True if scale is passed")
if scale is None:
scale = 2 * math.pi
self.scale = scale
def forward(self, batch_shape, device):
""" Forward """
not_mask = torch.ones(batch_shape.tolist(), dtype=torch.bool, device=device)
y_embed = not_mask.cumsum(1, dtype=torch.float32)
x_embed = not_mask.cumsum(2, dtype=torch.float32)
if self.normalize:
eps = 1e-6
y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
dim_tx = torch.arange(self.num_pos_feats, dtype=torch.float32, device=device)
dim_tx = self.temperatureW ** (2 * (dim_tx // 2) / self.num_pos_feats)
pos_x = x_embed[:, :, :, None] / dim_tx
dim_ty = torch.arange(self.num_pos_feats, dtype=torch.float32, device=device)
dim_ty = self.temperatureH ** (2 * (dim_ty // 2) / self.num_pos_feats)
pos_y = y_embed[:, :, :, None] / dim_ty
pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
return pos
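if __name__ == "__main__":
    # Illustrative sketch (not part of the original module): build the HW sine position
    # embedding for a 2x28x38 valid-pixel mask. DINO typically uses temperature 20 for
    # both axes; the sizes here are arbitrary.
    pos_embed = PositionEmbeddingSineHW(num_pos_feats=128, temperatureH=20, temperatureW=20, normalize=True)
    not_mask = torch.ones(2, 28, 38)
    pos = pos_embed(not_mask, device=torch.device('cpu'))
    print(pos.shape)  # expected: torch.Size([2, 256, 28, 38])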
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/dino/model/position_encoding.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DINO model module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/dino/model/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Deformable Transformer functions. """
import math
import random
import copy
from typing import Optional
import torch
from torch import nn, Tensor
import torch.utils.checkpoint as checkpoint
from nvidia_tao_pytorch.core.modules.activation.activation import MultiheadAttention
from nvidia_tao_pytorch.cv.dino.model.model_utils import gen_encoder_output_proposals, MLP, _get_activation_fn, gen_sineembed_for_position
from nvidia_tao_pytorch.cv.deformable_detr.utils.misc import inverse_sigmoid
from nvidia_tao_pytorch.cv.deformable_detr.model.ops.modules import MSDeformAttn
class DeformableTransformer(nn.Module):
""" Deformable Transfromer module """
def __init__(self, d_model=256, nhead=8,
num_queries=300, export=False,
activation_checkpoint=True,
num_encoder_layers=6,
num_decoder_layers=6,
dim_feedforward=2048, dropout=0.0,
activation="relu", normalize_before=False,
return_intermediate_dec=False,
num_patterns=0,
modulate_hw_attn=False,
# for deformable encoder
deformable_decoder=False,
num_feature_levels=1,
enc_n_points=4,
dec_n_points=4,
# init query
decoder_query_perturber=None,
add_channel_attention=False,
random_refpoints_xy=False,
# two stage
two_stage_type='no', # ['no', 'standard']
two_stage_pat_embed=0,
two_stage_add_query_num=0,
two_stage_learn_wh=False,
two_stage_keep_all_tokens=False,
# evo of #anchors
dec_layer_number=None,
rm_self_attn_layers=None,
key_aware_type=None,
# layer share
layer_share_type=None,
# for detach
rm_detach=None,
decoder_sa_type='ca',
module_seq=['sa', 'ca', 'ffn'],
# for dn
embed_init_tgt=False,
use_detached_boxes_dec_out=False,
):
"""Initialize Encoder-Decoder Class for DINO.
Args:
d_model (int): size of the hidden dimension.
            nhead (int): number of heads.
num_queries (int): number of queries to be used in D-DETR encoder-decoder.
export (bool): flag to indicate if the current model is being used for ONNX export.
activation_checkpoint (bool): flag to indicate if activation checkpointing is used.
num_encoder_layers (int): number of encoder layers.
num_decoder_layers (int): number of decoder layers.
dim_feedforward (int): dimension of the feedforward layer.
dropout (float): probability for the dropout layer.
activation (str): type of activation.
normalize_before (bool): whether to add LayerNorm before the encoder.
return_intermediate_dec (bool): whether to return intermediate decoder.
num_patterns (int): number of patterns in encoder-decoder.
modulate_hw_attn (bool): whether to apply modulated HW attentions.
            deformable_decoder (bool): whether to use the deformable decoder.
num_feature_levels (int): Number of levels to extract from the backbone feature maps.
enc_n_points (int): number of reference points in the encoder.
dec_n_points (int): number of reference points in the decoder.
            decoder_query_perturber (class): RandomBoxPerturber.
add_channel_attention (bool): whether to add channel attention.
random_refpoints_xy (bool): whether to randomly initialize reference point embedding.
two_stage_type (str): type of two stage in DINO.
two_stage_pat_embed (int): size of the patch embedding for the second stage.
two_stage_add_query_num (int): size of the target embedding.
two_stage_learn_wh (bool): add embedding for learnable w and h.
two_stage_keep_all_tokens (bool): whether to keep all tokens in the second stage.
dec_layer_number (int): number of decoder layers.
rm_self_attn_layers (bool): remove self-attention in decoder.
key_aware_type (str): type of key_aware in cross-attention.
layer_share_type (str): type of layer sharing.
rm_detach (list): list of names to remove detach.
decoder_sa_type (str): type of self-attention in the decoder.
module_seq (list): sequence of modules in the forward function.
embed_init_tgt (bool): whether to add target embedding.
use_detached_boxes_dec_out (bool): use detached box decoder output in the reference points.
"""
super().__init__()
self.num_feature_levels = num_feature_levels
self.num_encoder_layers = num_encoder_layers
self.num_decoder_layers = num_decoder_layers
self.deformable_decoder = deformable_decoder
self.two_stage_keep_all_tokens = two_stage_keep_all_tokens
self.num_queries = num_queries
self.random_refpoints_xy = random_refpoints_xy
self.use_detached_boxes_dec_out = use_detached_boxes_dec_out
self.export = export
assert layer_share_type in [None, 'encoder', 'decoder', 'both']
enc_layer_share = layer_share_type in ['encoder', 'both']
dec_layer_share = layer_share_type in ['decoder', 'both']
assert layer_share_type is None
self.decoder_sa_type = decoder_sa_type
supported_decoder_types = ['sa', 'ca_label', 'ca_content']
if decoder_sa_type not in supported_decoder_types:
raise NotImplementedError(
f"Decoder type {decoder_sa_type} unsupported. Please set the decoder type to any one of {supported_decoder_types}"
)
encoder_layer = DeformableTransformerEncoderLayer(d_model, dim_feedforward,
dropout, activation,
num_feature_levels, nhead, enc_n_points,
add_channel_attention=add_channel_attention,
export=export)
encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
self.encoder = TransformerEncoder(
encoder_layer, num_encoder_layers,
encoder_norm, d_model=d_model,
num_queries=num_queries,
enc_layer_share=enc_layer_share,
two_stage_type=two_stage_type,
export=export,
activation_checkpoint=activation_checkpoint
)
# choose decoder layer type
if deformable_decoder:
decoder_layer = DeformableTransformerDecoderLayer(d_model, dim_feedforward,
dropout, activation,
num_feature_levels, nhead, dec_n_points,
key_aware_type=key_aware_type,
decoder_sa_type=decoder_sa_type,
module_seq=module_seq,
export=export)
else:
raise NotImplementedError
decoder_norm = nn.LayerNorm(d_model)
self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm,
export=export,
activation_checkpoint=activation_checkpoint,
return_intermediate=return_intermediate_dec,
d_model=d_model, query_dim=4,
modulate_hw_attn=modulate_hw_attn,
num_feature_levels=num_feature_levels,
deformable_decoder=deformable_decoder,
decoder_query_perturber=decoder_query_perturber,
dec_layer_number=dec_layer_number,
dec_layer_share=dec_layer_share,
use_detached_boxes_dec_out=use_detached_boxes_dec_out)
self.d_model = d_model
self.nhead = nhead
self.dec_layers = num_decoder_layers
self.num_queries = num_queries # useful for single stage model only
if not isinstance(num_patterns, int):
try:
num_patterns = int(num_patterns)
except Exception:
print("num_patterns should be int but {}".format(type(num_patterns)))
num_patterns = 0
self.num_patterns = num_patterns
if num_feature_levels > 1:
if self.num_encoder_layers > 0:
self.level_embed = nn.Parameter(torch.Tensor(num_feature_levels, d_model))
else:
self.level_embed = None
self.embed_init_tgt = embed_init_tgt
if (two_stage_type != 'no' and embed_init_tgt) or (two_stage_type == 'no'):
self.tgt_embed = nn.Embedding(self.num_queries, d_model)
nn.init.normal_(self.tgt_embed.weight.data)
else:
self.tgt_embed = None
# for two stage
self.two_stage_type = two_stage_type
self.two_stage_pat_embed = two_stage_pat_embed
self.two_stage_add_query_num = two_stage_add_query_num
self.two_stage_learn_wh = two_stage_learn_wh
assert two_stage_type in ['no', 'standard'], f"unknown param {two_stage_type} of two_stage_type"
if two_stage_type == 'standard':
# anchor selection at the output of encoder
self.enc_output = nn.Linear(d_model, d_model)
self.enc_output_norm = nn.LayerNorm(d_model)
if two_stage_pat_embed > 0:
self.pat_embed_for_2stage = nn.Parameter(torch.Tensor(two_stage_pat_embed, d_model))
nn.init.normal_(self.pat_embed_for_2stage)
if two_stage_add_query_num > 0:
self.tgt_embed = nn.Embedding(self.two_stage_add_query_num, d_model)
if two_stage_learn_wh:
self.two_stage_wh_embedding = nn.Embedding(1, 2)
else:
self.two_stage_wh_embedding = None
if two_stage_type == 'no':
self.init_ref_points(num_queries) # init self.refpoint_embed
self.enc_out_class_embed = None
self.enc_out_bbox_embed = None
# evolution of anchors
self.dec_layer_number = dec_layer_number
if dec_layer_number is not None:
if self.two_stage_type != 'no' or num_patterns == 0:
assert dec_layer_number[0] == num_queries, f"dec_layer_number[0]({dec_layer_number[0]}) != num_queries({num_queries})"
else:
assert dec_layer_number[0] == num_queries * num_patterns, f"dec_layer_number[0]({dec_layer_number[0]}) != num_queries({num_queries}) * num_patterns({num_patterns})"
self._reset_parameters()
self.rm_self_attn_layers = rm_self_attn_layers
if rm_self_attn_layers is not None:
print("Removing the self-attn in {} decoder layers".format(rm_self_attn_layers))
for lid, dec_layer in enumerate(self.decoder.layers):
if lid in rm_self_attn_layers:
dec_layer.rm_self_attn_modules()
self.rm_detach = rm_detach
if self.rm_detach:
assert isinstance(rm_detach, list)
assert any([i in ['enc_ref', 'enc_tgt', 'dec'] for i in rm_detach])
self.decoder.rm_detach = rm_detach
def _reset_parameters(self):
""" Reset parmaeters """
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
for m in self.modules():
if isinstance(m, MSDeformAttn):
m._reset_parameters()
if self.num_feature_levels > 1 and self.level_embed is not None:
nn.init.normal_(self.level_embed)
if self.two_stage_learn_wh:
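            # Note: math.log(0.05 / (1 - 0.05)) equals inverse_sigmoid(0.05), so the learnable
            # wh embedding starts out predicting a box of roughly 5% of the image size after sigmoid.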
nn.init.constant_(self.two_stage_wh_embedding.weight, math.log(0.05 / (1 - 0.05)))
def get_valid_ratio(self, mask):
""" Compute the valid ratio from given mask """
_, H, W = mask.shape
temp_mask = mask.bool()
valid_H = torch.sum((~temp_mask).float()[:, :, 0], 1)
valid_W = torch.sum((~temp_mask).float()[:, 0, :], 1)
valid_ratio_h = valid_H.float() / H
valid_ratio_w = valid_W.float() / W
valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1)
return valid_ratio
def init_ref_points(self, use_num_queries):
"""Initialize reference points"""
self.refpoint_embed = nn.Embedding(use_num_queries, 4)
if self.random_refpoints_xy:
self.refpoint_embed.weight.data[:, :2].uniform_(0, 1)
self.refpoint_embed.weight.data[:, :2] = inverse_sigmoid(self.refpoint_embed.weight.data[:, :2])
self.refpoint_embed.weight.data[:, :2].requires_grad = False
def forward(self, srcs, masks, refpoint_embed, pos_embeds, tgt, attn_mask=None):
"""Encoder-Decoder forward function.
Args:
srcs (torch.Tensor): List of multi features [bs, ci, hi, wi].
masks (torch.Tensor): List of multi masks [bs, hi, wi].
refpoint_embed (torch.Tensor): [bs, num_dn, 4]. None in infer.
pos_embeds (torch.Tensor): List of multi pos embeds [bs, ci, hi, wi].
tgt (torch.Tensor): [bs, num_dn, d_model]. None in infer.
Returns:
hs (torch.Tensor): (n_dec, bs, nq, d_model)
            references (torch.Tensor): sigmoid coordinates. (n_dec+1, bs, nq, 4)
hs_enc (torch.Tensor): (n_enc+1, bs, nq, d_model) or (1, bs, nq, d_model) or None
ref_enc (torch.Tensor): sigmoid coordinates. \
(n_enc+1, bs, nq, query_dim) or (1, bs, nq, query_dim) or None
"""
# prepare input for encoder
src_flatten = []
mask_flatten = []
lvl_pos_embed_flatten = []
if self.export:
spatial_shapes = []
else:
spatial_shapes = torch.empty(len(srcs), 2, dtype=torch.int32, device=srcs[0].device)
for lvl, (src, mask, pos_embed) in enumerate(zip(srcs, masks, pos_embeds)):
bs, _, h, w = src.shape
if self.export: # Input shape is fixed for export in onnx/tensorRT
spatial_shapes.append(torch.tensor([[h, w]], dtype=torch.int32, device=srcs[0].device))
else: # Used for dynamic input shape
spatial_shapes[lvl, 0], spatial_shapes[lvl, 1] = h, w
src = src.flatten(2).transpose(1, 2) # bs, hw, c
mask = mask.flatten(1) # bs, hw
pos_embed = pos_embed.flatten(2).transpose(1, 2) # bs, hw, c
if self.num_feature_levels > 1 and self.level_embed is not None:
lvl_pos_embed = pos_embed + self.level_embed[lvl].view(1, 1, -1)
else:
lvl_pos_embed = pos_embed
lvl_pos_embed_flatten.append(lvl_pos_embed)
src_flatten.append(src)
mask_flatten.append(mask)
src_flatten = torch.cat(src_flatten, 1) # bs, \sum{hxw}, c
mask_flatten = torch.cat(mask_flatten, 1) # bs, \sum{hxw}
lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1) # bs, \sum{hxw}, c
if isinstance(spatial_shapes, list):
spatial_shapes = torch.cat(spatial_shapes, 0)
level_start_index = torch.cat((spatial_shapes.new_zeros((1, )), spatial_shapes.prod(1).cumsum(0)[:-1]))
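        # level_start_index marks where each level begins in the flattened sequence.
        # Illustrative example: spatial_shapes [[100, 150], [50, 75]] gives per-level sizes
        # [15000, 3750], so level_start_index = [0, 15000].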
valid_ratios = torch.stack([self.get_valid_ratio(m) for m in masks], 1)
# two stage
enc_topk_proposals = enc_refpoint_embed = None
#########################################################
# Begin Encoder
#########################################################
memory, _, _ = self.encoder(src_flatten,
pos=lvl_pos_embed_flatten,
level_start_index=level_start_index,
spatial_shapes=spatial_shapes,
valid_ratios=valid_ratios,
key_padding_mask=mask_flatten,
ref_token_index=enc_topk_proposals, # bs, nq
ref_token_coord=enc_refpoint_embed, # bs, nq, 4
)
#########################################################
# End Encoder
# - memory: bs, \sum{hw}, c
# - mask_flatten: bs, \sum{hw}
# - lvl_pos_embed_flatten: bs, \sum{hw}, c
# - enc_intermediate_output: None or (nenc+1, bs, nq, c) or (nenc, bs, nq, c)
# - enc_intermediate_refpoints: None or (nenc+1, bs, nq, c) or (nenc, bs, nq, c)
#########################################################
if self.two_stage_type == 'standard':
if self.two_stage_learn_wh:
input_hw = self.two_stage_wh_embedding.weight[0]
else:
input_hw = None
output_memory, output_proposals = gen_encoder_output_proposals(memory, mask_flatten, spatial_shapes, input_hw, export=self.export)
output_memory = self.enc_output_norm(self.enc_output(output_memory))
if self.two_stage_pat_embed > 0:
bs, nhw, _ = output_memory.shape
# output_memory: bs, n, 256; self.pat_embed_for_2stage: k, 256
output_memory = output_memory.repeat(1, self.two_stage_pat_embed, 1)
_pats = self.pat_embed_for_2stage.repeat_interleave(nhw, 0)
output_memory = output_memory + _pats
output_proposals = output_proposals.repeat(1, self.two_stage_pat_embed, 1)
if self.two_stage_add_query_num > 0:
assert refpoint_embed is not None
output_memory = torch.cat((output_memory, tgt), dim=1)
output_proposals = torch.cat((output_proposals, refpoint_embed), dim=1)
enc_outputs_class_unselected = self.enc_out_class_embed(output_memory)
enc_outputs_coord_unselected = self.enc_out_bbox_embed(output_memory) + output_proposals # (bs, \sum{hw}, 4) unsigmoid
topk = self.num_queries
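            # Keep the num_queries encoder tokens with the highest maximum class logit;
            # these serve as the region proposals fed to the decoder in the two-stage setup.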
topk_proposals = torch.topk(enc_outputs_class_unselected.max(-1)[0], topk, dim=1)[1] # bs, nq
# gather boxes
refpoint_embed_undetach = torch.gather(enc_outputs_coord_unselected, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, 4)) # unsigmoid
refpoint_embed_ = refpoint_embed_undetach.detach()
init_box_proposal = torch.gather(output_proposals, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, 4)).sigmoid() # sigmoid
# gather tgt
tgt_undetach = torch.gather(output_memory, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, self.d_model))
if self.embed_init_tgt:
tgt_ = self.tgt_embed.weight[:, None, :].repeat(1, bs, 1).transpose(0, 1) # nq, bs, d_model
else:
tgt_ = tgt_undetach.detach()
if refpoint_embed is not None:
refpoint_embed = torch.cat([refpoint_embed, refpoint_embed_], dim=1)
tgt = torch.cat([tgt, tgt_], dim=1)
else:
refpoint_embed, tgt = refpoint_embed_, tgt_
elif self.two_stage_type == 'no':
tgt_ = self.tgt_embed.weight[:, None, :].repeat(1, bs, 1).transpose(0, 1) # nq, bs, d_model
refpoint_embed_ = self.refpoint_embed.weight[:, None, :].repeat(1, bs, 1).transpose(0, 1) # nq, bs, 4
if refpoint_embed is not None:
refpoint_embed = torch.cat([refpoint_embed, refpoint_embed_], dim=1)
tgt = torch.cat([tgt, tgt_], dim=1)
else:
refpoint_embed, tgt = refpoint_embed_, tgt_
if self.num_patterns > 0:
tgt_embed = tgt.repeat(1, self.num_patterns, 1)
refpoint_embed = refpoint_embed.repeat(1, self.num_patterns, 1)
tgt_pat = self.patterns.weight[None, :, :].repeat_interleave(self.num_queries, 1) # 1, n_q*n_pat, d_model
tgt = tgt_embed + tgt_pat
init_box_proposal = refpoint_embed_.sigmoid()
else:
raise NotImplementedError("unknown two_stage_type {}".format(self.two_stage_type))
#########################################################
# End preparing tgt
# - tgt: bs, NQ, d_model
        # - refpoint_embed(unsigmoid): bs, NQ, 4
#########################################################
#########################################################
# Begin Decoder
#########################################################
hs, references = self.decoder(tgt=tgt.transpose(0, 1),
memory=memory.transpose(0, 1),
memory_key_padding_mask=mask_flatten,
pos=lvl_pos_embed_flatten.transpose(0, 1),
refpoints_unsigmoid=refpoint_embed.transpose(0, 1),
level_start_index=level_start_index,
spatial_shapes=spatial_shapes,
valid_ratios=valid_ratios,
tgt_mask=attn_mask)
#########################################################
# End Decoder
# hs: n_dec, bs, nq, d_model
# references: n_dec+1, bs, nq, query_dim
#########################################################
#########################################################
# Begin postprocess
#########################################################
if self.two_stage_type == 'standard':
if self.two_stage_keep_all_tokens:
hs_enc = output_memory.unsqueeze(0)
ref_enc = enc_outputs_coord_unselected.unsqueeze(0)
init_box_proposal = output_proposals
else:
hs_enc = tgt_undetach.unsqueeze(0)
ref_enc = refpoint_embed_undetach.sigmoid().unsqueeze(0)
else:
hs_enc = ref_enc = None
#########################################################
# End postprocess
# hs_enc: (n_enc+1, bs, nq, d_model) or (1, bs, nq, d_model) or (n_enc, bs, nq, d_model) or None
# ref_enc: (n_enc+1, bs, nq, query_dim) or (1, bs, nq, query_dim) or (n_enc, bs, nq, d_model) or None
#########################################################
return hs, references, hs_enc, ref_enc, init_box_proposal
# hs: (n_dec, bs, nq, d_model)
        # references: sigmoid coordinates. (n_dec+1, bs, nq, 4)
# hs_enc: (n_enc+1, bs, nq, d_model) or (1, bs, nq, d_model) or None
# ref_enc: sigmoid coordinates. \
# (n_enc+1, bs, nq, query_dim) or (1, bs, nq, query_dim) or None
class TransformerEncoder(nn.Module):
""" Deformable Transfromer Encoder module """
def __init__(self,
encoder_layer, num_layers, norm=None, d_model=256,
num_queries=300,
enc_layer_share=False, enc_layer_dropout_prob=None,
two_stage_type='no', # ['no', 'standard']
export=False, activation_checkpoint=True):
""" Initializes the Transformer Encoder Module """
super().__init__()
# prepare layers
if num_layers > 0:
self.layers = _get_clones(encoder_layer, num_layers, layer_share=enc_layer_share)
else:
self.layers = []
del encoder_layer
self.activation_checkpoint = activation_checkpoint
self.export = export
self.query_scale = None
self.num_queries = num_queries
self.num_layers = num_layers
self.norm = norm
self.d_model = d_model
self.enc_layer_dropout_prob = enc_layer_dropout_prob
if enc_layer_dropout_prob is not None:
assert isinstance(enc_layer_dropout_prob, list)
assert len(enc_layer_dropout_prob) == num_layers
for i in enc_layer_dropout_prob:
assert 0.0 <= i <= 1.0
self.two_stage_type = two_stage_type
@staticmethod
def get_reference_points(spatial_shapes, valid_ratios, device, export=False):
""" get reference points """
reference_points_list = []
for lvl, (H_, W_) in enumerate(spatial_shapes):
if export: # Fixed dimensions for export in onnx
H_, W_ = int(H_), int(W_)
else:
H_, W_ = spatial_shapes[lvl, 0], spatial_shapes[lvl, 1]
ref_y, ref_x = torch.meshgrid(torch.linspace(0.5, H_ - 0.5, H_, dtype=torch.float32, device=device),
torch.linspace(0.5, W_ - 0.5, W_, dtype=torch.float32, device=device))
ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, lvl, 1] * H_)
ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, lvl, 0] * W_)
ref = torch.stack((ref_x, ref_y), -1)
reference_points_list.append(ref)
reference_points = torch.cat(reference_points_list, 1)
reference_points = reference_points[:, :, None] * valid_ratios[:, None]
return reference_points
def forward(self,
src: Tensor,
pos: Tensor,
spatial_shapes: Tensor,
level_start_index: Tensor,
valid_ratios: Tensor,
key_padding_mask: Tensor,
ref_token_index: Optional[Tensor] = None,
ref_token_coord: Optional[Tensor] = None
):
"""Deformable Encoder forward functions.
Args:
src (torch.Tensor): [bs, sum(hi*wi), 256].
pos (torch.Tensor): pos embed for src. [bs, sum(hi*wi), 256].
spatial_shapes (torch.Tensor): h,w of each level [num_level, 2].
            level_start_index (torch.Tensor): [num_level] start index of each level in sum(hi*wi).
valid_ratios (torch.Tensor): [bs, num_level, 2].
key_padding_mask (torch.Tensor): [bs, sum(hi*wi)].
ref_token_index (torch.Tensor): bs, nq.
ref_token_coord (torch.Tensor): bs, nq, 4.
Returns:
output (torch.Tensor): [bs, sum(hi*wi), 256].
reference_points (torch.Tensor): [bs, sum(hi*wi), num_level, 2].
"""
if self.two_stage_type in ['no', 'standard']:
assert ref_token_index is None
output = src
# preparation and reshape
if self.num_layers > 0:
reference_points = self.get_reference_points(spatial_shapes, valid_ratios, device=src.device, export=self.export)
intermediate_output = []
intermediate_ref = []
if ref_token_index is not None:
out_i = torch.gather(output, 1, ref_token_index.unsqueeze(-1).repeat(1, 1, self.d_model))
intermediate_output.append(out_i)
intermediate_ref.append(ref_token_coord)
# main process
for layer_id, layer in enumerate(self.layers):
# main process
dropflag = False
if self.enc_layer_dropout_prob is not None:
prob = random.random()
if prob < self.enc_layer_dropout_prob[layer_id]:
dropflag = True
if not dropflag:
if self.export or not self.activation_checkpoint:
output = layer(src=output,
pos=pos,
reference_points=reference_points,
spatial_shapes=spatial_shapes,
level_start_index=level_start_index,
key_padding_mask=key_padding_mask)
else:
output = checkpoint.checkpoint(layer,
output,
pos,
reference_points,
spatial_shapes,
level_start_index,
key_padding_mask)
# aux loss
if (layer_id != self.num_layers - 1) and ref_token_index is not None:
out_i = torch.gather(output, 1, ref_token_index.unsqueeze(-1).repeat(1, 1, self.d_model))
intermediate_output.append(out_i)
intermediate_ref.append(ref_token_coord)
if self.norm is not None:
output = self.norm(output)
if ref_token_index is not None:
intermediate_output = torch.stack(intermediate_output) # n_enc/n_enc-1, bs, \sum{hw}, d_model
intermediate_ref = torch.stack(intermediate_ref)
else:
intermediate_output = intermediate_ref = None
return output, intermediate_output, intermediate_ref
class TransformerDecoder(nn.Module):
""" Deformable Transfromer Decoder module """
def __init__(self, decoder_layer, num_layers,
norm=None, export=False,
activation_checkpoint=True,
return_intermediate=False,
d_model=256, query_dim=4,
modulate_hw_attn=False,
num_feature_levels=1,
deformable_decoder=False,
decoder_query_perturber=None,
dec_layer_number=None, # number of queries each layer in decoder
dec_layer_share=False,
dec_layer_dropout_prob=None,
use_detached_boxes_dec_out=False
):
""" Initializes the Transformer Decoder Module """
super().__init__()
self.export = export
self.activation_checkpoint = activation_checkpoint
if num_layers > 0:
self.layers = _get_clones(decoder_layer, num_layers, layer_share=dec_layer_share)
else:
self.layers = []
self.num_layers = num_layers
self.norm = norm
self.return_intermediate = return_intermediate
assert return_intermediate, "support return_intermediate only"
self.query_dim = query_dim
assert query_dim in [2, 4], "query_dim should be 2/4 but {}".format(query_dim)
self.num_feature_levels = num_feature_levels
self.use_detached_boxes_dec_out = use_detached_boxes_dec_out
self.ref_point_head = MLP(query_dim // 2 * d_model, d_model, d_model, 2)
if not deformable_decoder:
self.query_pos_sine_scale = MLP(d_model, d_model, d_model, 2)
else:
self.query_pos_sine_scale = None
self.query_scale = None
self.bbox_embed = None
self.class_embed = None
self.d_model = d_model
self.modulate_hw_attn = modulate_hw_attn
self.deformable_decoder = deformable_decoder
if not deformable_decoder and modulate_hw_attn:
self.ref_anchor_head = MLP(d_model, d_model, 2, 2)
else:
self.ref_anchor_head = None
self.decoder_query_perturber = decoder_query_perturber
self.box_pred_damping = None
self.dec_layer_number = dec_layer_number
if dec_layer_number is not None:
assert isinstance(dec_layer_number, list)
assert len(dec_layer_number) == num_layers
self.dec_layer_dropout_prob = dec_layer_dropout_prob
if dec_layer_dropout_prob is not None:
assert isinstance(dec_layer_dropout_prob, list)
assert len(dec_layer_dropout_prob) == num_layers
for i in dec_layer_dropout_prob:
assert 0.0 <= i <= 1.0
self.rm_detach = None
def forward(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
refpoints_unsigmoid: Optional[Tensor] = None, # num_queries, bs, 2
# for memory
level_start_index: Optional[Tensor] = None, # num_levels
spatial_shapes: Optional[Tensor] = None, # bs, num_levels, 2
valid_ratios: Optional[Tensor] = None,
):
""" Deformable Decoder forward function.
Args:
tgt (torch.Tensor): nq, bs, d_model.
memory (torch.Tensor): hw, bs, d_model.
pos (torch.Tensor): hw, bs, d_model.
refpoints_unsigmoid (torch.Tensor): nq, bs, 2/4.
valid_ratios/spatial_shapes (torch.Tensor): bs, nlevel, 2.
"""
output = tgt
intermediate = []
reference_points = refpoints_unsigmoid.sigmoid()
ref_points = [reference_points]
for layer_id, layer in enumerate(self.layers):
# preprocess ref points
if self.training and self.decoder_query_perturber is not None and layer_id != 0:
reference_points = self.decoder_query_perturber(reference_points)
if self.deformable_decoder:
if reference_points.shape[-1] == 4:
# nq, bs, nlevel, 4
reference_points_input = reference_points[:, :, None] * torch.cat([valid_ratios, valid_ratios], -1)[None, :]
else:
assert reference_points.shape[-1] == 2
reference_points_input = reference_points[:, :, None] * valid_ratios[None, :]
query_sine_embed = gen_sineembed_for_position(reference_points_input[:, :, 0, :]) # nq, bs, 256*2
else:
query_sine_embed = gen_sineembed_for_position(reference_points) # nq, bs, 256*2
reference_points_input = None
# conditional query
raw_query_pos = self.ref_point_head(query_sine_embed) # nq, bs, 256
pos_scale = self.query_scale(output) if self.query_scale is not None else 1
query_pos = pos_scale * raw_query_pos
if not self.deformable_decoder:
query_sine_embed = query_sine_embed[..., :self.d_model] * self.query_pos_sine_scale(output)
# modulated HW attentions
if not self.deformable_decoder and self.modulate_hw_attn:
refHW_cond = self.ref_anchor_head(output).sigmoid() # nq, bs, 2
query_sine_embed[..., self.d_model // 2:] *= (refHW_cond[..., 0] / reference_points[..., 2]).unsqueeze(-1)
query_sine_embed[..., :self.d_model // 2] *= (refHW_cond[..., 1] / reference_points[..., 3]).unsqueeze(-1)
# random drop some layers if needed
dropflag = False
if self.dec_layer_dropout_prob is not None:
prob = random.random()
if prob < self.dec_layer_dropout_prob[layer_id]:
dropflag = True
if not dropflag:
if self.export or not self.activation_checkpoint:
output = layer(tgt=output,
tgt_query_pos=query_pos,
tgt_query_sine_embed=query_sine_embed,
tgt_key_padding_mask=tgt_key_padding_mask,
tgt_reference_points=reference_points_input,
memory=memory,
memory_key_padding_mask=memory_key_padding_mask,
memory_level_start_index=level_start_index,
memory_spatial_shapes=spatial_shapes,
memory_pos=pos,
self_attn_mask=tgt_mask,
cross_attn_mask=memory_mask)
else:
output = checkpoint.checkpoint(layer,
output,
query_pos,
query_sine_embed,
tgt_key_padding_mask,
reference_points_input,
memory,
memory_key_padding_mask,
level_start_index,
spatial_shapes,
pos,
tgt_mask,
memory_mask)
# iter update
if self.bbox_embed is not None:
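                # Iterative box refinement: the layer predicts a delta in unsigmoid space,
                # adds it to the previous reference points, and re-applies sigmoid.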
reference_before_sigmoid = inverse_sigmoid(reference_points)
delta_unsig = self.bbox_embed[layer_id](output) # pylint: disable=E1136
outputs_unsig = delta_unsig + reference_before_sigmoid
new_reference_points = outputs_unsig.sigmoid()
# select # ref points
if self.dec_layer_number is not None and layer_id != self.num_layers - 1:
nq_now = new_reference_points.shape[0]
select_number = self.dec_layer_number[layer_id + 1]
if nq_now != select_number:
# pylint: disable=E1136
class_unselected = self.class_embed[layer_id](output) # nq, bs, 91
topk_proposals = torch.topk(class_unselected.max(-1)[0], select_number, dim=0)[1] # new_nq, bs
new_reference_points = torch.gather(new_reference_points, 0, topk_proposals.unsqueeze(-1).repeat(1, 1, 4)) # unsigmoid
if self.rm_detach and 'dec' in self.rm_detach:
reference_points = new_reference_points
else:
reference_points = new_reference_points.detach()
if self.use_detached_boxes_dec_out:
ref_points.append(reference_points)
else:
ref_points.append(new_reference_points)
intermediate.append(self.norm(output))
if self.dec_layer_number is not None and layer_id != self.num_layers - 1:
if nq_now != select_number:
output = torch.gather(output, 0, topk_proposals.unsqueeze(-1).repeat(1, 1, self.d_model)) # unsigmoid
return [
[itm_out.transpose(0, 1) for itm_out in intermediate],
[itm_refpoint.transpose(0, 1) for itm_refpoint in ref_points]
]
class DeformableTransformerEncoderLayer(nn.Module):
""" Deformable Transfromer Encoder Layer module """
def __init__(self,
d_model=256, d_ffn=1024,
dropout=0.1, activation="relu",
n_levels=4, n_heads=8, n_points=4,
add_channel_attention=False,
export=False,
):
""" Initializes the Transformer Encoder Layer """
super().__init__()
self.export = export
# self attention
self.self_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)
self.dropout1 = nn.Dropout(dropout)
self.norm1 = nn.LayerNorm(d_model)
# ffn
self.linear1 = nn.Linear(d_model, d_ffn)
self.activation = _get_activation_fn(activation)
self.dropout2 = nn.Dropout(dropout)
self.linear2 = nn.Linear(d_ffn, d_model)
self.dropout3 = nn.Dropout(dropout)
self.norm2 = nn.LayerNorm(d_model)
# channel attention
self.add_channel_attention = add_channel_attention
if add_channel_attention:
self.activ_channel = _get_activation_fn('dyrelu')
self.norm_channel = nn.LayerNorm(d_model)
@staticmethod
def with_pos_embed(tensor, pos):
""" Add positional Embedding to the tensor """
return tensor if pos is None else tensor + pos
def forward_ffn(self, src):
"""Feed-forward network forward function"""
src2 = self.linear2(self.dropout2(self.activation(self.linear1(src))))
src = src + self.dropout3(src2)
src = self.norm2(src)
return src
def forward(self, src, pos, reference_points, spatial_shapes, level_start_index, key_padding_mask=None):
""" forward function for Encoder Layer"""
# self attention
src2 = self.self_attn(self.with_pos_embed(src, pos),
reference_points, src,
spatial_shapes,
level_start_index,
key_padding_mask,
export=self.export)
src = src + self.dropout1(src2)
src = self.norm1(src)
# ffn
src = self.forward_ffn(src)
# channel attn
if self.add_channel_attention:
src = self.norm_channel(src + self.activ_channel(src))
return src
class DeformableTransformerDecoderLayer(nn.Module):
""" Deformable Transfromer Decoder Layer module """
def __init__(self, d_model=256, d_ffn=1024,
dropout=0.1, activation="relu",
n_levels=4, n_heads=8, n_points=4,
key_aware_type=None,
decoder_sa_type='sa',
module_seq=['sa', 'ca', 'ffn'],
export=False
):
""" Initializes the Transformer Decoder Layer """
super().__init__()
self.module_seq = module_seq
assert sorted(module_seq) == ['ca', 'ffn', 'sa']
self.export = export
# cross attention
self.cross_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)
self.dropout1 = nn.Dropout(dropout)
self.norm1 = nn.LayerNorm(d_model)
# self attention
if self.export:
# Starting from PyT 1.14, _scaled_dot_product_attention has been switched to C++ backend
# which is not exportable as ONNX operator
# However, the training / eval time can be greatly optimized by Torch selecting the optimal
# attention mechanism under the hood
self.self_attn = MultiheadAttention(d_model, n_heads, dropout=dropout)
else:
self.self_attn = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)
self.dropout2 = nn.Dropout(dropout)
self.norm2 = nn.LayerNorm(d_model)
# ffn
self.linear1 = nn.Linear(d_model, d_ffn)
self.activation = _get_activation_fn(activation)
self.dropout3 = nn.Dropout(dropout)
self.linear2 = nn.Linear(d_ffn, d_model)
self.dropout4 = nn.Dropout(dropout)
self.norm3 = nn.LayerNorm(d_model)
self.key_aware_type = key_aware_type
self.key_aware_proj = None
self.decoder_sa_type = decoder_sa_type
assert decoder_sa_type in ['sa', 'ca_label', 'ca_content']
if decoder_sa_type == 'ca_content':
self.self_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)
def rm_self_attn_modules(self):
"""Remove self attention module"""
self.self_attn = None
self.dropout2 = None
self.norm2 = None
@staticmethod
def with_pos_embed(tensor, pos):
""" Add positional Embedding to the tensor """
return tensor if pos is None else tensor + pos
def forward_ffn(self, tgt):
"""Feed-forward network forward function"""
tgt2 = self.linear2(self.dropout3(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout4(tgt2)
tgt = self.norm3(tgt)
return tgt
def forward_sa(self,
# for tgt
tgt: Optional[Tensor], # nq, bs, d_model
tgt_query_pos: Optional[Tensor] = None, # pos for query. MLP(Sine(pos))
tgt_query_sine_embed: Optional[Tensor] = None, # pos for query. Sine(pos)
tgt_key_padding_mask: Optional[Tensor] = None,
tgt_reference_points: Optional[Tensor] = None, # nq, bs, 4
# for memory
memory: Optional[Tensor] = None, # hw, bs, d_model
memory_key_padding_mask: Optional[Tensor] = None,
memory_level_start_index: Optional[Tensor] = None, # num_levels
memory_spatial_shapes: Optional[Tensor] = None, # bs, num_levels, 2
memory_pos: Optional[Tensor] = None, # pos for memory
# sa
self_attn_mask: Optional[Tensor] = None, # mask used for self-attention
cross_attn_mask: Optional[Tensor] = None, # mask used for cross-attention
):
"""Self-Attention forward function"""
# self attention
if self.self_attn is not None:
if self.decoder_sa_type == 'sa':
q = k = self.with_pos_embed(tgt, tgt_query_pos)
tgt2 = self.self_attn(q, k, tgt, attn_mask=self_attn_mask)[0]
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
elif self.decoder_sa_type == 'ca_label':
bs = tgt.shape[1]
k = v = self.label_embedding.weight[:, None, :].repeat(1, bs, 1)
tgt2 = self.self_attn(tgt, k, v, attn_mask=self_attn_mask)[0]
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
elif self.decoder_sa_type == 'ca_content':
tgt2 = self.self_attn(self.with_pos_embed(tgt, tgt_query_pos).transpose(0, 1),
tgt_reference_points.transpose(0, 1).contiguous(),
memory.transpose(0, 1), memory_spatial_shapes,
memory_level_start_index, memory_key_padding_mask).transpose(0, 1)
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
else:
raise NotImplementedError("Unknown decoder_sa_type {}".format(self.decoder_sa_type))
return tgt
def forward_ca(self,
# for tgt
tgt: Optional[Tensor], # nq, bs, d_model
tgt_query_pos: Optional[Tensor] = None, # pos for query. MLP(Sine(pos))
tgt_query_sine_embed: Optional[Tensor] = None, # pos for query. Sine(pos)
tgt_key_padding_mask: Optional[Tensor] = None,
tgt_reference_points: Optional[Tensor] = None, # nq, bs, 4
# for memory
memory: Optional[Tensor] = None, # hw, bs, d_model
memory_key_padding_mask: Optional[Tensor] = None,
memory_level_start_index: Optional[Tensor] = None, # num_levels
memory_spatial_shapes: Optional[Tensor] = None, # bs, num_levels, 2
memory_pos: Optional[Tensor] = None, # pos for memory
# sa
self_attn_mask: Optional[Tensor] = None, # mask used for self-attention
cross_attn_mask: Optional[Tensor] = None, # mask used for cross-attention
):
"""Cross-Attention forward function"""
# cross attention
if self.key_aware_type is not None:
if self.key_aware_type == 'mean':
tgt = tgt + memory.mean(0, keepdim=True)
elif self.key_aware_type == 'proj_mean':
tgt = tgt + self.key_aware_proj(memory).mean(0, keepdim=True)
else:
raise NotImplementedError("Unknown key_aware_type: {}".format(self.key_aware_type))
tgt2 = self.cross_attn(self.with_pos_embed(tgt, tgt_query_pos).transpose(0, 1),
tgt_reference_points.transpose(0, 1).contiguous(),
memory.transpose(0, 1), memory_spatial_shapes, memory_level_start_index, memory_key_padding_mask,
export=self.export).transpose(0, 1)
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
return tgt
def forward(self,
# for tgt
tgt: Optional[Tensor], # nq, bs, d_model
tgt_query_pos: Optional[Tensor] = None, # pos for query. MLP(Sine(pos))
tgt_query_sine_embed: Optional[Tensor] = None, # pos for query. Sine(pos)
tgt_key_padding_mask: Optional[Tensor] = None,
tgt_reference_points: Optional[Tensor] = None, # nq, bs, 4
# for memory
memory: Optional[Tensor] = None, # hw, bs, d_model
memory_key_padding_mask: Optional[Tensor] = None,
memory_level_start_index: Optional[Tensor] = None, # num_levels
memory_spatial_shapes: Optional[Tensor] = None, # bs, num_levels, 2
memory_pos: Optional[Tensor] = None, # pos for memory
# sa
self_attn_mask: Optional[Tensor] = None, # mask used for self-attention
cross_attn_mask: Optional[Tensor] = None, # mask used for cross-attention
):
"""Forward function"""
for funcname in self.module_seq:
if funcname == 'ffn':
tgt = self.forward_ffn(tgt)
elif funcname == 'ca':
tgt = self.forward_ca(tgt, tgt_query_pos, tgt_query_sine_embed,
tgt_key_padding_mask, tgt_reference_points,
memory, memory_key_padding_mask, memory_level_start_index,
memory_spatial_shapes, memory_pos, self_attn_mask, cross_attn_mask)
elif funcname == 'sa':
tgt = self.forward_sa(tgt, tgt_query_pos, tgt_query_sine_embed,
tgt_key_padding_mask, tgt_reference_points,
memory, memory_key_padding_mask, memory_level_start_index,
memory_spatial_shapes, memory_pos, self_attn_mask, cross_attn_mask)
else:
raise ValueError(f'Unknown funcname {funcname}')
return tgt
def _get_clones(module, N, layer_share=False):
""" get clones """
if layer_share:
return nn.ModuleList([module for i in range(N)])
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/dino/model/deformable_transformer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Criterion Loss functions. """
import torch
import torch.nn.functional as F
from torch import nn
from nvidia_tao_pytorch.cv.dino.model.model_utils import sigmoid_focal_loss
from nvidia_tao_pytorch.cv.deformable_detr.utils import box_ops
from nvidia_tao_pytorch.cv.deformable_detr.utils.misc import (accuracy, get_world_size, is_dist_avail_and_initialized)
class SetCriterion(nn.Module):
""" This class computes the loss for DINO.
The process happens in two steps:
1) Compute hungarian assignment between ground truth boxes and the outputs of the model
2) Supervise each pair of matched ground-truth / prediction (supervise class and box)
"""
def __init__(self, num_classes, matcher, focal_alpha, losses):
""" Create the criterion.
Args:
num_classes (int): number of object categories, omitting the special no-object category
matcher (nn.Module): module able to compute a matching between targets and proposals
losses (list[str]): list of all the losses to be applied. See get_loss for list of available losses.
focal_alpha (float): alpha in Focal Loss
"""
super().__init__()
self.num_classes = num_classes
self.matcher = matcher
self.losses = losses
self.focal_alpha = focal_alpha
def loss_labels(self, outputs, targets, indices, num_boxes, log=True):
"""Classification loss (Binary focal loss).
Args:
outputs (dict[torch.Tensor]): computed outputs
targets List[dict]): target annotations.
targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
indices (list): matching indices
num_boxes (int): number of bounding boxes
Returns:
classification loss
"""
assert 'pred_logits' in outputs
src_logits = outputs['pred_logits']
idx = self._get_src_permutation_idx(indices)
target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)])
target_classes = torch.full(src_logits.shape[:2], self.num_classes,
dtype=torch.int64, device=src_logits.device)
target_classes[idx] = target_classes_o
target_classes_onehot = torch.zeros([src_logits.shape[0], src_logits.shape[1], src_logits.shape[2] + 1],
dtype=src_logits.dtype, layout=src_logits.layout, device=src_logits.device)
target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1)
target_classes_onehot = target_classes_onehot[:, :, :-1]
loss_ce = sigmoid_focal_loss(src_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2) * src_logits.shape[1]
losses = {'loss_ce': loss_ce}
if log:
# TODO this should probably be a separate loss, not hacked in this one here
losses['class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0]
return losses
def loss_boxes(self, outputs, targets, indices, num_boxes):
"""Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss.
Args:
outputs (dict[torch.Tensor]): computed outputs
targets (List[dict]): target annotations
targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]
target boxes are expected in format (center_x, center_y, w, h), normalized by the image size.
indices (list): matching indices
num_boxes (int): number of bounding boxes
Returns:
bbox loss and giou loss
"""
assert 'pred_boxes' in outputs
idx = self._get_src_permutation_idx(indices)
src_boxes = outputs['pred_boxes'][idx]
target_boxes = torch.cat([t['boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0)
loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none')
losses = {}
losses['loss_bbox'] = loss_bbox.sum() / num_boxes
loss_giou = 1 - torch.diag(box_ops.generalized_box_iou(
box_ops.box_cxcywh_to_xyxy(src_boxes),
box_ops.box_cxcywh_to_xyxy(target_boxes)))
losses['loss_giou'] = loss_giou.sum() / num_boxes
# calculate the x,y and h,w loss
with torch.no_grad():
losses['loss_xy'] = loss_bbox[..., :2].sum() / num_boxes
losses['loss_hw'] = loss_bbox[..., 2:].sum() / num_boxes
return losses
def _get_src_permutation_idx(self, indices):
"""Permute predictions following indices.
Args:
indices (list): matching indices.
"""
batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])
src_idx = torch.cat([src for (src, _) in indices])
return batch_idx, src_idx
def _get_tgt_permutation_idx(self, indices):
"""Permute targets following indices.
Args:
indices (list): matching indices.
"""
batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])
tgt_idx = torch.cat([tgt for (_, tgt) in indices])
return batch_idx, tgt_idx
def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs):
"""Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss.
Args:
loss (str): name of the loss to get
outputs (dict[torch.Tensor]): computed outputs
targets (List[dict]): target annotations
indices (list): matching indices
num_boxes (int): number of bounding boxes
Returns:
the loss value given the loss name
"""
loss_map = {
'labels': self.loss_labels,
'boxes': self.loss_boxes,
}
assert loss in loss_map, f'do you really want to compute {loss} loss?'
return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs)
def forward(self, outputs, targets, return_indices=False):
""" Performs the loss computation.
Args:
outputs (dict[torch.Tensor]): dict of tensors, see the output specification of the model for the format
targets (List[dict]): list of dicts, such that len(targets) == batch_size.
The expected keys in each dict depends on the losses applied, see each loss' doc
Returns:
losses (dict): Dictionary of computed losses
"""
outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'}
device = next(iter(outputs.values())).device
indices = self.matcher(outputs_without_aux, targets)
if return_indices:
indices0_copy = indices
indices_list = []
        # Compute the average number of target boxes across all nodes, for normalization purposes
num_boxes = sum(len(t["labels"]) for t in targets)
num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=device)
if is_dist_avail_and_initialized():
torch.distributed.all_reduce(num_boxes)
num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item()
# Compute all the requested losses
losses = {}
# prepare for dn loss
dn_meta = outputs['dn_meta']
if self.training and dn_meta and 'output_known_lbs_bboxes' in dn_meta:
output_known_lbs_bboxes, single_pad, scalar = self.prep_for_dn(dn_meta)
dn_pos_idx = []
dn_neg_idx = []
for i in range(len(targets)):
if len(targets[i]['labels']) > 0:
                    t = torch.arange(len(targets[i]['labels'])).long().cuda()
t = t.unsqueeze(0).repeat(scalar, 1)
tgt_idx = t.flatten()
output_idx = (torch.tensor(range(scalar)) * single_pad).long().cuda().unsqueeze(1) + t
output_idx = output_idx.flatten()
else:
output_idx = tgt_idx = torch.tensor([]).long().cuda()
dn_pos_idx.append((output_idx, tgt_idx))
dn_neg_idx.append((output_idx + single_pad // 2, tgt_idx))
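            # Each de-noising group occupies single_pad query slots: the first half holds
            # positive (noised ground-truth) queries and the second half their negative
            # counterparts, hence the single_pad // 2 offset above.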
output_known_lbs_bboxes = dn_meta['output_known_lbs_bboxes']
l_dict = {}
for loss in self.losses:
kwargs = {}
if 'labels' in loss:
kwargs = {'log': False}
l_dict.update(self.get_loss(loss, output_known_lbs_bboxes, targets, dn_pos_idx, num_boxes * scalar, **kwargs))
l_dict = {f'{k}_dn': v for k, v in l_dict.items()}
losses.update(l_dict)
else:
l_dict = dict()
l_dict['loss_bbox_dn'] = torch.as_tensor(0.).to('cuda')
l_dict['loss_giou_dn'] = torch.as_tensor(0.).to('cuda')
l_dict['loss_ce_dn'] = torch.as_tensor(0.).to('cuda')
l_dict['loss_xy_dn'] = torch.as_tensor(0.).to('cuda')
l_dict['loss_hw_dn'] = torch.as_tensor(0.).to('cuda')
losses.update(l_dict)
for loss in self.losses:
losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))
# In case of auxiliary losses, we repeat this process with the output of each intermediate layer.
if 'aux_outputs' in outputs:
for idx, aux_outputs in enumerate(outputs['aux_outputs']):
indices = self.matcher(aux_outputs, targets)
if return_indices:
indices_list.append(indices)
for loss in self.losses:
kwargs = {}
if loss == 'labels':
# Logging is enabled only for the last layer
kwargs = {'log': False}
l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs)
l_dict = {k + f'_{idx}': v for k, v in l_dict.items()}
losses.update(l_dict)
if self.training and dn_meta and 'output_known_lbs_bboxes' in dn_meta:
aux_outputs_known = output_known_lbs_bboxes['aux_outputs'][idx]
l_dict = {}
for loss in self.losses:
kwargs = {}
if 'labels' in loss:
kwargs = {'log': False}
l_dict.update(self.get_loss(loss, aux_outputs_known, targets, dn_pos_idx,
num_boxes * scalar, **kwargs))
l_dict = {k + f'_dn_{idx}': v for k, v in l_dict.items()}
losses.update(l_dict)
else:
l_dict = dict()
l_dict['loss_bbox_dn'] = torch.as_tensor(0.).to('cuda')
l_dict['loss_giou_dn'] = torch.as_tensor(0.).to('cuda')
l_dict['loss_ce_dn'] = torch.as_tensor(0.).to('cuda')
l_dict['loss_xy_dn'] = torch.as_tensor(0.).to('cuda')
l_dict['loss_hw_dn'] = torch.as_tensor(0.).to('cuda')
l_dict = {k + f'_{idx}': v for k, v in l_dict.items()}
losses.update(l_dict)
# interm_outputs loss
if 'interm_outputs' in outputs:
interm_outputs = outputs['interm_outputs']
indices = self.matcher(interm_outputs, targets)
if return_indices:
indices_list.append(indices)
for loss in self.losses:
kwargs = {}
if loss == 'labels':
# Logging is enabled only for the last layer
kwargs = {'log': False}
l_dict = self.get_loss(loss, interm_outputs, targets, indices, num_boxes, **kwargs)
l_dict = {f'{k}_interm': v for k, v in l_dict.items()}
losses.update(l_dict)
# enc output loss
if 'enc_outputs' in outputs:
for i, enc_outputs in enumerate(outputs['enc_outputs']):
indices = self.matcher(enc_outputs, targets)
if return_indices:
indices_list.append(indices)
for loss in self.losses:
kwargs = {}
if loss == 'labels':
# Logging is enabled only for the last layer
kwargs = {'log': False}
l_dict = self.get_loss(loss, enc_outputs, targets, indices, num_boxes, **kwargs)
l_dict = {k + f'_enc_{i}': v for k, v in l_dict.items()}
losses.update(l_dict)
if return_indices:
indices_list.append(indices0_copy)
return losses, indices_list
return losses
def prep_for_dn(self, dn_meta):
"""Prepare for de-noising.
Args:
dn_meta (dict): meta information about de-noising.
"""
output_known_lbs_bboxes = dn_meta['output_known_lbs_bboxes']
num_dn_groups, pad_size = dn_meta['num_dn_group'], dn_meta['pad_size']
assert pad_size % num_dn_groups == 0
single_pad = pad_size // num_dn_groups
return output_known_lbs_bboxes, single_pad, num_dn_groups
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/dino/model/criterion.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" DINO model. """
import copy
import math
import warnings
import torch
import torch.nn.functional as F
from torch import nn
from nvidia_tao_pytorch.cv.dino.model.dn_components import prepare_for_cdn, dn_post_process
from nvidia_tao_pytorch.cv.dino.model.model_utils import MLP
from nvidia_tao_pytorch.cv.deformable_detr.utils.misc import (tensor_from_tensor_list, inverse_sigmoid)
class DINO(nn.Module):
""" This is the Cross-Attention Detector module that performs object detection """
def __init__(self, backbone, position_embedding, transformer,
num_classes, num_queries,
aux_loss=False,
random_refpoints_xy=False,
fix_refpoints_hw=-1,
num_feature_levels=1,
nheads=8,
# two stage
two_stage_type='no', # ['no', 'standard']
two_stage_add_query_num=0,
dec_pred_class_embed_share=True,
dec_pred_bbox_embed_share=True,
two_stage_class_embed_share=True,
two_stage_bbox_embed_share=True,
decoder_sa_type='sa',
num_patterns=0,
dn_number=100,
dn_box_noise_scale=0.4,
dn_label_noise_ratio=0.5,
dn_labelbook_size=100,
export=False):
""" Initializes the model.
Args:
backbone (torch.Tensor): torch module of the backbone to be used. See backbone.py.
transformer (torch.Tensor): torch module of the transformer architecture. See deformable_transformer.py.
num_classes (int): number of object classes.
            num_queries (int): number of object queries, i.e. detection slots. This is the maximal number of objects
                DINO can detect in a single image. For COCO, we recommend 300 queries.
aux_loss (bool): True if auxiliary decoding losses (loss at each decoder layer) are to be used.
random_refpoints_xy (bool): whether to randomly initialize reference point embedding.
            fix_refpoints_hw (int): -1(default): learn w and h for each box separately
>0 : given fixed number
-2 : learn a shared w and h
num_feature_levels (int): Number of levels to extract from the backbone feature maps.
nheads (int): number of heads.
two_stage_type (str): type of two stage in DINO.
two_stage_add_query_num (int): size of the target embedding.
dec_pred_class_embed_share (bool): whether to share embedding for decoder classification prediction.
dec_pred_bbox_embed_share (bool): whether to share embedding for decoder bounding box prediction.
two_stage_bbox_embed_share (bool): whether to share embedding for two stage bounding box.
two_stage_class_embed_share (bool): whether to share embedding for two stage classification.
decoder_layer_noise (bool): a flag to add random perturbation to decoder query.
num_patterns (int): number of patterns in encoder-decoder.
            dn_number (int): the number of de-noising queries in DINO
dn_box_noise_scale (float): the scale of noise applied to boxes during contrastive de-noising.
If this value is 0, noise is not applied.
dn_label_noise_ratio (float): the scale of noise applied to labels during contrastive de-noising.
If this value is 0, noise is not applied.
            dn_labelbook_size (int): de-noising labelbook size; should be the same as the number of classes
export (bool): flag to indicate if the current model is being used for ONNX export.
"""
super().__init__()
self.num_queries = num_queries
self.transformer = transformer
self.num_classes = num_classes
self.hidden_dim = hidden_dim = transformer.d_model
self.num_feature_levels = num_feature_levels
self.nheads = nheads
self.label_enc = nn.Embedding(dn_labelbook_size + 1, hidden_dim)
# setting query dim
self.query_dim = 4
self.random_refpoints_xy = random_refpoints_xy
self.fix_refpoints_hw = fix_refpoints_hw
# for dn training
self.num_patterns = num_patterns
self.dn_number = dn_number
self.dn_box_noise_scale = dn_box_noise_scale
self.dn_label_noise_ratio = dn_label_noise_ratio
self.dn_labelbook_size = dn_labelbook_size
# prepare input projection layers
if num_feature_levels > 1:
num_backbone_outs = len(backbone.num_channels)
input_proj_list = []
for _ in range(num_backbone_outs):
in_channels = backbone.num_channels[_]
input_proj_list.append(nn.Sequential(
nn.Conv2d(in_channels, hidden_dim, kernel_size=1),
nn.GroupNorm(32, hidden_dim),
))
for _ in range(num_feature_levels - num_backbone_outs):
input_proj_list.append(nn.Sequential(
nn.Conv2d(in_channels, hidden_dim, kernel_size=3, stride=2, padding=1),
nn.GroupNorm(32, hidden_dim),
))
in_channels = hidden_dim
self.input_proj = nn.ModuleList(input_proj_list)
else:
assert two_stage_type == 'no', "two_stage_type should be no if num_feature_levels=1 !!!"
self.input_proj = nn.ModuleList([
nn.Sequential(
nn.Conv2d(backbone.num_channels[-1], hidden_dim, kernel_size=1),
nn.GroupNorm(32, hidden_dim),
)])
self.position_embedding = position_embedding
self.export = export
self.backbone = backbone
self.aux_loss = aux_loss
self.export = export
if self.export:
warnings.warn("Setting aux_loss to be False for export")
self.aux_loss = False
self.box_pred_damping = None
self.iter_update = True
# prepare pred layers
self.dec_pred_class_embed_share = dec_pred_class_embed_share
self.dec_pred_bbox_embed_share = dec_pred_bbox_embed_share
# prepare class & box embed
_class_embed = nn.Linear(hidden_dim, num_classes)
_bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)
# init the two embed layers
prior_prob = 0.01
bias_value = -math.log((1 - prior_prob) / prior_prob)
_class_embed.bias.data = torch.ones(self.num_classes) * bias_value
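        # Focal-loss style prior initialization: with prior_prob = 0.01 the bias equals
        # inverse_sigmoid(0.01), so every class starts with roughly a 1% predicted probability.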
nn.init.constant_(_bbox_embed.layers[-1].weight.data, 0)
nn.init.constant_(_bbox_embed.layers[-1].bias.data, 0)
if dec_pred_bbox_embed_share:
box_embed_layerlist = [_bbox_embed for i in range(transformer.num_decoder_layers)]
else:
box_embed_layerlist = [copy.deepcopy(_bbox_embed) for i in range(transformer.num_decoder_layers)]
if dec_pred_class_embed_share:
class_embed_layerlist = [_class_embed for i in range(transformer.num_decoder_layers)]
else:
class_embed_layerlist = [copy.deepcopy(_class_embed) for i in range(transformer.num_decoder_layers)]
self.bbox_embed = nn.ModuleList(box_embed_layerlist)
self.class_embed = nn.ModuleList(class_embed_layerlist)
self.transformer.decoder.bbox_embed = self.bbox_embed
self.transformer.decoder.class_embed = self.class_embed
# two stage
self.two_stage_type = two_stage_type
self.two_stage_add_query_num = two_stage_add_query_num
assert two_stage_type in ['no', 'standard'], "unknown param {} of two_stage_type".format(two_stage_type)
if two_stage_type != 'no':
if two_stage_bbox_embed_share:
if not (dec_pred_class_embed_share and dec_pred_bbox_embed_share):
raise ValueError("two_stage_bbox_embed_share was set to true but "
f"dec_pred_class_embed_share was set to {dec_pred_class_embed_share} "
f"dec_pred_bbox_embed_share was set to {dec_pred_bbox_embed_share}")
self.transformer.enc_out_bbox_embed = _bbox_embed
else:
self.transformer.enc_out_bbox_embed = copy.deepcopy(_bbox_embed)
if two_stage_class_embed_share:
assert dec_pred_class_embed_share and dec_pred_bbox_embed_share
self.transformer.enc_out_class_embed = _class_embed
else:
self.transformer.enc_out_class_embed = copy.deepcopy(_class_embed)
self.refpoint_embed = None
if self.two_stage_add_query_num > 0:
self.init_ref_points(two_stage_add_query_num)
self.decoder_sa_type = decoder_sa_type
assert decoder_sa_type in ['sa', 'ca_label', 'ca_content']
if decoder_sa_type == 'ca_label':
self.label_embedding = nn.Embedding(num_classes, hidden_dim)
for layer in self.transformer.decoder.layers:
layer.label_embedding = self.label_embedding
else:
for layer in self.transformer.decoder.layers:
layer.label_embedding = None
self.label_embedding = None
self._reset_parameters()
def _reset_parameters(self):
# init input_proj
for proj in self.input_proj:
nn.init.xavier_uniform_(proj[0].weight, gain=1)
nn.init.constant_(proj[0].bias, 0)
def init_ref_points(self, use_num_queries):
"""Initialize reference points"""
self.refpoint_embed = nn.Embedding(use_num_queries, self.query_dim)
if self.random_refpoints_xy:
self.refpoint_embed.weight.data[:, :2].uniform_(0, 1)
self.refpoint_embed.weight.data[:, :2] = inverse_sigmoid(self.refpoint_embed.weight.data[:, :2])
self.refpoint_embed.weight.data[:, :2].requires_grad = False
if self.fix_refpoints_hw > 0:
print("fix_refpoints_hw: {}".format(self.fix_refpoints_hw))
assert self.random_refpoints_xy
self.refpoint_embed.weight.data[:, 2:] = self.fix_refpoints_hw
self.refpoint_embed.weight.data[:, 2:] = inverse_sigmoid(self.refpoint_embed.weight.data[:, 2:])
self.refpoint_embed.weight.data[:, 2:].requires_grad = False
elif int(self.fix_refpoints_hw) == -1:
pass
elif int(self.fix_refpoints_hw) == -2:
print('learn a shared h and w')
assert self.random_refpoints_xy
self.refpoint_embed = nn.Embedding(use_num_queries, 2)
self.refpoint_embed.weight.data[:, :2].uniform_(0, 1)
self.refpoint_embed.weight.data[:, :2] = inverse_sigmoid(self.refpoint_embed.weight.data[:, :2])
self.refpoint_embed.weight.data[:, :2].requires_grad = False
self.hw_embed = nn.Embedding(1, 1)
else:
raise NotImplementedError('Unknown fix_refpoints_hw {}'.format(self.fix_refpoints_hw))
def forward(self, samples, targets=None):
""" Forward function of DINO Model
Args:
samples (torch.Tensor): batched images, of shape [batch_size x 3 x H x W]
targets (dict): batched annotations
Returns:
pred_logits (torch.Tensor): the classification logits (including no-object) for all queries. Shape= [batch_size x num_queries x (num_classes + 1)]
            pred_boxes (torch.Tensor): the normalized box coordinates for all queries, represented as (center_x, center_y, width, height)
"""
if not isinstance(samples, torch.Tensor):
samples = tensor_from_tensor_list(samples)
features = self.backbone(samples)
srcs = []
masks = []
for level, feat in enumerate(features):
src = feat[0]
mask = (feat[1].float()[:, 0].bool())
srcs.append(self.input_proj[level](src))
masks.append(mask)
if self.num_feature_levels > len(srcs):
_len_srcs = len(srcs)
for li in range(_len_srcs, self.num_feature_levels):
if li == _len_srcs:
src = self.input_proj[li](features[-1][0])
else:
src = self.input_proj[li](srcs[-1])
srcs.append(src)
if self.export:
m = torch.zeros((src.shape[0], 1, src.shape[2], src.shape[3]), dtype=src.dtype, device=src.device)
else:
m = samples[:, 3:4]
mask = F.interpolate(m.float(), size=src.shape[-2:]).to(torch.bool)
masks.append(mask.float()[:, 0].bool())
# build positional embedding
pos = []
for mask in masks:
if self.export:
N, H, W = mask.shape
tensor_shape = torch.tensor([N, H, W], device=src.device)
pos.append(self.position_embedding(tensor_shape, src.device))
else:
not_mask = ~mask
pos.append(self.position_embedding(not_mask, src.device))
if self.dn_number > 0 or targets is not None:
input_query_label, input_query_bbox, attn_mask, dn_meta =\
prepare_for_cdn(dn_args=(targets, self.dn_number, self.dn_label_noise_ratio, self.dn_box_noise_scale),
training=self.training, num_queries=self.num_queries, num_classes=self.num_classes,
hidden_dim=self.hidden_dim, label_enc=self.label_enc)
else:
assert targets is None
input_query_bbox = input_query_label = attn_mask = dn_meta = None
hs, reference, hs_enc, ref_enc, init_box_proposal = self.transformer(srcs, masks, input_query_bbox, pos, input_query_label, attn_mask)
# In case num object=0
hs[0] += self.label_enc.weight[0, 0] * 0.0
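        # Multiplying by 0.0 leaves hs unchanged but keeps label_enc in the autograd graph,
        # which avoids unused-parameter issues (e.g. with DDP) when no de-noising queries exist.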
# deformable-detr-like anchor update
# reference_before_sigmoid = inverse_sigmoid(reference[:-1]) # n_dec, bs, nq, 4
outputs_coord_list = []
for layer_ref_sig, layer_bbox_embed, layer_hs in zip(reference[:-1], self.bbox_embed, hs):
layer_delta_unsig = layer_bbox_embed(layer_hs)
layer_outputs_unsig = layer_delta_unsig + inverse_sigmoid(layer_ref_sig)
layer_outputs_unsig = layer_outputs_unsig.sigmoid()
outputs_coord_list.append(layer_outputs_unsig)
outputs_coord_list = torch.stack(outputs_coord_list)
outputs_class = torch.stack([layer_cls_embed(layer_hs) for
layer_cls_embed, layer_hs in zip(self.class_embed, hs)])
if self.dn_number > 0 and dn_meta is not None:
outputs_class, outputs_coord_list = \
dn_post_process(outputs_class, outputs_coord_list,
dn_meta, self.aux_loss, self._set_aux_loss)
out = {'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coord_list[-1]}
if self.aux_loss:
out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord_list)
# for encoder output
if hs_enc is not None:
# prepare intermediate outputs
interm_coord = ref_enc[-1]
interm_class = self.transformer.enc_out_class_embed(hs_enc[-1])
if not self.export:
out['interm_outputs'] = {'pred_logits': interm_class, 'pred_boxes': interm_coord}
out['interm_outputs_for_matching_pre'] = {'pred_logits': interm_class, 'pred_boxes': init_box_proposal}
# prepare enc outputs
if hs_enc.shape[0] > 1:
enc_outputs_coord = []
enc_outputs_class = []
for layer_box_embed, layer_class_embed, layer_hs_enc, layer_ref_enc in zip(self.enc_bbox_embed, self.enc_class_embed, hs_enc[:-1], ref_enc[:-1]):
layer_enc_delta_unsig = layer_box_embed(layer_hs_enc)
layer_enc_outputs_coord_unsig = layer_enc_delta_unsig + inverse_sigmoid(layer_ref_enc)
layer_enc_outputs_coord = layer_enc_outputs_coord_unsig.sigmoid()
layer_enc_outputs_class = layer_class_embed(layer_hs_enc)
enc_outputs_coord.append(layer_enc_outputs_coord)
enc_outputs_class.append(layer_enc_outputs_class)
if not self.export:
out['enc_outputs'] = [
{'pred_logits': a, 'pred_boxes': b} for a, b in zip(enc_outputs_class, enc_outputs_coord)
]
if not self.export:
out['dn_meta'] = dn_meta
return out
@torch.jit.unused
def _set_aux_loss(self, outputs_class, outputs_coord):
"""This is a workaround to make torchscript happy, as torchscript
doesn't support dictionary with non-homogeneous values, such
as a dict having both a Tensor and a list.
"""
return [{'pred_logits': a, 'pred_boxes': b}
for a, b in zip(outputs_class[:-1], outputs_coord[:-1])]
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/dino/model/dino.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The build nn module model."""
import torch.nn as nn
from nvidia_tao_pytorch.cv.dino.model.backbone import Backbone, Joiner
from nvidia_tao_pytorch.cv.dino.model.deformable_transformer import DeformableTransformer
from nvidia_tao_pytorch.cv.dino.model.dino import DINO
from nvidia_tao_pytorch.cv.dino.model.position_encoding import PositionEmbeddingSineHW, PositionEmbeddingSineHWExport
class DINOModel(nn.Module):
"""DINO model module."""
def __init__(self,
num_classes=4,
hidden_dim=256,
pretrained_backbone_path=None,
backbone='resnet50',
train_backbone=True,
num_feature_levels=2,
nheads=8,
enc_layers=6,
dec_layers=6,
dim_feedforward=1024,
dec_n_points=4,
enc_n_points=4,
num_queries=300,
aux_loss=True,
dilation=False,
dropout_ratio=0.3,
export=False,
activation_checkpoint=True,
return_interm_indices=[1, 2, 3, 4],
pre_norm=False,
num_patterns=0,
decoder_layer_noise=False,
dln_xy_noise=0.2,
dln_hw_noise=0.2,
add_channel_attention=False,
random_refpoints_xy=False,
two_stage_type='standard',
two_stage_pat_embed=0,
two_stage_add_query_num=0,
two_stage_learn_wh=False,
two_stage_keep_all_tokens=False,
decoder_sa_type='sa',
embed_init_tgt=True,
use_detached_boxes_dec_out=False,
fix_refpoints_hw=-1,
dec_pred_class_embed_share=True,
dec_pred_bbox_embed_share=True,
two_stage_bbox_embed_share=False,
two_stage_class_embed_share=False,
use_dn=True,
dn_number=100,
dn_box_noise_scale=1.0,
dn_label_noise_ratio=0.5,
pe_temperatureH=20,
pe_temperatureW=20
):
"""Initialize DINO Model.
Args:
num_classes (int): number of classes for the model.
hidden_dim (int): size of the hidden dimension.
pretrained_backbone_path (str): pretrained backbone path.
If not provided, train from scratch.
backbone (str): type of backbone architecture.
train_backbone (bool): whether to train backbone or not.
num_feature_levels (int): Number of levels to extract from the backbone feature maps.
nheads (int): number of heads.
enc_layers (int): number of encoder layers.
dec_layers (int): number of decoder layers.
dim_feedforward (int): dimension of the feedforward layer.
dec_n_points (int): number of reference points in the decoder.
enc_n_points (int): number of reference points in the encoder.
num_queries (int): number of queries to be used in D-DETR encoder-decoder.
aux_loss (bool): flag to indicate if auxiliary loss is used.
dilation (bool): flag to indicate if dilation is used (only for ResNet).
dropout_ratio (float): probability for the dropout layer.
export (bool): flag to indicate if the current model is being used for ONNX export.
activation_checkpoint (bool): flag to indicate if activation checkpointing is used.
return_interm_indices (list): indices of feature level to use.
pre_norm (bool): whether to add LayerNorm before the encoder.
num_patterns (int): number of patterns in encoder-decoder.
decoder_layer_noise (bool): a flag to add random perturbation to decoder query.
dln_xy_noise (float): scale of noise applied along xy dimension during random perturbation.
dln_hw_noise (float): scale of noise applied along wh dimension during random perturbation.
add_channel_attention (bool): whether to add channel attention.
random_refpoints_xy (bool): whether to randomly initialize reference point embedding.
two_stage_type (str): type of two stage in DINO.
two_stage_pat_embed (int): size of the patch embedding for the second stage.
two_stage_add_query_num (int): size of the target embedding.
two_stage_learn_wh (bool): add embedding for learnable w and h.
two_stage_keep_all_tokens (bool): whether to keep all tokens in the second stage.
decoder_sa_type (str): type of self-attention in the decoder.
embed_init_tgt (bool): whether to add target embedding.
use_detached_boxes_dec_out (bool): use detached box decoder output in the reference points.
fix_refpoints_hw (int): If this value is -1, width and height are learned separately for each box.
If this value is -2, a shared w and h are learned.
A value greater than 0 specifies learning with a fixed number.
dec_pred_class_embed_share (bool): whether to share embedding for decoder classification prediction.
dec_pred_bbox_embed_share (bool): whether to share embedding for decoder bounding box prediction.
two_stage_bbox_embed_share (bool): whether to share embedding for two stage bounding box.
two_stage_class_embed_share (bool): whether to share embedding for two stage classification.
use_dn (bool): a flag specifying whether to enable contrastive de-noising training in DINO.
dn_number (int): the number of de-noising queries in DINO.
dn_box_noise_scale (float): the scale of noise applied to boxes during contrastive de-noising.
If this value is 0, noise is not applied.
dn_label_noise_ratio (float): the scale of noise applied to labels during contrastive de-noising.
If this value is 0, noise is not applied.
pe_temperatureH (int): the temperature applied to the height dimension of Positional Sine Embedding.
pe_temperatureW (int): the temperature applied to the width dimension of Positional Sine Embedding.
"""
super(__class__, self).__init__() # pylint:disable=undefined-variable
# TODO: Update position_embedding in the build stage
# build positional encoding. only support PositionEmbeddingSine
if export:
position_embedding = PositionEmbeddingSineHWExport(hidden_dim // 2,
temperatureH=pe_temperatureH,
temperatureW=pe_temperatureW,
normalize=True)
else:
position_embedding = PositionEmbeddingSineHW(hidden_dim // 2,
temperatureH=pe_temperatureH,
temperatureW=pe_temperatureW,
normalize=True)
# build backbone
if num_feature_levels != len(return_interm_indices):
raise ValueError(f"num_feature_levels: {num_feature_levels} does not match the size of "
f"return_interm_indices: {return_interm_indices}")
# Index 4 is not part of the backbone but taken from index 3 with conv 3x3 stride 2
return_interm_indices = [r for r in return_interm_indices if r != 4]
backbone_only = Backbone(backbone,
pretrained_backbone_path,
train_backbone,
return_interm_indices,
dilation,
export,
activation_checkpoint)
# Keep joiner for backward compatibility
joined_backbone = Joiner(backbone_only)
decoder_query_perturber = None
if decoder_layer_noise:
from nvidia_tao_pytorch.cv.dino.model.model_utils import RandomBoxPerturber
decoder_query_perturber = RandomBoxPerturber(x_noise_scale=dln_xy_noise,
y_noise_scale=dln_xy_noise,
w_noise_scale=dln_hw_noise,
h_noise_scale=dln_hw_noise)
# build transformer
transformer = DeformableTransformer(
d_model=hidden_dim,
nhead=nheads,
export=export,
activation_checkpoint=activation_checkpoint,
num_encoder_layers=enc_layers,
num_decoder_layers=dec_layers,
dim_feedforward=dim_feedforward,
dropout=dropout_ratio,
activation="relu",
return_intermediate_dec=True,
num_feature_levels=num_feature_levels,
enc_n_points=enc_n_points,
dec_n_points=dec_n_points,
num_queries=num_queries,
normalize_before=pre_norm,
num_patterns=num_patterns,
modulate_hw_attn=True,
deformable_decoder=True,
decoder_query_perturber=decoder_query_perturber,
add_channel_attention=add_channel_attention,
random_refpoints_xy=random_refpoints_xy,
# two stage
two_stage_type=two_stage_type, # ['no', 'standard', 'early']
two_stage_pat_embed=two_stage_pat_embed,
two_stage_add_query_num=two_stage_add_query_num,
two_stage_learn_wh=two_stage_learn_wh,
two_stage_keep_all_tokens=two_stage_keep_all_tokens,
dec_layer_number=None,
rm_self_attn_layers=None,
key_aware_type=None,
layer_share_type=None,
rm_detach=None,
decoder_sa_type=decoder_sa_type,
module_seq=['sa', 'ca', 'ffn'],
embed_init_tgt=embed_init_tgt,
use_detached_boxes_dec_out=use_detached_boxes_dec_out
)
# build deformable detr model
self.model = DINO(joined_backbone,
position_embedding,
transformer,
num_classes=num_classes,
num_queries=num_queries,
aux_loss=aux_loss,
export=export,
random_refpoints_xy=random_refpoints_xy,
fix_refpoints_hw=fix_refpoints_hw,
num_feature_levels=num_feature_levels,
nheads=nheads,
dec_pred_class_embed_share=dec_pred_class_embed_share,
dec_pred_bbox_embed_share=dec_pred_bbox_embed_share,
# two stage
two_stage_type=two_stage_type,
# box_share
two_stage_bbox_embed_share=two_stage_bbox_embed_share,
two_stage_class_embed_share=two_stage_class_embed_share,
decoder_sa_type=decoder_sa_type,
num_patterns=num_patterns,
dn_number=dn_number if use_dn else 0,
dn_box_noise_scale=dn_box_noise_scale,
dn_label_noise_ratio=dn_label_noise_ratio,
dn_labelbook_size=num_classes)
def forward(self, x, targets=None):
"""model forward function"""
x = self.model(x, targets)
return x
def build_model(experiment_config,
export=False):
""" Build dino model according to configuration.
Args:
experiment_config (OmegaConf): experiment configuration.
export (bool): flag to indicate onnx export.
Returns:
model (nn.Module): DINO model.
"""
model_config = experiment_config.model
dataset_config = experiment_config.dataset
num_classes = dataset_config.num_classes
backbone = model_config.backbone
dropout_ratio = model_config.dropout_ratio
hidden_dim = model_config.hidden_dim
num_feature_levels = model_config.num_feature_levels
nheads = model_config.nheads
enc_layers = model_config.enc_layers
dec_layers = model_config.dec_layers
dim_feedforward = model_config.dim_feedforward
dec_n_points = model_config.dec_n_points
enc_n_points = model_config.enc_n_points
num_queries = model_config.num_queries
aux_loss = model_config.aux_loss
dilation = model_config.dilation
train_backbone = model_config.train_backbone
pretrained_backbone = model_config.pretrained_backbone_path
# DINO specific
return_interm_indices = model_config.return_interm_indices
pre_norm = model_config.pre_norm
two_stage_type = model_config.two_stage_type
decoder_sa_type = model_config.decoder_sa_type
embed_init_tgt = model_config.embed_init_tgt
fix_refpoints_hw = model_config.fix_refpoints_hw
use_dn = model_config.use_dn
dn_number = model_config.dn_number
dn_box_noise_scale = model_config.dn_box_noise_scale
dn_label_noise_ratio = model_config.dn_label_noise_ratio
pe_temperatureH = model_config.pe_temperatureH
pe_temperatureW = model_config.pe_temperatureW
activation_checkpoint = experiment_config.train.activation_checkpoint
model = DINOModel(num_classes=num_classes,
hidden_dim=hidden_dim,
pretrained_backbone_path=pretrained_backbone,
backbone=backbone,
train_backbone=train_backbone,
num_feature_levels=num_feature_levels,
nheads=nheads,
enc_layers=enc_layers,
dec_layers=dec_layers,
dim_feedforward=dim_feedforward,
dec_n_points=dec_n_points,
enc_n_points=enc_n_points,
num_queries=num_queries,
aux_loss=aux_loss,
dilation=dilation,
dropout_ratio=dropout_ratio,
export=export,
activation_checkpoint=activation_checkpoint,
return_interm_indices=return_interm_indices,
decoder_sa_type=decoder_sa_type,
embed_init_tgt=embed_init_tgt,
use_dn=use_dn,
dn_number=dn_number,
dn_box_noise_scale=dn_box_noise_scale,
dn_label_noise_ratio=dn_label_noise_ratio,
pe_temperatureH=pe_temperatureH,
pe_temperatureW=pe_temperatureW,
pre_norm=pre_norm,
two_stage_type=two_stage_type,
fix_refpoints_hw=fix_refpoints_hw,
)
return model
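# A minimal usage sketch (not part of the original file): build a DINO model from a hand-written
# OmegaConf config. The field values below are assumptions for illustration only; real runs should
# load the full experiment spec shipped with the task.
def _example_build_dino_from_minimal_cfg():
    """Illustrative only: construct a DINO model from a minimal config."""
    from omegaconf import OmegaConf
    cfg = OmegaConf.create({
        "dataset": {"num_classes": 91},
        "train": {"activation_checkpoint": True},
        "model": {
            "backbone": "resnet50", "train_backbone": True, "pretrained_backbone_path": None,
            "dropout_ratio": 0.0, "hidden_dim": 256, "num_feature_levels": 4,
            "nheads": 8, "enc_layers": 6, "dec_layers": 6, "dim_feedforward": 2048,
            "dec_n_points": 4, "enc_n_points": 4, "num_queries": 300,
            "aux_loss": True, "dilation": False,
            "return_interm_indices": [1, 2, 3, 4], "pre_norm": False,
            "two_stage_type": "standard", "decoder_sa_type": "sa",
            "embed_init_tgt": True, "fix_refpoints_hw": -1,
            "use_dn": True, "dn_number": 100, "dn_box_noise_scale": 1.0,
            "dn_label_noise_ratio": 0.5, "pe_temperatureH": 20, "pe_temperatureW": 20,
        },
    })
    # build_model only reads the fields listed above plus train.activation_checkpoint
    return build_model(cfg, export=False)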
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/dino/model/build_nn_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""optical inspection root module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/optical_inspection/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Action recognition config module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/optical_inspection/config/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default config file"""
from typing import Optional, List, Dict
from dataclasses import dataclass, field
from omegaconf import MISSING
@dataclass
class OIModelConfig:
"""Optical recognition model config."""
model_type: str = "Siamese_3"
margin: float = 2.0
model_backbone: str = "custom"
embedding_vectors: int = 5
imagenet_pretrained: bool = False
@dataclass
class OptimConfig:
"""Optimizer config."""
type: str = "Adam"
lr: float = 5e-4
momentum: float = 0.9
weight_decay: float = 5e-4
@dataclass
class OIAugmentationConfig:
"""Augmentation config."""
rgb_input_mean: List[float] = field(default_factory=lambda: [0.485, 0.456, 0.406])
rgb_input_std: List[float] = field(default_factory=lambda: [0.229, 0.224, 0.225])
@dataclass
class DataPathFormat:
"""Dataset Path experiment config."""
csv_path: str = MISSING
images_dir: str = MISSING
@dataclass
class OIDatasetConfig:
"""Dataset config."""
train_dataset: DataPathFormat = DataPathFormat()
validation_dataset: DataPathFormat = DataPathFormat()
test_dataset: DataPathFormat = DataPathFormat()
infer_dataset: DataPathFormat = DataPathFormat()
image_ext: Optional[str] = None
batch_size: int = 32
workers: int = 8
fpratio_sampling: float = 0.1
num_input: int = 8
input_map: Optional[Dict[str, int]] = None
grid_map: Optional[Dict[str, int]] = None
concat_type: Optional[str] = None
output_shape: List[int] = field(default_factory=lambda: [100, 100])
augmentation_config: OIAugmentationConfig = OIAugmentationConfig()
@dataclass
class TensorBoardLogger:
"""Configuration for the tensorboard logger."""
enabled: bool = False
infrequent_logging_frequency: int = 2 # Defined per epoch
@dataclass
class OITrainExpConfig:
"""Train experiment config."""
optim: OptimConfig = OptimConfig()
num_epochs: int = 10
checkpoint_interval: int = 2
validation_interval: int = 2
loss: Optional[str] = None
clip_grad_norm: float = 0.0
gpu_ids: List[int] = field(default_factory=lambda: [0])
results_dir: Optional[str] = None
tensorboard: Optional[TensorBoardLogger] = TensorBoardLogger()
resume_training_checkpoint_path: Optional[str] = None
pretrained_model_path: Optional[str] = None
@dataclass
class OIInferenceExpConfig:
"""Inference experiment config."""
checkpoint: str = MISSING
trt_engine: str = MISSING
gpu_id: int = 0
results_dir: Optional[str] = None
batch_size: int = 1
@dataclass
class OIEvalExpConfig:
"""Evaluation experiment config."""
checkpoint: str = MISSING
gpu_id: int = 0
batch_size: int = 1
results_dir: Optional[str] = None
@dataclass
class OIExportExpConfig:
"""Export experiment config."""
results_dir: Optional[str] = None
checkpoint: str = MISSING
onnx_file: Optional[str] = None
opset_version: Optional[int] = 12
gpu_id: int = 0
on_cpu: bool = False
input_height: int = 400
input_width: int = 100
input_channel: int = 3
batch_size: int = -1
do_constant_folding: bool = False
@dataclass
class CalibrationConfig:
"""Calibration config."""
cal_image_dir: List[str] = MISSING
cal_cache_file: str = MISSING
cal_batch_size: int = 1
cal_batches: int = 1
@dataclass
class TrtConfig:
"""Trt config."""
data_type: str = "fp16"
workspace_size: int = 1024
min_batch_size: int = 1
opt_batch_size: int = 1
max_batch_size: int = 1
calibration: CalibrationConfig = CalibrationConfig()
@dataclass
class OIGenTrtEngineExpConfig:
"""Gen TRT Engine experiment config."""
results_dir: Optional[str] = None
gpu_id: int = 0
onnx_file: str = MISSING
trt_engine: Optional[str] = None
input_channel: int = 3
input_width: int = 400
input_height: int = 100
opset_version: int = 12
batch_size: int = -1
verbose: bool = False
tensorrt: TrtConfig = TrtConfig()
@dataclass
class OIDatasetConvertConfig:
"""Dataset Convert experiment config."""
root_dataset_dir: Optional[str] = None
data_convert_output_dir: Optional[str] = None
train_pcb_dataset_dir: Optional[str] = None
val_pcb_dataset_dir: Optional[str] = None
all_pcb_dataset_dir: Optional[str] = None
golden_csv_dir: Optional[str] = None
project_name: Optional[str] = None
bot_top: Optional[str] = None
@dataclass
class OIExperimentConfig:
"""Experiment config."""
model: OIModelConfig = OIModelConfig()
dataset: OIDatasetConfig = OIDatasetConfig()
train: OITrainExpConfig = OITrainExpConfig()
evaluate: OIEvalExpConfig = OIEvalExpConfig()
export: OIExportExpConfig = OIExportExpConfig()
inference: OIInferenceExpConfig = OIInferenceExpConfig()
dataset_convert: OIDatasetConvertConfig = OIDatasetConvertConfig()
gen_trt_engine: OIGenTrtEngineExpConfig = OIGenTrtEngineExpConfig()
encryption_key: Optional[str] = None
results_dir: str = MISSING
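# A minimal usage sketch (not part of the original file): materialize the schema above with
# OmegaConf and read its defaults. Fields marked MISSING (e.g. results_dir and dataset paths)
# must still be supplied by the user's spec file before any task can run.
def _example_default_experiment_config():
    """Illustrative only: instantiate the structured config with its default values."""
    from omegaconf import OmegaConf
    cfg = OmegaConf.structured(OIExperimentConfig)
    assert cfg.train.num_epochs == 10       # default from OITrainExpConfig
    assert cfg.dataset.batch_size == 32     # default from OIDatasetConfig
    return cfg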
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/optical_inspection/config/default_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Group transformation for action recognition"""
import numpy as np
from PIL import Image
import random
import torch
class GroupWorker(object):
"""Wrapper for group transformation using torchvision."""
def __init__(self, worker):
"""Init worker."""
self.worker = worker
def __call__(self, img_group):
"""img_group: PIL Images list."""
return [self.worker(img) for img in img_group]
class GroupRandomCrop(object):
"""RandomCrop for the group of frames."""
def __init__(self, size):
"""Init."""
self.size = size
def __call__(self, img_group):
"""img_group: PIL Images list."""
w, h = img_group[0].size
th, tw = self.size
out_images = []
x1 = random.randint(0, w - tw)
y1 = random.randint(0, h - th)
for img in img_group:
assert (img.size[0] == w and img.size[1] == h)
if w == tw and h == th:
out_images.append(img)
else:
out_images.append(img.crop((x1, y1, x1 + tw, y1 + th)))
return out_images
class MultiScaleCrop(object):
"""
Crop images with a list of randomly selected scales.
Args:
input_size (int | tuple[int]): (w, h) of network input.
scales (list[float]): width and height scales to be selected.
"""
def __init__(self,
input_size,
scales=[1, 0.875, 0.75, 0.66],
max_distort=0,
fix_crop=True,
more_fix_crop=True):
"""max_distort: introducing aspect-ratio augmentation."""
self.scales = scales
self.max_distort = max_distort
self.fix_crop = fix_crop
self.more_fix_crop = more_fix_crop
self.input_size = input_size
self.interpolation = Image.BILINEAR
def __call__(self, img_group):
"""img_group: PIL Images list."""
im_size = img_group[0].size
crop_w, crop_h, offset_w, offset_h = self._sample_crop_patch(im_size)
crop_img_group = [img.crop((offset_w, offset_h, offset_w + crop_w, offset_h + crop_h))
for img in img_group]
ret_img_group = [img.resize((self.input_size[0], self.input_size[1]), self.interpolation)
for img in crop_img_group]
return ret_img_group
def _fill_crop_size(self, img_w, img_h):
"""Generate crop size collections."""
base_size = min(img_w, img_h)
crop_sizes = [int(base_size * s) for s in self.scales]
crop_w = [self.input_size[0] if abs(x - self.input_size[0]) < 3 else x for x in crop_sizes]
crop_h = [self.input_size[1] if abs(x - self.input_size[1]) < 3 else x for x in crop_sizes]
candidate_sizes = []
for i, h in enumerate(crop_h):
for j, w in enumerate(crop_w):
if abs(i - j) <= self.max_distort:
candidate_sizes.append((w, h))
return candidate_sizes
def _fill_fix_offset(self, image_w, image_h, crop_w, crop_h):
"""Generate crop offset collections."""
w_step = (image_w - crop_w) // 4
h_step = (image_h - crop_h) // 4
ret = []
ret.append((0, 0)) # upper left
ret.append((4 * w_step, 0)) # upper right
ret.append((0, 4 * h_step)) # lower left
ret.append((4 * w_step, 4 * h_step)) # lower right
ret.append((2 * w_step, 2 * h_step)) # center
if self.more_fix_crop:
ret.append((0, 2 * h_step)) # center left
ret.append((4 * w_step, 2 * h_step)) # center right
ret.append((2 * w_step, 4 * h_step)) # lower center
ret.append((2 * w_step, 0 * h_step)) # upper center
ret.append((1 * w_step, 1 * h_step)) # upper left quarter
ret.append((3 * w_step, 1 * h_step)) # upper right quarter
ret.append((1 * w_step, 3 * h_step)) # lower left quarter
ret.append((3 * w_step, 3 * h_step))  # lower right quarter
return ret
def _sample_crop_patch(self, im_size):
"""Random choose crop patch."""
img_w, img_h = im_size
# find a crop size
candidate_sizes = self._fill_crop_size(img_w, img_h)
crop_width, crop_height = random.choice(candidate_sizes)
if not self.fix_crop:
w_offset = random.randint(0, img_w - crop_width)
h_offset = random.randint(0, img_h - crop_height)
else:
offsets = self._fill_fix_offset(img_w, img_h, crop_width, crop_height)
w_offset, h_offset = random.choice(offsets)
return crop_width, crop_height, w_offset, h_offset
class GroupRandomHorizontalFlip(object):
"""Random horizontal flip group of frames."""
def __init__(self, flip_prob=0.5, is_flow=False):
"""Init."""
self.flip_prob = flip_prob
self.is_flow = is_flow
def __call__(self, img_group):
"""img_group: PIL Images list."""
if random.random() < self.flip_prob:
ret = [img.transpose(Image.FLIP_LEFT_RIGHT) for img in img_group]
# @TODO(tylerz): figure out the right way to flip optical flow
else:
ret = img_group
return ret
class GroupNormalize(object):
"""Normalize the group of frames. substract mean -> divide std."""
def __init__(self, mean, std):
"""Init."""
self.mean = mean
self.std = std
def __call__(self, tensor):
"""tensor: torch tensor CTHW."""
if len(self.mean) != 0 and len(self.std) != 0:
rep_mean = self.mean * (tensor.size()[0] // len(self.mean))
rep_std = self.std * (tensor.size()[0] // len(self.std))
# TODO: make efficient
for t, m, s in zip(tensor, rep_mean, rep_std):
t.sub_(m).div_(s)
elif len(self.mean) != 0 and len(self.std) == 0:
rep_mean = self.mean * (tensor.size()[0] // len(self.mean))
# TODO: make efficient
for t, m in zip(tensor, rep_mean):
t.sub_(m)
elif len(self.std) != 0 and len(self.mean) == 0:
rep_std = self.std * (tensor.size()[0] // len(self.std))
# TODO: make efficient
for t, s in zip(tensor, rep_std):
t.div_(s)
return tensor
class GroupThreeCrop(object):
"""Crop group of frames. Crop three parts of each frames."""
def __init__(self, size):
"""Init."""
self.size = size
def __call__(self, img_group):
"""img_group: PIL Images list."""
w, h = img_group[0].size
th, tw = self.size
assert th == h or tw == w
if th == h:
w_step = (w - tw) // 2
offsets = []
offsets.append((0, 0)) # left
offsets.append((2 * w_step, 0)) # right
offsets.append((w_step, 0)) # middle
elif tw == w:
h_step = (h - th) // 2
offsets = []
offsets.append((0, 0)) # top
offsets.append((0, 2 * h_step)) # down
offsets.append((0, h_step)) # middle
new_clips = []
for ow, oh in offsets:
for cur_img in img_group:
# crop_img = cur_img[oh:oh+th, ow:ow+tw, :]
crop_img = cur_img.crop((ow, oh, ow + tw, oh + th))
new_clips.append(crop_img)
return new_clips
class ToTorchFormatTensor(object):
""" Converts numpy.ndarray (T x H x W x C) in the range
[0, 255] to a torch.FloatTensor of shape (C x T x H x W) in the range [0.0, 1.0]
"""
def __init__(self, div=True):
"""Init."""
self.div = div
def __call__(self, pic):
"""pic: ndarray (THWC)"""
if isinstance(pic, np.ndarray):
# handle numpy array
# put it from THWC to CTHW format
imgs = torch.from_numpy(pic).permute(3, 0, 1, 2).contiguous()
else:
raise TypeError("pic should be numpy.ndarray")
return imgs.float().div(255) if self.div else imgs.float()
class ToNumpyNDArray(object):
"""Convert PIL Images to nd array."""
def __call__(self, img_group):
"""img_group: PIL Images list."""
if img_group[0].mode == 'L':
return np.array([np.stack((np.array(img_group[x]), np.array(img_group[x + 1])), axis=-1)
for x in range(0, len(img_group), 2)])
if img_group[0].mode == 'RGB':
return np.array([np.array(x) for x in img_group])
return np.array([])
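# A minimal usage sketch (not part of the original file): compose the group ops above into a
# clip-level preprocessing pipeline. Frame count, sizes, and normalization statistics below are
# assumptions chosen for demonstration.
def _example_group_pipeline():
    """Illustrative only: run a dummy 8-frame RGB clip through the group transforms."""
    import torchvision.transforms as T
    pipeline = T.Compose([
        GroupWorker(T.Resize(256)),                         # per-frame torchvision op
        MultiScaleCrop(input_size=(224, 224)),              # shared random crop across frames
        GroupRandomHorizontalFlip(flip_prob=0.5),
        ToNumpyNDArray(),                                   # PIL list -> THWC ndarray
        ToTorchFormatTensor(),                              # THWC -> CTHW float tensor in [0, 1]
        GroupNormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    frames = [Image.new("RGB", (320, 240)) for _ in range(8)]  # dummy clip
    return pipeline(frames)  # tensor of shape (3, 8, 224, 224)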
class GroupJointWorker(object):
"""Wrapper for joint group transformation using torchvision."""
def __init__(self, worker):
"""Init."""
self.worker = worker
def __call__(self, img_group):
"""img_group: two PIL Images lists for rgb and of respectively."""
rgb_group, of_group = img_group
rgb_group = [self.worker(img) for img in rgb_group]
of_group = [self.worker(img) for img in of_group]
return [rgb_group, of_group]
class JointWorker(object):
"""Wrapper for joint group transformation using other group op."""
def __init__(self, worker):
"""Init."""
self.worker = worker
def __call__(self, img_group):
"""img_group: two PIL Images lists or ndarray for rgb and of respectively."""
rgb_group, of_group = img_group
rgb_ret_group = self.worker(rgb_group)
of_ret_group = self.worker(of_group)
return [rgb_ret_group, of_ret_group]
class GroupJointRandomCrop(object):
"""Group random crop for joint training."""
def __init__(self, size):
"""init."""
self.size = size
def __call__(self, img_group):
"""img_group: two PIL Images lists for rgb and of respectively."""
rgb_group, of_group = img_group
w, h = rgb_group[0].size
th, tw = self.size
out_rgb_images = []
out_of_images = []
x1 = random.randint(0, w - tw)
y1 = random.randint(0, h - th)
for img in rgb_group:
assert (img.size[0] == w and img.size[1] == h)
if w == tw and h == th:
out_rgb_images.append(img)
else:
out_rgb_images.append(img.crop((x1, y1, x1 + tw, y1 + th)))
for img in of_group:
assert (img.size[0] == w and img.size[1] == h)
if w == tw and h == th:
out_of_images.append(img)
else:
out_of_images.append(img.crop((x1, y1, x1 + tw, y1 + th)))
return [out_rgb_images, out_of_images]
class JointMultiScaleCrop(MultiScaleCrop):
"""MultiScaleCrop for joint training."""
def __call__(self, img_group):
"""img_group: two PIL Images lists for rgb and of respectively."""
rgb_group, of_group = img_group
im_size = rgb_group[0].size
crop_w, crop_h, offset_w, offset_h = self._sample_crop_patch(im_size)
rgb_crop_img_group = [img.crop((offset_w, offset_h, offset_w + crop_w, offset_h + crop_h))
for img in rgb_group]
rgb_ret_img_group = [img.resize((self.input_size[0], self.input_size[1]),
self.interpolation) for img in rgb_crop_img_group]
of_crop_img_group = [img.crop((offset_w, offset_h, offset_w + crop_w, offset_h + crop_h))
for img in of_group]
of_ret_img_group = [img.resize((self.input_size[0], self.input_size[1]), self.interpolation)
for img in of_crop_img_group]
return [rgb_ret_img_group, of_ret_img_group]
class GroupJointRandomHorizontalFlip(object):
"""Group random horizontal flip for joint training."""
def __init__(self, flip_prob=0.5):
"""Init."""
self.flip_prob = flip_prob
def __call__(self, img_group):
"""img_group: two PIL Images lists for rgb and of respectively."""
rgb_group, of_group = img_group
if random.random() < self.flip_prob:
rgb_ret = [img.transpose(Image.FLIP_LEFT_RIGHT) for img in rgb_group]
of_ret = [img.transpose(Image.FLIP_LEFT_RIGHT) for img in of_group]
else:
rgb_ret = rgb_group
of_ret = of_group
return [rgb_ret, of_ret]
class GroupJointNormalize(object):
"""Group normalization for joint training."""
def __init__(self, rgb_input_mean, rgb_input_std,
of_input_mean, of_input_std):
"""Init"""
self.rgb_normalize = GroupNormalize(rgb_input_mean,
rgb_input_std)
self.of_normalize = GroupNormalize(of_input_mean,
of_input_std)
def __call__(self, img_group):
"""img_group: two torch tensors for rgb and of respectively."""
rgb_group, of_group = img_group
rgb_ret_group = self.rgb_normalize(rgb_group)
of_ret_group = self.of_normalize(of_group)
return [rgb_ret_group, of_ret_group]
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/optical_inspection/utils/group_transforms.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Daatset Preprocess - Integrates into Factory camera system pipeline"""
import pandas as pd
import os
import numpy as np
import shutil
from nvidia_tao_pytorch.core.tlt_logging import logging
def get_boards_perproject(df, project_name, top_bot, compare_csv_path):
"""Get Golden boards per project
Args:
df (pd.DataFrame): The input DataFrame containing board projects information.
top_bot (str): The top/bottom configuration.
compare_csv_path (str): The path to the compare CSV files.
project_name (str): The name of the project to get information for.
"""
files_project = df[(df['project'] == project_name) & (df['TB'] == top_bot)]['Files']
project_boards_csv = pd.DataFrame()
for fname in files_project:
train0 = pd.read_csv(compare_csv_path + fname)
project_boards_csv = pd.concat([train0, project_boards_csv], axis=0)
project_boards_csv = project_boards_csv.reset_index()
return files_project, project_boards_csv
def copy_golden_images(comparefile, proj_name, top_bot, golden_main_path, image_root, goldencsv_fname):
"""Copy golden images for a specific project and top/bottom configuration.
Args:
comparefile (pd.DataFrame): The DataFrame containing the project information.
proj_name (str): The name of the project.
top_bot (str): The top/bottom configuration.
golden_main_path (str): The path to the golden main directory where the golden images are saved.
image_root (str): The root path to the image directory.
goldencsv_fname (str): The file name for the output golden CSV.
"""
golden_path = '/'.join([golden_main_path, 'images', proj_name + top_bot, ''])
if not os.path.exists(golden_path):
os.mkdir(golden_path)
lighting_conditions = list(comparefile['LightingCondition'].unique())
cnt_df = comparefile.groupby(
['CompName', 'TB_Panel', 'BoardSN']).size().reset_index(name='counts')
comparefile = pd.merge(
comparefile, cnt_df, how='left', on=['CompName', 'TB_Panel', 'BoardSN'])
comparefile = comparefile.loc[comparefile['counts'] == 4]
comparefile = comparefile.loc[(comparefile['MachineDefect'] == 'PASS') &
(comparefile['ConfirmDefect'] == 'PASS')]
goldenclean = comparefile.drop_duplicates(
subset=['CompName', 'TB_Panel'], keep="last").reset_index()
goldenclean = goldenclean[['CompName', 'directory', 'TB_Panel', 'BoardSN', 'Project', 'TOP_BOT']]
for row in range(0, goldenclean.shape[0], 1):
records = goldenclean.iloc[row, :]
image_name = records['CompName'] + '@' + str(records['TB_Panel'])
image_loc = image_root + records['directory']
for light in lighting_conditions:
img_light_path = image_loc + '/' + image_name + '_' + light + '.jpg'
shutil.copy(img_light_path, golden_path)
goldenclean.rename({'boardSN': 'boardSN_used'}, axis=1, inplace=True)
goldenclean['directory'] = golden_path
goldenclean.to_csv(goldencsv_fname, index=False, encoding='utf-8', header=True)
def create_golden_forprojects(df, root_data_path, golden_csv_path, compare_csv_path, project_list):
"""Create golden paths for multiple projects.
Args:
df (pd.DataFrame): The input DataFrame containing board information.
root_data_path (str): The root path to the data directory.
golden_csv_path (str): The path where the golden CSV files will be saved.
compare_csv_path (str): The path to the compare CSV files.
project_list (list): A list of projects to copy golden images for.
"""
projects_list = project_list
for proj_info in projects_list:
project_name = proj_info.name
top_bot = proj_info.top_bot
_, csvfiles_concat = get_boards_perproject(df, project_name, top_bot, compare_csv_path)
goldencsv_fname = golden_csv_path + project_name + top_bot + '.csv'
if os.path.exists(goldencsv_fname):
continue
logging.info('creating Golden for {}{}'.format(project_name, top_bot))
golden_main_path = root_data_path + 'dlout/'
image_root = root_data_path + 'images/'
copy_golden_images(csvfiles_concat, project_name, top_bot, golden_main_path, image_root, goldencsv_fname)
def preprocess_boards_cam(summaryfiles_path, top_bot):
"""Pre-process boards"""
def get_top_bot(x):
# print(x)
if x.find('~') != -1:
return x.split('_')[1].split('~')[1]
return x.split('_')[2]
def get_boardsn(x):
# print(x)
if x.find('~') != -1:
return x.split('_')[2]
return x.split('_')[3]
df = pd.DataFrame(os.listdir(summaryfiles_path), columns=['Files'])
df = df.loc[df['Files'].str.endswith('.csv')]
df['project'] = df['Files'].apply(lambda x: x.split('_')[1].split('~')[0])
df['boardname'] = df['Files'].apply(lambda x: get_boardsn(x))
df['TB'] = df['Files'].apply(lambda x: get_top_bot(x))
if top_bot != 'all':
if top_bot in ['TOP', 'BOT']:
df = df.loc[df['TB'] == top_bot]
else:
logging.error("Invalid value for top_bot; expected 'TOP', 'BOT', or 'all'")
return df
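# Illustrative note (not part of the original file): the filename parsing above assumes summary
# CSV names laid out with underscores, roughly "<prefix>_<project>~<TOP|BOT>_<boardSN>_....csv"
# or "<prefix>_<project>_<TOP|BOT>_<boardSN>_....csv"; files that deviate from this layout will
# be mis-parsed into the project/boardname/TB columns.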
def aggr(ser_):
"""Checking PASS FAIL files and aggregating"""
allpass = all([x == 'PASS' for x in ser_])
if allpass:
return 'PASS'
return '_'.join(np.unique([x for x in ser_ if x != 'PASS']))
def get_top_bot(topbot):
"""Get Top Bot"""
if topbot == 'BOT':
return 'AOI_B'
return 'AOI_T'
def aggr_status_across_lights(df):
"""Aggregating status across lights"""
comp_board_cnst_cols = ['CompName', 'directory']
for col in ['ConfirmDefect', 'MachineDefect', 'Status']:
combined = df.groupby(comp_board_cnst_cols)[col].apply(aggr).reset_index()
df = df.drop(col, axis=1)
df = pd.merge(df, combined, how='left', on=comp_board_cnst_cols)
return df
def get_allcomp_csv(df_, compare_csv_path, data_path):
"""Getting all components CSV
Args:
df_ (pd.DataFrame): The input DataFrame containing the project information.
data_path (str): The path to the data directory.
compare_csv_path (str): The path to the CSV file containing compare images.
"""
files = df_['Files']
match_csv = pd.DataFrame()
for fname in files:
train0 = pd.read_csv(data_path + compare_csv_path + fname)
checkprojectpath = True
if checkprojectpath:
dir_tuple = train0.iloc[0, :]
project = dir_tuple['Project']
top_bot = dir_tuple['TOP_BOT']
if dir_tuple.directory.split('/')[0] != project:
train0['directory'] = project + '/' + get_top_bot(top_bot) + '/' + dir_tuple['directory']
match_csv = pd.concat([train0, match_csv], axis=0)
comp_on_board_cols = ['Project', 'BoardSN', 'TOP_BOT', 'CompName', 'TB_Panel', 'directory']
cnt = match_csv.groupby(comp_on_board_cols).size().reset_index(name='light_count')
match_csv = pd.merge(match_csv, cnt, how='left', on=comp_on_board_cols)
num_img = match_csv.shape[0]
match_csv = match_csv.loc[match_csv['light_count'] == 4]
print('Out of {}, dropped {} images due to != 4 lighting conditions'.format(
num_img, num_img - match_csv.shape[0]))
false_call_idx = match_csv['ConfirmDefect'].isna()
print("In ConfirmDefect, for {} rows, NaN replaced with PASS".format(
match_csv[false_call_idx].shape[0]))
match_csv.loc[false_call_idx, 'ConfirmDefect'] = 'PASS'
return match_csv
def move_golden(data_path, goldencsv, savemaplight):
"""Moving golden images to images directory
Args:
data_path (str): The path to the data directory.
goldencsv (str): The golden CSV dataframe.
savemaplight (bool): Flag indicating whether {SaveMapLight} is present in
original image paths before pre-processing the dataset paths.
"""
golden_path = data_path + 'images/golden'
if not os.path.exists(golden_path):
os.mkdir(golden_path)
golden_path = data_path + 'images/golden/images'
if not os.path.exists(golden_path):
os.mkdir(golden_path)
golden_path = data_path + 'images/' + goldencsv.directory[0]
if not os.path.exists(golden_path):
os.mkdir(golden_path)
for row in range(0, goldencsv.shape[0], 1):
records = goldencsv.iloc[row, :]
image_name = records['CompName'] + '@' + str(records['TB_Panel'])
pre_golden_path = data_path + 'dlout/' + records['directory']
post_golden_path = data_path + 'images/' + records['directory']
for light in ['LowAngleLight', 'SolderLight', 'UniformLight', 'WhiteLight']:
if savemaplight:
pre_img_light_path = pre_golden_path + image_name + '{SaveMapLight}' + '_' + light + '.jpg'
post_img_light_path = post_golden_path + image_name + '_' + light + '.jpg'
else:
pre_img_light_path = pre_golden_path + image_name + '_' + light + '.jpg'
post_img_light_path = post_golden_path + image_name + '_' + light + '.jpg'
shutil.copy(pre_img_light_path, post_img_light_path)
def getgoldenpaths(goldencsv):
"""Get golden paths"""
listG = goldencsv.directory[0].split('/')
listG = listG[listG.index('golden'):]
gpath = '/'.join(listG)
return gpath
def getgolden(project_name, top_bot, golden_csv_path, data_path, movegoldenimgs, savemaplight):
"""Getting Golden Boards for a specific project and top/bottom configuration.
Args:
project_name (str): The name of the project.
top_bot (str): The top/bottom configuration.
data_path (str): The path to the data directory.
golden_csv_path (str): The path to the golden CSV file.
movegoldenimgs (bool): Flag indicating whether to move golden images to images directory.
savemaplight (bool): Flag indicating whether {SaveMapLight} is present in
original image paths before pre-processing the dataset paths.
Returns:
pandas.DataFrame: A DataFrame containing the golden board information.
"""
goldencsv = pd.read_csv(data_path + golden_csv_path + project_name + top_bot + '.csv')
if goldencsv.directory[0].split('/')[0] == 'golden':
if movegoldenimgs:
move_golden(data_path, goldencsv, savemaplight)
return goldencsv
goldencsv['directory'] = getgoldenpaths(goldencsv)
if movegoldenimgs:
move_golden(data_path, goldencsv, savemaplight)
return goldencsv
def getboards_mergedw_golden(df_, golden_csv_path, compare_csv_path, data_path, movegoldenimgs, savemaplight):
"""Getting boards merged with Golden
Args:
df_ (pd.DataFrame): The input DataFrame containing the dataset.
data_path (str): The path to the data directory.
golden_csv_path (str): The path to the golden CSV file.
compare_csv_path (str): The path to the CSV file containing compare images.
movegoldenimgs (bool): Flag indicating whether to move golden images to images directory.
savemaplight (bool): Flag indicating whether {SaveMapLight} is present in
original image paths before pre-processing the dataset paths.
Returns:
pandas.DataFrame: A DataFrame containing merged information from the golden and compare files.
"""
merged_df = pd.DataFrame()
for proj, tb in df_.groupby(['project', 'TB']).groups.keys():
goldencsv = getgolden(proj, tb, golden_csv_path, data_path, movegoldenimgs, savemaplight)
df_proj_tb = df_.loc[(df_['project'] == proj) & (df_['TB'] == tb)]
comparefile = get_allcomp_csv(df_proj_tb, compare_csv_path, data_path)
comparefile = aggr_status_across_lights(comparefile)
comp_on_board_cols = ['CompName', 'directory', 'TOP_BOT']
compare_light = comparefile.groupby(comp_on_board_cols)[
'LightingCondition'].apply(' '.join).reset_index()
comparefile.drop(['LightingCondition'], axis=1, inplace=True)
if 'Type' in comparefile.columns.tolist():
comparefile.drop(['Type'], axis=1, inplace=True)
# comparefile.drop(['Type', 'LightingCondition'], axis=1, inplace=True)
comparefile = pd.merge(comparefile, compare_light, how='left', on=comp_on_board_cols)
comparefile = comparefile.drop_duplicates(subset=['CompName', 'BoardSN', 'TOP_BOT', 'directory'])
comp_const_cols = ['CompName', 'TOP_BOT', 'TB_Panel']
merged = pd.merge(comparefile, goldencsv[comp_const_cols + ['directory']],
how='left', on=comp_const_cols)
merged['TB_Panel_x'], merged['TB_Panel_y'] = merged['TB_Panel'].copy(), merged['TB_Panel'].copy()
merged.drop('TB_Panel', axis=1, inplace=True)
merged['project_name'] = proj
merged['TB'] = tb
merged_df = pd.concat([merged_df, merged], axis=0)
return merged_df
def output_combined_dataset(df_, sampling=False,
data_path=None, golden_csv_path=None,
compare_csv_path=None, output_dir=None,
valid=None, movegoldenimgs=None,
project_name=None, savemaplight=None):
"""
Generate a combined dataset for a specific project and save it as a CSV file.
Args:
df_ (pd.DataFrame): The input DataFrame containing the dataset.
sampling (bool): Flag indicating whether to perform sampling on the dataset.
data_path (str): The path to the data directory.
golden_csv_path (str): The path to the golden CSV file.
compare_csv_path (str): The path to the CSV file containing compare images.
output_dir (str): The directory where the generated CSV file will be saved.
valid (bool): Flag indicating whether the dataframe corresponds to train or validation data.
movegoldenimgs (bool): Flag indicating whether to move golden images to images directory.
project_name (str): The name of the project.
savemaplight (bool): Flag indicating whether {SaveMapLight} is present in
original image paths before pre-processing the dataset paths.
Returns:
None
"""
max_rows = 15000
if valid:
df_ = df_[df_['isValid'] == 1]
else:
df_ = df_[df_['isValid'] == 0]
merged_file = getboards_mergedw_golden(df_, golden_csv_path,
compare_csv_path,
data_path,
movegoldenimgs,
savemaplight)
if sampling and merged_file.shape[0] > max_rows:
merged_filep = merged_file.loc[
merged_file['ConfirmDefect'] == 'PASS'].sample(axis=0, n=max_rows)
merged_filef = merged_file.loc[merged_file['ConfirmDefect'] != 'PASS']
merged_file = pd.concat([merged_filep, merged_filef], axis=0)
if merged_file[merged_file['directory_y'].isna()].shape[0]:
num_na_rows = merged_file[merged_file['directory_y'].isna()].shape[0]
logging.warning(
f"\n\nFound {num_na_rows} rows with no golden directory. Removing them"
)
prev_rows = merged_file.shape[0]
merged_file.dropna(how='any', inplace=True)
logging.warning("Dropped {} Rows due to NA".format(prev_rows - merged_file.shape[0]))
col = ['directory_x', 'directory_y', 'ConfirmDefect', 'image_name']
merged_subset = merged_file[col]
merged_subset['image_name'] = merged_subset.image_name.str.split('_').str[0]
merged_subset.rename(columns={'directory_x': 'input_path',
'directory_y': 'golden_path', 'image_name': 'object_name',
'ConfirmDefect': 'label'}, inplace=True)
if valid:
merged_subset.to_csv(output_dir + '/' + 'valid_combined.csv', index=False)
else:
merged_subset.to_csv(output_dir + '/' + 'train_combined.csv', index=False)
def unzip_tars(root_path, projs, top_bot, zip_tar):
"""UnZip Tar files"""
for proj in projs:
for tb in top_bot:
idx = 0
path = '/'.join([root_path, proj, tb, ''])
if zip_tar == "zip":
tars = [x for x in os.listdir(path) if x.endswith('.zip')]
else:
tars = [x for x in os.listdir(path) if x.endswith('.tar.gz')]
for tar in tars:
fulltar = path + tar
if os.path.isdir(fulltar[:-7]):
continue
if zip_tar == "zip":
shutil.unpack_archive(fulltar, path)
# file = tarfile.open(fulltar)
# file.extractall(path)
# file.close()
else:
import tarfile
file = tarfile.open(fulltar)
file.extractall(path)
file.close()
idx += 1
if idx:
print("Extracted {} tars in {}_{}".format(idx, proj, tb))
else:
print("All tars already unzipped ")
def zip_tar_images(df, BOT_TOP, root_data_path):
"""Zip Tar files"""
if BOT_TOP == 'all':
# tb_suffs = ['AOI_T', 'AOI_B']
tb_suffs = ['AOI_T']
df_combined = df.loc[df['TB'] == 'TOP']
unzip_tars(root_data_path + 'images', df_combined['project'].unique(), tb_suffs, "zip")
tb_suffs = ['AOI_B']
df_combined = df.loc[df['TB'] == 'BOT']
unzip_tars(root_data_path + 'images', df_combined['project'].unique(), tb_suffs, "zip")
elif BOT_TOP == 'TOP':
tb_suffs = ['AOI_T']
df_combined = df.loc[df['TB'] == 'TOP']
unzip_tars(root_data_path + 'images', df_combined['project'].unique(), tb_suffs, "tar")
else:
tb_suffs = ['AOI_B']
df_combined = df.loc[df['TB'] == 'BOT']
unzip_tars(root_data_path + 'images', df_combined['project'].unique(), tb_suffs, "tar")
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/optical_inspection/utils/data_preprocess.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Action recognition utils module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/optical_inspection/utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for action recognition"""
import os
import csv
import torch
import shutil
import struct
from eff.core.codec import encrypt_stream
from nvidia_tao_pytorch.core.connectors.checkpoint_connector import decrypt_checkpoint
def patch_decrypt_checkpoint(checkpoint, key):
"""Patch decrypt checkpoint.
Makes decryption work when loading a multi-GPU trained model in a single-GPU environment.
Args:
checkpoint: The encrypted checkpoint dictionary.
key: The decryption key.
"""
from functools import partial
legacy_load = torch.load
torch.load = partial(legacy_load, map_location="cpu")
checkpoint = decrypt_checkpoint(checkpoint, key)
torch.load = legacy_load
# set the encrypted status to be False when it is decrypted
checkpoint["state_dict_encrypted"] = False
return checkpoint
def encrypt_onnx(tmp_file_name, output_file_name, key):
"""Encrypt the onnx model"""
with open(tmp_file_name, "rb") as open_temp_file, open(output_file_name,
"wb") as open_encoded_file:
# set the input name magic number
open_encoded_file.write(struct.pack("<i", 0))
encrypt_stream(
input_stream=open_temp_file, output_stream=open_encoded_file,
passphrase=key, encryption=True
)
def check_and_create(d):
"""Create a directory."""
if not os.path.isdir(d):
os.makedirs(d)
def data_to_device(data):
"""Transfer data to GPU."""
if isinstance(data, list):
cuda_data = []
for item in data:
cuda_item = item.cuda(non_blocking=True)
cuda_data.append(cuda_item)
else:
cuda_data = data.cuda(non_blocking=True)
return cuda_data
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
"""Init"""
self.reset()
def reset(self):
"""reset parameters."""
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
"""update accuracy."""
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k
Args:
output (torch.Tensor): The predicted output tensor.
target (torch.Tensor): The target tensor with true labels.
topk (tuple): A tuple of integers specifying the top-k values for precision calculation.
Returns:
list: A list of precision values at each specified k.
"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.reshape(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
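# A minimal usage sketch (not part of the original file): track running top-1/top-5 accuracy
# over a dummy batch with AverageMeter. Batch size and class count below are assumptions.
def _example_topk_accuracy():
    """Illustrative only: combine accuracy() with AverageMeter."""
    top1, top5 = AverageMeter(), AverageMeter()
    logits = torch.randn(8, 10)                 # batch of 8 samples, 10 classes
    labels = torch.randint(0, 10, (8,))
    acc1, acc5 = accuracy(logits, labels, topk=(1, 5))
    top1.update(acc1.item(), n=labels.size(0))
    top5.update(acc5.item(), n=labels.size(0))
    return top1.avg, top5.avg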
def save_checkpoint(state, is_best, checkpoint, model_best):
"""Naive checkpoint saver."""
torch.save(state, checkpoint)
if is_best:
shutil.copyfile(checkpoint, model_best)
def record_train_info(info, filename):
"""Naive log information."""
str_log = "train_loss: {} val_loss: {} train_acc@1: {} val_acc@1: {} lr: {}".format(
info['train_loss'],
info['val_loss'],
info['train_acc@1'],
info['val_acc@1'],
info['lr'])
print(str_log)
column_names = ['epoch', 'train_loss', 'val_loss', 'train_acc@1', 'val_acc@1', 'lr']
if not os.path.isfile(filename):
with open(filename, "w") as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=column_names)
writer.writeheader()
writer.writerow(info)
else: # else it exists so append without writing the header
with open(filename, "a") as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=column_names)
writer.writerow(info)
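# A minimal usage sketch (not part of the original file): append one epoch of metrics to a CSV
# training log. The file name and metric values below are assumptions for demonstration.
def _example_record_train_info(log_file="train_log.csv"):
    """Illustrative only: log a single epoch with record_train_info."""
    info = {"epoch": 1, "train_loss": 0.42, "val_loss": 0.51,
            "train_acc@1": 83.0, "val_acc@1": 80.5, "lr": 1e-3}
    record_train_info(info, log_file)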
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/optical_inspection/utils/common_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Optical Inspection scripts module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/optical_inspection/scripts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Export Optical Inspection model to ONNX."""
import os
import torch
from nvidia_tao_pytorch.core.cookbooks.tlt_pytorch_cookbook import TLTPyTorchCookbook
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.cv.optical_inspection.config.default_config import OIExperimentConfig
from nvidia_tao_pytorch.cv.optical_inspection.model.pl_oi_model import OpticalInspectionModel
from nvidia_tao_pytorch.cv.optical_inspection.utils.common_utils import check_and_create
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load experiment specification, additionally using schema for validation/retrieving the default values.
# --config_path and --config_name will be provided by the entrypoint script.
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"),
config_name="experiment", schema=OIExperimentConfig
)
def main(cfg: OIExperimentConfig) -> None:
"""CLI wrapper to run export.
This function parses the Hydra experiment configuration, instantiates the exporter,
and serializes the trained model to an ONNX file.
Args:
cfg (OIExperimentConfig): Hydra-parsed experiment configuration.
Returns:
No explicit returns.
"""
try:
run_export(cfg)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Export finished successfully"
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Export was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
def run_export(args):
"""Wrapper to run export of tlt models.
Args:
args (OIExperimentConfig): Parsed experiment configuration used to run export.
Returns:
No explicit returns.
"""
if args.export.results_dir is not None:
results_dir = args.export.results_dir
else:
results_dir = os.path.join(args.results_dir, "export")
check_and_create(results_dir)
# Set status logging
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
append=True
)
)
status_logging.get_status_logger().write(
status_level=status_logging.Status.STARTED,
message="Starting Optical Inspection export"
)
gpu_id = args.export.gpu_id
torch.cuda.set_device(gpu_id)
model_path = args.export.checkpoint
on_cpu = args.export.on_cpu
key = args.encryption_key
output_file = args.export.onnx_file
batch_size = args.export.batch_size
input_channel = args.export.input_channel
input_width = args.export.input_width
input_height = args.export.input_height
opset_version = args.export.opset_version
do_constant_folding = args.export.do_constant_folding
experiment_config = args
if batch_size is None or batch_size == -1:
input_batch_size = 1
else:
input_batch_size = batch_size
# set the encryption key:
TLTPyTorchCookbook.set_passphrase(key)
# Set default output filename if the filename
# isn't provided over the command line.
if output_file is None:
split_name = os.path.splitext(model_path)[0]
output_file = "{}.onnx".format(split_name)
assert not os.path.exists(output_file), "Default output file {} already "\
"exists.".format(output_file)
# Make an output directory if necessary.
output_root = os.path.dirname(os.path.realpath(output_file))
if not os.path.exists(output_root):
os.makedirs(output_root)
# load model
pl_model = OpticalInspectionModel.load_from_checkpoint(
model_path,
map_location="cpu",
experiment_spec=experiment_config,
export=True
)
model = pl_model.model
model.eval()
model.cuda()
output_names = ["output_1", "output_2"]
input_names = ["input_1", "input_2"]
if on_cpu:
dummy_input_1 = torch.randn(
input_batch_size, input_channel, input_height, input_width, device="cpu")
dummy_input_2 = torch.randn(
input_batch_size, input_channel, input_height, input_width, device="cpu")
else:
dummy_input_1 = torch.randn(
input_batch_size, input_channel, input_height, input_width, device="cuda")
dummy_input_2 = torch.randn(
input_batch_size, input_channel, input_height, input_width, device="cuda")
dummy_input = (dummy_input_1, dummy_input_2)
if batch_size is None or batch_size == -1:
dynamic_axes = {
"input_1": {0: "batch"},
"input_2": {0: "batch"}
}
else:
dynamic_axes = None
# export
torch.onnx.export(model,
dummy_input,
output_file,
input_names=input_names,
output_names=output_names,
dynamic_axes=dynamic_axes,
opset_version=opset_version,
do_constant_folding=do_constant_folding,
verbose=True)
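# Optional sanity check (illustrative only, not part of the original file): load the exported
# graph with onnxruntime, assuming the package is available, and run the same dummy inputs.
# import onnxruntime as ort
# sess = ort.InferenceSession(output_file, providers=["CPUExecutionProvider"])
# ort_outs = sess.run(None, {"input_1": dummy_input_1.cpu().numpy(),
#                            "input_2": dummy_input_2.cpu().numpy()})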
if __name__ == "__main__":
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/optical_inspection/scripts/export.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset convert - Integrates into Factory camera system pipeline"""
import pandas as pd
import os
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
from nvidia_tao_pytorch.core.tlt_logging import logging
from nvidia_tao_pytorch.cv.optical_inspection.config.default_config import OIExperimentConfig
from nvidia_tao_pytorch.cv.optical_inspection.utils.data_preprocess import output_combined_dataset, preprocess_boards_cam
def run_experiment(experiment_config):
"""Run Experiment"""
dataset_convert_config = experiment_config["dataset_convert"]
root_data_path = dataset_convert_config["root_dataset_dir"]
train_csv_path = dataset_convert_config["train_pcb_dataset_dir"]
test_csv_path = dataset_convert_config["val_pcb_dataset_dir"]
all_csv_path = dataset_convert_config["all_pcb_dataset_dir"]
output_dir = dataset_convert_config['data_convert_output_dir']
golden_csv_path = dataset_convert_config["golden_csv_dir"]
project_name = dataset_convert_config["project_name"]
BOT_TOP = dataset_convert_config["bot_top"]
df = preprocess_boards_cam(root_data_path + train_csv_path, BOT_TOP)
df_0 = preprocess_boards_cam(root_data_path + test_csv_path, BOT_TOP)
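    # Tag the training boards with isValid=0 and the validation boards with
    # isValid=1 before merging them into a single frame.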
df['isValid'], df_0['isValid'] = 0, 1
df = pd.concat([df, df_0], axis=0)
if project_name != 'all':
df = df.loc[df['project'] == project_name]
logging.info("Using projects:\n {}".format('\n'.join(df['project'].unique())))
df_combined = df
# create_golden_forprojects(df_combined, root_data_path, golden_csv_path,all_csv_path, project_list)
output_combined_dataset(df_combined,
data_path=root_data_path,
golden_csv_path=golden_csv_path,
compare_csv_path=all_csv_path,
output_dir=output_dir,
movegoldenimgs=False,
savemaplight=False,
valid=False,
project_name=project_name)
output_combined_dataset(df_combined,
data_path=root_data_path,
golden_csv_path=golden_csv_path,
compare_csv_path=all_csv_path,
output_dir=output_dir,
movegoldenimgs=False,
savemaplight=False,
valid=True,
project_name=project_name
)
# zip_tar_images(df_combined, BOT_TOP, root_data_path)
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="experiment",
schema=OIExperimentConfig
)
def main(cfg: OIExperimentConfig) -> None:
"""Run the dataset conversion process."""
try:
run_experiment(experiment_config=cfg)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Dataset convert finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Dataset convert was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/optical_inspection/scripts/dataset_convert.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train Optical Inspection Siamese Network model."""
import os
import re
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.loggers import TensorBoardLogger
from nvidia_tao_pytorch.core.callbacks.loggers import TAOStatusLogger
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.core.cookbooks.tlt_pytorch_cookbook import TLTPyTorchCookbook
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
from nvidia_tao_pytorch.core.tlt_logging import logging
from nvidia_tao_pytorch.core.utilities import get_last_generated_file
from nvidia_tao_pytorch.cv.optical_inspection.config.default_config import OIExperimentConfig
from nvidia_tao_pytorch.cv.optical_inspection.model.pl_oi_model import OpticalInspectionModel
from nvidia_tao_pytorch.cv.optical_inspection.utils.common_utils import check_and_create
CHECKPOINT_FILE_EXT = "pth"
def run_experiment(experiment_config,
results_dir,
key):
"""Start the training."""
# set the encryption key:
TLTPyTorchCookbook.set_passphrase(key)
# Load pretrained model as starting point if pretrained path is provided,
pretrained_path = experiment_config.train.pretrained_model_path
if pretrained_path is not None:
oi_model = OpticalInspectionModel.load_from_checkpoint(pretrained_path,
map_location="cpu",
experiment_spec=experiment_config)
else:
oi_model = OpticalInspectionModel(experiment_config)
check_and_create(results_dir)
total_epochs = experiment_config['train']['num_epochs']
clip_grad = experiment_config['train']['clip_grad_norm']
gpus_ids = experiment_config['train']['gpu_ids']
validation_interval = experiment_config.train.validation_interval
checkpoint_interval = experiment_config.train.checkpoint_interval
enable_tensorboard = experiment_config.train.tensorboard.enabled
assert checkpoint_interval <= total_epochs, (
f"Checkpoint interval {checkpoint_interval} > Number of epochs {total_epochs}."
f"Please set experiment_config.train.checkpoint_interval < {total_epochs}"
)
assert validation_interval <= total_epochs, (
f"Validation interval {validation_interval} > Number of epochs {total_epochs}."
f"Please set experiment_config.train.validation_interval < {total_epochs}"
)
status_logger_callback = TAOStatusLogger(results_dir, append=True, num_epochs=total_epochs)
status_logging.set_status_logger(status_logger_callback.logger)
trainer_kwargs = {}
if enable_tensorboard:
trainer_kwargs["logger"] = TensorBoardLogger(
save_dir=results_dir
)
infrequent_logging_frequency = experiment_config.train.tensorboard.infrequent_logging_frequency
assert max(0, infrequent_logging_frequency) <= total_epochs, (
f"infrequent_logging_frequency {infrequent_logging_frequency} must be < num_epochs {total_epochs}"
)
logging.info("Tensorboard logging enabled.")
else:
logging.info("Tensorboard logging disabled.")
acc_flag = None
if len(gpus_ids) > 1:
acc_flag = "ddp"
trainer = Trainer(
gpus=gpus_ids,
max_epochs=total_epochs,
check_val_every_n_epoch=validation_interval,
default_root_dir=results_dir,
accelerator='gpu',
strategy=acc_flag,
gradient_clip_val=clip_grad,
**trainer_kwargs
)
resume_ckpt = None
if experiment_config['train']['resume_training_checkpoint_path']:
resume_ckpt = experiment_config['train']['resume_training_checkpoint_path']
else:
# Get the latest checkpoint file to resume training from by default.
resume_ckpt = get_last_generated_file(
results_dir,
extension=CHECKPOINT_FILE_EXT
)
logging.info("Setting resume checkpoint to {}".format(resume_ckpt))
logging.info(
"Results directory {} Checkpoint Interval {}".format(results_dir, checkpoint_interval)
)
# Setup model checkpoint callback.
ModelCheckpoint.FILE_EXTENSION = f".{CHECKPOINT_FILE_EXT}"
checkpoint_callback = ModelCheckpoint(
every_n_epochs=checkpoint_interval,
dirpath=results_dir,
save_on_train_epoch_end=True,
monitor=None,
save_top_k=-1,
filename='oi_model_{epoch:03d}'
)
trainer.callbacks.append(checkpoint_callback)
if resume_ckpt:
status_logging.get_status_logger().write(
message=f"Resuming training from checkpoint: {resume_ckpt}",
status_level=status_logging.Status.STARTED
)
resumed_epoch = re.search('epoch=(\\d+)', resume_ckpt)
if resumed_epoch:
resumed_epoch = int(resumed_epoch.group(1))
else:
resumed_epoch = 0
status_logger_callback.epoch_counter = resumed_epoch + 1
trainer.callbacks.append(status_logger_callback)
trainer.fit(oi_model, ckpt_path=resume_ckpt or None)
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load experiment specification, additionally using schema for validation/retrieving the default values.
# --config_path and --config_name will be provided by the entrypoint script.
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"),
config_name="experiment", schema=OIExperimentConfig
)
def main(cfg: OIExperimentConfig) -> None:
"""Run the training process."""
try:
if cfg.train.results_dir is not None:
results_dir = cfg.train.results_dir
else:
results_dir = os.path.join(cfg.results_dir, "train")
run_experiment(experiment_config=cfg,
results_dir=results_dir,
key=cfg.encryption_key)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Training finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Training was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/optical_inspection/scripts/train.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Inference on inspection images
"""
import os
import torch
import pandas as pd
from nvidia_tao_pytorch.core.cookbooks.tlt_pytorch_cookbook import TLTPyTorchCookbook
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
from nvidia_tao_pytorch.core.tlt_logging import logging
from nvidia_tao_pytorch.cv.optical_inspection.config.default_config import OIExperimentConfig
from nvidia_tao_pytorch.cv.optical_inspection.dataloader.build_data_loader import (
build_dataloader)
from nvidia_tao_pytorch.cv.optical_inspection.inference.inferencer import Inferencer
from nvidia_tao_pytorch.cv.optical_inspection.model.pl_oi_model import OpticalInspectionModel
from nvidia_tao_pytorch.cv.optical_inspection.utils.common_utils import check_and_create
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
def run_experiment(experiment_config,
model_path,
key,
results_dir):
"""Start the inference."""
check_and_create(results_dir)
# Set status logging
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(status_logging.StatusLogger(filename=status_file, append=True))
status_logging.get_status_logger().write(
status_level=status_logging.Status.STARTED,
message="Starting Optical Inspection inference"
)
gpu_id = experiment_config.inference.gpu_id
torch.cuda.set_device(gpu_id)
# set the encryption key:
TLTPyTorchCookbook.set_passphrase(key)
infer_data_path = experiment_config["dataset"]["infer_dataset"]["csv_path"]
if not os.path.exists(infer_data_path):
raise FileNotFoundError(f"No inference csv file was found at {infer_data_path}")
logging.info("Loading inference csv from : {}".format(infer_data_path))
df = pd.read_csv(infer_data_path)
model = OpticalInspectionModel.load_from_checkpoint(
model_path,
map_location="cpu",
experiment_spec=experiment_config
)
inferencer = Inferencer(model, ret_prob=False)
with torch.no_grad():
# Building dataloader without weighted sampling for inference.
dataloader = build_dataloader(
df=df,
weightedsampling=False,
split='infer',
data_config=experiment_config["dataset"]
)
data_frame = dataloader.dataset.data_frame
for i, data in enumerate(dataloader, 0):
euclidean_distance = inferencer.inference(data)
if i == 0:
euclid = euclidean_distance
else:
euclid = torch.cat((euclid, euclidean_distance), 0)
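        # Attach the accumulated pairwise embedding distances to the data frame
        # as the per-row Siamese score column below.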
siamese_score = 'siamese_score'
data_frame[siamese_score] = euclid.cpu().numpy()
data_frame.to_csv(
os.path.join(results_dir, "inference.csv"),
header=True,
index=False
)
logging.info("Completed")
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load experiment specification, additionally using schema for validation/retrieving the default values.
# --config_path and --config_name will be provided by the entrypoint script.
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"),
config_name="experiment", schema=OIExperimentConfig
)
def main(cfg: OIExperimentConfig) -> None:
"""Run the training process."""
if cfg.inference.results_dir is not None:
results_dir = cfg.inference.results_dir
else:
results_dir = os.path.join(cfg.results_dir, "inference")
try:
run_experiment(experiment_config=cfg,
key=cfg.encryption_key,
model_path=cfg.inference.checkpoint,
results_dir=results_dir)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Inference finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Inference was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/optical_inspection/scripts/inference.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluate a trained Optical Inspection model."""
import os
import torch
import pandas as pd
from nvidia_tao_pytorch.core.cookbooks.tlt_pytorch_cookbook import TLTPyTorchCookbook
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
from nvidia_tao_pytorch.core.tlt_logging import logging
from nvidia_tao_pytorch.cv.optical_inspection.config.default_config import OIExperimentConfig
from nvidia_tao_pytorch.cv.optical_inspection.model.pl_oi_model import OpticalInspectionModel
from nvidia_tao_pytorch.cv.optical_inspection.inference.inferencer import Inferencer
from nvidia_tao_pytorch.cv.optical_inspection.dataloader.build_data_loader import build_dataloader
from nvidia_tao_pytorch.cv.optical_inspection.model.build_nn_model import AOIMetrics
from nvidia_tao_pytorch.cv.optical_inspection.utils.common_utils import check_and_create
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
def run_experiment(experiment_config, model_path, key, results_dir):
"""Run experiment."""
check_and_create(results_dir)
# Set status logging
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(status_logging.StatusLogger(filename=status_file, append=True))
status_logging.get_status_logger().write(
status_level=status_logging.Status.STARTED,
message="Starting Optical Inspection evaluate"
)
gpu_id = experiment_config.evaluate.gpu_id
torch.cuda.set_device(gpu_id)
# Set the encryption key:
TLTPyTorchCookbook.set_passphrase(key)
eval_data_path = experiment_config["dataset"]["test_dataset"]["csv_path"]
margin = experiment_config["model"]["margin"]
df = pd.read_csv(eval_data_path)
# infer_results_path = experiment_config['inference']["results_dir"]
logging.info("test_csv_path {}".format(eval_data_path))
# build inferencer @TODO TRT support
model = OpticalInspectionModel.load_from_checkpoint(
model_path,
map_location="cpu",
experiment_spec=experiment_config
)
infer = Inferencer(model, ret_prob=False)
with torch.no_grad():
dataloader = build_dataloader(
df=df,
weightedsampling=True,
split='test',
data_config=experiment_config["dataset"]
)
valid_AOIMetrics = AOIMetrics(margin)
for i, data in enumerate(dataloader, 0):
euclidean_distance = infer.inference(data)
valid_AOIMetrics.update(euclidean_distance, data[2])
if i == 0:
euclid = euclidean_distance
else:
euclid = torch.cat((euclid, euclidean_distance), 0)
        metrics = valid_AOIMetrics.compute()
        total_accuracy = metrics['total_accuracy'].item()
        false_alarm = metrics['false_alarm'].item()
        defect_accuracy = metrics['defect_accuracy'].item()
        false_negative = metrics['false_negative'].item()
logging.info(
"Tot Comp {} Total Accuracy {} False Negative {} False Alarm {} Defect Correctly Captured {} for Margin {}".format(
len(euclid),
round(total_accuracy, 2),
round(false_negative, 2),
round(false_alarm, 2),
round(defect_accuracy, 2),
margin
)
)
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load experiment specification, additionally using schema for validation/retrieving the default values.
# --config_path and --config_name will be provided by the entrypoint script.
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"),
config_name="experiment", schema=OIExperimentConfig
)
def main(cfg: OIExperimentConfig) -> None:
"""Run the Evaluate process."""
if cfg.evaluate.results_dir is not None:
results_dir = cfg.evaluate.results_dir
else:
results_dir = os.path.join(cfg.results_dir, "evaluate")
try:
run_experiment(experiment_config=cfg,
key=cfg.encryption_key,
model_path=cfg.evaluate.checkpoint,
results_dir=results_dir)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Evaluation finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Evaluation was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/optical_inspection/scripts/evaluate.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entrypoint script for the action recognition task."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/optical_inspection/entrypoint/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""'Entry point' script running subtasks related to action recognition.
"""
import importlib
import os
import pkgutil
import argparse
import subprocess
import sys
from time import time
import nvidia_tao_pytorch.cv.optical_inspection.scripts as scripts
from nvidia_tao_pytorch.core.telemetry.nvml_utils import get_device_details
from nvidia_tao_pytorch.core.telemetry.telemetry import send_telemetry_data
def get_subtasks(package):
"""Get supported subtasks for a given task.
    This function lists out the tasks in the .scripts folder.
Returns:
subtasks (dict): Dictionary of files.
"""
module_path = package.__path__
modules = {}
# Collect modules dynamically.
for _, task, is_package in pkgutil.walk_packages(module_path):
if is_package:
continue
module_name = package.__name__ + '.' + task
module_details = {
"module_name": module_name,
"runner_path": os.path.abspath(importlib.import_module(module_name).__file__),
}
modules[task] = module_details
return modules
def launch(parser, subtasks, network=None):
"""CLI function that executes subtasks.
Args:
parser: Created parser object for a given task.
subtasks: list of subtasks for a given task.
network (str): Name of the network.
"""
# Subtasks for a given model.
parser.add_argument(
'subtask', default='train', choices=subtasks.keys(), help="Subtask for a given task/model.",
)
# Add standard TLT arguments.
parser.add_argument(
"-r",
"--results_dir",
help="Path to a folder where the experiment outputs should be written.",
default=None,
required=False,
)
parser.add_argument(
"--gpus",
help="Number of GPUs to run the train subtask.",
default=None,
type=int,
)
parser.add_argument("-k", "--key", help="User specific encoding key to save or load a .tlt model.")
parser.add_argument("-e", "--experiment_spec_file", help="Path to the experiment spec file.", default=None)
# Parse the arguments.
args, unknown_args = parser.parse_known_args()
script_args = ""
# Process spec file for all commands except the one for getting spec files ;)
# Make sure the user provides spec file.
if args.experiment_spec_file is None:
print("ERROR: The subtask `{}` requires the following argument: -e/--experiment_spec_file".format(args.subtask))
exit(1)
# Make sure the file exists!
if not os.path.exists(args.experiment_spec_file):
print("ERROR: The indicated experiment spec file `{}` doesn't exist!".format(args.experiment_spec_file))
exit(1)
# Split spec file_path into config path and config name.
path, name = os.path.split(args.experiment_spec_file)
if path != '':
script_args += " --config-path " + os.path.realpath(path)
script_args += " --config-name " + name
# And add other params AFTERWARDS!
if args.subtask in ["train"]:
if args.results_dir:
script_args += " results_dir=" + args.results_dir
if args.subtask in ["train", "evaluate", "inference"]:
if args.gpus:
script_args += f" {args.subtask}.num_gpus={args.gpus}"
if args.subtask in ["train", "evaluate", "inference", "export"]:
if args.key is not None:
script_args += " encryption_key=" + args.key
# Find relevant module and pass args.
script = subtasks[args.subtask]["runner_path"]
# Pass unknown args to call
unknown_args_as_str = " ".join(unknown_args)
# Create a system call.
call = "python " + script + script_args + " " + unknown_args_as_str
process_passed = True
start = time()
try:
# Run the script.
subprocess.check_call(call, shell=True, stdout=sys.stdout, stderr=sys.stdout) # nosec B602
except (KeyboardInterrupt, SystemExit):
print("Command was interrupted.")
process_passed = True
except subprocess.CalledProcessError as e:
if e.output is not None:
print(e.output)
process_passed = False
end = time()
time_lapsed = int(end - start)
try:
gpu_data = list()
for device in get_device_details():
gpu_data.append(device.get_config())
send_telemetry_data(
network,
args.subtask,
gpu_data,
num_gpus=args.gpus,
time_lapsed=time_lapsed,
pass_status=process_passed
)
except Exception as e:
print("Telemetry data couldn't be sent, but the command ran successfully.")
print(f"[WARNING]: {e}")
pass
if not process_passed:
print("Execution status: FAIL")
exit(1) # returning non zero return code from the process.
print("Execution status: PASS")
def main():
"""Main entrypoint wrapper."""
# Create parser for a given task.
parser = argparse.ArgumentParser(
"optical_inspection", add_help=True, description="Transfer Learning Toolkit"
)
# Build list of subtasks by inspecting the package.
subtasks = get_subtasks(scripts)
# Parse the arguments and launch the subtask.
launch(parser, subtasks, network="optical_inspection")
if __name__ == '__main__':
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/optical_inspection/entrypoint/optical_inspection.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Main PTL model file for optical inspection"""
import math
import torch
from torch.nn import functional as F
import pandas as pd
import pytorch_lightning as pl
from typing import Optional, Dict, Any
from nvidia_tao_pytorch.core.cookbooks.tlt_pytorch_cookbook import TLTPyTorchCookbook
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.core.tlt_logging import logging
from nvidia_tao_pytorch.cv.optical_inspection.dataloader.build_data_loader import build_dataloader
from nvidia_tao_pytorch.cv.optical_inspection.model.build_nn_model import (build_oi_model, ContrastiveLoss1, AOIMetrics)
from nvidia_tao_pytorch.cv.optical_inspection.utils.common_utils import patch_decrypt_checkpoint
class OpticalInspectionModel(pl.LightningModule):
"""Pytorch Lighting for Optical Inspection
Args:
experiment_config (OmegaConf.DictConf): The experiment configuration.
export (bool): Flag indicating whether to export the model.
"""
def __init__(self, experiment_spec, export=False, **kwargs):
"""Initialize"""
super().__init__(**kwargs)
self.experiment_spec = experiment_spec
# init the model
self._build_model(experiment_spec, export)
self.tensorboard = experiment_spec.train.tensorboard
self.status_logging_dict = {"train_loss": 0.0,
"train_acc": 0.0,
"train_fpr": 0.0,
"val_loss": 0.0,
"val_acc": 0.0,
"val_fpr": 0.0}
self.train_metrics = AOIMetrics()
self.val_metrics = AOIMetrics()
self.num_train_steps_per_epoch = None
self.num_val_steps_per_epoch = None
def _build_model(self, experiment_spec, export=False):
self.model = build_oi_model(
experiment_config=experiment_spec, export=export
)
print(self.model)
def setup(self, stage: Optional[str] = None):
""" Set up the dataset for train and val"""
dataset_config = self.experiment_spec["dataset"]
train_data_path = dataset_config["train_dataset"]["csv_path"]
val_data_path = dataset_config["validation_dataset"]["csv_path"]
self.df_train = pd.read_csv(train_data_path)
self.df_valid = pd.read_csv(val_data_path)
def train_dataloader(self):
"""Build the dataloader for training."""
train_loader = build_dataloader(df=self.df_train,
weightedsampling=True,
split='train',
data_config=self.experiment_spec["dataset"])
self.num_train_steps_per_epoch = math.ceil(len(train_loader.dataset) / train_loader.batch_size)
logging.info("Number of steps for training: {}".format(self.num_train_steps_per_epoch))
return train_loader
def val_dataloader(self):
"""Build the dataloader for training."""
val_loader = build_dataloader(df=self.df_valid,
weightedsampling=False,
split='valid',
data_config=self.experiment_spec["dataset"])
self.num_val_steps_per_epoch = math.ceil(len(val_loader.dataset) / val_loader.batch_size)
logging.info("Number of steps for validation: {}".format(self.num_val_steps_per_epoch))
return val_loader
def configure_optimizers(self):
"""Configure optimizers for training"""
optim_dict = {}
train_config = self.experiment_spec["train"]
if train_config['optim']['type'] == 'Adam':
optim = torch.optim.Adam(self.parameters(), train_config['optim']['lr'])
else:
optim = torch.optim.SGD(params=self.parameters(),
lr=train_config['optim']['lr'],
momentum=train_config['optim']['momentum'],
weight_decay=train_config['optim']['weight_decay'])
optim_dict["optimizer"] = optim
return optim_dict
def training_step(self, batch, batch_idx):
"""Perform a single training step.
Args:
batch: The input batch containing img0, img1, and label tensors.
batch_idx: Index of the current batch.
Returns:
The computed loss value for the training step.
"""
margin = self.experiment_spec["model"]["margin"]
img0, img1, label = batch
self.visualize_image(
"compare_sample", img0,
logging_frequency=self.tensorboard.infrequent_logging_frequency
)
self.visualize_image(
"golden_sample", img1,
logging_frequency=self.tensorboard.infrequent_logging_frequency
)
criterion = ContrastiveLoss1(margin)
output1, output2 = self.model(img0, img1)
loss = criterion(output1, output2, label)
euclidean_distance = F.pairwise_distance(output1, output2)
self.train_metrics.update(euclidean_distance, label)
self.log("train_loss", loss, on_step=True, on_epoch=False, prog_bar=True, sync_dist=True)
return loss
def visualize_histogram(self, logging_frequency=2):
"""Visualize histograms of model parameters.
Args:
logging_frequency (int): The frequency at which to log the histograms.
"""
if self.current_epoch % logging_frequency == 0 and self.tensorboard.enabled:
for name, params in self.named_parameters():
self.logger.experiment.add_histogram(f"histogram/{name}", params, self.current_epoch)
def visualize_image(self, name, tensor, logging_frequency=10):
"""Visualize images during training.
Args:
name (str): The name for the image to be visualized.
tensor (torch.Tensor): The input tensor containing the image.
logging_frequency (int): The frequency at which to log the images.
"""
logging_frequency_in_steps = self.num_train_steps_per_epoch * logging_frequency
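        # The frequency is expressed in epochs; convert it to a global-step interval
        # so images are only written on steps that fall on that epoch boundary.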
is_log_step = self.global_step % logging_frequency_in_steps == 0
if is_log_step and self.tensorboard.enabled:
self.logger.experiment.add_images(
f"image/{name}",
tensor,
global_step=self.global_step
)
def visualize_metrics(self, metrics_dict):
"""Visualize metrics from during train/val.
Args:
metrics_dict (dict): Dictionary of metric tensors to be visualized.
Returns:
No returns.
"""
# Make sure the scalars are logged in TensorBoard always.
if self.logger:
self.logger.log_metrics(metrics_dict, step=self.current_epoch)
def training_epoch_end(self, training_step_outputs):
"""Log Training metrics to status.json
Args:
training_step_outputs: List of outputs from training steps in the epoch.
"""
average_train_loss = 0.0
for out in training_step_outputs:
if isinstance(out, tuple):
average_train_loss += out[0].item()
else:
average_train_loss += out['loss'].item()
average_train_loss /= len(training_step_outputs)
train_accuracy = self.train_metrics.compute()['total_accuracy'].item()
train_false_positive_rate = self.train_metrics.compute()['false_alarm'].item()
self.status_logging_dict["train_loss"] = average_train_loss
self.status_logging_dict["train_acc"] = train_accuracy
self.status_logging_dict["train_fpr"] = train_false_positive_rate
tensorboard_logging_dict = {
"train_acc": train_accuracy,
"train_fpr": train_false_positive_rate
}
self.visualize_histogram(
logging_frequency=self.tensorboard.infrequent_logging_frequency
)
self.visualize_metrics(tensorboard_logging_dict)
self.train_metrics.reset()
status_logging.get_status_logger().kpi = self.status_logging_dict
status_logging.get_status_logger().write(
message="Train and Val metrics generated.",
status_level=status_logging.Status.RUNNING
)
def validation_step(self, batch, batch_idx):
"""Validation step.
Args:
batch: The input batch containing img0, img1, and label tensors.
batch_idx: Index of the current batch.
Returns:
loss: Validation loss value.
"""
margin = self.experiment_spec["model"]["margin"]
img0, img1, label = batch
criterion = ContrastiveLoss1(margin)
output1, output2 = self.model(img0, img1)
loss = criterion(output1, output2, label)
euclidean_distance = F.pairwise_distance(output1, output2)
self.val_metrics.update(euclidean_distance, label)
self.log("val_loss", loss, on_step=False, on_epoch=True, prog_bar=True, sync_dist=True)
return loss
def validation_epoch_end(self, validation_step_outputs):
"""Log Validation metrics to status.json
Args:
validation_step_outputs: List of outputs from validation steps in the epoch.
"""
average_val_loss = 0.0
for out in validation_step_outputs:
if isinstance(out, tuple):
average_val_loss += out[0].item()
else:
average_val_loss += out.item()
average_val_loss /= len(validation_step_outputs)
val_accuracy = self.val_metrics.compute()['total_accuracy'].item()
val_fpr = self.val_metrics.compute()['false_alarm'].item()
self.status_logging_dict["val_loss"] = average_val_loss
self.status_logging_dict["val_acc"] = val_accuracy
self.status_logging_dict["val_fpr"] = val_fpr
validation_logging_dict = {
"val_acc": val_accuracy,
"val_fpr": val_fpr
}
self.visualize_metrics(validation_logging_dict)
self.val_metrics.reset()
def forward(self, x):
"""Forward of the Optical Inspection model.
Args:
x (torch.Tensor): Input data containing two images.
Returns:
output (torch.Tensor): Output of the model.
"""
x1 = x[0]
x2 = x[1]
output = self.model(x1, x2)
return output
def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
"""Encrpyt the checkpoint. The encryption is done in TLTCheckpointConnector."""
pass
def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
"""Decrpyt the checkpoint"""
if checkpoint.get("state_dict_encrypted", False):
# Retrieve encryption key from TLTPyTorchCookbook.
key = TLTPyTorchCookbook.get_passphrase()
if key is None:
raise PermissionError("Cannot access model state dict without the encryption key")
checkpoint = patch_decrypt_checkpoint(checkpoint, key)
def load_final_model(self) -> None:
"""Loading a pre-trained network weights"""
model_path = self.experiment_spec['inference']['checkpoint']
gpu_device = self.experiment_spec['inference']['gpu_id']
siamese_inf = self.model.cuda().load_state_dict(torch.load(model_path, map_location='cuda:' + str(gpu_device)))
return siamese_inf
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/optical_inspection/model/pl_oi_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Action recognition dataloader module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/optical_inspection/model/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The top model builder interface."""
import torch.nn as nn
import torch
from torch.nn import functional as F
from torchmetrics import Metric
from nvidia_tao_pytorch.core.tlt_logging import logging
class SiameseNetwork3(nn.Module):
"""Siamese Network model for finding defects."""
def __init__(self, embedding_vectorsize=None,
num_lights=4, output_shape=[100, 100]):
"""Initialize the SiameseNetwork3 model.
Args:
embedding_vectorsize (int): The size of the embedding vector.
num_lights (int): The number of lighting conditions.
output_shape (list): The output shape of the model [height, width].
"""
super(SiameseNetwork3, self).__init__()
self.embedding = embedding_vectorsize
self.cnn1 = nn.Sequential(
nn.ReflectionPad2d(1),
nn.Conv2d(3, 4, kernel_size=3),
nn.ReLU(inplace=True),
nn.BatchNorm2d(4),
nn.ReflectionPad2d(1),
nn.Conv2d(4, 8, kernel_size=3),
nn.ReLU(inplace=True),
nn.BatchNorm2d(8),
nn.ReflectionPad2d(1),
nn.Conv2d(8, 8, kernel_size=3),
nn.ReLU(inplace=True),
nn.BatchNorm2d(8),
)
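        # The conv stack preserves spatial resolution (stride-1 convolutions with
        # reflection padding), so the flattened feature size is 8 maps times the
        # concatenated lighting images times the resized output height and width.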
fc_ip_dim = 8 * num_lights * int(output_shape[0]) * int(output_shape[1])
self.fc1 = nn.Sequential(
nn.Linear(fc_ip_dim, 512),
nn.ReLU(inplace=True),
nn.BatchNorm1d(512),
nn.Linear(512, 512),
nn.ReLU(inplace=True),
nn.BatchNorm1d(512),
nn.Linear(512, self.embedding))
def forward_once(self, x):
"""Forward pass using one image.
Args:
x (torch.Tensor): The input image.
Returns:
torch.Tensor: The output of the forward pass.
"""
output = self.cnn1(x)
output = output.view(output.size()[0], -1)
output = self.fc1(output)
return output
def forward(self, input1, input2):
"""Forward pass using two images.
Args:
input1 (torch.Tensor): The first input image.
input2 (torch.Tensor): The second input image.
Returns:
tuple: Tuple of output tensors from the forward pass.
"""
output1 = self.forward_once(input1)
output2 = self.forward_once(input2)
return output1, output2
class SiameseNetwork1(nn.Module):
"""Siamese Network model for finding defects."""
def __init__(self, embedding_vectorsize=None,
num_lights=4, output_shape=[100, 100]):
"""Initialize the SiameseNetwork1 model.
Args:
embedding_vectorsize (int): The size of the embedding vector.
num_lights (int): The number of lighting conditions.
output_shape (list): The output shape of the model [height, width].
"""
super(SiameseNetwork1, self).__init__()
self.embedding = embedding_vectorsize
self.cnn1 = nn.Sequential(
nn.ReflectionPad2d(1),
nn.Conv2d(3, 4, kernel_size=3),
nn.ReLU(inplace=True),
nn.BatchNorm2d(4),
nn.ReflectionPad2d(1),
nn.Conv2d(4, 8, kernel_size=3),
nn.ReLU(inplace=True),
nn.BatchNorm2d(8),
nn.ReflectionPad2d(1),
nn.Conv2d(8, 8, kernel_size=3),
nn.ReLU(inplace=True),
nn.BatchNorm2d(8),
)
fc_ip_dim = 8 * num_lights * int(output_shape[0]) * int(output_shape[1])
self.fc1 = nn.Sequential(
nn.Linear(fc_ip_dim, 500),
nn.ReLU(inplace=True),
nn.Linear(500, 500),
nn.ReLU(inplace=True),
nn.Linear(500, self.embedding))
def forward_once(self, x):
"""Forward pass using one image.
Args:
x (torch.Tensor): The input image.
Returns:
torch.Tensor: The output of the forward pass.
"""
output = self.cnn1(x)
output = output.view(output.size()[0], -1)
output = self.fc1(output)
return output
def forward(self, input1, input2):
"""Forward pass using two images.
Args:
input1 (torch.Tensor): The first input image.
input2 (torch.Tensor): The second input image.
Returns:
tuple: Tuple of output tensors from the forward pass.
"""
output1 = self.forward_once(input1)
output2 = self.forward_once(input2)
return output1, output2
class ContrastiveLoss1(torch.nn.Module):
"""Contrastive Loss for comparing image embeddings.
Args:
margin (float): The margin used for contrastive loss.
"""
def __init__(self, margin=2.0):
"""Initialize"""
super(ContrastiveLoss1, self).__init__()
self.margin = margin
def forward(self, output1, output2, label):
"""
Compute the contrastive loss.
Args:
output1 (torch.Tensor): Embedding vector of the first image.
output2 (torch.Tensor): Embedding vector of the second image.
label (torch.Tensor): Label indicating if the images are similar or dissimilar.
Returns:
torch.Tensor: Contrastive loss value.
"""
euclidean_distance = F.pairwise_distance(output1, output2, keepdim=True)
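        # Similar pairs (label 0) are penalized by their squared distance, while
        # dissimilar pairs (label 1) are penalized only when closer than the margin.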
loss_contrastive = torch.mean(
(1 - label) * torch.pow(euclidean_distance, 2) +
(label) * torch.pow(torch.clamp(self.margin - euclidean_distance, min=0.0), 2)
)
return loss_contrastive
class AOIMetrics(Metric):
"""AOI Metrics"""
def __init__(self, margin=2.0):
"""Intialize metrics"""
super().__init__()
self.add_state("match_fail", default=torch.tensor(0), dist_reduce_fx="sum")
self.add_state("tot_fail", default=torch.tensor(0), dist_reduce_fx="sum")
self.add_state("match_pass", default=torch.tensor(0), dist_reduce_fx="sum")
self.add_state("tot_pass", default=torch.tensor(0), dist_reduce_fx="sum")
self.add_state("mismatch_fail", default=torch.tensor(0), dist_reduce_fx="sum")
self.add_state("mismatch_pass", default=torch.tensor(0), dist_reduce_fx="sum")
self.margin = margin
def update(self, preds: torch.Tensor, target: torch.Tensor):
"""Update the metrics based on the predictions and targets.
Args:
preds (torch.Tensor): Predicted distances.
target (torch.Tensor): Target labels.
"""
preds, target = self._input_format(preds, target)
# assert preds.shape == target.shape
# self.correct += torch.sum(preds == target)
# self.total += target.numel()
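        # Distances above the margin are treated as FAIL predictions; at or below
        # the margin, as PASS predictions.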
for k, euc_dist in enumerate(preds, 0):
if euc_dist > float(self.margin):
# Model Classified as FAIL
# totmodelfail += 1
if target.data[k].item() == 1:
self.match_fail += 1
self.tot_fail += 1
else:
self.mismatch_pass += 1
self.tot_pass += 1
else:
# totmodelpass += 1
# Model Classified as PASS
if target.data[k].item() == 0:
self.match_pass += 1
self.tot_pass += 1
else:
self.mismatch_fail += 1
self.tot_fail += 1
def compute(self):
"""Compute the metrics.
Returns:
dict: Dictionary containing the computed metrics.
"""
metric_collect = {}
metric_collect['total_accuracy'] = ((self.match_pass + self.match_fail) / (self.tot_pass + self.tot_fail)) * 100
metric_collect['defect_accuracy'] = 0 if self.tot_fail == 0 else (self.match_fail / self.tot_fail) * 100
metric_collect['false_alarm'] = (self.mismatch_pass / (self.tot_pass + self.tot_fail)) * 100
metric_collect['false_negative'] = (self.mismatch_fail / (self.tot_pass + self.tot_fail)) * 100
return metric_collect
def _input_format(self, preds, target):
return preds, target
def cal_model_accuracy(euclidean_distance, label, match_cnts, total_cnts, margin):
"""Calculate Siamese model accuracy"""
for j in range(euclidean_distance.size()[0]):
if ((euclidean_distance.data[j].item() < margin)):
if label.data[j].item() == 0:
match_cnts += 1
total_cnts += 1
else:
total_cnts += 1
else:
if label.data[j].item() == 1:
match_cnts += 1
total_cnts += 1
else:
total_cnts += 1
return (match_cnts / total_cnts) * 100, match_cnts, total_cnts
def build_oi_model(experiment_config, imagenet_pretrained=True,
export=False):
"""Select and build the Siamese model based on the experiment configuration.
Args:
experiment_config (OmegaConf.DictConf): The experiment configuration.
imagenet_pretrained (bool): Flag indicating whether to use ImageNet pre-trained weights. # TODO: @pgurumurthy to add support
export (bool): Flag indicating whether to export the model.
Returns:
torch.nn.Module: The built Siamese model.
"""
model_config = experiment_config["model"]
embedding_vectorsize = model_config["embedding_vectors"]
model_type = model_config["model_type"]
output_shape = experiment_config["dataset"]["output_shape"]
num_lights = experiment_config["dataset"]["num_input"]
model_backbone = model_config["model_backbone"]
if model_backbone == "custom":
logging.info("Starting training with custom backbone")
if model_type == 'Siamese_3':
model = SiameseNetwork3(embedding_vectorsize, num_lights, output_shape).cuda()
else:
model = SiameseNetwork1(embedding_vectorsize, num_lights, output_shape).cuda()
# TODO: @pgurumurthy to add resnet/efficientnet support.
# elif model_backbone == "resnet":
# print("Starting Siamese with ResNet backbone")
# # PlaceHolder for adding ResNet backbone####
# elif model_backbone == "efficientet":
# print("Starting Siamese with EfficientNet backbone")
# # PlaceHolder for adding EfficientNet backbone####
else:
raise NotImplementedError(f"Invalid model backbone requested.: {model_backbone}")
return model
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/optical_inspection/model/build_nn_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inferencer"""
from nvidia_tao_pytorch.cv.optical_inspection.utils.common_utils import data_to_device
import torch.nn.functional as F
from torch.autograd import Variable
class Inferencer():
"""Pytorch model inferencer."""
def __init__(self, model, ret_prob=False):
"""Initialize the Inferencer with a PyTorch model.
Args:
model (torch.nn.Module): The PyTorch model.
"""
self.model = model
self.model.eval()
self.model.cuda()
def inference(self, data):
"""
Perform inference using the model.
Args:
data (Tuple[torch.Tensor, torch.Tensor]): The input data tuple.
Returns:
torch.Tensor: The Siamese score tensor.
"""
cuda_data = []
cuda_data0 = data_to_device(Variable(data[0]))
cuda_data1 = data_to_device(Variable(data[1]))
cuda_data.append(cuda_data0)
cuda_data.append(cuda_data1)
# output1, output2 = self.model(cuda_data0, cuda_data1)
output1, output2 = self.model(cuda_data)
siam_score = F.pairwise_distance(output1, output2)
# score = euclidean_distance.detach().cpu().numpy()
return siam_score
# @TODO(tylerz): TRT inference
class TRTInferencer():
"""TRT engine inferencer."""
pass
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/optical_inspection/inference/inferencer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Action recognition inference module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/optical_inspection/inference/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Action recognition dataloader module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/optical_inspection/dataloader/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Optical Inspection dataset."""
import os
import numpy as np
from PIL import Image
import torch
from torch.utils.data import Dataset, WeightedRandomSampler
from nvidia_tao_pytorch.core.tlt_logging import logging
class SiameseNetworkTRIDataset(Dataset):
"""Siamese Model Dataset Class"""
def __init__(self, data_frame=None, transform=None,
input_data_path=None, train=True, data_config=None):
"""Initialize the SiameseNetworkTRIDataset.
Args:
data_frame (pd.DataFrame): The input data frame.
transform (transforms.Compose): transformation to be applied to the input image samples.
input_data_path (str): The path to the input data root directory.
train (bool): Flag indicating whether the dataset is for training.
data_config (OmegaConf.DictConf): Configuration for the dataset.
"""
self.data_frame = data_frame
self.transform = transform
# self.data_path = data_path
# self.input_images = data_config["validation_dataset"]["images_dir"]
self.input_image_root = input_data_path
self.train = train
self.num_inputs = data_config["num_input"]
self.concat_type = data_config["concat_type"]
self.lighting = data_config["input_map"]
self.grid_map = data_config["grid_map"]
self.output_shape = data_config["output_shape"]
self.ext = data_config["image_ext"]
if self.concat_type == "grid":
print("Using {} input types and {} type {} X {} for comparison ".format(
self.num_inputs,
self.concat_type,
self.grid_map["x"],
self.grid_map["y"]
))
else:
print("Using {} input types and {} type 1 X {} for comparison".format(
self.num_inputs,
self.concat_type,
self.num_inputs
))
# self.tensorBR = tensorBR
def get_absolute_image_path(self, prefix, light=None):
"""
Get the absolute image path.
Args:
prefix (str): The prefix of the image path.
light (str): The lighting condition suffix to be appended to image name.
Returns:
str: The absolute image path.
"""
image_path = prefix
if light:
image_path += f"_{light}"
image_path += self.ext
if not os.path.exists(image_path):
raise FileNotFoundError(f"Image file wasn't found at {image_path}")
return image_path
def __getitem__(self, index):
"""Get the item at a specific index."""
img_tuple = self.data_frame.iloc[index, :]
img0, img1 = [], []
if self.lighting:
for i, light in enumerate(self.lighting):
img0.append(
Image.open(
self.get_absolute_image_path(
self.getComparePathsV1(img_tuple),
light
)
)
)
img1.append(
Image.open(
self.get_absolute_image_path(
self.getGoldenPathsV1(img_tuple),
light
)
)
)
else:
img0.append(
Image.open(
self.get_absolute_image_path(
self.getComparePathsV1(img_tuple)
)
)
)
img1.append(
Image.open(
self.get_absolute_image_path(
self.getGoldenPathsV1(img_tuple)
)
)
)
if self.train:
for i in range(len(img0)):
img0[i] = img0[i].convert("RGB")
for i in range(len(img1)):
img1[i] = img1[i].convert("RGB")
if self.transform is not None:
img0T = [self.transform(img) for img in img0]
img1T = [self.transform(img) for img in img1]
else:
if self.transform is not None:
img0T = [self.transform(img) for img in img0]
img1T = [self.transform(img) for img in img1]
if self.concat_type == "grid" and int(self.num_inputs) % 2 == 0:
img0 = self.get_grid_concat(img0T)
img1 = self.get_grid_concat(img1T)
else:
img0 = torch.cat(img0T, 1)
img1 = torch.cat(img1T, 1)
# if self.train:
label = torch.from_numpy(
np.array([int(img_tuple['label'] != 'PASS')], dtype=np.float32)
)
return img0, img1, label
def __len__(self):
"""Length of Dataset"""
return len(self.data_frame)
def get_grid_concat(self, img_list):
"""Grid Concat"""
x, y = int(self.grid_map["x"]), int(self.grid_map["y"])
combined_y = []
cnt = 0
for _ in range(0, y, 1):
combined_x = []
for j in range(0, x, 1):
combined_x.append(img_list[cnt])
cnt += 1
if j == (x - 1):
combined_y.append(torch.cat(combined_x, 2))
img_grid = torch.cat(combined_y, 1)
return img_grid
def getTotFail(self):
"""Total FAIL in dataset"""
return len(self.data_frameFAIL)
def getTotPass(self):
"""Total PASS in dataset"""
return len(self.data_framePASS)
def getComparePathsV1(self, img_tuple):
"""Get compare file Path"""
return os.path.join(self.input_image_root, img_tuple['input_path'], img_tuple['object_name'])
def getGoldenPathsV1(self, img_tuple):
"""Get golden file Path"""
return os.path.join(self.input_image_root, img_tuple['golden_path'], img_tuple['object_name'])
def get_sampler(dataset, train_sampl_ratio=0.1):
"""
Returns a weighted sampler for imbalanced dataset.
The weighted sampler increases the sampling rate of FAIL instances relative to PASS instances.
Args:
dataset (SiameseNetworkTRIDataset): The input dataset.
train_sampl_ratio (float): The ratio to increase the sampling rate of FAIL instances.
Returns:
WeightedRandomSampler: The weighted random sampler object.
"""
n = dataset.data_frame.shape[0]
target_list = [0] * n
df_ = dataset.data_frame.copy()
fail_indices = [i for i in range(df_.shape[0]) if df_['label'].iloc[i] != 'PASS']
for i in fail_indices:
target_list[i] = 1
num_pass = dataset.data_frame.shape[0] - len(fail_indices)
pf_ratio = num_pass / len(fail_indices)
fail_wt = pf_ratio * train_sampl_ratio
logging.info('\nSampling Defective components at {:05.2f}:1 rate'.format(fail_wt))
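    # Class weights indexed by label: PASS samples keep weight 1, FAIL samples are
    # weighted by fail_wt so defective boards are oversampled at the requested rate.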
class_weights = torch.tensor([1, fail_wt], dtype=torch.float)
class_weights_all = class_weights[target_list]
weighted_sampler = WeightedRandomSampler(
weights=class_weights_all,
num_samples=len(class_weights_all),
replacement=True)
return weighted_sampler
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/optical_inspection/dataloader/oi_dataset.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build torch data loader."""
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
# from nvidia_tao_pytorch.core.tlt_logging import logging
from nvidia_tao_pytorch.cv.optical_inspection.dataloader.oi_dataset import SiameseNetworkTRIDataset, get_sampler
# START SIAMESE DATALOADER
def build_dataloader(df, weightedsampling, split, data_config):
"""Build torch dataloader.
Args:
df (pd.DataFrame): The input data frame.
weightedsampling (bool): Flag indicating whether to use weighted sampling.
split (str): The split type ('train', 'valid', 'test', 'infer').
data_config (OmegaConf.DictConf): Configuration spec for data loading.
Returns:
DataLoader: The built torch DataLoader object.
"""
workers = data_config["workers"]
batch_size = data_config["batch_size"]
output_shape = data_config["output_shape"]
rgb_mean = data_config["augmentation_config"]["rgb_input_mean"]
rgb_std = data_config["augmentation_config"]["rgb_input_std"]
train_transforms = transforms.Compose(
[
transforms.Resize((output_shape[0], output_shape[1])),
transforms.ToTensor(),
transforms.Normalize(rgb_mean, rgb_std)
]
)
test_transforms = transforms.Compose(
[
transforms.Resize((output_shape[0], output_shape[1])),
transforms.ToTensor(),
transforms.Normalize(rgb_mean, rgb_std)
]
)
dataloader_kwargs = {
"pin_memory": True,
"batch_size": batch_size,
"num_workers": workers
}
if split == 'train':
input_data_path = data_config["train_dataset"]["images_dir"]
dataset = SiameseNetworkTRIDataset(data_frame=df,
train=True,
input_data_path=input_data_path,
transform=train_transforms,
data_config=data_config)
if weightedsampling:
fpratio_sampling = data_config['fpratio_sampling']
wt_sampler = get_sampler(dataset, fpratio_sampling)
dataloader_kwargs["sampler"] = wt_sampler
else:
dataloader_kwargs["shuffle"] = True
assert batch_size > 1, "Training batch size must be greater than 1."
dataloader_kwargs["drop_last"] = True
elif split == 'valid':
input_data_path = data_config["validation_dataset"]["images_dir"]
dataset = SiameseNetworkTRIDataset(data_frame=df,
train=False,
input_data_path=input_data_path,
transform=test_transforms,
data_config=data_config)
dataloader_kwargs["shuffle"] = False
elif split == 'test':
input_data_path = data_config["test_dataset"]["images_dir"]
dataset = SiameseNetworkTRIDataset(data_frame=df,
train=False,
input_data_path=input_data_path,
transform=test_transforms,
data_config=data_config)
dataloader_kwargs["shuffle"] = False
elif split == 'infer':
input_data_path = data_config["infer_dataset"]["images_dir"]
dataset = SiameseNetworkTRIDataset(data_frame=df,
train=False,
input_data_path=input_data_path,
transform=test_transforms,
data_config=data_config)
dataloader_kwargs["shuffle"] = False
# Build dataloader
dataloader = DataLoader(
dataset,
**dataloader_kwargs
)
return dataloader
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/optical_inspection/dataloader/build_data_loader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Deformable DETR module."""
# Temporarily override torch versioning from DLFW so that we disable warning from fairscale
# about torch version during ddp_sharded training. Fairscale doesn't handle commit versions well
# E.g. 1.13.0a0+d0d6b1f
import torch
import re
numbering = re.search(r"^(\d+).(\d+).(\d+)([^\+]*)(\+\S*)?$", torch.__version__)
torch.__version__ = ".".join([str(numbering.group(n)) for n in range(1, 4)])
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/deformable_detr/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Deformable DETR config module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/deformable_detr/config/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default config file."""
from typing import Optional, List, Dict
from dataclasses import dataclass, field
from omegaconf import MISSING
@dataclass
class DDDatasetConvertConfig:
"""Dataset Convert config."""
input_source: Optional[str] = None
data_root: Optional[str] = None
results_dir: str = MISSING
image_dir_name: Optional[str] = None
label_dir_name: Optional[str] = None
val_split: int = 0
num_shards: int = 20
num_partitions: int = 1
partition_mode: Optional[str] = None
image_extension: str = ".jpg"
mapping_path: Optional[str] = None
@dataclass
class DDAugmentationConfig:
"""Augmentation config."""
scales: List[int] = field(default_factory=lambda: [480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800],
metadata={"description": "Random Scales for Augmentation"})
input_mean: List[float] = field(default_factory=lambda: [0.485, 0.456, 0.406],
metadata={"description": "Pixel mean value"})
input_std: List[float] = field(default_factory=lambda: [0.229, 0.224, 0.225],
metadata={"description": "Pixel Standard deviation value"})
train_random_resize: List[int] = field(default_factory=lambda: [400, 500, 600],
metadata={"description": "Training Random Resize"})
horizontal_flip_prob: float = 0.5
train_random_crop_min: int = 384
train_random_crop_max: int = 600
random_resize_max_size: int = 1333
test_random_resize: int = 800
fixed_padding: bool = True
@dataclass
class DDDatasetConfig:
"""Dataset config."""
train_sampler: str = "default_sampler"
train_data_sources: Optional[List[Dict[str, str]]] = None
val_data_sources: Optional[List[Dict[str, str]]] = None
test_data_sources: Optional[Dict[str, str]] = None
infer_data_sources: Optional[Dict[str, str]] = None
batch_size: int = 4
workers: int = 8
pin_memory: bool = True
num_classes: int = 91
dataset_type: str = "serialized"
eval_class_ids: Optional[List[int]] = None
augmentation: DDAugmentationConfig = DDAugmentationConfig()
@dataclass
class DDModelConfig:
"""Deformable DETR model config."""
pretrained_backbone_path: Optional[str] = None
backbone: str = "resnet_50"
num_queries: int = 300
num_feature_levels: int = 4
return_interm_indices: List[int] = field(default_factory=lambda: [1, 2, 3, 4],
metadata={"description": "Indices to return from backbone"})
with_box_refine: bool = True
cls_loss_coef: float = 2.0
bbox_loss_coef: float = 5.0
giou_loss_coef: float = 2.0
focal_alpha: float = 0.25
clip_max_norm: float = 0.1
dropout_ratio: float = 0.3
hidden_dim: int = 256
nheads: int = 8
enc_layers: int = 6
dec_layers: int = 6
dim_feedforward: int = 1024
dec_n_points: int = 4
enc_n_points: int = 4
aux_loss: bool = True
dilation: bool = False
train_backbone: bool = True
loss_types: List[str] = field(default_factory=lambda: ['labels', 'boxes'],
metadata={"description": "Losses to be used during training"})
backbone_names: List[str] = field(default_factory=lambda: ["backbone.0"],
metadata={"description": "Backbone name"})
linear_proj_names: List[str] = field(default_factory=lambda: ['reference_points', 'sampling_offsets'],
metadata={"description": "Linear Projection names"})
@dataclass
class OptimConfig:
"""Optimizer config."""
optimizer: str = "AdamW"
monitor_name: str = "val_loss" # {val_loss, train_loss}
lr: float = 2e-4
lr_backbone: float = 2e-5
lr_linear_proj_mult: float = 0.1
momentum: float = 0.9
weight_decay: float = 1e-4
lr_scheduler: str = "MultiStep"
lr_steps: List[int] = field(default_factory=lambda: [40],
metadata={"description": "learning rate decay steps"})
lr_step_size: int = 40
lr_decay: float = 0.1
@dataclass
class DDTrainExpConfig:
"""Train experiment config."""
num_gpus: int = 1
num_nodes: int = 1
resume_training_checkpoint_path: Optional[str] = None
pretrained_model_path: Optional[str] = None
validation_interval: int = 1
clip_grad_norm: float = 0.1
is_dry_run: bool = False
results_dir: Optional[str] = None
num_epochs: int = 50
checkpoint_interval: int = 1
optim: OptimConfig = OptimConfig()
precision: str = "fp32"
distributed_strategy: str = "ddp"
activation_checkpoint: bool = True
@dataclass
class DDInferenceExpConfig:
"""Inference experiment config."""
num_gpus: int = 1
results_dir: Optional[str] = None
checkpoint: Optional[str] = None
trt_engine: Optional[str] = None
color_map: Dict[str, str] = MISSING
conf_threshold: float = 0.5
is_internal: bool = False
input_width: Optional[int] = None
input_height: Optional[int] = None
@dataclass
class DDEvalExpConfig:
"""Evaluation experiment config."""
num_gpus: int = 1
results_dir: Optional[str] = None
input_width: Optional[int] = None
input_height: Optional[int] = None
checkpoint: Optional[str] = None
trt_engine: Optional[str] = None
conf_threshold: float = 0.0
@dataclass
class CalibrationConfig:
"""Calibration config."""
cal_image_dir: List[str] = MISSING
cal_cache_file: str = MISSING
cal_batch_size: int = 1
cal_batches: int = 1
@dataclass
class TrtConfig:
"""Trt config."""
data_type: str = "FP32"
workspace_size: int = 1024
min_batch_size: int = 1
opt_batch_size: int = 1
max_batch_size: int = 1
calibration: CalibrationConfig = CalibrationConfig()
@dataclass
class DDExportExpConfig:
"""Export experiment config."""
results_dir: Optional[str] = None
gpu_id: int = 0
checkpoint: str = MISSING
onnx_file: str = MISSING
on_cpu: bool = False
input_channel: int = 3
input_width: int = 960
input_height: int = 544
opset_version: int = 12
batch_size: int = -1
verbose: bool = False
@dataclass
class DDGenTrtEngineExpConfig:
"""Gen TRT Engine experiment config."""
results_dir: Optional[str] = None
gpu_id: int = 0
onnx_file: str = MISSING
trt_engine: Optional[str] = None
input_channel: int = 3
input_width: int = 960
input_height: int = 544
opset_version: int = 12
batch_size: int = -1
verbose: bool = False
tensorrt: TrtConfig = TrtConfig()
@dataclass
class ExperimentConfig:
"""Experiment config."""
model: DDModelConfig = DDModelConfig()
dataset: DDDatasetConfig = DDDatasetConfig()
train: DDTrainExpConfig = DDTrainExpConfig()
evaluate: DDEvalExpConfig = DDEvalExpConfig()
inference: DDInferenceExpConfig = DDInferenceExpConfig()
export: DDExportExpConfig = DDExportExpConfig()
gen_trt_engine: DDGenTrtEngineExpConfig = DDGenTrtEngineExpConfig()
encryption_key: Optional[str] = None
results_dir: str = MISSING
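# Illustrative sketch of how these dataclasses are consumed as OmegaConf structured configs;
# the YAML file name is a placeholder and its keys are assumed to mirror the field names above.
#
#   from omegaconf import OmegaConf
#
#   schema = OmegaConf.structured(ExperimentConfig)
#   user_cfg = OmegaConf.load("spec.yaml")            # hypothetical user spec
#   cfg = OmegaConf.merge(schema, user_cfg)           # validates keys/types against the schema
#   print(cfg.train.optim.lr, cfg.dataset.batch_size)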
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/deformable_detr/config/default_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Original source taken from https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/coco.py
# Copyright (c) 2020 SenseTime.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Modified COCO Dataset Class """
import json
import numpy as np
import copy
import itertools
from collections import defaultdict
def _isArrayLike(obj):
"""_isArrayLike"""
return hasattr(obj, '__iter__') and hasattr(obj, '__len__')
# pylint:disable=R1718, W0622
class COCO:
""" COCO class """
def __init__(self, dataset=None):
"""COCO Constructor.
Args:
dataset (json data): json data after loading the json file
"""
# load dataset - loaded json data
self.dataset = dataset
self.anns, self.cats, self.imgs = {}, {}, {}
self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)
assert type(dataset) == dict, f'annotation file format {type(dataset)} not supported'
self.createIndex()
def createIndex(self):
"""createIndex"""
anns, cats, imgs = {}, {}, {}
imgToAnns, catToImgs = defaultdict(list), defaultdict(list)
if 'annotations' in self.dataset:
for ann in self.dataset['annotations']:
imgToAnns[ann['image_id']].append(ann)
anns[ann['id']] = ann
if 'images' in self.dataset:
for img in self.dataset['images']:
imgs[img['id']] = img
if 'categories' in self.dataset:
for cat in self.dataset['categories']:
cats[cat['id']] = cat
if 'annotations' in self.dataset and 'categories' in self.dataset:
for ann in self.dataset['annotations']:
catToImgs[ann['category_id']].append(ann['image_id'])
# create class members
self.anns = anns
self.imgToAnns = imgToAnns
self.catToImgs = catToImgs
self.imgs = imgs
self.cats = cats
def info(self):
"""print info"""
for key, value in self.dataset['info'].items():
print('{}: {}'.format(key, value))
def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
"""
Get ann ids that satisfy given filter conditions. default skips that filter
:param imgIds (int array) : get anns for given imgs
catIds (int array) : get anns for given cats
areaRng (float array) : get anns for given area range (e.g. [0 inf])
iscrowd (boolean) : get anns for given crowd label (False or True)
:return: ids (int array) : integer array of ann ids
"""
imgIds = imgIds if _isArrayLike(imgIds) else [imgIds]
catIds = catIds if _isArrayLike(catIds) else [catIds]
if len(imgIds) == len(catIds) == len(areaRng) == 0:
anns = self.dataset['annotations']
else:
if not len(imgIds) == 0:
lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns]
anns = list(itertools.chain.from_iterable(lists))
else:
anns = self.dataset['annotations']
anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]
anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]
if iscrowd is not None:
ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]
else:
ids = [ann['id'] for ann in anns]
return ids
def getCatIds(self, catNms=[], supNms=[], catIds=[]):
"""
filtering parameters. default skips that filter.
:param catNms (str array) : get cats for given cat names
:param supNms (str array) : get cats for given supercategory names
:param catIds (int array) : get cats for given cat ids
:return: ids (int array) : integer array of cat ids
"""
catNms = catNms if _isArrayLike(catNms) else [catNms]
supNms = supNms if _isArrayLike(supNms) else [supNms]
catIds = catIds if _isArrayLike(catIds) else [catIds]
if len(catNms) == len(supNms) == len(catIds) == 0:
cats = self.dataset['categories']
else:
cats = self.dataset['categories']
cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]
cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]
cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]
ids = [cat['id'] for cat in cats]
return ids
def getImgIds(self, imgIds=[], catIds=[]):
"""
Get img ids that satisfy given filter conditions.
:param imgIds (int array) : get imgs for given ids
:param catIds (int array) : get imgs with all given cats
:return: ids (int array) : integer array of img ids
"""
imgIds = imgIds if _isArrayLike(imgIds) else [imgIds]
catIds = catIds if _isArrayLike(catIds) else [catIds]
if len(imgIds) == len(catIds) == 0:
ids = self.imgs.keys()
else:
ids = set(imgIds)
for i, catId in enumerate(catIds):
if i == 0 and len(ids) == 0:
ids = set(self.catToImgs[catId])
else:
ids &= set(self.catToImgs[catId])
return list(ids)
def loadAnns(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying anns
:return: anns (object array) : loaded ann objects
"""
output = []
if _isArrayLike(ids):
output = [self.anns[id] for id in ids]
elif type(ids) == int:
output = [self.anns[ids]]
return output
def loadCats(self, ids=[]):
"""
Load cats with the specified ids.
:param ids (int array) : integer ids specifying cats
:return: cats (object array) : loaded cat objects
"""
output = []
if _isArrayLike(ids):
output = [self.cats[id] for id in ids]
elif type(ids) == int:
output = [self.cats[ids]]
return output
def loadImgs(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying img
:return: imgs (object array) : loaded img objects
"""
output = []
if _isArrayLike(ids):
output = [self.imgs[id] for id in ids]
elif type(ids) == int:
output = [self.imgs[ids]]
return output
def loadRes(self, resFile):
"""
Load result file and return a result api object.
:param resFile (str) : file name of result file
:return: res (obj) : result api object
"""
# This modified COCO class requires a dict dataset, so seed the result object with the
# ground-truth images and empty annotation/category lists.
res = COCO(dataset={'images': [img for img in self.dataset['images']], 'annotations': [], 'categories': []})
if type(resFile) == str:
with open(resFile) as f:
anns = json.load(f)
elif type(resFile) == np.ndarray:
anns = self.loadNumpyAnnotations(resFile)
else:
anns = resFile
assert type(anns) == list, 'results is not an array of objects'
annsImgIds = [ann['image_id'] for ann in anns]
assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \
'Results do not correspond to current coco set'
if 'caption' in anns[0]:
imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])
res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]
for id, ann in enumerate(anns):
ann['id'] = id + 1
elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
bb = ann['bbox']
x1, x2, y1, y2 = [bb[0], bb[0] + bb[2], bb[1], bb[1] + bb[3]]
if 'segmentation' not in ann:
ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
ann['area'] = bb[2] * bb[3]
ann['id'] = id + 1
ann['iscrowd'] = 0
elif 'keypoints' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
s = ann['keypoints']
x = s[0::3]
y = s[1::3]
x0, x1, y0, y1 = np.min(x), np.max(x), np.min(y), np.max(y)
ann['area'] = (x1 - x0) * (y1 - y0)
ann['id'] = id + 1
ann['bbox'] = [x0, y0, x1 - x0, y1 - y0]
res.dataset['annotations'] = anns
res.createIndex()
return res
def loadNumpyAnnotations(self, data):
"""
Convert result data from a numpy array [Nx7] where each row contains {imageID,x1,y1,w,h,score,class}
:param data (numpy.ndarray)
:return: annotations (python nested list)
"""
# print('Converting ndarray to lists...')
assert (type(data) == np.ndarray)
# print(data.shape)
assert (data.shape[1] == 7)
N = data.shape[0]
ann = []
for i in range(N):
if i % 1000000 == 0:
print('{}/{}'.format(i, N))
ann += [{'image_id': int(data[i, 0]),
'bbox': [data[i, 1], data[i, 2], data[i, 3], data[i, 4]],
'score': data[i, 5],
'category_id': int(data[i, 6])}]
return ann
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/deformable_detr/utils/coco.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Misc functions. """
import os
import pickle # nosec B403
import torch
import torch.distributed as dist
from nvidia_tao_pytorch.core.cookbooks.tlt_pytorch_cookbook import TLTPyTorchCookbook
from nvidia_tao_pytorch.cv.action_recognition.utils.common_utils import patch_decrypt_checkpoint
DEFAULT_TARGET_CLASS_MAPPING = {
"Person": "person",
"Person Group": "person",
"Rider": "person",
"backpack": "bag",
"face": "face",
"large_bag": "bag",
"person": "person",
"person group": "person",
"person_group": "person",
"personal_bag": "bag",
"rider": "person",
"rolling_bag": "bag",
"rollingbag": "bag",
"largebag": "bag",
"personalbag": "bag"
}
DEFAULT_CLASS_LOOKUP_TABLE = {'person': 1, 'face': 2, 'bag': 3}
def get_categories(cat_map):
"""
Function to convert the category map to COCO annotation format
Args:
cat_map (dict): Category map
Returns:
categories_info (list): COCO annotation format of the category map
categories_dict (dict): In a format of {"class_name": "id"}
"""
categories_info = []
categories_dict = {}
for i, class_name in enumerate(sorted(set(cat_map.values()), reverse=True)):
category = {
'id': i + 1,
'name': class_name
}
categories_info.append(category)
categories_dict[class_name] = i + 1
return categories_info, categories_dict
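# Small usage sketch for get_categories; the mapping literal below is only an example,
# not a mapping shipped with the repo.
#
#   cat_map = {"Person": "person", "backpack": "bag", "face": "face"}
#   categories_info, categories_dict = get_categories(cat_map)
#   # categories_info -> [{"id": 1, "name": "person"}, {"id": 2, "name": "face"}, {"id": 3, "name": "bag"}]
#   # categories_dict -> {"person": 1, "face": 2, "bag": 3}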
def check_and_create(d):
"""Create a directory."""
if not os.path.isdir(d):
os.makedirs(d, exist_ok=True)
def load_pretrained_weights(pretrained_path):
"""To get over pytorch lightning module in the checkpoint state_dict."""
temp = torch.load(pretrained_path,
map_location="cpu")
if temp.get("state_dict_encrypted", False):
# Retrieve encryption key from TLTPyTorchCookbook.
key = TLTPyTorchCookbook.get_passphrase()
if key is None:
raise PermissionError("Cannot access model state dict without the encryption key")
temp = patch_decrypt_checkpoint(temp, key)
# for loading pretrained I3D weights released on
# https://github.com/piergiaj/pytorch-i3d
if "state_dict" not in temp:
return temp
state_dict = {}
for key, value in list(temp["state_dict"].items()):
if "module" in key:
new_key = ".".join(key.split(".")[1:])
state_dict[new_key] = value
elif key.startswith("backbone."):
# MMLab compatible weight loading
new_key = key[9:]
state_dict[new_key] = value
elif key.startswith("ema_"):
# Do not include ema params from MMLab
continue
else:
state_dict[key] = value
return state_dict
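# Usage sketch: loading a pretrained checkpoint into a model. The path and the `model`
# object are placeholders; encrypted checkpoints additionally require the key to have been
# registered with TLTPyTorchCookbook beforehand.
#
#   state_dict = load_pretrained_weights("/path/to/pretrained.pth")   # hypothetical path
#   missing, unexpected = model.load_state_dict(state_dict, strict=False)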
def match_name_keywords(n, name_keywords):
"""match_name_keywords"""
out = False
for b in name_keywords:
if b in n:
out = True
break
return out
def all_gather(data):
"""Run all_gather on arbitrary picklable data (not necessarily tensors).
Args:
data: any picklable object.
Returns:
list[data]: list of data gathered from each rank.
"""
world_size = get_world_size()
if world_size == 1:
return [data]
# serialized to a Tensor
buffer = pickle.dumps(data)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to('cuda')
# obtain Tensor size of each rank
local_size = torch.tensor([tensor.numel()], device='cuda')
size_list = [torch.tensor([0], device='cuda') for _ in range(world_size)]
dist.all_gather(size_list, local_size)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# receiving Tensor from all ranks
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
tensor_list = []
for _ in size_list:
tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device='cuda'))
if local_size != max_size:
padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device='cuda')
tensor = torch.cat((tensor, padding), dim=0)
dist.all_gather(tensor_list, tensor)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer)) # nosec B301
return data_list
def collate_fn(batch):
"""Custom collate function for DETR-like models.
DETR models use multi-scale resize and random cropping, which results in varying input resolutions across images.
Hence, we need a custom collate_fn to pad additional regions and pass that as mask to transformer.
Args:
batch (tuple): tuple of a single batch. Contains image and label tensors
Returns:
batch (tuple): tuple of a single batch with uniform image resolution after padding.
"""
batch = list(zip(*batch))
batch[0] = tensor_from_tensor_list(batch[0], batch[1])
return tuple(batch)
def _max_by_axis(the_list):
"""Get maximum image shape for padding."""
maxes = the_list[0]
for sublist in the_list[1:]:
for index, item in enumerate(sublist):
maxes[index] = max(maxes[index], item)
return maxes
def tensor_from_tensor_list(tensor_list, targets):
"""Convert list of tensors with different size to fixed resolution.
The final size is determined by largest height and width.
In theory, the batch could become [3, 1333, 1333] on dataset with different aspect ratio, e.g. COCO
A fourth channel dimension is the mask region in which 0 represents the actual image and 1 means the padded region.
This is to give size information to the transformer architecture. If transform-padding is applied,
then only the pre-padded regions gets mask value of 1.
Args:
tensor_list (List[Tensor]): list of image tensors
targets (List[dict]): list of labels that contain the size information
Returns:
tensors (torch.Tensor): list of image tensors in shape of (B, 4, H, W)
"""
if tensor_list[0].ndim == 3:
max_size = _max_by_axis([list(img.shape) for img in tensor_list])
batch_shape = [len(tensor_list)] + max_size
b, c, h, w = batch_shape
dtype = tensor_list[0].dtype
device = tensor_list[0].device
temp_tensors = torch.zeros((b, c, h, w), dtype=dtype, device=device)
mask = torch.ones((b, 1, h, w), dtype=dtype, device=device)
tensors = torch.concat((temp_tensors, mask), 1)
for img, target, pad_img in zip(tensor_list, targets, tensors):
# Get original image size before transform-padding
# If no transform-padding has been applied,
# then height == img.shape[1] and width == img.shape[2]
actual_height, actual_width = target['size']
pad_img[:img.shape[0], :actual_height, :actual_width].copy_(img[:, :actual_height, :actual_width])
pad_img[c, :actual_height, :actual_width] = 0 # set zeros for mask in non-padded area
else:
raise ValueError('Channel size other than 3 is not supported')
return tensors
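# Shape sketch for the padded batch produced above (toy sizes, not repo data): two 3-channel
# images of 500x600 and 480x640 are padded to (2, 4, 500, 640), where channel index 3 is the
# mask (0 = valid pixels, 1 = padded region).
#
#   imgs = [torch.rand(3, 500, 600), torch.rand(3, 480, 640)]
#   targets = [{"size": torch.tensor([500, 600])}, {"size": torch.tensor([480, 640])}]
#   batch = tensor_from_tensor_list(imgs, targets)   # -> shape (2, 4, 500, 640)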
@torch.no_grad()
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k."""
if target.numel() == 0:
return [torch.zeros([], device=output.device)]
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def get_total_grad_norm(parameters, norm_type=2):
"""Get toal gradient norm."""
parameters = list(filter(lambda p: p.grad is not None, parameters))
norm_type = float(norm_type)
device = parameters[0].grad.device
total_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), norm_type).to(device) for p in parameters]),
norm_type)
return total_norm
def inverse_sigmoid(x, eps=1e-5):
"""Inverse sigmoid."""
x = x.clamp(min=0, max=1)
x1 = x.clamp(min=eps)
x2 = (1 - x).clamp(min=eps)
return torch.log(x1 / x2)
def is_dist_avail_and_initialized():
"""Check if DDP is initialized."""
is_dist = True
if not dist.is_available():
is_dist = False
else:
is_dist = dist.is_initialized() or False
return is_dist
def get_world_size():
"""Get world size."""
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_global_rank():
"""Get global rank."""
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/deformable_detr/utils/misc.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Generates TRT compatible DDETR onnx model. """
import torch
from torch.onnx import register_custom_op_symbolic
import onnx
import numpy as np
import onnx_graphsurgeon as gs
# register plugin
def nvidia_msda(g, value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights):
"""Returns nvidia_msda."""
return g.op("nvidia::MultiscaleDeformableAttnPlugin_TRT", value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights)
class ONNXExporter(object):
"""Onnx Exporter."""
@classmethod
def setUpClass(cls):
"""SetUpclass to set the manual seed for reproduceability"""
torch.manual_seed(123)
def export_model(self, model, batch_size, onnx_file, dummy_input, do_constant_folding=False, opset_version=12,
output_names=None, input_names=None, verbose=False):
""" Export_model.
The do_constant_folding = False avoids MultiscaleDeformableAttnPlugin_TRT error (tensors on 2 devices) when torch > 1.9.0.
However, it would cause tensorrt 8.0.3.4 (nvcr.io/nvidia/pytorch:21.11-py3 env) reports clip node error.
This error is fixed in tensorrt >= 8.2.1.8 (nvcr.io/nvidia/tensorrt:22.01-py3).
Args:
model (nn.Module): torch model to export.
batch_size (int): batch size of the ONNX model. -1 means dynamic batch size.
onnx_file (str): output path of the onnx file.
dummy_input (torch.Tensor): input tensor.
do_constant_folding (bool): flag to indicate whether to fold constants in the ONNX model.
opset_version (int): opset_version of the ONNX file.
output_names (str): output names of the ONNX file.
input_names (str): input names of the ONNX file.
verbose (bool): verbosity level.
"""
if batch_size is None or batch_size == -1:
dynamic_axes = {"inputs": {0: "batch"}, "pred_logits": {0: "batch"}, "pred_boxes": {0: "batch"}}
else:
dynamic_axes = None
# CPU version requires opset_version > 16
if not next(model.parameters()).is_cuda and opset_version < 16:
print(f"CPU version of Deformable MHA requires opset version larger than 16. Overriding provided opset {opset_version} to 16.")
opset_version = 16
register_custom_op_symbolic('nvidia::MultiscaleDeformableAttnPlugin_TRT', nvidia_msda, opset_version)
torch.onnx.export(model, dummy_input, onnx_file,
input_names=input_names, output_names=output_names, export_params=True,
training=torch.onnx.TrainingMode.EVAL, opset_version=opset_version, do_constant_folding=do_constant_folding,
custom_opsets={"nvidia": 1}, verbose=verbose, dynamic_axes=dynamic_axes)
@staticmethod
def check_onnx(onnx_file):
"""Check onnx file.
Args:
onnx_file (str): path to ONNX file.
"""
model = onnx.load(onnx_file)
onnx.checker.check_model(model)
@staticmethod
def onnx_change(onnx_file):
"""Make ddetr onnx compatible with TRT. Additionally, fold constants.
Args:
onnx_file (str): path to ONNX file.
"""
graph = gs.import_onnx(onnx.load(onnx_file))
for node in graph.nodes:
if node.op == "MultiscaleDeformableAttnPlugin_TRT":
node.attrs = {"name": "MultiscaleDeformableAttnPlugin_TRT", "version": "1", "namespace": ""}
new_inputs = []
for i, inp in enumerate(node.inputs):
if i in (1, 2) and hasattr(inp, "values"):
new_inp = gs.Constant(name=inp.name, values=inp.values.astype(np.int32))
new_inputs.append(new_inp)
else:
new_inputs.append(inp)
node.inputs = new_inputs
# Setting constant folding in torch result in error due to some layers still in CPU
# Limit workspace size to 1GB to disable folding for MatMul
graph.fold_constants(size_threshold=1024 * 1024 * 1024)
graph.cleanup().toposort()
onnx.save(gs.export_onnx(graph), onnx_file)
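# Typical export flow sketch; the model, input resolution, and file name below are placeholders
# used only for illustration.
#
#   exporter = ONNXExporter()
#   dummy_input = torch.randn(1, 3, 544, 960).cuda()               # hypothetical input size
#   exporter.export_model(model, batch_size=-1, onnx_file="ddetr.onnx", dummy_input=dummy_input,
#                         input_names=["inputs"], output_names=["pred_logits", "pred_boxes"])
#   ONNXExporter.check_onnx("ddetr.onnx")
#   ONNXExporter.onnx_change("ddetr.onnx")                          # make the graph TRT-compatible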
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/deformable_detr/utils/onnx_export.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities for bounding box manipulation and GIoU.
"""
import torch
from torchvision.ops.boxes import box_area
def box_cxcywh_to_xyxy(x):
"""Convert cxcywh format to xyxy."""
x_c, y_c, w, h = x.unbind(-1)
b = [(x_c - 0.5 * w), (y_c - 0.5 * h),
(x_c + 0.5 * w), (y_c + 0.5 * h)]
return torch.stack(b, dim=-1)
def box_xyxy_to_cxcywh(x):
"""Convert xyxy format to cxcywh."""
x0, y0, x1, y1 = x.unbind(-1)
b = [(x0 + x1) / 2, (y0 + y1) / 2,
(x1 - x0), (y1 - y0)]
return torch.stack(b, dim=-1)
def box_iou(boxes1, boxes2):
"""Calculate box IoU.
Args:
boxes1 (torch.Tensor): boxes 1.
boxes2 (torch.Tensor): boxes 2.
Returns:
iou (torch.Tensor): IoU values.
union (torch.Tensor): Union values.
"""
area1 = box_area(boxes1)
area2 = box_area(boxes2)
lt = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2]
rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2]
wh = (rb - lt).clamp(min=0) # [N,M,2]
inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]
union = area1[:, None] + area2 - inter
iou = inter / union
return iou, union
def generalized_box_iou(boxes1, boxes2):
"""Generalized IoU from https://giou.stanford.edu/.
The boxes should be in [x0, y0, x1, y1] format.
Args:
boxes1 (torch.Tensor): boxes 1.
boxes2 (torch.Tensor): boxes 2.
Returns:
A [N, M] pairwise matrix, where N = len(boxes1) and M = len(boxes2).
Note:
Degenerate boxes give inf / nan results, so an early check is performed.
"""
assert (boxes1[:, 2:] >= boxes1[:, :2]).all()
assert (boxes2[:, 2:] >= boxes2[:, :2]).all()
iou, union = box_iou(boxes1, boxes2)
lt = torch.min(boxes1[:, None, :2], boxes2[:, :2])
rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
wh = (rb - lt).clamp(min=0) # [N,M,2]
area = wh[:, :, 0] * wh[:, :, 1]
return iou - (area - union) / area
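# Quick usage sketch with made-up boxes in [x0, y0, x1, y1] format (values are illustrative):
#
#   boxes1 = torch.tensor([[0., 0., 10., 10.]])
#   boxes2 = torch.tensor([[0., 0., 10., 10.], [5., 5., 15., 15.]])
#   giou = generalized_box_iou(boxes1, boxes2)   # shape (1, 2); identical boxes give 1.0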
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/deformable_detr/utils/box_ops.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data source config class for DriveNet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
class DataSourceConfig(object):
"""Hold all data source related parameters."""
def __init__(self,
dataset_type,
dataset_files,
image_dir,
minimum_target_class_imbalance=None,
num_duplicates=0,
skip_empty_frames=False,
ignored_classifiers_for_skip=None,
additional_conditions=None):
"""Constructor.
Args:
dataset_type (string): Currently only 'tfrecord' and 'sqlite' are supported.
dataset_files (list): A list of absolute paths to dataset files. In case of
tfrecords, a list of absolute paths to .tfrecord files.
image_dir (string): Absolute path to images directory.
minimum_target_class_imbalance (map<string, float>): Minimum ratio
(#dominant_class_instances/#target_class_instances) criteria for duplication
of frames. The string is the non-dominant class name and the float is the
ratio for duplication.
num_duplicates (int): Number of duplicates of frames to be added, if the frame
satisfies the minimum_target_class_imbalance.
skip_empty_frames (bool): Whether to ignore empty frames (i.e. frames without relevant
features). By default, False, i.e. all frames are returned.
ignored_classifiers_for_skip (set): Names of classifiers to ignore when
considering if frame is empty. I.e if frame only has these classes, it is still
regarded as empty.
additional_conditions (list): List of additional sql conditions for a 'where' clause.
It's only for SqliteDataSource, and other data sources will ignore it.
"""
self.dataset_type = dataset_type
self.dataset_files = dataset_files
self.image_dir = image_dir
self.minimum_target_class_imbalance = minimum_target_class_imbalance
self.num_duplicates = num_duplicates
self.skip_empty_frames = skip_empty_frames
self.ignored_classifiers_for_skip = ignored_classifiers_for_skip
self.additional_conditions = additional_conditions
def build_data_source_lists(data_sources): # Non-sharding data
"""Build training and validation data source lists from proto.
Args:
dataset_config
Returns:
training_data_source_list (list): List of tuples (tfrecord_file_pattern,
image_directory_path) to use for training.
validation_data_source_list (list): List of tuples (tfrecord_file_pattern,
image_directory_path) to use for validation. Can be None.
"""
# Prepare data source
data_source_list = []
if type(data_sources).__name__ == "DictConfig":
data_sources = [data_sources]
for data_source in data_sources:
image_dir = data_source["image_dir"]
_files = data_source["json_file"]
extension = os.path.splitext(os.path.basename(_files))[1]
if extension == '.json': # use specific json file provided in spec file
json_files = [_files]
elif extension == "": # grab all json file under the directory
json_files = glob.glob(_files)
else:
raise NotImplementedError("Need to provide json_file in dataset_config in the format of either '/path/to/json_file.json' or '/path/to/json_files/*'")
data_source_list.append(DataSourceConfig(
dataset_type='json',
dataset_files=json_files,
image_dir=image_dir))
return data_source_list
def build_data_source_lists_per_gpu(data_sources, global_rank, num_gpus): # Sharded data
"""Build training and validation data source lists from proto.
Args:
data_sources
Returns:
training_data_source_list (list): List of tuples (tfrecord_file_pattern,
image_directory_path) to use for training.
validation_data_source_list (list): List of tuples (tfrecord_file_pattern,
image_directory_path) to use for validation. Can be None.
"""
# Prepare training and validation data source
data_source_list = []
if type(data_sources).__name__ == "DictConfig":
data_sources = [data_sources]
for data_source in data_sources:
image_dir = data_source["image_dir"]
_files = data_source["json_file"]
extension = os.path.splitext(os.path.basename(_files))[1]
if extension == '.json': # use specific json file provided
json_files = [_files]
elif extension == "": # grab all json file under the directory
json_files = glob.glob(_files)
else:
raise NotImplementedError("Need to provide json_file in dataset_config in the format of either '/path/to/json_file.json' or '/path/to/json_files/*'")
training_jsons_per_seq = []
for shard_idx, json_file in enumerate(json_files):
if (shard_idx % num_gpus) == global_rank:
training_jsons_per_seq.append(json_file)
data_source_list.append(DataSourceConfig(
dataset_type='json',
dataset_files=training_jsons_per_seq,
image_dir=image_dir))
return data_source_list
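# Sketch of the dataset-config shape these helpers consume; the paths below are placeholders.
#
#   data_sources = [
#       {"image_dir": "/data/seq0/images", "json_file": "/data/seq0/labels.json"},
#       {"image_dir": "/data/seq1/images", "json_file": "/data/seq1/shards/*"},
#   ]
#   sources = build_data_source_lists(data_sources)
#   per_gpu = build_data_source_lists_per_gpu(data_sources, global_rank=0, num_gpus=2)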
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/deformable_detr/utils/data_source_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for converting datasets to json"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABCMeta
from abc import abstractmethod
import logging
import os
import random
import six
import numpy as np
from PIL import Image
import json
from json.decoder import JSONDecodeError
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.cv.deformable_detr.utils.converter_lib import _shard, _shuffle
from nvidia_tao_pytorch.cv.deformable_detr.utils.misc import DEFAULT_TARGET_CLASS_MAPPING, DEFAULT_CLASS_LOOKUP_TABLE, get_categories
logger = logging.getLogger(__name__)
def create_info_images(image_id, file_name, img_size):
"""create image info in COCO format"""
images = {
"id": image_id,
"file_name": file_name,
"height": img_size[1],
"width": img_size[0]
}
return images
def create_info_categories():
"""create categories info in COCO format"""
categories = [{"supercategory": "person", "id": 1, "name": "person"},
{"supercategory": "face", "id": 2, "name": "face"},
{"supercategory": "bag", "id": 3, "name": "bag"}]
return categories
def create_info_annotations(image_id, category_id, annotation_id, bbox, area):
"""create annotation info in COCO format"""
annotation = {
'image_id': image_id,
'category_id': category_id,
'id': annotation_id,
'bbox': bbox,
'area': area,
'iscrowd': 0
}
return annotation
class DatasetConverter(six.with_metaclass(ABCMeta, object)):
"""Converts an object detection dataset to Json.
This class needs to be subclassed, and the convert() and
create_example_proto() methods overridden to do the dataset
conversion. Splitting of partitions to shards, shuffling and
writing Json are implemented here.
"""
@abstractmethod
def __init__(self, data_root, input_source, num_partitions, num_shards,
output_dir, mapping_path=None):
"""Initialize the converter.
Args:
input_source (string): Dataset directory path relative to data root.
num_partitions (int): Number of partitions (folds).
num_shards (int): Number of shards.
output_dir (str): Path for the output file.
mapping_path (str): Path to a JSON file containing the class mapping.
"""
if data_root is not None:
self.img_root_dir = os.path.join(data_root, input_source)
else:
self.img_root_dir = input_source
self.img_root_dir = os.path.abspath(self.img_root_dir) # data_root/sequence_name
self.input_source = os.path.basename(input_source)
self.output_partitions = num_partitions
self.output_shards = num_shards
self.output_dir = output_dir
self.output_filename = os.path.join(output_dir, self.input_source, self.input_source)
# Make the output directory to write the shard.
if not os.path.exists(output_dir):
logger.info("Creating output directory {}".format(output_dir))
os.makedirs(output_dir)
if not os.path.exists(os.path.join(output_dir, self.input_source)):
logger.info("Creating output directory {}".format(os.path.join(output_dir, self.input_source)))
os.makedirs(os.path.join(output_dir, self.input_source))
self.class_map = {}
self.log_warning = {}
# check the previous image id and annotation id for offset
self.image_id_offset = 0
self.annotation_id_offset = 0
if mapping_path:
if not os.path.exists(mapping_path):
raise FileNotFoundError(f"Provided class mapping path {mapping_path} does not exist!")
with open(mapping_path, "r", encoding='utf-8') as f:
self.target_class_mapping = json.load(f)
self.category_info, self.class_lookup_table = get_categories(self.target_class_mapping)
logger.info("Load category mapping from {}".format(mapping_path))
else:
# Use the default values for PeopleNet
self.target_class_mapping = DEFAULT_TARGET_CLASS_MAPPING
self.class_lookup_table = DEFAULT_CLASS_LOOKUP_TABLE
self.category_info = create_info_categories()
logger.info("Category mapping: \n {}".format(self.class_lookup_table))
# Set a fixed seed to get a reproducible sequence.
random.seed(42)
def convert(self):
"""Do the dataset conversion."""
# Divide dataset into partitions and shuffle them.
partitions = self._partition()
_shuffle(partitions)
# Shard and write the partitions to Json.
global_image_id, global_ann_id = self._write_partitions(partitions)
stats_filename = os.path.join(self.output_dir, 'stats.txt')
with open(stats_filename, 'a+') as f:
out_str = "{},{},{}\n".format(self.input_source, global_image_id, global_ann_id)
f.write(out_str)
# Log how many objects per class got written in total.
logger.info("Cumulative object statistics")
s_logger = status_logging.get_status_logger()
# Print out the class map
log_str = "Class map. \nLabel in GT: Label in Json file "
for key, value in six.iteritems(self.class_map):
log_str += "\n{}: {}".format(key, value)
logger.info(log_str)
s_logger.write(message=log_str)
note_string = (
"For the dataset_config in the experiment_spec, "
"please use labels in the Json file, while writing the classmap.\n"
)
print(note_string)
s_logger.write(message=note_string)
logger.info("Json generation complete.")
s_logger.write(
status_level=status_logging.Status.SUCCESS,
message="Json generation complete."
)
# Save labels with error to a JSON file
self._save_log_warnings()
def _write_partitions(self, partitions):
"""Shard and write partitions into Json.
Args:
partitions (list): A list of list of frame IDs.
Returns:
global_image_id (int), global_ann_id (int): Cumulative image and annotation counts after writing all shards.
"""
# Divide partitions into shards.
sharded_partitions = _shard(partitions, self.output_shards)
# Write .Json to disk for each partition and shard.
stats_filename = os.path.join(self.output_dir, 'stats.txt')
if os.path.exists(stats_filename):
with open(stats_filename, 'r') as f:
line = f.readlines()[-1].split(",")
global_image_id = int(line[1])
global_ann_id = int(line[2])
else:
global_image_id = 0
global_ann_id = 0
for p, partition in enumerate(sharded_partitions):
for s, shard in enumerate(partition):
shard_image_counter, shard_ann_counter = self._write_shard(shard, p, s, global_image_id, global_ann_id)
global_image_id += shard_image_counter
global_ann_id += shard_ann_counter
return global_image_id, global_ann_id
def _write_shard(self, shard, partition_number, shard_number, global_image_id, global_ann_id):
"""Write a single shard into the Json file.
Note that the dataset-specific part is captured in function
create_example_proto() which needs to be overridden for each
specific dataset.
Args:
shard (list): A list of frame IDs for this shard.
partition_number (int): Current partition (fold) index.
shard_number (int): Current shard index.
global_image_id (int): Cumulative image id offset from previously written shards.
global_ann_id (int): Cumulative annotation id offset from previously written shards.
Returns:
image_count (int), ann_count (int): Number of images and annotations written to this shard.
"""
logger.info('Writing partition {}, shard {}'.format(partition_number, shard_number))
status_logging.get_status_logger().write(
message='Writing partition {}, shard {}'.format(partition_number, shard_number)
)
output = self.output_filename
if self.output_partitions != 0 and self.output_partitions != 1:
output = '{}-fold-{:03d}-of-{:03d}'.format(output, partition_number,
self.output_partitions)
if self.output_shards != 0:
output = '{}-shard-{:05d}-of-{:05d}.json'.format(output, shard_number, self.output_shards)
# Store all the data for the shard.
json_output = {
"images": [],
"annotations": [],
"categories": self.category_info
}
image_count = 0
ann_count = 0
for frame_id in shard:
image_file = os.path.join(self.input_source, self.images_dir, frame_id + self.extension)
width, height = self._get_image_size(frame_id)
shard_image_id = self.image_id_offset + global_image_id + image_count
images_info = create_info_images(shard_image_id, image_file, (width, height))
json_output["images"].append(images_info)
# Create the annotation entries for this frame_id.
shard_ann_id = self.annotation_id_offset + global_ann_id + ann_count
json_output, shard_ann_count = self._create_info_dict(json_output, frame_id, shard_image_id, shard_ann_id)
image_count = image_count + 1
ann_count = ann_count + shard_ann_count
with open(output, 'w+') as outfile:
try:
json.dump(json_output, outfile)
except JSONDecodeError:
pass
return image_count, ann_count
def _get_image_size(self, frame_id):
"""Read image size from the image file, image sizes vary in KITTI."""
image_file = os.path.join(self.img_root_dir, self.images_dir, frame_id + self.extension)
width, height = Image.open(image_file).size
return width, height
@abstractmethod
def _partition(self):
"""Return dataset partitions."""
pass
@abstractmethod
def _create_info_dict(self, json_output, frame_id, image_id, ann_id):
"""Generate the example for this frame."""
pass
def _save_log_warnings(self):
"""Store out of bound bounding boxes to a json file."""
if self.log_warning:
logger.info("Writing the log_warning.json")
with open(f"{self.output_dir}_warning.json", "w") as f:
json.dump(self.log_warning, f, indent=2)
logger.info("There were errors in the labels. Details are logged at"
" %s_waring.json", self.output_dir)
class KITTIConverter(DatasetConverter):
"""Converts a KITTI detection dataset to jsons."""
def __init__(self, data_root, input_source, num_partitions, num_shards,
output_dir,
image_dir_name=None,
label_dir_name=None,
extension='.png',
partition_mode='random',
val_split=None,
mapping_path=None):
"""Initialize the converter.
Args:
input_source (string): Dataset directory path relative to data root.
num_partitions (int): Number of partitions (folds).
num_shards (int): Number of shards.
output_dir (str): Path for the output file.
image_dir_name (str): Name of the subdirectory containing images.
label_dir_name (str): Name of the subdirectory containing the label files for the
respective images in image_dir_name
extension (str): Extension of the images in the dataset directory.
partition_mode (str): Mode to partition the dataset. We only support sequence or
random split mode. In the sequence mode, it is mandatory to instantiate the
kitti sequence to frames file. Also, any arbitrary number of partitions may be
used. However, for random split, the sequence map file is ignored and only 2
partitions can ever be used. Here, the data is divided into two folds
1. validation fold
2. training fold
Validation fold (defaults to fold=0) contains val_split% of data, while train
fold contains (100-val_split)% of data.
val_split (int): Percentage split for validation. This is used with the random
partition mode only.
mapping_path (str): Path to a JSON file containing the class mapping.
If not specified, default to DEFAULT_TARGET_CLASS_MAPPING
"""
super(KITTIConverter, self).__init__(
data_root=data_root,
input_source=input_source,
num_partitions=num_partitions,
num_shards=num_shards,
output_dir=output_dir,
mapping_path=mapping_path)
# KITTI defaults.
self.images_dir = image_dir_name
self.labels_dir = label_dir_name
self.extension = extension
self.partition_mode = partition_mode
self.val_split = val_split / 100.
# check the previous image id and annotation id for offset
self.image_id_offset = 0
self.annotation_id_offset = 0
def _partition(self):
"""Partition KITTI dataset to self.output_partitions partitions based on sequences.
The following code is a modified version of the KITTISplitter class in Rumpy.
Returns:
partitions (list): A list of lists of frame ids, one list per partition.
"""
logger.debug("Generating partitions")
s_logger = status_logging.get_status_logger()
s_logger.write(message="Generating partitions")
partitions = [[] for _ in six.moves.range(self.output_partitions)]
if self.partition_mode is None and self.output_partitions == 1:
images_root = os.path.join(self.img_root_dir, self.images_dir)
images_list = [os.path.splitext(imfile)[0] for imfile in os.listdir(images_root)
if imfile.endswith(self.extension)]
partitions[0].extend(images_list)
elif self.partition_mode == 'random':
assert self.output_partitions == 2, "Invalid number of partitions ({}) "\
"for random split mode.".format(self.output_partitions)
assert 0 <= self.val_split < 1, (
"Validation split must satisfy the criteria, 0 <= val_split < 100. "
)
images_root = os.path.join(self.img_root_dir, self.images_dir)
images_list = [os.path.splitext(imfile)[0] for imfile in os.listdir(images_root)
if imfile.endswith(self.extension)]
total_num_images = len(images_list)
num_val_images = (int)(self.val_split * total_num_images)
logger.debug("Validation percentage: {}".format(self.val_split))
partitions[0].extend(images_list[:num_val_images])
partitions[1].extend(images_list[num_val_images:])
for part in partitions:
random.shuffle(part)
logger.info("Num images in\nTrain: {}\tVal: {}".format(len(partitions[1]),
len(partitions[0])))
s_logger.kpi = {
"num_images": total_num_images
}
s_logger.write(
message="Num images in\nTrain: {}\tVal: {}".format(
len(partitions[1]),
len(partitions[0])
)
)
if self.val_split == 0:
logger.info("Skipped validation data...")
s_logger.write(message="Skipped validation data.")
else:
validation_note = (
"Validation data in partition 0. Hence, while choosing the validation"
"set during training choose validation_fold 0."
)
logger.info(validation_note)
s_logger.write(message=validation_note)
else:
raise NotImplementedError("Unknown partition mode. Please stick to either "
"random or sequence")
return partitions
def _create_info_dict(self, json_output, frame_id, image_id, global_ann_id):
"""Generate the example proto for this frame.
Args:
frame_id (string): The frame id.
Returns:
example : An Example containing all labels for the frame.
"""
# Populate the COCO-format dictionary with this frame's annotations.
json_output, ann_counter = self._process_info(json_output, frame_id, image_id, global_ann_id)
return json_output, ann_counter
def _get_image_size(self, frame_id):
"""Read image size from the image file, image sizes vary in KITTI."""
image_file = os.path.join(self.img_root_dir, self.images_dir, frame_id + self.extension)
width, height = Image.open(image_file).size
return width, height
def _process_info(self, json_output, frame_id, image_id, global_ann_id):
"""Add KITTI target features such as bbox to the Example protobuf.
Reads labels from KITTI txt files with following fields:
(From Kitti devkit's README)
1 type Describes the type of object: 'Car', 'Van',
'Truck', 'Pedestrian', 'Person_sitting', 'Cyclist',
'Tram', 'Misc' or 'DontCare'
1 truncated Float from 0 (non-truncated) to 1 (truncated),
where truncated refers to the object leaving image
boundaries
1 occluded Integer (0,1,2,3) indicating occlusion state:
0 = fully visible, 1 = partly occluded
2 = largely occluded, 3 = unknown
1 alpha Observation angle of object, ranging [-pi..pi]
4 bbox 2D bounding box of object in the image (0-based
index): contains left, top, right, bottom pixel
coordinates
3 dimensions 3D object dimensions: height, width, length (in
meters)
3 location 3D object location x,y,z in camera coordinates (in
meters)
1 rotation_y Rotation ry around Y-axis in camera coordinates
[-pi..pi]
Args:
json_output (dict): COCO-format dictionary being populated for this shard.
frame_id (string): Frame id.
image_id (int): COCO image id assigned to this frame.
global_ann_id (int): Annotation id offset for this frame.
"""
# reads the labels as a list of tuples
label_file = os.path.join(self.img_root_dir, self.labels_dir, '{}.txt'.format(frame_id))
labels = np.genfromtxt(label_file, dtype=None).tolist()
if isinstance(labels, tuple):
labels = [labels]
ann_counter = 0
for label in labels:
assert len(label) == 15, 'Ground truth kitti labels should have only 15 fields.'
x1 = label[4]
y1 = label[5]
x2 = label[6]
y2 = label[7]
# Map object classes as they are in the dataset to target classes of the model
# self.class_map[label[0]] = label[0].lower()
object_class = label[0].lower().decode('utf-8')
mapped_class = self.target_class_mapping.get(object_class, 'unknown')
self.class_map[label[0]] = mapped_class
if mapped_class == 'unknown':
continue
category_id = self.class_lookup_table[mapped_class]
# Check to make sure the coordinates are 'ltrb' format.
error_string = "Top left coordinate must be less than bottom right."\
"Error in object {} of label_file {}. \nCoordinates: "\
"x1 = {}, x2 = {}, y1: {}, y2: {}".format(labels.index(label),
label_file,
x1, x2, y1, y2)
if not (x1 < x2 and y1 < y2):
logger.debug(error_string)
logger.debug("Skipping this object")
continue
# coco bbox format (x1, y1, w, h)
bbox = [x1, y1, (x2 - x1), (y2 - y1)]
area = bbox[2] * bbox[3]
annotation_id = global_ann_id + ann_counter
annotation_info = create_info_annotations(image_id, category_id, annotation_id, bbox, area)
ann_counter = ann_counter + 1
json_output["annotations"].append(annotation_info)
return json_output, ann_counter
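# End-to-end sketch of converting a KITTI-style folder into sharded COCO jsons; the directory
# names and split values below are illustrative, not defaults enforced by this module.
#
#   converter = KITTIConverter(data_root="/datasets", input_source="sequence_0",
#                              num_partitions=2, num_shards=10, output_dir="/results/jsons",
#                              image_dir_name="images", label_dir_name="labels",
#                              extension=".png", partition_mode="random", val_split=20)
#   converter.convert()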
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/deformable_detr/utils/converter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DD utils module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/deformable_detr/utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
COCO evaluator that works in distributed mode.
Mostly copy-paste from https://github.com/pytorch/vision/blob/edfd5a7/references/detection/coco_eval.py
The difference is that there is less copy-pasting from pycocotools
in the end of the file, as python3 can suppress prints with contextlib.
"""
import os
import contextlib
import copy
import numpy as np
import torch
import json
from pycocotools.cocoeval import COCOeval
from pycocotools.coco import COCO
from collections import defaultdict
from nvidia_tao_pytorch.cv.deformable_detr.utils.misc import all_gather
class CocoEvaluator(object):
""" CocoEvaluator class """
def __init__(self, coco_gt, iou_types, eval_class_ids):
""" CocoEval init """
if not isinstance(iou_types, (list, tuple)):
raise TypeError(f"Invalid data type for iou_types encountered: {type(iou_types)}")
coco_gt = copy.deepcopy(coco_gt)
self.coco_gt = coco_gt
self.eval_class_ids = eval_class_ids
self.iou_types = iou_types
self.coco_eval = {}
for iou_type in iou_types:
self.coco_eval[iou_type] = COCOeval(coco_gt, iouType=iou_type)
self.img_ids = []
self.eval_imgs = {k: [] for k in iou_types}
def update(self, predictions):
""" update predictions """
img_ids = list(np.unique(list(predictions.keys())))
self.img_ids.extend(img_ids)
for iou_type in self.iou_types:
results = self.prepare(predictions, iou_type)
# suppress pycocotools prints
with open(os.devnull, 'w') as devnull:
with contextlib.redirect_stdout(devnull):
coco_dt = COCO.loadRes(self.coco_gt, results) if results else COCO()
coco_eval = self.coco_eval[iou_type]
if self.eval_class_ids is not None:
coco_eval.params.catIds = self.eval_class_ids
coco_eval.cocoDt = coco_dt
coco_eval.params.imgIds = list(img_ids)
img_ids, eval_imgs = evaluate(coco_eval)
self.eval_imgs[iou_type].append(eval_imgs)
def synchronize_between_processes(self):
""" synchronize_between_processes """
for iou_type in self.iou_types:
self.eval_imgs[iou_type] = np.concatenate(self.eval_imgs[iou_type], 2)
create_common_coco_eval(self.coco_eval[iou_type], self.img_ids, self.eval_imgs[iou_type])
def overall_accumulate(self):
""" overall_accumulate """
for coco_eval in self.coco_eval.values():
accumulate(coco_eval)
def overall_summarize(self, is_print=False):
""" overall_summarize """
for _, coco_eval in self.coco_eval.items():
summarize(coco_eval, is_print=is_print)
def prepare(self, predictions, iou_type):
""" prepare """
if iou_type == "bbox":
prepared_data = self.prepare_for_coco_detection(predictions)
else:
raise NotImplementedError("iou type {} is not implemented".format(iou_type))
return prepared_data
def prepare_for_coco_detection(self, predictions):
""" prepare_for_coco_detection """
coco_results = []
for original_id, prediction in predictions.items():
if len(prediction) == 0:
continue
boxes = prediction["boxes"]
boxes = convert_to_xywh(boxes).tolist()
scores = prediction["scores"].tolist()
labels = prediction["labels"].tolist()
coco_results.extend(
[
{
"image_id": original_id,
"category_id": labels[k],
"bbox": box,
"score": scores[k],
}
for k, box in enumerate(boxes)
]
)
return coco_results
def convert_to_xywh(boxes):
""" convert_to_xywh """
xmin, ymin, xmax, ymax = boxes.unbind(1)
return torch.stack((xmin, ymin, xmax - xmin, ymax - ymin), dim=1)
def merge(img_ids, eval_imgs):
""" merge """
all_img_ids = all_gather(img_ids)
all_eval_imgs = all_gather(eval_imgs)
merged_img_ids = []
for p in all_img_ids:
merged_img_ids.extend(p)
merged_eval_imgs = []
for p in all_eval_imgs:
merged_eval_imgs.append(p)
merged_img_ids = np.array(merged_img_ids)
merged_eval_imgs = np.concatenate(merged_eval_imgs, 2)
# keep only unique (and in sorted order) images
merged_img_ids, idx = np.unique(merged_img_ids, return_index=True)
merged_eval_imgs = merged_eval_imgs[..., idx]
return merged_img_ids, merged_eval_imgs
def create_common_coco_eval(coco_eval, img_ids, eval_imgs):
""" create_common_coco_eval """
img_ids, eval_imgs = merge(img_ids, eval_imgs)
img_ids = list(img_ids)
eval_imgs = list(eval_imgs.flatten())
coco_eval.evalImgs = eval_imgs
coco_eval.params.imgIds = img_ids
coco_eval._paramsEval = copy.deepcopy(coco_eval.params)
def createIndex(self):
""" create Index for COCO Class """
anns, cats, imgs = {}, {}, {}
imgToAnns, catToImgs = defaultdict(list), defaultdict(list)
if 'annotations' in self.dataset:
for ann in self.dataset['annotations']:
imgToAnns[ann['image_id']].append(ann)
anns[ann['id']] = ann
if 'images' in self.dataset:
for img in self.dataset['images']:
imgs[img['id']] = img
if 'categories' in self.dataset:
for cat in self.dataset['categories']:
cats[cat['id']] = cat
if 'annotations' in self.dataset and 'categories' in self.dataset:
for ann in self.dataset['annotations']:
catToImgs[ann['category_id']].append(ann['image_id'])
# create class members
self.anns = anns
self.imgToAnns = imgToAnns
self.catToImgs = catToImgs
self.imgs = imgs
self.cats = cats
def loadRes(self, resFile):
"""
Load result file and return a result api object.
:param resFile (str) : file name of result file
:return: res (obj) : result api object
"""
res = COCO()
res.dataset['images'] = [img for img in self.dataset['images']]
    if isinstance(resFile, str):  # torch._six was removed in recent PyTorch; plain str is equivalent here
anns = json.load(open(resFile))
elif type(resFile) == np.ndarray:
anns = self.loadNumpyAnnotations(resFile)
else:
anns = resFile
    assert type(anns) == list, 'results is not an array of objects'
annsImgIds = [ann['image_id'] for ann in anns]
assert set(annsImgIds) == (
set(annsImgIds) & set(self.getImgIds())
), 'Results do not correspond to current coco set'
if 'bbox' in anns[0] and not anns[0]['bbox'] == []:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for img_id, ann in enumerate(anns):
bb = ann['bbox']
x1, x2, y1, y2 = [bb[0], bb[0] + bb[2], bb[1], bb[1] + bb[3]]
if 'segmentation' not in ann:
ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
ann['area'] = bb[2] * bb[3]
ann['id'] = img_id + 1
ann['iscrowd'] = 0
res.dataset['annotations'] = anns
createIndex(res)
return res
def evaluate(self):
"""
Run per image evaluation on given images and store results (a list of dict) in self.evalImgs
return: None
"""
p = self.params
# add backward compatibility if useSegm is specified in params
if p.useSegm is not None:
p.iouType = 'segm' if p.useSegm == 1 else 'bbox'
print('useSegm (deprecated) is not None. Running {} evaluation'.format(p.iouType))
# print('Evaluate annotation type *{}*'.format(p.iouType))
p.imgIds = list(np.unique(p.imgIds))
if p.useCats:
p.catIds = list(np.unique(p.catIds))
p.maxDets = sorted(p.maxDets)
self.params = p
self._prepare()
# loop through images, area range, max detection number
catIds = p.catIds if p.useCats else [-1]
if p.iouType == 'segm' or p.iouType == 'bbox':
computeIoU = self.computeIoU
elif p.iouType == 'keypoints':
computeIoU = self.computeOks
self.ious = {
(imgId, catId): computeIoU(imgId, catId)
for imgId in p.imgIds
for catId in catIds}
evaluateImg = self.evaluateImg
maxDet = p.maxDets[-1]
evalImgs = [
evaluateImg(imgId, catId, areaRng, maxDet)
for catId in catIds
for areaRng in p.areaRng
for imgId in p.imgIds
]
# this is NOT in the pycocotools code, but could be done outside
evalImgs = np.asarray(evalImgs).reshape(len(catIds), len(p.areaRng), len(p.imgIds))
self._paramsEval = copy.deepcopy(self.params)
return p.imgIds, evalImgs
def summarize(self, is_print=False):
"""
Compute summary metrics for evaluation results.
    Note this function can *only* be applied on the default parameter setting
"""
self.is_print = is_print
def _summarize(ap=1, iouThr=None, areaRng='all', maxDets=100):
""" _summarize, remove prints"""
p = self.params
if self.is_print:
iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'
titleStr = 'Average Precision' if ap == 1 else 'Average Recall'
typeStr = '(AP)' if ap == 1 else '(AR)'
iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) \
if iouThr is None else '{:0.2f}'.format(iouThr)
aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]
mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]
if ap == 1:
# dimension of precision: [TxRxKxAxM]
s = self.eval['precision']
# IoU
if iouThr is not None:
t = np.where(iouThr == p.iouThrs)[0]
s = s[t]
s = s[:, :, :, aind, mind]
else:
# dimension of recall: [TxKxAxM]
s = self.eval['recall']
if iouThr is not None:
t = np.where(iouThr == p.iouThrs)[0]
s = s[t]
s = s[:, :, aind, mind]
if len(s[s > -1]) == 0:
mean_s = -1
else:
mean_s = np.mean(s[s > -1])
if self.is_print:
print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s))
return mean_s
def _summarizeDets():
""" _summarizeDets """
stats = np.zeros((12,))
stats[0] = _summarize(1)
stats[1] = _summarize(1, iouThr=.5, maxDets=self.params.maxDets[2])
stats[2] = _summarize(1, iouThr=.75, maxDets=self.params.maxDets[2])
stats[3] = _summarize(1, areaRng='small', maxDets=self.params.maxDets[2])
stats[4] = _summarize(1, areaRng='medium', maxDets=self.params.maxDets[2])
stats[5] = _summarize(1, areaRng='large', maxDets=self.params.maxDets[2])
stats[6] = _summarize(0, maxDets=self.params.maxDets[0])
stats[7] = _summarize(0, maxDets=self.params.maxDets[1])
stats[8] = _summarize(0, maxDets=self.params.maxDets[2])
stats[9] = _summarize(0, areaRng='small', maxDets=self.params.maxDets[2])
stats[10] = _summarize(0, areaRng='medium', maxDets=self.params.maxDets[2])
stats[11] = _summarize(0, areaRng='large', maxDets=self.params.maxDets[2])
return stats
def _summarizeKps():
""" _summarizeKps """
stats = np.zeros((10,))
stats[0] = _summarize(1, maxDets=20)
stats[1] = _summarize(1, maxDets=20, iouThr=.5)
stats[2] = _summarize(1, maxDets=20, iouThr=.75)
stats[3] = _summarize(1, maxDets=20, areaRng='medium')
stats[4] = _summarize(1, maxDets=20, areaRng='large')
stats[5] = _summarize(0, maxDets=20)
stats[6] = _summarize(0, maxDets=20, iouThr=.5)
stats[7] = _summarize(0, maxDets=20, iouThr=.75)
stats[8] = _summarize(0, maxDets=20, areaRng='medium')
stats[9] = _summarize(0, maxDets=20, areaRng='large')
return stats
if not self.eval:
raise Exception('Please run accumulate() first')
iouType = self.params.iouType
if iouType in ('segm', 'bbox'):
summarize = _summarizeDets
elif iouType == 'keypoints':
summarize = _summarizeKps
self.stats = summarize()
def accumulate(self, p=None):
"""
Accumulate per image evaluation results and store the result in self.eval
param p: input params for evaluation
return: None
"""
if not self.evalImgs:
print('Please run evaluate() first')
# allows input customized parameters
if p is None:
p = self.params
p.catIds = p.catIds if p.useCats == 1 else [-1]
T = len(p.iouThrs)
R = len(p.recThrs)
K = len(p.catIds) if p.useCats else 1
A = len(p.areaRng)
M = len(p.maxDets)
precision = -np.ones((T, R, K, A, M)) # -1 for the precision of absent categories
recall = -np.ones((T, K, A, M))
scores = -np.ones((T, R, K, A, M))
# create dictionary for future indexing
_pe = self._paramsEval
catIds = _pe.catIds if _pe.useCats else [-1]
setK = set(catIds)
setA = set(map(tuple, _pe.areaRng))
setM = set(_pe.maxDets)
setI = set(_pe.imgIds)
# get inds to evaluate
k_list = [n for n, k in enumerate(p.catIds) if k in setK]
m_list = [m for n, m in enumerate(p.maxDets) if m in setM]
a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA]
i_list = [n for n, i in enumerate(p.imgIds) if i in setI]
I0 = len(_pe.imgIds)
A0 = len(_pe.areaRng)
# retrieve E at each category, area range, and max number of detections
for k, k0 in enumerate(k_list):
Nk = k0 * A0 * I0
for a, a0 in enumerate(a_list):
Na = a0 * I0
for m, maxDet in enumerate(m_list):
E = [self.evalImgs[Nk + Na + i] for i in i_list]
E = [e for e in E if e is not None]
if len(E) == 0:
continue
dtScores = np.concatenate([e['dtScores'][0: maxDet] for e in E])
                # different sorting methods generate slightly different results;
                # mergesort is used to be consistent with the Matlab implementation.
inds = np.argsort(-dtScores, kind='mergesort')
dtScoresSorted = dtScores[inds]
dtm = np.concatenate([e['dtMatches'][:, 0:maxDet] for e in E], axis=1)[:, inds]
dtIg = np.concatenate([e['dtIgnore'][:, 0:maxDet] for e in E], axis=1)[:, inds]
gtIg = np.concatenate([e['gtIgnore'] for e in E])
npig = np.count_nonzero(gtIg == 0)
if npig == 0:
continue
tps = np.logical_and(dtm, np.logical_not(dtIg))
fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg))
tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float32)
fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float32)
for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):
tp = np.array(tp)
fp = np.array(fp)
nd = len(tp)
rc = tp / npig
pr = tp / (fp + tp + np.spacing(1))
q = np.zeros((R,))
ss = np.zeros((R,))
if nd:
recall[t, k, a, m] = rc[-1]
else:
recall[t, k, a, m] = 0
                    # numpy is slow without cython optimization for accessing elements
                    # using a python list gives a significant speed improvement
pr = pr.tolist()
q = q.tolist()
for i in range(nd - 1, 0, -1):
if pr[i] > pr[i - 1]:
pr[i - 1] = pr[i]
inds = np.searchsorted(rc, p.recThrs, side='left')
try:
for ri, pi in enumerate(inds):
q[ri] = pr[pi]
ss[ri] = dtScoresSorted[pi]
except Exception:
pass
precision[t, :, k, a, m] = np.array(q)
scores[t, :, k, a, m] = np.array(ss)
self.eval = {
'params': p,
'counts': [T, R, K, A, M],
# 'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
'precision': precision,
'recall': recall,
'scores': scores,
}
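# --------------------------------------------------------------------------- #
# Illustrative usage sketch (hedged, not called anywhere in this module): the
# path and the `predictions` payload are hypothetical, but the call sequence
# mirrors how the evaluator is driven during validation - update() per batch,
# synchronize across ranks, then accumulate and summarize.
# --------------------------------------------------------------------------- #
def _example_coco_evaluation(annotation_json, predictions):
    """Hedged sketch of the CocoEvaluator life cycle.

    Args:
        annotation_json (str): path to a COCO-format ground truth file (hypothetical).
        predictions (dict): image_id -> {"boxes", "scores", "labels"} tensors,
            i.e. the format consumed by prepare_for_coco_detection().
    """
    coco_gt = COCO(annotation_json)
    evaluator = CocoEvaluator(coco_gt, iou_types=("bbox",), eval_class_ids=None)
    evaluator.update(predictions)               # typically called once per batch
    evaluator.synchronize_between_processes()   # gathers results across ranks
    evaluator.overall_accumulate()
    evaluator.overall_summarize(is_print=True)
    # mAP@[0.5:0.95] is the first entry of the summarized stats.
    return evaluator.coco_eval["bbox"].stats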
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/deformable_detr/utils/coco_eval.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for converting datasets to json"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import math
def _shard(partitions, num_shards):
"""Shard each partition."""
num_shards = max(num_shards, 1) # 0 means 1 shard.
shards = []
for partition in partitions:
result = []
if len(partition) == 0:
continue
shard_size = math.ceil(len(partition) / num_shards)
for i in range(num_shards):
begin = i * shard_size
end = (i + 1) * shard_size
if end > len(partition):
pad_counter = end - len(partition)
pad_samples = random.sample(partition, pad_counter)
out_partition = partition[begin: len(partition)] + pad_samples
else:
out_partition = partition[begin:end]
result.append(out_partition)
shards.append(result)
return shards
def _shuffle(partitions):
"""Shuffle each partition independently."""
for partition in partitions:
random.shuffle(partition)
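# Illustrative sketch (hedged, not used elsewhere): shows how _shard() splits a
# partition and pads the last shard by re-sampling so every shard has equal size.
def _example_shard_padding():
    """Hedged example: 5 samples into 2 shards of ceil(5 / 2) = 3 entries each."""
    partitions = [["a", "b", "c", "d", "e"]]
    shards = _shard(partitions, num_shards=2)
    # shards == [[['a', 'b', 'c'], ['d', 'e', <one randomly re-sampled entry>]]]
    return shards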
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/deformable_detr/utils/converter_lib.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert ODDataset to sharded json format."""
import os
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
from nvidia_tao_pytorch.cv.deformable_detr.config.default_config import DDDatasetConvertConfig
from nvidia_tao_pytorch.cv.deformable_detr.utils.misc import check_and_create
from nvidia_tao_pytorch.cv.deformable_detr.utils.converter import KITTIConverter
def build_converter(experiment_config, input_source):
"""Build a DatasetConverter object.
Build and return an object of desired subclass of DatasetConverter based on
given dataset convert configuration.
Args:
experiment_config (DDDatasetConvertConfig): Dataset convert configuration object
input_source (string).
Return:
converter (DatasetConverter): An object of desired subclass of DatasetConverter.
"""
constructor_kwargs = {'data_root': experiment_config["data_root"],
'input_source': input_source,
'partition_mode': experiment_config["partition_mode"],
'num_partitions': experiment_config["num_partitions"],
'num_shards': experiment_config["num_shards"],
'output_dir': experiment_config["results_dir"],
'mapping_path': experiment_config["mapping_path"]}
constructor_kwargs['image_dir_name'] = experiment_config["image_dir_name"]
constructor_kwargs['label_dir_name'] = experiment_config["label_dir_name"]
    # The image extension may come through as an empty string in the config,
    # so default it to '.png' before passing it to the constructor.
constructor_kwargs['extension'] = experiment_config["image_extension"] or '.png'
constructor_kwargs['val_split'] = experiment_config["val_split"]
converter = KITTIConverter(**constructor_kwargs)
return converter
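# Hedged example of the dataset-convert configuration consumed by build_converter()
# (all values below are hypothetical; the keys mirror DDDatasetConvertConfig):
#
#   experiment_config = {
#       "data_root": "/data/kitti",
#       "input_source": "/data/kitti/sequences.txt",
#       "partition_mode": "random",
#       "num_partitions": 2,
#       "num_shards": 10,
#       "results_dir": "/results/convert",
#       "mapping_path": "/data/kitti/class_mapping.json",
#       "image_dir_name": "image_2",
#       "label_dir_name": "label_2",
#       "image_extension": ".png",
#       "val_split": 0,
#   }
#   converter = build_converter(experiment_config, experiment_config["input_source"])
#   converter.convert()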
def run_experiment(experiment_config,
results_dir):
"""Start the Data Converter."""
input_sources = experiment_config["input_source"]
check_and_create(results_dir)
# Set status logging
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
append=True
)
)
status_logging.get_status_logger().write(
status_level=status_logging.Status.STARTED,
message="Starting DDETR/DINO dataset convert"
)
with open(input_sources, 'r') as f:
seq_txt = f.readlines()
for input_source in seq_txt:
print(input_source)
input_source = input_source.rstrip('\n')
converter = build_converter(experiment_config, input_source)
converter.convert()
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load experiment specification, additionally using schema for validation/retrieving the default values.
# --config_path and --config_name will be provided by the entrypoint script.
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="convert", schema=DDDatasetConvertConfig
)
def main(cfg: DDDatasetConvertConfig) -> None:
"""Run the convert dataset process."""
try:
run_experiment(experiment_config=cfg,
results_dir=cfg.results_dir)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Dataset convert finished successfully"
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Dataset convert was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/deformable_detr/scripts/convert.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Deformable DETR scripts module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/deformable_detr/scripts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Export deformable detr model to ONNX."""
import os
import torch
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.core.utilities import update_results_dir
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
from nvidia_tao_pytorch.cv.action_recognition.utils.common_utils import check_and_create, encrypt_onnx
from nvidia_tao_pytorch.cv.deformable_detr.config.default_config import ExperimentConfig
from nvidia_tao_pytorch.cv.deformable_detr.model.pl_dd_model import DeformableDETRModel
from nvidia_tao_pytorch.cv.deformable_detr.utils.onnx_export import ONNXExporter
from nvidia_tao_pytorch.core.cookbooks.tlt_pytorch_cookbook import TLTPyTorchCookbook
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load experiment specification, additionally using schema for validation/retrieving the default values.
# --config_path and --config_name will be provided by the entrypoint script.
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="export", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""CLI wrapper to run export.
    This function parses the experiment specification, instantiates the ONNX exporter,
    and serializes the trained model to an ONNX file (optionally encrypted as an .etlt file).
    Args:
        cfg (ExperimentConfig): Hydra-parsed experiment configuration.
Returns:
No explicit returns.
"""
try:
torch.backends.cudnn.allow_tf32 = False
torch.backends.cuda.matmul.allow_tf32 = False
cfg = update_results_dir(cfg, task="export")
run_export(cfg, cfg.results_dir)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Export finished successfully"
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Export was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
def run_export(experiment_config, results_dir):
"""Wrapper to run export of tlt models.
Args:
        experiment_config (ExperimentConfig): Experiment configuration for the export.
        results_dir (str): Directory where export artifacts and the status log are written.
Returns:
No explicit returns.
"""
check_and_create(results_dir)
# Set status logging
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
append=True
)
)
status_logging.get_status_logger().write(
status_level=status_logging.Status.STARTED,
message="Starting DDETR export"
)
gpu_id = experiment_config.export.gpu_id
torch.cuda.set_device(gpu_id)
# Parsing command line arguments.
model_path = experiment_config.export.checkpoint
key = experiment_config.encryption_key
# set the encryption key:
TLTPyTorchCookbook.set_passphrase(key)
output_file = experiment_config.export.onnx_file
input_channel = experiment_config.export.input_channel
input_width = experiment_config.export.input_width
input_height = experiment_config.export.input_height
opset_version = experiment_config.export.opset_version
batch_size = experiment_config.export.batch_size
on_cpu = experiment_config.export.on_cpu
if batch_size is None or batch_size == -1:
input_batch_size = 1
else:
input_batch_size = batch_size
# Set default output filename if the filename
# isn't provided over the command line.
if output_file is None:
split_name = os.path.splitext(model_path)[0]
output_file = "{}.onnx".format(split_name)
    # Fail early if the output file already exists.
assert not os.path.exists(output_file), "Default onnx file {} already "\
"exists".format(output_file)
# Make an output directory if necessary.
output_root = os.path.dirname(os.path.realpath(output_file))
if not os.path.exists(output_root):
os.makedirs(output_root)
# load model
pl_model = DeformableDETRModel.load_from_checkpoint(model_path,
map_location='cpu' if on_cpu else 'cuda',
experiment_spec=experiment_config,
export=True)
model = pl_model.model
model.eval()
if not on_cpu:
model.cuda()
input_names = ['inputs']
output_names = ["pred_logits", "pred_boxes"]
# create dummy input
if on_cpu:
dummy_input = torch.ones(input_batch_size, input_channel, input_height, input_width, device='cpu')
else:
dummy_input = torch.ones(input_batch_size, input_channel, input_height, input_width, device='cuda')
if output_file.endswith('.etlt'):
tmp_onnx_file = output_file.replace('.etlt', '.onnx')
else:
tmp_onnx_file = output_file
onnx_export = ONNXExporter()
onnx_export.export_model(model, batch_size,
tmp_onnx_file,
dummy_input,
input_names=input_names,
opset_version=opset_version,
output_names=output_names,
do_constant_folding=False,
verbose=experiment_config.export.verbose)
onnx_export.check_onnx(tmp_onnx_file)
onnx_export.onnx_change(tmp_onnx_file)
if output_file.endswith('.etlt') and key:
# encrypt the onnx if and only if key is provided and output file name ends with .etlt
encrypt_onnx(tmp_file_name=tmp_onnx_file,
output_file_name=output_file,
key=key)
os.remove(tmp_onnx_file)
print(f"ONNX file stored at {output_file}")
if __name__ == "__main__":
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/deformable_detr/scripts/export.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train deformable detr model."""
import os
import re
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.strategies import DDPStrategy
from nvidia_tao_pytorch.core.callbacks.loggers import TAOStatusLogger
from nvidia_tao_pytorch.core.connectors.checkpoint_connector import TLTCheckpointConnector
from nvidia_tao_pytorch.core.cookbooks.tlt_pytorch_cookbook import TLTPyTorchCookbook
from nvidia_tao_pytorch.core.utilities import update_results_dir
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
from nvidia_tao_pytorch.cv.deformable_detr.config.default_config import ExperimentConfig
from nvidia_tao_pytorch.cv.deformable_detr.dataloader.od_data_module import ODDataModule
from nvidia_tao_pytorch.cv.deformable_detr.model.pl_dd_model import DeformableDETRModel
from nvidia_tao_pytorch.cv.deformable_detr.utils.misc import check_and_create
def run_experiment(experiment_config,
results_dir,
key):
"""Start the training."""
# set the encryption key:
TLTPyTorchCookbook.set_passphrase(key)
dm = ODDataModule(experiment_config.dataset)
    # find_unused_parameters=False and activation_checkpoint combination
# requires every output in forward function to participate in
# loss calculation. When return_interm_indices < 4, we must disable
# activation checkpointing
if experiment_config.train.activation_checkpoint and \
len(experiment_config.model.return_interm_indices) < 4 and \
experiment_config.train.num_gpus > 1:
experiment_config.train.activation_checkpoint = False
print("Disabling activation checkpointing since model is smaller")
activation_checkpoint = experiment_config.train.activation_checkpoint
# Load pretrained model as starting point if pretrained path is provided,
pretrained_path = experiment_config.train.pretrained_model_path
if pretrained_path is not None:
pt_model = DeformableDETRModel.load_from_checkpoint(pretrained_path,
map_location="cpu",
experiment_spec=experiment_config)
else:
pt_model = DeformableDETRModel(experiment_config)
total_epochs = experiment_config.train.num_epochs
check_and_create(results_dir)
status_logger_callback = TAOStatusLogger(
results_dir,
append=True,
num_epochs=total_epochs
)
status_logging.set_status_logger(status_logger_callback.logger)
num_gpus = experiment_config.train.num_gpus
num_nodes = experiment_config.train.num_nodes
validation_interval = experiment_config.train.validation_interval
ckpt_inter = experiment_config.train.checkpoint_interval
assert ckpt_inter <= total_epochs, (
f"Checkpoint interval {ckpt_inter} > Number of epochs {total_epochs}."
f"Please set experiment_config.train.checkpoint_interval < {total_epochs}"
)
assert validation_interval <= total_epochs, (
f"Validation interval {validation_interval} > Number of epochs {total_epochs}."
f"Please set experiment_config.train.validation_interval < {total_epochs}"
)
clip_grad_norm = experiment_config.train.clip_grad_norm
is_dry_run = experiment_config.train.is_dry_run
distributed_strategy = experiment_config.train.distributed_strategy
if experiment_config.train.precision.lower() in ["fp16", "fp32"]:
precision = int(experiment_config.train.precision.replace("fp", ""))
else:
raise NotImplementedError(f"{experiment_config.train.precision} is not supported. Only fp32 and fp16 are supported")
strategy = None
if num_gpus > 1:
# By default find_unused_parameters is set to True in Lightning for backward compatibility
# This introduces extra overhead and can't work with activation checkpointing
# Ref: https://pytorch-lightning.readthedocs.io/en/1.8.5/advanced/model_parallel.html#when-using-ddp-strategies-set-find-unused-parameters-false
        # TODO: Starting from PTL 2.0, find_unused_parameters is set to False by default
if distributed_strategy.lower() == "ddp" and activation_checkpoint:
strategy = DDPStrategy(find_unused_parameters=False)
elif distributed_strategy.lower() == "ddp" and not activation_checkpoint:
strategy = 'ddp'
elif distributed_strategy.lower() == "ddp_sharded":
strategy = 'ddp_sharded'
# Override to FP16 for ddp_sharded as there's an error with FP32 during Positional Embedding forward pass
print("Overriding Precision to FP16 for ddp_sharded")
precision = 16
else:
raise NotImplementedError(f"{distributed_strategy} is not implemented. Only ddp and ddp_sharded are supported")
trainer = Trainer(devices=num_gpus,
num_nodes=num_nodes,
max_epochs=total_epochs,
check_val_every_n_epoch=validation_interval,
default_root_dir=results_dir,
accelerator='gpu',
strategy=strategy,
precision=precision,
gradient_clip_val=clip_grad_norm,
replace_sampler_ddp=False,
fast_dev_run=is_dry_run)
# Overload connector to enable intermediate ckpt encryption & decryption.
resume_ckpt = experiment_config.train.resume_training_checkpoint_path
if resume_ckpt and resume_ckpt.endswith('.tlt'):
if resume_ckpt is not None:
trainer._checkpoint_connector = TLTCheckpointConnector(trainer, resume_from_checkpoint=resume_ckpt)
else:
trainer._checkpoint_connector = TLTCheckpointConnector(trainer)
resume_ckpt = None
# setup checkpointer:
ModelCheckpoint.FILE_EXTENSION = ".pth"
checkpoint_callback = ModelCheckpoint(every_n_epochs=ckpt_inter,
dirpath=results_dir,
save_on_train_epoch_end=True,
monitor=None,
save_top_k=-1,
filename='dd_model_{epoch:03d}')
if resume_ckpt:
status_logging.get_status_logger().write(
message=f"Resuming training from checkpoint: {resume_ckpt}",
status_level=status_logging.Status.STARTED
)
resumed_epoch = re.search('epoch=(\\d+)', resume_ckpt)
if resumed_epoch:
resumed_epoch = int(resumed_epoch.group(1))
else:
resumed_epoch = 0
status_logger_callback.epoch_counter = resumed_epoch + 1 # make sure callback epoch matches resumed epoch
trainer.callbacks.append(status_logger_callback)
trainer.callbacks.append(checkpoint_callback)
trainer.fit(pt_model, dm, ckpt_path=resume_ckpt or None)
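# Hedged example of the train section of a spec file consumed by run_experiment()
# (values are hypothetical; the field names match experiment_config.train above):
#
#   train:
#     num_gpus: 2
#     num_nodes: 1
#     num_epochs: 50
#     checkpoint_interval: 5
#     validation_interval: 5
#     precision: fp16
#     distributed_strategy: ddp
#     activation_checkpoint: True
#     clip_grad_norm: 0.1
#     pretrained_model_path: /results/pretrained/dd_resnet50.pth
#     resume_training_checkpoint_path: null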
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load experiment specification, additionally using schema for validation/retrieving the default values.
# --config_path and --config_name will be provided by the entrypoint script.
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="train", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""Run the training process."""
try:
cfg = update_results_dir(cfg, task="train")
run_experiment(experiment_config=cfg,
key=cfg.encryption_key,
results_dir=cfg.results_dir)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Training finished successfully"
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Training was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/deformable_detr/scripts/train.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Inference on single patch. """
import os
from pytorch_lightning import Trainer
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.core.utilities import update_results_dir
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
from nvidia_tao_pytorch.cv.deformable_detr.config.default_config import ExperimentConfig
from nvidia_tao_pytorch.cv.deformable_detr.dataloader.od_data_module import ODDataModule
from nvidia_tao_pytorch.cv.deformable_detr.model.pl_dd_model import DeformableDETRModel
from nvidia_tao_pytorch.cv.deformable_detr.utils.misc import check_and_create
from nvidia_tao_pytorch.core.cookbooks.tlt_pytorch_cookbook import TLTPyTorchCookbook
def run_experiment(experiment_config, model_path, key, results_dir=None):
"""Start the inference."""
if not model_path:
raise FileNotFoundError("inference.checkpoint is not set!")
# set the encryption key:
TLTPyTorchCookbook.set_passphrase(key)
check_and_create(results_dir)
# Set status logging
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
append=True
)
)
status_logging.get_status_logger().write(
status_level=status_logging.Status.STARTED,
message="Starting DDETR inference"
)
# tlt inference
if model_path.endswith('.tlt') or model_path.endswith('.pth'):
num_gpus = experiment_config.inference.num_gpus
# build data module
dm = ODDataModule(experiment_config.dataset)
dm.setup(stage="predict")
# Run inference using tlt model
acc_flag = None
if num_gpus > 1:
acc_flag = "ddp"
model = DeformableDETRModel.load_from_checkpoint(model_path,
map_location="cpu",
experiment_spec=experiment_config)
trainer = Trainer(devices=num_gpus,
default_root_dir=results_dir,
accelerator='gpu',
strategy=acc_flag)
trainer.predict(model, datamodule=dm)
elif model_path.endswith('.engine'):
raise NotImplementedError("TensorRT inference is supported through tao-deploy. "
"Please use tao-deploy to generate TensorRT enigne and run inference.")
else:
raise NotImplementedError("Model path format is only supported for .tlt or .pth")
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load experiment specification, additionally using schema for validation/retrieving the default values.
# --config_path and --config_name will be provided by the entrypoint script.
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="infer", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""Run the inference process."""
try:
cfg = update_results_dir(cfg, task="inference")
run_experiment(experiment_config=cfg,
key=cfg.encryption_key,
model_path=cfg.inference.checkpoint,
results_dir=cfg.results_dir)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Inference finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Inference was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/deformable_detr/scripts/inference.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluate a trained deformable detr model."""
import os
from pytorch_lightning import Trainer
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.core.utilities import update_results_dir
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
from nvidia_tao_pytorch.cv.deformable_detr.config.default_config import ExperimentConfig
from nvidia_tao_pytorch.cv.deformable_detr.dataloader.od_data_module import ODDataModule
from nvidia_tao_pytorch.cv.deformable_detr.model.pl_dd_model import DeformableDETRModel
from nvidia_tao_pytorch.cv.deformable_detr.utils.misc import check_and_create
from nvidia_tao_pytorch.core.cookbooks.tlt_pytorch_cookbook import TLTPyTorchCookbook
def run_experiment(experiment_config, model_path, key, results_dir=None):
"""Run experiment."""
if not model_path:
raise FileNotFoundError("evaluate.checkpoint is not set!")
# set the encryption key:
TLTPyTorchCookbook.set_passphrase(key)
check_and_create(results_dir)
# Set status logging
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
append=True
)
)
status_logging.get_status_logger().write(
status_level=status_logging.Status.STARTED,
message="Starting DDETR evaluation"
)
# tlt inference
if model_path.endswith('.tlt') or model_path.endswith('.pth'):
# build dataloader
dm = ODDataModule(experiment_config.dataset)
dm.setup(stage="test")
# build model and load from the given checkpoint
model = DeformableDETRModel.load_from_checkpoint(model_path,
map_location="cpu",
experiment_spec=experiment_config)
num_gpus = experiment_config.evaluate.num_gpus
acc_flag = None
if num_gpus > 1:
acc_flag = "ddp"
trainer = Trainer(devices=num_gpus,
default_root_dir=results_dir,
accelerator='gpu',
strategy=acc_flag)
trainer.test(model, datamodule=dm)
elif model_path.endswith('.engine'):
raise NotImplementedError("TensorRT evaluation is supported through tao-deploy. "
"Please use tao-deploy to generate TensorRT enigne and run evaluation.")
else:
raise NotImplementedError("Model path format is only supported for .tlt or .pth")
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load experiment specification, additionally using schema for validation/retrieving the default values.
# --config_path and --config_name will be provided by the entrypoint script.
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="evaluate", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""Run the evaluate process."""
try:
cfg = update_results_dir(cfg, task="evaluate")
run_experiment(experiment_config=cfg,
key=cfg.encryption_key,
model_path=cfg.evaluate.checkpoint,
results_dir=cfg.results_dir)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Evaluation finished successfully"
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Evaluation was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/deformable_detr/scripts/evaluate.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entrypoint script for the deformable detr task."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/deformable_detr/entrypoint/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""'Entry point' script running subtasks related to deformable detr.
"""
import importlib
import os
import pkgutil
import argparse
import subprocess # nosec B404
import sys
from time import time
import nvidia_tao_pytorch.cv.deformable_detr.scripts as scripts
from nvidia_tao_pytorch.core.telemetry.nvml_utils import get_device_details
from nvidia_tao_pytorch.core.telemetry.telemetry import send_telemetry_data
def get_subtasks(package):
"""Get supported subtasks for a given task.
    This function lists out the tasks in the .scripts folder.
Returns:
subtasks (dict): Dictionary of files.
"""
module_path = package.__path__
modules = {}
# Collect modules dynamically.
for _, task, is_package in pkgutil.walk_packages(module_path):
if is_package:
continue
module_name = package.__name__ + '.' + task
module_details = {
"module_name": module_name,
"runner_path": os.path.abspath(importlib.import_module(module_name).__file__),
}
modules[task] = module_details
return modules
def launch(parser, subtasks, network=None):
"""CLI function that executes subtasks.
Args:
parser: Created parser object for a given task.
subtasks: list of subtasks for a given task.
network (str): Name of the network.
"""
# Subtasks for a given model.
parser.add_argument(
'subtask', default='train', choices=subtasks.keys(), help="Subtask for a given task/model.",
)
# Add standard TLT arguments.
parser.add_argument(
"-r",
"--results_dir",
help="Path to a folder where the experiment outputs should be written.",
default=None,
required=False,
)
parser.add_argument(
"--gpus",
help="Number of GPUs to run the train subtask.",
default=None,
type=int,
)
parser.add_argument(
"--num_nodes",
help="Number of nodes to run the train subtask.",
default=None,
type=int
)
parser.add_argument("-k", "--key", help="User specific encoding key to save or load a .tlt model.")
parser.add_argument("-e", "--experiment_spec_file", help="Path to the experiment spec file.", default=None)
# Parse the arguments.
args, unknown_args = parser.parse_known_args()
script_args = ""
# Process spec file for all commands except the one for getting spec files ;)
# Make sure the user provides spec file.
if args.experiment_spec_file is None:
print("ERROR: The subtask `{}` requires the following argument: -e/--experiment_spec_file".format(args.subtask))
exit(1)
# Make sure the file exists!
if not os.path.exists(args.experiment_spec_file):
print("ERROR: The indicated experiment spec file `{}` doesn't exist!".format(args.experiment_spec_file))
exit(1)
# Split spec file_path into config path and config name.
path, name = os.path.split(args.experiment_spec_file)
if path != '':
script_args += " --config-path " + os.path.realpath(path)
script_args += " --config-name " + name
# And add other params AFTERWARDS!
if args.results_dir:
script_args += " results_dir=" + args.results_dir
if args.subtask in ["train", "evaluate", "inference"]:
if args.gpus:
script_args += f" {args.subtask}.num_gpus={args.gpus}"
if args.subtask in ["train"]:
if args.num_nodes:
script_args += f" {args.subtask}.num_nodes={args.num_nodes}"
# Add encryption key.
if args.subtask in ["train", "evaluate", "inference", "export"]:
if args.key is not None:
script_args += " encryption_key=" + args.key
# Find relevant module and pass args.
script = subtasks[args.subtask]["runner_path"]
# Pass unknown args to call
unknown_args_as_str = " ".join(unknown_args)
# Create a system call.
call = "python " + script + script_args + " " + unknown_args_as_str
process_passed = True
start = time()
try:
# Run the script.
subprocess.check_call(call, shell=True, stdout=sys.stdout, stderr=sys.stdout) # nosec B602
except (KeyboardInterrupt, SystemExit):
print("Command was interrupted.")
process_passed = True
except subprocess.CalledProcessError as e:
if e.output is not None:
print(e.output)
process_passed = False
end = time()
time_lapsed = int(end - start)
try:
gpu_data = list()
for device in get_device_details():
gpu_data.append(device.get_config())
send_telemetry_data(
network,
args.subtask,
gpu_data,
num_gpus=args.gpus,
time_lapsed=time_lapsed,
pass_status=process_passed
)
except Exception as e:
print("Telemetry data couldn't be sent, but the command ran successfully.")
print(f"[WARNING]: {e}")
pass
if not process_passed:
print("Execution status: FAIL")
exit(1) # returning non zero return code from the process.
print("Execution status: PASS")
def main():
"""Main entrypoint wrapper."""
# Create parser for a given task.
parser = argparse.ArgumentParser(
"deformable_detr", add_help=True, description="Transfer Learning Toolkit"
)
# Build list of subtasks by inspecting the package.
subtasks = get_subtasks(scripts)
# Parse the arguments and launch the subtask.
launch(parser, subtasks, network="deformable_detr")
if __name__ == '__main__':
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/deformable_detr/entrypoint/deformable_detr.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Deformable DETR model. """
import torch
import torch.nn.functional as F
from torch import nn
import math
import copy
import warnings
from nvidia_tao_pytorch.cv.deformable_detr.utils.misc import (tensor_from_tensor_list, inverse_sigmoid)
def _get_clones(module, N):
"""Get clones of nn.Module.
Args:
module (nn.Module): torch module to clone.
N (int): number of times to clone.
Returns:
nn.ModuleList of the cloned module.
"""
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
class MLP(nn.Module):
"""Simple multi-layer perceptron (FFN)."""
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
"""FFN Initialization.
Args:
input_dim (int): input dimension.
hidden_dim (int): hidden dimension.
output_dim (int): output dimension.
num_layers (int): number of layers.
"""
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
def forward(self, x):
"""Forward function."""
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
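# Illustrative sketch (hedged, not used by the model below): DeformableDETR builds its
# box-regression head as MLP(hidden_dim, hidden_dim, 4, 3), i.e. three linear layers
# with ReLU in between mapping each query embedding to a 4-d box. The sizes below are
# hypothetical defaults chosen only for this example.
def _example_mlp_bbox_head(hidden_dim=256, num_queries=300, batch_size=2):
    """Hedged sketch of the bbox-regression MLP applied to dummy query features."""
    bbox_head = MLP(hidden_dim, hidden_dim, output_dim=4, num_layers=3)
    queries = torch.randn(batch_size, num_queries, hidden_dim)
    boxes = bbox_head(queries).sigmoid()  # normalized boxes, shape [batch_size, num_queries, 4]
    return boxes.shape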
class DeformableDETR(nn.Module):
""" This is the Deformable DETR module that performs object detection """
def __init__(self, backbone, position_embedding, transformer, num_classes, num_queries, num_feature_levels,
aux_loss=True, with_box_refine=True, export=False):
""" Initializes the D-DETR model.
Args:
backbone (nn.Module): torch module of the backbone to be used. See backbone.py
transformer (nn.Module): torch module of the transformer architecture. See transformer.py
num_classes (int): number of object classes
num_queries (int): number of object queries, ie detection slot. This is the maximal number of objects
DETR can detect in a single image. For COCO, we recommend 100 queries.
aux_loss (bool): True if auxiliary decoding losses (loss at each decoder layer) are to be used.
with_box_refine (bool): iterative bounding box refinement.
"""
super().__init__()
self.num_queries = num_queries
self.transformer = transformer
hidden_dim = transformer.d_model
self.class_embed = nn.Linear(hidden_dim, num_classes)
self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)
self.num_feature_levels = num_feature_levels
self.query_embed = nn.Embedding(num_queries, hidden_dim * 2)
if num_feature_levels > 1:
num_backbone_outs = len(backbone.num_channels)
input_proj_list = []
for _ in range(num_backbone_outs):
in_channels = backbone.num_channels[_]
input_proj_list.append(nn.Sequential(
nn.Conv2d(in_channels, hidden_dim, kernel_size=1),
nn.GroupNorm(32, hidden_dim),
))
for _ in range(num_feature_levels - num_backbone_outs):
input_proj_list.append(nn.Sequential(
nn.Conv2d(in_channels, hidden_dim, kernel_size=3, stride=2, padding=1),
nn.GroupNorm(32, hidden_dim),
))
in_channels = hidden_dim
self.input_proj = nn.ModuleList(input_proj_list)
else:
self.input_proj = nn.ModuleList([
nn.Sequential(
nn.Conv2d(backbone.num_channels[0], hidden_dim, kernel_size=1),
nn.GroupNorm(32, hidden_dim),
)])
self.position_embedding = position_embedding
self.backbone = backbone
self.aux_loss = aux_loss
self.export = export
if self.export:
warnings.warn("Setting aux_loss to be False for export")
self.aux_loss = False
self.with_box_refine = with_box_refine
prior_prob = 0.01
bias_value = -math.log((1 - prior_prob) / prior_prob)
self.class_embed.bias.data = torch.ones(num_classes) * bias_value
nn.init.constant_(self.bbox_embed.layers[-1].weight.data, 0)
nn.init.constant_(self.bbox_embed.layers[-1].bias.data, 0)
for proj in self.input_proj:
nn.init.xavier_uniform_(proj[0].weight, gain=1)
nn.init.constant_(proj[0].bias, 0)
num_pred = transformer.decoder.num_layers
if with_box_refine:
self.class_embed = _get_clones(self.class_embed, num_pred)
self.bbox_embed = _get_clones(self.bbox_embed, num_pred)
nn.init.constant_(self.bbox_embed[0].layers[-1].bias.data[2:], -2.0)
# hack implementation for iterative bounding box refinement
self.transformer.decoder.bbox_embed = self.bbox_embed
else:
nn.init.constant_(self.bbox_embed.layers[-1].bias.data[2:], -2.0)
self.class_embed = nn.ModuleList([self.class_embed for _ in range(num_pred)])
self.bbox_embed = nn.ModuleList([self.bbox_embed for _ in range(num_pred)])
self.transformer.decoder.bbox_embed = None
def forward(self, samples):
""" Forward function of DD Model
Args:
samples (torch.Tensor): batched images, of shape [batch_size x 3 x H x W]
Returns:
pred_logits (torch.Tensor): the classification logits (including no-object) for all queries.
Shape = [batch_size x num_queries x (num_classes + 1)]
            pred_boxes (torch.Tensor): the normalized box coordinates for all queries, represented as (center_x, center_y, width, height).
"""
if not isinstance(samples, torch.Tensor):
samples = tensor_from_tensor_list(samples)
features = self.backbone(samples)
srcs = []
masks = []
for level, feat in enumerate(features):
src = feat[0]
mask = (feat[1].float()[:, 0].bool())
srcs.append(self.input_proj[level](src))
masks.append(mask)
if self.num_feature_levels > len(srcs):
_len_srcs = len(srcs)
for li in range(_len_srcs, self.num_feature_levels):
if li == _len_srcs:
src = self.input_proj[li](features[-1][0])
else:
src = self.input_proj[li](srcs[-1])
srcs.append(src)
if self.export:
m = torch.zeros((src.shape[0], 1, src.shape[2], src.shape[3]), dtype=src.dtype, device=src.device)
else:
m = samples[:, 3:4]
mask = F.interpolate(m.float(), size=src.shape[-2:]).to(torch.bool)
masks.append(mask.float()[:, 0].bool())
# build positional embedding
pos = []
for mask in masks:
if self.export:
N, H, W = mask.shape
tensor_shape = torch.tensor([N, H, W], device=src.device)
pos.append(self.position_embedding(tensor_shape, src.device))
else:
not_mask = ~mask
pos.append(self.position_embedding(not_mask, src.device))
query_embeds = self.query_embed.weight
hs, init_reference, inter_references = self.transformer(srcs, masks, pos, query_embeds)
outputs_classes = []
outputs_coords = []
for lvl in range(hs.shape[0]):
if lvl == 0:
reference = init_reference
else:
reference = inter_references[lvl - 1]
reference = inverse_sigmoid(reference)
outputs_class = self.class_embed[lvl](hs[lvl])
tmp = self.bbox_embed[lvl](hs[lvl])
if reference.shape[-1] == 4:
tmp += reference
else:
assert reference.shape[-1] == 2
tmp[..., :2] += reference
outputs_coord = tmp.sigmoid()
outputs_classes.append(outputs_class)
outputs_coords.append(outputs_coord)
outputs_class = torch.stack(outputs_classes)
outputs_coord = torch.stack(outputs_coords)
out = {'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coord[-1]}
if self.aux_loss:
out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord)
return out
@torch.jit.unused
def _set_aux_loss(self, outputs_class, outputs_coord):
"""A workaround as torchscript doesn't support dictionary with non-homogeneous values."""
return [{'pred_logits': a, 'pred_boxes': b}
for a, b in zip(outputs_class[:-1], outputs_coord[:-1])]
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/deformable_detr/model/deformable_detr_base.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Main PTL model file for deformable detr. """
import datetime
import os
import json
from typing import Any, Dict
import torch
from torch.optim.lr_scheduler import MultiStepLR, StepLR
from fairscale.optim import OSS
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_only
from nvidia_tao_pytorch.core.cookbooks.tlt_pytorch_cookbook import TLTPyTorchCookbook
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.cv.action_recognition.utils.common_utils import patch_decrypt_checkpoint
from nvidia_tao_pytorch.pointcloud.pointpillars.pcdet.utils import common_utils
from nvidia_tao_pytorch.cv.deformable_detr.dataloader.od_dataset import CoCoDataMerge
from nvidia_tao_pytorch.cv.deformable_detr.model.build_nn_model import build_model
from nvidia_tao_pytorch.cv.deformable_detr.model.matcher import HungarianMatcher
from nvidia_tao_pytorch.cv.deformable_detr.model.criterion import SetCriterion
from nvidia_tao_pytorch.cv.deformable_detr.model.post_process import PostProcess, save_inference_prediction, threshold_predictions
from nvidia_tao_pytorch.cv.deformable_detr.utils.misc import match_name_keywords
from nvidia_tao_pytorch.cv.deformable_detr.utils.coco import COCO
from nvidia_tao_pytorch.cv.deformable_detr.utils.coco_eval import CocoEvaluator
# pylint:disable=too-many-ancestors
class DeformableDETRModel(pl.LightningModule):
"""PTL module for Deformable DETR Object Detection Model."""
def __init__(self, experiment_spec, export=False):
"""Init training for Deformable DETR Model."""
super().__init__()
self.experiment_spec = experiment_spec
self.dataset_config = experiment_spec.dataset
self.model_config = experiment_spec.model
self.eval_class_ids = self.dataset_config["eval_class_ids"]
self.dataset_type = self.dataset_config["dataset_type"]
if self.dataset_type not in ("serialized", "default"):
raise ValueError(f"{self.dataset_type} is not supported. Only serialized and default are supported.")
# init the model
self._build_model(export)
self._build_criterion()
self.status_logging_dict = {}
def _build_model(self, export):
"""Internal function to build the model."""
self.model = build_model(experiment_config=self.experiment_spec, export=export)
def _build_criterion(self):
"""Internal function to build the loss function."""
self.matcher = HungarianMatcher(cost_class=self.model_config["cls_loss_coef"], cost_bbox=self.model_config["bbox_loss_coef"], cost_giou=self.model_config["giou_loss_coef"])
self.weight_dict = {'loss_ce': self.model_config["cls_loss_coef"], 'loss_bbox': self.model_config["bbox_loss_coef"], 'loss_giou': self.model_config["giou_loss_coef"]}
if self.model_config["aux_loss"]:
aux_weight_dict = {}
for i in range(self.model_config["dec_layers"] - 1):
aux_weight_dict.update({f'{k}_{i}': v for k, v in self.weight_dict.items()})
aux_weight_dict.update({f'{k}_enc': v for k, v in self.weight_dict.items()})
self.weight_dict.update(aux_weight_dict)
self.weight_dict = self.weight_dict
loss_types = self.model_config["loss_types"] # ['labels', 'boxes']
self.criterion = SetCriterion(self.dataset_config["num_classes"], self.matcher, loss_types, focal_alpha=self.model_config["focal_alpha"])
self.box_processors = PostProcess()
def configure_optimizers(self):
"""Configure optimizers for training."""
self.train_config = self.experiment_spec.train
param_dicts = [
{
"params":
[p for n, p in self.model.named_parameters()
if not match_name_keywords(n, self.model_config["backbone_names"]) and
not match_name_keywords(n, self.model_config["linear_proj_names"]) and
p.requires_grad],
"lr": self.train_config['optim']['lr'],
},
{
"params": [p for n, p in self.model.named_parameters() if match_name_keywords(n, self.model_config["backbone_names"]) and p.requires_grad],
"lr": self.train_config['optim']['lr_backbone'],
},
{
"params": [p for n, p in self.model.named_parameters() if match_name_keywords(n, self.model_config["linear_proj_names"]) and p.requires_grad],
"lr": self.train_config['optim']['lr'] * self.train_config['optim']['lr_linear_proj_mult'],
}
]
if self.train_config.optim.optimizer == 'SGD':
base_optimizer = torch.optim.SGD(params=param_dicts,
lr=self.train_config.optim.lr,
momentum=self.train_config.optim.momentum,
weight_decay=self.train_config.optim.weight_decay)
elif self.train_config.optim.optimizer == 'AdamW':
base_optimizer = torch.optim.AdamW(params=param_dicts,
lr=self.train_config.optim.lr,
weight_decay=self.train_config.optim.weight_decay)
else:
raise NotImplementedError(f"Optimizer {self.train_config.optim.optimizer} is not implemented")
if self.train_config.distributed_strategy == "ddp_sharded":
# Override force_broadcast_object=False in PTL
optim = OSS(params=base_optimizer.param_groups, optim=type(base_optimizer), force_broadcast_object=True, **base_optimizer.defaults)
else:
optim = base_optimizer
optim_dict = {}
optim_dict["optimizer"] = optim
scheduler_type = self.train_config['optim']['lr_scheduler']
if scheduler_type == "MultiStep":
lr_scheduler = MultiStepLR(optimizer=optim,
milestones=self.train_config['optim']["lr_steps"],
gamma=self.train_config['optim']["lr_decay"],
verbose=True)
elif scheduler_type == "StepLR":
lr_scheduler = StepLR(optimizer=optim,
step_size=self.train_config['optim']["lr_step_size"],
gamma=self.train_config['optim']["lr_decay"],
verbose=True)
else:
raise NotImplementedError("LR Scheduler {} is not implemented".format(scheduler_type))
optim_dict["lr_scheduler"] = lr_scheduler
optim_dict['monitor'] = self.train_config['optim']['monitor_name']
return optim_dict
def training_step(self, batch, batch_idx):
"""Training step."""
data, targets, _ = batch
batch_size = data.shape[0]
outputs = self.model(data)
# loss
loss_dict = self.criterion(outputs, targets)
losses = sum(loss_dict[k] * self.weight_dict[k] for k in loss_dict.keys() if k in self.weight_dict)
self.log("train_loss", losses, on_step=False, on_epoch=True, prog_bar=True, sync_dist=True, batch_size=batch_size)
self.log("train_class_error", loss_dict['class_error'], on_step=False, on_epoch=True, prog_bar=False, sync_dist=True, batch_size=batch_size)
self.log("train_loss_ce", loss_dict['loss_ce'], on_step=False, on_epoch=True, prog_bar=False, sync_dist=True, batch_size=batch_size)
self.log("train_loss_bbox", loss_dict['loss_bbox'], on_step=False, on_epoch=True, prog_bar=False, sync_dist=True, batch_size=batch_size)
self.log("train_loss_giou", loss_dict['loss_giou'], on_step=False, on_epoch=True, prog_bar=False, sync_dist=True, batch_size=batch_size)
return {'loss': losses}
def training_epoch_end(self, training_step_outputs):
"""Log Training metrics to status.json"""
average_train_loss = 0.0
for out in training_step_outputs:
average_train_loss += out['loss'].item()
average_train_loss /= len(training_step_outputs)
self.status_logging_dict["train_loss"] = average_train_loss
status_logging.get_status_logger().kpi = self.status_logging_dict
status_logging.get_status_logger().write(
message="Train and Val metrics generated.",
status_level=status_logging.Status.RUNNING
)
training_step_outputs.clear()
def on_validation_epoch_start(self) -> None:
"""
Validation epoch start.
Reset coco evaluator for each epoch.
"""
if self.dataset_type == "serialized":
# Load from scratch since COCO object is not instantiated for SerializedDatasetFromList
coco_lists = []
for source in self.dataset_config["val_data_sources"]:
with open(source["json_file"], "r") as f:
tmp = json.load(f)
coco_lists.append(COCO(tmp))
coco = COCO(CoCoDataMerge(coco_lists))
self.val_coco_evaluator = CocoEvaluator(coco, iou_types=['bbox'], eval_class_ids=self.eval_class_ids)
else:
self.val_coco_evaluator = CocoEvaluator(self.trainer.datamodule.val_dataset.coco, iou_types=['bbox'], eval_class_ids=self.eval_class_ids)
def validation_step(self, batch, batch_idx):
"""Validation step."""
data, targets, image_names = batch
outputs = self.model(data)
batch_size = data.shape[0]
loss_dict = self.criterion(outputs, targets)
losses = sum(loss_dict[k] * self.weight_dict[k] for k in loss_dict.keys() if k in self.weight_dict)
orig_target_sizes = torch.stack([t["orig_size"] for t in targets], dim=0)
results = self.box_processors(outputs, orig_target_sizes, image_names)
res = {target['image_id'].item(): output for target, output in zip(targets, results)}
self.val_coco_evaluator.update(res)
self.log("val_loss", losses, on_step=False, on_epoch=True, prog_bar=True, sync_dist=True, batch_size=batch_size)
self.log("val_class_error", loss_dict['class_error'], on_step=False, on_epoch=True, prog_bar=False, sync_dist=True, batch_size=batch_size)
self.log("val_loss_ce", loss_dict['loss_ce'], on_step=False, on_epoch=True, prog_bar=False, sync_dist=True, batch_size=batch_size)
self.log("val_loss_bbox", loss_dict['loss_bbox'], on_step=False, on_epoch=True, prog_bar=False, sync_dist=True, batch_size=batch_size)
self.log("val_loss_giou", loss_dict['loss_giou'], on_step=False, on_epoch=True, prog_bar=False, sync_dist=True, batch_size=batch_size)
return losses
def validation_epoch_end(self, outputs):
"""
Validation epoch end.
Compute mAP at the end of epoch.
"""
self.val_coco_evaluator.synchronize_between_processes()
self.val_coco_evaluator.overall_accumulate()
self.val_coco_evaluator.overall_summarize(is_print=False)
mAP = self.val_coco_evaluator.coco_eval['bbox'].stats[0]
mAP50 = self.val_coco_evaluator.coco_eval['bbox'].stats[1]
if self.trainer.is_global_zero:
print("\n Validation mAP : {}\n".format(mAP))
print("\n Validation mAP50 : {}\n".format(mAP50))
self.log("val_mAP", mAP, rank_zero_only=True, sync_dist=True)
self.log("val_mAP50", mAP50, rank_zero_only=True, sync_dist=True)
self.status_logging_dict["val_mAP"] = str(mAP)
self.status_logging_dict["val_mAP50"] = str(mAP50)
average_val_loss = 0.0
for out in outputs:
average_val_loss += out.item()
average_val_loss /= len(outputs)
self.status_logging_dict["val_loss"] = average_val_loss
outputs.clear()
def on_test_epoch_start(self) -> None:
"""
Test epoch start.
Reset coco evaluator at start.
"""
if self.dataset_type == "serialized":
# Load from scratch since COCO object is not instantiated for SerializedDatasetFromList
with open(self.dataset_config["test_data_sources"]["json_file"], "r") as f:
tmp = json.load(f)
coco = COCO(tmp)
self.test_coco_evaluator = CocoEvaluator(coco, iou_types=['bbox'], eval_class_ids=self.eval_class_ids)
else:
self.test_coco_evaluator = CocoEvaluator(self.trainer.datamodule.test_dataset.coco, iou_types=['bbox'], eval_class_ids=self.eval_class_ids)
def test_step(self, batch, batch_idx):
"""Test step. Evaluate."""
data, targets, image_names = batch
outputs = self.model(data)
batch_size = data.shape[0]
loss_dict = self.criterion(outputs, targets)
losses = sum(loss_dict[k] * self.weight_dict[k] for k in loss_dict.keys() if k in self.weight_dict)
orig_target_sizes = torch.stack([t["orig_size"] for t in targets], dim=0)
results = self.box_processors(outputs, orig_target_sizes, image_names)
if self.experiment_spec.evaluate.conf_threshold > 0:
filtered_res = threshold_predictions(results, self.experiment_spec.evaluate.conf_threshold)
else:
filtered_res = results
res = {target['image_id'].item(): output for target, output in zip(targets, filtered_res)}
self.test_coco_evaluator.update(res)
self.log("test_loss", losses, on_step=False, on_epoch=True, prog_bar=False, sync_dist=True, batch_size=batch_size)
self.log("test_class_error", loss_dict['class_error'], on_step=False, on_epoch=True, prog_bar=False, sync_dist=True, batch_size=batch_size)
self.log("test_loss_ce", loss_dict['loss_ce'], on_step=False, on_epoch=True, prog_bar=False, sync_dist=True, batch_size=batch_size)
self.log("test_loss_bbox", loss_dict['loss_bbox'], on_step=False, on_epoch=True, prog_bar=False, sync_dist=True, batch_size=batch_size)
self.log("test_loss_giou", loss_dict['loss_giou'], on_step=False, on_epoch=True, prog_bar=False, sync_dist=True, batch_size=batch_size)
def test_epoch_end(self, outputs):
"""
Test epoch end.
Compute mAP at the end of epoch.
"""
self.test_coco_evaluator.synchronize_between_processes()
self.test_coco_evaluator.overall_accumulate()
self.test_coco_evaluator.overall_summarize(is_print=True)
mAP = self.test_coco_evaluator.coco_eval['bbox'].stats[0]
mAP50 = self.test_coco_evaluator.coco_eval['bbox'].stats[1]
self.log("test_mAP", mAP, rank_zero_only=True)
self.log("test_mAP50", mAP50, rank_zero_only=True)
# Log the evaluation results to a file
log_file = os.path.join(self.experiment_spec.results_dir, 'log_eval_{}.txt'.format(datetime.datetime.now().strftime('%Y%m%d-%H%M%S')))
logger = common_utils.create_logger(log_file, rank=0)
if self.trainer.is_global_zero:
logger.info('**********************Start logging Evaluation Results **********************')
logger.info('*************** mAP *****************')
logger.info('mAP : %2.2f' % mAP)
logger.info('*************** mAP50 *****************')
logger.info('mAP50 : %2.2f' % mAP50)
self.status_logging_dict["test_mAP"] = str(mAP)
self.status_logging_dict["test_mAP50"] = str(mAP50)
status_logging.get_status_logger().kpi = self.status_logging_dict
status_logging.get_status_logger().write(
message="Evaluation metrics generated.",
status_level=status_logging.Status.RUNNING
)
outputs.clear()
def predict_step(self, batch, batch_idx):
"""Predict step. Inference."""
data, targets, image_names = batch
outputs = self.model(data)
orig_target_sizes = torch.stack([t["orig_size"] for t in targets], dim=0)
pred_results = self.box_processors(outputs, orig_target_sizes, image_names)
return pred_results
@rank_zero_only
def on_predict_batch_end(self, outputs, batch, batch_idx, dataloader_idx):
"""
Predict batch end.
Save the result inferences at the end of batch.
"""
output_dir = self.experiment_spec.results_dir
label_map = self.trainer.datamodule.pred_dataset.label_map
color_map = self.experiment_spec.inference.color_map
conf_threshold = self.experiment_spec.inference.conf_threshold
is_internal = self.experiment_spec.inference.is_internal
save_inference_prediction(outputs, output_dir, conf_threshold, label_map, color_map, is_internal)
def forward(self, x):
"""Forward of the deformable detr model."""
outputs = self.model(x)
return outputs
def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
"""Encrpyt the checkpoint. The encryption is done in TLTCheckpointConnector."""
pass
def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
"""Decrpyt the checkpoint."""
if checkpoint.get("state_dict_encrypted", False):
# Retrieve encryption key from TLTPyTorchCookbook.
key = TLTPyTorchCookbook.get_passphrase()
if key is None:
raise PermissionError("Cannot access model state dict without the encryption key")
checkpoint = patch_decrypt_checkpoint(checkpoint, key)
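# Example (illustrative sketch, not part of the original module): the Lightning module is
# normally driven by a Hydra/OmegaConf experiment spec and a pl.Trainer. Field names on the
# spec below (e.g. train.num_epochs) are assumptions.
#
#   model = DeformableDETRModel(experiment_spec)
#   trainer = pl.Trainer(max_epochs=experiment_spec.train.num_epochs, accelerator="gpu")
#   # trainer.fit(model, datamodule=dm)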
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/deformable_detr/model/pl_dd_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Modules to compute the matching cost and solve the corresponding LSAP. """
import torch
from torch import nn
from scipy.optimize import linear_sum_assignment
from nvidia_tao_pytorch.cv.deformable_detr.utils.box_ops import box_cxcywh_to_xyxy, generalized_box_iou
class HungarianMatcher(nn.Module):
"""This class computes an assignment between the targets and the predictions of the network.
For efficiency reasons, the targets don't include the no_object. Because of this, in general,
there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,
while the others are un-matched (and thus treated as non-objects).
"""
def __init__(self,
cost_class: float = 1,
cost_bbox: float = 1,
cost_giou: float = 1):
"""Creates the matcher.
Args:
cost_class (float): This is the relative weight of the classification error in the matching cost.
cost_bbox (float): This is the relative weight of the L1 error of the bounding box coordinates in the matching cost.
cost_giou (float): This is the relative weight of the giou loss of the bounding box in the matching cost.
"""
super().__init__()
self.cost_class = cost_class
self.cost_bbox = cost_bbox
self.cost_giou = cost_giou
assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, "all costs can't be 0"
def forward(self, outputs, targets):
"""Performs the matching.
Args:
outputs (dict): This is a dict that contains at least these entries:
"pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits
"pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates
targets (list): This is a list of targets (len(targets) = batch_size), where each target is a dict containing:
"labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth
objects in the target) containing the class labels
"boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates
Returns:
A list of size batch_size, containing tuples of (index_i, index_j) where:
- index_i is the indices of the selected predictions (in order)
- index_j is the indices of the corresponding selected targets (in order)
For each batch element, it holds:
len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
"""
with torch.no_grad():
bs, num_queries = outputs["pred_logits"].shape[:2]
# We flatten to compute the cost matrices in a batch
out_prob = outputs["pred_logits"].flatten(0, 1).sigmoid()
out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4]
# Also concat the target labels and boxes
tgt_ids = torch.cat([v["labels"] for v in targets])
tgt_bbox = torch.cat([v["boxes"] for v in targets])
# Compute the classification cost.
alpha = 0.25
gamma = 2.0
neg_cost_class = (1 - alpha) * (out_prob ** gamma) * (-(1 - out_prob + 1e-8).log())
pos_cost_class = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log())
cost_class = pos_cost_class[:, tgt_ids] - neg_cost_class[:, tgt_ids]
# Compute the L1 cost between boxes
cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1)
# Compute the giou cost between boxes
cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(out_bbox),
box_cxcywh_to_xyxy(tgt_bbox))
# Final cost matrix
C = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou
C = C.view(bs, num_queries, -1).cpu()
sizes = [len(v["boxes"]) for v in targets]
indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))]
return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
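# Example (illustrative sketch, not part of the original module): running the matcher on
# dummy predictions and targets. The shapes and cost weights below are assumptions.
#
#   matcher = HungarianMatcher(cost_class=2.0, cost_bbox=5.0, cost_giou=2.0)
#   outputs = {"pred_logits": torch.rand(2, 300, 91), "pred_boxes": torch.rand(2, 300, 4)}
#   targets = [{"labels": torch.tensor([3]), "boxes": torch.rand(1, 4)},
#              {"labels": torch.tensor([1, 7]), "boxes": torch.rand(2, 4)}]
#   indices = matcher(outputs, targets)  # one (pred_idx, tgt_idx) tuple per image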
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/deformable_detr/model/matcher.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Backbone modules. """
import torch
from torch import nn
import torch.nn.functional as F
from torchvision.models._utils import IntermediateLayerGetter
from typing import Optional
from typing import Dict, List
import numpy as np
from nvidia_tao_pytorch.cv.deformable_detr.utils.misc import get_global_rank, load_pretrained_weights
from nvidia_tao_pytorch.cv.deformable_detr.model.resnet import resnet50
from nvidia_tao_pytorch.cv.deformable_detr.model.gc_vit import gc_vit_model_dict
class FrozenBatchNorm2d(torch.nn.Module):
"""BatchNorm2d where the batch statistics and the affine parameters are fixed."""
def __init__(self, n, eps=1e-5):
"""Initialize the FrozenBatchNorm2d Class.
Args:
n (int): num_features from an expected input of size
eps (float): a value added to the denominator for numerical stability.
"""
super(FrozenBatchNorm2d, self).__init__()
self.register_buffer("weight", torch.ones(n))
self.register_buffer("bias", torch.zeros(n))
self.register_buffer("running_mean", torch.zeros(n))
self.register_buffer("running_var", torch.ones(n))
self.eps = eps
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
"""Load paremeters from state dict. """
num_batches_tracked_key = prefix + 'num_batches_tracked'
if num_batches_tracked_key in state_dict:
del state_dict[num_batches_tracked_key]
super(FrozenBatchNorm2d, self)._load_from_state_dict(
state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs)
def forward(self, x):
"""Forward function: move reshapes to the beginning to make it fuser-friendly.
Args:
x (torch.Tensor): input tensor.
Returns:
torch.Tensor: output of Frozen Batch Norm.
"""
w = self.weight.view(1, -1, 1, 1)
b = self.bias.view(1, -1, 1, 1)
rv = self.running_var.view(1, -1, 1, 1)
rm = self.running_mean.view(1, -1, 1, 1)
eps = self.eps
scale = 1 / (rv + eps).sqrt()
scale = w * scale
bias = b - rm * scale
return x * scale + bias
class BackboneBase(nn.Module):
"""BackboneBase class."""
def __init__(self, model_name, backbone: nn.Module, train_backbone: bool, num_channels: int, return_interm_indices: list, export: bool):
"""Initialize the Backbone Base Class.
Args:
model_name (str): backbone model name.
backbone (nn.Module): backbone torch module.
train_backbone (bool): flag whether we want to train the backbone or not.
num_channels (int): channel size.
return_interm_indices (list): list of layer indices to return as backbone features.
export (bool): flag to indicate whether exporting to onnx or not.
"""
super().__init__()
self.export = export
self.model_name = model_name
if model_name == 'resnet_50':
for name, parameter in backbone.named_parameters():
if not train_backbone or 'layer2' not in name and 'layer3' not in name and 'layer4' not in name:
parameter.requires_grad_(False)
return_layers = {}
# 4 scale: {'layer2': '1', 'layer3': '2', 'layer4': '3'}
# 5 scale: {'layer1': '0', 'layer2': '1', 'layer3': '2', 'layer4': '3'}
for layer_index in return_interm_indices:
return_layers.update({"layer{}".format(layer_index + 1): "{}".format(layer_index)})
self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
elif 'fan' in model_name or 'gc_vit' in model_name:
for name, parameter in backbone.named_parameters():
if not train_backbone:
parameter.requires_grad_(False)
self.body = backbone
self.num_channels = num_channels
self.return_interm_indices = return_interm_indices
def forward(self, input_tensors):
Forward function for Backbone base.
Args:
input_tensors (torch.Tensor): input tensor.
Returns:
out (torch.Tensor): output tensor.
"""
if self.export:
batch_shape = input_tensors.shape
dtype = input_tensors.dtype
device = input_tensors.device
# when exporting, the input shape is fixed and no padding mask is needed.
masks = torch.zeros((batch_shape[0], 1, batch_shape[2], batch_shape[3]), dtype=dtype, device=device)
input_tensor = input_tensors
else:
masks = input_tensors[:, 3:4]
input_tensor = input_tensors[:, :3]
xs = self.body(input_tensor)
out: Dict[str, torch.Tensor] = {}
for name, x in xs.items():
mask = F.interpolate(masks.float(), size=x.shape[-2:])
mask = mask.to(torch.bool)
out[name] = (x, mask)
return out
class Backbone(BackboneBase):
"""Backbone for D-DETR."""
def __init__(self, name: str,
pretrained_backbone_path: Optional[str],
train_backbone: bool,
return_interm_indices: list,
dilation: bool,
export: bool,
activation_checkpoint: bool):
"""Initialize the Backbone Class.
Args:
name (str): backbone model name.
pretrained_backbone_path (str): optional path to the pretrained backbone.
train_backbone (bool): flag whether we want to train the backbone or not.
return_interm_indices (list): list of layer indices to return as backbone features.
dilation (bool): flag whether we want to use dilation or not.
export (bool): flag to indicate whether exporting to onnx or not.
activation_checkpoint (bool): flag to indicate whether to run activation checkpointing during training.
Raises:
ValueError: If return_interm_indices does not have valid range or has duplicate index.
NotImplementedError: If invalid backbone name was provided.
"""
return_interm_indices = np.array(return_interm_indices)
if not np.logical_and(return_interm_indices >= 0, return_interm_indices <= 4).all():
raise ValueError(f"Invalid range for return_interm_indices. "
f"Provided return_interm_indices is {return_interm_indices}.")
if len(np.unique(return_interm_indices)) != len(return_interm_indices):
raise ValueError(f"Duplicate index in the provided return_interm_indices: {return_interm_indices}")
if name == 'resnet_50':
if export:
_norm_layer = nn.BatchNorm2d
else:
_norm_layer = FrozenBatchNorm2d
backbone = resnet50(norm_layer=_norm_layer,
replace_stride_with_dilation=[False, False, dilation])
num_channels_all = np.array([256, 512, 1024, 2048])
num_channels = num_channels_all[return_interm_indices]
elif 'gc_vit' in name:
if name not in gc_vit_model_dict:
raise NotImplementedError(f"{name} is not supported GCViT backbone. "
f"Supported architecutres: {gc_vit_model_dict.keys()}")
backbone = gc_vit_model_dict[name](out_indices=return_interm_indices,
activation_checkpoint=activation_checkpoint)
num_channels_all = np.array(backbone.num_features)
num_channels = num_channels_all[return_interm_indices]
else:
supported_arch = list(gc_vit_model_dict.keys()) + ["resnet_50"]
raise NotImplementedError(f"Backbone {name} is not implemented. Supported architectures {supported_arch}")
if pretrained_backbone_path:
checkpoint = load_pretrained_weights(pretrained_backbone_path)
_tmp_st_output = backbone.load_state_dict(checkpoint, strict=False)
if get_global_rank() == 0:
print(f"Loaded pretrained weights from {pretrained_backbone_path}")
print(f"{_tmp_st_output}")
super().__init__(name, backbone, train_backbone, num_channels, return_interm_indices, export)
class Joiner(nn.Sequential):
"""Joiner Class."""
def __init__(self, backbone):
"""Initialize the Joiner Class.
Args:
backbone (nn.Module): backbone module.
"""
super().__init__(backbone)
self.num_channels = backbone.num_channels
def forward(self, input_tensors):
"""Forward function for Joiner to prepare the backbone output into transformer input format.
Args:
input_tensors (torch.Tensor): input tensor.
Returns:
out (List[Tensor]): list of tensor (feature vectors from backbone).
"""
xs = self[0](input_tensors)
out: List[torch.Tensor] = []
for _, x in sorted(xs.items()):
out.append(x)
return out
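# Example (illustrative sketch, not part of the original module): assembling the ResNet-50
# backbone and Joiner the way D-DETR does. The interm indices and flags are assumptions.
#
#   backbone = Backbone('resnet_50', pretrained_backbone_path=None, train_backbone=True,
#                       return_interm_indices=[1, 2, 3], dilation=False,
#                       export=False, activation_checkpoint=False)
#   model = Joiner(backbone)
#   # feats = model(images_with_mask)  # list of (feature_map, mask) tuples, one per level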
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/deformable_detr/model/backbone.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Positional encodings for the transformer. """
import math
import torch
from torch import nn
class PositionEmbeddingSine(nn.Module):
"""
A standard version of the position embedding, very similar to the one
used by the Attention is all you need paper, generalized to work on images.
"""
def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
"""Initialize PositionEmbeddingSine Class"""
super().__init__()
self.num_pos_feats = num_pos_feats
self.temperature = temperature
self.normalize = normalize
if scale is not None and normalize is False:
raise ValueError("normalize should be True if scale is passed")
if scale is None:
scale = 2 * math.pi
self.scale = scale
def forward(self, not_mask, device):
"""Forward"""
y_embed = not_mask.cumsum(1, dtype=torch.float32)
x_embed = not_mask.cumsum(2, dtype=torch.float32)
if self.normalize:
eps = 1e-6
y_embed = (y_embed - 0.5) / (y_embed[:, -1:, :] + eps) * self.scale
x_embed = (x_embed - 0.5) / (x_embed[:, :, -1:] + eps) * self.scale
dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=device)
dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
pos_x = x_embed[:, :, :, None] / dim_t
pos_y = y_embed[:, :, :, None] / dim_t
pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
return pos
class PositionEmbeddingSineExport(nn.Module):
"""Exportable Positional Bemdding Sine Class."""
def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
"""Initialize PositionEmbeddingSineExport Class"""
super().__init__()
self.num_pos_feats = num_pos_feats
self.temperature = temperature
self.normalize = normalize
if scale is not None and normalize is False:
raise ValueError("normalize should be True if scale is passed")
if scale is None:
scale = 2 * math.pi
self.scale = scale
def forward(self, batch_shape, device):
""" Forward """
not_mask = torch.ones(batch_shape.tolist(), dtype=torch.bool, device=device)
y_embed = not_mask.cumsum(1, dtype=torch.float32)
x_embed = not_mask.cumsum(2, dtype=torch.float32)
if self.normalize:
eps = 1e-6
y_embed = (y_embed - 0.5) / (y_embed[:, -1:, :] + eps) * self.scale
x_embed = (x_embed - 0.5) / (x_embed[:, :, -1:] + eps) * self.scale
dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=device)
dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
pos_x = x_embed[:, :, :, None] / dim_t
pos_y = y_embed[:, :, :, None] / dim_t
pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
return pos
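# Example (illustrative sketch, not part of the original module): producing sine positional
# encodings for a 2x32x32 validity mask. num_pos_feats below is an assumption (d_model // 2).
#
#   pos_embed = PositionEmbeddingSine(num_pos_feats=128, normalize=True)
#   not_mask = torch.ones(2, 32, 32)
#   pos = pos_embed(not_mask, device=not_mask.device)  # shape (2, 256, 32, 32)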
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/deformable_detr/model/position_encoding.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Deformable DETR model module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/deformable_detr/model/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Original source taken from https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py
# Copyright (c) 2020 SenseTime.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Backbone ResNet model definition. """
import torch
import torch.nn as nn
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
"""BasicBlock for ResNet"""
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
"""Init"""
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
"""Forward"""
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
""" Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
while original implementation places the stride at the first 1x1 convolution(self.conv1)
according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
This variant is also known as ResNet V1.5 and improves accuracy according to
https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
"""
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
"""Init"""
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
"""Forward"""
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
""" Baset ResNet Module class """
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None):
"""Init"""
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
"""Make_layer"""
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def forward(self, x):
"""Forward"""
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def resnet50(norm_layer=None, replace_stride_with_dilation=None, **kwargs):
""" ResNet-50 model from
Deep Residual Learning for Image Recognition
Args:
norm_layer (nn.Module): Normalization layer to be used
replace_stride_with_dilation (list): list of boolean to indicate whether to replace 2x2 stride with a dilated convolution instead
"""
return ResNet(block=Bottleneck, layers=[3, 4, 6, 3], norm_layer=norm_layer, replace_stride_with_dilation=replace_stride_with_dilation, **kwargs)
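# Example (illustrative sketch, not part of the original module): building the ResNet-50
# variant with a dilated last stage, as used for the 'dilation' backbone option.
#
#   model = resnet50(norm_layer=torch.nn.BatchNorm2d,
#                    replace_stride_with_dilation=[False, False, True])
#   logits = model(torch.rand(1, 3, 224, 224))  # shape (1, 1000)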
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/deformable_detr/model/resnet.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Deformable Transformer module. """
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.init import xavier_uniform_, constant_, normal_
import torch.utils.checkpoint as checkpoint
from nvidia_tao_pytorch.core.modules.activation.activation import MultiheadAttention
from nvidia_tao_pytorch.cv.deformable_detr.utils.misc import inverse_sigmoid
from nvidia_tao_pytorch.cv.deformable_detr.model.ops.modules import MSDeformAttn
class DeformableTransformer(nn.Module):
"""Deformable Transfromer module."""
def __init__(self, d_model=256, nhead=8,
num_encoder_layers=6, num_decoder_layers=6, dim_feedforward=1024, dropout=0.3,
activation="relu", return_intermediate_dec=True,
num_feature_levels=4, dec_n_points=4, enc_n_points=4,
export=False, activation_checkpoint=True):
"""Initialize Deformable Transformer Module.
Args:
d_model (int): size of the hidden dimension.
nhead (int): number of heads.
num_encoder_layers (int): number of encoder layers.
num_decoder_layers (int): number of decoder layers.
dim_feedforward (int): dimension of the feedforward layer.
dropout (float): probability for the dropout layer.
activation (str): type of activation layer.
return_intermediate_dec (bool): return intermediate decoder layers.
num_feature_levels (int): Number of levels to extract from the backbone feature maps.
dec_n_points (int): number of sampling points per attention head in the decoder.
enc_n_points (int): number of sampling points per attention head in the encoder.
export (bool): flag to indicate if the current model is being used for ONNX export.
activation_checkpoint (bool): flag to indicate if activation checkpointing is used.
"""
super().__init__()
self.d_model = d_model
self.nhead = nhead
self.export = export
self.activation_checkpoint = activation_checkpoint
encoder_args = {
"d_model": d_model,
"dropout": dropout,
"d_ffn": dim_feedforward,
"activation": activation,
"n_levels": num_feature_levels,
"n_heads": nhead,
"n_points": enc_n_points,
"export": self.export
}
decoder_args = dict(encoder_args)
decoder_args["n_points"] = dec_n_points
self.encoder = DeformableTransformerEncoder(num_encoder_layers, encoder_args,
export=self.export, activation_checkpoint=self.activation_checkpoint)
self.decoder = DeformableTransformerDecoder(num_decoder_layers, decoder_args,
return_intermediate=return_intermediate_dec,
export=self.export,
activation_checkpoint=self.activation_checkpoint)
self.level_embed = nn.Parameter(torch.Tensor(num_feature_levels, d_model))
self.reference_points = nn.Linear(d_model, 2)
self._reset_parameters()
def _reset_parameters(self):
"""Reset parmaeters."""
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
for m in self.modules():
if isinstance(m, MSDeformAttn):
m._reset_parameters()
xavier_uniform_(self.reference_points.weight.data, gain=1.0)
constant_(self.reference_points.bias.data, 0.)
normal_(self.level_embed)
def get_valid_ratio(self, mask):
"""Compute the valid ratio from given mask."""
_, H, W = mask.shape
temp_mask = mask.bool()
valid_H = torch.sum((~temp_mask).float()[:, :, 0], 1)
valid_W = torch.sum((~temp_mask).float()[:, 0, :], 1)
valid_ratio_h = valid_H.float() / H
valid_ratio_w = valid_W.float() / W
valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1)
return valid_ratio
def forward(self, srcs, masks, pos_embeds, query_embed=None):
"""Forward function."""
assert query_embed is not None
# prepare input for encoder
src_flatten = []
lvl_pos_embed_flatten = []
if self.export:
spatial_shapes = []
else:
spatial_shapes = torch.empty(len(srcs), 2, dtype=torch.int32, device=srcs[0].device)
for lvl, (src, pos_embed) in enumerate(zip(srcs, pos_embeds)):
bs, c, h, w = src.shape
if self.export: # Input shaped is fixed for export in onnx/tensorRT
spatial_shapes.append(torch.tensor([[h, w]], dtype=torch.int32, device=srcs[0].device))
else: # Used for dynamic input shape
spatial_shapes[lvl, 0], spatial_shapes[lvl, 1] = h, w
src = src.flatten(2).transpose(1, 2)
pos_embed = pos_embed.flatten(2).transpose(1, 2)
lvl_pos_embed = pos_embed + self.level_embed[lvl].view(1, 1, -1)
lvl_pos_embed_flatten.append(lvl_pos_embed)
src_flatten.append(src)
src_flatten = torch.cat(src_flatten, 1)
lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1)
if isinstance(spatial_shapes, list):
spatial_shapes = torch.cat(spatial_shapes, 0)
level_start_index = torch.cat((spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1])).type(torch.int32)
valid_ratios = torch.stack([self.get_valid_ratio(m) for m in masks], 1)
# encoder
memory = self.encoder(src_flatten, spatial_shapes, level_start_index, valid_ratios, lvl_pos_embed_flatten)
# prepare input for decoder
bs, _, c = memory.shape
query_embed, tgt = torch.split(query_embed, c, dim=1)
query_embed = query_embed.unsqueeze(0).expand(bs, -1, -1)
tgt = tgt.unsqueeze(0).expand(bs, -1, -1)
reference_points = self.reference_points(query_embed).sigmoid()
init_reference_out = reference_points
# decoder
hs, inter_references = self.decoder(tgt, reference_points, memory,
spatial_shapes, level_start_index, valid_ratios, query_embed)
inter_references_out = inter_references
return hs, init_reference_out, inter_references_out
class DeformableTransformerEncoderLayer(nn.Module):
"""Deformable Transfromer Encoder Layer module."""
def __init__(self,
d_model=256, d_ffn=1024,
dropout=0.3, activation="relu",
n_levels=4, n_heads=8, n_points=4, export=False):
"""Initializes the Transformer Encoder Layer.
Args:
d_model (int): size of the hidden dimension.
d_ffn (int): dimension of the feedforward layer.
dropout (float): probability for the dropout layer.
activation (str): type of activation layer.
n_levels (int): number of feature levels.
n_heads (int): number of attention heads.
n_points (int): number of sampling points per attention head per feature level.
export (bool): flag to indicate if the current model is being used for ONNX export.
"""
super().__init__()
self.export = export
# self attention
self.self_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)
self.dropout1 = nn.Dropout(dropout)
self.norm1 = nn.LayerNorm(d_model)
# ffn
self.linear1 = nn.Linear(d_model, d_ffn)
self.activation = _get_activation_fn(activation)
self.dropout2 = nn.Dropout(dropout)
self.linear2 = nn.Linear(d_ffn, d_model)
self.dropout3 = nn.Dropout(dropout)
self.norm2 = nn.LayerNorm(d_model)
@staticmethod
def with_pos_embed(tensor, pos):
"""Add positional Embedding to the tensor."""
return tensor if pos is None else tensor + pos
def forward_ffn(self, src):
"""Forward ffn."""
src2 = self.linear2(self.dropout2(self.activation(self.linear1(src))))
src = src + self.dropout3(src2)
src = self.norm2(src)
return src
def forward(self, src, pos, reference_points, spatial_shapes, level_start_index):
"""Forward function for Encoder Layer."""
src2 = self.self_attn(self.with_pos_embed(src, pos), reference_points, src, spatial_shapes, level_start_index, export=self.export)
src = src + self.dropout1(src2)
src = self.norm1(src)
# ffn
src = self.forward_ffn(src)
return src
class DeformableTransformerEncoder(nn.Module):
"""Deformable Transfromer Encoder module"""
def __init__(self, num_layers, encoder_args={}, export=False, activation_checkpoint=True):
"""Initializes the Transformer Encoder Module.
Args:
num_layers (int): number of encoder layers.
encoder_args (dict): additional arguments.
export (bool): flag to indicate if the current model is being used for ONNX export.
activation_checkpoint (bool): flag to indicate if activation checkpointing is used.
"""
super().__init__()
self.layers = _get_clones(DeformableTransformerEncoderLayer, num_layers, **encoder_args)
self.num_layers = num_layers
self.export = export
self.activation_checkpoint = activation_checkpoint
@staticmethod
def get_reference_points(spatial_shapes, valid_ratios, device, export=False):
"""Get reference points."""
reference_points_list = []
for lvl, (H_, W_) in enumerate(spatial_shapes):
if export: # Fixed dimensions for export in onnx
H_, W_ = int(H_), int(W_)
else:
H_, W_ = spatial_shapes[lvl, 0], spatial_shapes[lvl, 1]
range_y = torch.arange(H_, dtype=torch.int32, device=device).float() + 0.5
range_x = torch.arange(W_, dtype=torch.int32, device=device).float() + 0.5
ref_y, ref_x = torch.meshgrid(range_y, range_x)
ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, lvl, 1] * H_)
ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, lvl, 0] * W_)
ref = torch.stack((ref_x, ref_y), -1)
reference_points_list.append(ref)
reference_points = torch.cat(reference_points_list, 1)
reference_points = reference_points[:, :, None] * valid_ratios[:, None]
return reference_points
def forward(self, src, spatial_shapes, level_start_index, valid_ratios, pos=None):
"""Forward function for Encoder Module."""
output = src
reference_points = self.get_reference_points(spatial_shapes, valid_ratios, device=src.device, export=self.export)
for _, layer in enumerate(self.layers):
if self.export or not self.activation_checkpoint:
output = layer(output, pos, reference_points, spatial_shapes, level_start_index)
else:
output = checkpoint.checkpoint(layer,
output,
pos,
reference_points,
spatial_shapes,
level_start_index)
return output
class DeformableTransformerDecoderLayer(nn.Module):
""" Deformable Transfromer Decoder Layer module """
def __init__(self, d_model=256, d_ffn=1024,
dropout=0.3, activation="relu",
n_levels=4, n_heads=8, n_points=4, export=False):
"""Initializes the Transformer Decoder Layer.
Args:
d_model (int): size of the hidden dimension.
d_ffn (int): dimension of the feedforward layer.
dropout (float): probability for the dropout layer.
activation (str): type of activation layer.
n_levels (int): number of feature levels.
n_heads (int): number of attention heads.
n_points (int): number of sampling points per attention head per feature level.
export (bool): flag to indicate if the current model is being used for ONNX export.
"""
super().__init__()
self.export = export
# cross attention
self.cross_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)
self.dropout1 = nn.Dropout(dropout)
self.norm1 = nn.LayerNorm(d_model)
# self attention
if self.export:
# Starting from PyT 1.14, _scaled_dot_product_attention has been switched to C++ backend
# which is not exportable as ONNX operator
# However, the training / eval time can be greatly optimized by Torch selecting the optimal
# attention mechanism under the hood
self.self_attn = MultiheadAttention(d_model, n_heads, dropout=dropout)
else:
self.self_attn = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)
self.dropout2 = nn.Dropout(dropout)
self.norm2 = nn.LayerNorm(d_model)
# ffn
self.linear1 = nn.Linear(d_model, d_ffn)
self.activation = _get_activation_fn(activation)
self.dropout3 = nn.Dropout(dropout)
self.linear2 = nn.Linear(d_ffn, d_model)
self.dropout4 = nn.Dropout(dropout)
self.norm3 = nn.LayerNorm(d_model)
@staticmethod
def with_pos_embed(tensor, pos):
"""Add positional Embedding to the tensor."""
return tensor if pos is None else tensor + pos
def forward_ffn(self, tgt):
"""Forward ffn."""
tgt2 = self.linear2(self.dropout3(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout4(tgt2)
tgt = self.norm3(tgt)
return tgt
def forward(self, tgt, query_pos, reference_points, src, src_spatial_shapes, level_start_index):
"""Forward function for Decoder Layer."""
# self attention
q = k = self.with_pos_embed(tgt, query_pos)
tgt2 = self.self_attn(q.transpose(0, 1), k.transpose(0, 1), tgt.transpose(0, 1))[0].transpose(0, 1)
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
# cross attention
tgt2 = self.cross_attn(self.with_pos_embed(tgt, query_pos),
reference_points,
src, src_spatial_shapes, level_start_index, export=self.export)
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
# ffn
tgt = self.forward_ffn(tgt)
return tgt
# pylint:disable=E1136
class DeformableTransformerDecoder(nn.Module):
""" Deformable Transfromer Decoder module """
def __init__(self, num_layers, decoder_args={}, return_intermediate=False, export=False, activation_checkpoint=True):
"""Initializes the Transformer Decoder Module.
Args:
num_layers (int): number of decoder layers.
decoder_args (dict): additional arguments.
return_intermediate (bool): flag to indicate if intermediate outputs should be returned.
export (bool): flag to indicate if the current model is being used for ONNX export.
activation_checkpoint (bool): flag to indicate if activation checkpointing is used.
"""
super().__init__()
self.export = export
self.activation_checkpoint = activation_checkpoint
self.layers = _get_clones(DeformableTransformerDecoderLayer, num_layers, **decoder_args)
self.num_layers = num_layers
self.return_intermediate = return_intermediate
# hack implementation for iterative bounding box refinement Deformable DETR
self.bbox_embed = None
self.class_embed = None
def forward(self, tgt, reference_points, src, src_spatial_shapes, src_level_start_index, src_valid_ratios,
query_pos=None):
"""Forward function for Decoder Module."""
output = tgt
intermediate = []
intermediate_reference_points = []
for lid, layer in enumerate(self.layers):
if reference_points.shape[-1] == 4:
reference_points_input = reference_points[:, :, None] * torch.cat([src_valid_ratios, src_valid_ratios], -1)[:, None]
else:
assert reference_points.shape[-1] == 2
reference_points_input = reference_points[:, :, None] * src_valid_ratios[:, None]
if self.export or not self.activation_checkpoint:
output = layer(output, query_pos, reference_points_input, src, src_spatial_shapes, src_level_start_index)
else:
output = checkpoint.checkpoint(layer,
output,
query_pos,
reference_points_input,
src,
src_spatial_shapes,
src_level_start_index)
# hack implementation for iterative bounding box refinement
if self.bbox_embed is not None:
tmp = self.bbox_embed[lid](output)
if reference_points.shape[-1] == 4:
new_reference_points = tmp + inverse_sigmoid(reference_points)
new_reference_points = new_reference_points.sigmoid()
else:
assert reference_points.shape[-1] == 2
new_reference_points = tmp
new_reference_points[..., :2] = tmp[..., :2] + inverse_sigmoid(reference_points)
new_reference_points = new_reference_points.sigmoid()
reference_points = new_reference_points.detach()
if self.return_intermediate:
intermediate.append(output)
intermediate_reference_points.append(reference_points)
if self.return_intermediate:
return torch.stack(intermediate), torch.stack(intermediate_reference_points)
return output, reference_points
def _get_clones(module_class, N, **kwargs):
"""Get clones of nn.Module.
Args:
module_class (nn.Module): torch module to clone.
N (int): number of times to clone.
Returns:
nn.ModuleList of the cloned module_class.
"""
return nn.ModuleList([module_class(**kwargs) for i in range(N)])
def _get_activation_fn(activation):
"""Return an activation function given a string.
Args:
activation (str): type of activation function.
Returns:
PyTorch activation layer.
Raises:
RuntimeError: if unsupported activation type is provided.
"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(F"activation should be relu/gelu/glu, not {activation}.")
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/deformable_detr/model/deformable_transformer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Criterion Loss functions. """
import torch
import torch.nn.functional as F
from torch import nn
from nvidia_tao_pytorch.cv.deformable_detr.utils import box_ops
from nvidia_tao_pytorch.cv.deformable_detr.utils.misc import (accuracy, get_world_size, is_dist_avail_and_initialized)
def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2):
"""
Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.
Args:
inputs (torch.Tensor): A float tensor of arbitrary shape. The predictions for each example.
targets (torch.Tensor): A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
alpha (float): (optional) Weighting factor in range (0, 1) to balance
positive vs negative examples. Default = 0.25.
gamma (float): Exponent of the modulating factor (1 - p_t) to
balance easy vs hard examples.
Returns:
Loss tensor
"""
prob = inputs.sigmoid()
ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
p_t = prob * targets + (1 - prob) * (1 - targets)
loss = ce_loss * ((1 - p_t) ** gamma)
if alpha >= 0:
alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
loss = alpha_t * loss
return loss.mean(1).sum() / num_boxes
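# Example (illustrative sketch, not part of the original module): focal loss on dummy logits.
# Shapes are assumptions; inputs and targets share shape (batch, num_queries, num_classes).
#
#   logits = torch.randn(2, 300, 91)
#   onehot = torch.zeros_like(logits)
#   onehot[..., 3] = 1.0
#   loss = sigmoid_focal_loss(logits, onehot, num_boxes=4)  # scalar tensor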
class SetCriterion(nn.Module):
""" SetCriterion class computes the loss for DETR.
1) Compute hungarian assignment between ground truth boxes and the outputs of the model.
2) Supervise each pair of matched ground-truth / prediction (supervise class and box).
"""
def __init__(self, num_classes, matcher, losses, focal_alpha=0.25):
""" Initialize the criterion class
Args:
num_classes (int): number of object categories, omitting the special no-object category
matcher (nn.Module): module able to compute a matching between targets and proposals
losses (list[str]): list of all the losses to be applied. See get_loss for list of available losses.
focal_alpha (float): alpha in Focal Loss
"""
super().__init__()
self.num_classes = num_classes
self.matcher = matcher
self.losses = losses
self.focal_alpha = focal_alpha
def loss_labels(self, outputs, targets, indices, num_boxes, log=True):
""" Classification loss (NLL).
Args:
outputs (dict[torch.Tensor]): computed outputs
targets (List[dict]): target annotations
indices (list): matching indices
num_boxes (int): number of bounding boxes
Returns:
classification loss
"""
assert 'pred_logits' in outputs
src_logits = outputs['pred_logits']
idx = self._get_src_permutation_idx(indices)
target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)])
target_classes = torch.full(src_logits.shape[:2], self.num_classes,
dtype=torch.int64, device=src_logits.device)
target_classes[idx] = target_classes_o
target_classes_onehot = torch.zeros([src_logits.shape[0], src_logits.shape[1], src_logits.shape[2] + 1],
dtype=src_logits.dtype, layout=src_logits.layout, device=src_logits.device)
target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1)
target_classes_onehot = target_classes_onehot[:, :, :-1]
loss_ce = sigmoid_focal_loss(src_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2) * src_logits.shape[1]
losses = {'loss_ce': loss_ce}
if log:
# TODO this should probably be a separate loss, not hacked in this one here
losses['class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0]
return losses
def loss_boxes(self, outputs, targets, indices, num_boxes):
"""Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss.
Args:
outputs (dict[torch.Tensor]): computed outputs
targets (List[dict]): target annotations
indices (list): matching indices
num_boxes (int): number of bounding boxes
Returns:
bbox loss and giou loss
"""
assert 'pred_boxes' in outputs
idx = self._get_src_permutation_idx(indices)
src_boxes = outputs['pred_boxes'][idx]
target_boxes = torch.cat([t['boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0)
loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none')
losses = {}
losses['loss_bbox'] = loss_bbox.sum() / num_boxes
loss_giou = 1 - torch.diag(box_ops.generalized_box_iou(
box_ops.box_cxcywh_to_xyxy(src_boxes),
box_ops.box_cxcywh_to_xyxy(target_boxes)))
losses['loss_giou'] = loss_giou.sum() / num_boxes
return losses
def _get_src_permutation_idx(self, indices):
"""Permute predictions following indices.
Args:
indices (list): matching indices.
"""
batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])
src_idx = torch.cat([src for (src, _) in indices])
return batch_idx, src_idx
def _get_tgt_permutation_idx(self, indices):
"""Permute targets following indices.
Args:
indices (list): matching indices.
"""
batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])
tgt_idx = torch.cat([tgt for (_, tgt) in indices])
return batch_idx, tgt_idx
def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs):
"""Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss.
Args:
loss (str): name of the loss to get
outputs (dict[torch.Tensor]): computed outputs
targets (List[dict]): target annotations
indices (list): matching indices
num_boxes (int): number of bounding boxes
Returns:
the loss value given the loss name
"""
loss_map = {
'labels': self.loss_labels,
'boxes': self.loss_boxes,
}
assert loss in loss_map, f'do you really want to compute {loss} loss?'
return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs)
def forward(self, outputs, targets):
""" Performs the loss computation.
Args:
outputs (dict[torch.Tensor]): dict of tensors, see the output specification of the model for the format
targets (List[dict]): list of dicts, such that len(targets) == batch_size.
The expected keys in each dict depends on the losses applied, see each loss' doc
Returns:
losses (dict): Dictionary of computed losses
"""
outputs_without_aux = {k: v for k, v in outputs.items() if k not in ('aux_outputs', 'enc_outputs')}
# Retrieve the matching between the outputs of the last layer and the targets
indices = self.matcher(outputs_without_aux, targets)
        # Compute the average number of target boxes across all nodes, for normalization purposes
num_boxes = sum(len(t["labels"]) for t in targets)
num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device)
if is_dist_avail_and_initialized():
torch.distributed.all_reduce(num_boxes)
num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item()
# Compute all the requested losses
losses = {}
for loss in self.losses:
kwargs = {}
losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes, **kwargs))
# In case of auxiliary losses, we repeat this process with the output of each intermediate layer.
if 'aux_outputs' in outputs:
for i, aux_outputs in enumerate(outputs['aux_outputs']):
indices = self.matcher(aux_outputs, targets)
for loss in self.losses:
kwargs = {}
if loss == 'labels':
# Logging is enabled only for the last layer
kwargs['log'] = False
l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs)
l_dict = {k + f'_{i}': v for k, v in l_dict.items()}
losses.update(l_dict)
return losses
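# Illustrative usage sketch (an editorial addition, not part of the original
# module). `build_matcher` is a hypothetical stand-in for whatever constructs
# the Hungarian matcher; tensor shapes follow the docstrings above.
#
#   matcher = build_matcher(...)  # assumption: provided elsewhere in the repo
#   criterion = SetCriterion(num_classes=4, matcher=matcher,
#                            losses=['labels', 'boxes'], focal_alpha=0.25)
#   outputs = {'pred_logits': torch.randn(2, 300, 4),   # (batch, queries, classes)
#              'pred_boxes': torch.rand(2, 300, 4)}     # normalized cxcywh
#   targets = [{'labels': torch.tensor([1]), 'boxes': torch.rand(1, 4)}
#              for _ in range(2)]
#   loss_dict = criterion(outputs, targets)
#   # e.g. {'loss_ce': ..., 'class_error': ..., 'loss_bbox': ..., 'loss_giou': ...}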
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/deformable_detr/model/criterion.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Post processing for inference. """
import torch
from torch import nn
import os
from PIL import Image, ImageDraw
from nvidia_tao_pytorch.cv.deformable_detr.utils import box_ops
def get_key(label_map, val):
"""get_key for class label."""
for label in label_map:
if label['id'] == val:
return label['name']
return None
def check_key(my_dict, key):
"""check_key for classes."""
return bool(key in my_dict.keys())
def save_inference_prediction(predictions, output_dir, conf_threshold, label_map, color_map, is_internal=False):
"""Save the annotated images and label file to the output directory.
Args:
predictions (List): List of predictions from the model.
output_dir (str) : Output directory to save predictions.
conf_threshold (float) : Confidence Score Threshold value.
        label_map (Dict): Dictionary for the class labels.
        color_map (Dict): Dictionary for the color mapping used to annotate the bounding box per class.
        is_internal (Bool): Whether to save the inference results in the format output_dir/sequence/image_name.
"""
for pred in predictions:
image_name = pred['image_names']
image_size = pred['image_size']
pred_boxes = pred['boxes']
pred_labels = pred['labels']
pred_scores = pred['scores']
assert pred_boxes.shape[0] == pred_labels.shape[0] == pred_scores.shape[0]
path_list = image_name.split(os.sep)
basename, extension = os.path.splitext(path_list[-1])
if is_internal:
folder_name = path_list[-3]
output_label_root = os.path.join(output_dir, folder_name, 'labels')
output_label_name = os.path.join(output_label_root, basename + '.txt')
output_annotate_root = os.path.join(output_dir, folder_name, 'images_annotated')
output_image_name = os.path.join(output_annotate_root, basename + extension)
else:
output_label_root = os.path.join(output_dir, 'labels')
output_label_name = os.path.join(output_label_root, basename + '.txt')
output_annotate_root = os.path.join(output_dir, 'images_annotated')
output_image_name = os.path.join(output_annotate_root, basename + extension)
if not os.path.exists(output_label_root):
os.makedirs(output_label_root)
if not os.path.exists(output_annotate_root):
os.makedirs(output_annotate_root)
pil_input = Image.open(image_name)
pil_input = pil_input.resize((image_size[1], image_size[0]))
im1 = ImageDraw.Draw(pil_input)
with open(output_label_name, 'w') as f:
pred_boxes = pred_boxes.tolist()
scores = pred_scores.tolist()
labels = pred_labels.tolist()
for k, box in enumerate(pred_boxes):
class_key = get_key(label_map, labels[k])
if class_key is None:
continue
else:
class_name = class_key
# Conf score Thresholding
if scores[k] < conf_threshold:
continue
x1 = float(box[0])
y1 = float(box[1])
x2 = float(box[2])
y2 = float(box[3])
label_head = class_name + " 0.00 0 0.00 "
bbox_string = f"{x1:.3f} {y1:.3f} {x2:.3f} {y2:.3f}"
label_tail = f" 0.00 0.00 0.00 0.00 0.00 0.00 0.00 {scores[k]:.3f}\n"
label_string = label_head + bbox_string + label_tail
f.write(label_string)
if check_key(color_map, class_name):
im1.rectangle([int(x1), int(y1), int(x2), int(y2)], fill=None, outline=color_map[class_name], width=1)
pil_input.save(output_image_name)
def threshold_predictions(predictions, conf_threshold):
"""Thresholding the predctions based on the given confidence score threshold.
Args:
predictions (List): List of predictions from the model.
conf_threshold (float) : Confidence Score Threshold value.
Returns:
filtered_predictions (List): List of thresholded predictions.
"""
filtered_predictions = []
for pred in predictions:
pred_boxes = pred['boxes']
pred_labels = pred['labels']
pred_scores = pred['scores']
assert pred_boxes.shape[0] == pred_labels.shape[0] == pred_scores.shape[0]
if len(pred_boxes) == 0:
continue
pred_boxes = pred_boxes.tolist()
scores = pred_scores.tolist()
labels = pred_labels.tolist()
        # Conf score thresholding: iterate in reverse so that pop() does not
        # shift the indices of the elements that have not been checked yet.
        for k in range(len(pred_boxes) - 1, -1, -1):
            if scores[k] < conf_threshold:
                # remove from list
                scores.pop(k)
                labels.pop(k)
                pred_boxes.pop(k)
filtered_predictions.extend(
[
{
'image_names': pred['image_names'],
'image_size': pred['image_size'],
'boxes': torch.Tensor(pred_boxes),
'scores': torch.Tensor(scores),
'labels': torch.Tensor(labels)
}
]
)
return filtered_predictions
class PostProcess(nn.Module):
"""This module converts the model's output into the format expected by the coco api."""
def __init__(self, num_select=100) -> None:
"""PostProcess constructor.
Args:
num_select (int): top K predictions to select from
"""
super().__init__()
self.num_select = num_select
@torch.no_grad()
def forward(self, outputs, target_sizes, image_names):
""" Perform the post-processing. Scale back the boxes to the original size.
Args:
outputs (dict[torch.Tensor]): raw outputs of the model
            target_sizes (torch.Tensor): tensor of dimension [batch_size x 2] containing the size of each image of the batch.
                                          For evaluation, this must be the original image size (before any data augmentation).
                                          For visualization, this should be the image size after data augment, but before padding.
            image_names (List[str]): paths of the input images, carried through to the returned predictions.
Returns:
results (List[dict]): final predictions compatible with COCOEval format.
"""
out_logits, out_bbox = outputs['pred_logits'], outputs['pred_boxes']
assert len(out_logits) == len(target_sizes)
assert target_sizes.shape[1] == 2
prob = out_logits.sigmoid()
topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), self.num_select, dim=1)
scores = topk_values
topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode="floor")
labels = topk_indexes % out_logits.shape[2]
boxes = box_ops.box_cxcywh_to_xyxy(out_bbox)
boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))
# from relative [0, 1] to absolute [0, height] coordinates
img_h, img_w = target_sizes.unbind(1)
scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
boxes = boxes * scale_fct[:, None, :]
results = [{'scores': s, 'labels': l, 'boxes': b, 'image_names': n, 'image_size': i}
for s, l, b, n, i in zip(scores, labels, boxes, image_names, target_sizes)]
return results
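# Illustrative usage sketch (an editorial addition, not part of the original
# module). `outputs` is the raw model dict with 'pred_logits'/'pred_boxes' and
# `target_sizes` is a (batch_size, 2) tensor of (height, width).
#
#   post_process = PostProcess(num_select=100)
#   results = post_process(outputs, target_sizes, image_names)
#   results = threshold_predictions(results, conf_threshold=0.5)
#   save_inference_prediction(results, output_dir, 0.5, label_map, color_map)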
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/deformable_detr/model/post_process.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Backbone GCViT model definition. """
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath
from nvidia_tao_pytorch.cv.backbone.gc_vit import (
_to_channel_first, WindowAttentionGlobal, Mlp,
WindowAttention, PatchEmbed, ReduceSize, GlobalQueryGen
)
def window_partition(x, window_size):
"""Window partions.
Args:
x: (B, H, W, C)
window_size (int): window size
Returns:
windows: (num_windows*B, window_size, window_size, C)
"""
B, H, W, C = x.shape
x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
return windows
def window_reverse(windows, window_size, H, W):
"""Window reversal.
Args:
windows: (num_windows*B, window_size, window_size, C)
window_size (int): Window size
H (int): Height of image
W (int): Width of image
Returns:
x: (B, H, W, C)
"""
# Casting to int leads to error
B = windows.shape[0] // (H * W // window_size // window_size)
x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
return x
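# Illustrative round trip (an editorial addition, not part of the original
# module), assuming spatial dims divisible by the window size:
#   x = torch.randn(2, 14, 14, 96)
#   windows = window_partition(x, 7)             # (8, 7, 7, 96)
#   x_back = window_reverse(windows, 7, 14, 14)  # (2, 14, 14, 96), equal to x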
class GCViTBlock(nn.Module):
"""GCViT block based on: "Hatamizadeh et al.,
Global Context Vision Transformers <https://arxiv.org/abs/2206.09959>"
"""
def __init__(self,
dim,
num_heads,
window_size=7,
mlp_ratio=4.,
qkv_bias=True,
qk_scale=None,
drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
attention=WindowAttentionGlobal,
norm_layer=nn.LayerNorm,
layer_scale=None,
use_rel_pos_bias=False
):
"""Initialize GCViT Block.
Args:
dim (int): feature size dimension.
num_heads (int): number of heads in each stage.
window_size (int): window size in each stage.
mlp_ratio (float): MLP ratio.
qkv_bias (bool): bool argument for query, key, value learnable bias.
qk_scale (bool): bool argument to scaling query, key.
drop (float): dropout rate.
attn_drop (float): attention dropout rate.
drop_path (float): drop path rate.
act_layer (nn.Module): type of activation layer.
attention (nn.Module): type of attention layer
norm_layer (nn.Module): normalization layer.
layer_scale (float): layer scaling coefficient.
use_rel_pos_bias (bool): whether to use relative positional bias.
"""
super().__init__()
self.window_size = window_size
self.norm1 = norm_layer(dim)
self.attn = attention(dim,
num_heads=num_heads,
window_size=window_size,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attn_drop=attn_drop,
proj_drop=drop,
use_rel_pos_bias=use_rel_pos_bias
)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop)
self.layer_scale = False
if layer_scale is not None and type(layer_scale) in [int, float]:
self.layer_scale = True
self.gamma1 = nn.Parameter(layer_scale * torch.ones(dim), requires_grad=True)
self.gamma2 = nn.Parameter(layer_scale * torch.ones(dim), requires_grad=True)
else:
self.gamma1 = 1.0
self.gamma2 = 1.0
def forward(self, x, q_global):
"""Forward function."""
_, H, W, C = x.shape
shortcut = x
x = self.norm1(x)
pad_l = pad_t = 0
pad_r = (self.window_size - W % self.window_size) % self.window_size
pad_b = (self.window_size - H % self.window_size) % self.window_size
x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
_, Hp, Wp, _ = x.shape
shifted_x = x
x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
x_windows = x_windows.view(-1, self.window_size * self.window_size, C)
attn_windows = self.attn(x_windows, q_global)
shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp) # B H' W' C
x = shifted_x
if pad_r > 0 or pad_b > 0:
x = x[:, :H, :W, :].contiguous()
x = shortcut + self.drop_path(self.gamma1 * x)
x = x + self.drop_path(self.gamma2 * self.mlp(self.norm2(x)))
return x
class GCViTLayer(nn.Module):
"""GCViT layer based on: "Hatamizadeh et al.,
Global Context Vision Transformers <https://arxiv.org/abs/2206.09959>"
"""
def __init__(self,
dim,
depth,
input_resolution,
image_resolution,
num_heads,
window_size,
downsample=True,
mlp_ratio=4.,
qkv_bias=True,
qk_scale=None,
drop=0.,
attn_drop=0.,
drop_path=0.,
norm_layer=nn.LayerNorm,
layer_scale=None,
use_rel_pos_bias=False):
"""Initialize GCViT Layer.
Args:
dim (int): feature size dimension.
            depth (int): number of layers in this stage.
input_resolution (int): input image resolution
image_resolution (int): input image resolution
num_heads (int): number of heads in each stage.
window_size (tuple): window size in each stage.
downsample (bool): bool argument to downsample.
mlp_ratio (float): MLP ratio.
qkv_bias (bool): bool argument for query, key, value learnable bias.
qk_scale (bool): bool argument to scaling query, key.
attn_drop (float): attention dropout rate.
drop (float): dropout rate.
drop_path (float): drop path rate.
norm_layer (nn.Module): normalization layer.
layer_scale (float): layer scaling coefficient.
use_rel_pos_bias (bool): whether to use relative positional bias.
"""
super().__init__()
self.blocks = nn.ModuleList([
GCViTBlock(dim=dim,
num_heads=num_heads,
window_size=window_size,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attention=WindowAttention if (i % 2 == 0) else WindowAttentionGlobal,
drop=drop,
attn_drop=attn_drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
norm_layer=norm_layer,
layer_scale=layer_scale,
use_rel_pos_bias=use_rel_pos_bias)
for i in range(depth)])
self.downsample = None if not downsample else ReduceSize(dim=dim, norm_layer=norm_layer)
self.q_global_gen = GlobalQueryGen(dim, input_resolution, image_resolution, window_size, num_heads)
def forward(self, x):
"""Foward function."""
q_global = self.q_global_gen(_to_channel_first(x))
for blk in self.blocks:
x = blk(x, q_global)
if self.downsample is None:
return x, x
return self.downsample(x), x
class GCViT(nn.Module):
"""GCViT model based on: "Hatamizadeh et al.,
Global Context Vision Transformers <https://arxiv.org/abs/2206.09959>"
"""
def __init__(self,
dim,
depths,
mlp_ratio,
num_heads,
window_size=(7, 7, 14, 7),
resolution=224,
drop_path_rate=0.2,
in_chans=3,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
norm_layer=nn.LayerNorm,
layer_scale=None,
out_indices=(0, 1, 2, 3),
frozen_stages=-1,
use_rel_pos_bias=True,
activation_checkpoint=True,
**kwargs):
"""Initialize GCViT model.
Args:
dim (int): feature size dimension.
depths (int): number of layers in each stage.
mlp_ratio (float): MLP ratio.
num_heads (int): number of heads in each stage.
window_size (tuple): window size in each stage.
resolution (int): input image resolution
drop_path_rate (float): drop path rate.
qkv_bias (bool): bool argument for query, key, value learnable bias.
qk_scale (bool): bool argument to scaling query, key.
drop_rate (float): dropout rate.
attn_drop_rate (float): attention dropout rate.
norm_layer (nn.Module): normalization layer.
layer_scale (float): layer scaling coefficient.
out_indices (list): list of block indices to return as feature.
frozen_stages (int): stage to freeze.
use_rel_pos_bias (bool): whether to use relative positional bias.
            activation_checkpoint (bool): bool argument for activation checkpointing.
"""
super().__init__()
self.num_levels = len(depths)
self.embed_dim = dim
self.num_features = [int(dim * 2 ** i) for i in range(self.num_levels)]
self.mlp_ratio = mlp_ratio
self.activation_checkpoint = activation_checkpoint
self.pos_drop = nn.Dropout(p=drop_rate)
self.patch_embed = PatchEmbed(in_chans=in_chans, dim=dim)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
self.levels = nn.ModuleList()
for i in range(len(depths)):
level = GCViTLayer(dim=int(dim * 2 ** i),
depth=depths[i],
num_heads=num_heads[i],
window_size=window_size[i],
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=dpr[sum(depths[:i]):sum(depths[:i + 1])],
norm_layer=norm_layer,
downsample=(i < len(depths) - 1),
layer_scale=layer_scale,
input_resolution=int(2 ** (-2 - i) * resolution),
image_resolution=resolution,
use_rel_pos_bias=use_rel_pos_bias)
self.levels.append(level)
# add a norm layer for each output
self.out_indices = out_indices
for i_layer in self.out_indices:
layer = norm_layer(self.num_features[i_layer])
layer_name = f'norm{i_layer}'
self.add_module(layer_name, layer)
self.frozen_stages = frozen_stages
for level in self.levels:
for block in level.blocks:
w_ = block.attn.window_size[0]
relative_position_bias_table_pre = block.attn.relative_position_bias_table
L1, nH1 = relative_position_bias_table_pre.shape
L2 = (2 * w_ - 1) * (2 * w_ - 1)
S1 = int(L1 ** 0.5)
S2 = int(L2 ** 0.5)
relative_position_bias_table_pretrained_resized = torch.nn.functional.interpolate(
relative_position_bias_table_pre.permute(1, 0).view(1, nH1, S1, S1), size=(S2, S2),
mode='bicubic')
relative_position_bias_table_pretrained_resized = relative_position_bias_table_pretrained_resized.view(nH1, L2).permute(1, 0)
block.attn.relative_position_bias_table = torch.nn.Parameter(relative_position_bias_table_pretrained_resized)
def _freeze_stages(self):
"""Freeze some blocks"""
if self.frozen_stages >= 0:
self.patch_embed.eval()
for param in self.patch_embed.parameters():
param.requires_grad = False
if self.frozen_stages >= 2:
for i in range(0, self.frozen_stages - 1):
                m = self.levels[i]
m.eval()
for param in m.parameters():
param.requires_grad = False
def train(self, mode=True):
"""Freeze some blocks during training"""
super(GCViT, self).train(mode)
self._freeze_stages()
def forward_embeddings(self, x):
"""Compute patch embedding"""
x = self.patch_embed(x)
return x
def forward_tokens(self, x):
"""Returns features with normalization"""
outs = {}
for idx, level in enumerate(self.levels):
# Disable activation checkpointing during ONNX export
if torch.onnx.is_in_onnx_export() or not self.activation_checkpoint:
x, xo = level(x)
else:
x, xo = checkpoint.checkpoint(level, x)
if idx in self.out_indices:
norm_layer = getattr(self, f'norm{idx}')
x_out = norm_layer(xo)
outs[f'p{idx}'] = x_out.permute(0, 3, 1, 2).contiguous()
return outs
def forward(self, x):
"""Forward function"""
x = self.forward_embeddings(x)
return self.forward_tokens(x)
def forward_features(self, x):
"""Extract features"""
x = self.forward_embeddings(x)
return self.forward_tokens(x)
def gc_vit_xxtiny(out_indices=[0, 1, 2, 3], activation_checkpoint=True, **kwargs):
"""GCViT-XXTiny model.
Args:
out_indices (list): List of block indices to return as feature.
activation_checkpoint (bool): flag to indicate if activation checkpoint is used.
"""
model = GCViT(depths=[2, 2, 6, 2],
num_heads=[2, 4, 8, 16],
window_size=[7, 7, 14, 7],
dim=64,
mlp_ratio=3,
drop_path_rate=0.2,
out_indices=out_indices,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
frozen_stages=-1,
activation_checkpoint=activation_checkpoint,
**kwargs)
return model
def gc_vit_xtiny(out_indices=[0, 1, 2, 3], activation_checkpoint=True, **kwargs):
"""GCViT-XTiny model.
Args:
out_indices (list): List of block indices to return as feature.
activation_checkpoint (bool): flag to indicate if activation checkpoint is used.
"""
model = GCViT(depths=[3, 4, 6, 5],
num_heads=[2, 4, 8, 16],
window_size=[7, 7, 14, 7],
dim=64,
mlp_ratio=3,
drop_path_rate=0.2,
out_indices=out_indices,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
frozen_stages=-1,
activation_checkpoint=activation_checkpoint,
**kwargs)
return model
def gc_vit_tiny(out_indices=[0, 1, 2, 3], activation_checkpoint=True, **kwargs):
"""GCViT-Tiny model.
Args:
out_indices (list): List of block indices to return as feature.
activation_checkpoint (bool): flag to indicate if activation checkpoint is used.
"""
model = GCViT(depths=[3, 4, 19, 5],
num_heads=[2, 4, 8, 16],
window_size=[7, 7, 14, 7],
dim=64,
mlp_ratio=3,
drop_path_rate=0.2,
out_indices=out_indices,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
frozen_stages=-1,
activation_checkpoint=activation_checkpoint,
**kwargs)
return model
def gc_vit_small(out_indices=[0, 1, 2, 3], activation_checkpoint=True, **kwargs):
"""GCViT-Small model.
Args:
out_indices (list): List of block indices to return as feature.
activation_checkpoint (bool): flag to indicate if activation checkpoint is used.
"""
model = GCViT(depths=[3, 4, 19, 5],
num_heads=[3, 6, 12, 24],
window_size=[7, 7, 14, 7],
dim=96,
mlp_ratio=2,
drop_path_rate=0.2,
out_indices=out_indices,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
frozen_stages=-1,
layer_scale=1e-5,
activation_checkpoint=activation_checkpoint,
**kwargs)
return model
def gc_vit_base(out_indices=[0, 1, 2, 3], activation_checkpoint=True, **kwargs):
"""GCViT-Base model.
Args:
out_indices (list): List of block indices to return as feature.
activation_checkpoint (bool): flag to indicate if activation checkpoint is used.
"""
model = GCViT(depths=[3, 4, 19, 5],
num_heads=[4, 8, 16, 32],
window_size=[7, 7, 14, 7],
dim=128,
mlp_ratio=2,
drop_path_rate=0.2,
out_indices=out_indices,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
frozen_stages=-1,
layer_scale=1e-5,
activation_checkpoint=activation_checkpoint,
**kwargs)
return model
def gc_vit_large(out_indices=[0, 1, 2, 3], activation_checkpoint=True, **kwargs):
"""GCViT-Large model.
Args:
out_indices (list): List of block indices to return as feature.
activation_checkpoint (bool): flag to indicate if activation checkpoint is used.
"""
model = GCViT(depths=[3, 4, 19, 5],
num_heads=[6, 12, 24, 48],
window_size=[7, 7, 14, 7],
dim=192,
mlp_ratio=2,
drop_path_rate=0.2,
out_indices=out_indices,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
frozen_stages=-1,
layer_scale=1e-5,
activation_checkpoint=activation_checkpoint,
**kwargs)
return model
def gc_vit_large_384(out_indices=[0, 1, 2, 3], activation_checkpoint=True, **kwargs):
"""GCViT-Large Input Resolution 384 model.
Args:
out_indices (list): List of block indices to return as feature.
activation_checkpoint (bool): flag to indicate if activation checkpoint is used.
"""
model = GCViT(depths=[3, 4, 19, 5],
num_heads=[6, 12, 24, 48],
window_size=[12, 12, 24, 12],
dim=192,
mlp_ratio=2,
drop_path_rate=0.2,
out_indices=out_indices,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
frozen_stages=-1,
layer_scale=1e-5,
activation_checkpoint=activation_checkpoint,
**kwargs)
return model
gc_vit_model_dict = {
'gc_vit_xxtiny': gc_vit_xxtiny,
'gc_vit_xtiny': gc_vit_xtiny,
'gc_vit_tiny': gc_vit_tiny,
'gc_vit_small': gc_vit_small,
'gc_vit_base': gc_vit_base,
'gc_vit_large': gc_vit_large,
'gc_vit_large_384': gc_vit_large_384,
}
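# Illustrative usage sketch (an editorial addition, not part of the original
# module): building a GCViT backbone and extracting multi-scale features.
#
#   backbone = gc_vit_model_dict['gc_vit_tiny'](out_indices=[0, 1, 2, 3])
#   feats = backbone(torch.randn(1, 3, 224, 224))
#   # feats is a dict with keys 'p0'..'p3' of channel-first feature maps at
#   # strides 4, 8, 16 and 32 relative to the input resolution.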
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/deformable_detr/model/gc_vit.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The build nn module model."""
import torch.nn as nn
from nvidia_tao_pytorch.cv.deformable_detr.model.backbone import Backbone, Joiner
from nvidia_tao_pytorch.cv.deformable_detr.model.position_encoding import PositionEmbeddingSine, PositionEmbeddingSineExport
from nvidia_tao_pytorch.cv.deformable_detr.model.deformable_transformer import DeformableTransformer
from nvidia_tao_pytorch.cv.deformable_detr.model.deformable_detr_base import DeformableDETR
class DDModel(nn.Module):
"""Deformable DETR model module."""
def __init__(self,
num_classes=4,
hidden_dim=256,
pretrained_backbone_path=None,
backbone='resnet_50',
train_backbone=True,
num_feature_levels=4,
return_interm_indices=[1, 2, 3, 4],
nheads=8,
enc_layers=6,
dec_layers=6,
dim_feedforward=1024,
dec_n_points=4,
enc_n_points=4,
num_queries=300,
aux_loss=True,
with_box_refine=True,
dilation=False,
dropout_ratio=0.3,
export=False,
activation_checkpoint=True):
"""Initialize D-DETR Model.
Args:
num_classes (int): number of classes for the model.
hidden_dim (int): size of the hidden dimension.
pretrained_backbone_path (str): pretrained backbone path.
If not provided, train from scratch.
backbone (str): type of backbone architecture.
train_backbone (bool): whether to train backbone or not.
num_feature_levels (int): Number of levels to extract from the backbone feature maps.
return_interm_indices (list): indices of feature level to use.
nheads (int): number of heads.
enc_layers (int): number of encoder layers.
dec_layers (int): number of decoder layers.
dim_feedforward (int): dimension of the feedforward layer.
dec_n_points (int): number of reference points in the decoder.
enc_n_points (int): number of reference points in the encoder.
num_queries (int): number of queries to be used in D-DETR encoder-decoder.
aux_loss (bool): flag to indicate if auxiliary loss is used.
with_box_refine (bool): flag to indicate if iterative box refinement is used.
dilation (bool): flag to indicate if dilation is used (only for ResNet).
dropout_ratio (float): probability for the dropout layer.
export (bool): flag to indicate if the current model is being used for ONNX export.
activation_checkpoint (bool): flag to indicate if activation checkpointing is used.
"""
super(__class__, self).__init__() # pylint:disable=undefined-variable
# build positional encoding. only support PositionEmbeddingSine
if export:
position_embedding = PositionEmbeddingSineExport(hidden_dim // 2, normalize=True)
else:
position_embedding = PositionEmbeddingSine(hidden_dim // 2, normalize=True)
# build backbone
if num_feature_levels != len(return_interm_indices):
raise ValueError(f"num_feature_levels: {num_feature_levels} does not match the size of "
f"return_interm_indices: {return_interm_indices}")
# Index 4 is not part of the backbone but taken from index 3 with conv 3x3 stride 2
return_interm_indices = [r for r in return_interm_indices if r != 4]
backbone_only = Backbone(backbone,
pretrained_backbone_path,
train_backbone,
return_interm_indices,
dilation,
export,
activation_checkpoint)
# Keep joiner for backward compatibility
joined_backbone = Joiner(backbone_only)
        # build transformer
transformer = DeformableTransformer(d_model=hidden_dim,
nhead=nheads,
num_encoder_layers=enc_layers,
num_decoder_layers=dec_layers,
dim_feedforward=dim_feedforward,
dropout=dropout_ratio,
activation="relu",
return_intermediate_dec=True,
num_feature_levels=num_feature_levels,
dec_n_points=dec_n_points,
enc_n_points=enc_n_points,
export=export,
activation_checkpoint=activation_checkpoint)
# build deformable detr model
self.model = DeformableDETR(joined_backbone,
position_embedding,
transformer,
num_classes=num_classes,
num_queries=num_queries,
num_feature_levels=num_feature_levels,
aux_loss=aux_loss,
with_box_refine=with_box_refine,
export=export)
def forward(self, x):
"""model forward function"""
x = self.model(x)
return x
def build_model(experiment_config,
export=False):
""" Build deformable detr model according to configuration.
Args:
experiment_config (OmegaConf): experiment configuration.
export (bool): flag to indicate onnx export.
Returns:
model (nn.Module): D-DETR model.
"""
model_config = experiment_config.model
dataset_config = experiment_config.dataset
num_classes = dataset_config.num_classes
backbone = model_config.backbone
dropout_ratio = model_config.dropout_ratio
hidden_dim = model_config.hidden_dim
num_feature_levels = model_config.num_feature_levels
return_interm_indices = model_config.return_interm_indices
nheads = model_config.nheads
enc_layers = model_config.enc_layers
dec_layers = model_config.dec_layers
dim_feedforward = model_config.dim_feedforward
dec_n_points = model_config.dec_n_points
enc_n_points = model_config.enc_n_points
num_queries = model_config.num_queries
aux_loss = model_config.aux_loss
with_box_refine = model_config.with_box_refine
dilation = model_config.dilation
train_backbone = model_config.train_backbone
pretrained_backbone = model_config.pretrained_backbone_path
activation_checkpoint = experiment_config.train.activation_checkpoint
model = DDModel(num_classes=num_classes,
hidden_dim=hidden_dim,
pretrained_backbone_path=pretrained_backbone,
backbone=backbone,
train_backbone=train_backbone,
num_feature_levels=num_feature_levels,
return_interm_indices=return_interm_indices,
nheads=nheads,
enc_layers=enc_layers,
dec_layers=dec_layers,
dim_feedforward=dim_feedforward,
dec_n_points=dec_n_points,
enc_n_points=enc_n_points,
num_queries=num_queries,
aux_loss=aux_loss,
with_box_refine=with_box_refine,
dilation=dilation,
dropout_ratio=dropout_ratio,
export=export,
activation_checkpoint=activation_checkpoint)
return model
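# Illustrative usage sketch (an editorial addition, not part of the original
# module). Only the configuration fields read above need to be present; the
# values below are example settings, not defaults taken from the repo.
#
#   from omegaconf import OmegaConf
#   cfg = OmegaConf.create({
#       'dataset': {'num_classes': 4},
#       'train': {'activation_checkpoint': True},
#       'model': {'backbone': 'resnet_50', 'dropout_ratio': 0.3, 'hidden_dim': 256,
#                 'num_feature_levels': 4, 'return_interm_indices': [1, 2, 3, 4],
#                 'nheads': 8, 'enc_layers': 6, 'dec_layers': 6,
#                 'dim_feedforward': 1024, 'dec_n_points': 4, 'enc_n_points': 4,
#                 'num_queries': 300, 'aux_loss': True, 'with_box_refine': True,
#                 'dilation': False, 'train_backbone': True,
#                 'pretrained_backbone_path': None}})
#   model = build_model(cfg, export=False)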
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/deformable_detr/model/build_nn_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" MSDeformAttnFunction modules. """
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import torch
from torch.autograd import Function
from torch.autograd.function import once_differentiable
import os
def load_ops(ops_dir, lib_name):
"""Load C++ Ops to PyTorch.
Args:
ops_dir (str): Path to the C++ src code directory.
lib_name (str): Name of the library to load.
"""
module_path = os.path.join(ops_dir, lib_name)
torch.ops.load_library(module_path)
class MSDeformAttnFunction(Function):
"""MSDeformAttnFunction"""
@staticmethod
def forward(ctx, value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, im2col_step):
"""Forward function.
Args:
value (torch.Tensor): The value has shape
(bs, num_keys, mum_heads, embed_dims//num_heads)
value_spatial_shapes (torch.Tensor): Spatial shape of
each feature map, has shape (num_levels, 2),
last dimension 2 represent (h, w)
sampling_locations (torch.Tensor): The location of sampling points,
has shape
(bs ,num_queries, num_heads, num_levels, num_points, 2),
the last dimension 2 represent (x, y).
attention_weights (torch.Tensor): The weight of sampling points
used when calculate the attention, has shape
(bs ,num_queries, num_heads, num_levels, num_points),
im2col_step (torch.Tensor): The step used in image to column.
Returns:
torch.Tensor: has shape (bs, num_queries, embed_dims)
"""
ctx.im2col_step = im2col_step
output = torch.ops.nvidia.MultiscaleDeformableAttnPlugin_TRT(
value, value_spatial_shapes, value_level_start_index,
sampling_locations, attention_weights)
ctx.save_for_backward(value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights)
return output
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
"""Backward function.
Args:
grad_output (torch.Tensor): Gradient of output tensor of forward.
Returns:
tuple[Tensor]: Gradient of input tensors in forward.
"""
value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights = ctx.saved_tensors
grad_value, grad_sampling_loc, grad_attn_weight = \
torch.ops.nvidia.DMHA_backward(
value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, grad_output, ctx.im2col_step)
return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/deformable_detr/model/ops/functions.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Deformable DETR model ops module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/deformable_detr/model/ops/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" MSDeformAttn modules. """
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import warnings
import math
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn.init import xavier_uniform_, constant_
import os
from mmcv.ops.multi_scale_deform_attn import multi_scale_deformable_attn_pytorch
from nvidia_tao_pytorch.cv.deformable_detr.model.ops.functions import MSDeformAttnFunction, load_ops
def _is_power_of_2(n):
"""Check if n is power of 2.
Args:
n (int): input
Returns:
Boolean on if n is power of 2 or not.
"""
if (not isinstance(n, int)) or (n < 0):
raise ValueError(f"invalid input for _is_power_of_2: {n} (type: {type(n)})")
return (n & (n - 1) == 0) and n != 0
class MSDeformAttn(nn.Module):
"""Multi-Scale Deformable Attention Module."""
def __init__(self, d_model=256, n_levels=4, n_heads=8, n_points=4):
"""Multi-Scale Deformable Attention Constructor.
Args:
d_model (int): hidden dimension
n_levels (int): number of feature levels
n_heads (int): number of attention heads
n_points (int): number of sampling points per attention head per feature level
"""
super().__init__()
if d_model % n_heads != 0:
raise ValueError('d_model must be divisible by n_heads, but got {} and {}'.format(d_model, n_heads))
_d_per_head = d_model // n_heads
# you'd better set _d_per_head to a power of 2 which is more efficient in our CUDA implementation
if not _is_power_of_2(_d_per_head):
warnings.warn("You'd better set d_model in MSDeformAttn to make the dimension of each attention head a power of 2 "
"which is more efficient in our CUDA implementation.")
self.im2col_step = 64
self.d_model = d_model
self.n_levels = n_levels
self.n_heads = n_heads
self.n_points = n_points
self.sampling_offsets = nn.Linear(d_model, n_heads * n_levels * n_points * 2)
self.attention_weights = nn.Linear(d_model, n_heads * n_levels * n_points)
self.value_proj = nn.Linear(d_model, d_model)
self.output_proj = nn.Linear(d_model, d_model)
self._reset_parameters()
# load custom ops
ops_dir = os.path.dirname(os.path.abspath(__file__))
lib_name = "MultiScaleDeformableAttention.cpython-38-x86_64-linux-gnu.so"
load_ops(ops_dir, lib_name)
def _reset_parameters(self):
"""Reset parameters."""
constant_(self.sampling_offsets.weight.data, 0.)
thetas = torch.arange(self.n_heads, dtype=torch.float32) * (2.0 * math.pi / self.n_heads)
grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
grid_init = (grid_init / grid_init.abs().max(-1, keepdim=True)[0]).view(self.n_heads, 1, 1, 2).repeat(1, self.n_levels, self.n_points, 1)
for i in range(self.n_points):
grid_init[:, :, i, :] *= i + 1
with torch.no_grad():
self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1))
constant_(self.attention_weights.weight.data, 0.)
constant_(self.attention_weights.bias.data, 0.)
xavier_uniform_(self.value_proj.weight.data)
constant_(self.value_proj.bias.data, 0.)
xavier_uniform_(self.output_proj.weight.data)
constant_(self.output_proj.bias.data, 0.)
def forward(self, query, reference_points, input_flatten, input_spatial_shapes, input_level_start_index, input_padding_mask=None, export=False):
"""Forward function.
Args:
query (torch.Tensor): (N, Length_{query}, C)
reference_points (torch.Tensor): (N, Length_{query}, n_levels, 2), range in [0, 1], top-left (0,0), bottom-right (1, 1), including padding area
or (N, Length_{query}, n_levels, 4), add additional (w, h) to form reference boxes
input_flatten (torch.Tensor): (N, sum_{l=0}^{L-1} H_l cdot W_l, C)
input_spatial_shapes (torch.Tensor): (n_levels, 2), [(H_0, W_0), (H_1, W_1), ..., (H_{L-1}, W_{L-1})]
input_level_start_index (torch.Tensor): (n_levels, ), [0, H_0*W_0, H_0*W_0+H_1*W_1, H_0*W_0+H_1*W_1+H_2*W_2, ..., H_0*W_0+H_1*W_1+...+H_{L-1}*W_{L-1}]
input_padding_mask (torch.Tensor): (N, sum_{l=0}^{L-1} H_l cdot W_l), True for padding elements, False for non-padding elements
Returns:
output (torch.Tensor): (N, Length_{query}, C)
"""
N, Len_q, _ = query.shape
N, Len_in, _ = input_flatten.shape
assert (input_spatial_shapes[:, 0] * input_spatial_shapes[:, 1]).sum() == Len_in
value = self.value_proj(input_flatten)
if input_padding_mask is not None:
value = value.masked_fill(input_padding_mask[..., None], float(0))
value = value.view(N, Len_in, self.n_heads, self.d_model // self.n_heads)
sampling_offsets = self.sampling_offsets(query).view(N, Len_q, self.n_heads, self.n_levels, self.n_points, 2)
attention_weights = self.attention_weights(query).view(N, Len_q, self.n_heads, self.n_levels * self.n_points)
attention_weights = F.softmax(attention_weights, -1).view(N, Len_q, self.n_heads, self.n_levels, self.n_points)
# N, Len_q, n_heads, n_levels, n_points, 2
if reference_points.shape[-1] == 2:
offset_normalizer = torch.stack([input_spatial_shapes[..., 1],
input_spatial_shapes[..., 0]], -1)
sampling_locations = reference_points[:, :, None, :, None, :] + sampling_offsets / offset_normalizer[None, None, None, :, None, :]
elif reference_points.shape[-1] == 4:
sampling_locations = reference_points[:, :, None, :, None, :2] + sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5
else:
raise ValueError(
'Last dim of reference_points must be 2 or 4, but get {} instead.'.format(reference_points.shape[-1]))
input_spatial_shapes = input_spatial_shapes.long()
input_level_start_index = input_level_start_index.long()
if export:
if torch.cuda.is_available() and value.is_cuda:
output = torch.ops.nvidia.MultiscaleDeformableAttnPlugin_TRT(
value, input_spatial_shapes, input_level_start_index,
sampling_locations, attention_weights)
else:
# CPU implementation of multi-scale deformable attention
# Note that this implementation uses GridSample operator which requires
# opset version >= 16 and is much slower in TensorRT
warnings.warn("PyTorch native implementation of multi-scale deformable attention is being used. "
"Expect slower inference performance until TensorRT further optimizes GridSample.")
output = multi_scale_deformable_attn_pytorch(
value, input_spatial_shapes, sampling_locations, attention_weights
)
else:
if torch.cuda.is_available() and value.is_cuda:
# For mixed precision training
if value.dtype == torch.float16:
output = MSDeformAttnFunction.apply(
value.to(torch.float32), input_spatial_shapes,
input_level_start_index, sampling_locations.to(torch.float32),
attention_weights, self.im2col_step)
output = output.to(torch.float16)
else:
output = MSDeformAttnFunction.apply(
value, input_spatial_shapes, input_level_start_index,
sampling_locations, attention_weights, self.im2col_step)
else:
# CPU implementation of multi-scale deformable attention
output = multi_scale_deformable_attn_pytorch(value, input_spatial_shapes, sampling_locations, attention_weights)
output = output.view(N, Len_q, self.d_model)
output = self.output_proj(output)
return output
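# Illustrative shape sketch (an editorial addition, not part of the original
# module), for two feature levels of sizes 32x32 and 16x16 and d_model=256.
# Note that constructing MSDeformAttn loads the compiled custom op library.
#
#   attn = MSDeformAttn(d_model=256, n_levels=2, n_heads=8, n_points=4)
#   spatial_shapes = torch.tensor([[32, 32], [16, 16]])        # (n_levels, 2)
#   level_start_index = torch.tensor([0, 32 * 32])             # (n_levels,)
#   input_flatten = torch.randn(1, 32 * 32 + 16 * 16, 256)     # (N, sum H_l*W_l, C)
#   query = torch.randn(1, 300, 256)                           # (N, Len_q, C)
#   reference_points = torch.rand(1, 300, 2, 2)                # (N, Len_q, n_levels, 2)
#   out = attn(query, reference_points, input_flatten,
#              spatial_shapes, level_start_index)               # (1, 300, 256)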
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/deformable_detr/model/ops/modules.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Transformation for people transformer."""
import PIL
import torch
import random
import torchvision.transforms as T
import torchvision.transforms.functional as F
from nvidia_tao_pytorch.cv.deformable_detr.utils.box_ops import box_xyxy_to_cxcywh
def build_transforms(augmentation_config, experiment_config=None, dataset_mode='train'):
"""Build Augmentations.
Args:
augmentation_config (dict): augmentation configuration.
experiment_config (dict): experiment configuration.
dataset_mode (str): data mode (train, val, eval, infer).
Returns:
transforms (Compose): Final built transforms.
Raises:
If dataset_mode is set to other than given options (train, val, eval, infer), the code will raise the value error.
"""
input_mean = list(augmentation_config["input_mean"])
input_std = list(augmentation_config["input_std"])
scales = list(augmentation_config["scales"])
    random_resize_max_size = augmentation_config["random_resize_max_size"]
test_random_size = augmentation_config["test_random_resize"]
train_random_sizes = list(augmentation_config["train_random_resize"])
train_random_crop_min = augmentation_config["train_random_crop_min"]
train_random_crop_max = augmentation_config["train_random_crop_max"]
flip_prob = min(1.0, augmentation_config["horizontal_flip_prob"])
fixed_padding = augmentation_config["fixed_padding"]
normalize = Compose([
ToTensor(),
Normalize(input_mean, input_std)
])
    # Fixed padding is applied to prevent memory leaks caused by varying input sizes.
    # It needs to be applied after the normalize transform so the padded region stays zero.
    # Padding has the same effect as the padding in collate_fn, since only the
    # original (pre-padding) image size is stored in the target.
if dataset_mode == 'train':
if fixed_padding:
transforms = Compose([
RandomHorizontalFlip(flip_prob),
RandomSelect(
                    RandomResize(scales, max_size=random_resize_max_size),
                    Compose([
                        RandomResize(train_random_sizes),
                        RandomSizeCrop(train_random_crop_min, train_random_crop_max),
                        RandomResize(scales, max_size=random_resize_max_size),
                    ])
                ),
                normalize,
                FixedPad(sorted(scales)[-1], random_resize_max_size),
])
else:
transforms = Compose([
RandomHorizontalFlip(flip_prob),
RandomSelect(
                    RandomResize(scales, max_size=random_resize_max_size),
                    Compose([
                        RandomResize(train_random_sizes),
                        RandomSizeCrop(train_random_crop_min, train_random_crop_max),
                        RandomResize(scales, max_size=random_resize_max_size),
])
),
normalize,
])
elif dataset_mode in ('val', 'eval', 'infer'):
if fixed_padding:
transforms = Compose([
                RandomResize([test_random_size], max_size=random_resize_max_size),
                normalize,
                FixedPad(test_random_size, random_resize_max_size),
])
else:
transforms = Compose([
                RandomResize([test_random_size], max_size=random_resize_max_size),
normalize,
])
else:
raise ValueError('There are only train, val, eval, and infer options in dataset_mode.')
return transforms
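# Illustrative usage sketch (an editorial addition, not part of the original
# module). The augmentation config only needs the keys read above; the values
# below are example settings, not defaults taken from the repo.
#
#   augmentation_config = {
#       'input_mean': [0.485, 0.456, 0.406], 'input_std': [0.229, 0.224, 0.225],
#       'scales': [480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800],
#       'random_resize_max_size': 1333, 'test_random_resize': 800,
#       'train_random_resize': [400, 500, 600],
#       'train_random_crop_min': 384, 'train_random_crop_max': 600,
#       'horizontal_flip_prob': 0.5, 'fixed_padding': True,
#   }
#   train_transforms = build_transforms(augmentation_config, dataset_mode='train')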
def crop(image, target, region):
"""Crop image.
Args:
image (PIL.Image): loaded image.
target (dict): loaded target.
region (tuple): region to crop.
Returns:
        (cropped_image, target): cropped image and processed target based on the cropped image.
"""
cropped_image = F.crop(image, *region)
target = target.copy()
i, j, h, w = region
target["size"] = torch.tensor([h, w])
fields = ["labels"]
if "boxes" in target:
boxes = target["boxes"]
max_size = torch.as_tensor([w, h], dtype=torch.float32)
cropped_boxes = boxes - torch.as_tensor([j, i, j, i])
cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size)
cropped_boxes = cropped_boxes.clamp(min=0)
target["boxes"] = cropped_boxes.reshape(-1, 4)
fields.append("boxes")
# remove elements for which the boxes that have zero area
if "boxes" in target:
# favor boxes selection when defining which elements to keep
# this is compatible with previous implementation
if "boxes" in target:
cropped_boxes = target['boxes'].reshape(-1, 2, 2)
keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1)
for field in fields:
target[field] = target[field][keep]
return cropped_image, target
def hflip(image, target):
"""Horizontal Flip.
Args:
image (PIL.image): loaded image.
target (dict): loaded target.
Returns:
        (flipped_image, target): flipped image and processed target based on the flipped image.
"""
flipped_image = F.hflip(image)
w, _ = image.size
target = target.copy()
if "boxes" in target:
boxes = target["boxes"]
boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor([w, 0, w, 0])
target["boxes"] = boxes
return flipped_image, target
def resize(image, target, size, max_size=None):
"""Resize the image.
Args:
image (PIL.Image): loaded image.
target (dict): loaded target.
size (int / tuple): size to resize, size can be min_size (scalar) or (w, h) tuple.
max_size (int): maximum size to resize.
Returns:
        (rescaled_image, target): rescaled image and processed target based on the rescaled image.
"""
def get_size_with_aspect_ratio(image_size, size, max_size=None):
""" get size with aspect ratio """
w, h = image_size
if max_size is not None:
min_original_size = float(min((w, h)))
max_original_size = float(max((w, h)))
if max_original_size / min_original_size * size > max_size:
size = int(round(max_size * min_original_size / max_original_size))
if (w <= h and w == size) or (h <= w and h == size):
return (h, w)
if w < h:
ow = size
oh = int(size * h / w)
else:
oh = size
ow = int(size * w / h)
return (oh, ow)
def get_size(image_size, size, max_size=None):
""" get size to resize """
if isinstance(size, (list, tuple)):
return_size = size[::-1]
else:
return_size = get_size_with_aspect_ratio(image_size, size, max_size)
return return_size
size = get_size(image.size, size, max_size)
rescaled_image = F.resize(image, size)
if target is None:
return rescaled_image, None
ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size))
ratio_width, ratio_height = ratios
target = target.copy()
if "boxes" in target:
boxes = target["boxes"]
scaled_boxes = boxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height])
target["boxes"] = scaled_boxes
h, w = size
target["size"] = torch.tensor([h, w])
return rescaled_image, target
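# Illustrative arithmetic (an editorial addition, not part of the original
# module): for a 640x480 (w x h) image with size=800 and max_size=1333, the
# shorter side (480) is scaled to 800 and the longer side to
# int(800 * 640 / 480) = 1066, giving a 1066x800 image; box coordinates are
# scaled by the same per-axis ratios.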
def pad(image, target, padding):
"""Padding the image on the bottom right corners.
Args:
image (PIL.Image): loaded image.
target (dict): loaded target.
padding (tuple): size to pad.
Returns:
        (padded_image, target): padded image and processed target based on the padded image.
"""
# zero padding
padded_image = F.pad(image, (0, 0, padding[0], padding[1]))
if target is None:
return padded_image, None
target = target.copy()
    # Store the pre-padding image size so that collate_fn can also overwrite
    # the transform-padded region.
if isinstance(image, torch.Tensor):
target["size"] = image.shape[1:]
else:
target["size"] = torch.tensor(image.size[::-1])
return padded_image, target
class RandomCrop(object):
"""Random Crop class."""
def __init__(self, size):
"""Initialize the RandomCrop Class.
Args:
size (tuple): size to perform random crop
"""
self.size = size
def __call__(self, img, target):
"""Call RandomCrop.
Args:
image (PIL.Image): Pillow Image.
target (dict): Annotations.
Returns:
image (PIL.Image): Cropped Image.
target (dict): Cropped Annotations.
"""
region = T.RandomCrop.get_params(img, self.size)
return crop(img, target, region)
class RandomSizeCrop(object):
"""Random Size Crop class."""
def __init__(self, min_size: int, max_size: int):
"""Initialize the RandomCrop Class.
Args:
min_size (int): minimum size to perform random crop.
max_size (int): maximum size to perform random crop.
"""
self.min_size = min_size
self.max_size = max_size
def __call__(self, img: PIL.Image.Image, target: dict):
"""Call RandomSizeCrop.
Args:
image (PIL.Image): Pillow Image.
target (dict): Annotations.
Returns:
image (PIL.Image): Cropped Image.
target (dict): Cropped Annotations.
"""
w = random.randint(self.min_size, min(img.width, self.max_size))
h = random.randint(self.min_size, min(img.height, self.max_size))
region = T.RandomCrop.get_params(img, [h, w])
return crop(img, target, region)
class CenterCrop(object):
"""Center Crop class."""
def __init__(self, size):
"""Initialize the CenterCrop Class.
Args:
size (tuple): size to perform center crop.
"""
self.size = size
def __call__(self, img, target):
"""Call CenterCrop.
Args:
image (PIL.Image): Pillow Image.
target (dict): Annotations.
Returns:
image (PIL.Image): Cropped Image.
target (dict): Cropped Annotations.
"""
image_width, image_height = img.size
crop_height, crop_width = self.size
crop_top = int(round((image_height - crop_height) / 2.))
crop_left = int(round((image_width - crop_width) / 2.))
return crop(img, target, (crop_top, crop_left, crop_height, crop_width))
class RandomHorizontalFlip(object):
"""Random Horizontal Flip class"""
def __init__(self, p=0.5):
"""Initialize the RandomHorizontalFlip Class.
Args:
p (float): probability to perform random horizontal flip.
"""
self.p = p
def __call__(self, img, target):
"""Call RandomHorizontalFlip.
Args:
image (PIL.Image): Pillow Image.
target (dict): Annotations.
Returns:
image (PIL.Image): Flipped Image.
target (dict): Flipped Annotations.
"""
if random.random() < self.p:
return hflip(img, target)
return img, target
class RandomResize(object):
"""Random Resize class."""
def __init__(self, sizes, max_size=None):
"""Initialize the RandomResize Class.
Args:
size (list): size to perform random resize.
max_size (int): maximum size to perform random resize.
"""
assert isinstance(sizes, (list, tuple))
self.sizes = sizes
self.max_size = max_size
def __call__(self, img, target=None):
"""Call RandomResize.
Args:
image (PIL.Image): Pillow Image.
target (dict): Annotations.
Returns:
image (PIL.Image): Resized Image.
target (dict): Resized Annotations.
"""
size = random.choice(self.sizes)
return resize(img, target, size, self.max_size)
class FixedResize(object):
"""Fixed Size Resize class."""
def __init__(self, sizes):
"""Initialize the FixedResize Class.
Args:
sizes (list): size to perform random resize.
"""
assert isinstance(sizes, (list, tuple))
self.sizes = sizes
def __call__(self, img, target=None):
"""Call FixedResize.
Args:
image (PIL.Image): Pillow Image.
target (dict): Annotations.
Returns:
image (PIL.Image): Resized Image.
target (dict): Resized Annotations.
"""
return resize(img, target, self.sizes, None)
class RandomPad(object):
"""Random Pad class."""
def __init__(self, max_pad):
"""Initialize the RandomPad Class.
Args:
max_pad (int): max padding size.
"""
self.max_pad = max_pad
def __call__(self, img, target):
"""Call RandomPad.
Args:
image (PIL.Image): Pillow Image.
target (dict): Annotations.
Returns:
image (PIL.Image): Padded Image.
target (dict): Padded Annotations.
"""
pad_x = random.randint(0, self.max_pad)
pad_y = random.randint(0, self.max_pad)
return pad(img, target, (pad_x, pad_y))
class FixedPad(object):
"""Fixed Pad class."""
def __init__(self, target_min, target_max):
"""Initialize the FixedPad Class.
Args:
            target_min (int): target size for the shorter image side after padding.
            target_max (int): target size for the longer image side after padding.
"""
self.target_min = target_min
self.target_max = target_max
def __call__(self, img, target):
"""Call FixedPad.
Args:
image (PIL.Image): Pillow Image.
target (dict): Annotations.
Returns:
image (PIL.Image): Padded Image.
target (dict): Padded Annotations.
"""
height, width = target['size']
if height > width:
pad_x = self.target_min - width
pad_y = self.target_max - height
else:
pad_x = self.target_max - width
pad_y = self.target_min - height
tmp = pad(img, target, (pad_x, pad_y))
return tmp
class RandomSelect(object):
"""
Randomly selects between transforms1 and transforms2,
with probability p for transforms1 and (1 - p) for transforms2.
"""
def __init__(self, transforms1, transforms2, p=0.5):
"""Initialize the RandomSelect Class.
Args:
transforms1 (object): given transform to select.
transforms2 (object): given transform to select.
p (float): probability to select between transform 1 and 2.
"""
self.transforms1 = transforms1
self.transforms2 = transforms2
self.p = p
def __call__(self, img, target):
"""Call RandomSelect.
Args:
image (PIL.Image): Pillow Image.
target (dict): Annotations.
Returns:
image (PIL.Image): Image.
target (dict): Annotations.
"""
if random.random() < self.p:
return self.transforms1(img, target)
return self.transforms2(img, target)
class ToTensor(object):
"""Convert PIL.Image to torch.Tensor"""
def __call__(self, img, target):
"""Call ToTensor.
Args:
image (PIL.Image): Pillow Image.
target (dict): Annotations.
Returns:
image (torch.Tensor): Image Tensor.
target (dict): Annotations.
"""
return F.to_tensor(img), target
class RandomErasing(object):
"""Random Erasing class."""
def __init__(self, *args, **kwargs):
"""Initialize the RandomErasing Class."""
self.eraser = T.RandomErasing(*args, **kwargs)
def __call__(self, img, target):
"""Call RandomErasing.
Args:
image (PIL.Image): Pillow Image.
target (dict): Annotations.
Returns:
image (PIL.Image): Randomly erased Image.
target (dict): Randomly erased Annotations.
"""
return self.eraser(img), target
class Normalize(object):
""" Normalize class """
def __init__(self, mean, std):
"""Initialize the Normalize Class.
Args:
mean (list): mean value to normalize.
std (list): standard deviation value to normalize.
"""
self.mean = mean
self.std = std
def __call__(self, image, target=None):
"""Call Normalize.
Args:
image (PIL.Image): Pillow Image.
target (dict): Annotations.
Returns:
image (torch.Tensor): Normalized Tensor.
target (dict): Normalized Annotations.
"""
image = F.normalize(image, mean=self.mean, std=self.std)
if target is None:
return image, None
target = target.copy()
h, w = image.shape[-2:]
if "boxes" in target:
boxes = target["boxes"]
boxes = box_xyxy_to_cxcywh(boxes)
boxes = boxes / torch.tensor([w, h, w, h], dtype=torch.float32)
target["boxes"] = boxes
return image, target
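# Worked example (illustrative values): for a 3x200x100 (C x H x W) image tensor and a
# box [x0, y0, x1, y1] = [10, 20, 50, 60], box_xyxy_to_cxcywh yields [30, 40, 40, 40]
# (cx, cy, w, h); dividing by [w, h, w, h] = [100, 200, 100, 200] gives the normalized
# box [0.30, 0.20, 0.40, 0.20] that is written back into target["boxes"].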
class Compose(object):
"""Compose class."""
def __init__(self, transforms):
"""Initialize the Compose Class.
Args:
transforms (list): transform list to compose.
"""
self.transforms = transforms
def __call__(self, image, target):
"""Call Compose.
Args:
image (torch.Tensor): Image in Tensor.
target (dict): Annotations.
Returns:
image (torch.Tensor): Composed Tensor.
target (dict): Composed Annotations.
"""
for t in self.transforms:
image, target = t(image, target)
return image, target
def __repr__(self):
""" repr """
format_string = self.__class__.__name__ + "("
for t in self.transforms:
format_string += "\n"
format_string += " {0}".format(t)
format_string += "\n)"
return format_string
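# --- Illustrative usage (a minimal sketch, not part of the original module) ---
# Shows how the classes above chain together. The mean/std values and the dummy
# image/target below are assumptions for demonstration; the actual D-DETR pipeline
# is assembled from the augmentation config by build_transforms().
if __name__ == "__main__":
    from PIL import Image as _Image

    demo_pipeline = Compose([
        ToTensor(),
        Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    dummy_image = _Image.new("RGB", (100, 200))  # PIL size is (width, height)
    dummy_target = {
        "size": torch.as_tensor([200, 100]),                # (height, width)
        "boxes": torch.tensor([[10.0, 20.0, 50.0, 60.0]]),  # xyxy in pixels
    }
    out_image, out_target = demo_pipeline(dummy_image, dummy_target)
    # out_image is a normalized 3x200x100 tensor; out_target["boxes"] is now cxcywh
    # normalized to [0, 1], here [[0.30, 0.20, 0.40, 0.20]].
    print(out_image.shape, out_target["boxes"])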
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/deformable_detr/dataloader/transforms.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Object Detection dataset."""
import torch
from typing import Optional
from torch.utils.data import DataLoader
import pytorch_lightning as pl
from nvidia_tao_pytorch.cv.deformable_detr.dataloader.transforms import build_transforms
from nvidia_tao_pytorch.cv.deformable_detr.utils.misc import collate_fn, is_dist_avail_and_initialized
from nvidia_tao_pytorch.cv.deformable_detr.dataloader.od_dataset import ODPredictDataset, ODDataset
from nvidia_tao_pytorch.cv.deformable_detr.dataloader.serialized_dataset import build_shm_dataset
from nvidia_tao_pytorch.cv.deformable_detr.dataloader.sampler import UniformSampler, NonUniformSampler, DefaultSampler
from nvidia_tao_pytorch.cv.deformable_detr.utils.misc import get_world_size, get_global_rank
class ODDataModule(pl.LightningDataModule):
"""Lightning DataModule for Object Detection."""
def __init__(self, dataset_config):
""" Lightning DataModule Initialization.
Args:
dataset_config (OmegaConf): dataset configuration
"""
super().__init__()
self.dataset_config = dataset_config
self.augmentation_config = dataset_config["augmentation"]
self.batch_size = dataset_config["batch_size"]
self.num_workers = dataset_config["workers"]
self.num_classes = dataset_config["num_classes"]
self.pin_memory = dataset_config["pin_memory"]
def setup(self, stage: Optional[str] = None):
""" Loads in data from file and prepares PyTorch tensor datasets for each split (train, val, test).
Args:
stage (str): stage options from fit, test, predict or None.
"""
is_distributed = is_dist_avail_and_initialized()
if stage in ('fit', None):
# check pytorch distributed is set or not
train_data_sources = self.dataset_config["train_data_sources"]
train_transform = build_transforms(self.augmentation_config, dataset_mode='train')
val_data_sources = self.dataset_config["val_data_sources"]
val_transform = build_transforms(self.augmentation_config, dataset_mode='val')
if self.dataset_config["dataset_type"] == "serialized":
self.train_dataset = build_shm_dataset(train_data_sources, train_transform)
if is_distributed:
self.train_sampler = torch.utils.data.distributed.DistributedSampler(self.train_dataset, shuffle=True)
else:
self.train_sampler = torch.utils.data.RandomSampler(self.train_dataset)
# prep validation
self.val_dataset = build_shm_dataset(val_data_sources, val_transform)
if is_distributed:
self.val_sampler = torch.utils.data.distributed.DistributedSampler(self.val_dataset, shuffle=False)
else:
self.val_sampler = torch.utils.data.SequentialSampler(self.val_dataset)
else:
data_sampler = self.dataset_config["train_sampler"]
self.val_dataset = DefaultSampler(val_data_sources, is_distributed, transforms=val_transform).build_data_source()
if is_distributed: # distributed training
if data_sampler == "default_sampler":
self.train_dataset, self.train_sampler = DefaultSampler(train_data_sources, is_distributed, transforms=train_transform).get_sampler()
self.val_sampler = torch.utils.data.distributed.DistributedSampler(self.val_dataset, shuffle=False)
elif data_sampler == "non_uniform_sampler":
# manual partial data loading for each GPU. Use this for large dataset which can't fit into the memory, sampler is Default sampler
global_rank = get_global_rank()
num_gpus = get_world_size()
self.train_dataset, self.train_sampler = NonUniformSampler(train_data_sources, transforms=train_transform).get_sampler(global_rank, num_gpus)
self.val_sampler = torch.utils.data.distributed.DistributedSampler(self.val_dataset, shuffle=False)
elif data_sampler == "uniform_sampler":
# manual partial data loading for each GPU. Use this for large dataset which can't fit into the memory, sampler is Uniform Distribution Sampler
global_rank = get_global_rank()
num_gpus = get_world_size()
self.train_dataset, self.train_sampler = UniformSampler(train_data_sources, transforms=train_transform).get_sampler(global_rank, num_gpus)
self.val_sampler = torch.utils.data.distributed.DistributedSampler(self.val_dataset, shuffle=False)
else:
raise NotImplementedError("Sampler {} is not implemented. Use DefaultSampler or UniformSampler".format(data_sampler))
else: # Non-distributed learning
if data_sampler == "default_sampler":
self.train_dataset, self.train_sampler = DefaultSampler(train_data_sources, is_distributed, transforms=train_transform).get_sampler()
self.val_sampler = torch.utils.data.SequentialSampler(self.val_dataset)
else:
raise NotImplementedError("Sampler {} is not implemented for this type of input data. Use DefaultSampler in data_sampler".format(data_sampler))
# Assign test dataset for use in dataloader
if stage in ('test', None):
test_data_sources = self.dataset_config["test_data_sources"]
self.test_root = test_data_sources.get("image_dir", "")
test_json = test_data_sources.get("json_file", "")
test_transforms = build_transforms(self.augmentation_config, dataset_mode='eval')
if self.dataset_config["dataset_type"] == "serialized":
self.test_dataset = build_shm_dataset(test_data_sources, transforms=test_transforms)
else:
self.test_dataset = ODDataset(dataset_dir=self.test_root, json_file=test_json, transforms=test_transforms)
# Assign predict dataset for use in dataloader
if stage in ('predict', None):
pred_data_sources = self.dataset_config["infer_data_sources"]
pred_list = pred_data_sources.get("image_dir", [])
if isinstance(pred_list, str):
pred_list = [pred_list]
classmap = pred_data_sources.get("classmap", "")
self.pred_dataset = ODPredictDataset(pred_list, classmap, transforms=build_transforms(self.augmentation_config, dataset_mode='infer'))
def train_dataloader(self):
"""Build the dataloader for training.
Returns:
train_loader: PyTorch DataLoader used for training.
"""
train_loader = DataLoader(
self.train_dataset,
num_workers=self.num_workers,
pin_memory=self.pin_memory,
collate_fn=collate_fn,
batch_sampler=torch.utils.data.BatchSampler(self.train_sampler, self.batch_size, drop_last=True)
)
return train_loader
def val_dataloader(self):
"""Build the dataloader for validation.
Returns:
PyTorch DataLoader used for validation.
"""
return DataLoader(
self.val_dataset,
num_workers=self.num_workers,
batch_size=self.batch_size,
pin_memory=self.pin_memory,
drop_last=False,
collate_fn=collate_fn,
sampler=self.val_sampler)
def test_dataloader(self):
"""Build the dataloader for evaluation.
Returns:
PyTorch DataLoader used for evaluation.
"""
return DataLoader(
self.test_dataset,
num_workers=self.num_workers,
batch_size=self.batch_size,
shuffle=False,
pin_memory=self.pin_memory,
drop_last=False,
collate_fn=collate_fn)
def predict_dataloader(self):
"""Build the dataloader for inference.
Returns:
PyTorch DataLoader used for inference.
"""
return DataLoader(
self.pred_dataset,
num_workers=self.num_workers,
batch_size=self.batch_size,
shuffle=False,
pin_memory=self.pin_memory,
drop_last=False,
collate_fn=collate_fn)
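# --- Illustrative usage (a minimal sketch, not part of the original module) ---
# The dataset_config keys below mirror those read in __init__() and setup(); the
# paths and the key layout of each data source entry are placeholder assumptions,
# and the empty augmentation block stands in for the full augmentation config.
if __name__ == "__main__":
    demo_cfg = {
        "train_data_sources": [{"image_dir": "/data/train/images", "json_file": "/data/train/train.json"}],
        "val_data_sources": [{"image_dir": "/data/val/images", "json_file": "/data/val/val.json"}],
        "test_data_sources": {"image_dir": "/data/test/images", "json_file": "/data/test/test.json"},
        "infer_data_sources": {"image_dir": ["/data/infer/images"], "classmap": "/data/classmap.txt"},
        "dataset_type": "serialized",
        "train_sampler": "default_sampler",
        "batch_size": 4,
        "workers": 8,
        "num_classes": 91,
        "pin_memory": True,
        "augmentation": {},  # placeholder; normally the full augmentation config
    }
    dm = ODDataModule(demo_cfg)
    # A Lightning Trainer would normally drive this module, e.g.
    # trainer.fit(model, datamodule=dm), which calls setup("fit") and then the
    # train_dataloader() / val_dataloader() methods defined above.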
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/deformable_detr/dataloader/od_data_module.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Deformable DETR dataloader module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/deformable_detr/dataloader/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Object Detection Dataset Class and Related Functions """
import torch
from torch.utils.data.dataset import Dataset
import os
import json
import glob
import numpy as np
from PIL import Image
from typing import Any, Tuple, List
from nvidia_tao_pytorch.cv.deformable_detr.utils.coco import COCO
# List of valid image extensions
VALID_IMAGE_EXTENSIONS = (".jpg", ".jpeg", ".png", ".bmp", ".JPEG", ".JPG", ".PNG")
class ODDataset(Dataset):
"""Base Object Detection Dataset Class."""
def __init__(self, json_file: str = None, dataset_dir: str = None, transforms=None):
"""Initialize the Object Detetion Dataset Class.
Note that multiple loading of COCO type JSON files can lead to system memory OOM.
In such case, use SerializedDatasetFromList.
Args:
json_file (str): json_file name to load the data.
dataset_dir (str): dataset directory.
transforms: augmentations to apply.
"""
self.dataset_dir = dataset_dir
self.transforms = transforms
with open(json_file, 'r') as f:
json_data = json.load(f)
self.coco = COCO(json_data)
self.ids = list(sorted(self.coco.imgs.keys()))
self.label_map = self.coco.dataset['categories']
def _load_image(self, img_id: int) -> Tuple[Image.Image, str]:
"""Load image given image id.
Args:
img_id (int): image id to load.
Returns:
Tuple of the loaded PIL Image and its file path.
"""
path = self.coco.loadImgs(img_id)[0]["file_name"]
if self.dataset_dir != "":
img_path = os.path.join(self.dataset_dir, path)
else:
img_path = path
return_output = (Image.open(img_path).convert("RGB"), img_path)
return return_output
def _load_target(self, img_id: int) -> List[Any]:
"""Load target (annotation) given image id.
Args:
img_id (int): image id to load.
Returns:
Loaded COCO annotation list
"""
return self.coco.loadAnns(self.coco.getAnnIds(img_id))
def _process_image_target(self, image: Image.Image, target: List[Any], img_id: int) -> Tuple[Any, Any]:
"""Process the image and target given image id.
Args:
image (PIL.Image): Loaded image given img_id.
target (list): Loaded annotation given img_id.
img_id (int): image id to load.
Returns:
(image, target): pre-processed image and target for the model.
"""
width, height = image.size
image_id = torch.tensor([img_id])
boxes = [obj["bbox"] for obj in target]
# guard against no boxes via resizing
boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4)
boxes[:, 2:] += boxes[:, :2]
boxes[:, 0::2].clamp_(min=0, max=width)
boxes[:, 1::2].clamp_(min=0, max=height)
classes = [obj["category_id"] for obj in target]
classes = torch.tensor(classes, dtype=torch.int64)
keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
boxes = boxes[keep]
classes = classes[keep]
area = torch.tensor([obj["area"] for obj in target])
iscrowd = torch.tensor([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in target])
target = {}
target["boxes"] = boxes
target["labels"] = classes
target["image_id"] = image_id
# for conversion to coco api
target["area"] = area[keep]
target["iscrowd"] = iscrowd[keep]
target["orig_size"] = torch.as_tensor([int(height), int(width)])
target["size"] = torch.as_tensor([int(height), int(width)])
return image, target
def __getitem__(self, index: int) -> Tuple[Any, Any, Any]:
"""Get image, target, image_path given index.
Args:
index (int): index of the image id to load.
Returns:
(image, target, image_path): pre-processed image, target and image_path for the model.
"""
img_id = self.ids[index]
image, image_path = self._load_image(img_id)
target = self._load_target(img_id)
image, target = self._process_image_target(image, target, img_id)
if self.transforms is not None:
image, target = self.transforms(image, target)
return image, target, image_path
def __len__(self) -> int:
"""__len__"""
return len(self.ids)
class ODPredictDataset(Dataset):
"""Base Object Detection Predict Dataset Class."""
def __init__(self, dataset_list: List[Any], label_map_path: str, transforms=None):
"""Initialize the Object Detetion Dataset Class for inference.
Unlike ODDataset, this class does not require COCO JSON file.
Args:
dataset_list (list): list of dataset directory.
label_map_path (str): label mapping path.
transforms: augmentations to apply.
Raises:
FileNotFoundError: If the provided classmap or inference directory does not exist, or no image with a valid extension is found.
"""
self.dataset_list = dataset_list
self.transforms = transforms
if not os.path.exists(label_map_path):
raise FileNotFoundError(f"Provided class map {label_map_path} does not exist!")
# Load classmap and reformat it to COCO categories format
with open(label_map_path, "r") as f:
classmap = [line.rstrip() for line in f.readlines()]
self.label_map = [{"id": i + 1, "name": c} for i, c in enumerate(classmap)]
self.ids = []
for seq in dataset_list:
if not os.path.exists(seq):
raise FileNotFoundError(f"Provided inference directory {seq} does not exist!")
for ext in VALID_IMAGE_EXTENSIONS:
self.ids.extend(glob.glob(seq + f"/*{ext}"))
if len(self.ids) == 0:
raise FileNotFoundError(f"No valid image with extensions {VALID_IMAGE_EXTENSIONS} found in the provided directories")
def _load_image(self, img_path: str) -> Tuple[Image.Image, str]:
"""Load image given image path.
Args:
img_path (str): image path to load.
Returns:
Tuple of the loaded PIL Image and its file path.
"""
return_output = (Image.open(img_path).convert("RGB"), img_path)
return return_output
def __getitem__(self, index: int) -> Tuple[Any, Any, Any]:
"""Get image, target, image_path given index.
Args:
index (int): index of the image id to load.
Returns:
(image, target, image_path): pre-processed image, target and image_path for the model.
"""
img_path = self.ids[index]
image, image_path = self._load_image(img_path)
width, height = image.size
target = {}
target["orig_size"] = torch.as_tensor([int(height), int(width)])
target["size"] = torch.as_tensor([int(height), int(width)])
if self.transforms is not None:
image, target = self.transforms(image, target)
return image, target, image_path
def __len__(self) -> int:
"""__len__"""
return len(self.ids)
def CoCoDataMerge(coco_list):
""" Concatenate COCO Dataset.
We assume that the sharded JSON files were generated using `deformable_detr convert`
where the ids of the sharded JSON are ensured to be unique.
We do not perform ID deduplication for faster data loading.
Args:
coco_list (list): list of COCO Datasets.
Returns:
merged_coco_data (dict) : Merged dictionary in COCO format.
"""
merged_coco_data = {"images": [], "annotations": [], "categories": None}
for idx, coco in enumerate(coco_list):
# Merge all the annotations to single dict
merged_coco_data["images"].extend(coco.dataset["images"])
merged_coco_data["annotations"].extend(coco.dataset["annotations"])
if idx == 0:
merged_coco_data["categories"] = coco.dataset["categories"]
return merged_coco_data
class ConcateODDataset(torch.utils.data.ConcatDataset):
""" Concatenate ODDataset """
def __init__(self, datasets):
"""Initialize the ConcateODDataset Class.
Args:
datasets (iterable): List of datasets to be concatenated.
"""
super(ConcateODDataset, self).__init__(datasets)
self.datasets = list(datasets)
assert len(datasets) > 0, 'datasets should not be an empty iterable'
self.cum_sizes = np.cumsum([len(x) for x in self.datasets])
coco_list = []
for dataset in datasets:
coco_list.append(dataset.coco)
self.coco = COCO(CoCoDataMerge(coco_list))
self.label_map = self.coco.dataset['categories']
def __len__(self) -> int:
"""Returns length of the concatenated dataset."""
return self.cum_sizes[-1]
def __getitem__(self, idx):
"""Get sub-dataset from ConcateODDataset.
Args:
idx (int): index to retrieve.
Returns:
Sub dataset from the list.
"""
super(ConcateODDataset, self).__getitem__(idx)
dataset_index = self.cum_sizes.searchsorted(idx, 'right')
if dataset_index == 0:
dataset_idx = idx
else:
dataset_idx = idx - self.cum_sizes[dataset_index - 1]
return self.datasets[dataset_index][dataset_idx]
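# --- Illustrative usage (a minimal sketch, not part of the original module) ---
# Builds one ODDataset per COCO JSON shard and concatenates them; the paths below
# are placeholders. ODPredictDataset is the annotation-free counterpart used for
# directory-based inference.
if __name__ == "__main__":
    shard_a = ODDataset(json_file="/data/annotations/shard_0.json", dataset_dir="/data/images")
    shard_b = ODDataset(json_file="/data/annotations/shard_1.json", dataset_dir="/data/images")
    merged = ConcateODDataset([shard_a, shard_b])
    image, target, image_path = merged[0]   # target: boxes (xyxy), labels, image_id, sizes, ...

    predict_ds = ODPredictDataset(["/data/inference/images"], "/data/classmap.txt")
    image, target, image_path = predict_ds[0]  # target only carries orig_size / size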
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/deformable_detr/dataloader/od_dataset.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
"""
List serialization code adopted from
https://github.com/facebookresearch/detectron2/blob/main/detectron2/data/common.py
"""
import os
import contextlib
import io
import pickle # nosec B403
import torch
from typing import Dict, List, Any
from pycocotools.coco import COCO
from PIL import Image
import numpy as np
from nvidia_tao_pytorch.pointcloud.pointpillars.pcdet.utils.safe_unpickler import SafeUnpickler
from nvidia_tao_pytorch.cv.deformable_detr.utils.data_source_config import build_data_source_lists
from nvidia_tao_pytorch.cv.deformable_detr.utils.misc import get_global_rank
def load_coco_json(json_file: str, image_root: str) -> List[Dict]:
"""Load COCO json file and return list of dictionaries.
Referenced from detectron2: https://detectron2.readthedocs.io/en/latest/_modules/detectron2/data/datasets/coco.html
Args:
json_file (str): Path to the JSON annotation file.
image_root (str): Path to root directory of images from the annotations.
Returns:
List of COCO annotation dicts.
"""
with contextlib.redirect_stdout(io.StringIO()):
coco_api = COCO(json_file)
img_ids = sorted(coco_api.imgs.keys())
# imgs is a list of dicts, each looks something like:
# {'license': 4,
# 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg',
# 'file_name': 'COCO_val2014_000000001268.jpg',
# 'height': 427,
# 'width': 640,
# 'date_captured': '2013-11-17 05:57:24',
# 'id': 1268}
imgs = coco_api.loadImgs(img_ids)
# anns is a list[list[dict]], where each dict is an annotation
# record for an object. The inner list enumerates the objects in an image
# and the outer list enumerates over images. Example of anns[0]:
# [{'segmentation': [[192.81,
# 247.09,
# ...
# 219.03,
# 249.06]],
# 'area': 1035.749,
# 'iscrowd': 0,
# 'image_id': 1268,
# 'bbox': [192.81, 224.8, 74.73, 33.43],
# 'category_id': 16,
# 'id': 42986},
# ...]
anns = [coco_api.imgToAnns[img_id] for img_id in img_ids]
imgs_anns = list(zip(imgs, anns))
dataset_dicts = []
ann_keys = ["iscrowd", "bbox", "keypoints", "category_id", "area"]
for (img_dict, anno_dict_list) in imgs_anns:
record = {}
record["file_name"] = os.path.join(image_root, img_dict["file_name"])
record["height"] = img_dict["height"]
record["width"] = img_dict["width"]
image_id = record["image_id"] = img_dict["id"]
objs = []
for anno in anno_dict_list:
# Check that the image_id in this annotation is the same as
# the image_id we're looking at.
# This fails only when the data parsing logic or the annotation file is buggy.
# The original COCO valminusminival2014 & minival2014 annotation files
# actually contains bugs that, together with certain ways of using COCO API,
# can trigger this assertion.
assert anno["image_id"] == image_id
assert anno.get("ignore", 0) == 0, '"ignore" in COCO json file is not supported.'
obj = {key: anno[key] for key in ann_keys if key in anno}
if "bbox" in obj and len(obj["bbox"]) == 0:
raise ValueError(
f"One annotation of image {image_id} contains empty 'bbox' value! "
"This json does not have valid COCO format."
)
objs.append(obj)
record["annotations"] = objs
dataset_dicts.append(record)
return dataset_dicts
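# Shape of each returned record (illustrative values only):
# {
#     "file_name": "/path/to/image_root/COCO_val2014_000000001268.jpg",
#     "height": 427,
#     "width": 640,
#     "image_id": 1268,
#     "annotations": [
#         {"iscrowd": 0, "bbox": [192.81, 224.8, 74.73, 33.43], "category_id": 16, "area": 1035.749},
#         ...
#     ],
# }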
def build_shm_dataset(data_sources, transforms):
"""Preload the COCO ann lists to prevent memory leakage from Python.
Args:
data_sources (str): list of different data sources.
transforms (dict): augmentations to apply.
"""
# grab all the json files and concate them into one single dataset
data_source_list = build_data_source_lists(data_sources)
dataset_list = []
for data_source in data_source_list:
image_dir = data_source.image_dir
for _json_file in data_source.dataset_files:
dl = load_coco_json(_json_file, image_root=image_dir)
dataset_list.extend(dl)
dataset = SerializedDatasetFromList(dataset_list, transforms=transforms)
return dataset
class SerializedDatasetFromList(torch.utils.data.Dataset):
"""
Hold memory using serialized objects so that data loader workers can use
shared RAM from master process instead of making a copy in each subprocess.
"""
def __init__(self, lst, transforms=None):
"""Initialize the Serialized Shared Memory COCO-based Dataset.
Reference from this blog: https://ppwwyyxx.com/blog/2022/Demystify-RAM-Usage-in-Multiprocess-DataLoader/
Args:
lst (list): list of dataset dicts.
transforms (dict): augmentations to apply.
"""
def _serialize(data):
buffer = pickle.dumps(data, protocol=4)
return np.frombuffer(buffer, dtype=np.uint8)
if get_global_rank() == 0:
print(f"Serializing {len(lst)} elements to byte tensors and concatenating them all ...")
self._lst = [_serialize(x) for x in lst]
self._addr = np.asarray([len(x) for x in self._lst], dtype=np.int64)
self._addr = np.cumsum(self._addr)
self._lst = np.concatenate(self._lst)
if get_global_rank() == 0:
print(f"Serialized dataset takes {len(self._lst) / 1024 ** 2:.2f} MiB")
self._addr = torch.from_numpy(self._addr)
self._lst = torch.from_numpy(self._lst)
self.transforms = transforms
def __len__(self):
"""__len__"""
return len(self._addr)
def _process_image_target(self, image: Image.Image, target: List[Any], img_id: int):
"""Process the image and target given image id.
Args:
image (PIL.Image): Loaded Pillow Image .
target (list): Loaded annotation given img_id.
img_id (int): image id to load.
Returns:
target (dict): pre-processed target.
"""
width, height = image.size
image_id = torch.tensor([img_id])
boxes = [obj["bbox"] for obj in target]
# guard against no boxes via resizing
boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4)
boxes[:, 2:] += boxes[:, :2]
boxes[:, 0::2].clamp_(min=0, max=width)
boxes[:, 1::2].clamp_(min=0, max=height)
classes = [obj["category_id"] for obj in target]
classes = torch.tensor(classes, dtype=torch.int64)
keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
boxes = boxes[keep]
classes = classes[keep]
area = torch.tensor([obj["area"] for obj in target])
iscrowd = torch.tensor([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in target])
target = {}
target["boxes"] = boxes
target["labels"] = classes
target["image_id"] = image_id
# for conversion to coco api
target["area"] = area[keep]
target["iscrowd"] = iscrowd[keep]
target["orig_size"] = torch.as_tensor([int(height), int(width)])
target["size"] = torch.as_tensor([int(height), int(width)])
return target
def __getitem__(self, idx: int):
"""Get image, target, image_path given index,
Args:
index (int): index of the image id to load
Returns:
(image, target, image_path): pre-processed image, target and image_path for the model
"""
start_addr = 0 if idx == 0 else self._addr[idx - 1].item()
end_addr = self._addr[idx].item()
data_bytes = memoryview(self._lst[start_addr:end_addr].numpy())
# Secure pickle unloading
record = SafeUnpickler(data_bytes, SerializedDatasetFromList).load()
image_path = record['file_name']
image = Image.open(image_path).convert("RGB")
img_id = record["image_id"]
target = record['annotations']
target = self._process_image_target(image, target, img_id)
if self.transforms is not None:
image, target = self.transforms(image, target)
return image, target, image_path
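# --- Illustrative usage (a minimal sketch, not part of the original module) ---
# The data source entry below assumes the {"image_dir", "json_file"} layout consumed
# by build_data_source_lists(); paths and DataLoader settings are placeholders.
# Because all records live in two shared torch tensors, DataLoader worker processes
# read them without duplicating the Python list of dicts in every subprocess.
if __name__ == "__main__":
    from torch.utils.data import DataLoader
    from nvidia_tao_pytorch.cv.deformable_detr.utils.misc import collate_fn

    demo_sources = [{"image_dir": "/data/train/images", "json_file": "/data/train/shard_0.json"}]
    dataset = build_shm_dataset(demo_sources, transforms=None)
    image, target, image_path = dataset[0]
    loader = DataLoader(dataset, batch_size=4, num_workers=8, collate_fn=collate_fn)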
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/deformable_detr/dataloader/serialized_dataset.py |