| python_code | repo_name | file_path |
|---|---|---|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perform continuous RetinaNet training on a tfrecords dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
from math import ceil
from multiprocessing import cpu_count
import os
from google.protobuf.json_format import MessageToDict
from keras import backend as K
from keras.callbacks import EarlyStopping, TerminateOnNaN
import numpy as np
import tensorflow as tf
from nvidia_tao_tf1.core.export._quantized import check_for_quantized_layers
from nvidia_tao_tf1.cv.common.callbacks.enc_model_saver_callback import KerasModelSaver
from nvidia_tao_tf1.cv.common.callbacks.loggers import TAOStatusLogger
from nvidia_tao_tf1.cv.common.evaluator.ap_evaluator import APEvaluator
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.common.mlops.clearml import get_clearml_task
from nvidia_tao_tf1.cv.common.mlops.wandb import check_wandb_logged_in, initialize_wandb
from nvidia_tao_tf1.cv.common.utils import ap_mode_dict
from nvidia_tao_tf1.cv.common.utils import build_class_weights
from nvidia_tao_tf1.cv.common.utils import build_lrs_from_config
from nvidia_tao_tf1.cv.common.utils import (
build_optimizer_from_config,
build_regularizer_from_config
)
from nvidia_tao_tf1.cv.common.utils import check_tf_oom, hvd_keras, initialize
from nvidia_tao_tf1.cv.common.utils import OneIndexedCSVLogger as CSVLogger
from nvidia_tao_tf1.cv.common.utils import parse_model_load_from_config
from nvidia_tao_tf1.cv.retinanet.box_coder.input_encoder import InputEncoder
from nvidia_tao_tf1.cv.retinanet.box_coder.input_encoder_tf import InputEncoderTF
from nvidia_tao_tf1.cv.retinanet.builders import eval_builder, input_builder
from nvidia_tao_tf1.cv.retinanet.callbacks.retinanet_metric_callback import RetinaMetricCallback
from nvidia_tao_tf1.cv.retinanet.losses.focal_loss import FocalLoss
from nvidia_tao_tf1.cv.retinanet.utils.helper import eval_str
from nvidia_tao_tf1.cv.retinanet.utils.model_io import load_model_as_pretrain
from nvidia_tao_tf1.cv.retinanet.utils.spec_loader import load_experiment_spec
from nvidia_tao_tf1.cv.ssd.callbacks.tb_callback import SSDTensorBoard, SSDTensorBoardImage
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level='INFO')
logger = logging.getLogger(__name__)
verbose = 0
def run_experiment(config_path, results_dir, key, root_path=None, initial_epoch=0):
"""
Launch experiment that trains the model.
NOTE: Do not change the argument names without verifying that cluster submission works.
Args:
config_path (str): Path to a text file containing a complete experiment configuration.
results_dir (str): Path to a folder where various training outputs will be written.
If the folder does not already exist, it will be created.
key (str): Encryption key.
root_path (str): Root path for AVDC training. INTERNAL only.
initial_epoch (int): Epoch at which to resume training. A positive value
overrides the initial epoch parsed from the spec.
"""
hvd = hvd_keras()
hvd.init()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = str(hvd.local_rank())
sess = tf.Session(config=config)
K.set_session(sess)
verbose = 1 if hvd.rank() == 0 else 0
is_master = hvd.rank() == 0
if is_master and not os.path.exists(results_dir):
os.makedirs(results_dir)
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=is_master,
verbosity=1,
append=True
)
)
# Load experiment spec.
if config_path is not None:
# Create an experiment_pb2.Experiment object from the input file.
logger.info("Loading experiment spec at %s.", config_path)
# The spec in config_path has to be complete.
# Default spec is not merged into experiment_spec.
experiment_spec = load_experiment_spec(config_path, merge_from_default=False)
else:
logger.info("Loading default experiment spec.")
experiment_spec = load_experiment_spec()
initialize(experiment_spec.random_seed, hvd)
training_config = experiment_spec.training_config
if is_master:
if training_config.HasField("visualizer"):
if training_config.visualizer.HasField("clearml_config"):
clearml_config = training_config.visualizer.clearml_config
get_clearml_task(clearml_config, "retinanet")
if training_config.visualizer.HasField("wandb_config"):
wandb_config = training_config.visualizer.wandb_config
wandb_logged_in = check_wandb_logged_in()
wandb_name = f"{wandb_config.name}" if wandb_config.name else \
"retinanet_training"
wandb_stream_config = MessageToDict(
experiment_spec,
preserving_proto_field_name=True,
including_default_value_fields=True
)
initialize_wandb(
project=wandb_config.project if wandb_config.project else None,
entity=wandb_config.entity if wandb_config.entity else None,
config=wandb_stream_config,
notes=wandb_config.notes if wandb_config.notes else None,
tags=wandb_config.tags if wandb_config.tags else None,
sync_tensorboard=True,
save_code=False,
results_dir=results_dir,
wandb_logged_in=wandb_logged_in,
name=wandb_name
)
# Load training parameters
num_epochs = experiment_spec.training_config.num_epochs
ckpt_interval = experiment_spec.training_config.checkpoint_interval or 1
train_bs = experiment_spec.training_config.batch_size_per_gpu
# Class mapping
cls_mapping = experiment_spec.dataset_config.target_class_mapping
classes = sorted({str(x) for x in cls_mapping.values()})
# n_classes + 1 for background class
n_classes = len(classes) + 1
# Choose DALI
use_dali = False
if experiment_spec.dataset_config.data_sources[0].tfrecords_path != "":
use_dali = True
logger.info("Using DALI dataloader...")
# build dataset
train_dataset = input_builder.build(experiment_spec,
training=True,
device_id=hvd.local_rank(),
root_path=root_path,
shard_id=hvd.rank(),
num_shards=hvd.size(),
use_dali=use_dali)
val_dataset = input_builder.build(experiment_spec,
training=False,
root_path=root_path,
use_dali=False)
# configure regularizer
kr = build_regularizer_from_config(experiment_spec.training_config.regularizer)
# configure optimizer
optim = build_optimizer_from_config(experiment_spec.training_config.optimizer,
clipnorm=2.0)
focal_loss = FocalLoss(loc_loss_weight=experiment_spec.retinanet_config.loss_loc_weight,
alpha=experiment_spec.retinanet_config.focal_loss_alpha,
gamma=experiment_spec.retinanet_config.focal_loss_gamma)
# configure model loading
load_path, load_graph, reset_optim, init_epoch = \
parse_model_load_from_config(experiment_spec.training_config)
if initial_epoch > 0:
init_epoch = initial_epoch
model_train, model_eval, optim_load = load_model_as_pretrain(
load_path,
load_graph,
n_classes,
experiment_spec=experiment_spec,
input_tensor=train_dataset.images if use_dali else None,
kernel_regularizer=kr,
key=key,
resume_training=not reset_optim)
# check if the loaded model is QAT
if not experiment_spec.training_config.enable_qat and check_for_quantized_layers(model_eval):
raise ValueError("QAT training is disabled but the pretrained model is a QAT model.")
if experiment_spec.training_config.enable_qat and not check_for_quantized_layers(model_eval):
raise ValueError("QAT training is enabled but the pretrained model is not a QAT model.")
if optim_load is not None:
optim = optim_load
# set encoder for data sequences
predictor_sizes = [model_train.get_layer('P3_relu').output_shape[2:],
model_train.get_layer('P4_relu').output_shape[2:],
model_train.get_layer('P5_relu').output_shape[2:],
model_train.get_layer('P6_relu').output_shape[2:],
model_train.get_layer('P7_relu').output_shape[2:]]
# encoder parameters
img_height = experiment_spec.augmentation_config.output_height
img_width = experiment_spec.augmentation_config.output_width
scales = eval_str(experiment_spec.retinanet_config.scales)
aspect_ratios_global = eval_str(experiment_spec.retinanet_config.aspect_ratios_global)
aspect_ratios_per_layer = eval_str(experiment_spec.retinanet_config.aspect_ratios)
steps = eval_str(experiment_spec.retinanet_config.steps)
offsets = eval_str(experiment_spec.retinanet_config.offsets)
variances = eval_str(experiment_spec.retinanet_config.variances)
min_scale = experiment_spec.retinanet_config.min_scale
max_scale = experiment_spec.retinanet_config.max_scale
two_boxes_for_ar1 = experiment_spec.retinanet_config.two_boxes_for_ar1
clip_boxes = experiment_spec.retinanet_config.clip_boxes
pos_iou_thresh = experiment_spec.retinanet_config.pos_iou_thresh or 0.5
neg_iou_thresh = experiment_spec.retinanet_config.neg_iou_thresh or 0.4
n_anchor_levels = experiment_spec.retinanet_config.n_anchor_levels or 3
# set the background weights
cls_weights = [1.0]
cls_weights.extend(build_class_weights(experiment_spec))
# encoder for keras seq training
input_encoder = InputEncoder(
img_height=img_height,
img_width=img_width,
n_classes=n_classes,
predictor_sizes=predictor_sizes,
scales=scales,
min_scale=min_scale,
max_scale=max_scale,
aspect_ratios_global=aspect_ratios_global,
aspect_ratios_per_layer=aspect_ratios_per_layer,
two_boxes_for_ar1=two_boxes_for_ar1,
steps=steps,
n_anchor_levels=n_anchor_levels,
offsets=offsets,
clip_boxes=clip_boxes,
variances=variances,
pos_iou_threshold=pos_iou_thresh,
neg_iou_limit=neg_iou_thresh,
class_weights=cls_weights)
input_encoder_tf = InputEncoderTF(
img_height=img_height,
img_width=img_width,
n_classes=n_classes,
predictor_sizes=predictor_sizes,
scales=scales,
min_scale=min_scale,
max_scale=max_scale,
aspect_ratios_global=aspect_ratios_global,
aspect_ratios_per_layer=aspect_ratios_per_layer,
two_boxes_for_ar1=two_boxes_for_ar1,
steps=steps,
n_anchor_levels=n_anchor_levels,
offsets=offsets,
clip_boxes=clip_boxes,
variances=variances,
pos_iou_threshold=pos_iou_thresh,
neg_iou_limit=neg_iou_thresh,
gt_normalized=True,
class_weights=cls_weights)
# encoder for eval.
def eval_encode_fn(gt_label):
bboxes = gt_label[:, -4:]
cls_id = gt_label[:, 0:1]
gt_label_without_diff = np.concatenate((cls_id, bboxes), axis=-1)
return (input_encoder(gt_label_without_diff), gt_label)
# set encode_fn
train_dataset.set_encoder(input_encoder_tf if use_dali else input_encoder)
val_dataset.set_encoder(eval_encode_fn)
# configure LR scheduler
iters_per_epoch = int(ceil(train_dataset.n_samples / hvd.size() / train_bs))
max_iterations = num_epochs * iters_per_epoch
lr_scheduler = build_lrs_from_config(experiment_spec.training_config.learning_rate,
max_iterations, hvd.size())
init_step = init_epoch * iters_per_epoch
lr_scheduler.reset(init_step)
callbacks = [hvd.callbacks.BroadcastGlobalVariablesCallback(0),
hvd.callbacks.MetricAverageCallback(),
lr_scheduler,
TerminateOnNaN()]
model_train.compile(optimizer=hvd.DistributedOptimizer(optim),
loss=focal_loss.compute_loss,
target_tensors=[train_dataset.labels] if use_dali else None)
if hvd.rank() == 0:
model_train.summary()
logger.info("Number of samples in the training dataset:\t{:>6}"
.format(train_dataset.n_samples))
logger.info("Number of samples in the validation dataset:\t{:>6}"
.format(val_dataset.n_samples))
if not os.path.exists(os.path.join(results_dir, 'weights')):
os.mkdir(os.path.join(results_dir, 'weights'))
arch_name = experiment_spec.retinanet_config.arch
if arch_name in ['resnet', 'darknet', 'vgg']:
# append nlayers to arch_name
arch_name = arch_name + str(experiment_spec.retinanet_config.nlayers)
ckpt_path = str(os.path.join(results_dir, 'weights',
'retinanet_' + arch_name + '_epoch_{epoch:03d}.hdf5'))
# This callback saves the encrypted model at each checkpoint interval.
model_checkpoint = KerasModelSaver(ckpt_path, key, ckpt_interval, last_epoch=num_epochs,
verbose=1)
callbacks.append(model_checkpoint)
if len(val_dataset) > 0:
# Load evaluation parameters
validation_interval = experiment_spec.eval_config.validation_period_during_training
ap_mode = experiment_spec.eval_config.average_precision_mode
matching_iou = experiment_spec.eval_config.matching_iou_threshold
# Load NMS parameters
conf_th = experiment_spec.nms_config.confidence_threshold
clustering_iou = experiment_spec.nms_config.clustering_iou_threshold
top_k = experiment_spec.nms_config.top_k
nms_max_output = top_k
# build eval graph
K.set_learning_phase(0)
built_eval_model = eval_builder.build(model_eval, conf_th,
clustering_iou, top_k,
nms_max_output,
include_encoded_pred=True)
evaluator = APEvaluator(n_classes,
conf_thres=experiment_spec.nms_config.confidence_threshold,
matching_iou_threshold=matching_iou,
average_precision_mode=ap_mode_dict[ap_mode])
focal_loss_val = FocalLoss(
loc_loss_weight=experiment_spec.retinanet_config.loss_loc_weight,
alpha=experiment_spec.retinanet_config.focal_loss_alpha,
gamma=experiment_spec.retinanet_config.focal_loss_gamma)
n_box, n_attr = model_eval.layers[-1].output_shape[1:]
op_pred = tf.placeholder(tf.float32, shape=(None, n_box, n_attr))
# +1 for class weights
op_true = tf.placeholder(tf.float32, shape=(None, n_box, n_attr+1))
loss_ops = [op_true, op_pred,
focal_loss_val.compute_loss(op_true, op_pred)]
eval_callback = RetinaMetricCallback(
ap_evaluator=evaluator,
built_eval_model=built_eval_model,
eval_sequence=val_dataset,
loss_ops=loss_ops,
eval_model=model_eval,
metric_interval=validation_interval or 10,
last_epoch=num_epochs,
verbose=verbose)
K.set_learning_phase(1)
callbacks.append(eval_callback)
# K.set_learning_phase(1)
if hvd.rank() == 0:
# This callback logs loss and mAP
csv_path = os.path.join(results_dir, 'retinanet_training_log_' + arch_name + '.csv')
csv_logger = CSVLogger(filename=csv_path,
separator=',',
append=True)
callbacks.append(csv_logger)
status_logger = TAOStatusLogger(
results_dir,
append=True,
num_epochs=num_epochs,
is_master=is_master,
)
callbacks.append(status_logger)
# init EarlyStopping callback:
if experiment_spec.training_config.HasField("early_stopping"):
es_config = experiment_spec.training_config.early_stopping
# align the validation name
if es_config.monitor == "val_loss":
es_config.monitor = "validation_loss"
if es_config.monitor == "validation_loss":
if len(val_dataset) <= 0:
raise ValueError("Validation dataset is needed for "
"using validation_loss as the early stopping monitor")
if experiment_spec.eval_config.validation_period_during_training != 1:
raise ValueError("validation_period_during_training should be 1 for "
"using validation_loss as the early stopping monitor")
es_cb = EarlyStopping(monitor=es_config.monitor,
min_delta=es_config.min_delta,
patience=es_config.patience,
verbose=True)
callbacks.append(es_cb)
if hvd.rank() == 0:
if experiment_spec.training_config.visualizer.enabled:
tb_log_dir = os.path.join(results_dir, "events")
tb_cb = SSDTensorBoard(log_dir=tb_log_dir, write_graph=False)
callbacks.append(tb_cb)
tbimg_cb = SSDTensorBoardImage(tb_log_dir, experiment_spec, variances,
experiment_spec.training_config.visualizer.num_images)
fetches = [tf.assign(tbimg_cb.img, model_train.inputs[0], validate_shape=False),
tf.assign(tbimg_cb.label, model_train.targets[0], validate_shape=False)]
model_train._function_kwargs = {'fetches': fetches}
callbacks.append(tbimg_cb)
if use_dali:
model_train.fit(
steps_per_epoch=iters_per_epoch,
epochs=num_epochs,
callbacks=callbacks,
initial_epoch=init_epoch,
verbose=verbose)
else:
model_train.fit_generator(
generator=train_dataset,
steps_per_epoch=iters_per_epoch,
epochs=num_epochs,
callbacks=callbacks,
initial_epoch=init_epoch,
workers=experiment_spec.training_config.n_workers or (cpu_count()-1),
shuffle=False,
use_multiprocessing=experiment_spec.training_config.use_multiprocessing,
max_queue_size=experiment_spec.training_config.max_queue_size or 20,
verbose=verbose)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Training finished successfully."
)
def build_command_line_parser(parser=None):
"""Build the command line parser using argparse.
Args:
parser (subparser): Provided from the wrapper script to build a chained
parser mechanism.
Returns:
parser
"""
if parser is None:
parser = argparse.ArgumentParser(prog='train', description='Train a RetinaNet model.')
parser.add_argument(
'-e',
'--experiment_spec_file',
type=str,
required=True,
help='Path to spec file. Absolute path or relative to working directory.')
parser.add_argument(
'-r',
'--results_dir',
type=str,
required=True,
help='Path to a folder where experiment outputs should be written.'
)
parser.add_argument(
'-k',
'--key',
type=str,
default="",
required=False,
help='Key to save or load a .tlt model.'
)
parser.add_argument(
'--root_path',
type=str,
required=False,
help=argparse.SUPPRESS
)
parser.add_argument(
'--initial_epoch',
type=int,
default=0,
help=argparse.SUPPRESS
)
return parser
def parse_command_line_arguments(args=None):
"""Simple function to parse command line arguments."""
parser = build_command_line_parser(args)
return parser.parse_args(args)
@check_tf_oom
def main(args=None):
"""Run the training process."""
args = parse_command_line_arguments(args)
try:
run_experiment(config_path=args.experiment_spec_file,
results_dir=args.results_dir,
key=args.key,
root_path=args.root_path,
initial_epoch=args.initial_epoch)
logger.info("Training finished successfully.")
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Training was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
logger.info("Training was interrupted.")
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/scripts/train.py |
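A minimal sketch of exercising the command-line parser defined in train.py above; the spec path, results directory, and key are placeholder values, not files shipped with the repo.

from nvidia_tao_tf1.cv.retinanet.scripts.train import parse_command_line_arguments

# Placeholder arguments mirroring the -e/-r/-k flags registered above.
args = parse_command_line_arguments([
    "-e", "/workspace/specs/retinanet_train.txt",  # assumed spec path
    "-r", "/workspace/results",                    # output directory
    "-k", "nvidia_tlt",                            # encryption key
])
print(args.experiment_spec_file, args.results_dir, args.key)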
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to prune the RetinaNet TLT model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.common.magnet_prune import ( # noqa pylint: disable=unused-import
build_command_line_parser,
main,
)
if __name__ == "__main__":
try:
main(sys.argv[1:])
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Pruning finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Pruning was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/scripts/prune.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple Stand-alone inference script for RetinaNet models trained using TAO Toolkit."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import os
import keras.backend as K
import numpy as np
from nvidia_tao_tf1.cv.common.inferencer.inferencer import Inferencer
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.common.utils import check_tf_oom
from nvidia_tao_tf1.cv.retinanet.builders import eval_builder, input_builder
from nvidia_tao_tf1.cv.retinanet.utils.model_io import load_model
from nvidia_tao_tf1.cv.retinanet.utils.spec_loader import load_experiment_spec
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level='INFO')
def build_command_line_parser(parser=None):
"""Build the command line parser using argparse.
Args:
parser (subparser): Provided from the wrapper script to build a chained
parser mechanism.
Returns:
parser
"""
if parser is None:
parser = argparse.ArgumentParser(description='RetinaNet Inference Tool')
parser.add_argument('-m',
'--model_path',
type=str,
required=True,
help='Path to a TLT model or TensorRT engine.')
parser.add_argument('-i',
'--image_dir',
required=True,
type=str,
help='The path to input image or directory.')
parser.add_argument('-k',
'--key',
type=str,
default="",
help='Key to save or load a .tlt model. Must be present if -m is a TLT model.')
parser.add_argument('-e',
'--experiment_spec',
required=True,
type=str,
help='Path to an experiment spec file for training.')
parser.add_argument('-t',
'--threshold',
type=float,
default=0.3,
help='Confidence threshold for inference.')
parser.add_argument('-r',
'--results_dir',
type=str,
default='/tmp',
required=False,
help='Output directory where the status log is saved.')
parser.add_argument('-b',
'--batch_size',
type=int,
required=False,
default=1,
help=argparse.SUPPRESS)
return parser
def parse_command_line(args=None):
"""Simple function to parse command line arguments."""
parser = build_command_line_parser(args)
return parser.parse_args(args)
def keras_output_process_fn(inferencer, y_encoded):
"function to process keras model output."
return y_encoded
def trt_output_process_fn(inferencer, y_encoded):
"function to process TRT model output."
det_out, keep_k = y_encoded
result = []
for idx, k in enumerate(keep_k.reshape(-1)):
det = det_out[idx].reshape(-1, 7)[:k]
xmin = det[:, 3] * inferencer.model_input_width
ymin = det[:, 4] * inferencer.model_input_height
xmax = det[:, 5] * inferencer.model_input_width
ymax = det[:, 6] * inferencer.model_input_height
cls_id = det[:, 1]
conf = det[:, 2]
result.append(np.stack((cls_id, conf, xmin, ymin, xmax, ymax), axis=-1))
return result
def inference(arguments):
'''Make inference on a folder of images.'''
if not os.path.exists(arguments.results_dir):
os.mkdir(arguments.results_dir)
status_file = os.path.join(arguments.results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=True,
verbosity=1,
append=True
)
)
s_logger = status_logging.get_status_logger()
s_logger.write(
status_level=status_logging.Status.STARTED,
message="Starting RetinaNet inference."
)
config_path = arguments.experiment_spec
if config_path is not None:
# Create an experiment_pb2.Experiment object from the input file.
logger.info("Loading experiment spec at %s.", config_path)
# The spec in config_path has to be complete.
# Default spec is not merged into experiment_spec.
experiment_spec = load_experiment_spec(config_path, merge_from_default=False)
else:
logger.info("Loading default experiment spec.")
experiment_spec = load_experiment_spec()
K.clear_session() # Clear previous models from memory.
K.set_learning_phase(0)
val_dataset = input_builder.build(experiment_spec,
training=False)
class_mapping = {v : k for k, v in val_dataset.classes.items()}
img_mean = experiment_spec.augmentation_config.image_mean
if experiment_spec.augmentation_config.output_channel == 3:
if img_mean:
img_mean = [img_mean['b'], img_mean['g'], img_mean['r']]
else:
img_mean = [103.939, 116.779, 123.68]
else:
if img_mean:
img_mean = [img_mean['l']]
else:
img_mean = [117.3786]
if os.path.splitext(arguments.model_path)[1] in ['.h5', '.tlt', '.hdf5']:
model = load_model(arguments.model_path, experiment_spec, key=arguments.key)
# Load evaluation parameters
conf_th = experiment_spec.nms_config.confidence_threshold
iou_th = experiment_spec.nms_config.clustering_iou_threshold
top_k = experiment_spec.nms_config.top_k
nms_max_output = top_k
# Build evaluation model
model = eval_builder.build(model, conf_th, iou_th, top_k, nms_max_output)
inferencer = Inferencer(keras_model=model,
batch_size=experiment_spec.eval_config.batch_size,
infer_process_fn=keras_output_process_fn,
class_mapping=class_mapping,
img_mean=img_mean,
threshold=arguments.threshold)
print("Using TLT model for inference, setting batch size to the one in eval_config:",
experiment_spec.eval_config.batch_size)
else:
inferencer = Inferencer(trt_engine_path=arguments.model_path,
infer_process_fn=trt_output_process_fn,
class_mapping=class_mapping,
img_mean=img_mean,
threshold=arguments.threshold)
print("Using TensorRT engine for inference, setting batch size to engine's one:",
inferencer.batch_size)
out_image_path = os.path.join(arguments.results_dir, "images_annotated")
out_label_path = os.path.join(arguments.results_dir, "labels")
os.makedirs(out_image_path, exist_ok=True)
os.makedirs(out_label_path, exist_ok=True)
inferencer.infer(arguments.image_dir, out_image_path, out_label_path)
s_logger.write(
status_level=status_logging.Status.SUCCESS,
message="Inference finished successfully."
)
@check_tf_oom
def main(args=None):
"""Run the inference process."""
try:
args = parse_command_line(args)
inference(args)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Inference was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/scripts/inference.py |
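For reference, a self-contained sketch of the TensorRT output decoding performed by trt_output_process_fn above; the stub inferencer and detection values are invented for illustration, and the function is assumed to be in scope from inference.py.

import numpy as np

class _StubInferencer:
    # Carries only the two attributes trt_output_process_fn reads.
    model_input_width = 512
    model_input_height = 512

# One image with keep_k = 2 valid rows; each flattened row is
# (image_id, cls_id, conf, xmin, ymin, xmax, ymax) with normalized coordinates.
det_out = np.array([[0., 1., 0.9, 0.10, 0.20, 0.50, 0.60,
                     0., 2., 0.7, 0.30, 0.30, 0.80, 0.90]], dtype=np.float32)
keep_k = np.array([[2]], dtype=np.int32)

result = trt_output_process_fn(_StubInferencer(), (det_out, keep_k))
print(result[0])  # rows of (cls_id, conf, xmin, ymin, xmax, ymax) in pixels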
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple Stand-alone evaluate script for RetinaNet models trained using TAO Toolkit."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import os
import sys
import keras.backend as K
from keras.utils.data_utils import OrderedEnqueuer
import numpy as np
import tensorflow as tf
from tqdm import trange
from nvidia_tao_tf1.cv.common.evaluator.ap_evaluator import APEvaluator
from nvidia_tao_tf1.cv.common.inferencer.inferencer import Inferencer
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.common.utils import ap_mode_dict, check_tf_oom
from nvidia_tao_tf1.cv.retinanet.builders import eval_builder, input_builder
from nvidia_tao_tf1.cv.retinanet.utils.model_io import load_model
from nvidia_tao_tf1.cv.retinanet.utils.spec_loader import load_experiment_spec
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level='INFO')
def build_command_line_parser(parser=None):
'''Parse command line arguments.'''
if parser is None:
parser = argparse.ArgumentParser(description='Evaluate a RetinaNet model.')
parser.add_argument('-m',
'--model_path',
help='Path to a RetinaNet Keras model or TensorRT engine.',
required=True,
type=str)
parser.add_argument('-k',
'--key',
type=str,
default="",
help='Key to save or load a .tlt model.')
parser.add_argument('-e',
'--experiment_spec',
required=False,
type=str,
help='Experiment spec file for training and evaluation.')
parser.add_argument('-r',
'--results_dir',
type=str,
default='/tmp',
required=False,
help='Output directory where the status log is saved.')
parser.add_argument('-i',
'--image_dir',
type=str,
required=False,
default=None,
help=argparse.SUPPRESS)
parser.add_argument('-l',
'--label_dir',
type=str,
required=False,
help=argparse.SUPPRESS)
parser.add_argument('-b',
'--batch_size',
type=int,
required=False,
default=1,
help=argparse.SUPPRESS)
return parser
def parse_command_line(args=None):
"""Simple function to parse command line arguments."""
parser = build_command_line_parser(args)
return parser.parse_args(args)
def keras_output_process_fn(inferencer, y_encoded):
"""function to process keras model output."""
return y_encoded
def trt_output_process_fn(inferencer, y_encoded):
"""function to process TRT model output."""
det_out, keep_k = y_encoded
result = []
for idx, k in enumerate(keep_k.reshape(-1)):
det = det_out[idx].reshape(-1, 7)[:k]
xmin = det[:, 3] * inferencer.model_input_width
ymin = det[:, 4] * inferencer.model_input_height
xmax = det[:, 5] * inferencer.model_input_width
ymax = det[:, 6] * inferencer.model_input_height
cls_id = det[:, 1]
conf = det[:, 2]
result.append(np.stack((cls_id, conf, xmin, ymin, xmax, ymax), axis=-1))
return result
def evaluate(arguments):
'''Run evaluation.'''
if not os.path.exists(arguments.results_dir):
os.mkdir(arguments.results_dir)
status_file = os.path.join(arguments.results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=True,
verbosity=1,
append=True
)
)
s_logger = status_logging.get_status_logger()
s_logger.write(
status_level=status_logging.Status.STARTED,
message="Starting RetinaNet evaluation."
)
config_path = arguments.experiment_spec
if config_path is not None:
# Create an experiment_pb2.Experiment object from the input file.
logger.info("Loading experiment spec at %s.", config_path)
# The spec in config_path has to be complete.
# Default spec is not merged into experiment_spec.
experiment_spec = load_experiment_spec(config_path, merge_from_default=False)
else:
logger.info("Loading default class experiment spec.")
experiment_spec = load_experiment_spec()
K.clear_session() # Clear previous models from memory.
if os.path.splitext(arguments.model_path)[1] in ['.h5', '.tlt', '.hdf5']:
K.set_learning_phase(0)
# load model
model = load_model(arguments.model_path, experiment_spec, key=arguments.key)
# Load NMS parameters
conf_th = experiment_spec.nms_config.confidence_threshold
clustering_iou = experiment_spec.nms_config.clustering_iou_threshold
top_k = experiment_spec.nms_config.top_k
nms_max_output = top_k
# build eval graph
built_eval_model = eval_builder.build(model, conf_th,
clustering_iou, top_k,
nms_max_output)
inferencer = Inferencer(keras_model=built_eval_model,
batch_size=experiment_spec.eval_config.batch_size,
infer_process_fn=keras_output_process_fn,
class_mapping=None,
threshold=experiment_spec.nms_config.confidence_threshold)
else:
# Works in python 3.6
cpu_cnt = os.cpu_count()
if cpu_cnt is None:
cpu_cnt = 1
session_config = tf.compat.v1.ConfigProto(
device_count={'GPU' : 0, 'CPU': cpu_cnt}
)
session = tf.Session(config=session_config)
# Pin TF to CPU to avoid TF & TRT CUDA context conflict
K.set_session(session)
inferencer = Inferencer(trt_engine_path=arguments.model_path,
infer_process_fn=trt_output_process_fn,
batch_size=experiment_spec.eval_config.batch_size,
class_mapping=None,
threshold=experiment_spec.nms_config.confidence_threshold)
print("Using TLT model for inference, setting batch size to "
f"{experiment_spec.eval_config.batch_size} in eval_config")
val_dataset = input_builder.build(experiment_spec,
training=False)
classes = val_dataset.classes
class_mapping = {v : k for k, v in classes.items()}
logger.info("Number of batches in the validation dataset:\t{:>6}".format(len(val_dataset)))
# Load evaluation parameters
ap_mode = experiment_spec.eval_config.average_precision_mode
matching_iou = experiment_spec.eval_config.matching_iou_threshold
matching_iou = matching_iou if matching_iou > 0 else 0.5
# initialize evaluator
evaluator = APEvaluator(len(classes) + 1,
conf_thres=experiment_spec.nms_config.confidence_threshold,
matching_iou_threshold=matching_iou,
average_precision_mode=ap_mode_dict[ap_mode])
print("Using TLT model for inference, setting batch size to the one in eval_config:",
experiment_spec.eval_config.batch_size)
# Prepare labels
gt_labels = []
pred_labels = []
tr = trange(len(val_dataset), file=sys.stdout)
tr.set_description('Producing predictions')
enqueuer = OrderedEnqueuer(val_dataset, use_multiprocessing=False)
enqueuer.start(workers=max(os.cpu_count() - 1, 1), max_queue_size=20)
output_generator = enqueuer.get()
output_height = val_dataset.output_height
output_width = val_dataset.output_width
# Loop over all batches.
for _ in tr:
# Generate batch.
batch_X, batch_labs = next(output_generator)
y_pred = inferencer._predict_batch(batch_X)
gt_labels.extend(batch_labs)
conf_thres = experiment_spec.nms_config.confidence_threshold
for i in range(len(y_pred)):
y_pred_valid = y_pred[i][y_pred[i][:, 1] > conf_thres]
y_pred_valid[..., 2] = np.clip(y_pred_valid[..., 2].round(), 0.0,
output_width)
y_pred_valid[..., 3] = np.clip(y_pred_valid[..., 3].round(), 0.0,
output_height)
y_pred_valid[..., 4] = np.clip(y_pred_valid[..., 4].round(), 0.0,
output_width)
y_pred_valid[..., 5] = np.clip(y_pred_valid[..., 5].round(), 0.0,
output_height)
pred_labels.append(y_pred_valid)
enqueuer.stop()
results = evaluator(gt_labels, pred_labels, verbose=True)
_, average_precisions = results
mean_average_precision = np.mean(average_precisions[1:])
print("*******************************")
for i in range(len(classes)):
print("{:<14}{:<6}{}".format(
class_mapping[i+1], 'AP', round(average_precisions[i+1], 3)))
print("{:<14}{:<6}{}".format('', 'mAP', round(mean_average_precision, 3)))
print("*******************************")
s_logger.kpi.update({'mAP': float(mean_average_precision)})
s_logger.write(
status_level=status_logging.Status.SUCCESS,
message="Evaluation finished successfully."
)
@check_tf_oom
def main(args=None):
"""Run the evaluation process."""
try:
args = parse_command_line(args)
evaluate(args)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Evaluation was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/scripts/evaluate.py |
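A toy illustration of the final mAP reduction in evaluate() above: index 0 of average_precisions is the background class, so the mean is taken over the remaining entries. The AP values here are invented.

import numpy as np

average_precisions = np.array([0.0, 0.81, 0.64, 0.72])  # [background, 3 foreground classes]
mean_average_precision = np.mean(average_precisions[1:])
print(round(float(mean_average_precision), 3))  # 0.723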
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dump kitti label for KPI computation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import os
import shutil
from keras.applications.imagenet_utils import preprocess_input
import keras.backend as K
import numpy as np
from PIL import Image
from nvidia_tao_tf1.core.utils.path_utils import expand_path
from nvidia_tao_tf1.cv.retinanet.builders import eval_builder
from nvidia_tao_tf1.cv.retinanet.utils.model_io import load_model
from nvidia_tao_tf1.cv.retinanet.utils.spec_loader import load_experiment_spec
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level='INFO')
image_extensions = ['.jpg', '.jpeg', '.JPG', '.JPEG', '.png', '.PNG']
def parse_command_line():
'''Parse command line arguments.'''
parser = argparse.ArgumentParser(description='Keras KPI Sequence Inference Tool')
parser.add_argument('-m',
'--model',
help='Keras model file',
default=None)
parser.add_argument('-i',
'--in_seq_dir',
help='Directory containing the input image sequences.')
parser.add_argument('-e',
'--config_path',
help='Experiment spec file for training')
parser.add_argument('-o',
'--out_seq_dir',
help='Directory for the output KITTI labels.'
)
parser.add_argument('-k',
'--key',
required=False,
type=str,
default="",
help='Key to save or load a .tlt model.')
parser.add_argument('-t',
'--out_thres',
type=float,
help='Confidence score threshold for dumping KITTI labels.',
default=0.3
)
arguments = parser.parse_args()
return arguments
def inference(arguments):
'''Make inference on a folder of images.'''
config_path = arguments.config_path
if config_path is not None:
# Create an experiment_pb2.Experiment object from the input file.
logger.info("Loading experiment spec at %s.", config_path)
# The spec in config_path has to be complete.
# Default spec is not merged into experiment_spec.
experiment_spec = load_experiment_spec(config_path, merge_from_default=False)
else:
logger.info("Loading default experiment spec.")
experiment_spec = load_experiment_spec()
K.clear_session() # Clear previous models from memory.
model = load_model(arguments.model, experiment_spec, key=arguments.key)
# Load evaluation parameters
conf_th = experiment_spec.nms_config.confidence_threshold
iou_th = experiment_spec.nms_config.clustering_iou_threshold
top_k = experiment_spec.nms_config.top_k
nms_max_output = top_k
# Build evaluation model
model = eval_builder.build(model, conf_th, iou_th, top_k, nms_max_output)
img_channel = model.layers[0].output_shape[-3]
img_width = model.layers[0].output_shape[-1]
img_height = model.layers[0].output_shape[-2]
# check if it's a monochrome model or RGB model
img_mode = 'RGB' if img_channel == 3 else 'L'
classes = sorted({str(x) for x in
experiment_spec.dataset_config.target_class_mapping.values()})
class_mapping = dict(zip(range(len(classes)), classes))
# Create output directory
if os.path.exists(expand_path(arguments.out_seq_dir)):
shutil.rmtree(arguments.out_seq_dir)
os.mkdir(arguments.out_seq_dir)
inf_seq_list = []
for folder in os.listdir(arguments.in_seq_dir):
f = expand_path(f"{arguments.in_seq_dir}/{folder}")
if os.path.isdir(f) and os.path.exists(os.path.join(f, 'images')):
inf_seq_list.append(folder)
print('seqs:', inf_seq_list)
for folder in inf_seq_list:
in_base = expand_path(f"{arguments.in_seq_dir}/{folder}/images")
os.mkdir(expand_path(f"{arguments.out_seq_dir}/{folder}"))
out_base = expand_path(f"{arguments.out_seq_dir}/{folder}/labels")
os.mkdir(expand_path(out_base))
if os.path.isdir(in_base):
for img_path in os.listdir(in_base):
if os.path.splitext(img_path)[1] not in image_extensions:
continue
img = Image.open(os.path.join(in_base, img_path))
orig_w, orig_h = [float(x) for x in img.size]
ratio = min(img_width/orig_w, img_height/orig_h)
# do not change aspect ratio
new_w = int(round(orig_w*ratio))
new_h = int(round(orig_h*ratio))
im = img.resize((new_w, new_h), Image.ANTIALIAS)
if im.mode in ('RGBA', 'LA') or \
(im.mode == 'P' and 'transparency' in im.info) and img_channel == 1:
bg_colour = (255, 255, 255)
# Need to convert to RGBA if LA format due to a bug in PIL
alpha = im.convert('RGBA').split()[-1]
# Create a new background image of our matt color.
# Must be RGBA because paste requires both images have the same format
bg = Image.new("RGBA", im.size, bg_colour + (255,))
bg.paste(im, mask=alpha)
inf_img = im.convert(img_mode)
else:
inf_img = Image.new(img_mode, (img_width, img_height))
inf_img.paste(im, (0, 0))
inf_img = np.array(inf_img).astype(np.float32)
if img_mode == 'L':
inf_img = np.expand_dims(inf_img, axis=2)
inference_input = inf_img.transpose(2, 0, 1) - 117.3786
else:
inference_input = preprocess_input(inf_img.transpose(2, 0, 1))
# run inference
y_pred_decoded = model.predict(np.array([inference_input]))
kitti_txt = ""
decode_ratio = (orig_w/new_w, orig_h/new_h)
for i in y_pred_decoded[0]:
if i[1] < arguments.out_thres:
continue
xmin = decode_ratio[0]*i[2]
xmax = decode_ratio[0]*i[4]
ymin = decode_ratio[1]*i[3]
ymax = decode_ratio[1]*i[5]
kitti_txt += class_mapping[int(i[0])] + ' 0 0 0 ' + \
' '.join([str(x) for x in [xmin, ymin, xmax, ymax]]) + \
' 0 0 0 0 0 0 0 ' + str(i[1]) + '\n'
with open(os.path.join(out_base, os.path.splitext(img_path)[0] + '.txt'),
          'w') as f:
    f.write(kitti_txt)
if __name__ == "__main__":
arguments = parse_command_line()
inference(arguments)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/scripts/inference_seq.py |
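A stand-alone sketch of the aspect-ratio-preserving resize arithmetic used in inference_seq.py above: the image is scaled by the smaller per-axis ratio, pasted at the top-left of a model-sized canvas, and boxes are mapped back with per-axis decode ratios. The dimensions are assumptions.

img_width, img_height = 1248, 384   # assumed model input size
orig_w, orig_h = 1920.0, 1080.0     # assumed source image size

ratio = min(img_width / orig_w, img_height / orig_h)
new_w = int(round(orig_w * ratio))   # 683
new_h = int(round(orig_h * ratio))   # 384
decode_ratio = (orig_w / new_w, orig_h / new_h)
print(new_w, new_h, decode_ratio)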
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit test for RetinaNet train script."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
import keras
import pytest
SPEC_FILES = [
'default_spec.txt',
]
@pytest.mark.script_launch_mode('subprocess')
@pytest.mark.slow
@pytest.mark.parametrize('_spec_file', SPEC_FILES)
def test_train(script_runner, tmpdir, _spec_file):
'''Test the train script.'''
keras.backend.clear_session()
keras.backend.set_learning_phase(1)
script = 'nvidia_tao_tf1/cv/retinanet/scripts/train.py'
env = os.environ.copy()
parent_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
spec_file = os.path.join(parent_dir, 'experiment_specs', _spec_file)
temp_dir_name = tempfile.mkdtemp()
args = ['-e']
args.append(spec_file)
args.append('-k')
args.append('nvidia_tlt')
args.append('-r')
args.append(temp_dir_name)
ret = script_runner.run(script, env=env, *args)
try:
assert ret.success
shutil.rmtree(temp_dir_name)
except AssertionError:
print("Local path is not ready.")
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/scripts/tests/test_train.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""TLT RetinaNet entrypoint."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/entrypoint/__init__.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""TLT command line wrapper to invoke CLI scripts."""
import sys
from nvidia_tao_tf1.cv.common.entrypoint.entrypoint import launch_job
import nvidia_tao_tf1.cv.retinanet.scripts
def main():
"""Function to launch the job."""
launch_job(nvidia_tao_tf1.cv.retinanet.scripts, "retinanet", sys.argv[1:])
if __name__ == "__main__":
main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/entrypoint/retinanet.py |
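A hedged example of driving the wrapper above programmatically; the 'train' subcommand matches the scripts package, while the paths and key are placeholders.

import sys
from nvidia_tao_tf1.cv.retinanet.entrypoint.retinanet import main

# Equivalent to: retinanet train -e <spec> -r <results_dir> -k <key>
sys.argv = ["retinanet", "train",
            "-e", "/workspace/specs/default_spec.txt",  # assumed path
            "-r", "/workspace/results",
            "-k", "nvidia_tlt"]
main()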
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/experiment_specs/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RetinaNet export model to UFF."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import logging
import os
import struct
import tempfile
import graphsurgeon as gs
import keras.backend as K
from keras.layers import Permute, Reshape
from keras.models import Model
import tensorflow as tf
import uff
# Import quantization layer processing.
from nvidia_tao_tf1.core.export._quantized import (
check_for_quantized_layers,
process_quantized_layers,
)
from nvidia_tao_tf1.core.export._uff import keras_to_pb
from nvidia_tao_tf1.cv.common.export.keras_exporter import KerasExporter as Exporter
from nvidia_tao_tf1.cv.retinanet.initializers.prior_prob import PriorProbability
from nvidia_tao_tf1.cv.retinanet.layers.anchor_box_layer import RetinaAnchorBoxes
from nvidia_tao_tf1.cv.retinanet.utils.model_io import load_model
from nvidia_tao_tf1.cv.retinanet.utils.spec_loader import load_experiment_spec
NUM_FEATURE_MAPS = 5
logger = logging.getLogger(__name__)
class RetinaNetExporter(Exporter):
"""Exporter class to export a trained RetinaNet model."""
def __init__(self, model_path=None,
key=None,
data_type="fp32",
strict_type=False,
experiment_spec_path="",
backend="uff",
**kwargs):
"""Instantiate the RetinaNet exporter to export etlt model.
Args:
model_path(str): Path to the RetinaNet model file.
key (str): Key to decode the model.
data_type (str): Backend data-type for the optimized TensorRT engine.
strict_type(bool): Apply TensorRT strict_type_constraints or not for INT8 mode.
experiment_spec_path (str): Path to RetinaNet experiment spec file.
backend (str): Type of intermediate backend parser to be instantiated.
"""
super(RetinaNetExporter, self).__init__(model_path=model_path,
key=key,
data_type=data_type,
strict_type=strict_type,
backend=backend)
self.experiment_spec_path = experiment_spec_path
assert os.path.isfile(self.experiment_spec_path), \
"Experiment spec file not found at {}.".format(self.experiment_spec_path)
self.experiment_spec = load_experiment_spec(self.experiment_spec_path)
self.custom_objects = {'RetinaAnchorBoxes': RetinaAnchorBoxes,
'PriorProbability': PriorProbability}
self.tlt2 = False
self.num_classes = len({str(x) for x in
self.experiment_spec.dataset_config.target_class_mapping.values()})
def load_model(self, backend="uff"):
"""Simple function to load the RetinaNet Keras model."""
experiment_spec = self.experiment_spec
K.clear_session()
K.set_learning_phase(0)
model = load_model(self.model_path, experiment_spec, key=self.key)
if model.get_layer('mbox_conf').output.shape[3] == self.num_classes:
self.tlt2 = True
outputs = self.generate_trt_output(model.get_layer('mbox_loc').output,
model.get_layer('mbox_conf').output,
model.get_layer('mbox_priorbox').output)
model = Model(inputs=model.inputs, outputs=outputs)
if check_for_quantized_layers(model):
model, self.tensor_scale_dict = process_quantized_layers(
model, backend,
calib_cache=None,
calib_json=None)
# plugin nodes will have different names in TRT
nodes = list(self.tensor_scale_dict.keys())
for k in nodes:
if k.find('upsample') != -1:
node_name_in_trt = k.split('/')[0]
self.tensor_scale_dict[node_name_in_trt] = self.tensor_scale_dict.pop(k)
# ZeroPadding is fused with its following conv2d/depthwiseconv2d, collapse them.
padding_nodes = []
for k in self.tensor_scale_dict:
if '/Pad' in k:
# this is a ZeroPadding node
padding_nodes.append(k)
for n in padding_nodes:
self.tensor_scale_dict.pop(n)
img_mean = experiment_spec.augmentation_config.image_mean
self.image_mean = [103.939, 116.779, 123.68] \
if experiment_spec.augmentation_config.output_channel == 3 else [117.3786]
if img_mean:
if experiment_spec.augmentation_config.output_channel == 3:
self.image_mean = [img_mean['b'], img_mean['g'], img_mean['r']]
else:
self.image_mean = [img_mean['l']]
return model
def _calibration_cache_from_dict(self, tensor_scale_dict,
calibration_cache=None,
calib_json=None):
"""Write calibration cache file for QAT model.
This function converts a tensor scale dictionary generated by processing
QAT models to a TRT-readable format. By default it is written as a
trt.IInt8EntropyCalibrator2 cache file.
Args:
tensor_scale_dict (dict): Dictionary mapping tensor names to scale values.
calibration_cache (str): Path to the output calibration cache file.
calib_json (str): Optional path to dump the tensor scales as JSON.
Returns:
No explicit returns.
"""
if calibration_cache is not None:
cal_cache_str = "TRT-{}-EntropyCalibration2\n".format(self._trt_version_number)
assert not os.path.exists(calibration_cache), (
"A pre-existing cache file exists. Please delete this "
"file and re-run export."
)
# Converting float numbers to hex representation.
for tensor in tensor_scale_dict:
if tensor in ["P4_upsampled", "P5_upsampled"]:
continue
scaling_factor = tensor_scale_dict[tensor] / 127.0
cal_scale = hex(struct.unpack("i", struct.pack("f", scaling_factor))[0])
assert cal_scale.startswith(
"0x"), "Hex number expected to start with 0x."
cal_scale = cal_scale[2:]
cal_cache_str += tensor + ": " + cal_scale + "\n"
with open(calibration_cache, "w") as f:
f.write(cal_cache_str)
if calib_json is not None:
calib_json_data = {"tensor_scales": {}}
for tensor in tensor_scale_dict:
calib_json_data["tensor_scales"][tensor] = float(
tensor_scale_dict[tensor])
with open(calib_json, "w") as outfile:
json.dump(calib_json_data, outfile, indent=4)
def generate_trt_output(self, loc, conf, anchor):
"""Manipulate model outputs so we can use TRT NMS plugin."""
out_loc = Reshape((-1, 1, 1), name='loc_data')(loc)
out_conf = Reshape((-1, 1, 1), name='conf_data')(conf)
out_anchor = Reshape((-1, 2, 4), name="anchor_reshape")(anchor)
out_anchor = Permute((2, 1, 3), name="anchor_permute")(out_anchor)
out_anchor = Reshape((2, -1, 1), name='anchor_data')(out_anchor)
return [out_loc, out_conf, out_anchor]
def save_exported_file(self, model, output_file_name):
"""Save the exported model file.
This routine converts a keras model to onnx/uff model
based on the backend the exporter was initialized with.
Args:
model (keras.model.Model): Decoded keras model to be exported.
output_file_name (str): Path to the output file.
Returns:
tmp_uff_file (str): Path to the temporary uff file.
"""
os_handle, tmp_pb_file = tempfile.mkstemp(suffix=".pb")
os.close(os_handle)
if self.backend == "uff":
keras_to_pb(model, tmp_pb_file, None,
custom_objects=self.custom_objects)
tf.reset_default_graph()
dynamic_graph = gs.DynamicGraph(tmp_pb_file)
dynamic_graph = self.node_process(dynamic_graph)
os.remove(tmp_pb_file)
uff.from_tensorflow(dynamic_graph.as_graph_def(),
self.output_node_names,
output_filename=output_file_name,
text=False,
quiet=True)
logger.info("Converted model was saved into %s", output_file_name)
return output_file_name
raise NotImplementedError("Invalid backend provided. {}".format(self.backend))
def set_input_output_node_names(self):
"""Set input output node names."""
self.output_node_names = ["NMS"]
self.input_node_names = ["Input"]
def node_process(self, retinanet_graph):
"""Manipulating the dynamic graph to make it compatible with TRT.
Args:
retinanet_graph (gs.DynamicGraph): Dynamic graph from the TF Proto file.
Returns:
retinanet_graph (gs.DynamicGraph): Post-processed dynamic graph which is ready to be
serialized as a uff file.
"""
spec = self.experiment_spec
FirstDimTile = [
gs.create_node(name="FirstDimTile_{}".format(i), trt_plugin=True,
op="BatchTilePlugin_TRT")
for i in range(NUM_FEATURE_MAPS)
]
# TensorRT Bug 2603572, anchor_data/Reshape must be at the very beginning!
if self.tlt2:
background_id = -1
num_classes = self.num_classes
else:
background_id = 0
num_classes = self.num_classes + 1
NMS = gs.create_plugin_node(name='NMS', op='NMS_TRT',
inputs=['anchor_data/Reshape',
'loc_data/Reshape',
'conf_data/Reshape'],
shareLocation=1,
varianceEncodedInTarget=0,
backgroundLabelId=background_id,
confidenceThreshold=spec.nms_config.confidence_threshold,
nmsThreshold=spec.nms_config.clustering_iou_threshold,
topK=2*spec.nms_config.top_k, # topK as NMS input
codeType=1,
keepTopK=spec.nms_config.top_k, # NMS output topK
numClasses=num_classes,
inputOrder=[1, 2, 0],
confSigmoid=1,
isNormalized=1,
scoreBits=spec.nms_config.infer_nms_score_bits)
# Create a mapping of namespace names -> plugin nodes.
namespace_plugin_map = {"retinanet_anchor_{}/FirstDimTile".format(i): FirstDimTile[i]
for i in range(NUM_FEATURE_MAPS)}
resizenearest_map = {'P4_upsampled': gs.create_plugin_node(name='P4_upsampled',
op="ResizeNearest_TRT",
scale=2.0),
'P5_upsampled': gs.create_plugin_node(name='P5_upsampled',
op="ResizeNearest_TRT",
scale=2.0)}
namespace_plugin_map.update(dict(resizenearest_map))
# Create a new graph by collapsing namespaces
retinanet_graph.append(NMS)
retinanet_graph.collapse_namespaces(namespace_plugin_map)
return retinanet_graph
def get_class_labels(self):
"""Get list of class labels to serialize to a labels.txt file."""
classes = sorted({str(x) for x in
self.experiment_spec.dataset_config.target_class_mapping.values()})
# add background label at idx=0:
classes = ["background"] + classes
return classes
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/export/exporter.py |
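A minimal reproduction of the float-to-hex scale serialization in _calibration_cache_from_dict above: each float32 scale is reinterpreted as its raw 32-bit pattern, which is the format TensorRT calibration caches expect. The tensor names and scale values are invented.

import struct

def to_cal_hex(scale):
    # Reinterpret the float32 bits as a 32-bit int and render them as hex.
    packed = struct.unpack("i", struct.pack("f", scale))[0]
    return hex(packed)[2:]

for name, scale in {"conv1": 2.54, "P3_relu": 1.13}.items():
    print("{}: {}".format(name, to_cal_hex(scale / 127.0)))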
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to export a trained RetinaNet model to an ETLT file for deployment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/export/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RetinaNet export model to encrypted ONNX."""
import json
import logging
import os
import struct
import tempfile
import keras.backend as K
from keras.layers import Permute, Reshape
from keras.models import Model
import numpy as np
import onnx
import onnx_graphsurgeon as onnx_gs
import tensorflow as tf
from nvidia_tao_tf1.core.export._onnx import keras_to_onnx
# Import quantization layer processing.
from nvidia_tao_tf1.core.export._quantized import (
check_for_quantized_layers,
process_quantized_layers,
)
from nvidia_tao_tf1.cv.common.export.keras_exporter import KerasExporter as Exporter
from nvidia_tao_tf1.cv.retinanet.initializers.prior_prob import PriorProbability
from nvidia_tao_tf1.cv.retinanet.layers.anchor_box_layer import RetinaAnchorBoxes
from nvidia_tao_tf1.cv.retinanet.utils.model_io import load_model
from nvidia_tao_tf1.cv.retinanet.utils.spec_loader import load_experiment_spec
NUM_FEATURE_MAPS = 5
logger = logging.getLogger(__name__)
class RetinaNetOnnxExporter(Exporter):
"""Exporter class to export a trained RetinaNet model."""
def __init__(self, model_path=None,
key=None,
data_type="fp32",
strict_type=False,
experiment_spec_path="",
backend="onnx",
**kwargs):
"""Instantiate the RetinaNet exporter to export etlt model.
Args:
model_path(str): Path to the RetinaNet model file.
key (str): Key to decode the model.
data_type (str): Backend data-type for the optimized TensorRT engine.
strict_type(bool): Apply TensorRT strict_type_constraints or not for INT8 mode.
experiment_spec_path (str): Path to RetinaNet experiment spec file.
backend (str): Type of intermediate backend parser to be instantiated.
"""
super(RetinaNetOnnxExporter, self).__init__(model_path=model_path,
key=key,
data_type=data_type,
strict_type=strict_type,
backend=backend)
self.experiment_spec_path = experiment_spec_path
assert os.path.isfile(self.experiment_spec_path), \
"Experiment spec file not found at {}.".format(self.experiment_spec_path)
self.experiment_spec = None
self.custom_objects = {'RetinaAnchorBoxes': RetinaAnchorBoxes,
'PriorProbability': PriorProbability}
self.backend = 'onnx'
def load_model(self, backend="onnx"):
"""Simple function to load the RetinaNet Keras model."""
experiment_spec = load_experiment_spec(self.experiment_spec_path)
K.clear_session()
K.set_learning_phase(0)
model = load_model(self.model_path, experiment_spec, key=self.key)
model.summary()
outputs = self.generate_trt_output(model.get_layer('mbox_loc').output,
model.get_layer('mbox_conf').output,
model.get_layer('mbox_priorbox').output)
model = Model(inputs=model.inputs, outputs=outputs)
if check_for_quantized_layers(model):
logger.info("Processing quantized layers...")
model, self.tensor_scale_dict = process_quantized_layers(
model, backend,
calib_cache=None,
calib_json=None)
# plugin nodes will have different names in TRT
nodes = list(self.tensor_scale_dict.keys())
for k in nodes:
if k.find('upsample') != -1:
node_name_in_trt = k.split('/')[0]
self.tensor_scale_dict[node_name_in_trt] = self.tensor_scale_dict.pop(k)
# ZeroPadding is fused with its following conv2d/depthwiseconv2d, collapse them.
padding_nodes = []
for k in self.tensor_scale_dict:
if '/Pad' in k:
# this is a ZeroPadding node
padding_nodes.append(k)
for n in padding_nodes:
self.tensor_scale_dict.pop(n)
self.experiment_spec = experiment_spec
# @tylerz: clear the session and reload the model to remove _1 suffix
# Save model to a temp file so we can reload it later.
os_handle, tmp_model_file_name = tempfile.mkstemp(suffix=".hdf5")
os.close(os_handle)
model.save(tmp_model_file_name)
# Make sure Keras session is clean and tuned for inference.
K.clear_session()
K.set_learning_phase(0)
model = load_model(tmp_model_file_name, experiment_spec, key=self.key)
# Delete temp file.
os.remove(tmp_model_file_name)
img_mean = experiment_spec.augmentation_config.image_mean
self.image_mean = [103.939, 116.779, 123.68] \
if experiment_spec.augmentation_config.output_channel == 3 else [117.3786]
if img_mean:
if experiment_spec.augmentation_config.output_channel == 3:
self.image_mean = [img_mean['b'], img_mean['g'], img_mean['r']]
else:
self.image_mean = [img_mean['l']]
return model
def _calibration_cache_from_dict(self, tensor_scale_dict,
calibration_cache=None,
calib_json=None):
"""Write calibration cache file for QAT model.
This function converts a tensor scale dictionary generated by processing
QAT models to TRT readable format. By default we set it as a
trt.IInt8.EntropyCalibrator2 cache file.
Args:
tensor_scale_dict (dict): The dictionary of parameters: scale_value file.
calibration_cache (str): Path to output calibration cache file.
Returns:
No explicit returns.
"""
if calibration_cache is not None:
cal_cache_str = "TRT-{}-EntropyCalibration2\n".format(self._trt_version_number)
assert not os.path.exists(calibration_cache), (
"A pre-existing cache file exists. Please delete this "
"file and re-run export."
)
# Converting float numbers to hex representation.
for tensor in tensor_scale_dict:
if tensor in ["P4_upsampled", "P5_upsampled"]:
continue
scaling_factor = tensor_scale_dict[tensor] / 127.0
cal_scale = hex(struct.unpack("i", struct.pack("f", scaling_factor))[0])
assert cal_scale.startswith(
"0x"), "Hex number expected to start with 0x."
cal_scale = cal_scale[2:]
cal_cache_str += tensor + ": " + cal_scale + "\n"
with open(calibration_cache, "w") as f:
f.write(cal_cache_str)
if calib_json is not None:
calib_json_data = {"tensor_scales": {}}
for tensor in tensor_scale_dict:
calib_json_data["tensor_scales"][tensor] = float(
tensor_scale_dict[tensor])
with open(calib_json, "w") as outfile:
json.dump(calib_json_data, outfile, indent=4)
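    # A resulting cache file looks roughly like the following (the version tag
    # and hex-encoded scales are illustrative, not taken from a real run):
    #   TRT-8001-EntropyCalibration2
    #   conv1/convolution: 3c010a14
    #   block_1a_conv_1/convolution: 3b8f5c29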
def generate_trt_output(self, loc, conf, anchor):
"""Manipulate model outputs so we can use TRT NMS plugin."""
out_loc = Reshape((-1, 1, 1), name='loc_data')(loc)
out_conf = Reshape((-1, 1, 1), name='conf_data')(conf)
out_anchor = Reshape((-1, 2, 4), name="anchor_reshape")(anchor)
out_anchor = Permute((2, 1, 3), name="anchor_permute")(out_anchor)
out_anchor = Reshape((2, -1, 1), name='anchor_data')(out_anchor)
return [out_loc, out_conf, out_anchor]
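    # Note on the reshapes above (inferred from the code, not documented by the
    # plugin itself): loc and conf are flattened to (N, num_boxes*4, 1, 1) and
    # (N, num_boxes*num_classes, 1, 1), while the priors are rearranged to
    # (N, 2, num_boxes*4, 1) -- the two channels conventionally holding the
    # prior coordinates and variances -- to match the layout the TRT NMS plugin
    # consumes (see the inputOrder attribute set in node_process below).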
def save_exported_file(self, model, output_file_name):
"""Save the exported model file.
This routine converts a keras model to onnx/uff model
based on the backend the exporter was initialized with.
Args:
model (keras.model.Model): Decoded keras model to be exported.
output_file_name (str): Path to the output file.
Returns:
tmp_uff_file (str): Path to the temporary uff file.
"""
if self.backend == "onnx":
keras_to_onnx(model, output_file_name,
custom_objects=self.custom_objects)
tf.reset_default_graph()
onnx_model = onnx.load(output_file_name)
onnx_model = self.node_process(onnx_model)
os.remove(output_file_name)
onnx.save(onnx_model, output_file_name)
logger.info("Converted model was saved into %s", output_file_name)
return output_file_name
raise NotImplementedError("Invalid backend provided. {}".format(self.backend))
def set_input_output_node_names(self):
"""Set input output node names."""
self.output_node_names = ["NMS"]
self.input_node_names = ["Input"]
def node_process(self, retinanet_graph):
"""Manipulating the dynamic graph to make it compatible with TRT.
Args:
retinanet_graph (gs.DynamicGraph): Dynamic graph from the TF Proto file.
Returns:
retinanet_graph (gs.DymanicGraph): Post processed dynamic graph which is ready to be
serialized as a uff file.
"""
retinanet_graph = onnx_gs.import_onnx(retinanet_graph)
spec = self.experiment_spec
num_classes = len({str(x) for x in
spec.dataset_config.target_class_mapping.values()})
anchor_data = self._get_onnx_node_by_name(
retinanet_graph, 'anchor_data').outputs[0]
loc_data = self._get_onnx_node_by_name(
retinanet_graph, 'loc_data').outputs[0]
conf_data = self._get_onnx_node_by_name(
retinanet_graph, 'conf_data').outputs[0]
nms_out = onnx_gs.Variable(
"NMS",
dtype=np.float32
)
nms_out_1 = onnx_gs.Variable(
"NMS_1",
dtype=np.float32
)
nms_attrs = dict()
nms_attrs["shareLocation"] = 1
nms_attrs["varianceEncodedInTarget"] = 0
nms_attrs["backgroundLabelId"] = 0
nms_attrs["confidenceThreshold"] = spec.nms_config.confidence_threshold
nms_attrs["nmsThreshold"] = spec.nms_config.clustering_iou_threshold
nms_attrs["topK"] = 2*spec.nms_config.top_k
nms_attrs["codeType"] = 1
nms_attrs["keepTopK"] = spec.nms_config.top_k
nms_attrs["numClasses"] = num_classes + 1
nms_attrs["inputOrder"] = [1, 2, 0]
nms_attrs["confSigmoid"] = 1
nms_attrs["isNormalized"] = 1
nms_attrs["scoreBits"] = spec.nms_config.infer_nms_score_bits
nms_plugin = onnx_gs.Node(
op="NMSDynamic_TRT",
name="NMS",
inputs=[anchor_data, loc_data, conf_data],
outputs=[nms_out, nms_out_1],
attrs=nms_attrs
)
retinanet_graph.nodes.append(nms_plugin)
retinanet_graph.outputs = nms_plugin.outputs
retinanet_graph.cleanup().toposort()
self._fix_onnx_paddings(retinanet_graph)
return onnx_gs.export_onnx(retinanet_graph)
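# Usage sketch (paths and key are hypothetical; the export script is the
# supported entry point, this only illustrates the class API above):
#   exporter = RetinaNetOnnxExporter(model_path="model.tlt",
#                                    key="nvidia_tlt",
#                                    data_type="fp32",
#                                    experiment_spec_path="spec.txt")
#   model = exporter.load_model()
#   exporter.save_exported_file(model, "model.onnx")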
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/export/onnx_exporter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Unit test for RetinaNet model export functionality.'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import keras
import pytest
import tensorflow as tf
from nvidia_tao_tf1.cv.common.utils import encode_from_keras
from nvidia_tao_tf1.cv.retinanet.architecture.retinanet import retinanet
from nvidia_tao_tf1.cv.retinanet.utils.helper import eval_str
from nvidia_tao_tf1.cv.retinanet.utils.spec_loader import load_experiment_spec
backbone_configs = [
('resnet', 10, False, "fp32"),
('resnet', 50, True, "int8"),
('resnet', 18, False, "int8")]
keras.backend.set_image_data_format('channels_first')
@pytest.fixture
def _spec_file():
'''Default spec path.'''
parent_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
return os.path.join(parent_dir, 'experiment_specs/default_spec.txt')
@pytest.fixture
def spec():
'''Default spec.'''
experiment_spec = load_experiment_spec(merge_from_default=True)
return experiment_spec
@pytest.mark.skipif(os.getenv("RUN_ON_CI", "0") == "1", reason="Cannot be run on CI")
@pytest.mark.script_launch_mode('subprocess')
@pytest.mark.parametrize("model_type, nlayers, qat, data_type",
backbone_configs)
def test_export_uff(script_runner, spec, _spec_file, model_type,
nlayers, qat, data_type):
    '''Test that export works and the exported model file is generated without issues.'''
# pin GPU ID 0 so it uses the newest GPU ARCH for INT8
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
keras.backend.set_session(sess)
enc_key = 'nvidia_tlt'
cls_mapping = spec.dataset_config.target_class_mapping
classes = sorted({str(x) for x in cls_mapping.values()})
# n_classes + 1 for background class
n_classes = len(classes) + 1
scales = eval_str(spec.retinanet_config.scales)
aspect_ratios_global = eval_str(
spec.retinanet_config.aspect_ratios_global)
aspect_ratios_per_layer = eval_str(
spec.retinanet_config.aspect_ratios)
steps = eval_str(spec.retinanet_config.steps)
offsets = eval_str(spec.retinanet_config.offsets)
variances = eval_str(spec.retinanet_config.variances)
freeze_blocks = eval_str(spec.retinanet_config.freeze_blocks)
freeze_bn = eval_str(spec.retinanet_config.freeze_bn)
keras.backend.clear_session()
model = retinanet(
image_size=(3, 608, 608),
n_classes=n_classes,
nlayers=nlayers,
kernel_regularizer=None,
freeze_blocks=freeze_blocks,
freeze_bn=freeze_bn,
scales=scales,
min_scale=spec.retinanet_config.min_scale,
max_scale=spec.retinanet_config.max_scale,
aspect_ratios_global=aspect_ratios_global,
aspect_ratios_per_layer=aspect_ratios_per_layer,
two_boxes_for_ar1=spec.retinanet_config.two_boxes_for_ar1,
steps=steps,
offsets=offsets,
clip_boxes=spec.retinanet_config.clip_boxes,
variances=variances,
arch=model_type,
input_tensor=None,
qat=qat)
os_handle, tmp_keras_model = tempfile.mkstemp(suffix=".tlt")
os.close(os_handle)
encode_from_keras(model, tmp_keras_model, enc_key.encode())
os_handle, tmp_exported_model = tempfile.mkstemp(suffix=".onnx")
os.close(os_handle)
os.remove(tmp_exported_model)
del model
# export to etlt model
script = 'nvidia_tao_tf1/cv/retinanet/scripts/export.py'
env = os.environ.copy()
# 1. export in FP32 mode
if data_type == "fp32":
args = ['-m', tmp_keras_model,
'-k', enc_key,
'--experiment_spec', _spec_file,
'-o', tmp_exported_model,
'--static_batch_size', "1"]
keras.backend.clear_session()
ret = script_runner.run(script, env=env, *args)
        # before aborting, remove the created temp files if an exception is raised
try:
assert ret.success
assert os.path.isfile(tmp_exported_model)
if os.path.exists(tmp_exported_model):
os.remove(tmp_exported_model)
except AssertionError:
# if the script runner failed, the tmp_exported_model may not be created at all
if os.path.exists(tmp_exported_model):
os.remove(tmp_exported_model)
os.remove(tmp_keras_model)
            raise AssertionError(ret.stdout + ret.stderr)
# 2. export in FP16 mode
if data_type == "fp16":
args = ['-m', tmp_keras_model,
'-k', enc_key,
'--experiment_spec', _spec_file,
'-o', tmp_exported_model,
'--data_type', 'fp16',
'--static_batch_size', "1"]
keras.backend.clear_session()
ret = script_runner.run(script, env=env, *args)
try:
assert ret.success
assert os.path.isfile(tmp_exported_model)
if os.path.exists(tmp_exported_model):
os.remove(tmp_exported_model)
except AssertionError:
if os.path.exists(tmp_exported_model):
os.remove(tmp_exported_model)
os.remove(tmp_keras_model)
            raise AssertionError(ret.stdout + ret.stderr)
# 3. export in INT8 mode with random data for calibration
# 4. export in INT8 mode with tensor_scale_dict
os_handle, tmp_data_file = tempfile.mkstemp()
os.close(os_handle)
os.remove(tmp_data_file)
os_handle, tmp_cache_file = tempfile.mkstemp()
os.close(os_handle)
os.remove(tmp_cache_file)
if data_type == "int8":
if qat:
args = ['-m', tmp_keras_model,
'-k', enc_key,
'--experiment_spec', _spec_file,
'-o', tmp_exported_model,
'--data_type', 'int8',
'--cal_cache_file', tmp_cache_file,
'--static_batch_size', "1"]
keras.backend.clear_session()
ret = script_runner.run(script, env=env, *args)
try:
assert ret.success
assert os.path.isfile(tmp_exported_model)
assert os.path.isfile(tmp_cache_file)
except AssertionError:
raise AssertionError(ret.stdout + ret.stderr)
else:
args = ['-m', tmp_keras_model,
'-k', enc_key,
'--experiment_spec', _spec_file,
'-o', tmp_exported_model,
'--data_type', 'int8',
'--cal_data_file', tmp_data_file,
'--cal_image_dir', "",
'--batches', '1',
'--batch_size', '1',
'--cal_cache_file', tmp_cache_file,
'--static_batch_size', "1"]
keras.backend.clear_session()
ret = script_runner.run(script, env=env, *args)
try:
                # this is the last export; retain the etlt model for the following check
assert ret.success
assert os.path.isfile(tmp_exported_model)
if os.path.exists(tmp_data_file):
os.remove(tmp_data_file)
if os.path.exists(tmp_cache_file):
os.remove(tmp_cache_file)
except AssertionError:
                raise AssertionError(ret.stdout + ret.stderr)
# clear the tmp files
if os.path.exists(tmp_exported_model):
os.remove(tmp_exported_model)
if os.path.exists(tmp_cache_file):
os.remove(tmp_cache_file)
if os.path.exists(tmp_keras_model):
os.remove(tmp_keras_model)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/export/tests/test_uff_export.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IVA RetinaNet base architecture."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import keras
import numpy as np
class PriorProbability(keras.initializers.Initializer):
"""Apply a prior probability to the weights.
https://arxiv.org/pdf/1708.02002.pdf
"""
def __init__(self, probability=0.01):
"""Set prior probability."""
self.probability = probability
def get_config(self):
"""Get probability."""
return {
'probability': self.probability
}
def __call__(self, shape, dtype=None):
"""set bias to -log((1 - p)/p) for foreground."""
result = np.ones(shape, dtype=dtype) * - math.log((1 - self.probability) / self.probability)
return result
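# With the default probability p = 0.01 the bias becomes
# -log((1 - 0.01) / 0.01) = -log(99) ~= -4.595, so after the sigmoid the
# classification head initially predicts foreground with probability ~0.01,
# as prescribed by the focal loss paper linked above.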
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/initializers/prior_prob.py |
"""Initializers module for RetinaNet.""" | tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/initializers/__init__.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/__init__.py |
"""Module containing implementation of loss functions for FasterRCNN."""
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/losses/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Loss functions for FasterRCNN.'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import keras.backend as K
from keras.objectives import categorical_crossentropy
import tensorflow as tf
from nvidia_tao_tf1.cv.faster_rcnn.layers.utils import safe_gather
def _smooth_l1_loss(bbox_pred, bbox_targets, sigma=1.0):
"""Smooth L1 loss function."""
sigma_2 = sigma * sigma
box_diff = bbox_pred - bbox_targets
in_box_diff = box_diff
abs_in_box_diff = tf.abs(in_box_diff)
smoothL1_sign = tf.stop_gradient(K.cast(K.less_equal(abs_in_box_diff, 1.0/sigma_2),
tf.float32))
x1 = (in_box_diff * in_box_diff) * (sigma_2 / 2.) * smoothL1_sign
x2 = (abs_in_box_diff - (0.5 / sigma_2)) * (1. - smoothL1_sign)
in_loss_box = x1 + x2
return in_loss_box
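# A minimal NumPy reference of the piecewise function above, for intuition
# only (this helper is hypothetical and unused by the training code):
def _smooth_l1_reference(diff, sigma=1.0):
    """Sketch: 0.5*sigma^2*x^2 if |x| <= 1/sigma^2, else |x| - 0.5/sigma^2."""
    import numpy as np
    sigma_2 = sigma * sigma
    abs_diff = np.abs(diff)
    return np.where(abs_diff <= 1.0 / sigma_2,
                    0.5 * sigma_2 * abs_diff * abs_diff,
                    abs_diff - 0.5 / sigma_2)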
def _build_rpn_class_loss(num_anchors, lambda_rpn_class, rpn_train_bs):
'''build RPN classification loss.'''
def rpn_loss_cls(y_true, y_pred):
y_true = tf.stop_gradient(y_true)
ce_loss = K.binary_crossentropy(y_true[:, num_anchors:, :, :],
y_pred[:, :, :, :])
loss = lambda_rpn_class * \
K.sum(y_true[:, :num_anchors, :, :] * ce_loss, axis=[1, 2, 3]) / rpn_train_bs
return K.mean(loss)
return rpn_loss_cls
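# Note on the y_true layout above (inferred from the slicing): channels
# [0, num_anchors) carry the per-anchor validity mask (1 = anchor sampled into
# the RPN batch) and channels [num_anchors, 2*num_anchors) carry the 0/1
# objectness labels; the masked sum is normalized by rpn_train_bs.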
def _build_rpn_bbox_loss(num_anchors, lambda_rpn_regr, rpn_train_bs):
'''build RPN bbox loss.'''
def rpn_loss_regr(y_true, y_pred):
y_true = tf.stop_gradient(y_true)
l1_loss = _smooth_l1_loss(y_pred,
y_true[:, 4 * num_anchors:, :, :],
sigma=3.0)
loss = lambda_rpn_regr * \
K.sum(y_true[:, :4 * num_anchors, :, :] * l1_loss, axis=[1, 2, 3]) / rpn_train_bs
return K.mean(loss)
return rpn_loss_regr
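# Similarly for the bbox loss (inferred from the slicing): the first
# 4*num_anchors channels of y_true hold the positive-anchor mask broadcast
# over the 4 coordinates, and the last 4*num_anchors channels hold the
# regression targets the smooth L1 loss is computed against.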
def _build_rcnn_class_loss(lambda_rcnn_class, rcnn_train_bs):
'''build RCNN classification loss.'''
def rcnn_loss_cls(y_true, y_pred):
        # y_true: (N, R, C+1); an all-zero label indicates a padded ROI.
# y_pred: (N, R, C+1)
# mask for positive + negative ROIs, ignore padded ROIs.
batch = tf.cast(tf.shape(y_pred)[0], tf.float32)
y_true = tf.stop_gradient(y_true)
y_true_mask = tf.cast(tf.reduce_sum(y_true, axis=-1) > 0, tf.float32)
ce_loss = categorical_crossentropy(y_true, y_pred)
loss = lambda_rcnn_class * \
K.sum(ce_loss*tf.stop_gradient(y_true_mask)) / rcnn_train_bs
# average over batch dim
return loss / batch
return rcnn_loss_cls
def _build_rcnn_bbox_loss(num_classes, lambda_rcnn_regr, rcnn_train_bs):
'''build RCNN bbox loss.'''
def rcnn_loss_regr(y_true, y_pred):
        # y_true: (N, R, (C-1)*8), packed per class as [positive mask, deltas]
        # y_pred: (N, R, (C-1)*4)
batch = tf.cast(tf.shape(y_pred)[0], tf.float32)
y_true = tf.stop_gradient(y_true)
y_true = tf.reshape(y_true,
(tf.shape(y_true)[0], tf.shape(y_true)[1], num_classes-1, 8))
y_true_positive = tf.reshape(y_true[:, :, :, 0:4], (-1, 4))
y_true_deltas = tf.reshape(y_true[:, :, :, 4:], (-1, 4))
y_pred = tf.reshape(y_pred, (-1, 4))
y_true_pos_sel = tf.math.equal(tf.reduce_sum(y_true_positive, axis=1), 4.0)
positive_idxs = tf.where(y_true_pos_sel)[:, 0]
l1_loss = _smooth_l1_loss(safe_gather(y_pred, positive_idxs),
safe_gather(y_true_deltas, positive_idxs))
loss = K.switch(tf.size(positive_idxs) > 0,
l1_loss,
tf.constant(0.0))
loss = lambda_rcnn_regr * K.sum(loss) / rcnn_train_bs
# average over batch dim
return loss / batch
return rcnn_loss_regr
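# Wiring sketch (anchor count, lambdas and output ordering are illustrative;
# the real values come from the experiment spec and the model builder):
#   losses = [_build_rpn_class_loss(9, 1.0, 256),
#             _build_rpn_bbox_loss(9, 1.0, 256),
#             _build_rcnn_class_loss(1.0, 256),
#             _build_rcnn_bbox_loss(num_classes, 1.0, 256)]
#   model.compile(optimizer=optimizer, loss=losses)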
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/losses/losses.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Unit test for FasterRCNN loss functions.'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pytest
import tensorflow as tf
from nvidia_tao_tf1.cv.faster_rcnn.losses.losses import (
_build_rcnn_bbox_loss,
_build_rcnn_class_loss,
_build_rpn_bbox_loss,
_build_rpn_class_loss
)
NUM_ANCHORS = 9
LAMBDA_RPN_CLASS = 1.0
LAMBDA_RPN_DELTAS = 1.0
LAMBDA_RCNN_CLASS = 1.0
LAMBDA_RCNN_DELTAS = 1.0
RPN_TRAIN_BS = 256
RCNN_TRAIN_BS = 256
BS = 2
NUM_CLASSES = 4
RPN_H = 20
RPN_W = 30
def test_rpn_class_loss():
'''Check the RPN classification loss.'''
# loss should be non-negative
rpn_class_loss = _build_rpn_class_loss(NUM_ANCHORS, LAMBDA_RPN_CLASS, RPN_TRAIN_BS)
shape = (BS, NUM_ANCHORS, RPN_H, RPN_W)
shape2 = (BS, 2*NUM_ANCHORS, RPN_H, RPN_W)
y_pred = tf.constant(np.random.random(shape), dtype=tf.float32)
y_true_np = np.random.randint(0,
high=2,
size=(BS, 1, NUM_ANCHORS, RPN_H, RPN_W)).astype(np.float32)
y_true_np = np.broadcast_to(y_true_np, (BS, 2, NUM_ANCHORS, RPN_H, RPN_W)).reshape(shape2)
y_true = tf.constant(y_true_np,
dtype=tf.float32)
loss = rpn_class_loss(y_true, y_pred)
with tf.Session() as sess:
assert sess.run(loss) >= 0.0
    # providing inputs with 3 dims should raise an error.
y_true_wrong = \
tf.constant(np.random.randint(0,
high=2,
size=(2*NUM_ANCHORS, RPN_H, RPN_W)).astype(np.float32),
dtype=tf.float32)
with pytest.raises(ValueError):
rpn_class_loss(y_true_wrong, y_pred)
def test_rpn_bbox_loss():
    '''Check the RPN bounding-box loss.'''
# loss should be non-negative.
rpn_bbox_loss = _build_rpn_bbox_loss(NUM_ANCHORS, LAMBDA_RPN_DELTAS, RPN_TRAIN_BS)
shape = (BS, 4*NUM_ANCHORS, RPN_H, RPN_W)
y_pred = tf.constant(np.random.random(shape), dtype=tf.float32)
y_true_np = np.broadcast_to(np.random.randint(0,
high=2,
size=(BS, 1, NUM_ANCHORS, RPN_H, RPN_W)),
(BS, 4, NUM_ANCHORS, RPN_H, RPN_W)).reshape(shape)
y_true_mask = tf.constant(y_true_np, dtype=tf.float32)
y_true_deltas = tf.constant(np.random.random(shape), dtype=tf.float32)
y_true = tf.concat((y_true_mask, y_true_deltas), axis=1)
loss = rpn_bbox_loss(y_true, y_pred)
with tf.Session() as sess:
assert sess.run(loss) >= 0.0
y_true_wrong = y_true[0, ...]
with pytest.raises(ValueError):
rpn_bbox_loss(y_true_wrong, y_pred)
def test_rcnn_class_loss():
'''Check the RCNN classification loss.'''
# loss should be non-negative.
rcnn_class_loss = _build_rcnn_class_loss(LAMBDA_RCNN_CLASS, RCNN_TRAIN_BS)
shape = (BS, RCNN_TRAIN_BS, NUM_CLASSES)
y_pred = tf.constant(np.random.random(shape), dtype=tf.float32)
y_true = tf.constant(np.random.randint(0, high=2, size=shape), dtype=tf.float32)
loss = rcnn_class_loss(y_true, y_pred)
with tf.Session() as sess:
assert sess.run(loss) >= 0.0
    # raise an error when given wrong input dims
y_true_error = y_true[..., 0:2]
with pytest.raises(ValueError):
rcnn_class_loss(y_true_error, y_pred)
def test_rcnn_bbox_loss():
'''Check the RCNN bbox loss.'''
# loss should be non-negative.
rcnn_bbox_loss = _build_rcnn_bbox_loss(NUM_CLASSES, LAMBDA_RCNN_DELTAS, RCNN_TRAIN_BS)
shape = (BS, RCNN_TRAIN_BS, (NUM_CLASSES-1)*4)
shape2 = (BS, RCNN_TRAIN_BS, (NUM_CLASSES-1)*8)
y_pred = tf.constant(np.random.random(shape), dtype=tf.float32)
y_true_np = np.broadcast_to(np.random.randint(0,
high=2,
size=(BS, RCNN_TRAIN_BS, NUM_CLASSES-1, 1)),
(BS, RCNN_TRAIN_BS, (NUM_CLASSES-1), 4))
y_true_mask = tf.constant(y_true_np, dtype=tf.float32)
y_true_deltas_np = np.random.random((BS, RCNN_TRAIN_BS, (NUM_CLASSES-1), 4))
y_true_deltas = tf.constant(y_true_deltas_np, dtype=tf.float32)
y_true = tf.reshape(tf.concat((y_true_mask, y_true_deltas), axis=-1), shape2)
loss = rcnn_bbox_loss(y_true, y_pred)
with tf.Session() as sess:
assert sess.run(loss) >= 0.0
y_true_wrong = y_true[0, ...]
with pytest.raises(ValueError):
rcnn_bbox_loss(y_true_wrong, y_pred)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/losses/tests/test_losses.py |
"""FasterRCNN entry point."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from maglev_sdk.docker_container.entrypoint import main
if __name__ == '__main__':
main('faster_rcnn', 'nvidia_tao_tf1/cv/faster_rcnn/scripts')
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/docker/faster_rcnn.py |
"""Quantization Aware Training module for FRCNN."""
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/qat/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Process and export quantized models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import tempfile
import keras
from keras.layers import Conv2D, Dense, DepthwiseConv2D, TimeDistributed
from keras.utils.generic_utils import CustomObjectScope
import tensorflow as tf
from nvidia_tao_tf1.core.models.templates.qdq_layer import QDQ
from nvidia_tao_tf1.core.models.templates.quantized_conv2d import QuantizedConv2D
from nvidia_tao_tf1.core.models.templates.quantized_dense import QuantizedDense
from nvidia_tao_tf1.core.models.templates.quantized_depthwiseconv2d import QuantizedDepthwiseConv2D
from nvidia_tao_tf1.cv.common.utils import CUSTOM_OBJS
QAT_LAYERS = [
QuantizedConv2D,
QuantizedDepthwiseConv2D,
QDQ,
QuantizedDense,
]
def collapse_pad_and_conv(tensor_scale_dict):
"""ZeroPadding is fused with its following conv2d/depthwiseconv2d, collapse them."""
padding_nodes = []
for k in tensor_scale_dict:
if '/Pad' in k:
# this is a ZeroPadding node
padding_nodes.append(k)
for n in padding_nodes:
tensor_scale_dict.pop(n)
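# Example (illustrative tensor name): a scale recorded for "block_1a/Pad"
# would be dropped here, leaving only the scale of the conv node that consumes
# the padded tensor, because TRT fuses the padding into the conv.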
def collapse_flatten_and_prev(tensor_scale_dict):
"""Flatten node is no-op in UFF, collapse with its previous layer."""
# get flatten node
flatten_op = tf.get_default_graph().get_operation_by_name('time_distributed_flatten/Reshape')
if flatten_op:
# get flatten input tensor(QDQ)
flatten_input_tensor = flatten_op.inputs[0]
while '_qdq' in flatten_input_tensor.name:
# get QDQ input tensor
flatten_input_tensor = flatten_input_tensor.op.inputs[0]
# get previous node name
prev_node_name = flatten_input_tensor.op.inputs[0].op.name
if prev_node_name and (prev_node_name in tensor_scale_dict):
tensor_scale_dict.pop(prev_node_name)
return
if 'crop_and_resize_' in prev_node_name:
plugin_name = 'roi_pooling_conv_1/CropAndResize_new'
assert plugin_name in tensor_scale_dict, (
"Expect plugin node: {} in tensor_scale_dict, but not found.".format(plugin_name)
)
tensor_scale_dict.pop(plugin_name)
return
def process_flatten_name(tensor_name):
"""Strip Flatten TD reshape."""
if re.match(r'time_distributed_flatten/Reshape_2', tensor_name):
return tensor_name.replace('Reshape_2', 'Reshape_1', 1)
return tensor_name
def process_plugins_name(tensor_name):
"""replace the node name with the corresponding plugins name."""
if re.match(r'crop_and_resize_1/Reshape_1', tensor_name):
plugin_name = 'roi_pooling_conv_1/CropAndResize_new'
return plugin_name
return tensor_name
def process_td_output_name(tensor_name, layer, up=False):
"""replace the output name of TD layer with its inner layer output name."""
# if the input comes from a TD layer, we should use the name
# of the inner layer of TD layer to align with pb
if re.match(r'time_distributed_[0-9]+/Reshape_[0-9]+', tensor_name):
if up:
prev_layer = layer._inbound_nodes[0].inbound_layers[0]
else:
prev_layer = layer
assert type(prev_layer) == TimeDistributed, type(prev_layer)
# the TD inner layer .output attr is not set
# so we have to find it out with TF APIs
# get the node of the TD Reshape_1 op
td_reshape_1 = tf.get_default_graph().get_operation_by_name(tensor_name.split(":")[0])
# get this op's input tensor
tensor_name_inner = td_reshape_1.inputs[0].name
if re.match(r'time_distributed_[0-9]+/Reshape:.*$', tensor_name_inner):
# this is a TD Dropout layer, get its input again
tensor_name_inner = td_reshape_1.inputs[0].op.inputs[0].name
tensor = td_reshape_1.inputs[0].op.inputs[0]
# probably there are some QDQ layers in between
while '_qdq' in tensor_name_inner:
tensor = tensor.op.inputs[0]
tensor_name_inner = tensor.name
# can still get a TD layer, strip it
if re.match(r'time_distributed_[0-9]+/Reshape_[0-9]+', tensor_name_inner):
tensor = tensor.op.inputs[0]
tensor_name_inner = tensor.name
return tensor_name_inner
return tensor_name
def check_for_quantized_layers(model):
"""Check Keras model for quantization layers."""
for layer in model.layers:
if type(layer) in QAT_LAYERS:
return True
if type(layer) == TimeDistributed:
if type(layer.layer) in QAT_LAYERS:
return True
return False
def process_quantized_layers(model,
output_format,
create_session=False,
learning_phase=0):
"""Remove QDQ, replace the QuantizedConv2D with Conv2D and extract calibration cache."""
network_dict = {"input_layers_of": {}, "new_output_tensor_of": {}}
# Set the input layers of each layer.
for layer in model.layers:
if len(layer._inbound_nodes) > 1:
raise AttributeError(
"Layers with multiple inbound nodes are not supported."
)
inbound_node = layer._inbound_nodes[0]
inbound_layers = [in_layer.name for in_layer in inbound_node.inbound_layers]
if len(inbound_layers) > 0:
network_dict["input_layers_of"].update({layer.name: inbound_layers})
input_layers = [
l for l in model.layers if len(l._inbound_nodes[0].inbound_layers) == 0
]
assert len(input_layers) > 0, "No input layer was found."
assert len(input_layers) == len(
model.inputs
), "Number of input layers does not match number of input tensors."
for layer in input_layers:
input_tensor = layer._inbound_nodes[0].input_tensors[0]
assert input_tensor in model.inputs, "Input tensor not found in model inputs."
network_dict["new_output_tensor_of"].update({layer.name: input_tensor})
qdq_scale_dict = {}
for layer in model.layers:
if type(layer) == QDQ:
scaling_factor = layer.get_weights()
scaling_factor = scaling_factor[0]
prev_layer_name = network_dict["input_layers_of"][layer.name]
assert (
len(prev_layer_name) == 1
), "QDQ layer is expected to have only one input layer."
qdq_scale_dict[prev_layer_name[0]] = scaling_factor
for node in layer._outbound_nodes:
layer_name = node.outbound_layer.name
if type(node.outbound_layer) == QDQ:
raise AttributeError("Cascaded QDQ layers are not supported.")
idx = network_dict["input_layers_of"][layer_name].index(layer.name)
network_dict["input_layers_of"][layer_name][idx] = prev_layer_name[0]
output_tensors = []
tensor_scale_dict = {}
for layer in model.layers:
if layer.name not in network_dict["input_layers_of"]:
# It's an input layer.
if layer.name in qdq_scale_dict:
tensor_name = layer.output.name
# UFF exporter freezes the graph into a .pb file before exporting to UFF.
                # As a result, the ":0", ":1", ... suffixes, which indicate the output
                # index of a TensorFlow op within the tensor name, are removed from the
                # tensor names. The ONNX exporter does not start from a frozen graph.
if output_format != "onnx":
tensor_name = tensor_name.split(":")[0]
tensor_scale_dict[tensor_name] = qdq_scale_dict[layer.name]
continue
if type(layer) == QDQ:
continue
# Determine input tensors.
layer_input = [
network_dict["new_output_tensor_of"][layer_aux]
for layer_aux in network_dict["input_layers_of"][layer.name]
]
if len(layer_input) == 1:
layer_input = layer_input[0]
if type(layer) == QuantizedConv2D:
x = layer_input
layer_config = layer.get_config()
layer_config.pop("bitwidth")
quantize_input = layer_config.pop("quantize")
new_layer = Conv2D.from_config(layer_config)
if quantize_input:
if layer.use_bias:
kernels, biases, scaling_factor = layer.get_weights()
else:
kernels, scaling_factor = layer.get_weights()
assert (
scaling_factor.shape == ()
), "Unexpected shape for scaling factor parameter."
else:
if layer.use_bias:
kernels, biases = layer.get_weights()
else:
kernels = layer.get_weights()[0]
x = new_layer(x)
if layer.use_bias:
new_layer.set_weights([kernels, biases])
else:
new_layer.set_weights([kernels])
if (
quantize_input
and type(layer._inbound_nodes[0].inbound_layers[0]) != QDQ
):
tensor_name = process_td_output_name(layer.input.name, layer, up=True)
if output_format != "onnx":
tensor_name = process_plugins_name(tensor_name)
tensor_name = process_flatten_name(tensor_name)
if output_format != "onnx":
tensor_name = tensor_name.split(":")[0]
if tensor_name in tensor_scale_dict:
tensor_scale_dict[tensor_name] = max(
tensor_scale_dict[tensor_name], scaling_factor
)
else:
tensor_scale_dict[tensor_name] = scaling_factor
elif type(layer) == TimeDistributed and type(layer.layer) == QuantizedConv2D:
x = layer_input
layer_config = layer.layer.get_config()
layer_config.pop("bitwidth")
quantize_input = layer_config.pop("quantize")
new_layer = Conv2D.from_config(layer_config)
if quantize_input:
if layer.layer.use_bias:
kernels, biases, scaling_factor = layer.layer.get_weights()
else:
kernels, scaling_factor = layer.layer.get_weights()
assert (
scaling_factor.shape == ()
), "Unexpected shape for scaling factor parameter."
else:
if layer.layer.use_bias:
kernels, biases = layer.layer.get_weights()
else:
kernels = layer.layer.get_weights()[0]
x = TimeDistributed(new_layer, name=layer.name)(x)
if layer.layer.use_bias:
new_layer.set_weights([kernels, biases])
else:
new_layer.set_weights([kernels])
if (
quantize_input
and type(layer._inbound_nodes[0].inbound_layers[0]) != QDQ
):
tensor_name = process_td_output_name(layer.input.name, layer, up=True)
if output_format != "onnx":
tensor_name = process_plugins_name(tensor_name)
tensor_name = process_flatten_name(tensor_name)
if output_format != "onnx":
tensor_name = tensor_name.split(":")[0]
if tensor_name in tensor_scale_dict:
tensor_scale_dict[tensor_name] = max(
tensor_scale_dict[tensor_name], scaling_factor
)
else:
tensor_scale_dict[tensor_name] = scaling_factor
elif type(layer) == QuantizedDepthwiseConv2D:
x = layer_input
layer_config = layer.get_config()
layer_config.pop("bitwidth")
quantize_input = layer_config.pop("quantize")
new_layer = DepthwiseConv2D.from_config(layer_config)
if quantize_input:
if layer.use_bias:
kernels, biases, scaling_factor = layer.get_weights()
else:
kernels, scaling_factor = layer.get_weights()
assert (
scaling_factor.shape == ()
), "Unexpected shape for scaling factor parameter."
else:
if layer.use_bias:
kernels, biases = layer.get_weights()
else:
kernels = layer.get_weights()[0]
x = new_layer(x)
if layer.use_bias:
new_layer.set_weights([kernels, biases])
else:
new_layer.set_weights([kernels])
if (
quantize_input
and type(layer._inbound_nodes[0].inbound_layers[0]) != QDQ
):
tensor_name = process_td_output_name(layer.input.name, layer, up=True)
if output_format != "onnx":
tensor_name = process_plugins_name(tensor_name)
tensor_name = process_flatten_name(tensor_name)
if output_format != "onnx":
tensor_name = tensor_name.split(":")[0]
if tensor_name in tensor_scale_dict:
tensor_scale_dict[tensor_name] = max(
tensor_scale_dict[tensor_name], scaling_factor
)
else:
tensor_scale_dict[tensor_name] = scaling_factor
elif type(layer) == TimeDistributed and type(layer.layer) == QuantizedDepthwiseConv2D:
x = layer_input
layer_config = layer.layer.get_config()
layer_config.pop("bitwidth")
quantize_input = layer_config.pop("quantize")
new_layer = DepthwiseConv2D.from_config(layer_config)
if quantize_input:
if layer.layer.use_bias:
kernels, biases, scaling_factor = layer.layer.get_weights()
else:
kernels, scaling_factor = layer.layer.get_weights()
assert (
scaling_factor.shape == ()
), "Unexpected shape for scaling factor parameter."
else:
if layer.layer.use_bias:
kernels, biases = layer.layer.get_weights()
else:
kernels = layer.layer.get_weights()[0]
x = TimeDistributed(new_layer, name=layer.name)(x)
if layer.layer.use_bias:
new_layer.set_weights([kernels, biases])
else:
new_layer.set_weights([kernels])
if (
quantize_input
and type(layer._inbound_nodes[0].inbound_layers[0]) != QDQ
):
tensor_name = process_td_output_name(layer.input.name, layer, up=True)
if output_format != "onnx":
tensor_name = process_plugins_name(tensor_name)
tensor_name = process_flatten_name(tensor_name)
if output_format != "onnx":
tensor_name = tensor_name.split(":")[0]
if tensor_name in tensor_scale_dict:
tensor_scale_dict[tensor_name] = max(
tensor_scale_dict[tensor_name], scaling_factor
)
else:
tensor_scale_dict[tensor_name] = scaling_factor
elif type(layer) == QuantizedDense:
x = layer_input
layer_config = layer.get_config()
layer_config.pop("bitwidth")
quantize_input = layer_config.pop("quantize")
new_layer = Dense.from_config(layer_config)
if quantize_input:
if layer.use_bias:
kernels, biases, scaling_factor = layer.get_weights()
else:
kernels, scaling_factor = layer.get_weights()
assert (
scaling_factor.shape == ()
), "Unexpected shape for scaling factor parameter."
else:
if layer.use_bias:
kernels, biases = layer.get_weights()
else:
kernels = layer.get_weights()[0]
x = new_layer(x)
if layer.use_bias:
new_layer.set_weights([kernels, biases])
else:
new_layer.set_weights([kernels])
if (
quantize_input
and type(layer._inbound_nodes[0].inbound_layers[0]) != QDQ
):
tensor_name = process_td_output_name(layer.input.name, layer, up=True)
if output_format != "onnx":
tensor_name = process_plugins_name(tensor_name)
tensor_name = process_flatten_name(tensor_name)
if output_format != "onnx":
tensor_name = tensor_name.split(":")[0]
if tensor_name in tensor_scale_dict:
tensor_scale_dict[tensor_name] = max(
tensor_scale_dict[tensor_name], scaling_factor
)
else:
tensor_scale_dict[tensor_name] = scaling_factor
elif type(layer) == TimeDistributed and type(layer.layer) == QuantizedDense:
x = layer_input
layer_config = layer.layer.get_config()
layer_config.pop("bitwidth")
quantize_input = layer_config.pop("quantize")
new_layer = Dense.from_config(layer_config)
if quantize_input:
if layer.layer.use_bias:
kernels, biases, scaling_factor = layer.layer.get_weights()
else:
kernels, scaling_factor = layer.layer.get_weights()
assert (
scaling_factor.shape == ()
), "Unexpected shape for scaling factor parameter."
else:
if layer.layer.use_bias:
kernels, biases = layer.layer.get_weights()
else:
kernels = layer.layer.get_weights()[0]
x = TimeDistributed(new_layer, name=layer.name)(x)
if layer.layer.use_bias:
new_layer.set_weights([kernels, biases])
else:
new_layer.set_weights([kernels])
if (
quantize_input
and type(layer._inbound_nodes[0].inbound_layers[0]) != QDQ
):
tensor_name = process_td_output_name(layer.input.name, layer, up=True)
if output_format != "onnx":
tensor_name = process_plugins_name(tensor_name)
tensor_name = process_flatten_name(tensor_name)
if output_format != "onnx":
tensor_name = tensor_name.split(":")[0]
if tensor_name in tensor_scale_dict:
tensor_scale_dict[tensor_name] = max(
tensor_scale_dict[tensor_name], scaling_factor
)
else:
tensor_scale_dict[tensor_name] = scaling_factor
else:
weights = layer.get_weights()
layer_config = layer.get_config()
new_layer = type(layer).from_config(layer_config)
x = new_layer(layer_input)
new_layer.set_weights(weights)
if layer.name in qdq_scale_dict:
tensor_name = process_td_output_name(layer.output.name, layer)
if output_format != "onnx":
tensor_name = process_plugins_name(tensor_name)
tensor_name = process_flatten_name(tensor_name)
if output_format != "onnx":
tensor_name = tensor_name.split(":")[0]
tensor_scale_dict[tensor_name] = qdq_scale_dict[layer.name]
if len(layer._outbound_nodes) == 0:
if isinstance(x, list):
output_tensors.extend(x)
else:
output_tensors.append(x)
for node in layer._outbound_nodes:
outbound_layer = node.outbound_layer
if type(outbound_layer) == QDQ:
if len(outbound_layer._outbound_nodes) == 0:
if isinstance(x, list):
output_tensors.extend(x)
else:
output_tensors.append(x)
network_dict["new_output_tensor_of"].update({layer.name: x})
model = keras.models.Model(inputs=model.inputs, outputs=output_tensors)
# collapse flatten node and its previous node
if output_format != "onnx":
collapse_flatten_and_prev(tensor_scale_dict)
# collapse padding and conv2d/depthwiseconv2d
collapse_pad_and_conv(tensor_scale_dict)
# convert input_image:0 to input_image for onnx
# since it seems there is no :0 for input in onnx model
if output_format == "onnx":
if "input_image:0" in tensor_scale_dict:
tensor_scale_dict.update(
{"input_image": tensor_scale_dict["input_image:0"]}
)
tensor_scale_dict.pop("input_image:0")
# save model to file, reset the tf graph and load it to make sure the tf op names
# not appended with _n
os_handle, temp_file_name = tempfile.mkstemp()
os.close(os_handle)
with CustomObjectScope(CUSTOM_OBJS):
model.save(temp_file_name)
# clear old tf graph and session
keras.backend.clear_session()
if create_session:
# create a new tf session and use it as Keras session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
keras.backend.set_session(tf.Session(config=config))
assert learning_phase in [0, 1], "Keras learning phase should be 0 or 1, got {}".format(
learning_phase
)
keras.backend.set_learning_phase(learning_phase)
with CustomObjectScope(CUSTOM_OBJS):
new_model = keras.models.load_model(temp_file_name, compile=False)
os.remove(temp_file_name)
return new_model, tensor_scale_dict
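# Usage sketch (flags are illustrative; the exporters drive this for real):
#   if check_for_quantized_layers(model):
#       model, tensor_scale_dict = process_quantized_layers(
#           model, "onnx", create_session=True, learning_phase=0)
#   # tensor_scale_dict can then be written out as an INT8 calibration cache.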
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/qat/_quantized.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create a Keras model for Quantization-Aware Training (QAT)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import keras
from keras.layers import (
Add,
Average,
Concatenate,
Maximum,
Minimum,
Multiply,
Permute,
Subtract,
)
from keras.layers import (
BatchNormalization, Conv2D, Dense, DepthwiseConv2D,
ReLU, Softmax, TimeDistributed
)
from keras.layers.core import Activation
from nvidia_tao_tf1.core.models.templates.qdq_layer import QDQ
from nvidia_tao_tf1.core.models.templates.quantized_conv2d import QuantizedConv2D
from nvidia_tao_tf1.core.models.templates.quantized_dense import QuantizedDense
from nvidia_tao_tf1.core.models.templates.quantized_depthwiseconv2d import QuantizedDepthwiseConv2D
from nvidia_tao_tf1.cv.faster_rcnn.layers.custom_layers import CropAndResize, Proposal, ProposalTarget
output_types = [
Activation,
ReLU,
Softmax,
Add,
Subtract,
Multiply,
Average,
Maximum,
Minimum,
Concatenate,
Permute,
]
def create_layer_from_config(layer, input_tensor, freeze_bn=False):
"""Re-create a Keras layer from config."""
layer_config = layer.get_config()
weights = layer.get_weights()
new_layer = type(layer).from_config(layer_config)
if (type(new_layer) == BatchNormalization or
(type(new_layer) == TimeDistributed and
type(new_layer.layer) == BatchNormalization)):
if freeze_bn:
x = new_layer(input_tensor, training=False)
else:
x = new_layer(input_tensor)
else:
x = new_layer(input_tensor)
new_layer.set_weights(weights)
return x
def _add_outputs(layer, output_layers):
"""Recursively find the output layers."""
for prev_layer in layer._inbound_nodes[0].inbound_layers:
if prev_layer.name not in output_layers:
output_layers.append(prev_layer.name)
if type(prev_layer) in output_types:
output_layers = _add_outputs(prev_layer, output_layers)
return output_layers
def create_quantized_keras_model(model, freeze_bn=False, training=False):
"""Quantize a Keras model.
    This function replaces Conv2D/DepthwiseConv2D/Dense layers with their quantized
    counterparts, caps ReLU activations at 6 and adds QDQ layers as needed in the
    graph. It also copies the weights from the original Keras model.
Args:
model (Keras model): The input Keras model.
freeze_bn(bool): Freeze BN layers or not.
training(bool): Flag for training or validation mode.
Returns:
model (Keras model): A keras model prepared for Quantization-Aware Training.
"""
network_dict = {"input_layers_of": {}, "new_output_tensor_of": {}}
# Set the input layers of each layer.
for layer in model.layers:
if len(layer._inbound_nodes) > 1:
raise AttributeError(
"Layers with multiple inbound nodes are not supported."
)
inbound_node = layer._inbound_nodes[0]
inbound_layers = [in_layer.name for in_layer in inbound_node.inbound_layers]
if len(inbound_layers) > 0:
network_dict["input_layers_of"].update({layer.name: inbound_layers})
input_layers = [
l for l in model.layers if len(l._inbound_nodes[0].inbound_layers) == 0
]
assert len(input_layers) > 0, "No input layer was found."
assert len(input_layers) == len(
model.inputs
), "Number of input layers does not match number of input tensors."
for layer in input_layers:
input_tensor = layer._inbound_nodes[0].input_tensors[0]
assert input_tensor in model.inputs, "Input tensor not found in model inputs."
# only input image need to be quantized
if "input_image" in layer.name:
input_tensor = QDQ(name=layer.name + "_qdq")(input_tensor)
network_dict["new_output_tensor_of"].update({layer.name: input_tensor})
output_layers = []
for layer in model.layers:
if len(layer._outbound_nodes) == 0:
output_layers.append(layer.name)
if type(layer) in output_types:
output_layers = _add_outputs(layer, output_layers)
output_tensors = []
record_cr_rois = None
for layer in model.layers:
if layer.name not in network_dict["input_layers_of"]:
# It's an input layer.
continue
# Determine input tensors.
layer_input = [
network_dict["new_output_tensor_of"][layer_aux]
for layer_aux in network_dict["input_layers_of"][layer.name]
]
if len(layer_input) == 1:
layer_input = layer_input[0]
is_output = layer.name in output_layers
if is_output or type(layer) in [Proposal, ProposalTarget]:
x = layer_input
x = create_layer_from_config(layer, x)
else:
if type(layer) == Conv2D or (
type(layer) == TimeDistributed and
type(layer.layer) == Conv2D):
x = layer_input
if type(layer) == Conv2D:
layer_config = layer.get_config()
else:
layer_config = layer.layer.get_config()
layer_config["quantize"] = False
layer_config["bitwidth"] = 8
conv_act = layer_config["activation"]
if conv_act != "linear":
layer_config["activation"] = "linear"
new_layer = QuantizedConv2D.from_config(layer_config)
if type(layer) == Conv2D:
if layer.use_bias:
kernels, biases = layer.get_weights()
x = new_layer(x)
new_layer.set_weights([kernels, biases])
else:
kernels = layer.get_weights()[0]
x = new_layer(x)
new_layer.set_weights([kernels])
else:
if layer.layer.use_bias:
kernels, biases = layer.layer.get_weights()
x = TimeDistributed(new_layer, name=layer.name)(x)
new_layer.set_weights([kernels, biases])
else:
kernels = layer.layer.get_weights()[0]
x = TimeDistributed(new_layer, name=layer.name)(x)
new_layer.set_weights([kernels])
if conv_act == "linear":
                    # TensorRT folds BN into the previous conv layer and fuses
                    # ReLU with it, so only add a QDQ layer when the sole
                    # consumer of this conv output is neither a BN nor a ReLU.
next_layer_is_relu = False
next_layer = layer._outbound_nodes[0].outbound_layer
if len(layer._outbound_nodes) == 1:
if type(next_layer) == Activation:
next_layer_act = next_layer.get_config()['activation']
if next_layer_act == "relu":
next_layer_is_relu = True
elif type(next_layer) == ReLU:
next_layer_is_relu = True
next_layer_is_bn = False
if type(layer) == Conv2D:
if (len(layer._outbound_nodes) == 1 and
type(layer._outbound_nodes[0].outbound_layer) == BatchNormalization):
next_layer_is_bn = True
else:
next_layer = layer._outbound_nodes[0].outbound_layer
if (len(layer._outbound_nodes) == 1 and
type(next_layer) == TimeDistributed and
type(next_layer.layer) == BatchNormalization):
next_layer_is_bn = True
if (not next_layer_is_relu) and (not next_layer_is_bn):
x = QDQ(name=layer.name + "_qdq")(x)
else:
# TensorRT fuses ReLU back into the Conv. layer.
# Other activations are implemented as separate layers.
# So we need to add QDQ layer unless the activation is ReLU
if conv_act == "relu":
x = ReLU(max_value=6.0)(x)
else:
x = QDQ(name=layer.name + "_qdq")(x)
x = Activation(conv_act)(x)
x = QDQ(name=layer.name + "_act_qdq")(x)
elif type(layer) == DepthwiseConv2D or (
type(layer) == TimeDistributed and
type(layer.layer) == DepthwiseConv2D):
x = layer_input
if type(layer) == DepthwiseConv2D:
layer_config = layer.get_config()
else:
layer_config = layer.layer.get_config()
layer_config["quantize"] = False
layer_config["bitwidth"] = 8
conv_act = layer_config["activation"]
if conv_act != "linear":
layer_config["activation"] = "linear"
new_layer = QuantizedDepthwiseConv2D.from_config(layer_config)
if type(layer) == DepthwiseConv2D:
if layer.use_bias:
kernels, biases = layer.get_weights()
x = new_layer(x)
new_layer.set_weights([kernels, biases])
else:
kernels = layer.get_weights()[0]
x = new_layer(x)
new_layer.set_weights([kernels])
else:
if layer.layer.use_bias:
kernels, biases = layer.layer.get_weights()
x = TimeDistributed(new_layer, name=layer.name)(x)
new_layer.set_weights([kernels, biases])
else:
kernels = layer.layer.get_weights()[0]
x = TimeDistributed(new_layer, name=layer.name)(x)
new_layer.set_weights([kernels])
if conv_act == "linear":
                    # TensorRT folds BN into the previous conv layer and fuses
                    # ReLU with it, so only add a QDQ layer when the sole
                    # consumer of this conv output is neither a BN nor a ReLU.
next_layer_is_relu = False
next_layer = layer._outbound_nodes[0].outbound_layer
if len(layer._outbound_nodes) == 1:
if type(next_layer) == Activation:
next_layer_act = next_layer.get_config()['activation']
if next_layer_act == "relu":
next_layer_is_relu = True
elif type(next_layer) == ReLU:
next_layer_is_relu = True
next_layer_is_bn = False
if type(layer) == DepthwiseConv2D:
if (len(layer._outbound_nodes) == 1 and
type(layer._outbound_nodes[0].outbound_layer) == BatchNormalization):
next_layer_is_bn = True
else:
next_layer = layer._outbound_nodes[0].outbound_layer
if (len(layer._outbound_nodes) == 1 and
type(next_layer) == TimeDistributed and
type(next_layer.layer) == BatchNormalization):
next_layer_is_bn = True
if (not next_layer_is_relu) and (not next_layer_is_bn):
x = QDQ(name=layer.name + "_qdq")(x)
else:
# TensorRT fuses ReLU back into the Conv. layer.
# Other activations are implemented as separate layers.
# So we need to add QDQ layer unless the activation is ReLU
if conv_act == "relu":
x = ReLU(max_value=6.0)(x)
else:
x = QDQ(name=layer.name + "_qdq")(x)
x = Activation(conv_act)(x)
x = QDQ(name=layer.name + "_act_qdq")(x)
elif type(layer) == Dense or (
type(layer) == TimeDistributed and
type(layer.layer) == Dense):
x = layer_input
if type(layer) == Dense:
layer_config = layer.get_config()
else:
layer_config = layer.layer.get_config()
layer_config["quantize"] = False
layer_config["bitwidth"] = 8
conv_act = layer_config["activation"]
if conv_act != "linear":
layer_config["activation"] = "linear"
new_layer = QuantizedDense.from_config(layer_config)
if type(layer) == Dense:
if layer.use_bias:
kernels, biases = layer.get_weights()
x = new_layer(x)
new_layer.set_weights([kernels, biases])
else:
kernels = layer.get_weights()[0]
x = new_layer(x)
new_layer.set_weights([kernels])
else:
if layer.layer.use_bias:
kernels, biases = layer.layer.get_weights()
x = TimeDistributed(new_layer, name=layer.name)(x)
new_layer.set_weights([kernels, biases])
else:
kernels = layer.layer.get_weights()[0]
x = TimeDistributed(new_layer, name=layer.name)(x)
new_layer.set_weights([kernels])
# TensorRT does not fuse FC and Relu6, so always insert QDQ after FC
x = QDQ(name=layer.name + "_qdq")(x)
if conv_act == "relu":
x = ReLU(max_value=6.0)(x)
else:
x = Activation(conv_act)(x)
x = QDQ(name=layer.name + "_act_qdq")(x)
elif type(layer) == Activation:
# Need QDQ layer after every activation layers (except output layers.)
x = layer_input
layer_config = layer.get_config()
if layer_config["activation"] == "relu":
x = ReLU(max_value=6.0, name=layer.name)(x)
else:
x = create_layer_from_config(layer, x)
x = QDQ(name=layer.name + "_qdq")(x)
elif type(layer) == ReLU:
x = layer_input
x = ReLU(max_value=6.0, name=layer.name)(x)
x = QDQ(name=layer.name + "_qdq")(x)
elif type(layer) == BatchNormalization or (
type(layer) == TimeDistributed and type(layer.layer) == BatchNormalization
):
# TensorRT fuses Conv + BN + ReLU together.
# So if previous layer is Conv. and next layer is
# ReLU we should not add QDQ layers.
x = layer_input
                # The BN layer's training=False flag is not serialized in its
                # config, so we have to pin it in the call method.
x = create_layer_from_config(layer, x, freeze_bn=freeze_bn)
next_layer_is_relu = False
if len(layer._outbound_nodes) == 1:
next_layer = layer._outbound_nodes[0].outbound_layer
if type(next_layer) == ReLU:
next_layer_is_relu = True
elif type(next_layer) == Activation:
next_layer_cfg = next_layer.get_config()
if next_layer_cfg["activation"] == "relu":
next_layer_is_relu = True
prev_layer_is_conv = False
if len(layer._inbound_nodes[0].inbound_layers) == 1:
prev_layer = layer._inbound_nodes[0].inbound_layers[0]
if (type(layer) == BatchNormalization and
type(prev_layer) in [Conv2D, DepthwiseConv2D]):
prev_layer_is_conv = True
elif (type(layer) == TimeDistributed and
type(prev_layer) == TimeDistributed and
type(prev_layer.layer) in [Conv2D, DepthwiseConv2D]):
prev_layer_is_conv = True
if not (next_layer_is_relu and prev_layer_is_conv):
x = QDQ(name=layer.name + "_qdq")(x)
else:
x = layer_input
            # CropAndResize only needs the first output from ProposalTarget as
            # its 2nd input for the training model.
if type(layer) == CropAndResize:
if training:
x = [x[0], x[1][0], x[2]]
record_cr_rois = x[1]
x = create_layer_from_config(layer, x)
x = QDQ(name=layer.name + "_qdq")(x)
if len(layer._outbound_nodes) == 0 or (training and 'rpn_out' in layer.name):
output_tensors.append(x)
network_dict["new_output_tensor_of"].update({layer.name: x})
if (not training) and (record_cr_rois is not None):
output_tensors.insert(0, record_cr_rois)
model = keras.models.Model(inputs=model.inputs, outputs=output_tensors)
return model
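# Usage sketch (the training scripts pass the real flags; shown only to
# illustrate the function above):
#   qat_model = create_quantized_keras_model(model, freeze_bn=False,
#                                            training=True)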
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/qat/quantize_keras_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OutputParser layer in FasterRCNN for post-processing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Layer
import tensorflow as tf
class OutputParser(Layer):
'''OutputParser layer for post-processing in FasterRCNN.'''
def __init__(self, max_box_num, regr_std_scaling, iou_thres, score_thres, **kwargs):
"""Initialize the OutputParser layer.
Args:
max_box_num(int): maximum number of total boxes for output.
"""
self.max_box_num = max_box_num
self.regr_std_scaling = regr_std_scaling
self.iou_thres = iou_thres
self.score_thres = score_thres
super(OutputParser, self).__init__(**kwargs)
def compute_output_shape(self, input_shape):
"""compute_output_shape.
Args:
input_shape(tuple): the shape of the input tensor.
Returns:
            A list of output shapes: boxes (None, B, 4), scores (None, B),
            classes (None, B), valid detection counts (None,), and the input RoIs.
"""
return [
(None, self.max_box_num, 4),
(None, self.max_box_num),
(None, self.max_box_num),
(None,),
input_shape[0],
]
def call(self, x, mask=None):
"""Call this layer with inputs.
Args:
x(list): The list of input tensors.
x[0]: the input ROIs in shape (N, B, 4), absolute coordinates.
                x[1]: RCNN confidence in the shape (N, B, C+1), including the background class.
x[2]: RCNN deltas in the shape (N, B, C*4), for valid classes.
x[3]: Input image for clipping boxes.
Returns:
the output tensor of the layer.
"""
assert(len(x) == 4)
# (N, B, 4) to (N, B, 1, 4) for ease of broadcasting
rois = tf.expand_dims(x[0], axis=2)
        # strip the background class from the confidence scores
rcnn_conf = x[1]
rcnn_conf_valid = rcnn_conf[:, :, :-1]
# (N, B, C*4) to (N, B, C, 4)
rcnn_deltas = x[2]
rcnn_deltas = tf.reshape(
rcnn_deltas,
(tf.shape(rcnn_deltas)[0], tf.shape(rcnn_deltas)[1], -1, 4)
)
input_image = x[3]
image_h = tf.cast(tf.shape(input_image)[2], tf.float32)
image_w = tf.cast(tf.shape(input_image)[3], tf.float32)
# apply deltas to RoIs
y1 = rois[:, :, :, 0]
x1 = rois[:, :, :, 1]
y2 = rois[:, :, :, 2]
x2 = rois[:, :, :, 3]
w0 = x2 - x1 + 1.0
h0 = y2 - y1 + 1.0
x0 = x1 + w0 / 2.0
y0 = y1 + h0 / 2.0
tx = rcnn_deltas[:, :, :, 0] / self.regr_std_scaling[0]
ty = rcnn_deltas[:, :, :, 1] / self.regr_std_scaling[1]
tw = rcnn_deltas[:, :, :, 2] / self.regr_std_scaling[2]
th = rcnn_deltas[:, :, :, 3] / self.regr_std_scaling[3]
cx = tx * w0 + x0
cy = ty * h0 + y0
ww = tf.exp(tw) * w0
hh = tf.exp(th) * h0
xx1 = cx - 0.5 * ww
yy1 = cy - 0.5 * hh
xx2 = cx + 0.5 * ww
yy2 = cy + 0.5 * hh
xx1 = tf.clip_by_value(xx1, 0.0, image_w-1.0)
yy1 = tf.clip_by_value(yy1, 0.0, image_h-1.0)
xx2 = tf.clip_by_value(xx2, 0.0, image_w-1.0)
yy2 = tf.clip_by_value(yy2, 0.0, image_h-1.0)
boxes = tf.stack([yy1, xx1, yy2, xx2], axis=-1)
tf_nms = tf.image.combined_non_max_suppression
        # Workaround for a bug in tf.image.combined_non_max_suppression:
        # force it to run on the CPU since the GPU version is flaky.
with tf.device("cpu:0"):
nmsed_boxes, nmsed_scores, nmsed_classes, num_dets = tf_nms(
boxes,
rcnn_conf_valid,
self.max_box_num,
self.max_box_num,
self.iou_thres,
self.score_thres,
clip_boxes=False,
)
return [nmsed_boxes, nmsed_scores, nmsed_classes, num_dets, x[0]]
def get_config(self):
"""Get config for this layer."""
config = {
'max_box_num': self.max_box_num,
'regr_std_scaling': self.regr_std_scaling,
'iou_thres': self.iou_thres,
'score_thres': self.score_thres,
}
base_config = super(OutputParser, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
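# Hedged numeric sketch (illustration only): the same delta decoding that
# call() performs, for a single RoI and class, assuming the hypothetical
# scaling factors regr_std_scaling = [10., 10., 5., 5.].
if __name__ == "__main__":
    import numpy as np
    y1, x1, y2, x2 = 10., 20., 50., 60.    # RoI in absolute coordinates
    tx, ty, tw, th = 1.0, -0.5, 0.2, 0.1   # raw RCNN regression deltas
    sx, sy, sw, sh = 10., 10., 5., 5.
    w0, h0 = x2 - x1 + 1.0, y2 - y1 + 1.0
    x0, y0 = x1 + w0 / 2.0, y1 + h0 / 2.0
    cx = (tx / sx) * w0 + x0
    cy = (ty / sy) * h0 + y0
    ww = np.exp(tw / sw) * w0
    hh = np.exp(th / sh) * h0
    # decoded box, before clipping to the image boundary
    print([cx - 0.5 * ww, cy - 0.5 * hh, cx + 0.5 * ww, cy + 0.5 * hh])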
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/layers/OutputParser.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FasterRCNN custom keras layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Group all the custom layers here so that we can import them
# from here with a single import statement
from nvidia_tao_tf1.cv.faster_rcnn.layers.CropAndResize import CropAndResize
from nvidia_tao_tf1.cv.faster_rcnn.layers.NmsInputs import NmsInputs
from nvidia_tao_tf1.cv.faster_rcnn.layers.OutputParser import OutputParser
from nvidia_tao_tf1.cv.faster_rcnn.layers.Proposal import Proposal
from nvidia_tao_tf1.cv.faster_rcnn.layers.ProposalTarget import ProposalTarget
from nvidia_tao_tf1.cv.faster_rcnn.layers.TFReshape import TFReshape
__all__ = (
'CropAndResize',
'NmsInputs',
'OutputParser',
'Proposal',
'ProposalTarget',
'TFReshape',
)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/layers/custom_layers.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FasterRCNN custom keras layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/layers/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFReshape layer in FasterRCNN."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import keras.backend as K
from keras.layers import Layer
class TFReshape(Layer):
'''New Reshape Layer to mimic TF reshape op.'''
def __init__(self, target_shape, **kwargs):
"""Init function.
Args:
target_shape(tuple): the target shape of this layer.
"""
assert not (None in target_shape), 'Target shape should be all defined.'
        minus_one_num = sum(1 for s in target_shape if s == -1)
if minus_one_num > 1:
raise ValueError('Can have at most one -1 in target shape.')
self.target_shape = list(target_shape)
super(TFReshape, self).__init__(**kwargs)
def compute_output_shape(self, input_shape):
"""compute_output_shape.
Args:
input_shape(tuple): the shape of the input tensor.
Returns:
the target shape.
"""
return tuple(self.target_shape)
def call(self, x, mask=None):
"""call.
Args:
x(list): the list of input tensors.
Returns:
The output tensor with the target shape.
"""
return K.reshape(x, self.target_shape)
def get_config(self):
"""Get config for this layer."""
config = {'target_shape': self.target_shape}
base_config = super(TFReshape, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
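# Hedged usage sketch (illustration only): reshape a (None, 12) tensor into
# (-1, 3, 4), mirroring tf.reshape semantics with one inferred dimension.
if __name__ == "__main__":
    import numpy as np
    from keras.layers import Input
    from keras.models import Model
    inp = Input(shape=(12,))
    out = TFReshape((-1, 3, 4))(inp)
    model = Model(inp, out)
    print(model.predict(np.arange(24.).reshape(2, 12)).shape)  # (2, 3, 4)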
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/layers/TFReshape.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Proposal layer in FasterRCNN."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Layer
import numpy as np
import tensorflow as tf
from nvidia_tao_tf1.cv.faster_rcnn.layers.utils import (
apply_deltas_to_anchors,
batch_op,
clip_bbox_tf,
make_anchors,
nms_tf
)
class Proposal(Layer):
'''Proposal layer to convert RPN output to RoIs.
    In FasterRCNN, the Proposal layer is applied on top of the RPN to convert
    the dense anchors into a smaller, sparse subset of proposals (RoIs). This
    conversion roughly includes the steps below:
    1. Apply deltas to anchors.
    2. Take the pre-NMS top N boxes.
    3. Do NMS against the dense boxes and finally take the post-NMS top N boxes.
'''
def __init__(self, anchor_sizes, anchor_ratios, std_scaling,
rpn_stride, pre_nms_top_N, post_nms_top_N,
nms_iou_thres, activation_type, bs_per_gpu, **kwargs):
'''Initialize the Proposal layer.
Args:
anchor_sizes(list): the list of anchor box sizes, at input image scale.
anchor_ratios(list): the list of anchor box ratios.
std_scaling(float): a constant to do scaling for the RPN deltas output.
            rpn_stride(int): the total stride of RPN relative to the input image, always
                16 in the current implementation.
pre_nms_top_N(int): the number of boxes to retain before doing NMS.
post_nms_top_N(int): the number of boxes to retain after doing NMS.
nms_iou_thres(float): the NMS IoU threshold.
activation_type(str): the activation type for RPN confidence output. Currently
only sigmoid is supported.
bs_per_gpu(int): the batch size for each GPU.
'''
self.anchor_sizes = anchor_sizes
self.anchor_ratios = [np.sqrt(ar) for ar in anchor_ratios]
self.std_scaling = std_scaling
self.rpn_stride = rpn_stride
self.pre_nms_top_N = pre_nms_top_N
self.post_nms_top_N = post_nms_top_N
self.nms_iou_thres = nms_iou_thres
self.activation_type = activation_type
self.bs_per_gpu = bs_per_gpu
super(Proposal, self).__init__(**kwargs)
def compute_output_shape(self, input_shape):
'''Compute the output shape.'''
batch_size = input_shape[0][0]
return tuple([batch_size, self.post_nms_top_N, 4])
def _build_anchors_tf(self, rpn_h, rpn_w):
"""Build the anchors in tensorflow ops."""
anc_x, anc_y = tf.meshgrid(
tf.range(tf.cast(rpn_w, tf.float32)),
tf.range(tf.cast(rpn_h, tf.float32))
)
# this is a simple numpy function to generate the base anchors
ancs = tf.constant(
make_anchors(self.anchor_sizes, self.anchor_ratios).reshape(-1, 2),
dtype=tf.float32
)
anc_pos = self.rpn_stride*(tf.stack((anc_x, anc_y), axis=-1) + 0.5)
anc_pos = tf.reshape(anc_pos, (rpn_h, rpn_w, 1, 2))
anc_pos = tf.broadcast_to(anc_pos, (rpn_h, rpn_w, ancs.shape[0], 2))
anc_left_top = anc_pos - ancs/2.0
full_anc_xywh = tf.concat(
(
anc_left_top,
tf.broadcast_to(ancs,
tf.shape(anc_left_top))
),
axis=-1
)
# broadcast to batch dim: (H, W, A, 4) -> (N, H, W, A, 4)
full_anc_xywh = tf.broadcast_to(
full_anc_xywh,
tf.concat([[self.bs_per_gpu], tf.shape(full_anc_xywh)], axis=-1)
)
return tf.reshape(full_anc_xywh, (-1, 4))
def call(self, x, mask=None):
"""Call Proposal layer with RPN outputs as inputs.
Args:
x(list): the list of input tensors.
x[0]: RPN confidence.
x[1]: RPN deltas.
x[2]: input image of the entire model, for clipping bboxes.
Returns:
The output bbox coordinates(RoIs).
"""
rpn_scores = x[0]
rpn_deltas = x[1] * (1.0 / self.std_scaling)
input_image = x[2]
# get dynamic shapes
rpn_h = tf.shape(rpn_scores)[2]
rpn_w = tf.shape(rpn_scores)[3]
image_h = tf.cast(tf.shape(input_image)[2], tf.float32)
image_w = tf.cast(tf.shape(input_image)[3], tf.float32)
# RPN deltas: (N, A4, H, W) -> (N, H, W, A4) -> (-1, 4)
rpn_deltas = tf.reshape(tf.transpose(rpn_deltas, perm=[0, 2, 3, 1]), (-1, 4))
# Anchors: (N, H, W, A, 4) -> (-1, 4)
full_anc_tf = self._build_anchors_tf(rpn_h, rpn_w)
        # remember these for testing, so we can retrieve them later in tests
self.full_anc_tf = full_anc_tf
self.rpn_deltas = rpn_deltas
all_boxes = apply_deltas_to_anchors(full_anc_tf, rpn_deltas)
num_ancs = len(self.anchor_sizes) * len(self.anchor_ratios)
NHWA4 = (self.bs_per_gpu, rpn_h, rpn_w, num_ancs, 4)
# (N, H, W, A, 4) -> (NAHW, 4)
all_boxes = tf.reshape(tf.transpose(tf.reshape(all_boxes, NHWA4), (0, 3, 1, 2, 4)), (-1, 4))
# for testing
self.all_boxes_tf = all_boxes
all_boxes = clip_bbox_tf(all_boxes, image_w, image_h)
# (N, AHW, 4)
all_boxes = tf.reshape(all_boxes, (self.bs_per_gpu, -1, 4))
# (N, A, H, W) -> (N, AHW)
all_probs = tf.reshape(rpn_scores, (self.bs_per_gpu, -1))
# NMS for each image
result = batch_op([all_boxes, all_probs],
lambda x: nms_tf(*x,
pre_nms_top_N=self.pre_nms_top_N,
post_nms_top_N=self.post_nms_top_N,
nms_iou_thres=self.nms_iou_thres),
self.bs_per_gpu)
return result
def get_config(self):
"""Get config for this layer."""
config = {'anchor_sizes': self.anchor_sizes,
'anchor_ratios': self.anchor_ratios,
'std_scaling': self.std_scaling,
'rpn_stride': self.rpn_stride,
'pre_nms_top_N': self.pre_nms_top_N,
'post_nms_top_N': self.post_nms_top_N,
'nms_iou_thres': self.nms_iou_thres,
'activation_type': self.activation_type,
'bs_per_gpu': self.bs_per_gpu}
base_config = super(Proposal, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
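# Hedged numpy sketch (illustration only): the anchor grid that
# _build_anchors_tf produces for a 2x3 RPN feature map with rpn_stride=16 and
# a single 32-pixel square anchor. Anchor centers sit at stride * (index + 0.5).
if __name__ == "__main__":
    rpn_h, rpn_w, stride, size = 2, 3, 16, 32.0
    anc_x, anc_y = np.meshgrid(np.arange(rpn_w), np.arange(rpn_h))
    centers = stride * (np.stack((anc_x, anc_y), axis=-1) + 0.5)
    left_top = centers - size / 2.0  # (x1, y1) of each anchor, one per cell
    print(np.concatenate([left_top, left_top + size], axis=-1).reshape(-1, 4))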
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/layers/Proposal.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CropAndResize layer in FasterRCNN."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import keras.backend as K
from keras.layers import Layer
import tensorflow as tf
from nvidia_tao_tf1.cv.faster_rcnn.layers.utils import normalize_bbox_tf
class CropAndResize(Layer):
'''Tensorflow style of ROI pooling layer for 2D inputs.
    CropAndResize is a TensorFlow implementation of the original RoI Pooling layer in the
    FasterRCNN paper. In this implementation, TensorFlow crops the RoIs and resizes them
    with bilinear interpolation. This differs from the original implementation, which has
    quantization steps in its RoI Pooling layer.
Reference: Spatial Pyramid Pooling in Deep Convolutional Networks for Visual Recognition,
K. He, X. Zhang, S. Ren, J. Sun.
'''
def __init__(self, pool_size, **kwargs):
"""Initialize the CropAndResize layer.
Args:
pool_size(int): output feature width/height of this layer.
"""
self.pool_size = pool_size
super(CropAndResize, self).__init__(**kwargs)
def build(self, input_shape):
"""Setup some internal parameters.
Args:
input_shape(tuple): the shape of the input tensor. The first input is feature map
from backbone, so the number of channels is input_shape[0][1]. The second input is
RoIs, so the number of RoIs is input_shape[1][1]. These parameters will be used to
compute the output shape.
"""
self.nb_channels = input_shape[0][1]
self.num_rois = input_shape[1][1]
def compute_output_shape(self, input_shape):
"""compute_output_shape.
Args:
input_shape(tuple): the shape of the input tensor.
Returns:
The output 5D tensor shape: (None, num_rois, C, P, P).
"""
batch_size = input_shape[0][0]
return (batch_size, self.num_rois, self.nb_channels, self.pool_size, self.pool_size)
def call(self, x, mask=None):
"""Call this layer with inputs.
Args:
x(list): The list of input tensors.
                x[0]: the input feature map in shape (N, C, H, W)
x[1]: the input ROIs in shape (N, B, 4)
x[2]: input image for normalizing the coordinates
Returns:
the output tensor of the layer.
"""
assert(len(x) == 3)
img = x[0]
rois = x[1]
input_image = x[2]
image_h = tf.cast(tf.shape(input_image)[2], tf.float32)
image_w = tf.cast(tf.shape(input_image)[3], tf.float32)
img_channels_last = tf.transpose(img, (0, 2, 3, 1))
rois_reshaped = K.reshape(rois, (-1, 4))
rois_reshaped = normalize_bbox_tf(rois_reshaped, image_h, image_w)
# (NB, 4)
box_idxs = tf.floor_div(tf.where(tf.ones_like(rois_reshaped[:, 0]))[:, 0],
self.num_rois)
box_idxs = tf.cast(box_idxs, tf.int32)
# for testing
self.rois_reshaped = rois_reshaped
final_output = tf.image.crop_and_resize(img_channels_last,
tf.stop_gradient(rois_reshaped),
tf.stop_gradient(box_idxs),
[self.pool_size, self.pool_size],
method='bilinear')
final_output = tf.transpose(final_output, (0, 3, 1, 2))
# back to 5D (N, B, C, H, W)
final_output = tf.reshape(final_output, [-1, self.num_rois,
tf.shape(final_output)[1],
tf.shape(final_output)[2],
tf.shape(final_output)[3]])
return final_output
def get_config(self):
"""Get config for this layer."""
config = {'pool_size': self.pool_size}
base_config = super(CropAndResize, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
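# Hedged numeric sketch (illustration only): tf.image.crop_and_resize expects
# boxes as normalized (y1, x1, y2, x2) in [0, 1]; this mirrors what
# normalize_bbox_tf computes, assuming a hypothetical 100x200 input image.
if __name__ == "__main__":
    import numpy as np
    roi = np.array([10., 20., 49., 119.])  # absolute (y1, x1, y2, x2)
    image_h, image_w = 100.0, 200.0
    scale = np.array([image_h - 1.0, image_w - 1.0, image_h - 1.0, image_w - 1.0])
    print(roi / scale)  # normalized coordinates fed to crop_and_resize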
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/layers/CropAndResize.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ProposalTarget layer for FasterRCNN."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Layer
import numpy as np
from nvidia_tao_tf1.cv.faster_rcnn.layers.utils import proposal_target_wrapper
class ProposalTarget(Layer):
'''ProposalTarget layer constructed with TF Ops.
    In FasterRCNN, the ProposalTarget layer is applied on top of the Proposal layer
    to generate the target tensors for RCNN. Apart from the Proposal layer output, it
    also needs the groundtruth classes and the groundtruth boxes as inputs. In principle,
    it calculates the IoUs between the groundtruth boxes and the RoIs that come from the
    Proposal layer. With some specific IoU thresholds, it categorizes the RoIs into two
    classes: positive RoIs and negative RoIs. Positive RoIs are used to train both the
    RCNN classifier and the regressor, while negative RoIs are only used to train the
    RCNN classifier.
'''
def __init__(self,
gt_as_roi, iou_high_thres,
iou_low_thres, bg_class_id,
roi_train_bs, roi_positive_ratio,
deltas_scaling, bs_per_gpu, **kwargs):
'''Initialize the ProposalTarget layer.
Args:
            gt_as_roi(bool): Whether or not to use groundtruth boxes as RoIs to train RCNN.
                If this is True, will concatenate the RoIs from the Proposal layer with the
                groundtruth boxes and forward them to RCNN. Default is False.
            iou_high_thres(float): high IoU threshold above which we regard those RoIs as
                positive RoIs.
iou_low_thres(float): low IoU threshold below which we regard those RoIs as negative
RoIs.
bg_class_id(int): the class ID(number) for the background class. For training RCNN
classifier, we always need a background class. But background is not a valid class
for training RCNN regressor. By convention, valid class ID is 0, 1, 2, ..., (N-2),
and background class ID is N-1. So background class ID is always the number of
classes subtracted by 1.
roi_train_bs(int): the batch size used to train RCNN for each image.
roi_positive_ratio(float): the ratio for positive RoIs in the roi_train_bs(batch size).
By convention, this is set to 0.25 in current implementation.
deltas_scaling(list of float): the scaling factors applied to RCNN regressor,
one scalar for each coordinate in (x, y, w, h). List length is 4.
bs_per_gpu(int): the image batch size per GPU for this layer. Due to implementation,
this layer is not agnostic to batch size.
'''
self.gt_as_roi = gt_as_roi
self.iou_high_thres = iou_high_thres
self.iou_low_thres = iou_low_thres
self.roi_train_bs = roi_train_bs
self.roi_positive_ratio = roi_positive_ratio
self.deltas_scaling = np.array(deltas_scaling, dtype=np.float32)
self.bg_class_id = bg_class_id
self.bs_per_gpu = bs_per_gpu
super(ProposalTarget, self).__init__(**kwargs)
def compute_output_shape(self, input_shape):
'''compute the output shape.'''
return [(None, self.roi_train_bs, 4),
(None, self.roi_train_bs, self.bg_class_id+1),
(None, self.roi_train_bs, self.bg_class_id*8),
(None, None)]
def call(self, x, mask=None):
'''call method.
Args:
x(list): the list of input tensors.
x[0]: RoIs from Proposal layer.
x[1]: groundtruth class IDs for each objects.
x[2]: groundtruth bbox coordinates for each objects.
'''
rois = x[0]
gt_class_ids = x[1]
gt_boxes = x[2]
result = proposal_target_wrapper(rois, gt_class_ids, gt_boxes, self.iou_high_thres,
self.iou_low_thres, self.roi_train_bs,
self.roi_positive_ratio, self.deltas_scaling,
self.bg_class_id, self.bs_per_gpu, self.gt_as_roi)
return result
def get_config(self):
"""Get config for this layer."""
config = {'gt_as_roi': self.gt_as_roi,
'iou_high_thres': self.iou_high_thres,
'iou_low_thres': self.iou_low_thres,
'roi_train_bs': self.roi_train_bs,
'roi_positive_ratio': self.roi_positive_ratio,
'deltas_scaling': self.deltas_scaling,
'bg_class_id': self.bg_class_id,
'bs_per_gpu': self.bs_per_gpu}
base_config = super(ProposalTarget, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
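# Hedged numpy sketch (illustration only): the IoU-threshold partition that
# this layer performs on RoIs, assuming iou_high_thres=0.5 and
# iou_low_thres=0.0. RoIs 0 and 3 become positive, RoIs 1 and 2 negative.
if __name__ == "__main__":
    iou_max_over_gt = np.array([0.9, 0.3, 0.05, 0.6])
    positive = np.where(iou_max_over_gt >= 0.5)[0]
    negative = np.where((iou_max_over_gt < 0.5) & (iou_max_over_gt >= 0.0))[0]
    print(positive, negative)  # [0 3] [1 2]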
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/layers/ProposalTarget.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""utils for FasterRCNN custom keras layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
def non_max_suppression_fast(boxes_in, probs, overlap_thresh=0.9, max_boxes=300, scale=1.):
"""non-maximum-suppression in Python."""
    # if there are no boxes, return the inputs unchanged and an empty pick list
if len(boxes_in) == 0:
return boxes_in, probs, np.zeros((0,)).astype(np.int64)
# grab the coordinates of the bounding boxes
boxes = boxes_in * scale
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
# initialize the list of picked indexes
pick = []
# calculate the areas
area = (x2 - x1 + 1.) * (y2 - y1 + 1.)
# sort the bounding boxes
idxs = np.argsort(probs, kind='stable')
# keep looping while some indexes still remain in the indexes
# list
while len(idxs) > 0:
# grab the last index in the indexes list and add the
# index value to the list of picked indexes
last = len(idxs) - 1
i = idxs[last]
pick.append(i)
# find the intersection
xx1_int = np.maximum(x1[i], x1[idxs[:last]])
yy1_int = np.maximum(y1[i], y1[idxs[:last]])
xx2_int = np.minimum(x2[i], x2[idxs[:last]])
yy2_int = np.minimum(y2[i], y2[idxs[:last]])
ww_int = np.maximum(0, xx2_int - xx1_int + 1.)
hh_int = np.maximum(0, yy2_int - yy1_int + 1.)
area_int = ww_int * hh_int
# find the union
area_union = area[i] + area[idxs[:last]] - area_int
# compute the ratio of overlap
overlap = area_int / area_union
        # delete all indexes from the index list that have an overlap above the threshold
idxs = idxs[0:last]
idxs = idxs[np.where(overlap <= overlap_thresh)[0]]
if len(pick) >= max_boxes:
break
    # return only the bounding boxes and scores that were picked
boxes = boxes_in[np.array(pick)]
probs = probs[np.array(pick)]
return boxes, probs, np.array(pick)
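# Hedged usage sketch (illustration only) for the numpy NMS above; not part of
# the original module.
def _nms_usage_example():
    """Box 1 overlaps box 0 with IoU ~0.70 > 0.5, so it is suppressed.

    The disjoint box 2 survives, so the picked indices are [0, 2].
    """
    boxes = np.array([[0., 0., 10., 10.], [1., 1., 11., 11.], [20., 20., 30., 30.]])
    probs = np.array([0.9, 0.8, 0.7])
    _, _, picked = non_max_suppression_fast(boxes, probs, overlap_thresh=0.5)
    return picked  # array([0, 2])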
def unique_with_inverse(x):
'''get unique elements from an array and also return the original index.'''
y, idx = tf.unique(x)
num_segments = tf.shape(y)[0]
num_elems = tf.shape(x)[0]
return (y, idx, tf.unsorted_segment_min(tf.range(num_elems), idx, num_segments))
def safe_gather(tensor, indices, axis=0):
'''Add an assert to tf.gather to make sure there is no out-of-bound indexing.'''
length = tf.shape(tensor)[axis]
min_idx = tf.cond(tf.logical_and(tf.size(indices) > 0, length > 0),
true_fn=lambda: tf.cast(tf.reduce_min(indices), length.dtype),
false_fn=lambda: tf.cast(0, length.dtype))
max_idx = tf.cond(tf.logical_and(tf.size(indices) > 0, length > 0),
true_fn=lambda: tf.cast(tf.reduce_max(indices), length.dtype),
false_fn=lambda: tf.cast(-1, length.dtype))
op_1 = tf.debugging.assert_less(max_idx, length)
op_2 = tf.debugging.assert_less_equal(0, min_idx)
# if tensor is non-empty, check the out-of-bound of indices
# and do gather.
with tf.control_dependencies([op_1, op_2]):
out_non_empty = tf.gather(tensor, indices, axis=axis)
# if tensor is empty, then we should output an empty tensor
    # instead of returning the out-of-bound default values (0)
# create empty tensor with the same rank
empty_tensor = tf.zeros_like(out_non_empty)[..., :0]
out = tf.cond(length > 0,
true_fn=lambda: out_non_empty,
false_fn=lambda: empty_tensor)
return out
def stable_top_k(values, k, axis=-1):
'''Stable descending sort of a tensor and then take top k indices.'''
sort_idx = tf.argsort(values, axis=axis, direction='DESCENDING', stable=True)
# the slice[:k] will retain the first k or the actual size if the actual size
# is less than k
return sort_idx[:k]
def normalize_bbox_tf(boxes, h, w):
'''Normalize the bbox coordinates to the range of (0, 1).'''
scale = tf.stack([h-1.0, w-1.0, h-1.0, w-1.0])
return boxes / scale
def apply_deltas_to_anchors(ancs, deltas):
'''Apply deltas to anchors in RPN.'''
x = ancs[:, 0]
y = ancs[:, 1]
w = ancs[:, 2]
h = ancs[:, 3]
tx = deltas[:, 0]
ty = deltas[:, 1]
tw = deltas[:, 2]
th = deltas[:, 3]
cx = x + 0.5 * w
cy = y + 0.5 * h
cx1 = tx * w + cx
cy1 = ty * h + cy
w1 = tf.exp(tw) * w
h1 = tf.exp(th) * h
x1 = cx1 - 0.5 * w1
y1 = cy1 - 0.5 * h1
return tf.stack((y1, x1, y1 + h1, x1 + w1), axis=-1)
def clip_bbox_tf(boxes, width, height):
    '''Clip bboxes to the boundary of the input images.'''
y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)
x1_clip = tf.minimum(tf.maximum(x1, 0.0), width-1.0)
y1_clip = tf.minimum(tf.maximum(y1, 0.0), height-1.0)
x2_clip = tf.minimum(tf.maximum(x2, 0.0), width-1.0)
y2_clip = tf.minimum(tf.maximum(y2, 0.0), height-1.0)
clipped_boxes = tf.concat((y1_clip, x1_clip, y2_clip, x2_clip), axis=1)
clipped_boxes.set_shape((clipped_boxes.shape[0], 4))
return clipped_boxes
def batch_op(inputs, graph_fn, batch_size, names=None):
'''Batch processing of an Op with given batch size.'''
if not isinstance(inputs, list):
inputs = [inputs]
outputs = []
for i in range(batch_size):
inputs_slice = [tf.gather(x, tf.constant(i, dtype=tf.int32), axis=0) for x in inputs]
output_slice = graph_fn(inputs_slice)
if not isinstance(output_slice, (tuple, list)):
output_slice = [output_slice]
outputs.append(output_slice)
# Change outputs from a list of slices where each is
# a list of outputs to a list of outputs and each has
# a list of slices
outputs = list(zip(*outputs))
if names is None:
names = [None] * len(outputs)
result = [tf.stack(o, axis=0, name=n)
for o, n in zip(outputs, names)]
if len(result) == 1:
result = result[0]
return result
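# Hedged usage sketch (illustration only) for batch_op above; not part of the
# original module.
def _batch_op_usage_example():
    """Run a per-image op over a batch of 2.

    Sums each (5, 4) slice of a (2, 5, 4) tensor over its first axis and
    stacks the results back into a (2, 4) tensor.
    """
    x = tf.zeros((2, 5, 4))
    return batch_op([x], lambda s: tf.reduce_sum(s[0], axis=0), 2)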
def nms_core_py_func(boxes, probs, max_boxes, iou_threshold):
'''NMS core in numpy and wrapped as TF Op with tf.py_func.'''
def _nms_wrapper(_boxes, _probs, _max_boxes, _iou_threshold):
return non_max_suppression_fast(_boxes,
_probs,
overlap_thresh=_iou_threshold,
max_boxes=_max_boxes)[2]
out = tf.py_func(_nms_wrapper, [boxes, probs, max_boxes, iou_threshold],
tf.int64)
return out
def nms_tf(all_boxes, all_probs, pre_nms_top_N,
post_nms_top_N, nms_iou_thres):
'''NMS in TF Op.'''
val_idx = tf.where(tf.logical_and(all_boxes[:, 2] - all_boxes[:, 0] > 1.0,
all_boxes[:, 3] - all_boxes[:, 1] > 1.0))[:, 0]
valid_boxes = safe_gather(all_boxes, val_idx)
valid_probs = safe_gather(all_probs, val_idx)
idx = stable_top_k(valid_probs, pre_nms_top_N)
valid_boxes = safe_gather(valid_boxes, idx)
valid_probs = safe_gather(valid_probs, idx)
# In the rare case of no valid_boxes at all, tf.image.non_max_suppression will
# raise error due to limitation of the TF API.
# So use a conditional to make sure it is not empty.
valid_boxes_non_empty = tf.cond(tf.size(valid_boxes) > 0,
                                    true_fn=lambda: valid_boxes,
false_fn=lambda: tf.constant([[0, 0, 0, 0]], dtype=tf.float32))
valid_probs_non_empty = tf.cond(tf.size(valid_boxes) > 0,
true_fn=lambda: valid_probs,
false_fn=lambda: tf.constant([0], dtype=tf.float32))
sel_idx = tf.image.non_max_suppression(valid_boxes_non_empty,
valid_probs_non_empty,
post_nms_top_N,
iou_threshold=nms_iou_thres)
result = safe_gather(valid_boxes_non_empty, sel_idx)
zero_pads = tf.zeros(
(post_nms_top_N - tf.shape(sel_idx)[0], tf.shape(result)[1]),
dtype=result.dtype
)
result = tf.concat((result, zero_pads), axis=0)
return result
def make_anchors(sizes, ratios):
"""Generate base anchors with different size and aspect ratios."""
sa = np.array(sizes).astype(np.float32)
ra = np.array(ratios).astype(np.float32)
sa = np.repeat(sa, ra.size*2).reshape(-1, ra.size, 2)
ra = np.stack((ra, 1./ra), axis=-1)
ancs = sa*ra
return ancs
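# Hedged numeric sketch (illustration only) for make_anchors; not part of the
# original module.
def _make_anchors_example():
    """One 64-pixel anchor at ratios 1:1 and 2:1.

    Callers pre-square-root the ratios (see Proposal.__init__), so passing
    sqrt(2) here yields a 2:1 anchor with roughly the same area as the 1:1
    anchor: approximately [[64., 64.], [90.5, 45.3]] as (w, h) pairs.
    """
    return make_anchors([64.], [1., np.sqrt(2.)]).reshape(-1, 2)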
def iou_tf(boxes1, boxes2):
'''Calculate IoU.'''
b1 = tf.reshape(tf.tile(tf.expand_dims(boxes1, 1),
[1, 1, tf.shape(boxes2)[0]]), [-1, 4])
b2 = tf.tile(boxes2, [tf.shape(boxes1)[0], 1])
# 2. Compute intersections
b1_y1, b1_x1, b1_y2, b1_x2 = tf.split(b1, 4, axis=1)
b2_y1, b2_x1, b2_y2, b2_x2 = tf.split(b2, 4, axis=1)
y1 = tf.maximum(b1_y1, b2_y1)
x1 = tf.maximum(b1_x1, b2_x1)
y2 = tf.minimum(b1_y2, b2_y2)
x2 = tf.minimum(b1_x2, b2_x2)
intersection = tf.maximum(x2 - x1 + 1.0, 0.) * tf.maximum(y2 - y1 + 1.0, 0.)
# 3. Compute unions
b1_area = tf.maximum(b1_y2 - b1_y1 + 1.0, 0.) * tf.maximum(b1_x2 - b1_x1 + 1.0, 0.)
b2_area = tf.maximum(b2_y2 - b2_y1 + 1.0, 0.) * tf.maximum(b2_x2 - b2_x1 + 1.0, 0.)
union = b1_area + b2_area - intersection
# 4. Compute IoU and reshape to [boxes1, boxes2]
iou = intersection / union
overlaps = tf.reshape(iou, [tf.shape(boxes1)[0], tf.shape(boxes2)[0]])
return overlaps
def calculate_delta_tf(box, gt_box):
'''Calculate deltas of proposals w.r.t. gt boxes.'''
box = tf.cast(box, tf.float32)
gt_box = tf.cast(gt_box, tf.float32)
height = box[:, 2] - box[:, 0] + 1.0
width = box[:, 3] - box[:, 1] + 1.0
center_y = box[:, 0] + 0.5 * height
center_x = box[:, 1] + 0.5 * width
gt_height = gt_box[:, 2] - gt_box[:, 0] + 1.0
gt_width = gt_box[:, 3] - gt_box[:, 1] + 1.0
gt_center_y = gt_box[:, 0] + 0.5 * gt_height
gt_center_x = gt_box[:, 1] + 0.5 * gt_width
dy = (gt_center_y - center_y) / height
dx = (gt_center_x - center_x) / width
dh = tf.log(gt_height / height)
dw = tf.log(gt_width / width)
result = tf.stack([dx, dy, dw, dh], axis=1)
return result
def unpad_tf(boxes):
'''Remove paddings from the boxes.'''
nonzeros = tf.cast(tf.reduce_sum(tf.abs(boxes), axis=-1), tf.bool)
nonzero_boxes = tf.boolean_mask(boxes, nonzeros)
return nonzero_boxes, nonzeros
def generate_proposal_target(rois, gt_boxes, gt_class_ids,
iou_high_thres, iou_low_thres,
roi_train_bs, roi_positive_ratio,
deltas_scaling, bg_class_id):
'''Generate target tensors for RCNN.'''
# unpad
rois_unpad, _ = unpad_tf(rois)
gt_boxes_unpad, nonzero_gt = unpad_tf(gt_boxes)
gt_class_ids = tf.boolean_mask(gt_class_ids, nonzero_gt)
# compute IoU
ious = iou_tf(rois_unpad, gt_boxes_unpad)
iou_max_over_gt = tf.reduce_max(ious, axis=1)
positive_roi_bool = (iou_max_over_gt >= iou_high_thres)
positive_indices = tf.where(positive_roi_bool)[:, 0]
negative_indices = tf.where(tf.logical_and(iou_max_over_gt < iou_high_thres,
iou_max_over_gt >= iou_low_thres))[:, 0]
positive_count = int(roi_train_bs * roi_positive_ratio)
positive_indices = tf.random_shuffle(positive_indices)[:positive_count]
positive_count = tf.shape(positive_indices)[0]
negative_count = roi_train_bs - positive_count
negative_indices = tf.random_shuffle(negative_indices)[:negative_count]
negative_count = tf.shape(negative_indices)[0]
positive_rois = safe_gather(rois_unpad, positive_indices)
negative_rois = safe_gather(rois_unpad, negative_indices)
positive_ious = safe_gather(ious, positive_indices)
roi_gt_box_assignment = tf.cond(
tf.greater(tf.shape(positive_ious)[1], 0),
true_fn=lambda: tf.argmax(positive_ious, axis=1),
false_fn=lambda: tf.cast(tf.constant([]), tf.int64))
roi_gt_boxes = safe_gather(gt_boxes_unpad, roi_gt_box_assignment)
roi_gt_class_ids = safe_gather(gt_class_ids, roi_gt_box_assignment)
deltas = calculate_delta_tf(positive_rois, roi_gt_boxes)
deltas = deltas * deltas_scaling.reshape((1, 4))
pn_rois = tf.concat((positive_rois, negative_rois), axis=0)
num_paddings = tf.maximum(0, roi_train_bs - tf.shape(pn_rois)[0])
pn_rois = tf.pad(pn_rois, [(0, num_paddings), (0, 0)])
# pad bg class id for negative ROIs, shape: (R,)
total_roi_gt_class_ids = tf.pad(roi_gt_class_ids, [(0, negative_count)],
constant_values=bg_class_id)
# convert class IDs to one hot format, shape: (R, C+1)
total_roi_gt_class_ids = tf.one_hot(tf.cast(total_roi_gt_class_ids, tf.int32),
int(bg_class_id+1), axis=-1)
# zero-padding for class IDs, shape: (total, C+1)
total_roi_gt_class_ids = tf.pad(total_roi_gt_class_ids, [(0, num_paddings), (0, 0)])
# construct one-hot deltas to mask out negative and padded ROIs.
# (R, C, 1)
roi_gt_class_ids_oh = tf.one_hot(tf.cast(roi_gt_class_ids, tf.int32),
int(bg_class_id))
roi_gt_class_ids_expand = tf.expand_dims(roi_gt_class_ids_oh, axis=-1)
# (R, 1, 4)
deltas_expanded = tf.expand_dims(deltas, axis=-2)
# (R, C, 4)
deltas_masked = roi_gt_class_ids_expand * deltas_expanded
deltas_boolean_mask = tf.concat((roi_gt_class_ids_expand,
roi_gt_class_ids_expand,
roi_gt_class_ids_expand,
roi_gt_class_ids_expand),
axis=-1)
# (R, C, 8)
deltas_masked = tf.concat((deltas_boolean_mask, deltas_masked), axis=-1)
# padding negative and empty deltas. -> (total, C, 8)
deltas_masked = tf.pad(deltas_masked,
[(0, negative_count + num_paddings), (0, 0), (0, 0)])
    # flatten deltas_masked: (total, C*8)
deltas_masked = tf.reshape(deltas_masked,
[tf.shape(deltas_masked)[0],
tf.shape(deltas_masked)[1]*tf.shape(deltas_masked)[2]])
return [pn_rois,
total_roi_gt_class_ids,
deltas_masked,
positive_count]
def generate_proposal_target_v1(rois, gt_boxes, gt_class_ids,
iou_high_thres, iou_low_thres,
roi_train_bs, roi_positive_ratio,
deltas_scaling, bg_class_id):
    '''Generate target tensors for RCNN, robust to empty positive/negative RoI corner cases.'''
# unpad
rois_unpad, _ = unpad_tf(rois)
gt_boxes_unpad, nonzero_gt = unpad_tf(gt_boxes)
gt_class_ids = tf.boolean_mask(gt_class_ids, nonzero_gt)
# compute IoU
    # In the rare case of empty RoIs after unpadding, iou_tf will raise an error.
    # Fix this by assigning a zero box and setting ious to an all -1 matrix in that
    # case; -1 implies there are no positive or negative RoIs at all.
rois_unpad_nz = tf.cond(tf.size(rois_unpad) > 0,
true_fn=lambda: rois_unpad,
false_fn=lambda: tf.constant([[0., 0., 0., 0.]], dtype=tf.float32))
_ious = iou_tf(rois_unpad_nz, gt_boxes_unpad)
ious = tf.cond(tf.size(rois_unpad) > 0,
true_fn=lambda: _ious,
false_fn=lambda: -1.0*tf.ones([1, tf.shape(gt_boxes_unpad)[0]],
dtype=tf.float32))
iou_max_over_gt = tf.reduce_max(ious, axis=1)
positive_roi_bool = (iou_max_over_gt >= iou_high_thres)
positive_indices = tf.cond(tf.size(tf.where(positive_roi_bool)) > 0,
true_fn=lambda: tf.where(positive_roi_bool)[:, 0],
false_fn=lambda: tf.constant([], dtype=tf.int64))
negative_roi_bool = tf.logical_and(iou_max_over_gt < iou_high_thres,
iou_max_over_gt >= iou_low_thres)
negative_indices = tf.cond(tf.size(tf.where(negative_roi_bool)) > 0,
true_fn=lambda: tf.where(negative_roi_bool)[:, 0],
false_fn=lambda: tf.constant([], dtype=tf.int64))
positive_limit = int(roi_train_bs * roi_positive_ratio)
has_pos = tf.size(positive_indices) > 0
has_neg = tf.size(negative_indices) > 0
# case 1: both positive RoI and negative RoI are not empty
pos_idx_limit = tf.random_shuffle(positive_indices)[:positive_limit]
neg_limit_pn = tf.constant(roi_train_bs, dtype=tf.int32) - tf.shape(pos_idx_limit)[0]
neg_idx_limit_pn = tf.random_shuffle(negative_indices)[:neg_limit_pn]
maxval_n = tf.cond(has_neg,
true_fn=lambda: tf.shape(negative_indices)[0],
false_fn=lambda: tf.constant(1, dtype=tf.int32))
neg_idx_unform_sampler_pn = tf.random.uniform([neg_limit_pn],
maxval=maxval_n,
dtype=tf.int32,
seed=42)
neg_idx_replace_pn = safe_gather(negative_indices, neg_idx_unform_sampler_pn)
neg_idx_pn = tf.cond(tf.shape(negative_indices)[0] >= neg_limit_pn,
true_fn=lambda: neg_idx_limit_pn,
false_fn=lambda: neg_idx_replace_pn)
# case 2: only positive RoIs.
maxval_p = tf.cond(has_pos,
true_fn=lambda: tf.shape(positive_indices)[0],
false_fn=lambda: tf.constant(1, dtype=tf.int32))
uniform_sampler_p = tf.random.uniform(tf.constant([roi_train_bs]),
maxval=maxval_p,
dtype=tf.int32,
seed=42)
pos_idx_batch = tf.cond(tf.size(positive_indices) >= roi_train_bs,
true_fn=lambda: tf.random_shuffle(positive_indices)[:roi_train_bs],
false_fn=lambda: safe_gather(positive_indices, uniform_sampler_p))
# case 3: only negative RoIs.
uniform_sampler_n = tf.random.uniform(tf.constant([roi_train_bs]),
maxval=maxval_n,
dtype=tf.int32,
seed=42)
neg_idx_batch = tf.cond(tf.size(negative_indices) >= roi_train_bs,
true_fn=lambda: tf.random_shuffle(negative_indices)[:roi_train_bs],
false_fn=lambda: safe_gather(negative_indices, uniform_sampler_n))
# case 4: both positive and negative RoIs are empty. leave it empty.
# Finally, combine the 4 cases.
positive_idxs_case_1_2 = tf.cond(tf.logical_and(has_pos, has_neg),
true_fn=lambda: pos_idx_limit,
false_fn=lambda: pos_idx_batch)
positive_idxs_case_3_4 = tf.constant([], dtype=tf.int64)
positive_idx_all = tf.cond(has_pos,
true_fn=lambda: positive_idxs_case_1_2,
false_fn=lambda: positive_idxs_case_3_4)
negative_idxs_case_1_2 = tf.cond(tf.logical_and(has_pos, has_neg),
true_fn=lambda: neg_idx_pn,
false_fn=lambda: tf.constant([], dtype=tf.int64))
negative_idxs_case_3_4 = tf.cond(has_neg,
true_fn=lambda: neg_idx_batch,
false_fn=lambda: tf.constant([], dtype=tf.int64))
negative_idx_all = tf.cond(has_pos,
true_fn=lambda: negative_idxs_case_1_2,
false_fn=lambda: negative_idxs_case_3_4)
positive_count = tf.shape(positive_idx_all)[0]
negative_count = tf.shape(negative_idx_all)[0]
positive_rois = safe_gather(rois_unpad, positive_idx_all)
negative_rois = safe_gather(rois_unpad, negative_idx_all)
positive_ious = safe_gather(ious, positive_idx_all)
roi_gt_box_assignment = tf.cond(
tf.greater(tf.shape(positive_ious)[1], 0),
true_fn=lambda: tf.argmax(positive_ious, axis=1),
false_fn=lambda: tf.cast(tf.constant([]), tf.int64))
roi_gt_boxes = safe_gather(gt_boxes_unpad, roi_gt_box_assignment)
roi_gt_class_ids = safe_gather(gt_class_ids, roi_gt_box_assignment)
_deltas = tf.cond(tf.logical_and(tf.size(positive_rois) > 0, tf.size(roi_gt_boxes) > 0),
true_fn=lambda: calculate_delta_tf(positive_rois, roi_gt_boxes),
false_fn=lambda: tf.constant([], dtype=tf.float32))
deltas = tf.cond(tf.size(_deltas) > 0,
true_fn=lambda: _deltas * deltas_scaling.reshape((1, 4)),
false_fn=lambda: tf.zeros([1, 4], dtype=tf.float32))
pn_rois = tf.concat((positive_rois, negative_rois), axis=0)
num_paddings = tf.maximum(0, roi_train_bs - tf.shape(pn_rois)[0])
pn_rois = tf.cond(tf.size(pn_rois) > 0,
true_fn=lambda: tf.pad(pn_rois, [(0, num_paddings), (0, 0)]),
false_fn=lambda: tf.zeros([num_paddings, 4], dtype=tf.float32))
# pad bg class id for negative ROIs, shape: (R,)
total_roi_gt_class_ids = tf.pad(roi_gt_class_ids, [(0, negative_count)],
constant_values=bg_class_id)
# convert class IDs to one hot format, shape: (R, C+1)
total_roi_gt_class_ids = tf.one_hot(tf.cast(total_roi_gt_class_ids, tf.int32),
int(bg_class_id+1), axis=-1)
# zero-padding for class IDs, shape: (total, C+1)
total_roi_gt_class_ids = tf.pad(total_roi_gt_class_ids, [(0, num_paddings), (0, 0)])
# construct one-hot deltas to mask out negative and padded ROIs.
# (R, C, 1)
roi_gt_class_ids_oh = tf.cond(tf.size(roi_gt_class_ids) > 0,
true_fn=lambda: tf.one_hot(tf.cast(roi_gt_class_ids, tf.int32),
int(bg_class_id)),
false_fn=lambda: tf.zeros([1, int(bg_class_id)],
dtype=tf.float32))
roi_gt_class_ids_expand = tf.expand_dims(roi_gt_class_ids_oh, axis=-1)
# (R, 1, 4)
deltas_expanded = tf.expand_dims(deltas, axis=-2)
# (R, C, 4)
deltas_masked = roi_gt_class_ids_expand * deltas_expanded
deltas_boolean_mask = tf.concat((roi_gt_class_ids_expand,
roi_gt_class_ids_expand,
roi_gt_class_ids_expand,
roi_gt_class_ids_expand),
axis=-1)
# (R, C, 8)
deltas_masked = tf.concat((deltas_boolean_mask, deltas_masked), axis=-1)
# padding negative and empty deltas. -> (total, C, 8)
deltas_masked = tf.pad(deltas_masked,
[(0, roi_train_bs-tf.shape(deltas_masked)[0]), (0, 0), (0, 0)])
    # flatten deltas_masked: (total, C*8)
deltas_masked = tf.reshape(deltas_masked,
[tf.shape(deltas_masked)[0],
tf.shape(deltas_masked)[1]*tf.shape(deltas_masked)[2]])
return [pn_rois,
total_roi_gt_class_ids,
deltas_masked,
positive_count]
def proposal_target_wrapper(rois, gt_class_ids, gt_boxes, iou_high_thres,
iou_low_thres, roi_train_bs, roi_positive_ratio,
deltas_scaling, bg_class_id, bs_per_gpu,
gt_as_roi=False):
'''proposal target wrapper function.'''
if gt_as_roi:
rois = tf.concat((rois, gt_boxes), axis=1)
# remove zero padding in ROIs and GT boxes.
result = batch_op([rois, gt_boxes, gt_class_ids],
lambda x: generate_proposal_target_v1(
*x,
iou_high_thres=iou_high_thres,
iou_low_thres=iou_low_thres,
roi_train_bs=roi_train_bs,
roi_positive_ratio=roi_positive_ratio,
deltas_scaling=deltas_scaling,
bg_class_id=int(bg_class_id)),
bs_per_gpu)
return result
def intersection_np(boxes_a1, boxes_b1):
"""Intersection of two sets of boxes."""
boxes_x1 = np.maximum(boxes_a1[:, :, 0], boxes_b1[:, :, 0])
boxes_y1 = np.maximum(boxes_a1[:, :, 1], boxes_b1[:, :, 1])
boxes_x2 = np.minimum(boxes_a1[:, :, 2], boxes_b1[:, :, 2])
boxes_y2 = np.minimum(boxes_a1[:, :, 3], boxes_b1[:, :, 3])
area = np.maximum(boxes_x2 - boxes_x1 + 1., 0.) * np.maximum(boxes_y2 - boxes_y1 + 1., 0.)
return area
def union_np(boxes_a1, boxes_b1, area_inter):
"""Union of two sets of boxes."""
area_a = np.maximum(boxes_a1[:, :, 2] - boxes_a1[:, :, 0] + 1., 0.) * \
np.maximum(boxes_a1[:, :, 3] - boxes_a1[:, :, 1] + 1., 0.)
area_b = np.maximum(boxes_b1[:, :, 2] - boxes_b1[:, :, 0] + 1., 0.) * \
np.maximum(boxes_b1[:, :, 3] - boxes_b1[:, :, 1] + 1., 0.)
return area_a + area_b - area_inter
def iou_np(boxes_a, boxes_b, scale=1.0):
"""IoU of two sets of boxes."""
m = boxes_a.shape[0]
n = boxes_b.shape[0]
boxes_a1 = np.broadcast_to(boxes_a.reshape(m, 1, 4), (m, n, 4))
boxes_b1 = np.broadcast_to(boxes_b.reshape(1, n, 4), (m, n, 4))
boxes_a1 = boxes_a1 * scale
boxes_b1 = boxes_b1 * scale
area_i = intersection_np(boxes_a1, boxes_b1)
area_u = union_np(boxes_a1, boxes_b1, area_i)
result = area_i / area_u
return result
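# Hedged numeric sketch (illustration only) for iou_np; not part of the
# original module.
def _iou_np_example():
    """Identical boxes give IoU 1 and disjoint boxes give IoU 0.

    Uses the inclusive +1 pixel convention of the helpers above.
    """
    boxes_a = np.array([[0., 0., 9., 9.]])
    boxes_b = np.array([[0., 0., 9., 9.], [20., 20., 29., 29.]])
    return iou_np(boxes_a, boxes_b)  # -> [[1., 0.]]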
def encode_anchor(_anc_boxes, _gt_boxes, scale=1.):
"""Encode ground truth boxes with offset relative to anchor boxes."""
anc_boxes = _anc_boxes * scale
gt_boxes = _gt_boxes * scale
cx = (gt_boxes[:, 0] + gt_boxes[:, 2] + 1.) / 2.0
cy = (gt_boxes[:, 1] + gt_boxes[:, 3] + 1.) / 2.0
cxa = (anc_boxes[:, 0] + anc_boxes[:, 2] + 1.) / 2.0
cya = (anc_boxes[:, 1] + anc_boxes[:, 3] + 1.) / 2.0
tx = (cx - cxa) / (anc_boxes[:, 2] - anc_boxes[:, 0] + 1.)
ty = (cy - cya) / (anc_boxes[:, 3] - anc_boxes[:, 1] + 1.)
tw = np.log((gt_boxes[:, 2] - gt_boxes[:, 0] + 1.) /
(anc_boxes[:, 2] - anc_boxes[:, 0] + 1.))
th = np.log((gt_boxes[:, 3] - gt_boxes[:, 1] + 1.) /
(anc_boxes[:, 3] - anc_boxes[:, 1] + 1.))
return np.stack((tx, ty, tw, th), axis=-1)
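# Hedged numeric sketch (illustration only) for encode_anchor; not part of the
# original module.
def _encode_anchor_example():
    """A groundtruth box identical to its anchor encodes to all-zero deltas."""
    anc = np.array([[10., 10., 49., 49.]])
    return encode_anchor(anc, anc)  # -> [[0., 0., 0., 0.]]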
def unpad_np(boxes):
'''Unpad in numpy.'''
nonzeros = np.sum(np.absolute(boxes), axis=-1).astype(np.bool_)
nonzero_boxes = boxes[nonzeros, :]
return np.ascontiguousarray(np.copy(nonzero_boxes)), nonzeros
def decode_anchor_np(X, T):
"""apply deltas to anchors, numpy version."""
# anchors
x = X[:, 0]
y = X[:, 1]
w = X[:, 2]
h = X[:, 3]
# deltas
tx = T[:, 0]
ty = T[:, 1]
tw = T[:, 2]
th = T[:, 3]
cx = x + w/2.
cy = y + h/2.
cx1 = tx * w + cx
cy1 = ty * h + cy
w1 = np.exp(tw) * w
h1 = np.exp(th) * h
x1 = cx1 - w1/2.
y1 = cy1 - h1/2.
return np.stack((x1, y1, w1, h1), axis=-1)
def _unique_no_sort(ar):
'''numpy unique but without sorting.'''
idx = np.unique(ar, return_index=True)[1]
return np.array([ar[i] for i in sorted(idx)]), np.array(sorted(idx))
def anchor_target_process(num_anchors_for_bbox,
best_anchor_for_bbox,
y_is_box_valid,
y_rpn_overlap,
y_rpn_regr,
best_dx_for_bbox,
n_anchratios,
rpn_mini_batch):
"""post processing of anchor generation."""
# we ensure that every bbox has at least one positive RPN region
for idx in range(num_anchors_for_bbox.shape[0]):
if num_anchors_for_bbox[idx] == 0:
# no box with an IOU greater than zero ...
if best_anchor_for_bbox[idx, 0] == -1:
continue
y_is_box_valid[best_anchor_for_bbox[idx, 0],
best_anchor_for_bbox[idx, 1],
best_anchor_for_bbox[idx, 2] + n_anchratios *
best_anchor_for_bbox[idx, 3]] = 1
y_rpn_overlap[best_anchor_for_bbox[idx, 0],
best_anchor_for_bbox[idx, 1],
best_anchor_for_bbox[idx, 2] + n_anchratios *
best_anchor_for_bbox[idx, 3]] = 1
start = 4 * (best_anchor_for_bbox[idx, 2] + n_anchratios * best_anchor_for_bbox[idx, 3])
y_rpn_regr[best_anchor_for_bbox[idx, 0],
best_anchor_for_bbox[idx, 1],
start:start+4] = best_dx_for_bbox[idx, :]
y_rpn_overlap = np.transpose(y_rpn_overlap, (2, 0, 1))
y_rpn_overlap_save = np.copy(y_rpn_overlap)
y_rpn_overlap = np.expand_dims(y_rpn_overlap, axis=0)
y_is_box_valid = np.transpose(y_is_box_valid, (2, 0, 1))
y_is_box_valid_save = np.copy(y_is_box_valid)
y_is_box_valid = np.expand_dims(y_is_box_valid, axis=0)
y_rpn_regr = np.transpose(y_rpn_regr, (2, 0, 1))
y_rpn_regr = np.expand_dims(y_rpn_regr, axis=0)
pos_locs = np.where(np.logical_and(y_rpn_overlap[0, :, :, :] == 1,
y_is_box_valid[0, :, :, :] == 1))
neg_locs = np.where(np.logical_and(y_rpn_overlap[0, :, :, :] == 0,
y_is_box_valid[0, :, :, :] == 1))
num_pos = len(pos_locs[0])
    # One issue is that the RPN has many more negative than positive regions,
    # so we turn off some of the negative regions. We also limit the total to
    # num_regions (the rpn_mini_batch size, typically 256).
num_regions = rpn_mini_batch
if len(pos_locs[0]) > num_regions//2:
val_locs = np.random.choice(np.arange(len(pos_locs[0])), len(pos_locs[0]) -
num_regions//2, replace=False)
y_is_box_valid[0, pos_locs[0][val_locs], pos_locs[1][val_locs],
pos_locs[2][val_locs]] = 0
# also mask out the conf for pos
y_rpn_overlap[0, pos_locs[0][val_locs],
pos_locs[1][val_locs], pos_locs[2][val_locs]] = 0
num_pos = num_regions//2
if len(neg_locs[0]) + num_pos > num_regions:
val_locs = np.random.choice(np.arange(len(neg_locs[0])), len(neg_locs[0]) -
(num_regions - num_pos), replace=False).tolist()
y_is_box_valid[0, neg_locs[0][val_locs],
neg_locs[1][val_locs], neg_locs[2][val_locs]] = 0
pos_locs = np.where(np.logical_and(y_rpn_overlap[0, :, :, :] == 1,
y_is_box_valid[0, :, :, :] == 1))
neg_locs = np.where(np.logical_and(y_rpn_overlap[0, :, :, :] == 0,
y_is_box_valid[0, :, :, :] == 1))
num_pos = len(pos_locs[0])
y_rpn_cls = np.concatenate([y_is_box_valid, y_rpn_overlap], axis=1)
y_rpn_regr = np.concatenate([np.repeat(y_rpn_overlap, 4, axis=1), y_rpn_regr], axis=1)
return np.copy(y_rpn_cls), np.copy(y_rpn_regr), y_is_box_valid_save, y_rpn_overlap_save
def _compute_rpn_target_np(input_bboxes, anchor_sizes, anchor_ratios,
rpn_stride, rpn_h, rpn_w, image_h, image_w,
rpn_train_bs, iou_high_thres, iou_low_thres):
'''Compute RPN target via numpy.'''
input_bboxes, _ = unpad_np(input_bboxes)
downscale = float(rpn_stride)
num_anchors = len(anchor_sizes) * len(anchor_ratios)
n_anchratios = len(anchor_ratios)
# initialise empty output objectives
output_height = rpn_h
output_width = rpn_w
resized_width = image_w
resized_height = image_h
y_rpn_overlap = np.zeros((output_height, output_width, num_anchors))
y_is_box_valid = np.zeros((output_height, output_width, num_anchors))
y_rpn_regr = np.zeros((output_height, output_width, num_anchors * 4))
num_bboxes = input_bboxes.shape[0]
num_anchors_for_bbox = np.zeros(num_bboxes).astype(int)
best_anchor_for_bbox = -1*np.ones((num_bboxes, 4)).astype(int)
best_dx_for_bbox = np.zeros((num_bboxes, 4)).astype(np.float32)
# get the GT box coordinates, and resize to account for image resizing
gta = input_bboxes[:, (1, 0, 3, 2)]
# rpn ground truth
anc_x, anc_y = np.meshgrid(np.arange(output_width), np.arange(output_height))
ancs = make_anchors(anchor_sizes, anchor_ratios).reshape(-1, 2)
xy_s = np.stack((anc_x, anc_y), axis=-1) + 0.5
anc_pos = downscale * xy_s.reshape(output_height, output_width, 1, 2)
anc_pos = np.broadcast_to(anc_pos, (output_height, output_width, ancs.shape[0], 2))
anc_left_top = anc_pos - ancs/2.0
anc_right_bot = anc_pos + ancs/2.0
full_anc = np.concatenate((anc_left_top, anc_right_bot), axis=-1)
    # remove outlier anchors (by setting their IoU to zero)
full_anc = full_anc.reshape(-1, 4).astype(np.float32)
valid_anc_mask = ((full_anc[:, 0] >= 0.) &
(full_anc[:, 1] >= 0.) &
(full_anc[:, 2] <= resized_width - 1.) &
(full_anc[:, 3] <= resized_height - 1.))
iou_area = iou_np(full_anc.reshape(-1, 4), gta)
valid_anc_mask_reshape = valid_anc_mask.astype(np.float32).reshape(iou_area.shape[0], 1)
iou_area = iou_area * np.broadcast_to(valid_anc_mask_reshape, iou_area.shape)
per_anchor = np.amax(iou_area, axis=1)
# positive anchors by iou threshold
positive_anchors = np.where(per_anchor >= iou_high_thres)[0]
positive_anchors_gt = np.argmax(iou_area[positive_anchors, :], axis=1)
positive_anchors_gt_unique = np.unique(positive_anchors_gt)
# negative anchors by iou threshold, excluding outlier anchors
negative_anchors = np.where(np.logical_and(per_anchor <= iou_low_thres,
valid_anc_mask.astype(np.float32) > 0.0))[0]
# build outputs
# build positive anchors
if positive_anchors.size > 0:
positive_anchors_idxs = np.unravel_index(positive_anchors,
(output_height, output_width, num_anchors))
y_is_box_valid[positive_anchors_idxs[0],
positive_anchors_idxs[1],
positive_anchors_idxs[2]] = 1
y_rpn_overlap[positive_anchors_idxs[0],
positive_anchors_idxs[1],
positive_anchors_idxs[2]] = 1
start = positive_anchors_idxs[2]
best_regr = encode_anchor(full_anc[positive_anchors, :],
gta[positive_anchors_gt, :])
y_rpn_regr_4d_view = y_rpn_regr.reshape((output_height,
output_width,
num_anchors,
4))
y_rpn_regr_4d_view[positive_anchors_idxs[0],
positive_anchors_idxs[1],
start,
:] = best_regr
# build negative anchors
if negative_anchors.size > 0:
negative_anchors_idxs = np.unravel_index(negative_anchors,
(output_height, output_width, num_anchors))
y_is_box_valid[negative_anchors_idxs[0],
negative_anchors_idxs[1],
negative_anchors_idxs[2]] = 1
y_rpn_overlap[negative_anchors_idxs[0],
negative_anchors_idxs[1],
negative_anchors_idxs[2]] = 0
# build other data for missed positive anchors.
num_anchors_for_bbox[positive_anchors_gt_unique] = 1
per_gt = np.amax(iou_area, axis=0)
per_gt_best_anchor_idxs = np.argmax(iou_area, axis=0)
# for testing
per_gt_best_anc_ = np.copy(per_gt_best_anchor_idxs)
per_gt_best_anc_[np.where(per_gt <= 0.0)[0]] = -1
best_anc_idxs = np.unravel_index(per_gt_best_anchor_idxs,
(output_height,
output_width,
len(anchor_sizes),
len(anchor_ratios)))
best_anchor_for_bbox[...] = np.stack(best_anc_idxs, -1)[:, (0, 1, 3, 2)]
best_dx_for_bbox[...] = encode_anchor(full_anc[per_gt_best_anchor_idxs, :], gta)
valid_best_anc_mask_out = np.where(per_gt <= 0.0)[0]
best_anchor_for_bbox[valid_best_anc_mask_out, :] = [-1, -1, -1, -1]
unmapped_box = np.where(num_anchors_for_bbox == 0)[0]
unmapped_anc = per_gt_best_anc_[unmapped_box]
unmapped_anc, _anc_unique_idx = _unique_no_sort(unmapped_anc[::-1])
unmapped_anc = unmapped_anc[::-1]
_box_unique_idx = unmapped_box.shape[0] - 1 - _anc_unique_idx
_box_unique_idx = _box_unique_idx[::-1].astype(np.int32)
unmapped_box = unmapped_box[_box_unique_idx]
res = anchor_target_process(num_anchors_for_bbox,
best_anchor_for_bbox,
y_is_box_valid,
y_rpn_overlap,
y_rpn_regr,
best_dx_for_bbox,
n_anchratios,
rpn_train_bs)
return res[0:2] + (iou_area, positive_anchors_gt, per_gt_best_anc_, full_anc, unmapped_box,
unmapped_anc) + res[2:]
def compute_rpn_target_np(input_bboxes, anchor_sizes, anchor_ratios,
rpn_stride, rpn_h, rpn_w, image_h, image_w,
rpn_train_bs, iou_high_thres, iou_low_thres):
    '''Wrapper for _compute_rpn_target_np that drops the extra outputs, which are for testing.'''
res = _compute_rpn_target_np(input_bboxes, anchor_sizes, anchor_ratios,
rpn_stride, rpn_h, rpn_w, image_h, image_w,
rpn_train_bs, iou_high_thres, iou_low_thres)
return res[0:2]
def rpn_to_roi_kernel_np(rpn_layer, regr_layer, std_scaling,
anchor_sizes, anchor_ratios,
rpn_stride, dim_ordering, input_w, input_h,
use_regr=True, max_boxes=300,
overlap_thresh=0.9, rpn_pre_nms_top_N=0,
activation_type='sigmoid'):
"""Proposal layer in numpy."""
regr_layer = regr_layer / std_scaling
if dim_ordering == 0:
(rows, cols) = rpn_layer.shape[2:]
elif dim_ordering == 1:
(rows, cols) = rpn_layer.shape[1:3]
anc_x, anc_y = np.meshgrid(np.arange(cols), np.arange(rows))
ancs = make_anchors(anchor_sizes, anchor_ratios).reshape(-1, 2)
anc_pos = rpn_stride*(np.stack((anc_x, anc_y), axis=-1) + 0.5).reshape(rows, cols, 1, 2)
anc_pos = np.broadcast_to(anc_pos, (rows, cols, ancs.shape[0], 2))
anc_left_top = anc_pos - ancs/2.0
full_anc_xywh = np.concatenate((anc_left_top,
np.broadcast_to(ancs, anc_left_top.shape)),
axis=-1).astype(np.float32)
if dim_ordering == 0:
# if NCHW, it's in (1, A*4, H, W) shape. do reshape
regr = np.transpose(regr_layer[0, ...], (1, 2, 0))
else:
# if NHWC, it's in (1, H, W, A*4). no need to reshape.
regr = regr_layer[0, ...]
all_boxes = decode_anchor_np(full_anc_xywh.reshape(-1, 4), regr.reshape(-1, 4))
# (H, W, A, 4) -> (A, H, W, 4) to match the prob shape: (A, H, W)
all_boxes = all_boxes.reshape(rows, cols,
ancs.size//2, 4).transpose((2, 0, 1, 3)).reshape((-1, 4))
# back to x1, y1, x2, y2 format.
all_boxes[:, 2] += all_boxes[:, 0]
all_boxes[:, 3] += all_boxes[:, 1]
all_boxes_save = np.copy(all_boxes)
rpn_deltas_save = np.copy(regr.reshape(-1, 4))
# clip boxes
all_boxes[:, 0] = np.minimum(np.maximum(0., all_boxes[:, 0]), input_w-1.)
all_boxes[:, 1] = np.minimum(np.maximum(0., all_boxes[:, 1]), input_h-1.)
all_boxes[:, 2] = np.minimum(np.maximum(0., all_boxes[:, 2]), input_w-1.)
all_boxes[:, 3] = np.minimum(np.maximum(0., all_boxes[:, 3]), input_h-1.)
if dim_ordering == 1:
if activation_type == 'softmax':
rpn_layer = rpn_layer[:, :, :, rpn_layer.shape[3]//2:]
all_probs = rpn_layer.transpose((0, 3, 1, 2)).reshape((-1))
else:
if activation_type == 'softmax':
rpn_layer = rpn_layer[:, rpn_layer.shape[1]//2:, :, :]
all_probs = rpn_layer.reshape((-1))
x1 = all_boxes[:, 0]
y1 = all_boxes[:, 1]
x2 = all_boxes[:, 2]
y2 = all_boxes[:, 3]
    # keep only the boxes whose width and height are greater than one pixel
idxs = np.where((x1 - x2 < -1.) & (y1 - y2 < -1.))[0]
all_boxes = all_boxes[idxs, :]
all_probs = all_probs[idxs]
if all_probs.shape[0] > rpn_pre_nms_top_N > 0:
sorted_idx = np.argsort(-1.0*all_probs, kind='stable')
all_boxes = np.copy(all_boxes[sorted_idx[0:rpn_pre_nms_top_N], :])
all_probs = np.copy(all_probs[sorted_idx[0:rpn_pre_nms_top_N]])
result = non_max_suppression_fast(np.copy(all_boxes),
np.copy(all_probs),
overlap_thresh=overlap_thresh,
max_boxes=max_boxes)
return result[0], full_anc_xywh, all_boxes_save, rpn_deltas_save
def _rpn_to_roi(rpn_layer, regr_layer,
input_w, input_h, anchor_sizes,
anchor_ratios, std_scaling, rpn_stride,
use_regr=True, max_boxes=300,
overlap_thresh=0.9, rpn_pre_nms_top_N=0):
"""Wrapper for proposal numpy layer."""
anchor_sizes = anchor_sizes # noqa pylint: disable = W0127
anchor_ratios = [np.sqrt(r) for r in anchor_ratios]
assert rpn_layer.shape[0] == 1
dim_ordering = 0
new_result = rpn_to_roi_kernel_np(rpn_layer, regr_layer, std_scaling,
anchor_sizes, anchor_ratios,
rpn_stride, dim_ordering,
use_regr=use_regr, max_boxes=max_boxes,
overlap_thresh=overlap_thresh,
rpn_pre_nms_top_N=rpn_pre_nms_top_N,
activation_type='sigmoid',
input_w=input_w,
input_h=input_h)
return (np.expand_dims(new_result[0], axis=0), new_result[1], new_result[2],
new_result[3])
def rpn_to_roi(rpn_layer, regr_layer,
input_w, input_h, anchor_sizes,
anchor_ratios, std_scaling, rpn_stride,
use_regr=True, max_boxes=300,
overlap_thresh=0.9, rpn_pre_nms_top_N=0):
'''Wrapper for _rpn_to_roi to remove outputs for testing.'''
return _rpn_to_roi(rpn_layer, regr_layer,
input_w, input_h, anchor_sizes,
anchor_ratios, std_scaling, rpn_stride,
use_regr=use_regr, max_boxes=max_boxes,
overlap_thresh=overlap_thresh,
rpn_pre_nms_top_N=rpn_pre_nms_top_N)[0]
def calc_iou_np(R, bboxes, class_ids, iou_high_thres,
iou_low_thres, deltas_scaling, gt_as_roi=False,
num_classes=21):
"""compute IoU for proposal layer."""
    # (y1, x1, y2, x2) to (x1, y1, x2, y2)
gta = np.copy(bboxes[:, (1, 0, 3, 2)])
R = np.copy(R[:, (1, 0, 3, 2)])
if gt_as_roi:
R = np.concatenate((R, gta), axis=0)
iou_area = iou_np(R, gta)
best_gt = np.argmax(iou_area, axis=1)
best_gt_iou = np.amax(iou_area, axis=1)
# build outputs
# positive ROI labels
positive_idx = np.where(best_gt_iou >= iou_high_thres)[0]
if positive_idx.size > 0:
positive_gt_idx = best_gt[positive_idx]
positive_class_idx = class_ids[positive_gt_idx]
positive_class_idx = np.array(positive_class_idx).astype(np.int32)
positive_class_label = np.zeros((positive_idx.size, num_classes))
positive_class_label[np.arange(positive_class_idx.size), positive_class_idx] = 1
positive_coords = np.zeros((positive_idx.size, num_classes-1, 4))
positive_labels = np.zeros((positive_idx.size, num_classes-1, 4))
deltas = encode_anchor(R[positive_idx, :],
gta[positive_gt_idx, :],
scale=1.)
sx = deltas_scaling[0]
sy = deltas_scaling[1]
sw = deltas_scaling[2]
sh = deltas_scaling[3]
coords_scaling = np.array([sx, sy, sw, sh]).reshape(1, 4)
positive_coords[np.arange(positive_idx.size),
positive_class_idx,
:] = deltas*coords_scaling
positive_labels[np.arange(positive_idx.size),
positive_class_idx,
:] = [1, 1, 1, 1]
# negative ROI labels
_lb = best_gt_iou >= iou_low_thres
_ub = best_gt_iou < iou_high_thres
negative_idx = np.where(np.logical_and(_lb, _ub))[0]
if negative_idx.size > 0:
negative_class_label = np.zeros((negative_idx.size, num_classes))
negative_class_label[:, -1] = 1
negative_coords = np.zeros((negative_idx.size, num_classes-1, 4))
negative_labels = np.zeros((negative_idx.size, num_classes-1, 4))
# positive and negative ROIs.
pn_idx = np.concatenate((positive_idx, negative_idx))
    # every ROI here falls into either the positive or the negative range,
    # so the two index sets cannot both be empty.
    assert pn_idx.size > 0, 'Both positive and negative ROIs are empty, this should never happen.'
pn_ROIs = R[pn_idx, :]
X = pn_ROIs
if positive_idx.size > 0 and negative_idx.size > 0:
Y1 = np.concatenate((positive_class_label, negative_class_label), axis=0)
elif positive_idx.size > 0:
Y1 = positive_class_label
else:
Y1 = negative_class_label
if positive_idx.size > 0 and negative_idx.size > 0:
_pos_labels_concat = np.concatenate((positive_labels, positive_coords), axis=-1)
_neg_labels_concat = np.concatenate((negative_labels, negative_coords), axis=-1)
Y2 = np.concatenate((_pos_labels_concat, _neg_labels_concat), axis=0)
elif positive_idx.size > 0:
Y2 = np.concatenate((positive_labels, positive_coords), axis=-1)
else:
Y2 = np.concatenate((negative_labels, negative_coords), axis=-1)
# shapes: (B, 4), (B, C), (B, C-1, 8)
return X, Y1, Y2
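# Example (illustrative sketch, not part of the original module): building
# classification and regression targets for two ROIs against one groundtruth
# box of class 0. Inputs are (y1, x1, y2, x2); all values are assumptions.
#
#     R = np.array([[10., 10., 110., 110.], [200., 200., 260., 260.]])
#     bboxes = np.array([[12., 8., 108., 112.]])
#     X, Y1, Y2 = calc_iou_np(R, bboxes, class_ids=np.array([0]),
#                             iou_high_thres=0.5, iou_low_thres=0.0,
#                             deltas_scaling=[10., 10., 5., 5.],
#                             num_classes=4)
#     # X:  (2, 4) ROIs; Y1: (2, 4) one-hot classes incl. background;
#     # Y2: (2, 3, 8) per-class regression masks and scaled deltas.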
def sample_proposals(class_id_gts, roi_mini_batch, positive_ratio=0.25):
    """Sample positive and negative ROIs for the proposal target layer."""
"""Sample ROIs in proposal layer."""
negative_idx = np.where(class_id_gts[:, -1] == 1)[0]
positive_idx = np.where(class_id_gts[:, -1] == 0)[0]
positive_max_num = int(roi_mini_batch * positive_ratio)
if positive_idx.size > 0 and negative_idx.size > 0:
if positive_idx.size >= positive_max_num:
pos_idx = np.random.choice(positive_idx, positive_max_num, replace=False)
else:
pos_idx = positive_idx
neg_num = roi_mini_batch - pos_idx.size
if negative_idx.size >= neg_num:
neg_idx = np.random.choice(negative_idx, neg_num, replace=False)
else:
neg_idx = np.random.choice(negative_idx, neg_num, replace=True)
sample_proposals = np.concatenate((pos_idx, neg_idx))
elif positive_idx.size > 0:
if positive_idx.size >= roi_mini_batch:
pos_idx = np.random.choice(positive_idx, roi_mini_batch, replace=False)
else:
pos_idx = np.random.choice(positive_idx, roi_mini_batch, replace=True)
sample_proposals = pos_idx
elif negative_idx.size > 0:
if negative_idx.size >= roi_mini_batch:
neg_idx = np.random.choice(negative_idx, roi_mini_batch, replace=False)
else:
neg_idx = np.random.choice(negative_idx, roi_mini_batch, replace=True)
sample_proposals = neg_idx
else:
sample_proposals = np.array([], dtype=np.float32)
return sample_proposals
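# Example (illustrative sketch, not part of the original module): sampling a
# fixed-size ROI mini-batch from one-hot class labels, where the last column
# flags background ROIs. All values below are assumptions for demonstration.
#
#     class_id_gts = np.zeros((6, 4), dtype=np.float32)
#     class_id_gts[:2, 0] = 1.   # two foreground ROIs of class 0
#     class_id_gts[2:, -1] = 1.  # four background ROIs
#     idxs = sample_proposals(class_id_gts, roi_mini_batch=8)
#     # idxs holds 8 row indices: at most 2 positives (25% of 8) and the
#     # remainder drawn from the negatives, with replacement if necessary.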
def proposal_target_py_func(rois, gt_bboxes, gt_class_ids, iou_high_thres,
iou_low_thres, roi_train_bs, roi_positive_ratio,
deltas_scaling, bg_class_id):
    '''Compute proposal targets in numpy, wrapped below as a tf.py_func.'''
def _core_func(rois, gt_bboxes, gt_class_ids, iou_high_thres,
iou_low_thres, roi_train_bs, roi_positive_ratio,
deltas_scaling, bg_class_id):
rois, _ = unpad_np(rois)
gt_bboxes, nz = unpad_np(gt_bboxes)
gt_class_ids = gt_class_ids[nz]
rois_np, cls_np, deltas_np = calc_iou_np(rois, gt_bboxes, gt_class_ids,
iou_high_thres, iou_low_thres,
deltas_scaling,
num_classes=bg_class_id+1)
roi_idxs_np = sample_proposals(cls_np, roi_train_bs, roi_positive_ratio)
        # pad with zeros in case there is no RoI at all.
if np.size(roi_idxs_np) == 0:
rois_ret = np.zeros((roi_train_bs, 4), dtype=np.float32)
cls_ret = np.zeros((roi_train_bs, bg_class_id+1), dtype=np.float32)
deltas_ret = np.zeros((roi_train_bs, bg_class_id*8), dtype=np.float32)
return rois_ret, cls_ret, deltas_ret
return (rois_np[roi_idxs_np, ...].astype(np.float32),
cls_np[roi_idxs_np, ...].astype(np.float32),
deltas_np[roi_idxs_np, ...].astype(np.float32))
return tf.py_func(_core_func,
[rois, gt_bboxes, gt_class_ids, iou_high_thres,
iou_low_thres, roi_train_bs, roi_positive_ratio,
deltas_scaling, bg_class_id],
(tf.float32, tf.float32, tf.float32))
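# Example (illustrative sketch, not part of the original module): wiring the
# proposal-target op into a TF1 graph. Placeholder shapes and thresholds are
# assumptions for demonstration only.
#
#     rois_ph = tf.placeholder(tf.float32, (None, 4))
#     gt_boxes_ph = tf.placeholder(tf.float32, (None, 4))
#     gt_cls_ph = tf.placeholder(tf.float32, (None,))
#     rois_t, cls_t, deltas_t = proposal_target_py_func(
#         rois_ph, gt_boxes_ph, gt_cls_ph,
#         iou_high_thres=0.5, iou_low_thres=0.0,
#         roi_train_bs=256, roi_positive_ratio=0.25,
#         deltas_scaling=np.array([10., 10., 5., 5.], dtype=np.float32),
#         bg_class_id=3)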
def normalize_rois(rois, rpn_h, rpn_w):
"""Normalize the ROIs to the range of (0, 1)."""
x = rois[:, 0]
y = rois[:, 1]
x2 = rois[:, 2]
y2 = rois[:, 3]
rois_n = np.stack((y/np.array(rpn_h-1.0).astype(np.float32),
x/np.array(rpn_w-1.0).astype(np.float32),
y2/np.array(rpn_h-1.0).astype(np.float32),
x2/np.array(rpn_w-1.0).astype(np.float32)),
axis=-1)
return rois_n
def normalize_rois_no_perm(rois, rpn_h, rpn_w):
"""Normalize the ROIs to the range of (0, 1), without changing the coordinate ordering."""
y = rois[:, 0]
x = rois[:, 1]
y2 = rois[:, 2]
x2 = rois[:, 3]
rois_n = np.stack((y/np.array(rpn_h-1.0).astype(np.float32),
x/np.array(rpn_w-1.0).astype(np.float32),
y2/np.array(rpn_h-1.0).astype(np.float32),
x2/np.array(rpn_w-1.0).astype(np.float32)),
axis=-1)
return rois_n
def normalize_rois_no_perm_pf(rois, rpn_h, rpn_w):
'''py_func wrapper for normalize_rois_no_perm.'''
return tf.py_func(normalize_rois_no_perm, [rois, rpn_h, rpn_w], tf.float32)
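# Example (illustrative sketch, not part of the original module): using the
# py_func wrapper above to normalize ROIs inside a graph. Shapes and the
# feature-map size are assumptions for demonstration only.
#
#     rois_ph = tf.placeholder(tf.float32, shape=(None, 4))  # (y1, x1, y2, x2)
#     rois_01 = normalize_rois_no_perm_pf(rois_ph, 24.0, 80.0)
#     # rois_01 divides each coordinate by (rpn_h - 1) or (rpn_w - 1), giving
#     # values in [0, 1] as expected by tf.image.crop_and_resize-style ops.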
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/layers/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NmsInputs layer in FasterRCNN for post-processing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Layer
import tensorflow as tf
class NmsInputs(Layer):
'''Prepare input tensors for NMS plugin for post-processing in FasterRCNN.'''
def __init__(self, regr_std_scaling, **kwargs):
"""Initialize the NmsInputs layer.
Args:
regr_std_scaling(tuple): The variances for the RCNN deltas.
"""
self.regr_std_scaling = regr_std_scaling
super(NmsInputs, self).__init__(**kwargs)
def build(self, input_shape):
"""Setup some internal parameters."""
self.batch_size = input_shape[0][0]
self.roi_num = input_shape[0][1]
self.class_num = input_shape[2][2] // 4
def compute_output_shape(self, input_shape):
"""compute_output_shape.
Args:
input_shape(tuple): the shape of the input tensor.
Returns:
            The output shapes for loc_data, conf_data and prior_data.
"""
batch_size = input_shape[0][0]
roi_num = input_shape[0][1]
class_num = input_shape[2][2] // 4
return [
(batch_size, roi_num * (class_num + 1) * 4, 1, 1),
(batch_size, roi_num * (class_num + 1), 1, 1),
(batch_size, 2, roi_num * 4, 1),
]
def call(self, x, mask=None):
"""Call this layer with inputs.
Args:
x(list): The list of input tensors.
x[0]: the input ROIs in shape (N, B, 4), absolute coordinates.
                x[1]: RCNN confidence in the shape (N, B, C+1), including the background.
x[2]: RCNN deltas in the shape (N, B, C*4), for valid classes.
Returns:
the output tensor of the layer.
"""
# ROIs: (N, B, 4) to (N, 1, B*4, 1)
rois = x[0]
if self.batch_size is None:
self.batch_size = tf.shape(rois)[0]
rois = tf.reshape(rois, (self.batch_size, self.roi_num, 4, 1))
# ROIs is (y1, x1, y2, x2), reorg to (x1, y1, x2, y2) conforming with NMSPlugin
rois = tf.concat(
(rois[:, :, 1:2, :],
rois[:, :, 0:1, :],
rois[:, :, 3:4, :],
rois[:, :, 2:3, :]),
axis=2
)
rois = tf.reshape(rois, (self.batch_size, 1, self.roi_num * 4, 1))
        # variances are already encoded in the regression targets, so simply
        # concatenate the ROIs with a dummy copy of themselves to get the
        # (N, 2, B*4, 1) prior_data tensor expected by the NMS plugin.
prior_data = tf.concat((rois, rois), axis=1, name="prior_data")
        # conf_data: reshape (N, B, C+1) to (N, B*(C+1), 1, 1),
        # keeping the background class scores.
conf_data = tf.reshape(
x[1],
(self.batch_size, self.roi_num * (self.class_num + 1), 1, 1),
name="conf_data"
)
# loc_data: -> (N, B*(C+1)*4, 1, 1)
# (N, B, C*4) to (N, B, C, 4)
loc_data = tf.reshape(x[2], (self.batch_size, self.roi_num, self.class_num, 4))
loc_data_0 = loc_data[:, :, :, 0:1] * (1.0 / self.regr_std_scaling[0])
loc_data_1 = loc_data[:, :, :, 1:2] * (1.0 / self.regr_std_scaling[1])
loc_data_2 = loc_data[:, :, :, 2:3] * (1.0 / self.regr_std_scaling[2])
loc_data_3 = loc_data[:, :, :, 3:4] * (1.0 / self.regr_std_scaling[3])
loc_data = tf.concat((loc_data_0, loc_data_1, loc_data_2, loc_data_3), axis=3)
# padding dummy deltas for background class to get (N, B, C+1, 4)
# as required by the NMSPlugin
loc_data = tf.concat((loc_data, loc_data[:, :, 0:1, :]), axis=2)
loc_data = tf.reshape(
loc_data,
(self.batch_size, self.roi_num * (self.class_num + 1)*4, 1, 1),
name="loc_data"
)
return [loc_data, conf_data, prior_data]
def get_config(self):
"""Get config for this layer."""
config = {
'regr_std_scaling': self.regr_std_scaling,
}
base_config = super(NmsInputs, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
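# Example (illustrative sketch, not part of the original module): wiring the
# layer into a Keras graph. The ROI count (300) and class count (3 foreground
# classes) below are assumptions chosen for demonstration only.
#
#     import keras
#     rois = keras.layers.Input(shape=(300, 4), name='rois')          # (N, B, 4)
#     conf = keras.layers.Input(shape=(300, 4), name='rcnn_conf')     # (N, B, C+1)
#     deltas = keras.layers.Input(shape=(300, 12), name='rcnn_regr')  # (N, B, C*4)
#     loc, scores, priors = NmsInputs((10., 10., 5., 5.))([rois, conf, deltas])
#     # loc:    (N, B*(C+1)*4, 1, 1), deltas rescaled by 1/regr_std_scaling
#     # scores: (N, B*(C+1), 1, 1)
#     # priors: (N, 2, B*4, 1), ROIs reordered to (x1, y1, x2, y2)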
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/layers/NmsInputs.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for FasterRCNN proposal layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import keras
import keras.backend as K
import mock
import numpy as np
import tensorflow as tf
from nvidia_tao_tf1.cv.faster_rcnn.layers.custom_layers import Proposal
from nvidia_tao_tf1.cv.faster_rcnn.layers.utils import (
compute_rpn_target_np, nms_core_py_func,
rpn_to_roi
)
class TestProposal(unittest.TestCase):
'''Main class for testing the proposal layer.'''
def init(self):
'''Initialize.'''
self.anchor_sizes = [64.0, 128.0, 256.0]
self.anchor_ratios = [1.0, 0.5, 2.0]
self.std_scaling = 1.0
self.rpn_stride = 16.0
self.pre_nms_top_N = 12000
self.post_nms_top_N = 2000
self.nms_iou_thres = 0.7
self.activation_type = 'sigmoid'
self.image_h = 384
self.image_w = 1280
self.rpn_h = self.image_h // 16
self.rpn_w = self.image_w // 16
self.bs_per_gpu = 1
self.num_anchors = len(self.anchor_sizes) * len(self.anchor_ratios)
self.iou_high_thres = 0.5
self.iou_low_thres = 0.0
self.rpn_train_bs = 256
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
K.set_session(tf.Session(config=config))
self.session = K.get_session()
def build_proposal_graph(self):
'''Build the model with only a proposal layer.'''
input_scores = keras.layers.Input(shape=(self.num_anchors,
self.rpn_h, self.rpn_w),
name='rpn_scores')
input_deltas = keras.layers.Input(shape=(4*self.num_anchors,
self.rpn_h, self.rpn_w),
name='rpn_deltas')
input_image = keras.layers.Input(
shape=(3, self.image_h, self.image_w),
name="input_image"
)
proposal_out = Proposal(self.anchor_sizes,
self.anchor_ratios,
self.std_scaling,
self.rpn_stride,
self.pre_nms_top_N,
self.post_nms_top_N,
self.nms_iou_thres,
self.activation_type,
self.bs_per_gpu)(
[input_scores, input_deltas, input_image]
)
if not isinstance(proposal_out, list):
proposal_out = [proposal_out]
self.model = keras.models.Model(inputs=[input_scores, input_deltas, input_image],
outputs=proposal_out)
    def generate_test_vectors(self):
        '''Generate the RPN scores and deltas as test vectors.'''
'''generate the RPN scores and deltas as test vectors.'''
gt_boxes = np.array([[141., 1244., 382., 1278.],
[151., 849., 305., 896.],
[163., 1191., 326., 1270.],
[175., 133., 284., 180.],
[170., 213., 288., 255.],
[208., 86., 261., 140.],
[170., 458., 235., 497.],
[158., 534., 215., 607.],
[156., 856., 282., 901.],
[176., 608., 219., 652.]], dtype=np.float32)
ar_sqrt = [np.sqrt(ar) for ar in self.anchor_ratios]
rpn_scores, rpn_deltas = compute_rpn_target_np(gt_boxes, self.anchor_sizes,
ar_sqrt, self.rpn_stride,
self.rpn_h, self.rpn_w,
self.image_h, self.image_w,
self.rpn_train_bs, self.iou_high_thres,
self.iou_low_thres)
images = np.zeros((1, 3, self.image_h, self.image_w), dtype=np.float32)
return (rpn_scores[:, self.num_anchors:, :, :],
rpn_deltas[:, self.num_anchors*4:, :, :],
images)
def proposal_np(self, scores, deltas):
'''Proposal layer in numpy.'''
return rpn_to_roi(scores, deltas,
self.image_w, self.image_h,
self.anchor_sizes, self.anchor_ratios,
self.std_scaling, self.rpn_stride,
use_regr=True, max_boxes=self.post_nms_top_N,
overlap_thresh=self.nms_iou_thres,
rpn_pre_nms_top_N=self.pre_nms_top_N)
def test_proposal_layer(self):
'''Check the proposal layer output.'''
self.init()
        # monkey-patch tf.image.non_max_suppression with the numpy NMS since
        # the TF result does not match the numpy NMS exactly.
with mock.patch('tensorflow.image.non_max_suppression', side_effect=nms_core_py_func) \
as _non_max_suppression_function: # noqa pylint: disable=F841, W0612
self.build_proposal_graph()
scores, deltas, images = self.generate_test_vectors()
proposal_out_keras = self.model.predict([scores, deltas, images])
proposal_out_np = self.proposal_np(scores, deltas)
assert np.allclose(proposal_out_keras, proposal_out_np[:, :, (1, 0, 3, 2)], atol=1e-4)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/layers/tests/test_proposal.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for FasterRCNN proposal target layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import keras.backend as K
import mock
import numpy as np
import tensorflow as tf
from nvidia_tao_tf1.cv.faster_rcnn.layers.utils import (
calc_iou_np,
generate_proposal_target_v1,
sample_proposals,
unpad_np
)
from nvidia_tao_tf1.cv.faster_rcnn.tests.utils import _fake_choice, _fake_uniform
class TestProposalTarget(unittest.TestCase):
'''Main class that checks the proposal_target generation graph.'''
def init(self):
'''Initialize the necessary data.'''
self.rois = tf.placeholder(shape=(None, None, 4), dtype=tf.float32, name='rois')
self.gt_boxes = tf.placeholder(shape=(None, None, 4), dtype=tf.float32, name='gt_boxes')
self.gt_class_ids = tf.placeholder(shape=(None, None),
dtype=tf.float32,
name='gt_class_ids')
self.iou_high_thres = 0.5
self.iou_low_thres = 0.0
self.roi_train_bs = 256
self.roi_positive_ratio = 0.25
self.deltas_scaling = [10., 10., 5., 5.]
self.bg_class_id = 3
self.image_w = 1248
self.image_h = 384
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
K.set_session(tf.Session(config=config))
self.session = K.get_session()
def compute_proposal_target_tf(self):
        '''Construct the proposal target graph.'''
self.rois_tf, self.class_ids_tf, self.deltas_tf, _ = \
generate_proposal_target_v1(self.rois,
self.gt_boxes,
self.gt_class_ids,
self.iou_high_thres,
self.iou_low_thres,
self.roi_train_bs,
self.roi_positive_ratio,
np.array(self.deltas_scaling),
self.bg_class_id)
def proposal_target_np(self, gt_bboxes, gt_class_ids, rois):
        '''Compute proposal targets with the numpy reference implementation.'''
rois, _ = unpad_np(rois)
gt_bboxes, nz = unpad_np(gt_bboxes)
gt_class_ids = gt_class_ids[nz]
rois_np, cls_np, deltas_np = calc_iou_np(rois, gt_bboxes, gt_class_ids,
self.iou_high_thres, self.iou_low_thres,
self.deltas_scaling,
num_classes=4)
roi_idxs_np = sample_proposals(cls_np, self.roi_train_bs, self.roi_positive_ratio)
return rois_np[roi_idxs_np, ...], cls_np[roi_idxs_np, ...], deltas_np[roi_idxs_np, ...]
def proposal_target_tf(self, gt_bboxes, gt_class_ids, rois):
        '''Compute proposal targets with the TensorFlow graph.'''
rois_tf, cls_tf, deltas_tf = self.session.run([self.rois_tf,
self.class_ids_tf,
self.deltas_tf],
feed_dict={'rois:0': rois,
'gt_boxes:0': gt_bboxes,
                                                                 'gt_class_ids:0': gt_class_ids})
return rois_tf, cls_tf, deltas_tf
def gen_test_vectors(self):
        '''Generate test vectors.'''
gt_boxes = np.array([[[726, 148, 826, 319],
[389, 185, 425, 207],
[679, 167, 692, 198]]], dtype=np.float32)
gt_cls = np.array([[1, 0, 2]], dtype=np.int32)
rois = np.array([[[730, 150, 830, 320],
[390, 190, 430, 210],
[700, 200, 750, 250]]], dtype=np.float32)
# convert to y1, x1, y2, x2 format
gt_boxes = np.copy(gt_boxes[:, :, (1, 0, 3, 2)])
rois = np.copy(rois[:, :, (1, 0, 3, 2)])
# pad gt_boxes to 100
gt_boxes_ = gt_boxes.shape[1]
gt_boxes = np.concatenate([gt_boxes, np.zeros(shape=(1, 100-gt_boxes_, 4))], axis=1)
gt_cls_ = gt_cls.shape[1]
gt_cls = np.concatenate([gt_cls, -1 * np.ones(shape=(1, 100-gt_cls_))], axis=1)
rois_ = rois.shape[1]
rois = np.concatenate([rois, np.zeros(shape=(1, 300-rois_, 4))], axis=1)
return gt_boxes, gt_cls, rois
def test_proposal_target(self):
        '''Check that the TensorFlow and numpy outputs match.'''
self.init()
with mock.patch('tensorflow.random.uniform', side_effect=_fake_uniform) \
as uniform_function: # noqa pylint: disable=F841, W0612
with mock.patch('tensorflow.random_shuffle', side_effect=tf.identity) \
as random_shuffle_function: # noqa pylint: disable=F841, W0612
self.compute_proposal_target_tf()
gt_bboxes, gt_class_ids, rois = self.gen_test_vectors()
with mock.patch('numpy.random.choice', side_effect=_fake_choice) \
as choice_function: # noqa pylint: disable=F841, W0612
rois_np, cls_np, deltas_np = \
self.proposal_target_np(gt_bboxes[0, ...], # noqa pylint: disable=E1126
gt_class_ids[0, ...], # noqa pylint: disable=E1126
rois[0, ...]) # noqa pylint: disable=E1126
rois_tf, cls_tf, deltas_tf = self.proposal_target_tf(gt_bboxes, gt_class_ids, rois)
# convert tf RoIs to (x1, y1, x2, y2) format
rois_tf = rois_tf[:, (1, 0, 3, 2)]
deltas_tf = np.reshape(deltas_tf, (deltas_tf.shape[0], -1, 8))
assert np.allclose(rois_np, rois_tf)
assert np.allclose(cls_np, cls_tf)
assert np.allclose(deltas_np, deltas_tf)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/layers/tests/test_proposal_target.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/faster_rcnn/proto/training.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from nvidia_tao_tf1.cv.common.proto import visualizer_config_pb2 as nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_visualizer__config__pb2
from nvidia_tao_tf1.cv.detectnet_v2.proto import regularizer_config_pb2 as nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_regularizer__config__pb2
from nvidia_tao_tf1.cv.faster_rcnn.proto import optimizer_pb2 as nvidia__tao__tf1_dot_cv_dot_faster__rcnn_dot_proto_dot_optimizer__pb2
from nvidia_tao_tf1.cv.faster_rcnn.proto import learning_rate_pb2 as nvidia__tao__tf1_dot_cv_dot_faster__rcnn_dot_proto_dot_learning__rate__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/faster_rcnn/proto/training.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n2nvidia_tao_tf1/cv/faster_rcnn/proto/training.proto\x1a\x36nvidia_tao_tf1/cv/common/proto/visualizer_config.proto\x1a=nvidia_tao_tf1/cv/detectnet_v2/proto/regularizer_config.proto\x1a\x33nvidia_tao_tf1/cv/faster_rcnn/proto/optimizer.proto\x1a\x37nvidia_tao_tf1/cv/faster_rcnn/proto/learning_rate.proto\"E\n\rEarlyStopping\x12\x0f\n\x07monitor\x18\x01 \x01(\t\x12\x11\n\tmin_delta\x18\x02 \x01(\x02\x12\x10\n\x08patience\x18\x03 \x01(\r\"\xad\x07\n\x0eTrainingConfig\x12\x1b\n\x13\x65nable_augmentation\x18\x30 \x01(\x08\x12\x1c\n\x14retrain_pruned_model\x18\x03 \x01(\t\x12\x1a\n\x12pretrained_weights\x18\n \x01(\t\x12\x19\n\x11resume_from_model\x18/ \x01(\t\x12\x17\n\x0frpn_min_overlap\x18\x04 \x01(\x02\x12\x17\n\x0frpn_max_overlap\x18\x05 \x01(\x02\x12\x1e\n\x16\x63lassifier_min_overlap\x18\x06 \x01(\x02\x12\x1e\n\x16\x63lassifier_max_overlap\x18\x07 \x01(\x02\x12\x11\n\tgt_as_roi\x18& \x01(\x08\x12\x13\n\x0bstd_scaling\x18\x08 \x01(\x02\x12\x43\n\x13\x63lassifier_regr_std\x18\t \x03(\x0b\x32&.TrainingConfig.ClassifierRegrStdEntry\x12\x1a\n\x12\x62\x61tch_size_per_gpu\x18. \x01(\r\x12\x12\n\nnum_epochs\x18\x0b \x01(\r\x12\x1b\n\x13\x63heckpoint_interval\x18\x31 \x01(\r\x12\x19\n\x11rpn_pre_nms_top_N\x18# \x01(\r\x12\x16\n\x0erpn_mini_batch\x18$ \x01(\r\x12\x19\n\x11rpn_nms_max_boxes\x18\x10 \x01(\r\x12!\n\x19rpn_nms_overlap_threshold\x18\x11 \x01(\x02\x12\'\n\x0bregularizer\x18\x14 \x01(\x0b\x32\x12.RegularizerConfig\x12#\n\toptimizer\x18+ \x01(\x0b\x32\x10.OptimizerConfig\x12 \n\rlearning_rate\x18- \x01(\x0b\x32\t.LRConfig\x12\x17\n\x0flambda_rpn_regr\x18\' \x01(\x02\x12\x18\n\x10lambda_rpn_class\x18( \x01(\x02\x12\x17\n\x0flambda_cls_regr\x18) \x01(\x02\x12\x18\n\x10lambda_cls_class\x18* \x01(\x02\x12\x12\n\nenable_qat\x18\x32 \x01(\x08\x12\x19\n\x11model_parallelism\x18\x33 \x03(\x02\x12&\n\x0e\x65\x61rly_stopping\x18\x34 \x01(\x0b\x32\x0e.EarlyStopping\x12%\n\nvisualizer\x18\x35 \x01(\x0b\x32\x11.VisualizerConfig\x1a\x38\n\x16\x43lassifierRegrStdEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x02:\x02\x38\x01\x62\x06proto3')
,
dependencies=[nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_visualizer__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_regularizer__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_faster__rcnn_dot_proto_dot_optimizer__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_faster__rcnn_dot_proto_dot_learning__rate__pb2.DESCRIPTOR,])
_EARLYSTOPPING = _descriptor.Descriptor(
name='EarlyStopping',
full_name='EarlyStopping',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='monitor', full_name='EarlyStopping.monitor', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_delta', full_name='EarlyStopping.min_delta', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='patience', full_name='EarlyStopping.patience', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=283,
serialized_end=352,
)
_TRAININGCONFIG_CLASSIFIERREGRSTDENTRY = _descriptor.Descriptor(
name='ClassifierRegrStdEntry',
full_name='TrainingConfig.ClassifierRegrStdEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='TrainingConfig.ClassifierRegrStdEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='TrainingConfig.ClassifierRegrStdEntry.value', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1240,
serialized_end=1296,
)
_TRAININGCONFIG = _descriptor.Descriptor(
name='TrainingConfig',
full_name='TrainingConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='enable_augmentation', full_name='TrainingConfig.enable_augmentation', index=0,
number=48, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='retrain_pruned_model', full_name='TrainingConfig.retrain_pruned_model', index=1,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pretrained_weights', full_name='TrainingConfig.pretrained_weights', index=2,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='resume_from_model', full_name='TrainingConfig.resume_from_model', index=3,
number=47, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rpn_min_overlap', full_name='TrainingConfig.rpn_min_overlap', index=4,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rpn_max_overlap', full_name='TrainingConfig.rpn_max_overlap', index=5,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='classifier_min_overlap', full_name='TrainingConfig.classifier_min_overlap', index=6,
number=6, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='classifier_max_overlap', full_name='TrainingConfig.classifier_max_overlap', index=7,
number=7, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='gt_as_roi', full_name='TrainingConfig.gt_as_roi', index=8,
number=38, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='std_scaling', full_name='TrainingConfig.std_scaling', index=9,
number=8, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='classifier_regr_std', full_name='TrainingConfig.classifier_regr_std', index=10,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batch_size_per_gpu', full_name='TrainingConfig.batch_size_per_gpu', index=11,
number=46, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_epochs', full_name='TrainingConfig.num_epochs', index=12,
number=11, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='checkpoint_interval', full_name='TrainingConfig.checkpoint_interval', index=13,
number=49, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rpn_pre_nms_top_N', full_name='TrainingConfig.rpn_pre_nms_top_N', index=14,
number=35, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rpn_mini_batch', full_name='TrainingConfig.rpn_mini_batch', index=15,
number=36, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rpn_nms_max_boxes', full_name='TrainingConfig.rpn_nms_max_boxes', index=16,
number=16, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rpn_nms_overlap_threshold', full_name='TrainingConfig.rpn_nms_overlap_threshold', index=17,
number=17, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='regularizer', full_name='TrainingConfig.regularizer', index=18,
number=20, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='optimizer', full_name='TrainingConfig.optimizer', index=19,
number=43, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='learning_rate', full_name='TrainingConfig.learning_rate', index=20,
number=45, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='lambda_rpn_regr', full_name='TrainingConfig.lambda_rpn_regr', index=21,
number=39, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='lambda_rpn_class', full_name='TrainingConfig.lambda_rpn_class', index=22,
number=40, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='lambda_cls_regr', full_name='TrainingConfig.lambda_cls_regr', index=23,
number=41, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='lambda_cls_class', full_name='TrainingConfig.lambda_cls_class', index=24,
number=42, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='enable_qat', full_name='TrainingConfig.enable_qat', index=25,
number=50, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='model_parallelism', full_name='TrainingConfig.model_parallelism', index=26,
number=51, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='early_stopping', full_name='TrainingConfig.early_stopping', index=27,
number=52, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='visualizer', full_name='TrainingConfig.visualizer', index=28,
number=53, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_TRAININGCONFIG_CLASSIFIERREGRSTDENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=355,
serialized_end=1296,
)
_TRAININGCONFIG_CLASSIFIERREGRSTDENTRY.containing_type = _TRAININGCONFIG
_TRAININGCONFIG.fields_by_name['classifier_regr_std'].message_type = _TRAININGCONFIG_CLASSIFIERREGRSTDENTRY
_TRAININGCONFIG.fields_by_name['regularizer'].message_type = nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_regularizer__config__pb2._REGULARIZERCONFIG
_TRAININGCONFIG.fields_by_name['optimizer'].message_type = nvidia__tao__tf1_dot_cv_dot_faster__rcnn_dot_proto_dot_optimizer__pb2._OPTIMIZERCONFIG
_TRAININGCONFIG.fields_by_name['learning_rate'].message_type = nvidia__tao__tf1_dot_cv_dot_faster__rcnn_dot_proto_dot_learning__rate__pb2._LRCONFIG
_TRAININGCONFIG.fields_by_name['early_stopping'].message_type = _EARLYSTOPPING
_TRAININGCONFIG.fields_by_name['visualizer'].message_type = nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_visualizer__config__pb2._VISUALIZERCONFIG
DESCRIPTOR.message_types_by_name['EarlyStopping'] = _EARLYSTOPPING
DESCRIPTOR.message_types_by_name['TrainingConfig'] = _TRAININGCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
EarlyStopping = _reflection.GeneratedProtocolMessageType('EarlyStopping', (_message.Message,), dict(
DESCRIPTOR = _EARLYSTOPPING,
__module__ = 'nvidia_tao_tf1.cv.faster_rcnn.proto.training_pb2'
# @@protoc_insertion_point(class_scope:EarlyStopping)
))
_sym_db.RegisterMessage(EarlyStopping)
TrainingConfig = _reflection.GeneratedProtocolMessageType('TrainingConfig', (_message.Message,), dict(
ClassifierRegrStdEntry = _reflection.GeneratedProtocolMessageType('ClassifierRegrStdEntry', (_message.Message,), dict(
DESCRIPTOR = _TRAININGCONFIG_CLASSIFIERREGRSTDENTRY,
__module__ = 'nvidia_tao_tf1.cv.faster_rcnn.proto.training_pb2'
# @@protoc_insertion_point(class_scope:TrainingConfig.ClassifierRegrStdEntry)
))
,
DESCRIPTOR = _TRAININGCONFIG,
__module__ = 'nvidia_tao_tf1.cv.faster_rcnn.proto.training_pb2'
# @@protoc_insertion_point(class_scope:TrainingConfig)
))
_sym_db.RegisterMessage(TrainingConfig)
_sym_db.RegisterMessage(TrainingConfig.ClassifierRegrStdEntry)
_TRAININGCONFIG_CLASSIFIERREGRSTDENTRY._options = None
# @@protoc_insertion_point(module_scope)
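# Example (illustrative sketch, not part of the generated file): these message
# classes are typically populated from a text-format experiment spec via the
# standard protobuf API. Field values below are assumptions for demonstration.
#
#     from google.protobuf import text_format
#     cfg = TrainingConfig()
#     text_format.Merge('num_epochs: 20 batch_size_per_gpu: 4', cfg)
#     assert cfg.num_epochs == 20 and cfg.batch_size_per_gpu == 4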
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/proto/training_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/faster_rcnn/proto/trt_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/faster_rcnn/proto/trt_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n4nvidia_tao_tf1/cv/faster_rcnn/proto/trt_config.proto\"\"\n\x0cTrtInference\x12\x12\n\ntrt_engine\x18\x01 \x01(\tb\x06proto3')
)
_TRTINFERENCE = _descriptor.Descriptor(
name='TrtInference',
full_name='TrtInference',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='trt_engine', full_name='TrtInference.trt_engine', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=56,
serialized_end=90,
)
DESCRIPTOR.message_types_by_name['TrtInference'] = _TRTINFERENCE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TrtInference = _reflection.GeneratedProtocolMessageType('TrtInference', (_message.Message,), dict(
DESCRIPTOR = _TRTINFERENCE,
__module__ = 'nvidia_tao_tf1.cv.faster_rcnn.proto.trt_config_pb2'
# @@protoc_insertion_point(class_scope:TrtInference)
))
_sym_db.RegisterMessage(TrtInference)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/proto/trt_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/faster_rcnn/proto/input_image.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/faster_rcnn/proto/input_image.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n5nvidia_tao_tf1/cv/faster_rcnn/proto/input_image.proto\"!\n\x12ImageSizeConfigMin\x12\x0b\n\x03min\x18\x01 \x01(\r\";\n\x1aImageSizeConfigHeightWidth\x12\x0e\n\x06height\x18\x01 \x01(\r\x12\r\n\x05width\x18\x02 \x01(\r\"\x86\x03\n\x10InputImageConfig\x12\x1e\n\nimage_type\x18\x06 \x01(\x0e\x32\n.ImageType\x12\'\n\x08size_min\x18\x01 \x01(\x0b\x32\x13.ImageSizeConfigMinH\x00\x12\x38\n\x11size_height_width\x18\x02 \x01(\x0b\x32\x1b.ImageSizeConfigHeightWidthH\x00\x12\x1b\n\x13image_channel_order\x18\x05 \x01(\t\x12\x43\n\x12image_channel_mean\x18\x03 \x03(\x0b\x32\'.InputImageConfig.ImageChannelMeanEntry\x12\x1c\n\x14image_scaling_factor\x18\x04 \x01(\x02\x12!\n\x19max_objects_num_per_image\x18\x07 \x01(\r\x1a\x37\n\x15ImageChannelMeanEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x02:\x02\x38\x01\x42\x13\n\x11image_size_config*$\n\tImageType\x12\x07\n\x03RGB\x10\x00\x12\x0e\n\nGRAY_SCALE\x10\x01\x62\x06proto3')
)
_IMAGETYPE = _descriptor.EnumDescriptor(
name='ImageType',
full_name='ImageType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='RGB', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GRAY_SCALE', index=1, number=1,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=546,
serialized_end=582,
)
_sym_db.RegisterEnumDescriptor(_IMAGETYPE)
ImageType = enum_type_wrapper.EnumTypeWrapper(_IMAGETYPE)
RGB = 0
GRAY_SCALE = 1
_IMAGESIZECONFIGMIN = _descriptor.Descriptor(
name='ImageSizeConfigMin',
full_name='ImageSizeConfigMin',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='min', full_name='ImageSizeConfigMin.min', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=57,
serialized_end=90,
)
_IMAGESIZECONFIGHEIGHTWIDTH = _descriptor.Descriptor(
name='ImageSizeConfigHeightWidth',
full_name='ImageSizeConfigHeightWidth',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='height', full_name='ImageSizeConfigHeightWidth.height', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='width', full_name='ImageSizeConfigHeightWidth.width', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=92,
serialized_end=151,
)
_INPUTIMAGECONFIG_IMAGECHANNELMEANENTRY = _descriptor.Descriptor(
name='ImageChannelMeanEntry',
full_name='InputImageConfig.ImageChannelMeanEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='InputImageConfig.ImageChannelMeanEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='InputImageConfig.ImageChannelMeanEntry.value', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=468,
serialized_end=523,
)
_INPUTIMAGECONFIG = _descriptor.Descriptor(
name='InputImageConfig',
full_name='InputImageConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='image_type', full_name='InputImageConfig.image_type', index=0,
number=6, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='size_min', full_name='InputImageConfig.size_min', index=1,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='size_height_width', full_name='InputImageConfig.size_height_width', index=2,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='image_channel_order', full_name='InputImageConfig.image_channel_order', index=3,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='image_channel_mean', full_name='InputImageConfig.image_channel_mean', index=4,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='image_scaling_factor', full_name='InputImageConfig.image_scaling_factor', index=5,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_objects_num_per_image', full_name='InputImageConfig.max_objects_num_per_image', index=6,
number=7, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_INPUTIMAGECONFIG_IMAGECHANNELMEANENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='image_size_config', full_name='InputImageConfig.image_size_config',
index=0, containing_type=None, fields=[]),
],
serialized_start=154,
serialized_end=544,
)
_INPUTIMAGECONFIG_IMAGECHANNELMEANENTRY.containing_type = _INPUTIMAGECONFIG
_INPUTIMAGECONFIG.fields_by_name['image_type'].enum_type = _IMAGETYPE
_INPUTIMAGECONFIG.fields_by_name['size_min'].message_type = _IMAGESIZECONFIGMIN
_INPUTIMAGECONFIG.fields_by_name['size_height_width'].message_type = _IMAGESIZECONFIGHEIGHTWIDTH
_INPUTIMAGECONFIG.fields_by_name['image_channel_mean'].message_type = _INPUTIMAGECONFIG_IMAGECHANNELMEANENTRY
_INPUTIMAGECONFIG.oneofs_by_name['image_size_config'].fields.append(
_INPUTIMAGECONFIG.fields_by_name['size_min'])
_INPUTIMAGECONFIG.fields_by_name['size_min'].containing_oneof = _INPUTIMAGECONFIG.oneofs_by_name['image_size_config']
_INPUTIMAGECONFIG.oneofs_by_name['image_size_config'].fields.append(
_INPUTIMAGECONFIG.fields_by_name['size_height_width'])
_INPUTIMAGECONFIG.fields_by_name['size_height_width'].containing_oneof = _INPUTIMAGECONFIG.oneofs_by_name['image_size_config']
DESCRIPTOR.message_types_by_name['ImageSizeConfigMin'] = _IMAGESIZECONFIGMIN
DESCRIPTOR.message_types_by_name['ImageSizeConfigHeightWidth'] = _IMAGESIZECONFIGHEIGHTWIDTH
DESCRIPTOR.message_types_by_name['InputImageConfig'] = _INPUTIMAGECONFIG
DESCRIPTOR.enum_types_by_name['ImageType'] = _IMAGETYPE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ImageSizeConfigMin = _reflection.GeneratedProtocolMessageType('ImageSizeConfigMin', (_message.Message,), dict(
DESCRIPTOR = _IMAGESIZECONFIGMIN,
__module__ = 'nvidia_tao_tf1.cv.faster_rcnn.proto.input_image_pb2'
# @@protoc_insertion_point(class_scope:ImageSizeConfigMin)
))
_sym_db.RegisterMessage(ImageSizeConfigMin)
ImageSizeConfigHeightWidth = _reflection.GeneratedProtocolMessageType('ImageSizeConfigHeightWidth', (_message.Message,), dict(
DESCRIPTOR = _IMAGESIZECONFIGHEIGHTWIDTH,
__module__ = 'nvidia_tao_tf1.cv.faster_rcnn.proto.input_image_pb2'
# @@protoc_insertion_point(class_scope:ImageSizeConfigHeightWidth)
))
_sym_db.RegisterMessage(ImageSizeConfigHeightWidth)
InputImageConfig = _reflection.GeneratedProtocolMessageType('InputImageConfig', (_message.Message,), dict(
ImageChannelMeanEntry = _reflection.GeneratedProtocolMessageType('ImageChannelMeanEntry', (_message.Message,), dict(
DESCRIPTOR = _INPUTIMAGECONFIG_IMAGECHANNELMEANENTRY,
__module__ = 'nvidia_tao_tf1.cv.faster_rcnn.proto.input_image_pb2'
# @@protoc_insertion_point(class_scope:InputImageConfig.ImageChannelMeanEntry)
))
,
DESCRIPTOR = _INPUTIMAGECONFIG,
__module__ = 'nvidia_tao_tf1.cv.faster_rcnn.proto.input_image_pb2'
# @@protoc_insertion_point(class_scope:InputImageConfig)
))
_sym_db.RegisterMessage(InputImageConfig)
_sym_db.RegisterMessage(InputImageConfig.ImageChannelMeanEntry)
_INPUTIMAGECONFIG_IMAGECHANNELMEANENTRY._options = None
# @@protoc_insertion_point(module_scope)
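# Example (illustrative sketch, not part of the generated file): the image size
# is a protobuf oneof, so setting one variant clears the other, and WhichOneof
# reports which variant a parsed config carries. Values below are assumptions.
#
#     cfg = InputImageConfig()
#     cfg.size_height_width.height = 384
#     cfg.size_height_width.width = 1280
#     assert cfg.WhichOneof('image_size_config') == 'size_height_width'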
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/proto/input_image_pb2.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing experiment proto schema for FasterRCNN."""
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/proto/__init__.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/faster_rcnn/proto/inference.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from nvidia_tao_tf1.cv.faster_rcnn.proto import trt_config_pb2 as nvidia__tao__tf1_dot_cv_dot_faster__rcnn_dot_proto_dot_trt__config__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/faster_rcnn/proto/inference.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n3nvidia_tao_tf1/cv/faster_rcnn/proto/inference.proto\x1a\x34nvidia_tao_tf1/cv/faster_rcnn/proto/trt_config.proto\"\xc4\x03\n\x0fInferenceConfig\x12\x12\n\nimages_dir\x18\x01 \x01(\t\x12\r\n\x05model\x18\x02 \x01(\t\x12\x12\n\nbatch_size\x18\x0f \x01(\r\x12\x19\n\x11rpn_pre_nms_top_N\x18\n \x01(\r\x12\x19\n\x11rpn_nms_max_boxes\x18\x07 \x01(\r\x12!\n\x19rpn_nms_overlap_threshold\x18\x08 \x01(\x02\x12 \n\x18\x62\x62ox_visualize_threshold\x18\x05 \x01(\x02\x12\x1f\n\x17object_confidence_thres\x18\x10 \x01(\x02\x12 \n\x18\x63lassifier_nms_max_boxes\x18\t \x01(\r\x12(\n classifier_nms_overlap_threshold\x18\x06 \x01(\x02\x12\"\n\x1a\x64\x65tection_image_output_dir\x18\x0b \x01(\t\x12\x17\n\x0f\x62\x62ox_caption_on\x18\x0c \x01(\x08\x12\x17\n\x0flabels_dump_dir\x18\r \x01(\t\x12$\n\rtrt_inference\x18\x0e \x01(\x0b\x32\r.TrtInference\x12\x16\n\x0enms_score_bits\x18\x11 \x01(\rb\x06proto3')
,
dependencies=[nvidia__tao__tf1_dot_cv_dot_faster__rcnn_dot_proto_dot_trt__config__pb2.DESCRIPTOR,])
_INFERENCECONFIG = _descriptor.Descriptor(
name='InferenceConfig',
full_name='InferenceConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='images_dir', full_name='InferenceConfig.images_dir', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='model', full_name='InferenceConfig.model', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batch_size', full_name='InferenceConfig.batch_size', index=2,
number=15, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rpn_pre_nms_top_N', full_name='InferenceConfig.rpn_pre_nms_top_N', index=3,
number=10, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rpn_nms_max_boxes', full_name='InferenceConfig.rpn_nms_max_boxes', index=4,
number=7, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rpn_nms_overlap_threshold', full_name='InferenceConfig.rpn_nms_overlap_threshold', index=5,
number=8, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='bbox_visualize_threshold', full_name='InferenceConfig.bbox_visualize_threshold', index=6,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='object_confidence_thres', full_name='InferenceConfig.object_confidence_thres', index=7,
number=16, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='classifier_nms_max_boxes', full_name='InferenceConfig.classifier_nms_max_boxes', index=8,
number=9, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='classifier_nms_overlap_threshold', full_name='InferenceConfig.classifier_nms_overlap_threshold', index=9,
number=6, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='detection_image_output_dir', full_name='InferenceConfig.detection_image_output_dir', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='bbox_caption_on', full_name='InferenceConfig.bbox_caption_on', index=11,
number=12, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='labels_dump_dir', full_name='InferenceConfig.labels_dump_dir', index=12,
number=13, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='trt_inference', full_name='InferenceConfig.trt_inference', index=13,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='nms_score_bits', full_name='InferenceConfig.nms_score_bits', index=14,
number=17, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=110,
serialized_end=562,
)
_INFERENCECONFIG.fields_by_name['trt_inference'].message_type = nvidia__tao__tf1_dot_cv_dot_faster__rcnn_dot_proto_dot_trt__config__pb2._TRTINFERENCE
DESCRIPTOR.message_types_by_name['InferenceConfig'] = _INFERENCECONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
InferenceConfig = _reflection.GeneratedProtocolMessageType('InferenceConfig', (_message.Message,), dict(
DESCRIPTOR = _INFERENCECONFIG,
__module__ = 'nvidia_tao_tf1.cv.faster_rcnn.proto.inference_pb2'
# @@protoc_insertion_point(class_scope:InferenceConfig)
))
_sym_db.RegisterMessage(InferenceConfig)
# @@protoc_insertion_point(module_scope)
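# --- Illustrative usage sketch (not part of the generated output). ---
# A minimal example of round-tripping the InferenceConfig message defined
# above; the directory value is a hypothetical placeholder.
#
#   from nvidia_tao_tf1.cv.faster_rcnn.proto import inference_pb2
#
#   cfg = inference_pb2.InferenceConfig()
#   cfg.images_dir = '/workspace/images'       # hypothetical path
#   cfg.batch_size = 4
#   cfg.rpn_nms_overlap_threshold = 0.7
#   data = cfg.SerializeToString()             # proto3 wire-format bytes
#   restored = inference_pb2.InferenceConfig.FromString(data)
#   assert restored.batch_size == 4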
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/proto/inference_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/faster_rcnn/proto/experiment.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from nvidia_tao_tf1.cv.detectnet_v2.proto import augmentation_config_pb2 as nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_augmentation__config__pb2
from nvidia_tao_tf1.cv.detectnet_v2.proto import dataset_config_pb2 as nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_dataset__config__pb2
from nvidia_tao_tf1.cv.faster_rcnn.proto import training_pb2 as nvidia__tao__tf1_dot_cv_dot_faster__rcnn_dot_proto_dot_training__pb2
from nvidia_tao_tf1.cv.faster_rcnn.proto import model_pb2 as nvidia__tao__tf1_dot_cv_dot_faster__rcnn_dot_proto_dot_model__pb2
from nvidia_tao_tf1.cv.faster_rcnn.proto import inference_pb2 as nvidia__tao__tf1_dot_cv_dot_faster__rcnn_dot_proto_dot_inference__pb2
from nvidia_tao_tf1.cv.faster_rcnn.proto import evaluation_pb2 as nvidia__tao__tf1_dot_cv_dot_faster__rcnn_dot_proto_dot_evaluation__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/faster_rcnn/proto/experiment.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n4nvidia_tao_tf1/cv/faster_rcnn/proto/experiment.proto\x1a>nvidia_tao_tf1/cv/detectnet_v2/proto/augmentation_config.proto\x1a\x39nvidia_tao_tf1/cv/detectnet_v2/proto/dataset_config.proto\x1a\x32nvidia_tao_tf1/cv/faster_rcnn/proto/training.proto\x1a/nvidia_tao_tf1/cv/faster_rcnn/proto/model.proto\x1a\x33nvidia_tao_tf1/cv/faster_rcnn/proto/inference.proto\x1a\x34nvidia_tao_tf1/cv/faster_rcnn/proto/evaluation.proto\"\xc5\x02\n\nExperiment\x12\x13\n\x0brandom_seed\x18\x01 \x01(\r\x12\x0f\n\x07verbose\x18\x02 \x01(\x08\x12\x0f\n\x07\x65nc_key\x18\x06 \x01(\t\x12&\n\x0e\x64\x61taset_config\x18\x05 \x01(\x0b\x32\x0e.DatasetConfig\x12\x30\n\x13\x61ugmentation_config\x18\x07 \x01(\x0b\x32\x13.AugmentationConfig\x12\"\n\x0cmodel_config\x18\x03 \x01(\x0b\x32\x0c.ModelConfig\x12(\n\x0ftraining_config\x18\x04 \x01(\x0b\x32\x0f.TrainingConfig\x12*\n\x10inference_config\x18\x08 \x01(\x0b\x32\x10.InferenceConfig\x12,\n\x11\x65valuation_config\x18\t \x01(\x0b\x32\x11.EvaluationConfigb\x06proto3')
,
dependencies=[nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_augmentation__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_dataset__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_faster__rcnn_dot_proto_dot_training__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_faster__rcnn_dot_proto_dot_model__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_faster__rcnn_dot_proto_dot_inference__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_faster__rcnn_dot_proto_dot_evaluation__pb2.DESCRIPTOR,])
_EXPERIMENT = _descriptor.Descriptor(
name='Experiment',
full_name='Experiment',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='random_seed', full_name='Experiment.random_seed', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='verbose', full_name='Experiment.verbose', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='enc_key', full_name='Experiment.enc_key', index=2,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dataset_config', full_name='Experiment.dataset_config', index=3,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='augmentation_config', full_name='Experiment.augmentation_config', index=4,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='model_config', full_name='Experiment.model_config', index=5,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='training_config', full_name='Experiment.training_config', index=6,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='inference_config', full_name='Experiment.inference_config', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='evaluation_config', full_name='Experiment.evaluation_config', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=388,
serialized_end=713,
)
_EXPERIMENT.fields_by_name['dataset_config'].message_type = nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_dataset__config__pb2._DATASETCONFIG
_EXPERIMENT.fields_by_name['augmentation_config'].message_type = nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_augmentation__config__pb2._AUGMENTATIONCONFIG
_EXPERIMENT.fields_by_name['model_config'].message_type = nvidia__tao__tf1_dot_cv_dot_faster__rcnn_dot_proto_dot_model__pb2._MODELCONFIG
_EXPERIMENT.fields_by_name['training_config'].message_type = nvidia__tao__tf1_dot_cv_dot_faster__rcnn_dot_proto_dot_training__pb2._TRAININGCONFIG
_EXPERIMENT.fields_by_name['inference_config'].message_type = nvidia__tao__tf1_dot_cv_dot_faster__rcnn_dot_proto_dot_inference__pb2._INFERENCECONFIG
_EXPERIMENT.fields_by_name['evaluation_config'].message_type = nvidia__tao__tf1_dot_cv_dot_faster__rcnn_dot_proto_dot_evaluation__pb2._EVALUATIONCONFIG
DESCRIPTOR.message_types_by_name['Experiment'] = _EXPERIMENT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Experiment = _reflection.GeneratedProtocolMessageType('Experiment', (_message.Message,), dict(
DESCRIPTOR = _EXPERIMENT,
__module__ = 'nvidia_tao_tf1.cv.faster_rcnn.proto.experiment_pb2'
# @@protoc_insertion_point(class_scope:Experiment)
))
_sym_db.RegisterMessage(Experiment)
# @@protoc_insertion_point(module_scope)
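# --- Illustrative usage sketch (not part of the generated output). ---
# How a text-format spec file is typically parsed into the Experiment message
# defined above (the same pattern the spec loader in this repo uses); the
# spec path is a hypothetical placeholder.
#
#   from google.protobuf.text_format import Merge
#   from nvidia_tao_tf1.cv.faster_rcnn.proto import experiment_pb2
#
#   experiment = experiment_pb2.Experiment()
#   with open('/workspace/specs/frcnn_train.txt', 'r') as f:  # hypothetical
#       Merge(f.read(), experiment)
#   # Nested configs are plain attributes once parsed:
#   print(experiment.random_seed, experiment.model_config.arch)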
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/proto/experiment_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/faster_rcnn/proto/model.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from nvidia_tao_tf1.cv.faster_rcnn.proto import input_image_pb2 as nvidia__tao__tf1_dot_cv_dot_faster__rcnn_dot_proto_dot_input__image__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/faster_rcnn/proto/model.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n/nvidia_tao_tf1/cv/faster_rcnn/proto/model.proto\x1a\x35nvidia_tao_tf1/cv/faster_rcnn/proto/input_image.proto\"/\n\x0f\x41nchorBoxConfig\x12\r\n\x05scale\x18\x01 \x03(\x02\x12\r\n\x05ratio\x18\x02 \x03(\x02\";\n\x10RoiPoolingConfig\x12\x11\n\tpool_size\x18\x01 \x01(\r\x12\x14\n\x0cpool_size_2x\x18\x02 \x01(\x08\"\xa8\x01\n\nActivation\x12\x17\n\x0f\x61\x63tivation_type\x18\x01 \x01(\t\x12\x44\n\x15\x61\x63tivation_parameters\x18\x02 \x03(\x0b\x32%.Activation.ActivationParametersEntry\x1a;\n\x19\x41\x63tivationParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x02:\x02\x38\x01\"\xdd\x03\n\x0bModelConfig\x12-\n\x12input_image_config\x18\x01 \x01(\x0b\x32\x11.InputImageConfig\x12\x0c\n\x04\x61rch\x18\x02 \x01(\t\x12+\n\x11\x61nchor_box_config\x18\x03 \x01(\x0b\x32\x10.AnchorBoxConfig\x12\x16\n\x0eroi_mini_batch\x18\x04 \x01(\r\x12\x12\n\nrpn_stride\x18\x05 \x01(\r\x12\x11\n\tfreeze_bn\x18\x06 \x01(\x08\x12\x14\n\x0c\x64ropout_rate\x18\x11 \x01(\x02\x12\x19\n\x11\x64rop_connect_rate\x18\x12 \x01(\x02\x12\x1f\n\x17rpn_cls_activation_type\x18\x07 \x01(\t\x12\x15\n\rfreeze_blocks\x18\t \x03(\x02\x12\x10\n\x08use_bias\x18\n \x01(\x08\x12-\n\x12roi_pooling_config\x18\x0b \x01(\x0b\x32\x11.RoiPoolingConfig\x12\x11\n\trfcn_mode\x18\x0c \x01(\x08\x12\x19\n\x11tf_proposal_layer\x18\r \x01(\x08\x12\x17\n\x0f\x61ll_projections\x18\x0e \x01(\x08\x12\x13\n\x0buse_pooling\x18\x0f \x01(\x08\x12\x1f\n\nactivation\x18\x13 \x01(\x0b\x32\x0b.Activationb\x06proto3')
,
dependencies=[nvidia__tao__tf1_dot_cv_dot_faster__rcnn_dot_proto_dot_input__image__pb2.DESCRIPTOR,])
_ANCHORBOXCONFIG = _descriptor.Descriptor(
name='AnchorBoxConfig',
full_name='AnchorBoxConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='scale', full_name='AnchorBoxConfig.scale', index=0,
number=1, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ratio', full_name='AnchorBoxConfig.ratio', index=1,
number=2, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=106,
serialized_end=153,
)
_ROIPOOLINGCONFIG = _descriptor.Descriptor(
name='RoiPoolingConfig',
full_name='RoiPoolingConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='pool_size', full_name='RoiPoolingConfig.pool_size', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pool_size_2x', full_name='RoiPoolingConfig.pool_size_2x', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=155,
serialized_end=214,
)
_ACTIVATION_ACTIVATIONPARAMETERSENTRY = _descriptor.Descriptor(
name='ActivationParametersEntry',
full_name='Activation.ActivationParametersEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='Activation.ActivationParametersEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='Activation.ActivationParametersEntry.value', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=326,
serialized_end=385,
)
_ACTIVATION = _descriptor.Descriptor(
name='Activation',
full_name='Activation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='activation_type', full_name='Activation.activation_type', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='activation_parameters', full_name='Activation.activation_parameters', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_ACTIVATION_ACTIVATIONPARAMETERSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=217,
serialized_end=385,
)
_MODELCONFIG = _descriptor.Descriptor(
name='ModelConfig',
full_name='ModelConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='input_image_config', full_name='ModelConfig.input_image_config', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='arch', full_name='ModelConfig.arch', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='anchor_box_config', full_name='ModelConfig.anchor_box_config', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='roi_mini_batch', full_name='ModelConfig.roi_mini_batch', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rpn_stride', full_name='ModelConfig.rpn_stride', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='freeze_bn', full_name='ModelConfig.freeze_bn', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dropout_rate', full_name='ModelConfig.dropout_rate', index=6,
number=17, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='drop_connect_rate', full_name='ModelConfig.drop_connect_rate', index=7,
number=18, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rpn_cls_activation_type', full_name='ModelConfig.rpn_cls_activation_type', index=8,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='freeze_blocks', full_name='ModelConfig.freeze_blocks', index=9,
number=9, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='use_bias', full_name='ModelConfig.use_bias', index=10,
number=10, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='roi_pooling_config', full_name='ModelConfig.roi_pooling_config', index=11,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rfcn_mode', full_name='ModelConfig.rfcn_mode', index=12,
number=12, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tf_proposal_layer', full_name='ModelConfig.tf_proposal_layer', index=13,
number=13, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='all_projections', full_name='ModelConfig.all_projections', index=14,
number=14, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='use_pooling', full_name='ModelConfig.use_pooling', index=15,
number=15, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='activation', full_name='ModelConfig.activation', index=16,
number=19, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=388,
serialized_end=865,
)
_ACTIVATION_ACTIVATIONPARAMETERSENTRY.containing_type = _ACTIVATION
_ACTIVATION.fields_by_name['activation_parameters'].message_type = _ACTIVATION_ACTIVATIONPARAMETERSENTRY
_MODELCONFIG.fields_by_name['input_image_config'].message_type = nvidia__tao__tf1_dot_cv_dot_faster__rcnn_dot_proto_dot_input__image__pb2._INPUTIMAGECONFIG
_MODELCONFIG.fields_by_name['anchor_box_config'].message_type = _ANCHORBOXCONFIG
_MODELCONFIG.fields_by_name['roi_pooling_config'].message_type = _ROIPOOLINGCONFIG
_MODELCONFIG.fields_by_name['activation'].message_type = _ACTIVATION
DESCRIPTOR.message_types_by_name['AnchorBoxConfig'] = _ANCHORBOXCONFIG
DESCRIPTOR.message_types_by_name['RoiPoolingConfig'] = _ROIPOOLINGCONFIG
DESCRIPTOR.message_types_by_name['Activation'] = _ACTIVATION
DESCRIPTOR.message_types_by_name['ModelConfig'] = _MODELCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AnchorBoxConfig = _reflection.GeneratedProtocolMessageType('AnchorBoxConfig', (_message.Message,), dict(
DESCRIPTOR = _ANCHORBOXCONFIG,
__module__ = 'nvidia_tao_tf1.cv.faster_rcnn.proto.model_pb2'
# @@protoc_insertion_point(class_scope:AnchorBoxConfig)
))
_sym_db.RegisterMessage(AnchorBoxConfig)
RoiPoolingConfig = _reflection.GeneratedProtocolMessageType('RoiPoolingConfig', (_message.Message,), dict(
DESCRIPTOR = _ROIPOOLINGCONFIG,
__module__ = 'nvidia_tao_tf1.cv.faster_rcnn.proto.model_pb2'
# @@protoc_insertion_point(class_scope:RoiPoolingConfig)
))
_sym_db.RegisterMessage(RoiPoolingConfig)
Activation = _reflection.GeneratedProtocolMessageType('Activation', (_message.Message,), dict(
ActivationParametersEntry = _reflection.GeneratedProtocolMessageType('ActivationParametersEntry', (_message.Message,), dict(
DESCRIPTOR = _ACTIVATION_ACTIVATIONPARAMETERSENTRY,
__module__ = 'nvidia_tao_tf1.cv.faster_rcnn.proto.model_pb2'
# @@protoc_insertion_point(class_scope:Activation.ActivationParametersEntry)
))
,
DESCRIPTOR = _ACTIVATION,
__module__ = 'nvidia_tao_tf1.cv.faster_rcnn.proto.model_pb2'
# @@protoc_insertion_point(class_scope:Activation)
))
_sym_db.RegisterMessage(Activation)
_sym_db.RegisterMessage(Activation.ActivationParametersEntry)
ModelConfig = _reflection.GeneratedProtocolMessageType('ModelConfig', (_message.Message,), dict(
DESCRIPTOR = _MODELCONFIG,
__module__ = 'nvidia_tao_tf1.cv.faster_rcnn.proto.model_pb2'
# @@protoc_insertion_point(class_scope:ModelConfig)
))
_sym_db.RegisterMessage(ModelConfig)
_ACTIVATION_ACTIVATIONPARAMETERSENTRY._options = None
# @@protoc_insertion_point(module_scope)
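# --- Illustrative usage sketch (not part of the generated output). ---
# The Activation message above uses a map<string, float> field
# (ActivationParametersEntry), which the Python API exposes as a dict-like
# attribute; the concrete values here are hypothetical.
#
#   from nvidia_tao_tf1.cv.faster_rcnn.proto import model_pb2
#
#   cfg = model_pb2.ModelConfig(arch='resnet')     # hypothetical arch string
#   cfg.activation.activation_type = 'relu-n'      # hypothetical value
#   cfg.activation.activation_parameters['max_value'] = 6.0  # map entry
#   cfg.freeze_blocks.extend([0.0, 1.0])           # repeated float field
#   assert cfg.activation.activation_parameters['max_value'] == 6.0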
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/proto/model_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/faster_rcnn/proto/optimizer.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/faster_rcnn/proto/optimizer.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n3nvidia_tao_tf1/cv/faster_rcnn/proto/optimizer.proto\"r\n\x13\x41\x64\x61mOptimizerConfig\x12\n\n\x02lr\x18\x01 \x01(\x02\x12\x0e\n\x06\x62\x65ta_1\x18\x02 \x01(\x02\x12\x0e\n\x06\x62\x65ta_2\x18\x03 \x01(\x02\x12\x0f\n\x07\x65psilon\x18\x04 \x01(\x02\x12\r\n\x05\x64\x65\x63\x61y\x18\x05 \x01(\x02\x12\x0f\n\x07\x61msgrad\x18\x06 \x01(\x08\"S\n\x12SgdOptimizerConfig\x12\n\n\x02lr\x18\x01 \x01(\x02\x12\x10\n\x08momentum\x18\x02 \x01(\x02\x12\r\n\x05\x64\x65\x63\x61y\x18\x03 \x01(\x02\x12\x10\n\x08nesterov\x18\x04 \x01(\x08\"$\n\x16RmspropOptimizerConfig\x12\n\n\x02lr\x18\x01 \x01(\x02\"\x90\x01\n\x0fOptimizerConfig\x12$\n\x04\x61\x64\x61m\x18\x01 \x01(\x0b\x32\x14.AdamOptimizerConfigH\x00\x12\"\n\x03sgd\x18\x02 \x01(\x0b\x32\x13.SgdOptimizerConfigH\x00\x12*\n\x07rmsprop\x18\x03 \x01(\x0b\x32\x17.RmspropOptimizerConfigH\x00\x42\x07\n\x05optimb\x06proto3')
)
_ADAMOPTIMIZERCONFIG = _descriptor.Descriptor(
name='AdamOptimizerConfig',
full_name='AdamOptimizerConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='lr', full_name='AdamOptimizerConfig.lr', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='beta_1', full_name='AdamOptimizerConfig.beta_1', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='beta_2', full_name='AdamOptimizerConfig.beta_2', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='epsilon', full_name='AdamOptimizerConfig.epsilon', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='decay', full_name='AdamOptimizerConfig.decay', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='amsgrad', full_name='AdamOptimizerConfig.amsgrad', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=55,
serialized_end=169,
)
_SGDOPTIMIZERCONFIG = _descriptor.Descriptor(
name='SgdOptimizerConfig',
full_name='SgdOptimizerConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='lr', full_name='SgdOptimizerConfig.lr', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='momentum', full_name='SgdOptimizerConfig.momentum', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='decay', full_name='SgdOptimizerConfig.decay', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='nesterov', full_name='SgdOptimizerConfig.nesterov', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=171,
serialized_end=254,
)
_RMSPROPOPTIMIZERCONFIG = _descriptor.Descriptor(
name='RmspropOptimizerConfig',
full_name='RmspropOptimizerConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='lr', full_name='RmspropOptimizerConfig.lr', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=256,
serialized_end=292,
)
_OPTIMIZERCONFIG = _descriptor.Descriptor(
name='OptimizerConfig',
full_name='OptimizerConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='adam', full_name='OptimizerConfig.adam', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sgd', full_name='OptimizerConfig.sgd', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rmsprop', full_name='OptimizerConfig.rmsprop', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='optim', full_name='OptimizerConfig.optim',
index=0, containing_type=None, fields=[]),
],
serialized_start=295,
serialized_end=439,
)
_OPTIMIZERCONFIG.fields_by_name['adam'].message_type = _ADAMOPTIMIZERCONFIG
_OPTIMIZERCONFIG.fields_by_name['sgd'].message_type = _SGDOPTIMIZERCONFIG
_OPTIMIZERCONFIG.fields_by_name['rmsprop'].message_type = _RMSPROPOPTIMIZERCONFIG
_OPTIMIZERCONFIG.oneofs_by_name['optim'].fields.append(
_OPTIMIZERCONFIG.fields_by_name['adam'])
_OPTIMIZERCONFIG.fields_by_name['adam'].containing_oneof = _OPTIMIZERCONFIG.oneofs_by_name['optim']
_OPTIMIZERCONFIG.oneofs_by_name['optim'].fields.append(
_OPTIMIZERCONFIG.fields_by_name['sgd'])
_OPTIMIZERCONFIG.fields_by_name['sgd'].containing_oneof = _OPTIMIZERCONFIG.oneofs_by_name['optim']
_OPTIMIZERCONFIG.oneofs_by_name['optim'].fields.append(
_OPTIMIZERCONFIG.fields_by_name['rmsprop'])
_OPTIMIZERCONFIG.fields_by_name['rmsprop'].containing_oneof = _OPTIMIZERCONFIG.oneofs_by_name['optim']
DESCRIPTOR.message_types_by_name['AdamOptimizerConfig'] = _ADAMOPTIMIZERCONFIG
DESCRIPTOR.message_types_by_name['SgdOptimizerConfig'] = _SGDOPTIMIZERCONFIG
DESCRIPTOR.message_types_by_name['RmspropOptimizerConfig'] = _RMSPROPOPTIMIZERCONFIG
DESCRIPTOR.message_types_by_name['OptimizerConfig'] = _OPTIMIZERCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AdamOptimizerConfig = _reflection.GeneratedProtocolMessageType('AdamOptimizerConfig', (_message.Message,), dict(
DESCRIPTOR = _ADAMOPTIMIZERCONFIG,
__module__ = 'nvidia_tao_tf1.cv.faster_rcnn.proto.optimizer_pb2'
# @@protoc_insertion_point(class_scope:AdamOptimizerConfig)
))
_sym_db.RegisterMessage(AdamOptimizerConfig)
SgdOptimizerConfig = _reflection.GeneratedProtocolMessageType('SgdOptimizerConfig', (_message.Message,), dict(
DESCRIPTOR = _SGDOPTIMIZERCONFIG,
__module__ = 'nvidia_tao_tf1.cv.faster_rcnn.proto.optimizer_pb2'
# @@protoc_insertion_point(class_scope:SgdOptimizerConfig)
))
_sym_db.RegisterMessage(SgdOptimizerConfig)
RmspropOptimizerConfig = _reflection.GeneratedProtocolMessageType('RmspropOptimizerConfig', (_message.Message,), dict(
DESCRIPTOR = _RMSPROPOPTIMIZERCONFIG,
__module__ = 'nvidia_tao_tf1.cv.faster_rcnn.proto.optimizer_pb2'
# @@protoc_insertion_point(class_scope:RmspropOptimizerConfig)
))
_sym_db.RegisterMessage(RmspropOptimizerConfig)
OptimizerConfig = _reflection.GeneratedProtocolMessageType('OptimizerConfig', (_message.Message,), dict(
DESCRIPTOR = _OPTIMIZERCONFIG,
__module__ = 'nvidia_tao_tf1.cv.faster_rcnn.proto.optimizer_pb2'
# @@protoc_insertion_point(class_scope:OptimizerConfig)
))
_sym_db.RegisterMessage(OptimizerConfig)
# @@protoc_insertion_point(module_scope)
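# --- Illustrative usage sketch (not part of the generated output). ---
# OptimizerConfig wraps its three optimizers in a proto3 oneof named 'optim':
# writing to one branch automatically clears the others.
#
#   from nvidia_tao_tf1.cv.faster_rcnn.proto import optimizer_pb2
#
#   opt = optimizer_pb2.OptimizerConfig()
#   opt.sgd.lr = 0.02            # first write selects the 'sgd' branch
#   opt.sgd.momentum = 0.9
#   assert opt.WhichOneof('optim') == 'sgd'
#   opt.adam.lr = 1e-4           # switching to 'adam' clears 'sgd'
#   assert opt.WhichOneof('optim') == 'adam'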
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/proto/optimizer_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/faster_rcnn/proto/learning_rate.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/faster_rcnn/proto/learning_rate.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n7nvidia_tao_tf1/cv/faster_rcnn/proto/learning_rate.proto\"\x86\x01\n\x18SoftStartAnnealingConfig\x12\x0f\n\x07\x62\x61se_lr\x18\x01 \x01(\x02\x12\x10\n\x08start_lr\x18\x02 \x01(\x02\x12\x12\n\nsoft_start\x18\x03 \x01(\x02\x12\x18\n\x10\x61nnealing_points\x18\x04 \x03(\x02\x12\x19\n\x11\x61nnealing_divider\x18\x05 \x01(\x02\"A\n\x0cStepLrConfig\x12\x0f\n\x07\x62\x61se_lr\x18\x01 \x01(\x02\x12\r\n\x05gamma\x18\x02 \x01(\x02\x12\x11\n\tstep_size\x18\x03 \x01(\x02\"g\n\x08LRConfig\x12/\n\nsoft_start\x18\x01 \x01(\x0b\x32\x19.SoftStartAnnealingConfigH\x00\x12\x1d\n\x04step\x18\x02 \x01(\x0b\x32\r.StepLrConfigH\x00\x42\x0b\n\tlr_configb\x06proto3')
)
_SOFTSTARTANNEALINGCONFIG = _descriptor.Descriptor(
name='SoftStartAnnealingConfig',
full_name='SoftStartAnnealingConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='base_lr', full_name='SoftStartAnnealingConfig.base_lr', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='start_lr', full_name='SoftStartAnnealingConfig.start_lr', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='soft_start', full_name='SoftStartAnnealingConfig.soft_start', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='annealing_points', full_name='SoftStartAnnealingConfig.annealing_points', index=3,
number=4, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='annealing_divider', full_name='SoftStartAnnealingConfig.annealing_divider', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=60,
serialized_end=194,
)
_STEPLRCONFIG = _descriptor.Descriptor(
name='StepLrConfig',
full_name='StepLrConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='base_lr', full_name='StepLrConfig.base_lr', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='gamma', full_name='StepLrConfig.gamma', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='step_size', full_name='StepLrConfig.step_size', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=196,
serialized_end=261,
)
_LRCONFIG = _descriptor.Descriptor(
name='LRConfig',
full_name='LRConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='soft_start', full_name='LRConfig.soft_start', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='step', full_name='LRConfig.step', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='lr_config', full_name='LRConfig.lr_config',
index=0, containing_type=None, fields=[]),
],
serialized_start=263,
serialized_end=366,
)
_LRCONFIG.fields_by_name['soft_start'].message_type = _SOFTSTARTANNEALINGCONFIG
_LRCONFIG.fields_by_name['step'].message_type = _STEPLRCONFIG
_LRCONFIG.oneofs_by_name['lr_config'].fields.append(
_LRCONFIG.fields_by_name['soft_start'])
_LRCONFIG.fields_by_name['soft_start'].containing_oneof = _LRCONFIG.oneofs_by_name['lr_config']
_LRCONFIG.oneofs_by_name['lr_config'].fields.append(
_LRCONFIG.fields_by_name['step'])
_LRCONFIG.fields_by_name['step'].containing_oneof = _LRCONFIG.oneofs_by_name['lr_config']
DESCRIPTOR.message_types_by_name['SoftStartAnnealingConfig'] = _SOFTSTARTANNEALINGCONFIG
DESCRIPTOR.message_types_by_name['StepLrConfig'] = _STEPLRCONFIG
DESCRIPTOR.message_types_by_name['LRConfig'] = _LRCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SoftStartAnnealingConfig = _reflection.GeneratedProtocolMessageType('SoftStartAnnealingConfig', (_message.Message,), dict(
DESCRIPTOR = _SOFTSTARTANNEALINGCONFIG,
__module__ = 'nvidia_tao_tf1.cv.faster_rcnn.proto.learning_rate_pb2'
# @@protoc_insertion_point(class_scope:SoftStartAnnealingConfig)
))
_sym_db.RegisterMessage(SoftStartAnnealingConfig)
StepLrConfig = _reflection.GeneratedProtocolMessageType('StepLrConfig', (_message.Message,), dict(
DESCRIPTOR = _STEPLRCONFIG,
__module__ = 'nvidia_tao_tf1.cv.faster_rcnn.proto.learning_rate_pb2'
# @@protoc_insertion_point(class_scope:StepLrConfig)
))
_sym_db.RegisterMessage(StepLrConfig)
LRConfig = _reflection.GeneratedProtocolMessageType('LRConfig', (_message.Message,), dict(
DESCRIPTOR = _LRCONFIG,
__module__ = 'nvidia_tao_tf1.cv.faster_rcnn.proto.learning_rate_pb2'
# @@protoc_insertion_point(class_scope:LRConfig)
))
_sym_db.RegisterMessage(LRConfig)
# @@protoc_insertion_point(module_scope)
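# --- Illustrative usage sketch (not part of the generated output). ---
# SoftStartAnnealingConfig mixes scalar floats with the repeated
# 'annealing_points' field, which behaves like a Python list; the values
# below are hypothetical.
#
#   from nvidia_tao_tf1.cv.faster_rcnn.proto import learning_rate_pb2
#
#   lr = learning_rate_pb2.LRConfig()
#   lr.soft_start.base_lr = 0.02               # selects the 'soft_start' branch
#   lr.soft_start.start_lr = 0.002
#   lr.soft_start.annealing_points.extend([0.8, 0.9])
#   assert lr.WhichOneof('lr_config') == 'soft_start'
#   # note: entries are stored as 32-bit floats, so compare approximately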
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/proto/learning_rate_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/faster_rcnn/proto/evaluation.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from nvidia_tao_tf1.cv.faster_rcnn.proto import trt_config_pb2 as nvidia__tao__tf1_dot_cv_dot_faster__rcnn_dot_proto_dot_trt__config__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/faster_rcnn/proto/evaluation.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n4nvidia_tao_tf1/cv/faster_rcnn/proto/evaluation.proto\x1a\x34nvidia_tao_tf1/cv/faster_rcnn/proto/trt_config.proto\"=\n\x11IoUThresholdRange\x12\r\n\x05start\x18\x01 \x01(\x02\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x02\x12\x0c\n\x04step\x18\x03 \x01(\x02\"\x87\x04\n\x10\x45valuationConfig\x12\r\n\x05model\x18\x03 \x01(\t\x12\x19\n\x11rpn_pre_nms_top_N\x18\x0c \x01(\r\x12\x19\n\x11rpn_nms_max_boxes\x18\x06 \x01(\r\x12!\n\x19rpn_nms_overlap_threshold\x18\x07 \x01(\x02\x12 \n\x18\x63lassifier_nms_max_boxes\x18\x08 \x01(\r\x12(\n classifier_nms_overlap_threshold\x18\t \x01(\x02\x12\x1f\n\x17object_confidence_thres\x18\x0b \x01(\x02\x12 \n\x18use_voc07_11point_metric\x18\r \x01(\x08\x12)\n!validation_period_during_training\x18\x0f \x01(\r\x12\x12\n\nbatch_size\x18\x10 \x01(\r\x12%\n\x0etrt_evaluation\x18\x11 \x01(\x0b\x32\r.TrtInference\x12=\n\x1fgt_matching_iou_threshold_range\x18\x12 \x01(\x0b\x32\x12.IoUThresholdRangeH\x00\x12#\n\x19gt_matching_iou_threshold\x18\x13 \x01(\x02H\x00\x12\x1a\n\x12visualize_pr_curve\x18\x14 \x01(\x08\x42\x16\n\x14iou_threshold_configb\x06proto3')
,
dependencies=[nvidia__tao__tf1_dot_cv_dot_faster__rcnn_dot_proto_dot_trt__config__pb2.DESCRIPTOR,])
_IOUTHRESHOLDRANGE = _descriptor.Descriptor(
name='IoUThresholdRange',
full_name='IoUThresholdRange',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='start', full_name='IoUThresholdRange.start', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='end', full_name='IoUThresholdRange.end', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='step', full_name='IoUThresholdRange.step', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=110,
serialized_end=171,
)
_EVALUATIONCONFIG = _descriptor.Descriptor(
name='EvaluationConfig',
full_name='EvaluationConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='model', full_name='EvaluationConfig.model', index=0,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rpn_pre_nms_top_N', full_name='EvaluationConfig.rpn_pre_nms_top_N', index=1,
number=12, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rpn_nms_max_boxes', full_name='EvaluationConfig.rpn_nms_max_boxes', index=2,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rpn_nms_overlap_threshold', full_name='EvaluationConfig.rpn_nms_overlap_threshold', index=3,
number=7, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='classifier_nms_max_boxes', full_name='EvaluationConfig.classifier_nms_max_boxes', index=4,
number=8, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='classifier_nms_overlap_threshold', full_name='EvaluationConfig.classifier_nms_overlap_threshold', index=5,
number=9, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='object_confidence_thres', full_name='EvaluationConfig.object_confidence_thres', index=6,
number=11, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='use_voc07_11point_metric', full_name='EvaluationConfig.use_voc07_11point_metric', index=7,
number=13, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='validation_period_during_training', full_name='EvaluationConfig.validation_period_during_training', index=8,
number=15, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batch_size', full_name='EvaluationConfig.batch_size', index=9,
number=16, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='trt_evaluation', full_name='EvaluationConfig.trt_evaluation', index=10,
number=17, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='gt_matching_iou_threshold_range', full_name='EvaluationConfig.gt_matching_iou_threshold_range', index=11,
number=18, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='gt_matching_iou_threshold', full_name='EvaluationConfig.gt_matching_iou_threshold', index=12,
number=19, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='visualize_pr_curve', full_name='EvaluationConfig.visualize_pr_curve', index=13,
number=20, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='iou_threshold_config', full_name='EvaluationConfig.iou_threshold_config',
index=0, containing_type=None, fields=[]),
],
serialized_start=174,
serialized_end=693,
)
_EVALUATIONCONFIG.fields_by_name['trt_evaluation'].message_type = nvidia__tao__tf1_dot_cv_dot_faster__rcnn_dot_proto_dot_trt__config__pb2._TRTINFERENCE
_EVALUATIONCONFIG.fields_by_name['gt_matching_iou_threshold_range'].message_type = _IOUTHRESHOLDRANGE
_EVALUATIONCONFIG.oneofs_by_name['iou_threshold_config'].fields.append(
_EVALUATIONCONFIG.fields_by_name['gt_matching_iou_threshold_range'])
_EVALUATIONCONFIG.fields_by_name['gt_matching_iou_threshold_range'].containing_oneof = _EVALUATIONCONFIG.oneofs_by_name['iou_threshold_config']
_EVALUATIONCONFIG.oneofs_by_name['iou_threshold_config'].fields.append(
_EVALUATIONCONFIG.fields_by_name['gt_matching_iou_threshold'])
_EVALUATIONCONFIG.fields_by_name['gt_matching_iou_threshold'].containing_oneof = _EVALUATIONCONFIG.oneofs_by_name['iou_threshold_config']
DESCRIPTOR.message_types_by_name['IoUThresholdRange'] = _IOUTHRESHOLDRANGE
DESCRIPTOR.message_types_by_name['EvaluationConfig'] = _EVALUATIONCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
IoUThresholdRange = _reflection.GeneratedProtocolMessageType('IoUThresholdRange', (_message.Message,), dict(
DESCRIPTOR = _IOUTHRESHOLDRANGE,
__module__ = 'nvidia_tao_tf1.cv.faster_rcnn.proto.evaluation_pb2'
# @@protoc_insertion_point(class_scope:IoUThresholdRange)
))
_sym_db.RegisterMessage(IoUThresholdRange)
EvaluationConfig = _reflection.GeneratedProtocolMessageType('EvaluationConfig', (_message.Message,), dict(
DESCRIPTOR = _EVALUATIONCONFIG,
__module__ = 'nvidia_tao_tf1.cv.faster_rcnn.proto.evaluation_pb2'
# @@protoc_insertion_point(class_scope:EvaluationConfig)
))
_sym_db.RegisterMessage(EvaluationConfig)
# @@protoc_insertion_point(module_scope)
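# --- Illustrative usage sketch (not part of the generated output). ---
# One plausible way to resolve the 'iou_threshold_config' oneof above into a
# flat list of IoU thresholds for matching detections to ground truth; the
# threshold values are hypothetical.
#
#   import numpy as np
#   from nvidia_tao_tf1.cv.faster_rcnn.proto import evaluation_pb2
#
#   ec = evaluation_pb2.EvaluationConfig()
#   ec.gt_matching_iou_threshold_range.start = 0.5
#   ec.gt_matching_iou_threshold_range.end = 0.95
#   ec.gt_matching_iou_threshold_range.step = 0.05
#   if ec.WhichOneof('iou_threshold_config') == 'gt_matching_iou_threshold_range':
#       r = ec.gt_matching_iou_threshold_range
#       iou_list = np.arange(r.start, r.end, r.step).tolist()
#   else:
#       iou_list = [ec.gt_matching_iou_threshold]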
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/proto/evaluation_pb2.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FasterRCNN callbacks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/callbacks/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Callbacks for FasterRCNN.'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import keras
from keras.callbacks import Callback
import numpy as np
from tqdm import tqdm
from nvidia_tao_tf1.cv.common import utils as iva_utils
from nvidia_tao_tf1.cv.faster_rcnn.utils import utils
class ModelSaver(Callback):
'''Custom checkpointer for FasterRCNN, as a Keras callback.'''
def __init__(self, model_path, key, interval):
'''Initialize the checkpointer.'''
self.model_path = model_path
self.key = key
self.ckpt_interval = interval
def on_epoch_end(self, epoch, logs=None):
'''On specified epoch end, save the encoded model.'''
if (epoch + 1) % self.ckpt_interval == 0:
model_file = self.model_path.format(epoch + 1)
iva_utils.encode_from_keras(self.model,
model_file,
str.encode(self.key),
only_weights=False)
class ValidationCallback(Callback):
"""A callback for online validation during training."""
def __init__(self, val_model, val_data_loader,
val_interval, batch_size, prob_thres,
use_11point, id_to_class_map,
gt_matching_iou_list):
"""Initialize the validation hook."""
self.val_model = val_model
self.val_data_loader = val_data_loader
self.val_interval = val_interval
self.batch_size = batch_size
self.prob_thres = prob_thres
self.use_11point = use_11point
self.id_to_class_map = id_to_class_map
self.iou_list = gt_matching_iou_list
# number of validation iterations: ceil(num_samples / batch_size)
self.max_iters = (
(val_data_loader.num_samples + batch_size - 1) // batch_size
)
def on_epoch_end(self, epoch, logs=None):
'''On specified epoch end, do validation.'''
if (epoch + 1) % self.val_interval == 0:
print("Doing validation at epoch {}(1-based index)...".format(epoch + 1))
prev_lp = keras.backend.learning_phase()
keras.backend.set_learning_phase(0)
# set train model weights to validation model weights
self.val_model.set_weights(self.model.get_weights())
# do validation on validation model
maps = self.do_validation()
keras.backend.set_learning_phase(prev_lp)
print("Validation done!")
# Update logs with mAPs
for _map, _iou in zip(maps, self.iou_list):
logs[f"mAP@{_iou:.2f}"] = _map
if len(maps) > 1:
logs[f"mAP@[{self.iou_list[0]:.2f}:{self.iou_list[-1]:.2f}]"] = np.mean(maps)
# Unified alias mAP for TAO API logging
logs['mAP'] = np.mean(maps)
def do_validation(self):
"""Conduct validation during training."""
T = [dict() for _ in self.iou_list]
P = [dict() for _ in self.iou_list]
RPN_RECALL = {}
for _ in tqdm(range(self.max_iters)):
images, gt_class_ids, gt_bboxes, gt_diff = self.val_data_loader.get_array_with_diff()
image_h, image_w = images.shape[2:]
            # run the validation model to get NMSed detections and RPN proposals
            nmsed_boxes, nmsed_scores, nmsed_classes, num_dets, rois_output = \
                self.val_model.predict(images)
            # loop over each image in the batch to collect its detections
for image_idx in range(nmsed_boxes.shape[0]):
all_dets = utils.gen_det_boxes(
self.id_to_class_map, nmsed_classes,
nmsed_boxes, nmsed_scores,
image_idx, num_dets,
)
# get detection results for each IoU threshold, for each image
utils.get_detection_results(
all_dets, gt_class_ids, gt_bboxes, gt_diff, image_h,
image_w, image_idx, self.id_to_class_map, T, P, self.iou_list
)
                # calculate RPN recall for each class; this helps debugging
utils.calc_rpn_recall(
RPN_RECALL,
self.id_to_class_map,
rois_output[image_idx, ...],
gt_class_ids[image_idx, ...],
gt_bboxes[image_idx, ...]
)
# finally, compute and print all the mAP values
maps = utils.compute_map_list(
T, P, self.prob_thres,
self.use_11point, RPN_RECALL,
self.iou_list
)
return maps
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/callbacks/callbacks.py |
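A minimal sketch of how ModelSaver plugs into a standard Keras training loop. The toy model, data, key, and paths below are assumptions for illustration only; a real run uses the FasterRCNN training model, and ValidationCallback would be appended the same way once a validation model and data loader are built.

import keras
import numpy as np
from nvidia_tao_tf1.cv.faster_rcnn.callbacks.callbacks import ModelSaver

# Toy stand-in model; assumed for illustration.
model = keras.models.Sequential([keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer='sgd', loss='mse')
# '{}' in model_path is filled with the 1-based epoch number at save time.
saver = ModelSaver(model_path='/tmp/frcnn.epoch_{}.tlt', key='nvidia_tlt', interval=2)
model.fit(np.zeros((8, 4)), np.zeros((8, 1)), epochs=4, verbose=0, callbacks=[saver])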
"""Module to load and parse FRCNN experiment spec file."""
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/spec_loader/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Load experiment spec .txt files and return an experiment_pb2.Experiment object."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
from google.protobuf.text_format import Merge as merge_text_proto
import nvidia_tao_tf1.cv.faster_rcnn.proto.experiment_pb2 as experiment_pb2
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level='INFO')
logger = logging.getLogger(__name__)
def _load_proto(spec_path, proto_buffer):
"""Load spec from file and merge with given proto_buffer instance.
Args:
spec_path (str): Location of a file containing the custom spec proto.
        proto_buffer (pb2): Protocol buffer instance to be loaded.
Returns:
proto_buffer (pb2): Protocol buffer instance updated with spec.
"""
with open(spec_path, "r") as f:
merge_text_proto(f.read(), proto_buffer)
return proto_buffer
def load_experiment_spec(spec_path=None):
"""Load experiment spec from a .txt file and return an experiment_pb2.Experiment object.
Args:
spec_path (str): Location of a file containing the custom experiment spec proto. If None,
then a default spec is used.
Returns:
experiment_spec: protocol buffer instance of type experiment_pb2.Experiment.
"""
experiment_spec = experiment_pb2.Experiment()
if spec_path is None:
file_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
default_spec_path = os.path.join(file_path, 'experiment_spec/default_spec.txt')
spec_path = default_spec_path
logger.info("Loading experiment spec at %s.", spec_path)
experiment_spec = _load_proto(spec_path, experiment_spec)
return experiment_spec
def write_spec_to_disk(experiment_spec, path):
"""Write experiment_pb2.Experiment object to a text file to the given path."""
# Create the path if it does not exist yet.
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
os.makedirs(dirname)
with open(path, 'wb') as f:
f.write(str.encode(str(experiment_spec)))
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/spec_loader/spec_loader.py |
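A short, assumed round trip through the loader above: load the packaged default spec, read one field, and write a copy back to disk. The output path is hypothetical.

from nvidia_tao_tf1.cv.faster_rcnn.spec_loader.spec_loader import (
    load_experiment_spec, write_spec_to_disk)

spec = load_experiment_spec(spec_path=None)  # None falls back to the default spec
print(spec.training_config.num_epochs)
write_spec_to_disk(spec, '/tmp/frcnn_spec_copy.txt')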
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Wrapper for experiment_spec to make it easier for validation and change.'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import re
from keras.regularizers import l1, l2
import numpy as np
from nvidia_tao_tf1.cv.detectnet_v2.proto.regularizer_config_pb2 import RegularizerConfig
class ExperimentSpec(object):
'''A class to wrap the experiment spec and do some validations.
    When using the experiment spec, we usually do not validate its parameters
    extensively. If a user provides a parameter that is valid protobuf syntax
    but semantically invalid, it is not caught unless explicit checks are
    implemented before the parameter is used. The usual result is that the
    program aborts with an internal Python traceback, which is not helpful
    from the user's perspective; sometimes there is no error at all during
    execution. Many issues raised by users stem from invalid parameters in the
    spec, so we do some validation here and print useful messages that help
    users correct their spec files.
'''
def __init__(self, spec_proto):
'''Initialize the ExperimentSpec.
Args:
spec_proto(experiment_spec proto): the proto object for experiment_spec.
'''
self.spec_proto = spec_proto
self.validate_spec()
@property
def random_seed(self):
'''random seed.'''
return int(self.spec_proto.random_seed)
@property
def enc_key(self):
'''encoding key.'''
return str(self.spec_proto.enc_key)
@property
def verbose(self):
'''verbose or not.'''
return bool(self.spec_proto.verbose)
@property
def model_config(self):
'''model configurations.'''
return self.spec_proto.model_config
@property
def image_type(self):
'''Image type.'''
return int(self.model_config.input_image_config.image_type)
@property
def image_c(self):
'''Image channel number.'''
return 3 if self.image_type == 0 else 1
@property
def image_h(self):
'''Image height.'''
if self.model_config.input_image_config.WhichOneof("image_size_config") == "size_min":
return 0
return int(self.model_config.input_image_config.size_height_width.height)
@property
def image_w(self):
'''Image width.'''
if self.model_config.input_image_config.WhichOneof("image_size_config") == "size_min":
return 0
return int(self.model_config.input_image_config.size_height_width.width)
@property
def image_min(self):
"""Image smaller size of height and width."""
if self.model_config.input_image_config.WhichOneof("image_size_config") == "size_min":
return int(self.model_config.input_image_config.size_min.min)
return 0
@property
def input_dims(self):
'''Input dimensions in (C, H, W) format.'''
# replace 0 with None to be compatible with Keras Input layer
_image_h = self.image_h or None
_image_w = self.image_w or None
return [self.image_c, _image_h, _image_w]
@property
def image_channel_order(self):
'''Image channel order.'''
return str(self.model_config.input_image_config.image_channel_order)
@property
def image_mean_values(self):
'''Image mean values per channel.'''
means = self.model_config.input_image_config.image_channel_mean
if self.image_c == 3:
assert ('r' in means) and ('g' in means) and ('b' in means), (
"'r', 'g', 'b' should all be present in image_channel_mean "
"for images with 3 channels."
)
means_list = [means['r'], means['g'], means['b']]
else:
assert 'l' in means, (
"'l' should be present in image_channel_mean for images "
"with 1 channel."
)
means_list = [means['l']]
return [float(m) for m in means_list]
@property
def image_scaling_factor(self):
'''Image scaling factor.'''
return float(self.model_config.input_image_config.image_scaling_factor)
@property
def max_objs_per_img(self):
'''Maximum number of objects in an image in the dataset.'''
return self.model_config.input_image_config.max_objects_num_per_image
@property
def _backbone(self):
'''backbone type with number of layers.'''
return str(self.model_config.arch)
@property
def backbone(self):
'''backbone type, without number of layers.'''
return self._backbone.split(':')[0]
@property
def nlayers(self):
'''number of layers in backbone or subsets like EfficientNet B0, etc.'''
if ':' in self._backbone:
# normal case like resnet:18
if self._backbone.split(':')[1].isdigit():
return int(self._backbone.split(':')[1])
# case like efficientnet:b0
return self._backbone.split(':')[1]
return None
@property
def anchor_config(self):
'''anchor box configurations.'''
anc_config = self.model_config.anchor_box_config
return anc_config
@property
def anchor_sizes(self):
'''anchor box sizes configurations.'''
anc_scales = list(self.anchor_config.scale)
return [float(a) for a in anc_scales]
@property
def anchor_ratios(self):
'''anchor box ratios configurations.'''
anc_ratios = list(self.anchor_config.ratio)
return [float(a) for a in anc_ratios]
@property
def freeze_bn(self):
'''freeze BN layers or not.'''
return bool(self.model_config.freeze_bn)
@property
def freeze_blocks(self):
'''List of blocks to freeze.'''
blocks = list(self.model_config.freeze_blocks)
return [int(b) for b in blocks]
@property
def dropout_rate(self):
"""Dropout rate."""
if self.model_config.dropout_rate < 0.0:
raise ValueError("Dropout rate cannot be negative. Got {}.".format(
self.model_config.dropout_rate
))
return float(self.model_config.dropout_rate)
@property
def drop_connect_rate(self):
"""Drop-connect rate."""
if self.model_config.drop_connect_rate < 0.0:
raise ValueError("Drop connect rate cannot be negative. Got {}.".format(
self.model_config.drop_connect_rate
))
return float(self.model_config.drop_connect_rate)
@property
def rcnn_train_bs(self):
'''RCNN train batch size.'''
return int(self.model_config.roi_mini_batch)
@property
def rpn_stride(self):
'''RPN stride with respect to input image.'''
assert 16.0 == float(self.model_config.rpn_stride), '''
RPN stride can only be 16, got {}'''.format(self.model_config.rpn_stride)
return 16.0
@property
def conv_bn_share_bias(self):
'''Conv and BN layers share bias or not.'''
return not bool(self.model_config.use_bias)
@property
def roi_pool_size(self):
'''CropAndResize output spatial size.'''
return int(self.model_config.roi_pooling_config.pool_size)
@property
def roi_pool_2x(self):
'''Whether or not to double the roi_pool_size and then apply a pooling/strided conv.'''
return bool(self.model_config.roi_pooling_config.pool_size_2x)
@property
def all_projections(self):
        '''Whether or not to use all_projections for shortcut connections.'''
return bool(self.model_config.all_projections)
@property
def use_pooling(self):
'''use pooling or use strided conv instead.'''
return bool(self.model_config.use_pooling)
@property
def enable_qat(self):
'''Enable QAT or not.'''
return bool(self.training_config.enable_qat)
@property
def activation_type(self):
"""activation function type."""
return str(self.model_config.activation.activation_type)
@property
def training_config(self):
'''Training config.'''
return self.spec_proto.training_config
@property
def training_dataset(self):
'''Training dataset.'''
# data_sources can be repeated(multiple)
for ds in self.spec_proto.dataset_config.data_sources:
image_path = str(ds.image_directory_path)
assert os.path.isdir(image_path), (
"Training image path not found: {}".format(image_path)
)
tfrecords_path = str(ds.tfrecords_path)
tfrecords = glob.glob(tfrecords_path)
assert tfrecords, (
"No TFRecord file found with the pattern : {}".format(tfrecords_path)
)
val_type = self.spec_proto.dataset_config.WhichOneof('dataset_split_type')
if val_type == 'validation_fold':
val_fold = self.spec_proto.dataset_config.validation_fold
assert val_fold >= 0, (
"Validation fold should be non-negative, got {}".format(val_fold)
)
elif val_type == 'validation_data_source':
val_set = self.spec_proto.dataset_config.validation_data_source
assert os.path.isdir(str(val_set.image_directory_path)), (
"Validation image directory not found: {}".format(str(val_set.image_directory_path))
)
val_tfrecords = str(val_set.tfrecords_path)
assert glob.glob(val_tfrecords), (
"Validation TFRecords with the pattern: {} not found.".format(val_tfrecords)
)
image_ext = str(self.spec_proto.dataset_config.image_extension)
assert image_ext.lower() in ['jpg', 'jpeg', 'png'], (
"Only image format jpg/jpeg/png are supported, "
"got extension {}".format(image_ext)
)
return self.spec_proto.dataset_config
@property
def class_mapping(self):
'''class mapping.'''
cm = dict(self.spec_proto.dataset_config.target_class_mapping)
assert len(cm), 'Class mapping is empty.'
# class_mapping should not contains a background class because we
# will append it implicitly
assert 'background' not in cm, (
"Class mapping should not "
"contain a background class."
)
return cm
@property
def class_to_id(self):
'''dict to map class names to class IDs(including background).'''
class_names = sorted(set(self.class_mapping.values())) + ['background']
return dict(zip(class_names, range(len(class_names))))
@property
def id_to_class(self):
'''dict to map class IDs to class names(including background).'''
class_names = self.class_to_id.keys()
class_ids = [self.class_to_id[c] for c in class_names]
return dict(zip(class_ids, class_names))
@property
def num_classes(self):
'''number of classes(including background).'''
return len(self.class_to_id.keys())
@property
def data_augmentation(self):
'''data augmentation config.'''
return self.spec_proto.augmentation_config
@property
def enable_augmentation(self):
'''Enable data augmentation or not.'''
return bool(self.training_config.enable_augmentation)
@property
def epochs(self):
'''Number of epochs for training.'''
return int(self.training_config.num_epochs)
@property
def batch_size_per_gpu(self):
'''Image batch size per GPU.'''
return int(self.training_config.batch_size_per_gpu)
@property
def pretrained_weights(self):
'''path of the pretrained weights.'''
pw = str(self.training_config.pretrained_weights)
if pw:
assert os.path.isfile(pw), (
"Pretrained weights not found: {}".format(pw)
)
return pw
@property
def pretrained_model(self):
'''path of the pretrained(pruned) model.'''
pm = str(self.training_config.retrain_pruned_model)
if pm:
assert os.path.isfile(pm), (
"Pruned model for retrain not found: {}".format(pm)
)
return pm
@property
def resume_from_model(self):
'''resume training from checkpoint model.'''
rm = str(self.training_config.resume_from_model)
if rm:
assert os.path.isfile(rm), (
"Model to be resumed is not found: {}".format(rm)
)
            assert re.match(r'.*\.epoch_[0-9]+\.(?:tlt|hdf5)$', rm), (
                "`resume_from_model` path not conforming to the saved model pattern: "
                r"`*.epoch_[0-9]+.(tlt|hdf5)`"
                ", got {}".format(rm)
            )
return rm
@property
def checkpoint_interval(self):
'''Saving checkpoint every k epochs.'''
# defaults to k=1(unset)
if self.training_config.checkpoint_interval == 0:
return 1
return int(self.training_config.checkpoint_interval)
@property
def rpn_min_overlap(self):
'''RPN min overlap below which we regard anchors as negative anchors.'''
return float(self.training_config.rpn_min_overlap)
@property
def rpn_max_overlap(self):
'''RPN max overlap above which we regard anchors as positive anchors.'''
return float(self.training_config.rpn_max_overlap)
@property
def rcnn_min_overlap(self):
'''RCNN min overlap below which we regard RoIs as negative.'''
return float(self.training_config.classifier_min_overlap)
@property
def rcnn_max_overlap(self):
'''RCNN max overlap above which we regard RoIs as positive.'''
return float(self.training_config.classifier_max_overlap)
@property
def gt_as_roi(self):
'''Whether or not to use groundtruth boxes as RoIs to train RCNN.'''
return bool(self.training_config.gt_as_roi)
@property
def std_scaling(self):
'''scaling factor applied to RPN deltas output.'''
return float(self.training_config.std_scaling)
@property
def rcnn_regr_std(self):
'''scaling factors applied to RCNN deltas output.'''
stds = dict(self.training_config.classifier_regr_std)
return [float(stds['x']), float(stds['y']),
float(stds['w']), float(stds['h'])]
@property
def rpn_train_bs(self):
'''training batch size for RPN for each image.'''
return int(self.training_config.rpn_mini_batch)
@property
def rpn_pre_nms_top_N(self):
'''RPN pre NMS top N.'''
return int(self.training_config.rpn_pre_nms_top_N)
@property
def rpn_post_nms_top_N(self):
'''RPN post NMS top N.'''
return int(self.training_config.rpn_nms_max_boxes)
@property
def rpn_nms_iou_thres(self):
'''IoU threshold for RPN NMS.'''
return float(self.training_config.rpn_nms_overlap_threshold)
@property
def regularization_config(self):
'''regularization config.'''
return self.training_config.regularizer
@property
def reg_type(self):
'''regularization type in enum.'''
return self.regularization_config.type
@property
def regularizer(self):
'''regularizer in keras object.'''
        if self.reg_type == RegularizerConfig.L1:
            return l1
        if self.reg_type == RegularizerConfig.L2:
            return l2
        return None
@property
def weight_decay(self):
'''weight decay factor.'''
return float(self.regularization_config.weight)
@property
def optimizer(self):
'''Optimizer.'''
return self.training_config.optimizer
@property
def lr_scheduler(self):
'''Learning rate scheduler.'''
return self.training_config.learning_rate
@property
def lambda_rpn_regr(self):
'''scaling factor for RPN regressor loss.'''
return float(self.training_config.lambda_rpn_regr)
@property
def lambda_rpn_class(self):
'''scaling factor for RPN classifier loss.'''
return float(self.training_config.lambda_rpn_class)
@property
def lambda_cls_regr(self):
'''scaling factor for RCNN classifier loss.'''
return float(self.training_config.lambda_cls_regr)
@property
def lambda_cls_class(self):
'''scaling factor for RCNN regressor loss.'''
return float(self.training_config.lambda_cls_class)
@property
def inference_config(self):
'''inference config.'''
return self.spec_proto.inference_config
@property
def inference_images_dir(self):
'''the path to the image directory for doing inference.'''
infer_image_dir = str(self.inference_config.images_dir)
assert infer_image_dir and os.path.isdir(infer_image_dir), (
"Inference images directory not found: {}".format(infer_image_dir)
)
image_ext = str(self.spec_proto.dataset_config.image_extension)
images = glob.glob(os.path.join(infer_image_dir, '*.'+image_ext))
assert images, (
"Inference images not found in the directory: {}".format(infer_image_dir)
)
return str(self.inference_config.images_dir)
@property
def inference_model(self):
'''The model path for doing inference.'''
# Inference model may not exist at the start time,
# so we check it when it is called here instead of in constructor.
assert os.path.isfile(str(self.inference_config.model)), '''
Inference model not found: {}'''.format(str(self.inference_config.model))
return str(self.inference_config.model)
@property
def inference_trt_config(self):
'''TensorRT inference config.'''
if self.inference_config.HasField('trt_inference'):
return self.inference_config.trt_inference
return None
@property
def inference_trt_engine(self):
'''The TensorRT engine file from tlt-converter for inference.'''
if (self.inference_trt_config is not None):
_engine_file = str(self.inference_trt_config.trt_engine)
assert os.path.isfile(_engine_file), \
'TensorRT Engine for inference not found: {}'.format(_engine_file)
return _engine_file
return None
@property
def inference_output_images_dir(self):
'''The output image directory during inference.'''
return str(self.inference_config.detection_image_output_dir)
@property
def inference_output_labels_dir(self):
'''The output labels directory during inference.'''
return str(self.inference_config.labels_dump_dir)
@property
def infer_rpn_pre_nms_top_N(self):
'''RPN pre NMS top N during inference.'''
return int(self.inference_config.rpn_pre_nms_top_N)
@property
def infer_rpn_post_nms_top_N(self):
'''RPN post NMS top N during inference.'''
return int(self.inference_config.rpn_nms_max_boxes)
@property
def infer_rpn_nms_iou_thres(self):
'''RPN NMS IoU threshold during inference.'''
return float(self.inference_config.rpn_nms_overlap_threshold)
@property
def vis_conf(self):
'''bbox visualize confidence threshold for inference.'''
return float(self.inference_config.bbox_visualize_threshold)
@property
def infer_confidence_thres(self):
'''bbox confidence threshold for inference for NMS export.'''
return float(self.inference_config.object_confidence_thres)
@property
def infer_rcnn_post_nms_top_N(self):
'''RCNN post NMS top N during inference.'''
return int(self.inference_config.classifier_nms_max_boxes)
@property
def infer_rcnn_nms_iou_thres(self):
'''RCNN NMS IoU threshold during inference.'''
return float(self.inference_config.classifier_nms_overlap_threshold)
@property
def infer_batch_size(self):
"""Batch size for inference."""
# defaults to 1 if 0(unset)
return int(self.inference_config.batch_size) or 1
@property
def infer_nms_score_bits(self):
"""NMS score bits for TensorRT inference."""
return int(self.inference_config.nms_score_bits)
@property
def eval_config(self):
'''Evaluation config.'''
return self.spec_proto.evaluation_config
@property
def eval_trt_config(self):
"""TensorRT based evaluation config."""
if self.eval_config.HasField("trt_evaluation"):
return self.eval_config.trt_evaluation
return None
@property
def eval_trt_engine(self):
'''The TensorRT engine file from tlt-converter for evaluation.'''
if (self.eval_trt_config is not None):
_engine_file = str(self.eval_trt_config.trt_engine)
assert os.path.isfile(_engine_file), \
'TensorRT Engine for evaluation not found: {}'.format(_engine_file)
return _engine_file
return None
@property
def eval_model(self):
'''Model path for evaluation.'''
_model = str(self.eval_config.model)
assert os.path.isfile(_model), (
"Evaluation model not found: {}".format(_model)
)
return _model
@property
def eval_rpn_pre_nms_top_N(self):
'''RPN pre nms top N during evaluation.'''
return int(self.eval_config.rpn_pre_nms_top_N)
@property
def eval_rpn_post_nms_top_N(self):
'''RPN post NMS top N during evaluation.'''
return int(self.eval_config.rpn_nms_max_boxes)
@property
def eval_rpn_nms_iou_thres(self):
'''RPN NMS IoU threshold for evaluation.'''
return float(self.eval_config.rpn_nms_overlap_threshold)
@property
def eval_rcnn_post_nms_top_N(self):
'''RCNN post NMS top N during evaluation.'''
return int(self.eval_config.classifier_nms_max_boxes)
@property
def eval_rcnn_nms_iou_thres(self):
'''RCNN NMS IoU threshold for evaluation.'''
return float(self.eval_config.classifier_nms_overlap_threshold)
@property
def eval_confidence_thres(self):
'''Confidence threshold for evaluation.'''
return float(self.eval_config.object_confidence_thres)
@property
def validation_period(self):
"""Validation period during training for online validation."""
val_period = int(self.eval_config.validation_period_during_training)
# defaults to 1 if not set
if val_period == 0:
val_period = 1
return val_period
@property
def eval_batch_size(self):
"""Batch size for evaluation and online validation."""
if int(self.eval_config.batch_size):
return int(self.eval_config.batch_size)
# if 0(unset), use 1 as default
return 1
@property
def use_voc07_metric(self):
'''Whether or not to use PASCAL VOC 07 metric for AP calculation.'''
return bool(self.eval_config.use_voc07_11point_metric)
@property
def eval_gt_matching_iou_thres(self):
"""Evaluation IoU threshold between detected box and groundtruth box."""
if self.eval_config.WhichOneof("iou_threshold_config") == "gt_matching_iou_threshold":
assert 0.0 < self.eval_config.gt_matching_iou_threshold < 1.0, (
"IoU threshold should be in the range (0, 1), got {}".format(
self.eval_config.gt_matching_iou_threshold
)
)
return self.eval_config.gt_matching_iou_threshold
return None
@property
def eval_gt_matching_iou_thres_range(self):
"""Evaluation IoU threshold range between detected box and groundtruth box."""
if self.eval_config.WhichOneof("iou_threshold_config") == "gt_matching_iou_threshold_range":
thres_range = self.eval_config.gt_matching_iou_threshold_range
assert 0.0 < thres_range.start < thres_range.end <= 1.0, (
"IoU threshold should be in the range (0, 1), got start: {}, end: {}".format(
thres_range.start,
thres_range.end
)
)
assert 0.0 < thres_range.step < 1.0, (
"IoU threshold range step size should be in (0, 1), got {}".format(
thres_range.step,
)
)
return self.eval_config.gt_matching_iou_threshold_range
return None
@property
def eval_gt_matching_iou_list(self):
"""The list of IoUs for matching detected boxes and groundtruth boxes."""
if self.eval_gt_matching_iou_thres_range is not None:
return np.arange(
self.eval_gt_matching_iou_thres_range.start,
self.eval_gt_matching_iou_thres_range.end,
self.eval_gt_matching_iou_thres_range.step
).tolist()
if self.eval_gt_matching_iou_thres is not None:
return [self.eval_gt_matching_iou_thres]
raise ValueError(
"Either specify a gt_matching_iou_threshold_range "
"or a gt_matching_iou_threshold in the evaluation_config. "
"Neither is found."
)
@property
def early_stopping(self):
"""Early stopping config."""
if self.training_config.HasField("early_stopping"):
es = self.training_config.early_stopping
if es.monitor not in ["loss"]:
raise ValueError(
"Only `loss` is supported monitor"
f", got {es.monitor}"
)
if es.min_delta < 0.:
raise ValueError(
f"`min_delta` should be non-negative, got {es.min_delta}"
)
if es.patience == 0:
raise ValueError(
f"`patience` should be positive, got {es.patience}"
)
return es
return None
def validate_spec(self):
'''Validate parameters in spec file.'''
self.validate_model_config()
self.validate_training_config()
self.validate_evaluation_config()
self.validate_inference_config()
def validate_model_config(self):
'''Check for model config.'''
# Check image type
assert self.image_type in [0, 1], '''
Input image type can only be RGB(0) or grayscale(1),
got {}'''.format(self.image_type)
# Check image channel order
assert self.image_channel_order in ['bgr', 'rgb', 'l'], '''
Image channel order can only be bgr, rgb or l,
got {}'''.format(self.image_channel_order)
# Check image height and width
assert (self.image_h == 0 or self.image_h >= 160), '''Image height should be at least 160,
got {}'''.format(self.image_h)
assert (self.image_w == 0 or self.image_w >= 160), '''Image width should be at least 160,
got {}'''.format(self.image_w)
assert (self.image_min == 0 or self.image_min >= 160), (
"Image min side should be at least 160, got {}".format(self.image_min)
)
# Check image mean values
assert len(self.image_mean_values) == self.image_c, '''
Length of image mean values: {} does not match
image channel number: {}'''.format(len(self.image_mean_values), self.image_c)
for idx, m in enumerate(self.image_mean_values):
assert 0.0 < m < 255.0, '''image_mean_values[{}]
should be between 0.0 and 255.0, got {}'''.format(idx, m)
# Check image scaling factor
assert self.image_scaling_factor > 0.0, '''
Image scaling factor should be positive,
got {}'''.format(self.image_scaling_factor)
# check max_objs_per_img
assert self.max_objs_per_img > 0, ('Maximum number of objects in an image should be ' +
'positive, got {}'.format(self.max_objs_per_img))
# Check backbone
_valid_backbones = ['resnet:10',
'resnet:18',
'resnet:34',
'resnet:50',
'resnet:101',
'vgg16',
'vgg:16',
'vgg:19',
'googlenet',
'mobilenet_v1',
'mobilenet_v2',
'darknet:19',
'darknet:53',
'resnet101',
'efficientnet:b0',
'efficientnet:b1',
'efficientnet:b2',
'efficientnet:b3',
'efficientnet:b4',
'efficientnet:b5',
'efficientnet:b6',
'efficientnet:b7']
assert self._backbone in _valid_backbones, '''
Backbone {} is not implemented, please
choose from {}.'''.format(self._backbone, _valid_backbones)
# Check Anchors
assert len(self.anchor_sizes) > 0, '''
Anchor sizes should not be empty.'''
assert len(self.anchor_ratios) > 0, '''
Anchor ratios should not be empty.'''
for _as in self.anchor_sizes:
assert _as > 0.0, '''Anchor size should be positive,
got {}'''.format(_as)
        for _ar in self.anchor_ratios:
            assert _ar > 0.0, '''Anchor ratio should be positive,
            got {}'''.format(_ar)
# Check freeze_blocks
if self._backbone.startswith('resnet'):
assert set(self.freeze_blocks) <= set([0, 1, 2, 3]), '''
ResNet freeze_blocks should be a subset of {}
got {}'''.format([0, 1, 2, 3], self.freeze_blocks)
elif self._backbone.startswith('vgg'):
assert set(self.freeze_blocks) <= set([1, 2, 3, 4, 5]), '''
VGG freeze_blocks should be a subset of {}
got {}'''.format([1, 2, 3, 4, 5], self.freeze_blocks)
elif self._backbone.startswith('googlenet'):
assert set(self.freeze_blocks) <= set([0, 1, 2, 3, 4, 5, 6, 7]), '''
GoogLeNet freeze_blocks should be a subset of {}
got {}'''.format([0, 1, 2, 3, 4, 5, 6, 7], self.freeze_blocks)
elif self._backbone.startswith('mobilenet_v1'):
assert set(self.freeze_blocks) <= set(range(12)), '''
MobileNet V1 freeze_blocks should be a subset of {}
got {}'''.format(list(range(12)), self.freeze_blocks)
elif self._backbone.startswith('mobilenet_v2'):
assert set(self.freeze_blocks) <= set(range(14)), '''
MobileNet V2 freeze_blocks should be a subset of {}
got {}'''.format(list(range(14)), self.freeze_blocks)
elif self._backbone.startswith('darknet'):
assert set(self.freeze_blocks) <= set(range(6)), '''
DarkNet freeze_blocks should be a subset of {}
got {}'''.format(list(range(6)), self.freeze_blocks)
assert self.rcnn_train_bs > 0, '''
RCNN train batch size should be a positive integer
got {}'''.format(self.rcnn_train_bs)
def validate_training_config(self):
'''Check for training config.'''
self.validate_augmentation()
assert self.batch_size_per_gpu >= 1, '''
Batch size per GPU should be positive, got {}'''.format(self.batch_size_per_gpu)
assert self.epochs > 0, '''
Number of epochs should be positive, got {}'''.format(self.epochs)
assert 0.0 <= self.rcnn_min_overlap < self.rcnn_max_overlap <= 1.0, '''
RCNN min overlap should be non-negative and less than
RCNN max overlap, got {}(min),
and {}(max)'''.format(self.rcnn_min_overlap, self.rcnn_max_overlap)
assert 0.0 < self.rpn_min_overlap < self.rpn_max_overlap <= 1.0, '''
RPN min overlap should be positive and less than RPN max overlap
got {}(min) and {}(max)'''.format(self.rpn_min_overlap, self.rpn_max_overlap)
assert self.std_scaling > 0.0, '''std_scaling should be positive
got {}'''.format(self.std_scaling)
for idx, s in enumerate(self.rcnn_regr_std):
assert s > 0.0, '''RCNN regressor std[{}] should be positive
got {}'''.format(idx, s)
assert self.rpn_train_bs > 0, '''RPN train batch size should be positive
got {}'''.format(self.rpn_train_bs)
assert self.rpn_post_nms_top_N > 0, '''RPN post NMS top N should be positive
got {}'''.format(self.rpn_post_nms_top_N)
        assert self.rpn_post_nms_top_N < self.rpn_pre_nms_top_N, '''
        RPN post NMS top N should be less than RPN pre NMS top N,
        got {}(pre) and {}(post)'''.format(self.rpn_pre_nms_top_N, self.rpn_post_nms_top_N)
assert 1.0 > self.rpn_nms_iou_thres > 0.0, '''
RPN NMS IoU threshold should in (0, 1),
got {}'''.format(self.rpn_nms_iou_thres)
if self.reg_type in [RegularizerConfig.L1, RegularizerConfig.L2]:
assert 0.0 < self.weight_decay < 1.0, '''
Weight decay should be positive and less than 1.0, got {}'''.format(self.weight_decay)
assert self.lambda_rpn_regr > 0.0, '''
lambda_rpn_regr should be positive, got {}'''.format(self.lambda_rpn_regr)
assert self.lambda_rpn_class > 0.0, '''
lambda_rpn_class should be positive, got {}'''.format(self.lambda_rpn_class)
assert self.lambda_cls_regr > 0.0, '''
lambda_cls_regr should be positive, got {}'''.format(self.lambda_cls_regr)
assert self.lambda_cls_class > 0.0, '''
lambda_cls_class should be positive, got {}'''.format(self.lambda_cls_class)
def validate_inference_config(self):
'''Check for inference config.'''
        assert 0 < self.infer_rpn_post_nms_top_N < self.infer_rpn_pre_nms_top_N, '''
        Inference RPN post NMS top N should be positive and less than inference RPN
        pre NMS top N, got {}(pre) and {}(post)'''.format(self.infer_rpn_pre_nms_top_N,
                                                          self.infer_rpn_post_nms_top_N)
assert 0.0 < self.infer_rpn_nms_iou_thres < 1.0, '''
Inference RPN NMS IoU threshold should be in (0, 1), got
{}'''.format(self.infer_rpn_nms_iou_thres)
assert 0.0 < self.vis_conf < 1.0, '''
Bbox visualization threshold for inference should be in (0, 1),
got {}'''.format(self.vis_conf)
assert 0.0 < self.infer_confidence_thres < 1.0, (
"object_confidence_thres for inference should be in (0, 1), "
"got {}").format(self.infer_confidence_thres)
assert 0.0 < self.infer_rcnn_nms_iou_thres < 1.0, '''
Inference RCNN NMS IoU threshold should be in (0, 1),
got {}'''.format(self.infer_rcnn_nms_iou_thres)
assert self.infer_rcnn_post_nms_top_N > 0, '''
Inference RCNN post NMS top N should be positive,
got {}'''.format(self.infer_rcnn_post_nms_top_N)
def validate_evaluation_config(self):
'''Check for evaluation config.'''
assert 0 < self.eval_rpn_post_nms_top_N < self.eval_rpn_pre_nms_top_N, '''
Evaluation RPN post NMS top N should be positive and less than
pre NMS top N, got {}(pre), and {}(post)'''.format(self.eval_rpn_pre_nms_top_N,
self.eval_rpn_post_nms_top_N)
assert 0.0 < self.eval_rpn_nms_iou_thres < 1.0, '''
Evaluation RPN NMS IoU threshold should be in (0, 1),
got {}'''.format(self.eval_rpn_nms_iou_thres)
assert 0.0 < self.eval_rcnn_nms_iou_thres < 1.0, '''
Evaluation RCNN NMS IoU threshold should be in (0, 1),
got {}'''.format(self.eval_rcnn_nms_iou_thres)
assert self.eval_rcnn_post_nms_top_N > 0, '''
Evaluation RCNN post NMS top N should be positive,
got {}'''.format(self.eval_rcnn_post_nms_top_N)
        assert 0.0 < self.eval_confidence_thres < 1.0, '''
        Evaluation object confidence threshold should be in (0, 1),
        got {}'''.format(self.eval_confidence_thres)
def validate_augmentation(self):
'''Check for data augmentation config.'''
aug = self.data_augmentation
        if (self.image_w > 0 and aug.preprocessing.output_image_width > 0):
            assert aug.preprocessing.output_image_width == self.image_w, '''
            Augmentation output image width does not match model input width,
            {} vs {}'''.format(aug.preprocessing.output_image_width,
                               self.image_w)
        if (self.image_h > 0 and aug.preprocessing.output_image_height > 0):
            assert aug.preprocessing.output_image_height == self.image_h, '''
            Augmentation output image height does not match model input height,
            {} vs {}'''.format(aug.preprocessing.output_image_height,
                               self.image_h)
        assert aug.preprocessing.output_image_channel == self.image_c, '''
        Augmentation output image channel number does not match model input
        channel number, {} vs {}'''.format(aug.preprocessing.output_image_channel,
                                           self.image_c)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/spec_loader/spec_wrapper.py |
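A minimal sketch of the wrapper in use; the spec path is hypothetical. Validation runs in the constructor, so an invalid spec fails fast with a readable assertion message instead of a deep traceback later in training.

from nvidia_tao_tf1.cv.faster_rcnn.spec_loader.spec_loader import load_experiment_spec
from nvidia_tao_tf1.cv.faster_rcnn.spec_loader.spec_wrapper import ExperimentSpec

spec = ExperimentSpec(load_experiment_spec('/workspace/specs/frcnn_train.txt'))
print(spec.backbone, spec.nlayers)       # e.g. 'resnet', 18
print(spec.input_dims)                   # [C, H, W]; H/W are None for dynamic shape
print(spec.eval_gt_matching_iou_list)    # IoU threshold(s) used for mAP matching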
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Unit test for spec loader.'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import pytest
from nvidia_tao_tf1.cv.faster_rcnn.spec_loader import spec_loader
from nvidia_tao_tf1.cv.faster_rcnn.spec_loader.spec_wrapper import ExperimentSpec
@pytest.fixture(scope='function')
def _spec_file():
parent_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
frcnn_root_dir = os.path.dirname(parent_dir)
return os.path.join(frcnn_root_dir, 'experiment_spec/default_spec_ci.txt')
@pytest.fixture(scope='function')
def _out_spec_file():
os_handle, out_file_name = tempfile.mkstemp()
os.close(os_handle)
return out_file_name
def test_spec_loader(_spec_file, _out_spec_file):
spec = spec_loader.load_experiment_spec(_spec_file)
spec_loader.write_spec_to_disk(spec, _out_spec_file)
def test_spec_wrapper(_spec_file):
spec = spec_loader.load_experiment_spec(_spec_file)
spec_obj = ExperimentSpec(spec)
assert spec_obj, (
"Invalid spec file: {}".format(_spec_file)
)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/spec_loader/tests/test_spec_loader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data augmentation helper functions for FasterRCNN."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def random_hflip(image, prob, seed):
"""Random horizontal flip.
Args:
image(Tensor): The input image in (C, H, W).
prob(float): The probability for horizontal flip.
seed(int): The random seed.
Returns:
out_image(Tensor): The output image.
        is_flipped(boolean Tensor): a boolean scalar tensor indicating whether the flip
            was applied. This can be used to manipulate the labels accordingly.
"""
val = tf.random.uniform([], maxval=1.0, seed=seed)
is_flipped = tf.cast(
tf.cond(
tf.less_equal(val, prob),
true_fn=lambda: tf.constant(1.0, dtype=tf.float32),
false_fn=lambda: tf.constant(0.0, dtype=tf.float32)
),
tf.bool
)
# CHW to HWC
image_hwc = tf.transpose(image, (1, 2, 0))
# flip and to CHW
flipped_image = tf.transpose(tf.image.flip_left_right(image_hwc), (2, 0, 1))
out_image = tf.cond(
is_flipped,
true_fn=lambda: flipped_image,
false_fn=lambda: image
)
return out_image, is_flipped
def hflip_bboxes(boxes, image_width):
"""Flip the bboxes horizontally.
Args:
boxes(Tensor): (N, 4) shaped bboxes in [y1, x1, y2, x2] absolute coordinates.
image_width(Tensor): image width for calculating the flipped coordinates.
Returns:
out_boxes(Tensor): horizontally flipped boxes.
"""
# x1 becomes new x2, while x2 becomes new x1
# (N,)
x1_new = tf.cast(image_width, tf.float32) - 1.0 - boxes[:, 3]
x2_new = tf.cast(image_width, tf.float32) - 1.0 - boxes[:, 1]
# (N, 4)
flipped_boxes = tf.stack([boxes[:, 0], x1_new, boxes[:, 2], x2_new], axis=1)
# keep all-zero boxes untouched as they are padded boxes
out_boxes = tf.where(
tf.cast(tf.reduce_sum(tf.math.abs(boxes), axis=-1), tf.bool),
x=flipped_boxes,
y=boxes
)
return out_boxes
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/data_augmentation/augmentation.py |
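A TF1 session sketch (assumed shapes and values) showing how random_hflip and hflip_bboxes combine so labels stay consistent with the image: the boxes are only flipped when is_flipped is true.

import numpy as np
import tensorflow as tf
from nvidia_tao_tf1.cv.faster_rcnn.data_augmentation.augmentation import (
    hflip_bboxes, random_hflip)

image = tf.placeholder(tf.float32, shape=(3, 4, 6))   # (C, H, W)
boxes = tf.placeholder(tf.float32, shape=(None, 4))   # [y1, x1, y2, x2], absolute
out_image, is_flipped = random_hflip(image, prob=0.5, seed=42)
# Flip the boxes only when the image itself was flipped.
out_boxes = tf.cond(is_flipped,
                    true_fn=lambda: hflip_bboxes(boxes, image_width=6),
                    false_fn=lambda: boxes)
with tf.Session() as sess:
    img, bbs = sess.run([out_image, out_boxes],
                        feed_dict={image: np.zeros((3, 4, 6), np.float32),
                                   boxes: np.array([[0., 1., 2., 3.]], np.float32)})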
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data augmentation helper functions for FasterRCNN."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/data_augmentation/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FasterRCNN test utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/tests/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Utility functions used by unit tests for FasterRCNN.'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
def _take_first_k(arr, k, *args, **kargs):
    '''Deterministically return the first k indices (monkey patch for random top-k picks).'''
return np.arange(k)
def _fake_uniform(shape, maxval, dtype, seed):
'''A function that monkey patches tf.random.uniform to make it deterministic in test.'''
enough = (maxval >= shape[0])
ret_enough = tf.where(tf.ones(shape))[:, 0]
r = tf.cast(tf.ceil(shape[0] / maxval), tf.int32)
ret_segment = tf.where(tf.ones([maxval]))[:, 0]
ret_not_enough = tf.tile(ret_segment, [r])[:tf.cast(shape[0], tf.int32)]
return tf.cond(enough,
true_fn=lambda: ret_enough,
false_fn=lambda: ret_not_enough)
def _fake_choice(arr, num, replace, *args):
'''A function that monkey patches np.random.choice.'''
if arr.size >= num:
return arr[:num]
r = (num + arr.size - 1) // arr.size
a = np.tile(arr, r)
return a[:num]
def _fake_exp(t):
'''A fake exp function to replace tf.exp for monkey patch.'''
def _exp_np(x):
return np.exp(x)
return tf.py_func(_exp_np, [t], tf.float32)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/tests/utils.py |
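A hypothetical pytest sketch of how these fakes are applied: monkey-patching np.random.choice with _fake_choice makes sampling deterministic, so the test below always picks the first elements.

import numpy as np
from nvidia_tao_tf1.cv.faster_rcnn.tests import utils as test_utils

def test_fake_choice_is_deterministic(monkeypatch):
    # Replace the random primitive with the deterministic fake above.
    monkeypatch.setattr(np.random, 'choice', test_utils._fake_choice)
    picked = np.random.choice(np.array([5, 7, 9]), 2, False)
    assert (picked == np.array([5, 7])).all()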
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for FasterRCNN."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilitity functions for FasterRCNN."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import math
import os
import random
import cv2
from matplotlib import pyplot as plt
import numpy as np
from PIL import Image
import tensorflow as tf
from nvidia_tao_tf1.cv.faster_rcnn.layers.utils import iou_np
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="DEBUG")
logger = logging.getLogger(__name__)
def get_init_ops():
"""Return all ops required for initialization."""
"""copied from dlav.common.graph.initializer"""
return tf.group(tf.local_variables_initializer(),
tf.tables_initializer(),
*tf.get_collection('iterator_init'))
def resize_all(img, new_height, new_width):
"""Resize both the width and height."""
(height, width) = img.shape[:2]
if height == new_height and width == new_width:
return img, (1.0, 1.0)
img = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_CUBIC)
return img, (float(new_height)/height, float(new_width)/width)
def resize_min(img, min_side):
"""Resize both the width and height."""
(height, width) = img.shape[:2]
if height <= width:
target_size = (int(width * (min_side / height)), min_side)
ratio = (min_side / height, min_side / height)
else:
target_size = (min_side, int(height * (min_side / width)))
ratio = (min_side / width, min_side / width)
img = cv2.resize(img, target_size, interpolation=cv2.INTER_CUBIC)
return img, ratio
def preprocess_images(img,
image_h,
image_w,
image_c,
image_min,
image_scaling_factor,
image_mean_values,
image_channel_order,
expand_dims=True):
"""Resize and normalize image."""
if (image_h > 0 and image_w > 0):
img, ratio = resize_all(img, image_h, image_w)
elif image_min > 0:
img, ratio = resize_min(img, image_min)
    else:
        raise ValueError(
            "Either image static shape (height and width) or dynamic shape "
            "(minimal side) should be specified."
        )
if image_c == 3:
assert len(image_mean_values) == 3, \
'Image mean values length should be 3 for color image, got {}'.format(image_mean_values)
if image_channel_order == 'rgb':
img = img[:, :, ::-1]
if image_channel_order == 'bgr':
image_mean_values = image_mean_values[::-1]
img -= np.array(image_mean_values)
img /= image_scaling_factor
elif image_c == 1:
        assert len(image_mean_values) == 1, '''
            Image mean values length should be 1 for
            grayscale image, got {}'''.format(image_mean_values)
img -= np.array(image_mean_values)
img /= image_scaling_factor
img = np.expand_dims(img, axis=2)
else:
raise ValueError('Unsupported image type with channel number: {}'.format(image_c))
# HWC to CHW
img = np.transpose(img, (2, 0, 1))
# add batch dim
if expand_dims:
img = np.expand_dims(img, axis=0)
return img, ratio
def preprocess_image_batch(imgs,
image_h,
image_w,
image_c,
image_min,
image_scaling_factor,
image_mean_values,
image_channel_order):
"""Resize and normalize a batch of images."""
img_batch = []
ratio_batch = []
original_shape = []
for img in imgs:
_img, _ratio = preprocess_images(img.astype(np.float32), image_h,
image_w, image_c, image_min,
image_scaling_factor,
image_mean_values,
image_channel_order,
expand_dims=False)
img_batch.append(_img)
ratio_batch.append(_ratio)
original_shape.append(img.shape[:2])
# (N, C, H, W), (N, 2)
return np.stack(img_batch, axis=0), ratio_batch, original_shape
def get_original_coordinates(ratio, x1, y1, x2, y2, orig_h=None, orig_w=None):
"""compute original bbox given resized bbox."""
if type(ratio) is tuple:
real_x1 = int(round(x1 / ratio[1]))
real_y1 = int(round(y1 / ratio[0]))
real_x2 = int(round(x2 / ratio[1]))
real_y2 = int(round(y2 / ratio[0]))
elif type(ratio) is float:
real_x1 = int(round(x1 / ratio))
real_y1 = int(round(y1 / ratio))
real_x2 = int(round(x2 / ratio))
real_y2 = int(round(y2 / ratio))
else:
raise TypeError('invalid data type for ratio.')
if orig_h is not None:
real_y1 = max(min(real_y1, orig_h-1), 0)
real_y2 = max(min(real_y2, orig_h-1), 0)
if orig_w is not None:
real_x1 = max(min(real_x1, orig_w-1), 0)
real_x2 = max(min(real_x2, orig_w-1), 0)
return (real_x1, real_y1, real_x2, real_y2)
def union(au, bu, area_intersection):
"""Union of two boxes."""
area_a = (au[2] - au[0] + 1.) * (au[3] - au[1] + 1.)
area_b = (bu[2] - bu[0] + 1.) * (bu[3] - bu[1] + 1.)
area_union = area_a + area_b - area_intersection
return area_union
def intersection(ai, bi):
"""Intersection of two boxes."""
x = max(ai[0], bi[0])
y = max(ai[1], bi[1])
w = min(ai[2], bi[2]) - x + 1.
h = min(ai[3], bi[3]) - y + 1.
if w < 0 or h < 0:
return 0
return w*h
def calc_iou(a, b, scale=1.0):
"""IoU of two boxes."""
sa = [ai*scale for ai in a]
sb = [bi*scale for bi in b]
if sa[0] >= sa[2] or sa[1] >= sa[3] or sb[0] >= sb[2] or sb[1] >= sb[3]:
return 0.0
area_i = intersection(sa, sb)
area_u = union(sa, sb, area_i)
return float(area_i) / float(area_u + 1e-6)
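# Worked example (toy numbers): two identical 10x10 boxes overlap fully, so
# calc_iou((0., 0., 9., 9.), (0., 0., 9., 9.)) returns 100 / (100 + 1e-6) ~= 1.0,
# since the "+ 1" in intersection/union treats coordinates as inclusive pixels.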
def calc_map(pred, gt_cls, gt_bbox, gt_diff, image_h, image_w, id_to_class,
iou_thres=0.5):
"""Compute mAP."""
T = {}
P = {}
valid_idx = np.where(gt_cls >= 0)[0]
gt_cls = gt_cls[valid_idx]
gt_bbox = gt_bbox[valid_idx, :]
gt_diff = gt_diff[valid_idx]
gt_bbox_cls_diff = np.concatenate(
(gt_bbox, np.expand_dims(gt_cls, axis=1), np.expand_dims(gt_diff, axis=1)),
axis=-1
)
gt = []
for _bbox in gt_bbox_cls_diff:
_gt = dict()
_gt['bbox_matched'] = False
_gt['x1'] = _bbox[1]
_gt['x2'] = _bbox[3]
_gt['y1'] = _bbox[0]
_gt['y2'] = _bbox[2]
_gt['class'] = id_to_class[_bbox[4]]
_gt['difficult'] = _bbox[5]
gt.append(_gt)
pred_probs = np.array([s['prob'] for s in pred])
box_idx_sorted_by_prob = np.argsort(pred_probs)[::-1]
for box_idx in box_idx_sorted_by_prob:
pred_box = pred[box_idx]
pred_class = pred_box['class']
pred_x1 = max(min(int(pred_box['x1']), image_w-1), 0)
pred_x2 = max(min(int(pred_box['x2']), image_w-1), 0)
pred_y1 = max(min(int(pred_box['y1']), image_h-1), 0)
pred_y2 = max(min(int(pred_box['y2']), image_h-1), 0)
pred_prob = pred_box['prob']
if pred_class not in P:
P[pred_class] = []
T[pred_class] = []
P[pred_class].append(pred_prob)
max_ovp = -1.0
best_gt_idx = -1
for gt_idx, gt_box in enumerate(gt):
gt_class = gt_box['class']
gt_x1 = int(gt_box['x1'])
gt_x2 = int(gt_box['x2'])
gt_y1 = int(gt_box['y1'])
gt_y2 = int(gt_box['y2'])
if gt_class != pred_class:
continue
iou = calc_iou((pred_x1, pred_y1, pred_x2, pred_y2),
(gt_x1, gt_y1, gt_x2, gt_y2))
if iou > max_ovp:
max_ovp = iou
best_gt_idx = gt_idx
if max_ovp >= iou_thres:
if gt[best_gt_idx]['difficult']:
# if best match is a difficult GT, ignore it.
logger.warning("Got label marked as difficult(occlusion > 0), "
"please set occlusion field in KITTI label to 0 "
"and re-generate TFRecord dataset, if you want to "
"include it in mAP calculation "
"during validation/evaluation.")
P[pred_class].pop()
elif not gt[best_gt_idx]['bbox_matched']:
# TP
T[pred_class].append(1)
gt[best_gt_idx]['bbox_matched'] = True
else:
# FP
T[pred_class].append(0)
else:
# FP
T[pred_class].append(0)
for gt_box in gt:
if not gt_box['bbox_matched'] and not gt_box['difficult']:
if gt_box['class'] not in P:
P[gt_box['class']] = []
T[gt_box['class']] = []
T[gt_box['class']].append(1)
P[gt_box['class']].append(0)
return T, P
def calc_rpn_recall(RPN_RECALL,
id_to_class,
rois_output,
gt_class_ids,
gt_bboxes,
overlap_thres=0.5):
'''Mark TP and FN for each RoI to calculate RPN recall.'''
# unpad zero gt boxes.
val_idx = np.where(gt_class_ids >= 0)[0]
gt_class_ids = gt_class_ids[val_idx]
gt_bboxes = gt_bboxes[val_idx, :]
ious = iou_np(rois_output, gt_bboxes)
# for each gt box, find its best matched RoI
ious_max = np.amax(ious, axis=0)
overlapped_ious = (ious_max >= overlap_thres)
for k in range(overlapped_ious.size):
class_name = id_to_class[gt_class_ids[k]]
if class_name not in RPN_RECALL:
RPN_RECALL[class_name] = []
RPN_RECALL[class_name].append(int(overlapped_ious[k]))
def voc_ap(rec, prec, use_07_metric=False, class_name=None, vis_path=None):
"""Compute VOC AP given precision and recall.
Args:
rec(array): recall vector.
prec(array): precision vector.
        use_07_metric(bool): whether to use the VOC07 11-point metric.
        class_name(str): optional class name used to label the PR curve plot.
        vis_path(str): optional directory to save the PR curve plot into.
    """
if use_07_metric:
# 11 point metric
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.
if class_name and vis_path:
rec_arr = np.array(rec)
prec_arr = np.array(prec)
plt.plot(rec_arr, prec_arr, label=class_name)
else:
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
if class_name and vis_path:
plt.plot(mrec, mpre, label=class_name)
return ap
def calc_ap(
T, P, p_thres,
use_voc07_metric=False,
class_name=None,
vis_path=None
):
"""compute the AP for a single class."""
prec = []
rec = []
TP = 0.
FP = 0.
FN = 0.
# sort according to prob.
Ta = np.array(T)
Pa = np.array(P)
s_idx = np.argsort(-Pa)
P = Pa[s_idx].tolist()
T = Ta[s_idx].tolist()
npos = np.sum(Ta)
if p_thres <= 0.0:
raise ValueError('''Object confidence score threshold should be
positive, got {}.'''.format(p_thres))
for t, p in zip(T, P):
if t == 1 and p >= p_thres:
TP += 1
elif t == 1 and p < p_thres:
FN += 1
elif t == 0 and p >= p_thres:
FP += 1
if TP+FP == 0.:
precision = 0.
else:
precision = float(TP) / (TP+FP)
if npos > 0:
recall = float(TP) / float(npos)
else:
recall = 0.0
prec.append(precision)
rec.append(recall)
rec_arr = np.array(rec)
prec_arr = np.array(prec)
ap = voc_ap(
rec_arr, prec_arr,
use_voc07_metric,
class_name, vis_path
)
return ap, precision, recall, TP, FP, FN
def dump_kitti_labels(img_file,
all_dets,
ratio_2,
dump_dir,
prob_thres):
'''Dump KITTI labels when doing KPI tests.'''
label_file = '.'.join(os.path.basename(img_file).split('.')[0:-1]) + '.txt'
assert dump_dir, 'KITTI label dump directory cannot be empty.'
if not os.path.exists(dump_dir):
os.makedirs(dump_dir)
label_path = os.path.join(dump_dir, label_file)
with open(label_path, 'w') as f:
for det in all_dets:
if det['prob'] >= prob_thres:
f.write('{} 0 0 0 {:.2f} {:.2f} {:.2f} {:.2f}'
' 0 0 0 0 0 0 0 {:.7f}\n'.format(
det['class'],
det['x1']*ratio_2[0],
det['y1']*ratio_2[1],
det['x2']*ratio_2[0],
det['y2']*ratio_2[1],
det['prob'])
)
def set_random_seed(seed):
"""set radom seed."""
random.seed(seed)
np.random.seed(seed)
tf.set_random_seed(seed)
def debug_roi(rois, scores, img_name, output_dir):
'''debug RoI.'''
rois = np.reshape(rois, (-1, 4))
scores = np.squeeze(scores, axis=0)
y1 = rois[:, 0]
x1 = rois[:, 1]
y2 = rois[:, 2]
x2 = rois[:, 3]
for i in range(y1.shape[0]):
img = cv2.imread(img_name, cv2.IMREAD_COLOR)
cv2.rectangle(img, (x1[i], y1[i]), (x2[i], y2[i]), [255, 255, 255], 2)
basename = os.path.basename(img_name)
output_file = os.path.join(output_dir, 'rois_{}_'.format(i)+basename)
print('================', i, scores[i])
cv2.imwrite(output_file, img)
def apply_regr(x, y, w, h, tx, ty, tw, th):
"""apply deltas to anchor boxes."""
cx = x + w/2.
cy = y + h/2.
cx1 = tx * w + cx
cy1 = ty * h + cy
# clip to exp**50(~10**21) to avoid overflow in FP32.
w1 = math.exp(np.minimum(tw, 50.)) * w
h1 = math.exp(np.minimum(th, 50.)) * h
x1 = cx1 - w1/2.
y1 = cy1 - h1/2.
return x1, y1, w1, h1
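# Worked example (hypothetical numbers): an anchor at x=0, y=0 with w=10, h=10
# and deltas tx=0.1, ty=0, tw=0, th=0 shifts the box center by one pixel in x:
# apply_regr(0, 0, 10, 10, 0.1, 0., 0., 0.) -> (1.0, 0.0, 10.0, 10.0).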
def read_images(imgs, channel_means, scaling, flip_channel=True):
'''Read and preprocess images.'''
np_imgs = []
for im in imgs:
with Image.open(im) as f:
np_imgs.append(np.array(f))
# NHWC to NCHW
np_imgs = np.transpose(np.array(np_imgs, dtype=np.float32), (0, 3, 1, 2))
# RGB to BGR
if flip_channel:
perm = np.array([2, 1, 0])
np_imgs = np_imgs[:, perm, :, :]
channel_means = channel_means[::-1]
np_imgs -= np.array(channel_means).reshape([1, 3, 1, 1]).astype(np.float32)
np_imgs /= scaling
return np_imgs
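# Hedged usage sketch for read_images; file names and per-channel means are
# illustrative. All images in one call must share the same size and have 3
# channels so the batch stacks into a single NCHW array.
def _read_images_sketch():
    imgs = ['frame_000.png', 'frame_001.png']   # hypothetical paths
    means = [123.68, 116.779, 103.939]          # illustrative RGB-order means
    # flip_channel=True converts RGB to BGR and reverses the means to match
    return read_images(imgs, means, scaling=1.0, flip_channel=True)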
def gen_det_boxes(
id_to_class, nmsed_classes,
nmsed_boxes, nmsed_scores,
image_idx, num_dets,
):
"""Generate detected boxes."""
def _gen_det_box(roi_idx):
"""Generate_detected_box."""
# in case it is an invalid class ID
if nmsed_classes[image_idx, roi_idx] not in id_to_class:
return None
cls_name = id_to_class[nmsed_classes[image_idx, roi_idx]]
y1, x1, y2, x2 = nmsed_boxes[image_idx, roi_idx, :]
det = {'x1': x1,
'x2': x2,
'y1': y1,
'y2': y2,
'class': cls_name,
'prob': nmsed_scores[image_idx, roi_idx]}
return det
return list(filter(lambda x: x is not None, map(_gen_det_box, range(nmsed_classes.shape[1]))))
def get_detection_results(all_dets, gt_class_ids, gt_bboxes, gt_diff, image_h,
image_w, image_idx, id_to_class, T, P, iou_list):
"""Helper function to get the detection results."""
for idx, iou in enumerate(iou_list):
t, p = calc_map(
all_dets,
gt_class_ids[image_idx, ...],
gt_bboxes[image_idx, ...],
gt_diff[image_idx, ...],
image_h,
image_w,
id_to_class,
iou_thres=iou
)
for key in t.keys():
if key not in T[idx]:
T[idx][key] = []
P[idx][key] = []
T[idx][key].extend(t[key])
P[idx][key].extend(p[key])
def compute_map_list(
T, P, score_thres,
use_voc07_metric,
RPN_RECALL,
iou_list,
vis_path=None
):
"""Helper function to compute mAPs."""
mAPs = []
for idx, iou in enumerate(iou_list):
all_aps = []
print('='*90)
if RPN_RECALL:
print('{:<20}{:<20}{:<20}{:<20}{:<20}'.format('Class',
'AP',
'precision',
'recall',
'RPN_recall'))
else:
print('{:<20}{:<20}{:<20}{:<20}'.format('Class',
'AP',
'precision',
'recall'))
print('-'*90)
for key in sorted(list(T[idx].keys())):
ap, prec, rec, _TP, _FP, _FN = calc_ap(
T[idx][key], P[idx][key],
score_thres, use_voc07_metric,
class_name=key,
vis_path=vis_path
)
if RPN_RECALL and (key in RPN_RECALL) and len(RPN_RECALL[key]):
rpn_recall = sum(RPN_RECALL[key]) / len(RPN_RECALL[key])
print('{:<20}{:<20.4f}{:<20.4f}{:<20.4f}{:<20.4f}'.format(
key, ap, prec, rec, rpn_recall)
)
else:
print('{:<20}{:<20.4f}{:<20.4f}{:<20.4f}'.format(
key, ap, prec, rec)
)
print('-'*90)
all_aps.append(ap)
if vis_path is not None:
plt.legend()
plt.title("Precision-Recall curve")
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.grid()
save_path = os.path.join(vis_path, "PR_curve.png")
plt.savefig(save_path)
print(f"PR-curve image saved to {save_path}")
plt.clf()
mAP = np.mean(np.array(all_aps))
mAPs.append(mAP)
print('mAP@{} = {:<20.4f}'.format(iou, mAP))
if len(iou_list) > 1:
print('mAP@[{}:{}] = {:<20.4f}'.format(iou_list[0], iou_list[-1], np.mean(np.array(mAPs))))
return mAPs
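# Hedged sketch of the T/P layout consumed by compute_map_list: one dict per
# IoU threshold, each mapping class name to per-detection match labels and
# confidence scores, as accumulated by get_detection_results above.
def _compute_map_list_sketch():
    T = [{'car': [1, 0, 1]}]
    P = [{'car': [0.9, 0.8, 0.6]}]
    return compute_map_list(T, P, 0.5, False, None, [0.5])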
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/utils/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GoogleNet as backbone of Faster-RCNN."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Activation, AveragePooling2D, BatchNormalization, \
Conv2D, Flatten, MaxPooling2D, \
TimeDistributed
from nvidia_tao_tf1.core.templates.utils import arg_scope, InceptionV1Block
from nvidia_tao_tf1.cv.faster_rcnn.models.model_builder import FrcnnModel
class GoogleNet(FrcnnModel):
'''GoogleNet as the backbone of the FasterRCNN model.
This is the GoogleNet class that uses FrcnnModel as its base class and applies
customizations specific to the GoogleNet backbone. Methods here override the
corresponding ones in FrcnnModel.
'''
def backbone(self, input_images):
'''GoogleNet backbone implementation.'''
bn_axis = 1
data_format = 'channels_first'
x = Conv2D(64,
(7, 7),
strides=(2, 2),
padding='same',
data_format=data_format,
kernel_regularizer=self.kernel_reg,
bias_regularizer=None,
name='conv1',
trainable=not(0 in self.freeze_blocks),
use_bias=not self.conv_bn_share_bias)(input_images)
if self.freeze_bn:
x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x, training=False)
else:
x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
x = Activation('relu')(x)
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same',
data_format=data_format, name='pool1')(x)
x = Conv2D(64,
(1, 1),
strides=(1, 1),
padding='same',
data_format=data_format,
kernel_regularizer=self.kernel_reg,
bias_regularizer=None,
name='conv2_reduce',
use_bias=not self.conv_bn_share_bias,
trainable=not(0 in self.freeze_blocks))(x)
if self.freeze_bn:
x = BatchNormalization(axis=bn_axis, name='bn_conv2_reduce')(x, training=False)
else:
x = BatchNormalization(axis=bn_axis, name='bn_conv2_reduce')(x)
x = Activation('relu')(x)
x = Conv2D(192,
(3, 3),
strides=(1, 1),
padding='same',
data_format=data_format,
kernel_regularizer=self.kernel_reg,
bias_regularizer=None,
name='conv2',
use_bias=not self.conv_bn_share_bias,
trainable=not(0 in self.freeze_blocks))(x)
if self.freeze_bn:
x = BatchNormalization(axis=bn_axis, name='bn_conv2')(x, training=False)
else:
x = BatchNormalization(axis=bn_axis, name='bn_conv2')(x)
x = Activation('relu')(x)
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same',
data_format=data_format, name='pool2')(x)
# Define a block functor which can create blocks.
with arg_scope([InceptionV1Block],
use_batch_norm=True,
data_format=data_format,
kernel_regularizer=self.kernel_reg,
bias_regularizer=None,
freeze_bn=self.freeze_bn,
activation_type='relu',
use_bias=not self.conv_bn_share_bias):
# Inception_3a
x = InceptionV1Block(subblocks=(64, 96, 128, 16, 32, 32),
index='3a', trainable=not(1 in self.freeze_blocks))(x)
# Inception_3b
x = InceptionV1Block(subblocks=(128, 128, 192, 32, 96, 64),
index='3b', trainable=not(2 in self.freeze_blocks))(x)
# Max Pooling
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same',
data_format=data_format, name='pool3')(x)
# Inception_4a
x = InceptionV1Block(subblocks=(192, 96, 208, 16, 48, 64),
index='4a', trainable=not(3 in self.freeze_blocks))(x)
# Inception_4b
x = InceptionV1Block(subblocks=(160, 112, 224, 24, 64, 64),
index='4b', trainable=not(4 in self.freeze_blocks))(x)
# Inception_4c
x = InceptionV1Block(subblocks=(128, 128, 256, 24, 64, 64),
index='4c', trainable=not(5 in self.freeze_blocks))(x)
# Inception_4d
x = InceptionV1Block(subblocks=(112, 144, 288, 32, 64, 64),
index='4d', trainable=not(6 in self.freeze_blocks))(x)
# Inception_4e
x = InceptionV1Block(subblocks=(256, 160, 320, 32, 128, 128),
index='4e', trainable=not(7 in self.freeze_blocks))(x)
return x
def rcnn_body(self, x):
'''GoogleNet RCNN body.'''
if self.roi_pool_2x:
x = TimeDistributed(MaxPooling2D(pool_size=(2, 2),
strides=(2, 2), padding='same',
data_format='channels_first',
name='pool4'))(x)
with arg_scope([InceptionV1Block],
use_batch_norm=True,
data_format='channels_first',
kernel_regularizer=self.kernel_reg,
bias_regularizer=None,
freeze_bn=self.freeze_bn,
activation_type='relu',
use_bias=not self.conv_bn_share_bias,
use_td=True):
# Inception_5a
x = InceptionV1Block(subblocks=(256, 160, 320, 32, 128, 128), index='5a')(x)
# Inception_5b
x = InceptionV1Block(subblocks=(384, 192, 384, 48, 128, 128), index='5b')(x)
x = TimeDistributed(AveragePooling2D(pool_size=(self.roi_pool_size, self.roi_pool_size),
strides=(1, 1), padding='valid',
data_format='channels_first', name='avg_pool'))(x)
# During export, in order to map this node to UFF Flatten Op, we have to
# make sure this layer name has 'flatten' in it. Otherwise, it cannot be
# converted to UFF Flatten Op during pb to UFF conversion.
x = TimeDistributed(Flatten(name='classifier_flatten'), name='time_distributed_flatten')(x)
return x
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/models/googlenet.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Base class to implement the FasterRCNN model builder.'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import OrderedDict
import logging
import os
import shutil
import sys
import tempfile
import keras
from keras.callbacks import EarlyStopping, TerminateOnNaN
from keras.layers import Conv2D, Dense, Input, TimeDistributed
from keras.regularizers import l1, l2
from keras.utils.generic_utils import CustomObjectScope
import numpy as np
import tensorflow as tf
from nvidia_tao_tf1.cv.common import utils as iva_utils
from nvidia_tao_tf1.cv.common.callbacks.loggers import TAOStatusLogger
from nvidia_tao_tf1.cv.common.model_parallelism.parallelize_model import find_segment_idx
from nvidia_tao_tf1.cv.common.utils import (
CUSTOM_OBJS,
MultiGPULearningRateScheduler,
StepLRScheduler,
TensorBoard
)
from nvidia_tao_tf1.cv.detectnet_v2.proto.regularizer_config_pb2 import RegularizerConfig
from nvidia_tao_tf1.cv.faster_rcnn.callbacks.callbacks import ModelSaver, ValidationCallback
from nvidia_tao_tf1.cv.faster_rcnn.layers.custom_layers import (
CropAndResize, OutputParser, Proposal, ProposalTarget
)
from nvidia_tao_tf1.cv.faster_rcnn.losses import losses
from nvidia_tao_tf1.cv.faster_rcnn.patched_keras import saving
from nvidia_tao_tf1.cv.faster_rcnn.qat._quantized import check_for_quantized_layers
from nvidia_tao_tf1.cv.faster_rcnn.qat.quantize_keras_model import create_quantized_keras_model
# Patch keras.engine.saving so that we can load weights for TimeDistributed layer from
# classification backbones.
saving.patch()
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level='INFO')
logger = logging.getLogger(__name__)
class FrcnnModel(object):
"""Model builder for FasterRCNN model.
This is the base class implementing the FasterRCNN model builder. The FasterRCNN model
includes several major building blocks: backbone, RPN, proposal, proposal_target,
crop_and_resize, and rcnn. It builds the FasterRCNN model architecture with these building
blocks and encapsulates it as a Keras model. It also handles checkpoint saving and
loading, regularizer updates, overriding of custom layer parameters, etc. It is a high-level
abstraction of the FasterRCNN model that covers its whole lifetime: from training to
inference and test.
"""
def __init__(self, nlayers, batch_size_per_gpu,
rpn_stride, regularizer_type,
weight_decay, freeze_bn, freeze_blocks,
dropout_rate, drop_connect_rate,
conv_bn_share_bias, all_projections,
use_pooling, anchor_sizes, anchor_ratios,
roi_pool_size, roi_pool_2x, num_classes,
std_scaling, rpn_pre_nms_top_N, rpn_post_nms_top_N,
rpn_nms_iou_thres, gt_as_roi, rcnn_min_overlap,
rcnn_max_overlap, rcnn_train_bs, rcnn_bbox_std,
rpn_train_bs, lambda_rpn_class, lambda_rpn_regr,
lambda_rcnn_class, lambda_rcnn_regr,
backbone, results_dir, enc_key,
lr_config, enable_qat=False,
activation_type=None,
early_stopping=None):
'''Initialize the FasterRCNN model architecture.
Args:
nlayers(int/str): the number of layers in the backbone.
batch_size_per_gpu(int): the image batch size per GPU.
rpn_stride(int): the RPN stride relative to input images(16).
regularizer_type(str): regularizer type in string.
weight_decay(float): weight decay factor.
freeze_bn(bool): Whether or not to freeze the BatchNorm layer during training.
Usually, especially for small batch size, BN layer should be frozen during
the training.
freeze_blocks(list): the list of backbone block IDs to freeze during training.
dropout_rate(float): The dropout rate for Dropout layer.
drop_connect_rate(float): The drop connect rate in EfficientNet.
conv_bn_share_bias(bool): whether or not to share bias between conv2d and BN layer.
If the BN layers are frozen during training, then setting this option to False will
allow the conv2d layers to have biases that can be learnt from the training data. In this
case, setting it to False will benefit accuracy.
all_projections(bool): whether or not to use all_projections for shortcut connections.
This is useful for ResNets and MobileNet V2.
use_pooling(bool): use pooling or strided conv2d in the backbone.
anchor_sizes(list): the list of anchor box sizes, at the input image scale.
anchor_ratios(list): the list of anchor box ratios.
roi_pool_size(int): the output feature map spatial size for CropAndResize layer.
roi_pool_2x(bool): whether or not to double the roi_pool_size and apply a pooling or
a stride-2 conv2d after CropAndResize.
num_classes(int): the number of classes in the dataset(including background).
std_scaling(float): a scaling factor applied to the RPN deltas output.
rpn_pre_nms_top_N(int): the number of bboxes to retain before doing NMS for RPN.
rpn_post_nms_top_N(int): the number of bboxes to retain after doing NMS for RPN.
rpn_nms_iou_thres(float): the IoU threshold used in the NMS for RPN.
gt_as_roi(bool): whether or not to use the groundtruth boxes as RoIs for training RCNN.
rcnn_min_overlap(float): the lower IoU threshold below which we regard RoI as negative
when generating the target tensors for RCNN.
rcnn_max_overlap(float): the higher IoU threshold above which we regard RoI as positive
when generating the target tensors for RCNN.
rcnn_train_bs(int): RoI batch size per image for training RCNN.
lambda_rpn_class(float): scaling factor for RPN classification loss.
lambda_rpn_regr(float): scaling factor for RPN regression loss.
lambda_rcnn_class(float): scaling factor for RCNN classification loss.
lambda_rcnn_regr(float): scaling factor for RCNN regression loss.
backbone(str): backbone chosen.
results_dir(str): folder to save training checkpoints.
enc_key(str): the encoding key.
lr_config(proto): the learning rate scheduler config proto.
enable_qat(bool): enable the QAT(quantization-aware training) or not.
activation_type(str): type of activation function. For overriding EfficientNet
swish to relu.
early_stopping(proto): Config for early stopping.
'''
self.nlayers = nlayers
self.batch_size_per_gpu = batch_size_per_gpu
self.rpn_stride = rpn_stride
if regularizer_type == RegularizerConfig.L1:
self.regularizer_type = l1
elif regularizer_type == RegularizerConfig.L2:
self.regularizer_type = l2
else:
self.regularizer_type = None
self.weight_decay = weight_decay
if self.regularizer_type is not None:
self.kernel_reg = self.regularizer_type(self.weight_decay)
else:
self.kernel_reg = None
self.freeze_bn = freeze_bn
if freeze_blocks is None:
freeze_blocks = []
self.freeze_blocks = freeze_blocks
self.dropout_rate = dropout_rate
self.drop_connect_rate = drop_connect_rate
self.conv_bn_share_bias = conv_bn_share_bias
self.all_projections = all_projections
self.use_pooling = use_pooling
self.anchor_sizes = anchor_sizes
self.anchor_ratios = anchor_ratios
self.num_anchors = len(self.anchor_sizes) * len(anchor_ratios)
self.roi_pool_size = roi_pool_size
self.roi_pool_2x = roi_pool_2x
self.num_classes = num_classes
self.std_scaling = std_scaling
self.rpn_pre_nms_top_N = rpn_pre_nms_top_N
self.rpn_post_nms_top_N = rpn_post_nms_top_N
self.rpn_nms_iou_thres = rpn_nms_iou_thres
self.gt_as_roi = gt_as_roi
self.rcnn_min_overlap = rcnn_min_overlap
self.rcnn_max_overlap = rcnn_max_overlap
self.rcnn_train_bs = rcnn_train_bs
self.rcnn_bbox_std = rcnn_bbox_std
self.rpn_train_bs = rpn_train_bs
self.lambda_rpn_class = lambda_rpn_class
self.lambda_rpn_regr = lambda_rpn_regr
self.lambda_rcnn_class = lambda_rcnn_class
self.lambda_rcnn_regr = lambda_rcnn_regr
self.output_model = os.path.join(results_dir, f"{backbone}.hdf5")
self.enc_key = enc_key
self.lr_config = lr_config
self.callbacks = []
self.losses = None
self.optimizer = None
self.target_tensors = None
self.qat = enable_qat
self.activation_type = activation_type
self.early_stopping = early_stopping
@property
def prop_config(self):
'''proposal layer config.
This config is used to override the Proposal layer's config.
'''
config = {'anchor_sizes': self.anchor_sizes,
'anchor_ratios': self.anchor_ratios,
'std_scaling': self.std_scaling,
'rpn_stride': self.rpn_stride,
'pre_nms_top_N': self.rpn_pre_nms_top_N,
'post_nms_top_N': self.rpn_post_nms_top_N,
'nms_iou_thres': self.rpn_nms_iou_thres,
'activation_type': 'sigmoid',
'bs_per_gpu': self.batch_size_per_gpu}
return config
@property
def propt_config(self):
'''proposal_target layer config.
This config is used to override the ProposalTarget layer's config.
'''
config = {'gt_as_roi': self.gt_as_roi,
'iou_high_thres': self.rcnn_max_overlap,
'iou_low_thres': self.rcnn_min_overlap,
'roi_train_bs': self.rcnn_train_bs,
'roi_positive_ratio': 0.25,
'deltas_scaling': self.rcnn_bbox_std,
'bg_class_id': self.num_classes-1,
'bs_per_gpu': self.batch_size_per_gpu}
return config
@property
def cr_config(self):
'''CropAndResize layer config.
This config is used to override the crop_and_resize layer config.
'''
if self.roi_pool_2x:
_pool_size = self.roi_pool_size * 2
else:
_pool_size = self.roi_pool_size
config = {'pool_size': _pool_size}
return config
def backbone(self, input_images):
'''backbone, implemented in derived classes.'''
raise NotImplementedError('backbone is not implemented in FrcnnModel base class.')
def rpn(self, backbone_featuremap):
'''RPN.'''
x = Conv2D(512, (3, 3), padding='same',
activation='relu', name='rpn_conv1',
kernel_regularizer=self.kernel_reg,
bias_regularizer=None)(backbone_featuremap)
x_class = Conv2D(self.num_anchors, (1, 1),
activation='sigmoid',
name='rpn_out_class',
kernel_regularizer=self.kernel_reg,
bias_regularizer=None)(x)
x_regr = Conv2D(self.num_anchors * 4, (1, 1),
activation='linear',
name='rpn_out_regress',
kernel_regularizer=self.kernel_reg,
bias_regularizer=None)(x)
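# For an input feature map of spatial size (H, W), x_class has shape
# (N, num_anchors, H, W) with sigmoid objectness scores and x_regr has shape
# (N, 4 * num_anchors, H, W) with per-anchor box deltas, assuming the
# channels_first layout used throughout this backbone.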
return [x_class, x_regr]
def proposals(self, rpn_score_head, rpn_deltas_head, input_image):
'''proposal layer.'''
rois = Proposal(self.anchor_sizes,
self.anchor_ratios,
self.std_scaling,
self.rpn_stride,
self.rpn_pre_nms_top_N,
self.rpn_post_nms_top_N,
self.rpn_nms_iou_thres,
'sigmoid',
self.batch_size_per_gpu)([rpn_score_head, rpn_deltas_head, input_image])
return rois
def proposals_val(self, spec, rpn_score_head, rpn_deltas_head, input_image):
'''proposal layer for validation model.'''
rois = Proposal(self.anchor_sizes,
self.anchor_ratios,
self.std_scaling,
self.rpn_stride,
spec.eval_rpn_pre_nms_top_N,
spec.eval_rpn_post_nms_top_N,
spec.eval_rpn_nms_iou_thres,
'sigmoid',
spec.eval_batch_size)([rpn_score_head, rpn_deltas_head, input_image])
return rois
def proposal_targets(self, rois, input_gt_class, input_gt_bboxes):
'''proposal target layer.'''
proposal_targets_out = ProposalTarget(self.gt_as_roi,
self.rcnn_max_overlap,
self.rcnn_min_overlap,
self.num_classes-1,
self.rcnn_train_bs,
0.25,
self.rcnn_bbox_std,
self.batch_size_per_gpu)([rois,
input_gt_class,
input_gt_bboxes])
return proposal_targets_out
def crop_and_resize(self, backbone_featuremap, total_rois, input_image):
'''CropAndResize layer.'''
if self.roi_pool_2x:
_pool_size = self.roi_pool_size * 2
else:
_pool_size = self.roi_pool_size
crop_and_resize_layer = CropAndResize(_pool_size)
roi_crop = crop_and_resize_layer([backbone_featuremap, total_rois, input_image])
return roi_crop
def rcnn(self, roi_crop):
'''RCNN layer.'''
out = self.rcnn_body(roi_crop)
out_class = TimeDistributed(Dense(self.num_classes, activation='softmax',
name='dense_class',
kernel_regularizer=self.kernel_reg,
bias_regularizer=None),
name='dense_class_td')(out)
# note: no regression target for bg class
out_regr = TimeDistributed(Dense(4 * (self.num_classes-1),
activation='linear',
name='dense_regress',
kernel_regularizer=self.kernel_reg,
bias_regularizer=None),
name='dense_regress_td')(out)
return out_class, out_regr
def rcnn_body(self, x):
'''RCNN body.'''
raise NotImplementedError('rcnn_body is not implemented in FrcnnModel base class.')
def build_keras_model(self, input_images, input_gt_class, input_gt_bbox):
'''build keras model with these building blocks.'''
backbone_featuremap = self.backbone(input_images)
rpn_out_class, rpn_out_regress = self.rpn(backbone_featuremap)
rois = self.proposals(rpn_out_class, rpn_out_regress, input_images)
total_rois = self.proposal_targets(rois, input_gt_class, input_gt_bbox)[0]
roi_crop = self.crop_and_resize(backbone_featuremap, total_rois, input_images)
rcnn_out_class, rcnn_out_regress = self.rcnn(roi_crop)
self.inputs = [input_images, input_gt_class, input_gt_bbox]
self.outputs = [rpn_out_class, rpn_out_regress,
rcnn_out_class, rcnn_out_regress]
self.keras_model = keras.models.Model(inputs=self.inputs,
outputs=self.outputs)
# Fake quantize the keras model if QAT is enabled
if self.qat:
self.keras_model = create_quantized_keras_model(
self.keras_model,
freeze_bn=self.freeze_bn,
training=True
)
self.inputs = self.keras_model.inputs
self.outputs = self.keras_model.outputs
def build_keras_validation_model(self, spec, input_images):
'''build unpruned validation keras model with these building blocks.'''
backbone_featuremap = self.backbone(input_images)
rpn_out_class, rpn_out_regress = self.rpn(backbone_featuremap)
rois = self.proposals_val(spec, rpn_out_class, rpn_out_regress, input_images)
roi_crop = self.crop_and_resize(backbone_featuremap, rois, input_images)
rcnn_out_class, rcnn_out_regress = self.rcnn(roi_crop)
inputs = [input_images]
outputs = [rois, rcnn_out_class, rcnn_out_regress]
val_model = keras.models.Model(inputs=inputs,
outputs=outputs)
# Fake quantize the keras model if QAT is enabled
if self.qat:
val_model = create_quantized_keras_model(val_model,
freeze_bn=self.freeze_bn)
return val_model
def build_validation_model_unpruned(
self,
spec,
max_box_num=100,
regr_std_scaling=(10.0, 10.0, 5.0, 5.0),
iou_thres=0.5,
score_thres=0.0001
):
"""Build the validation model for online validation during training."""
# tune to inference phase to build the validation model
prev_lp = keras.backend.learning_phase()
keras.backend.set_learning_phase(0)
input_image = Input(shape=spec.input_dims, name='input_image')
val_model = self.build_keras_validation_model(spec, input_image)
# attach OutputParser layer
parser_outputs = OutputParser(max_box_num, list(regr_std_scaling), iou_thres, score_thres)(
val_model.outputs + val_model.inputs
)
val_model = keras.models.Model(
inputs=val_model.inputs,
outputs=parser_outputs,
name=val_model.name
)
keras.backend.set_learning_phase(prev_lp)
return val_model
def build_validation_model(
self,
model,
config_override,
max_box_num=100,
regr_std_scaling=(10.0, 10.0, 5.0, 5.0),
iou_thres=0.5,
score_thres=0.0001,
eval_rois=300
):
"""Build the validation model for online validation during training."""
# clone the training model so it does not use the input tensors
model_config = model.get_config()
with CustomObjectScope(CUSTOM_OBJS):
model = keras.models.Model.from_config(model_config)
# tune to inference phase to build the validation model
prev_lp = keras.backend.learning_phase()
keras.backend.set_learning_phase(0)
# build a validation model out of the cloned training model
_explored_layers = dict()
for l in model.layers:
_explored_layers[l.name] = [False, None]
input_layer = [l for l in model.layers if (type(l) == keras.layers.InputLayer)]
layers_to_explore = input_layer
model_outputs = {}
# Loop until we reach the last layer.
while layers_to_explore:
layer = layers_to_explore.pop(0)
# Skip layers that may be revisited in the graph to prevent duplicates.
if not _explored_layers[layer.name][0]:
# Check if all inbound layers explored for given layer.
if not all([
_explored_layers[l.name][0]
for n in layer._inbound_nodes
for l in n.inbound_layers
]):
continue
outputs = None
# Visit input layer.
if type(layer) == keras.layers.InputLayer and layer.name == 'input_image':
# Re-use the existing InputLayer.
outputs = layer.output
new_layer = layer
elif type(layer) == keras.layers.InputLayer:
# skip the input_class_ids and input_gt_boxes
# mark them as visited but do nothing essential
_explored_layers[layer.name][0] = True
_explored_layers[layer.name][1] = None
layers_to_explore.extend([
node.outbound_layer for node in layer._outbound_nodes
])
continue
# special handling for ProposalTarget layer.
elif type(layer) == ProposalTarget:
# get ROIs data.
for node in layer._inbound_nodes:
prev_outputs = []
# only use the first Input: input_rois
for idx, l in enumerate(node.inbound_layers[:1]):
keras_layer = _explored_layers[l.name][1]
prev_outputs.append(keras_layer.get_output_at(node.node_indices[idx]))
assert prev_outputs, "Expected non-input layer to have inputs."
# remember it
if len(prev_outputs) == 1:
prev_outputs = prev_outputs[0]
proposal_outputs = prev_outputs
_explored_layers[layer.name][0] = True
_explored_layers[layer.name][1] = None
layers_to_explore.extend([
node.outbound_layer for node in layer._outbound_nodes
])
continue
# special handling of CropAndResize to skip the ProposalTarget layer.
elif type(layer) == CropAndResize:
# Create new layer.
layer_config = layer.get_config()
new_layer = type(layer).from_config(layer_config)
outputs = []
for node in layer._inbound_nodes:
prev_outputs = []
for idx, l in enumerate(node.inbound_layers):
# skip ProposalTarget(idx==1) because it doesn't exist
# in the validation model. Use None as a placeholder for it;
# we will update the None later.
if idx == 1:
prev_outputs.append(None)
continue
keras_layer = _explored_layers[l.name][1]
prev_outputs.append(keras_layer.get_output_at(node.node_indices[idx]))
assert prev_outputs, "Expected non-input layer to have inputs."
# replace None with the proposal_outputs
prev_outputs[1] = proposal_outputs
if len(prev_outputs) == 1:
prev_outputs = prev_outputs[0]
outputs.append(new_layer(prev_outputs))
if len(outputs) == 1:
outputs = outputs[0]
elif ("pre_pool_reshape" in layer.name and type(layer) == keras.layers.Reshape):
H, W = layer._inbound_nodes[0].inbound_layers[0].output_shape[3:]
new_layer = keras.layers.Reshape((-1, H, W), name=layer.name)
outputs = []
for node in layer._inbound_nodes:
prev_outputs = []
for idx, l in enumerate(node.inbound_layers):
keras_layer = _explored_layers[l.name][1]
prev_outputs.append(keras_layer.get_output_at(node.node_indices[idx]))
assert prev_outputs, "Expected non-input layer to have inputs."
if len(prev_outputs) == 1:
prev_outputs = prev_outputs[0]
outputs.append(new_layer(prev_outputs))
if len(outputs) == 1:
outputs = outputs[0]
elif ("post_pool_reshape" in layer.name and type(layer) == keras.layers.Reshape):
new_layer = keras.layers.Reshape(
(eval_rois, -1, 1, 1),
name=layer.name
)
outputs = []
for node in layer._inbound_nodes:
prev_outputs = []
for idx, l in enumerate(node.inbound_layers):
keras_layer = _explored_layers[l.name][1]
prev_outputs.append(keras_layer.get_output_at(node.node_indices[idx]))
assert prev_outputs, "Expected non-input layer to have inputs."
if len(prev_outputs) == 1:
prev_outputs = prev_outputs[0]
outputs.append(new_layer(prev_outputs))
if len(outputs) == 1:
outputs = outputs[0]
else:
# Create new layer.
layer_config = layer.get_config()
# override config for Proposal layer for test graph
if type(layer) == Proposal:
layer_config.update(config_override)
new_layer = type(layer).from_config(layer_config)
# Add to model.
outputs = []
for node in layer._inbound_nodes:
prev_outputs = []
for idx, l in enumerate(node.inbound_layers):
keras_layer = _explored_layers[l.name][1]
prev_outputs.append(keras_layer.get_output_at(node.node_indices[idx]))
assert prev_outputs, "Expected non-input layer to have inputs."
if len(prev_outputs) == 1:
prev_outputs = prev_outputs[0]
outputs.append(new_layer(prev_outputs))
if len(outputs) == 1:
outputs = outputs[0]
weights = layer.get_weights()
if weights is not None:
new_layer.set_weights(weights)
outbound_nodes = layer._outbound_nodes
# RPN outputs will be excluded since they have outbound nodes.
if not outbound_nodes:
model_outputs[layer.output.name] = outputs
layers_to_explore.extend([node.outbound_layer for node in outbound_nodes])
# Mark current layer as visited and assign output nodes to the layer.
_explored_layers[layer.name][0] = True
_explored_layers[layer.name][1] = new_layer
else:
continue
# Create new keras model object from pruned specifications.
# only use input_image as Model Input.
output_tensors = [model_outputs[l.name] for l in model.outputs if l.name in model_outputs]
output_tensors = [proposal_outputs] + output_tensors
new_model = keras.models.Model(inputs=model.inputs[:1],
outputs=output_tensors,
name=model.name)
# attach OutputParser layer
parser_outputs = OutputParser(max_box_num, list(regr_std_scaling), iou_thres, score_thres)(
new_model.outputs + new_model.inputs
)
new_model = keras.models.Model(
inputs=new_model.inputs,
outputs=parser_outputs,
name=new_model.name
)
keras.backend.set_learning_phase(prev_lp)
return new_model
def summary(self):
'''print keras model summary.'''
self.keras_model.summary()
def load_weights(self, weights_path, key, logger):
'''loading pretrained weights for initialization.'''
assert os.path.isfile(weights_path), '''
pretrained weights file not found: {}'''.format(weights_path)
logger.info('Loading pretrained weights '
'from {}'.format(weights_path))
weights_format = weights_path.split('.')[-1]
# remember the old weights and then compare with new weights after loading
# to see which layers' weights have been loaded (changed) successfully.
old_weights = self.keras_model.get_weights()
if weights_format == 'tlt':
# first, convert tlt model to weights
_model = iva_utils.decode_to_keras(str(weights_path),
str.encode(key),
compile_model=False)
os_handle, tmp_weights_file = tempfile.mkstemp(suffix='.h5')
os.close(os_handle)
_model.save_weights(tmp_weights_file)
# then load the weights
self.keras_model.load_weights(str(tmp_weights_file),
by_name=True)
os.remove(tmp_weights_file)
elif weights_format == 'hdf5':
# unencoded keras models from classification network
with CustomObjectScope(CUSTOM_OBJS):
_model = keras.models.load_model(str(weights_path),
compile=False)
os_handle, tmp_weights_file = tempfile.mkstemp(suffix='.h5')
os.close(os_handle)
_model.save_weights(tmp_weights_file)
# then load the weights
self.keras_model.load_weights(str(tmp_weights_file),
by_name=True)
os.remove(tmp_weights_file)
elif weights_format == 'h5':
self.keras_model.load_weights(str(weights_path),
by_name=True)
else:
raise ValueError('''Unrecognized pretrained
weights format {}'''.format(weights_format))
new_weights = self.keras_model.get_weights()
self._validate_loaded_weights(old_weights, new_weights)
logger.info('Pretrained weights loaded!')
def _validate_loaded_weights(self, old, new):
_summary = OrderedDict()
idx = 0
for layer in self.keras_model.layers:
if len(layer.weights) == 0:
# this layer has no weights
_summary[layer.name] = None
else:
# layer has weights
num_weights = len(layer.weights)
if self._weights_equal(old[idx:idx+num_weights], new[idx:idx+num_weights]):
# weights were not updated
_summary[layer.name] = False
else:
# weights were updated
_summary[layer.name] = True
idx += num_weights
print('='*99)
print('Pretrained weights loading status summary:')
print('None: layer has no weights at all.')
print('Yes: layer has weights and loaded successfully by name.')
print('No: layer has weights but names not match, skipped.')
print('='*99)
print(self._left_align(90, 'Layer(Type):') + self._left_align(9, 'Status:'))
print('-'*99)
for l_name in _summary:
l_type = '({})'.format(type(self.keras_model.get_layer(l_name)).__name__)
if _summary[l_name] is None:
_stat = 'None'
elif _summary[l_name]:
_stat = 'Yes'
else:
_stat = 'No'
print(self._left_align(90, l_name + l_type) +
self._left_align(9, _stat))
print('-'*99)
def _weights_equal(self, old, new):
for idx, w in enumerate(old):
if not np.array_equal(w, new[idx]):
return False
return True
def _left_align(self, l, s):
s_len = len(s)
return s + ' '*(l-s_len)
@property
def model_format(self):
'''format string for output model path.'''
model_path = str(self.output_model).split('.')
model_path.insert(-1, 'epoch_{}')
return '.'.join(model_path)
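# Example: an output_model of "resnet18.hdf5" (hypothetical) yields
# "resnet18.epoch_{}.hdf5"; ModelSaver fills in the epoch number when
# writing each checkpoint.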
def _build_rpn_class_loss(self):
'''build RPN classification loss.'''
self.rpn_class_loss = losses._build_rpn_class_loss(self.num_anchors,
self.lambda_rpn_class,
self.rpn_train_bs)
def _build_rpn_bbox_loss(self):
'''build RPN bbox loss.'''
self.rpn_bbox_loss = losses._build_rpn_bbox_loss(self.num_anchors,
self.lambda_rpn_regr,
self.rpn_train_bs)
def _build_rcnn_class_loss(self):
'''build RCNN classification loss.'''
self.rcnn_class_loss = losses._build_rcnn_class_loss(self.lambda_rcnn_class,
self.rcnn_train_bs)
def _build_rcnn_bbox_loss(self):
'''build RCNN bbox loss.'''
self.rcnn_bbox_loss = losses._build_rcnn_bbox_loss(self.num_classes,
self.lambda_rcnn_regr,
self.rcnn_train_bs)
def build_losses(self):
'''build all the losses(totally 4 losses) and remember them.'''
if self.losses is not None:
return
self._build_rpn_class_loss()
self._build_rpn_bbox_loss()
self._build_rcnn_class_loss()
self._build_rcnn_bbox_loss()
self.losses = [self.rpn_class_loss, self.rpn_bbox_loss,
self.rcnn_class_loss, self.rcnn_bbox_loss]
def build_lr_scheduler(self, max_iters, hvd_size, initial_step=0):
'''build learning rate scheduler.'''
if self.lr_config.WhichOneof("lr_config") == 'soft_start':
lr_config = self.lr_config.soft_start
scheduler = MultiGPULearningRateScheduler(max_iters,
lr_config.start_lr*hvd_size,
lr_config.base_lr*hvd_size,
lr_config.soft_start,
lr_config.annealing_points,
lr_config.annealing_divider)
elif self.lr_config.WhichOneof("lr_config") == 'step':
lr_config = self.lr_config.step
scheduler = StepLRScheduler(lr_config.base_lr*hvd_size,
lr_config.gamma,
lr_config.step_size,
max_iters)
else:
raise ValueError('Invalid learning rate config.')
scheduler.reset(initial_step)
self.lr_scheduler = scheduler
self.callbacks.append(self.lr_scheduler)
def build_checkpointer(self, interval=1):
'''build tlt encoded model checkpointer.'''
self.checkpointer = ModelSaver(self.model_format,
self.enc_key,
interval)
self.callbacks.append(self.checkpointer)
def set_target_tensors(self, rpn_score_tensor, rpn_deltas_tensor):
'''setup target tensors for RPN and RCNN.'''
if self.target_tensors is not None:
return
pt_outputs = None
for l in self.keras_model.layers:
if type(l) == ProposalTarget:
pt_outputs = l.output
break
assert pt_outputs is not None, "Cannot find ProposalTarget output tensors in Keras model."
self.target_tensors = [rpn_score_tensor, rpn_deltas_tensor] + \
pt_outputs[1:3]
def set_optimizer(self, opt, hvd):
'''setup optimizer.'''
if self.optimizer is not None:
return
self.optimizer = hvd.DistributedOptimizer(opt)
def set_hvd_callbacks(self, hvd):
'''setup horovod callbacks.'''
self.callbacks.append(hvd.callbacks.BroadcastGlobalVariablesCallback(0))
self.callbacks.append(hvd.callbacks.MetricAverageCallback())
self.callbacks.append(TerminateOnNaN())
def build_validation_callback(self, val_data_loader, spec):
"""Build the validation callback for online validation."""
logger.info("Building validation model, may take a while...")
if spec.pretrained_model or spec.resume_from_model:
config_override = {'pre_nms_top_N': spec.eval_rpn_pre_nms_top_N,
'post_nms_top_N': spec.eval_rpn_post_nms_top_N,
'nms_iou_thres': spec.eval_rpn_nms_iou_thres,
'bs_per_gpu': spec.eval_batch_size}
val_model = self.build_validation_model(
self.keras_model,
config_override,
max_box_num=spec.eval_rcnn_post_nms_top_N,
regr_std_scaling=spec.rcnn_regr_std,
iou_thres=spec.eval_rcnn_nms_iou_thres,
score_thres=spec.eval_confidence_thres,
eval_rois=spec.eval_rpn_post_nms_top_N
)
else:
val_model = self.build_validation_model_unpruned(
spec,
max_box_num=spec.eval_rcnn_post_nms_top_N,
regr_std_scaling=spec.rcnn_regr_std,
iou_thres=spec.eval_rcnn_nms_iou_thres,
score_thres=spec.eval_confidence_thres
)
logger.info("Validation model built successfully!")
val_model.summary()
validation_callback = ValidationCallback(
val_model,
val_data_loader,
spec.validation_period,
spec.eval_batch_size,
spec.eval_confidence_thres,
spec.use_voc07_metric,
spec.id_to_class,
spec.eval_gt_matching_iou_list,
)
self.callbacks.append(validation_callback)
def build_early_stopping_callback(self):
"""Setup early stopping callback."""
# If early stopping is enabled...
if self.early_stopping is not None:
callback = EarlyStopping(
monitor=self.early_stopping.monitor,
min_delta=self.early_stopping.min_delta,
patience=self.early_stopping.patience,
verbose=True
)
self.callbacks.append(callback)
def build_tensorboard_callback(self):
"""Build TensorBoard callback for visualization."""
tb_path = os.path.join(
os.path.dirname(self.output_model),
"logs"
)
if os.path.exists(tb_path) and os.path.isdir(tb_path):
shutil.rmtree(tb_path)
if not os.path.exists(tb_path):
os.makedirs(tb_path)
tb_callback = TensorBoard(
log_dir=tb_path,
write_graph=False,
weight_hist=False
)
self.callbacks.append(tb_callback)
def build_status_logging_callback(self, results_dir, num_epochs, is_master):
"""Build status logging for TAO API."""
status_logger = TAOStatusLogger(
results_dir,
append=True,
num_epochs=num_epochs,
is_master=is_master,
)
self.callbacks.append(status_logger)
def compile(self):
'''compile the keras model.'''
self.build_losses()
self.keras_model.compile(optimizer=self.optimizer,
loss=self.losses,
target_tensors=self.target_tensors)
def train(self, epochs, steps_per_epoch, initial_epoch):
'''train the keras model with dataset.'''
self.keras_model.fit(epochs=epochs,
steps_per_epoch=steps_per_epoch,
callbacks=self.callbacks,
initial_epoch=initial_epoch)
@staticmethod
def apply_model_to_new_inputs(model, tf_inputs, freeze_bn=False):
'''Apply the keras model to new input tensors; this avoids nested models.'''
# set training=False for BN layers if freeze_bn=True
def compose_call(prev_call_method):
def call(self, inputs, training=False):
return prev_call_method(self, inputs, training)
return call
prev_batchnorm_call = keras.layers.normalization.BatchNormalization.call
prev_td_call = keras.layers.wrappers.TimeDistributed.call
if freeze_bn:
keras.layers.normalization.BatchNormalization.call = compose_call(
prev_batchnorm_call
)
keras.layers.wrappers.TimeDistributed.call = compose_call(
prev_td_call
)
_explored_layers = dict()
for l in model.layers:
_explored_layers[l.name] = [False, None]
input_layer = [l for l in model.layers if (type(l) == keras.layers.InputLayer)]
layers_to_explore = input_layer
model_outputs = {}
# Loop until we reach the last layer.
while layers_to_explore:
layer = layers_to_explore.pop(0)
# Skip layers that may be revisited in the graph to prevent duplicates.
if not _explored_layers[layer.name][0]:
# Check if all inbound layers explored for given layer.
if not all([
_explored_layers[l.name][0]
for n in layer._inbound_nodes
for l in n.inbound_layers
]):
continue
outputs = None
# Visit input layer.
if type(layer) == keras.layers.InputLayer: # noqa pylint: disable = R1724
# skip input layer and use outside input tensors instead.
_explored_layers[layer.name][0] = True
_explored_layers[layer.name][1] = None
layers_to_explore.extend([node.outbound_layer for
node in layer._outbound_nodes])
continue
elif type(layer) == CropAndResize:
# Create new layer.
layer_config = layer.get_config()
new_layer = type(layer).from_config(layer_config)
outputs = []
for node in layer._inbound_nodes:
prev_outputs = []
for idx, l in enumerate(node.inbound_layers):
keras_layer = _explored_layers[l.name][1]
if keras_layer is not None:
# _explored_layers[l.name][1] is None for input image
_tmp_outputs = keras_layer.get_output_at(node.node_indices[idx])
# ProposalTarget has 4 outputs,
# only use the first one for CropAndResize,
# i.e., ROIs.
if type(l) == ProposalTarget:
_tmp_outputs = _tmp_outputs[0]
prev_outputs.append(_tmp_outputs)
else:
prev_outputs.append(None)
assert prev_outputs, "Expected non-input layer to have inputs."
# update the input image
prev_outputs[-1] = tf_inputs[0]
if len(prev_outputs) == 1:
prev_outputs = prev_outputs[0]
outputs.append(new_layer(prev_outputs))
if len(outputs) == 1:
outputs = outputs[0]
else:
# Create new layer.
layer_config = layer.get_config()
new_layer = type(layer).from_config(layer_config)
# Add to model.
outputs = []
for node in layer._inbound_nodes:
prev_outputs = []
for idx, l in enumerate(node.inbound_layers):
if l.name == 'input_image':
prev_outputs.append(tf_inputs[0])
elif l.name == 'input_gt_cls':
prev_outputs.append(tf_inputs[1])
elif l.name == 'input_gt_bbox':
prev_outputs.append(tf_inputs[2])
else:
keras_layer = _explored_layers[l.name][1]
_tmp_output = keras_layer.get_output_at(node.node_indices[idx])
prev_outputs.append(_tmp_output)
assert prev_outputs, "Expected non-input layer to have inputs."
if len(prev_outputs) == 1:
prev_outputs = prev_outputs[0]
outputs.append(new_layer(prev_outputs))
if len(outputs) == 1:
outputs = outputs[0]
weights = layer.get_weights()
if weights is not None:
new_layer.set_weights(weights)
outbound_nodes = layer._outbound_nodes
# RPN outputs will be excluded since they have outbound nodes.
if not outbound_nodes:
model_outputs[layer.output.name] = outputs
# Patch for Faster-RCNN RPN output.
# It's an output layer, but still has outbound_nodes
for idx, node in enumerate(new_layer._inbound_nodes):
_output = layer.get_output_at(idx)
new_output = new_layer.get_output_at(idx)
if (_output in model.outputs) and (_output.name not in model_outputs):
model_outputs[_output.name] = new_output
layers_to_explore.extend([node.outbound_layer for node in outbound_nodes])
# Mark current layer as visited and assign output nodes to the layer.
_explored_layers[layer.name][0] = True
_explored_layers[layer.name][1] = new_layer
else:
continue
# Create new keras model object from pruned specifications.
# only use input_image as Model Input.
output_tensors = [model_outputs[l.name] for l in model.outputs if l.name in model_outputs]
new_model = keras.models.Model(inputs=tf_inputs, outputs=output_tensors, name=model.name)
if freeze_bn:
# Unpatch Keras before return.
keras.layers.normalization.BatchNormalization.call = prev_batchnorm_call
keras.layers.wrappers.TimeDistributed.call = prev_td_call
return new_model
def load_pruned_model(self, pruned_model_path, logger):
'''loading pruned model for retrain.'''
assert os.path.isfile(pruned_model_path), '''
Pruned model file not found: {}'''.format(pruned_model_path)
pruned_model = iva_utils.decode_to_keras(str(pruned_model_path),
str.encode(self.enc_key),
input_model=None,
compile_model=False,
by_name=None)
logger.info('Pruned model loaded!')
return pruned_model
def override_regularizers(self, model, reg_config):
"""Update regularizers according the spec(config)."""
regularizer_dict = {
RegularizerConfig.L1: l1,
RegularizerConfig.L2: l2
}
model_weights = model.get_weights()
mconfig = model.get_config()
assert 0.0 < reg_config.weight < 1.0, \
"Weight decay should be no less than 0 and less than 1"
# Obtain type and scope of the regularizer
reg_type = reg_config.type
for layer, layer_config in zip(model.layers, mconfig['layers']):
# Regularizer settings
if reg_type:
if hasattr(layer, 'kernel_regularizer'):
if reg_type in regularizer_dict.keys():
regularizer = regularizer_dict[reg_type](reg_config.weight)
layer_config['config']['kernel_regularizer'] = \
{'class_name': regularizer.__class__.__name__,
'config': regularizer.get_config()}
else:
layer_config['config']['kernel_regularizer'] = None
with CustomObjectScope({'CropAndResize': CropAndResize,
'Proposal': Proposal,
'ProposalTarget': ProposalTarget}):
new_model = keras.models.Model.from_config(mconfig)
new_model.set_weights(model_weights)
return new_model
def override_custom_layers(self, model):
"""Update regularizers according the spec(config)."""
model_weights = model.get_weights()
mconfig = model.get_config()
for layer, layer_config in zip(model.layers, mconfig['layers']):
# Custom layer config overrides
if self.prop_config is not None and type(layer) == Proposal:
layer_config['config'].update(self.prop_config)
elif self.propt_config is not None and type(layer) == ProposalTarget:
layer_config['config'].update(self.propt_config)
elif self.cr_config is not None and type(layer) == CropAndResize:
layer_config['config'].update(self.cr_config)
with CustomObjectScope({'CropAndResize': CropAndResize,
'Proposal': Proposal,
'ProposalTarget': ProposalTarget}):
new_model = keras.models.Model.from_config(mconfig)
new_model.set_weights(model_weights)
return new_model
def build_model_from_pruned(self, pruned_model_path, input_images,
input_gt_class, input_gt_bbox, logger,
reg_config):
'''build keras model from pruned model.'''
logger.info('Loading pretrained model: {} for retrain.'.format(pruned_model_path))
pruned_model = self.load_pruned_model(pruned_model_path, logger)
model_qat = check_for_quantized_layers(pruned_model)
if self.qat ^ model_qat:
qat_strings = {True: "enabled", False: "disabled"}
logger.error(
"Pruned model architecture does not align with "
f"`enable_qat` flag in spec file. Model QAT is {qat_strings[model_qat]} "
f"while spec file has QAT {qat_strings[self.qat]}"
)
sys.exit(1)
pruned_model = self.override_regularizers(pruned_model, reg_config)
pruned_model = self.override_custom_layers(pruned_model)
logger.info('Regularizers updated for the loaded model.')
inputs = [input_images, input_gt_class, input_gt_bbox]
self.keras_model = self.apply_model_to_new_inputs(pruned_model,
inputs,
freeze_bn=self.freeze_bn)
self.inputs = self.keras_model.inputs
self.outputs = self.keras_model.outputs
return self.keras_model
def get_initial_epoch(self, model_path):
'''Get the epoch number from the pattern of the saved model path.'''
epoch = int(model_path.split('epoch_')[1].split(".")[0])
return epoch
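# Example: a checkpoint saved as "resnet18.epoch_12.hdf5" (hypothetical,
# following the model_format pattern above) yields initial_epoch == 12.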
def resume_model(self, spec, tf_inputs, hvd, logger=None):
'''resume model from checkpoints and continue to train.'''
initial_epoch = self.get_initial_epoch(spec.resume_from_model)
if logger is not None:
logger.info('Resuming training from {}'.format(spec.resume_from_model))
# build the loss functions for later use
self.build_losses()
custom_objs = {'rpn_loss_cls' : self.rpn_class_loss,
'rpn_loss_regr' : self.rpn_bbox_loss,
'rcnn_loss_cls' : self.rcnn_class_loss,
'rcnn_loss_regr' : self.rcnn_bbox_loss}
resumed_model = iva_utils.decode_to_keras(spec.resume_from_model,
str.encode(spec.enc_key),
input_model=None,
compile_model=True,
by_name=None,
custom_objects=custom_objs)
optimizer = resumed_model.optimizer
new_model = self.apply_model_to_new_inputs(resumed_model,
tf_inputs,
freeze_bn=self.freeze_bn)
self.keras_model = new_model
self.inputs = self.keras_model.inputs
self.outputs = self.keras_model.outputs
self.set_optimizer(optimizer, hvd)
return initial_epoch
def parallelize(self, parallelism):
"""parallelize the model on multiple GPUs."""
self.keras_model = self.model_parallelism(
self.keras_model,
parallelism,
freeze_bn=self.freeze_bn
)
self.inputs = self.keras_model.inputs
self.outputs = self.keras_model.outputs
def model_parallelism(
self,
model,
parallelism,
freeze_bn=False
):
"""Split the model into several parts on multiple GPUs for model parallelism."""
# set training=False for BN layers if freeze_bn=True
# otherwise the freeze_bn flag in model builder will be ineffective
def compose_call(prev_call_method):
def call(self, inputs, training=False):
return prev_call_method(self, inputs, training)
return call
prev_batchnorm_call = keras.layers.normalization.BatchNormalization.call
if freeze_bn:
keras.layers.normalization.BatchNormalization.call = compose_call(
prev_batchnorm_call
)
world_size = len(parallelism)
# in case model parallelism is not enabled at all...
if world_size == 0:
world_size = 1
parallelism = (1.0,)
p_arr = np.array((0.0,) + parallelism, dtype=np.float32)
cum_p_arr = np.cumsum(p_arr)
# splitting points for each segment of the model
splits = cum_p_arr / cum_p_arr[-1]
layer_splits = np.round(splits * len(model.layers))
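# Example: parallelism=(1.0, 1.0) on a 100-layer model gives splits
# [0.0, 0.5, 1.0] and layer_splits [0., 50., 100.], so (assuming
# find_segment_idx maps a layer index to its containing segment) roughly
# the first 50 layers are pinned to GPU 0 and the rest to GPU 1.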
layer_idx = 0
_explored_layers = dict()
for l in model.layers:
_explored_layers[l.name] = [False, None]
input_layer = [l for l in model.layers if (type(l) == keras.layers.InputLayer)]
layers_to_explore = input_layer
model_outputs = {}
# black list of layer types that cannot run on GPU.
black_list = [Proposal, ProposalTarget, CropAndResize]
# Loop until we reach the last layer.
while layers_to_explore:
layer = layers_to_explore.pop(0)
# Skip layers that may be revisited in the graph to prevent duplicates.
if not _explored_layers[layer.name][0]:
# Check if all inbound layers explored for given layer.
if not all([
_explored_layers[l.name][0]
for n in layer._inbound_nodes
for l in n.inbound_layers
]):
continue
outputs = None
# Visit input layer.
if type(layer) == keras.layers.InputLayer:
# Re-use the existing InputLayer.
outputs = layer.output
new_layer = layer
elif type(layer) in black_list:
gpu_idx = find_segment_idx(layer_idx, layer_splits)
layer_idx += 1
# Create new layer.
layer_config = layer.get_config()
new_layer = type(layer).from_config(layer_config)
# Add to model.
outputs = []
for node in layer._inbound_nodes:
prev_outputs = []
for idx, l in enumerate(node.inbound_layers):
keras_layer = _explored_layers[l.name][1]
_tmp_outputs = keras_layer.get_output_at(node.node_indices[idx])
if type(l) == ProposalTarget:
_tmp_outputs = _tmp_outputs[0]
prev_outputs.append(_tmp_outputs)
assert prev_outputs, "Expected non-input layer to have inputs."
if len(prev_outputs) == 1:
prev_outputs = prev_outputs[0]
outputs.append(new_layer(prev_outputs))
if len(outputs) == 1:
outputs = outputs[0]
weights = layer.get_weights()
if weights is not None:
new_layer.set_weights(weights)
else:
gpu_idx = find_segment_idx(layer_idx, layer_splits)
layer_idx += 1
# pin this layer on a certain GPU
with tf.device("/gpu:{}".format(gpu_idx)):
# Create new layer.
layer_config = layer.get_config()
new_layer = type(layer).from_config(layer_config)
# Add to model.
outputs = []
for node in layer._inbound_nodes:
prev_outputs = []
for idx, l in enumerate(node.inbound_layers):
keras_layer = _explored_layers[l.name][1]
_tmp_outputs = keras_layer.get_output_at(node.node_indices[idx])
if type(l) == ProposalTarget:
_tmp_outputs = _tmp_outputs[0]
prev_outputs.append(_tmp_outputs)
assert prev_outputs, "Expected non-input layer to have inputs."
if len(prev_outputs) == 1:
prev_outputs = prev_outputs[0]
outputs.append(new_layer(prev_outputs))
if len(outputs) == 1:
outputs = outputs[0]
weights = layer.get_weights()
if weights is not None:
new_layer.set_weights(weights)
outbound_nodes = layer._outbound_nodes
if not outbound_nodes:
model_outputs[layer.output.name] = outputs
# Patch for Faster-RCNN RPN output.
# It's an output layer, but still has outbound_nodes
for idx, node in enumerate(new_layer._inbound_nodes):
_output = layer.get_output_at(idx)
new_output = new_layer.get_output_at(idx)
if (_output in model.outputs) and (_output.name not in model_outputs):
model_outputs[_output.name] = new_output
layers_to_explore.extend([node.outbound_layer for node in outbound_nodes])
# Mark current layer as visited and assign output nodes to the layer.
_explored_layers[layer.name][0] = True
_explored_layers[layer.name][1] = new_layer
else:
continue
output_tensors = [model_outputs[l.name] for l in model.outputs if l.name in model_outputs]
new_model = keras.models.Model(
inputs=model.inputs,
outputs=output_tensors,
name=model.name
)
# restore the BN call method before return
if freeze_bn:
keras.layers.normalization.BatchNormalization.call = prev_batchnorm_call
return new_model
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/models/model_builder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MobileNet V2 model for FasterRCNN."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Add, AveragePooling2D, BatchNormalization, Conv2D, \
DepthwiseConv2D, Flatten, ReLU, \
TimeDistributed, ZeroPadding2D
from nvidia_tao_tf1.core.templates.utils import add_arg_scope, arg_scope
from nvidia_tao_tf1.cv.faster_rcnn.models.model_builder import FrcnnModel
class MobileNetV2(FrcnnModel):
'''MobileNet V2 as the backbone of the FasterRCNN model.
This is the MobileNet V2 class that uses FrcnnModel as its base class and applies
customizations specific to the MobileNet V2 backbone. Methods here override the
corresponding ones in FrcnnModel.
'''
def backbone(self, input_images):
'''backbone for MobileNet V2 FasterRCNN.'''
channel_axis = 1
first_block_filters = _make_divisible(32, 8)
x = ZeroPadding2D((1, 1), name='conv1_pad')(input_images)
x = Conv2D(first_block_filters,
kernel_size=3,
strides=(2, 2),
padding='valid',
use_bias=not self.conv_bn_share_bias,
name='conv1',
kernel_regularizer=self.kernel_reg,
bias_regularizer=None,
trainable=not (0 in self.freeze_blocks))(x)
if self.freeze_bn:
x = BatchNormalization(axis=channel_axis,
epsilon=1e-3,
momentum=0.999,
name='bn_conv1')(x, training=False)
else:
x = BatchNormalization(axis=channel_axis,
epsilon=1e-3,
momentum=0.999,
name='bn_conv1')(x)
x = ReLU()(x)
with arg_scope([_inverted_res_block],
use_batch_norm=True,
data_format='channels_first',
kernel_regularizer=self.kernel_reg,
bias_regularizer=None,
activation_type='relu',
all_projections=self.all_projections,
freeze_bn=self.freeze_bn,
use_bias=not self.conv_bn_share_bias):
x = _inverted_res_block(x, filters=16, alpha=1, stride=1,
expansion=1, block_id=0,
trainable=not (1 in self.freeze_blocks))
x = _inverted_res_block(x, filters=24, alpha=1, stride=2,
expansion=6, block_id=1,
trainable=not(2 in self.freeze_blocks))
x = _inverted_res_block(x, filters=24, alpha=1, stride=1,
expansion=6, block_id=2,
trainable=not (3 in self.freeze_blocks))
x = _inverted_res_block(x, filters=32, alpha=1, stride=2,
expansion=6, block_id=3,
trainable=not (4 in self.freeze_blocks))
x = _inverted_res_block(x, filters=32, alpha=1, stride=1,
expansion=6, block_id=4,
trainable=not (5 in self.freeze_blocks))
x = _inverted_res_block(x, filters=32, alpha=1, stride=1,
expansion=6, block_id=5,
trainable=not (6 in self.freeze_blocks))
x = _inverted_res_block(x, filters=64, alpha=1, stride=2,
expansion=6, block_id=6,
trainable=not (7 in self.freeze_blocks))
x = _inverted_res_block(x, filters=64, alpha=1, stride=1,
expansion=6, block_id=7,
trainable=not (8 in self.freeze_blocks))
x = _inverted_res_block(x, filters=64, alpha=1, stride=1,
expansion=6, block_id=8,
trainable=not (9 in self.freeze_blocks))
x = _inverted_res_block(x, filters=64, alpha=1, stride=1,
expansion=6, block_id=9,
trainable=not (10 in self.freeze_blocks))
x = _inverted_res_block(x, filters=96, alpha=1, stride=1,
expansion=6, block_id=10,
trainable=not (11 in self.freeze_blocks))
x = _inverted_res_block(x, filters=96, alpha=1, stride=1,
expansion=6, block_id=11,
trainable=not (12 in self.freeze_blocks))
x = _inverted_res_block(x, filters=96, alpha=1, stride=1,
expansion=6, block_id=12,
trainable=not (13 in self.freeze_blocks))
return x
def rcnn_body(self, x):
'''RCNN body for MobileNet V2 FasterRCNN model.'''
_stride = 2 if self.roi_pool_2x else 1
channel_axis = 1
with arg_scope([_inverted_res_block],
use_batch_norm=True,
data_format='channels_first',
kernel_regularizer=self.kernel_reg,
bias_regularizer=None,
activation_type='relu',
all_projections=self.all_projections,
freeze_bn=self.freeze_bn,
use_bias=not self.conv_bn_share_bias,
use_td=True):
x = _inverted_res_block(x, filters=160, alpha=1, stride=_stride,
expansion=6, block_id=13)
x = _inverted_res_block(x, filters=160, alpha=1, stride=1,
expansion=6, block_id=14)
x = _inverted_res_block(x, filters=160, alpha=1, stride=1,
expansion=6, block_id=15)
x = _inverted_res_block(x, filters=320, alpha=1, stride=1,
expansion=6, block_id=16)
# no alpha applied to last conv as stated in the paper:
# if the width multiplier is greater than 1 we
# increase the number of output channels
last_block_filters = 1280
layer = Conv2D(last_block_filters,
kernel_size=1,
use_bias=not self.conv_bn_share_bias,
name='conv_1',
kernel_regularizer=self.kernel_reg,
bias_regularizer=None)
layer = TimeDistributed(layer)
x = layer(x)
layer = BatchNormalization(epsilon=1e-3,
axis=channel_axis,
momentum=0.999,
name='conv_1_bn')
layer = TimeDistributed(layer)
if self.freeze_bn:
x = layer(x, training=False)
else:
x = layer(x)
x = ReLU()(x)
x = TimeDistributed(AveragePooling2D(pool_size=(self.roi_pool_size, self.roi_pool_size),
data_format='channels_first',
padding='valid'))(x)
# During export, in order to map this node to UFF Flatten Op, we have to
# make sure this layer name has 'flatten' in it. Otherwise, it cannot be
# converted to UFF Flatten Op during pb to UFF conversion.
x = TimeDistributed(Flatten(name='classifier_flatten'), name='time_distributed_flatten')(x)
return x
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
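# Hedged worked examples of _make_divisible (values computed from the
# definition above; not part of the original source):
#   _make_divisible(32, 8) -> 32   # already divisible
#   _make_divisible(33, 8) -> 32   # rounds down, still within 10% of 33
#   _make_divisible(20, 8) -> 24   # rounds up to the nearest multiple
#   _make_divisible(9, 8)  -> 16   # 8 would drop >10% below 9, so add divisor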
@add_arg_scope
def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id,
kernel_regularizer=None, bias_regularizer=None,
use_batch_norm=True, activation_type='relu',
data_format='channels_first', all_projections=False,
freeze_bn=False, use_bias=True, trainable=True,
use_td=False):
'''Inverted residual block as building blocks for MobileNet V2.'''
channel_axis = 1 if data_format == 'channels_first' else -1
# If a TimeDistributed (TD) layer is used, the channel axis shifts by +1 since the input is now a 5D tensor.
if use_td and channel_axis == 1:
in_channels = inputs._keras_shape[channel_axis+1]
else:
in_channels = inputs._keras_shape[channel_axis]
pointwise_conv_filters = int(filters * alpha)
pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
x = inputs
prefix = 'block_{}_'.format(block_id)
if block_id:
# Expand
layer = Conv2D(expansion * in_channels,
kernel_size=1,
padding='valid',
use_bias=use_bias,
activation=None,
name=prefix + 'expand',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=trainable)
if use_td:
layer = TimeDistributed(layer)
x = layer(x)
if use_batch_norm:
layer = BatchNormalization(epsilon=1e-3, axis=channel_axis,
momentum=0.999,
name=prefix + 'expand_bn')
if use_td:
layer = TimeDistributed(layer)
if freeze_bn:
x = layer(x, training=False)
else:
x = layer(x)
if activation_type == 'relu6':
x = ReLU(6.)(x)
else:
x = ReLU()(x)
else:
prefix = 'expanded_conv_'
# Depthwise
layer = ZeroPadding2D((1, 1), name=prefix + 'depthwise_pad')
if use_td:
layer = TimeDistributed(layer)
x = layer(x)
layer = DepthwiseConv2D(kernel_size=3,
strides=stride,
activation=None,
use_bias=use_bias,
padding='valid',
name=prefix + 'depthwise',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=trainable)
if use_td:
layer = TimeDistributed(layer)
x = layer(x)
if use_batch_norm:
layer = BatchNormalization(epsilon=1e-3,
axis=channel_axis,
momentum=0.999,
name=prefix + 'depthwise_bn')
if use_td:
layer = TimeDistributed(layer)
if freeze_bn:
x = layer(x, training=False)
else:
x = layer(x)
if activation_type == 'relu6':
x = ReLU(6.)(x)
else:
x = ReLU()(x)
# Project
layer = Conv2D(pointwise_filters,
kernel_size=1,
padding='valid',
use_bias=use_bias,
activation=None,
name=prefix + 'project',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=trainable)
if use_td:
layer = TimeDistributed(layer)
x = layer(x)
if use_batch_norm:
layer = BatchNormalization(axis=channel_axis,
epsilon=1e-3,
momentum=0.999,
name=prefix + 'project_bn')
if use_td:
layer = TimeDistributed(layer)
if freeze_bn:
x = layer(x, training=False)
else:
x = layer(x)
if in_channels == pointwise_filters and stride == 1:
if all_projections:
layer = Conv2D(in_channels,
kernel_size=1,
padding='valid',
use_bias=use_bias,
activation=None,
name=prefix + 'projected_inputs',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=trainable)
if use_td:
layer = TimeDistributed(layer)
inputs_projected = layer(inputs)
return Add(name=prefix + 'add')([inputs_projected, x])
return Add(name=prefix + 'add')([inputs, x])
return x
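# Hedged usage sketch (the tensor name `feat` is hypothetical): within the
# arg_scope set up in backbone()/rcnn_body(), a single call builds
# expand (1x1, skipped for block_id=0) -> depthwise (3x3, given stride) ->
# project (1x1), and the residual Add is emitted only when stride == 1 and
# the input channel count equals the projected channel count:
#   out = _inverted_res_block(feat, filters=24, alpha=1, stride=1,
#                             expansion=6, block_id=2)  # 24 -> 24, so Add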
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/models/mobilenet_v2.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FasterRCNN model templates."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/models/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientNet models for FasterRCNN."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from copy import deepcopy
from keras.layers import (
Activation,
AveragePooling2D,
BatchNormalization,
Conv2D,
Dropout,
Flatten,
TimeDistributed,
ZeroPadding2D
)
from nvidia_tao_tf1.core.templates.utils import (
block,
CONV_KERNEL_INITIALIZER,
correct_pad,
round_filters,
round_repeats,
swish
)
from nvidia_tao_tf1.cv.faster_rcnn.models.model_builder import FrcnnModel
DEFAULT_BLOCKS_ARGS = (
{'kernel_size': 3, 'repeats': 1, 'filters_in': 32, 'filters_out': 16,
'expand_ratio': 1, 'id_skip': True, 'strides': 1, 'se_ratio': 0.25},
{'kernel_size': 3, 'repeats': 2, 'filters_in': 16, 'filters_out': 24,
'expand_ratio': 6, 'id_skip': True, 'strides': 2, 'se_ratio': 0.25},
{'kernel_size': 5, 'repeats': 2, 'filters_in': 24, 'filters_out': 40,
'expand_ratio': 6, 'id_skip': True, 'strides': 2, 'se_ratio': 0.25},
{'kernel_size': 3, 'repeats': 3, 'filters_in': 40, 'filters_out': 80,
'expand_ratio': 6, 'id_skip': True, 'strides': 2, 'se_ratio': 0.25},
{'kernel_size': 5, 'repeats': 3, 'filters_in': 80, 'filters_out': 112,
'expand_ratio': 6, 'id_skip': True, 'strides': 1, 'se_ratio': 0.25},
{'kernel_size': 5, 'repeats': 4, 'filters_in': 112, 'filters_out': 192,
'expand_ratio': 6, 'id_skip': True, 'strides': 2, 'se_ratio': 0.25},
{'kernel_size': 3, 'repeats': 1, 'filters_in': 192, 'filters_out': 320,
'expand_ratio': 6, 'id_skip': True, 'strides': 1, 'se_ratio': 0.25}
)
class EfficientNet(FrcnnModel):
'''EfficientNet backbones for the FasterRCNN model.
This EfficientNet class uses FrcnnModel as its base class and applies customizations
specific to the EfficientNet backbone. Methods here override the corresponding
methods in the FrcnnModel class.
'''
def __init__(self, nlayers, batch_size_per_gpu,
rpn_stride, regularizer_type,
weight_decay, freeze_bn, freeze_blocks,
dropout_rate, drop_connect_rate,
conv_bn_share_bias, all_projections,
use_pooling, anchor_sizes, anchor_ratios,
roi_pool_size, roi_pool_2x, num_classes,
std_scaling, rpn_pre_nms_top_N, rpn_post_nms_top_N,
rpn_nms_iou_thres, gt_as_roi, rcnn_min_overlap,
rcnn_max_overlap, rcnn_train_bs, rcnn_bbox_std,
rpn_train_bs, lambda_rpn_class, lambda_rpn_regr,
lambda_rcnn_class, lambda_rcnn_regr,
backbone, results_dir, enc_key, lr_config,
enable_qat=False, **kwargs):
'''Initialize the EfficientNet FasterRCNN model.'''
super(EfficientNet, self).__init__(
nlayers, batch_size_per_gpu,
rpn_stride, regularizer_type,
weight_decay, freeze_bn, freeze_blocks,
dropout_rate, drop_connect_rate,
conv_bn_share_bias, all_projections,
use_pooling, anchor_sizes, anchor_ratios,
roi_pool_size, roi_pool_2x, num_classes,
std_scaling, rpn_pre_nms_top_N, rpn_post_nms_top_N,
rpn_nms_iou_thres, gt_as_roi, rcnn_min_overlap,
rcnn_max_overlap, rcnn_train_bs, rcnn_bbox_std,
rpn_train_bs, lambda_rpn_class, lambda_rpn_regr,
lambda_rcnn_class, lambda_rcnn_regr,
backbone, results_dir, enc_key, lr_config, enable_qat,
**kwargs
)
# constant parameters specific to EfficientNet
# activation fn defaults to swish if unspecified.
if self.activation_type in [None, ""]:
self.activation_type = swish
self.depth_divisor = 8
if self.nlayers == "b0":
self.width_coefficient = 1.0
self.depth_coefficient = 1.0
elif self.nlayers == "b1":
self.width_coefficient = 1.0
self.depth_coefficient = 1.1
elif self.nlayers == "b2":
self.width_coefficient = 1.1
self.depth_coefficient = 1.2
elif self.nlayers == "b3":
self.width_coefficient = 1.2
self.depth_coefficient = 1.4
elif self.nlayers == "b4":
self.width_coefficient = 1.4
self.depth_coefficient = 1.8
elif self.nlayers == "b5":
self.width_coefficient = 1.6
self.depth_coefficient = 2.2
elif self.nlayers == "b6":
self.width_coefficient = 1.8
self.depth_coefficient = 2.6
elif self.nlayers == "b7":
self.width_coefficient = 2.0
self.depth_coefficient = 3.1
else:
raise ValueError("Unsupported EfficientNet {} architecture.".format(self.nlayers))
def backbone(self, x):
'''backbone of the EfficientNet FasterRCNN model.'''
bn_axis = 1
x = ZeroPadding2D(
padding=correct_pad(x, 3),
name='stem_conv_pad'
)(x)
x = Conv2D(
round_filters(32, self.depth_divisor, self.width_coefficient),
3,
strides=2,
padding='valid',
use_bias=not self.conv_bn_share_bias,
kernel_initializer=CONV_KERNEL_INITIALIZER,
kernel_regularizer=self.kernel_reg,
trainable=not bool(0 in self.freeze_blocks),
name='stem_conv'
)(x)
if self.freeze_bn:
x = BatchNormalization(axis=bn_axis, name='stem_bn')(x, training=False)
else:
x = BatchNormalization(axis=bn_axis, name='stem_bn')(x)
x = Activation(self.activation_type, name='stem_activation')(x)
blocks_args = deepcopy(list(DEFAULT_BLOCKS_ARGS))
b = 0
blocks = float(sum(args['repeats'] for args in blocks_args))
for (i, args) in enumerate(blocks_args):
assert args['repeats'] > 0
# Update block input and output filters based on depth multiplier.
args['filters_in'] = round_filters(
args['filters_in'],
self.depth_divisor,
self.width_coefficient
)
args['filters_out'] = round_filters(
args['filters_out'],
self.depth_divisor,
self.width_coefficient
)
for j in range(round_repeats(args.pop('repeats'), self.depth_coefficient)):
# The first block needs to take care of stride and filter size increase.
if j > 0:
args['strides'] = 1
args['filters_in'] = args['filters_out']
# Skip the last two blocks; they will be used in the RCNN body.
if i < len(blocks_args) - 2:
x = block(
x, self.activation_type,
self.drop_connect_rate * b / blocks,
freeze=bool((i + 1) in self.freeze_blocks),
freeze_bn=self.freeze_bn,
use_bias=not self.conv_bn_share_bias,
kernel_regularizer=self.kernel_reg,
name='block{}{}_'.format(i + 1, chr(j + 97)),
**args
)
b += 1
return x
def rcnn_body(self, x):
'''RCNN body.'''
data_format = 'channels_first'
bn_axis = 1
_stride = 2 if self.roi_pool_2x else 1
blocks_args = deepcopy(list(DEFAULT_BLOCKS_ARGS))
b = 0
blocks = float(sum(args['repeats'] for args in blocks_args))
for (i, args) in enumerate(blocks_args):
assert args['repeats'] > 0
# Update block input and output filters based on depth multiplier.
args['filters_in'] = round_filters(
args['filters_in'],
self.depth_divisor,
self.width_coefficient
)
args['filters_out'] = round_filters(
args['filters_out'],
self.depth_divisor,
self.width_coefficient
)
for j in range(round_repeats(args.pop('repeats'), self.depth_coefficient)):
# The first block needs to take care of stride and filter size increase.
if j > 0:
args['strides'] = 1
args['filters_in'] = args['filters_out']
elif i == len(blocks_args) - 2:
args['strides'] = _stride
# only use the last two blocks
if i >= len(blocks_args) - 2:
x = block(
x, self.activation_type,
self.drop_connect_rate * b / blocks,
freeze=bool((i + 1) in self.freeze_blocks),
freeze_bn=self.freeze_bn,
use_bias=not self.conv_bn_share_bias,
kernel_regularizer=self.kernel_reg,
name='block{}{}_'.format(i + 1, chr(j + 97)),
use_td=True,
**args
)
b += 1
# Build top
layer = Conv2D(
round_filters(1280, self.depth_divisor, self.width_coefficient),
1,
padding='same',
use_bias=not self.conv_bn_share_bias,
kernel_initializer=CONV_KERNEL_INITIALIZER,
trainable=not bool((len(blocks_args) + 1) in self.freeze_blocks),
kernel_regularizer=self.kernel_reg,
name='top_conv'
)
x = TimeDistributed(layer)(x)
layer = BatchNormalization(axis=bn_axis, name='top_bn')
if self.freeze_bn:
x = TimeDistributed(layer)(x, training=False)
else:
x = TimeDistributed(layer)(x)
x = Activation(self.activation_type, name='top_activation')(x)
layer = AveragePooling2D(
pool_size=self.roi_pool_size, name='avg_pool',
data_format=data_format, padding='valid'
)
x = TimeDistributed(layer)(x)
# During export, in order to map this node to UFF Flatten Op, we have to
# make sure this layer name has 'flatten' in it. Otherwise, it cannot be
# converted to UFF Flatten Op during pb to UFF conversion.
x = TimeDistributed(Flatten(name='flatten'), name="time_distributed_flatten")(x)
if self.dropout_rate > 0:
x = TimeDistributed(Dropout(self.dropout_rate, name='top_dropout'))(x)
return x
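# Split note (derived from the loop structure above): backbone() builds
# blocks 1..5 (i < len(blocks_args) - 2) while rcnn_body() builds the last
# two blocks 6..7 plus the 1280-channel top conv; both passes advance the
# same counter `b` over all repeats, which keeps the drop_connect_rate
# schedule consistent across the backbone/RCNN split.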
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/models/efficientnet.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ResNet models for FasterRCNN."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Activation, AveragePooling2D, BatchNormalization, \
Conv2D, Flatten, \
MaxPooling2D, TimeDistributed
from nvidia_tao_tf1.core.templates.utils import arg_scope, CNNBlock
from nvidia_tao_tf1.cv.faster_rcnn.models.model_builder import FrcnnModel
class ResNet(FrcnnModel):
'''ResNet backbones for the FasterRCNN model.
This ResNet class uses FrcnnModel as its base class and applies customizations
specific to the ResNet backbone. Methods here override the corresponding methods
in the FrcnnModel class.
'''
def __init__(self, nlayers, batch_size_per_gpu,
rpn_stride, regularizer_type,
weight_decay, freeze_bn, freeze_blocks,
dropout_rate, drop_connect_rate,
conv_bn_share_bias, all_projections,
use_pooling, anchor_sizes, anchor_ratios,
roi_pool_size, roi_pool_2x, num_classes,
std_scaling, rpn_pre_nms_top_N, rpn_post_nms_top_N,
rpn_nms_iou_thres, gt_as_roi, rcnn_min_overlap,
rcnn_max_overlap, rcnn_train_bs, rcnn_bbox_std,
rpn_train_bs, lambda_rpn_class, lambda_rpn_regr,
lambda_rcnn_class, lambda_rcnn_regr,
backbone, results_dir, enc_key, lr_config, enable_qat=False,
**kwargs):
'''Initialize the ResNet FasterRCNN model.'''
assert nlayers in [10, 18, 34, 50, 101], 'Number of layers for ResNet can ' \
'only be 10, 18, 34, 50, 101, got {}'.format(nlayers)
super(ResNet, self).__init__(nlayers, batch_size_per_gpu,
rpn_stride, regularizer_type,
weight_decay, freeze_bn, freeze_blocks,
dropout_rate, drop_connect_rate,
conv_bn_share_bias, all_projections,
use_pooling, anchor_sizes, anchor_ratios,
roi_pool_size, roi_pool_2x, num_classes,
std_scaling, rpn_pre_nms_top_N, rpn_post_nms_top_N,
rpn_nms_iou_thres, gt_as_roi, rcnn_min_overlap,
rcnn_max_overlap, rcnn_train_bs, rcnn_bbox_std,
rpn_train_bs, lambda_rpn_class, lambda_rpn_regr,
lambda_rcnn_class, lambda_rcnn_regr,
backbone, results_dir, enc_key, lr_config, enable_qat,
**kwargs)
def backbone(self, input_images):
'''backbone of the ResNet FasterRCNN model.'''
bn_axis = 1
data_format = 'channels_first'
freeze0 = bool(0 in self.freeze_blocks)
freeze1 = bool(1 in self.freeze_blocks)
freeze2 = bool(2 in self.freeze_blocks)
freeze3 = bool(3 in self.freeze_blocks)
x = Conv2D(
64, (7, 7),
use_bias=not self.conv_bn_share_bias,
strides=(2, 2),
padding='same',
kernel_regularizer=self.kernel_reg,
name='conv1', trainable=not freeze0)(input_images)
if self.freeze_bn:
x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x, training=False)
else:
x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
x = Activation('relu')(x)
if self.use_pooling:
x = MaxPooling2D(
pool_size=(3, 3), strides=(2, 2), padding='same')(x)
first_stride = 1
else:
first_stride = 2
with arg_scope(
[CNNBlock],
use_batch_norm=True,
all_projections=self.all_projections,
use_shortcuts=True,
data_format=data_format,
kernel_regularizer=self.kernel_reg,
bias_regularizer=None,
activation_type='relu',
freeze_bn=self.freeze_bn,
activation_kwargs={},
use_bias=not self.conv_bn_share_bias):
if self.nlayers == 10:
x = CNNBlock(repeat=1, stride=first_stride,
subblocks=[(3, 64), (3, 64)],
index=1, freeze_block=freeze1)(x)
x = CNNBlock(repeat=1, stride=2,
subblocks=[(3, 128), (3, 128)],
index=2, freeze_block=freeze2)(x)
x = CNNBlock(repeat=1, stride=2,
subblocks=[(3, 256), (3, 256)],
index=3, freeze_block=freeze3)(x)
elif self.nlayers == 18:
x = CNNBlock(repeat=2, stride=first_stride,
subblocks=[(3, 64), (3, 64)],
index=1, freeze_block=freeze1)(x)
x = CNNBlock(repeat=2, stride=2,
subblocks=[(3, 128), (3, 128)],
index=2, freeze_block=freeze2)(x)
x = CNNBlock(repeat=2, stride=2,
subblocks=[(3, 256), (3, 256)],
index=3, freeze_block=freeze3)(x)
elif self.nlayers == 34:
x = CNNBlock(repeat=3, stride=first_stride,
subblocks=[(3, 64), (3, 64)],
index=1, freeze_block=freeze1)(x)
x = CNNBlock(repeat=4, stride=2,
subblocks=[(3, 128), (3, 128)],
index=2, freeze_block=freeze2)(x)
x = CNNBlock(repeat=6, stride=2,
subblocks=[(3, 256), (3, 256)],
index=3, freeze_block=freeze3)(x)
elif self.nlayers == 50:
x = CNNBlock(repeat=3, stride=first_stride,
subblocks=[(1, 64), (3, 64), (1, 256)],
index=1, freeze_block=freeze1)(x)
x = CNNBlock(repeat=4, stride=2,
subblocks=[(1, 128), (3, 128), (1, 512)],
index=2, freeze_block=freeze2)(x)
x = CNNBlock(repeat=6, stride=2,
subblocks=[(1, 256), (3, 256), (1, 1024)],
index=3, freeze_block=freeze3)(x)
elif self.nlayers == 101:
x = CNNBlock(repeat=3, stride=first_stride,
subblocks=[(1, 64), (3, 64), (1, 256)],
index=1, freeze_block=freeze1)(x)
x = CNNBlock(repeat=4, stride=2,
subblocks=[(1, 128), (3, 128), (1, 512)],
index=2, freeze_block=freeze2)(x)
x = CNNBlock(repeat=23, stride=2,
subblocks=[(1, 256), (3, 256), (1, 1024)],
index=3, freeze_block=freeze3)(x)
elif self.nlayers == 152:
x = CNNBlock(repeat=3, stride=first_stride,
subblocks=[(1, 64), (3, 64), (1, 256)],
index=1, freeze_block=freeze1)(x)
x = CNNBlock(repeat=8, stride=2,
subblocks=[(1, 128), (3, 128), (1, 512)],
index=2, freeze_block=freeze2)(x)
x = CNNBlock(repeat=36, stride=2,
subblocks=[(1, 256), (3, 256), (1, 1024)],
index=3, freeze_block=freeze3)(x)
else:
raise NotImplementedError('''A resnet with nlayers=%d is
not implemented.''' % self.nlayers)
return x
def rcnn_body(self, x):
'''RCNN body.'''
data_format = 'channels_first'
_stride = 2 if self.roi_pool_2x else 1
with arg_scope([CNNBlock],
use_batch_norm=True,
all_projections=self.all_projections,
use_shortcuts=True,
data_format=data_format,
kernel_regularizer=self.kernel_reg,
bias_regularizer=None,
activation_type='relu',
freeze_bn=self.freeze_bn,
activation_kwargs={},
use_bias=not self.conv_bn_share_bias,
use_td=True):
if self.nlayers == 10:
x = CNNBlock(repeat=1, stride=_stride,
subblocks=[(3, 512), (3, 512)], index=4)(x)
elif self.nlayers == 18:
x = CNNBlock(repeat=2, stride=_stride,
subblocks=[(3, 512), (3, 512)], index=4)(x)
elif self.nlayers == 34:
x = CNNBlock(repeat=3, stride=_stride,
subblocks=[(3, 512), (3, 512)], index=4)(x)
elif self.nlayers == 50:
x = CNNBlock(repeat=3, stride=_stride,
subblocks=[(1, 512), (3, 512), (1, 2048)], index=4)(x)
elif self.nlayers == 101:
x = CNNBlock(repeat=3, stride=_stride,
subblocks=[(1, 512), (3, 512), (1, 2048)], index=4)(x)
elif self.nlayers == 152:
x = CNNBlock(repeat=3, stride=_stride,
subblocks=[(1, 512), (3, 512), (1, 2048)], index=4)(x)
else:
raise NotImplementedError('''A resnet with nlayers=%d is
not implemented.''' % self.nlayers)
x = TimeDistributed(AveragePooling2D((self.roi_pool_size, self.roi_pool_size),
name='avg_pool'))(x)
# During export, in order to map this node to UFF Flatten Op, we have to
# make sure this layer name has 'flatten' in it. Otherwise, it cannot be
# converted to UFF Flatten Op during pb to UFF conversion.
x = TimeDistributed(Flatten(name='classifier_flatten'),
name='time_distributed_flatten')(x)
return x
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/models/resnets.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilitity functions for FasterRCNN models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import keras
from keras.layers import Input
from keras.optimizers import Adam, RMSprop, SGD
from keras.utils.generic_utils import CustomObjectScope
import tensorflow as tf
from nvidia_tao_tf1.cv.common.mlops.clearml import get_clearml_task
from nvidia_tao_tf1.cv.common.mlops.wandb import check_wandb_logged_in, initialize_wandb
from nvidia_tao_tf1.cv.common.utils import CUSTOM_OBJS
from nvidia_tao_tf1.cv.common.visualizer.tensorboard_visualizer import TensorBoardVisualizer
from nvidia_tao_tf1.cv.faster_rcnn.data_loader.inputs_loader import InputsLoader, RPNTargetGenerator
from nvidia_tao_tf1.cv.faster_rcnn.layers.custom_layers import (
CropAndResize, NmsInputs, OutputParser, Proposal, ProposalTarget
)
from nvidia_tao_tf1.cv.faster_rcnn.models.darknets import DarkNet
from nvidia_tao_tf1.cv.faster_rcnn.models.efficientnet import EfficientNet
from nvidia_tao_tf1.cv.faster_rcnn.models.googlenet import GoogleNet
from nvidia_tao_tf1.cv.faster_rcnn.models.iva_vgg import IVAVGG
from nvidia_tao_tf1.cv.faster_rcnn.models.mobilenet_v1 import MobileNetV1
from nvidia_tao_tf1.cv.faster_rcnn.models.mobilenet_v2 import MobileNetV2
from nvidia_tao_tf1.cv.faster_rcnn.models.resnet101 import ResNet101
from nvidia_tao_tf1.cv.faster_rcnn.models.resnets import ResNet
from nvidia_tao_tf1.cv.faster_rcnn.models.vgg16 import VGG16
def get_optimizer(spec):
"""get the optimizer according to the spec."""
if spec.training_config.optimizer.WhichOneof("optim") == 'adam':
return Adam(lr=spec.training_config.optimizer.adam.lr,
beta_1=spec.training_config.optimizer.adam.beta_1,
beta_2=spec.training_config.optimizer.adam.beta_2,
epsilon=None,
decay=spec.training_config.optimizer.adam.decay,
amsgrad=spec.training_config.optimizer.adam.amsgrad)
if spec.training_config.optimizer.WhichOneof("optim") == 'sgd':
return SGD(lr=spec.training_config.optimizer.sgd.lr,
momentum=spec.training_config.optimizer.sgd.momentum,
decay=spec.training_config.optimizer.sgd.decay,
nesterov=spec.training_config.optimizer.sgd.nesterov)
if spec.training_config.optimizer.WhichOneof("optim") == 'rmsprop':
return RMSprop(lr=spec.training_config.optimizer.rmsprop.lr)
raise ValueError('Invalid Optimizer config in spec file.')
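# Hedged example (spec values are illustrative): with a training_config whose
# optimizer proto is
#   optimizer { sgd { lr: 0.02 momentum: 0.9 decay: 0.0 nesterov: false } }
# get_optimizer(spec) returns
#   SGD(lr=0.02, momentum=0.9, decay=0.0, nesterov=False)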
def select_model_type(model_arch):
'''select model type according to the config.'''
if 'resnet:' in model_arch:
arch = ResNet
elif "resnet101" == model_arch:
arch = ResNet101
elif 'vgg:' in model_arch:
arch = IVAVGG
elif 'mobilenet_v1' == model_arch:
arch = MobileNetV1
elif 'mobilenet_v2' == model_arch:
arch = MobileNetV2
elif 'googlenet' == model_arch:
arch = GoogleNet
elif 'vgg16' == model_arch:
arch = VGG16
elif 'darknet' in model_arch:
arch = DarkNet
elif "efficientnet:" in model_arch:
arch = EfficientNet
else:
raise ValueError('Unsupported model architecture: {}'.format(model_arch))
return arch
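# Hedged examples of the backbone-string dispatch above (strings are
# illustrative spec values):
#   select_model_type("resnet:18")       -> ResNet
#   select_model_type("resnet101")       -> ResNet101   (no colon; exact match)
#   select_model_type("efficientnet:b1") -> EfficientNet
#   select_model_type("darknet:53")      -> DarkNet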
def build_inference_model(
model,
config_override,
create_session=False,
max_box_num=100,
regr_std_scaling=(10.0, 10.0, 5.0, 5.0),
iou_thres=0.5,
score_thres=0.0001,
attach_keras_parser=True,
eval_rois=300,
force_batch_size=-1
):
'''Build inference/test model from training model.'''
def compose_call(prev_call_method):
# Wrap the original call method so that `training` defaults to False,
# forcing inference-mode behavior unless a training flag is passed explicitly.
def call(self, inputs, training=False):
return prev_call_method(self, inputs, training)
return call
def dropout_patch_call(self, inputs, training=False):
# Just return the input tensor. Keras will map this to ``keras.backend.identity``,
# which the TensorRT 3.0 UFF parser supports.
return inputs
# Patch BatchNormalization and Dropout call methods so they don't create
# the training part of the graph.
prev_batchnorm_call = keras.layers.normalization.BatchNormalization.call
prev_td_call = keras.layers.wrappers.TimeDistributed.call
prev_dropout_call = keras.layers.Dropout.call
keras.layers.normalization.BatchNormalization.call = compose_call(
prev_batchnorm_call
)
keras.layers.wrappers.TimeDistributed.call = compose_call(
prev_td_call
)
keras.layers.Dropout.call = dropout_patch_call
_explored_layers = dict()
for l in model.layers:
_explored_layers[l.name] = [False, None]
input_layer = [l for l in model.layers if (type(l) == keras.layers.InputLayer)]
layers_to_explore = input_layer
model_outputs = {}
# Loop until we reach the last layer.
while layers_to_explore:
layer = layers_to_explore.pop(0)
# Skip layers that may be revisited in the graph to prevent duplicates.
if not _explored_layers[layer.name][0]:
# Check if all inbound layers explored for given layer.
if not all([
_explored_layers[l.name][0]
for n in layer._inbound_nodes
for l in n.inbound_layers
]):
continue
outputs = None
# Visit input layer.
if type(layer) == keras.layers.InputLayer and layer.name == 'input_image':
# Re-use the existing InputLayer.
outputs = layer.output
new_layer = layer
elif type(layer) == keras.layers.InputLayer:
# Skip the input_class_ids and input_gt_boxes layers:
# mark them as visited but do nothing else.
_explored_layers[layer.name][0] = True
_explored_layers[layer.name][1] = None
layers_to_explore.extend([node.outbound_layer for node in layer._outbound_nodes])
continue
# special handling for ProposalTarget layer.
elif type(layer) == ProposalTarget:
# get ROIs data.
for node in layer._inbound_nodes:
prev_outputs = []
# only use the first Input: input_rois
for idx, l in enumerate(node.inbound_layers[:1]):
keras_layer = _explored_layers[l.name][1]
prev_outputs.append(keras_layer.get_output_at(node.node_indices[idx]))
assert prev_outputs, "Expected non-input layer to have inputs."
# remember it
if len(prev_outputs) == 1:
prev_outputs = prev_outputs[0]
proposal_outputs = prev_outputs
_explored_layers[layer.name][0] = True
_explored_layers[layer.name][1] = None
layers_to_explore.extend([node.outbound_layer for node in layer._outbound_nodes])
continue
# special handling of CropAndResize to skip the ProposalTarget layer.
elif type(layer) == CropAndResize:
# Create new layer.
layer_config = layer.get_config()
new_layer = type(layer).from_config(layer_config)
outputs = []
for node in layer._inbound_nodes:
prev_outputs = []
for idx, l in enumerate(node.inbound_layers):
# Skip ProposalTarget (idx==1) because it doesn't exist
# in the validation model. Use None as a placeholder;
# the None will be replaced below.
if idx == 1:
prev_outputs.append(None)
continue
keras_layer = _explored_layers[l.name][1]
prev_outputs.append(keras_layer.get_output_at(node.node_indices[idx]))
assert prev_outputs, "Expected non-input layer to have inputs."
# replace None with the proposal_outputs
prev_outputs[1] = proposal_outputs
if len(prev_outputs) == 1:
prev_outputs = prev_outputs[0]
outputs.append(new_layer(prev_outputs))
if len(outputs) == 1:
outputs = outputs[0]
elif ("pre_pool_reshape" in layer.name and type(layer) == keras.layers.Reshape):
H, W = layer._inbound_nodes[0].inbound_layers[0].output_shape[3:]
new_layer = keras.layers.Reshape((-1, H, W), name=layer.name)
outputs = []
for node in layer._inbound_nodes:
prev_outputs = []
for idx, l in enumerate(node.inbound_layers):
keras_layer = _explored_layers[l.name][1]
prev_outputs.append(keras_layer.get_output_at(node.node_indices[idx]))
assert prev_outputs, "Expected non-input layer to have inputs."
if len(prev_outputs) == 1:
prev_outputs = prev_outputs[0]
outputs.append(new_layer(prev_outputs))
if len(outputs) == 1:
outputs = outputs[0]
elif ("post_pool_reshape" in layer.name and type(layer) == keras.layers.Reshape):
new_layer = keras.layers.Reshape((eval_rois, -1, 1, 1), name=layer.name)
outputs = []
for node in layer._inbound_nodes:
prev_outputs = []
for idx, l in enumerate(node.inbound_layers):
keras_layer = _explored_layers[l.name][1]
prev_outputs.append(keras_layer.get_output_at(node.node_indices[idx]))
assert prev_outputs, "Expected non-input layer to have inputs."
if len(prev_outputs) == 1:
prev_outputs = prev_outputs[0]
outputs.append(new_layer(prev_outputs))
if len(outputs) == 1:
outputs = outputs[0]
else:
# Create new layer.
layer_config = layer.get_config()
# override config for Proposal layer for test graph
if type(layer) == Proposal:
layer_config.update(config_override)
new_layer = type(layer).from_config(layer_config)
# Add to model.
outputs = []
for node in layer._inbound_nodes:
prev_outputs = []
for idx, l in enumerate(node.inbound_layers):
keras_layer = _explored_layers[l.name][1]
prev_outputs.append(keras_layer.get_output_at(node.node_indices[idx]))
assert prev_outputs, "Expected non-input layer to have inputs."
if len(prev_outputs) == 1:
prev_outputs = prev_outputs[0]
outputs.append(new_layer(prev_outputs))
if len(outputs) == 1:
outputs = outputs[0]
weights = layer.get_weights()
if weights is not None:
new_layer.set_weights(weights)
outbound_nodes = layer._outbound_nodes
# RPN outputs will be excluded since they have outbound nodes.
if not outbound_nodes:
model_outputs[layer.output.name] = outputs
layers_to_explore.extend([node.outbound_layer for node in outbound_nodes])
# Mark current layer as visited and assign output nodes to the layer.
_explored_layers[layer.name][0] = True
_explored_layers[layer.name][1] = new_layer
else:
continue
# Create new keras model object from pruned specifications.
# only use input_image as Model Input.
output_tensors = [model_outputs[l.name] for l in model.outputs if l.name in model_outputs]
output_tensors = [proposal_outputs] + output_tensors
new_model = keras.models.Model(inputs=model.inputs[:1], outputs=output_tensors, name=model.name)
# attach OutputParser layer
if attach_keras_parser:
parser_outputs = OutputParser(max_box_num, list(regr_std_scaling), iou_thres, score_thres)(
new_model.outputs + new_model.inputs
)
new_model = keras.models.Model(
inputs=new_model.inputs,
outputs=parser_outputs,
name=new_model.name
)
else:
# prepare NMS input tensors for TensorRT NMSPlugin inference
nms_inputs = NmsInputs(regr_std_scaling)(new_model.outputs)
new_model = keras.models.Model(
inputs=new_model.inputs,
outputs=nms_inputs,
name=new_model.name
)
# Save the model to a file, reset the TF graph, and load it back to make sure
# the TF op names are not appended with _n.
os_handle, temp_file_name = tempfile.mkstemp()
os.close(os_handle)
input_shape = list(new_model.get_layer("input_image").output_shape)
input_shape = tuple([force_batch_size] + input_shape[1:])
with CustomObjectScope(CUSTOM_OBJS):
new_model.save(temp_file_name)
# clear old tf graph and session
keras.backend.clear_session()
if create_session:
# create a new tf session and use it as Keras session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
keras.backend.set_session(tf.Session(config=config))
keras.backend.set_learning_phase(0)
def _get_input_layer(*args, **argskw):
return keras.layers.InputLayer(batch_input_shape=input_shape, name="input_image")
# Force a static batch size for the keras model in case we want to export
# to an onnx model with static batch size
if force_batch_size > 0:
with CustomObjectScope({**CUSTOM_OBJS, "InputLayer": _get_input_layer}):
new_model = keras.models.load_model(temp_file_name, compile=False)
else:
with CustomObjectScope({**CUSTOM_OBJS}):
new_model = keras.models.load_model(temp_file_name, compile=False)
os.remove(temp_file_name)
# Unpatch Keras before return.
keras.layers.normalization.BatchNormalization.call = prev_batchnorm_call
keras.layers.wrappers.TimeDistributed.call = prev_td_call
keras.layers.Dropout.call = prev_dropout_call
return new_model
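# Hedged usage sketch (the config_override keys are hypothetical Proposal-layer
# config fields; adjust to the actual Proposal.get_config() keys):
#   infer_model = build_inference_model(
#       train_model,
#       config_override={"pre_nms_top_N": 6000, "post_nms_top_N": 300},
#       create_session=True,
#       eval_rois=300)
# The returned model consumes only the `input_image` tensor; with
# attach_keras_parser=True it emits parsed detections via OutputParser,
# otherwise it emits NMS-ready tensors via NmsInputs for TensorRT.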
def build_or_resume_model(spec, hvd, logger, results_dir):
'''Build a new model or resume a checkpoint model.'''
# Define visualizer
visualizer = TensorBoardVisualizer()
visualizer.build_from_config(
spec.training_config.visualizer
)
# Disable the visualizer in all processes
# other than the rank 0 process.
if not hvd.rank() == 0:
visualizer.enabled = False
# Setting up clearml integration
task = None
if hvd.rank() == 0:
if spec.training_config.visualizer.HasField("clearml_config"):
clearml_config = spec.training_config.visualizer.clearml_config
task = get_clearml_task(clearml_config, "faster_rcnn")
if spec.training_config.visualizer.HasField("wandb_config"):
wandb_config = spec.training_config.visualizer.wandb_config
wandb_logged_in = check_wandb_logged_in()
if wandb_logged_in:
wandb_name = f"{wandb_config.name}" if wandb_config.name else \
"faster_rcnn_train"
initialize_wandb(
project=wandb_config.project if wandb_config.project else None,
entity=wandb_config.entity if wandb_config.entity else None,
notes=wandb_config.notes if wandb_config.notes else None,
tags=wandb_config.tags if wandb_config.tags else None,
sync_tensorboard=True,
save_code=False,
results_dir=results_dir,
wandb_logged_in=wandb_logged_in,
name=wandb_name
)
# build input tensors
data_loader = InputsLoader(
spec.training_dataset,
spec.data_augmentation,
spec.batch_size_per_gpu,
spec.image_c,
spec.image_mean_values,
spec.image_scaling_factor,
bool(spec.image_channel_order == 'bgr'),
max_objs_per_img=spec.max_objs_per_img,
training=True,
enable_augmentation=spec.enable_augmentation,
visualizer=visualizer,
rank=hvd.rank()
)
img_input = Input(shape=spec.input_dims, name='input_image', tensor=data_loader.images)
gt_cls_input = Input(shape=(None,), name='input_gt_cls', tensor=data_loader.gt_classes)
gt_bbox_input = Input(shape=(None, 4), name='input_gt_bbox', tensor=data_loader.gt_boxes)
# build the model
model_type = select_model_type(spec._backbone)
model = model_type(spec.nlayers, spec.batch_size_per_gpu,
spec.rpn_stride, spec.reg_type,
spec.weight_decay, spec.freeze_bn, spec.freeze_blocks,
spec.dropout_rate, spec.drop_connect_rate,
spec.conv_bn_share_bias, spec.all_projections,
spec.use_pooling, spec.anchor_sizes, spec.anchor_ratios,
spec.roi_pool_size, spec.roi_pool_2x, spec.num_classes,
spec.std_scaling, spec.rpn_pre_nms_top_N, spec.rpn_post_nms_top_N,
spec.rpn_nms_iou_thres, spec.gt_as_roi,
spec.rcnn_min_overlap, spec.rcnn_max_overlap, spec.rcnn_train_bs,
spec.rcnn_regr_std, spec.rpn_train_bs, spec.lambda_rpn_class,
spec.lambda_rpn_regr, spec.lambda_cls_class, spec.lambda_cls_regr,
"frcnn_"+spec._backbone.replace(":", "_"), results_dir,
spec.enc_key, spec.lr_scheduler,
spec.enable_qat,
activation_type=spec.activation_type,
early_stopping=spec.early_stopping)
if spec.resume_from_model:
# resume training from an existing model
initial_epoch = model.resume_model(spec,
[img_input, gt_cls_input, gt_bbox_input],
hvd,
logger=logger)
else:
# build a new model, from scratch or from pruned model
initial_epoch = 0
if spec.pretrained_model:
model.build_model_from_pruned(spec.pretrained_model,
img_input, gt_cls_input,
gt_bbox_input, logger,
spec.regularization_config)
else:
model.build_keras_model(img_input, gt_cls_input, gt_bbox_input)
if (not spec.pretrained_model) and spec.pretrained_weights:
model.load_weights(spec.pretrained_weights, spec.enc_key, logger)
if spec.training_config.model_parallelism:
model.parallelize(
tuple(spec.training_config.model_parallelism),
)
# build target tensors
rpn_target_generator = RPNTargetGenerator(
# dynamic image width
tf.shape(data_loader.images)[3],
# dynamic image height
tf.shape(data_loader.images)[2],
# dynamic RPN width
tf.shape(model.keras_model.outputs[0])[3],
# dynamic RPN height
tf.shape(model.keras_model.outputs[0])[2],
spec.rpn_stride,
spec.anchor_sizes,
spec.anchor_ratios,
spec.batch_size_per_gpu,
spec.rpn_max_overlap,
spec.rpn_min_overlap,
spec.rpn_train_bs,
max_objs_per_image=spec.max_objs_per_img
)
# Visualize model weights histogram
if hvd.rank() == 0 and spec.training_config.visualizer.enabled:
visualizer.keras_model_weight_histogram(model.keras_model)
# assign to model for ease of access and testing
model.rpn_target_generator = rpn_target_generator
rpn_scores_tensor, rpn_deltas_tensor = data_loader.generate_rpn_targets(
rpn_target_generator.build_rpn_target_batch
)
model.set_target_tensors(rpn_scores_tensor, rpn_deltas_tensor)
# build others
model.set_optimizer(get_optimizer(spec), hvd)
_total_examples = (data_loader.num_samples + spec.batch_size_per_gpu - 1)
iters_per_epoch = _total_examples // spec.batch_size_per_gpu
iters_per_epoch = (iters_per_epoch + hvd.size() - 1) // hvd.size()
model.build_losses()
initial_step = initial_epoch * iters_per_epoch
model.build_lr_scheduler(spec.epochs*iters_per_epoch, hvd.size(), initial_step)
model.set_hvd_callbacks(hvd)
if hvd.rank() == 0:
if spec.checkpoint_interval > spec.epochs:
logger.warning(
f"Checkpoint interval: {spec.checkpoint_interval} larger than training epochs: "
f"{spec.epochs}, disabling checkpoint."
)
else:
model.build_checkpointer(spec.checkpoint_interval)
# Build the validation data loader.
# First, check whether the validation dataset is empty (None).
# To avoid a Horovod hang, all processes have to run validation;
# otherwise, only rank 0 would run validation, taking a few extra minutes
# and resulting in a Horovod deadlock.
if spec.training_dataset.WhichOneof('dataset_split_type') in [
'validation_fold',
'validation_data_source',
]:
logger.info("Building validation dataset...")
val_data_loader = InputsLoader(
spec.training_dataset,
spec.data_augmentation,
spec.eval_batch_size,
spec.image_c,
spec.image_mean_values,
spec.image_scaling_factor,
bool(spec.image_channel_order == 'bgr'),
max_objs_per_img=spec.max_objs_per_img,
training=False,
session=keras.backend.get_session()
)
logger.info("Validation dataset built successfully!")
if val_data_loader.num_samples > 0:
model.build_validation_callback(val_data_loader, spec)
else:
logger.info('No validation dataset found, skip validation during training.')
if hvd.rank() == 0 and spec.training_config.visualizer.enabled:
logger.info('TensorBoard Visualization Enabled')
# Can only write to tb file by 1 process
model.build_tensorboard_callback()
if hvd.rank() == 0:
model.build_status_logging_callback(results_dir, spec.epochs, True)
model.build_early_stopping_callback()
model.compile()
return model, iters_per_epoch, initial_epoch, task
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/models/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""VGG16 models for FasterRCNN."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Conv2D, Dense, Dropout, \
Flatten, MaxPooling2D, \
TimeDistributed
from nvidia_tao_tf1.cv.faster_rcnn.models.model_builder import FrcnnModel
class VGG16(FrcnnModel):
'''VGG16 backbones for the FasterRCNN model.
This VGG16 class uses FrcnnModel as its base class and applies customizations
specific to the VGG16 backbone. Methods here override the corresponding methods
in the FrcnnModel class.
'''
def backbone(self, input_images):
'''backbone.'''
# Block 1
freeze1 = bool(1 in self.freeze_blocks)
x = Conv2D(64, (3, 3), activation='relu',
padding='same', name='block1_conv1',
kernel_regularizer=self.kernel_reg,
bias_regularizer=None,
trainable=not freeze1)(input_images)
x = Conv2D(64, (3, 3), activation='relu',
padding='same', name='block1_conv2',
kernel_regularizer=self.kernel_reg,
bias_regularizer=None,
trainable=not freeze1)(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
# Block 2
freeze2 = bool(2 in self.freeze_blocks)
x = Conv2D(128, (3, 3), activation='relu',
padding='same', name='block2_conv1',
kernel_regularizer=self.kernel_reg,
bias_regularizer=None,
trainable=not freeze2)(x)
x = Conv2D(128, (3, 3), activation='relu',
padding='same', name='block2_conv2',
kernel_regularizer=self.kernel_reg,
bias_regularizer=None,
trainable=not freeze2)(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
# Block 3
freeze3 = bool(3 in self.freeze_blocks)
x = Conv2D(256, (3, 3), activation='relu',
padding='same', name='block3_conv1',
kernel_regularizer=self.kernel_reg,
bias_regularizer=None,
trainable=not freeze3)(x)
x = Conv2D(256, (3, 3), activation='relu',
padding='same', name='block3_conv2',
kernel_regularizer=self.kernel_reg,
bias_regularizer=None,
trainable=not freeze3)(x)
x = Conv2D(256, (3, 3), activation='relu',
padding='same', name='block3_conv3',
kernel_regularizer=self.kernel_reg,
bias_regularizer=None,
trainable=not freeze3)(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
# Block 4
freeze4 = bool(4 in self.freeze_blocks)
x = Conv2D(512, (3, 3), activation='relu',
padding='same', name='block4_conv1',
kernel_regularizer=self.kernel_reg,
bias_regularizer=None,
trainable=not freeze4)(x)
x = Conv2D(512, (3, 3), activation='relu',
padding='same', name='block4_conv2',
kernel_regularizer=self.kernel_reg,
bias_regularizer=None,
trainable=not freeze4)(x)
x = Conv2D(512, (3, 3), activation='relu',
padding='same', name='block4_conv3',
kernel_regularizer=self.kernel_reg,
bias_regularizer=None,
trainable=not freeze4)(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
# Block 5
freeze5 = bool(5 in self.freeze_blocks)
x = Conv2D(512, (3, 3), activation='relu',
padding='same', name='block5_conv1',
kernel_regularizer=self.kernel_reg,
bias_regularizer=None,
trainable=not freeze5)(x)
x = Conv2D(512, (3, 3), activation='relu',
padding='same', name='block5_conv2',
kernel_regularizer=self.kernel_reg,
bias_regularizer=None,
trainable=not freeze5)(x)
x = Conv2D(512, (3, 3), activation='relu',
padding='same', name='block5_conv3',
kernel_regularizer=self.kernel_reg,
bias_regularizer=None,
trainable=not freeze5)(x)
return x
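# Note on feature stride (derived from the code above, not in the original
# source): the four 2x2/stride-2 max-pools after blocks 1-4 give the block 5
# feature map an overall stride of 16 relative to the input; block 5 itself
# adds no further downsampling.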
def rcnn_body(self, x):
'''RCNN body.'''
if self.roi_pool_2x:
x = TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2),
name='classifier_pool'))(x)
# During export, in order to map this node to UFF Flatten Op, we have to
# make sure this layer name has 'flatten' in it. Otherwise, it cannot be
# converted to UFF Flatten Op during pb to UFF conversion.
out = TimeDistributed(Flatten(name='classifier_flatten'),
name='time_distributed_flatten')(x)
out = TimeDistributed(Dense(4096, activation='relu', name='fc1',
kernel_regularizer=self.kernel_reg,
bias_regularizer=None))(out)
if self.dropout_rate > 0:
out = TimeDistributed(Dropout(self.dropout_rate))(out)
out = TimeDistributed(Dense(4096, activation='relu', name='fc2',
kernel_regularizer=self.kernel_reg,
bias_regularizer=None))(out)
if self.dropout_rate > 0:
out = TimeDistributed(Dropout(self.dropout_rate))(out)
return out
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/models/vgg16.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FasterRCNN model templates for VGG16/19."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Flatten
from keras.layers import MaxPooling2D
from keras.layers import TimeDistributed
from nvidia_tao_tf1.core.templates.utils import arg_scope, CNNBlock
from nvidia_tao_tf1.cv.faster_rcnn.models.model_builder import FrcnnModel
class IVAVGG(FrcnnModel):
'''IVA VGG backbones for the FasterRCNN model.
This IVA VGG class uses FrcnnModel as its base class and applies customizations
specific to the IVA VGG backbone. Methods here override the corresponding methods
in the FrcnnModel class.
'''
def __init__(self, nlayers, batch_size_per_gpu,
rpn_stride, regularizer_type,
weight_decay, freeze_bn, freeze_blocks,
dropout_rate, drop_connect_rate,
conv_bn_share_bias, all_projections,
use_pooling, anchor_sizes, anchor_ratios,
roi_pool_size, roi_pool_2x, num_classes,
std_scaling, rpn_pre_nms_top_N, rpn_post_nms_top_N,
rpn_nms_iou_thres, gt_as_roi, rcnn_min_overlap,
rcnn_max_overlap, rcnn_train_bs, rcnn_bbox_std,
rpn_train_bs, lambda_rpn_class, lambda_rpn_regr,
lambda_rcnn_class, lambda_rcnn_regr,
backbone, results_dir, enc_key, lr_config, enable_qat=False,
**kwargs):
'''Initialize the IVA VGG backbones.'''
assert nlayers in [16, 19], '''Number of layers for VGG can
only be 16, 19, got {}'''.format(nlayers)
super(IVAVGG, self).__init__(nlayers, batch_size_per_gpu,
rpn_stride, regularizer_type,
weight_decay, freeze_bn, freeze_blocks,
dropout_rate, drop_connect_rate,
conv_bn_share_bias, all_projections,
use_pooling, anchor_sizes, anchor_ratios,
roi_pool_size, roi_pool_2x, num_classes,
std_scaling, rpn_pre_nms_top_N, rpn_post_nms_top_N,
rpn_nms_iou_thres, gt_as_roi, rcnn_min_overlap,
rcnn_max_overlap, rcnn_train_bs, rcnn_bbox_std,
rpn_train_bs, lambda_rpn_class, lambda_rpn_regr,
lambda_rcnn_class, lambda_rcnn_regr,
backbone, results_dir, enc_key, lr_config, enable_qat,
**kwargs)
def backbone(self, input_images):
'''backbones of IVA VGG FasterRCNN.'''
data_format = 'channels_first'
first_stride = 1
stride = 2
if self.use_pooling:
# Disable strided convolutions with pooling enabled.
stride = 1
freeze1 = 1 in self.freeze_blocks
freeze2 = 2 in self.freeze_blocks
freeze3 = 3 in self.freeze_blocks
freeze4 = 4 in self.freeze_blocks
freeze5 = 5 in self.freeze_blocks
# Define a block functor which can create blocks.
with arg_scope([CNNBlock],
use_batch_norm=True,
freeze_bn=self.freeze_bn,
use_bias=not self.conv_bn_share_bias,
use_shortcuts=False,
data_format='channels_first',
kernel_regularizer=self.kernel_reg,
bias_regularizer=None,
activation_type='relu'):
# Implementing VGG 16 architecture.
if self.nlayers == 16:
# Block - 1.
x = CNNBlock(repeat=2, stride=first_stride, subblocks=[(3, 64)],
index=1, freeze_block=freeze1)(input_images)
if self.use_pooling:
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same',
data_format=data_format, name='block1_pool')(x)
# Block - 2.
x = CNNBlock(repeat=2, stride=stride, subblocks=[(3, 128)],
index=2, freeze_block=freeze2)(x)
if self.use_pooling:
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same',
data_format=data_format, name='block2_pool')(x)
# Block - 3.
x = CNNBlock(repeat=3, stride=stride, subblocks=[(3, 256)], index=3,
freeze_block=freeze3)(x)
if self.use_pooling:
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same',
data_format=data_format, name='block3_pool')(x)
# Block - 4.
x = CNNBlock(repeat=3, stride=stride, subblocks=[(3, 512)], index=4,
freeze_block=freeze4)(x)
if self.use_pooling:
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same',
data_format=data_format, name='block4_pool')(x)
# Block - 5.
x = CNNBlock(repeat=3, stride=stride, subblocks=[(3, 512)], index=5,
freeze_block=freeze5)(x)
# Implementing VGG 19 architecture.
elif self.nlayers == 19:
# Block - 1.
x = CNNBlock(repeat=2, stride=first_stride, subblocks=[(3, 64)], index=1,
freeze_block=freeze1)(input_images)
if self.use_pooling:
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same',
data_format=data_format, name='block1_pool')(x)
# Block - 2.
x = CNNBlock(repeat=2, stride=stride, subblocks=[(3, 128)], index=2,
freeze_block=freeze2)(x)
if self.use_pooling:
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same',
data_format=data_format, name='block2_pool')(x)
# Block - 3.
x = CNNBlock(repeat=4, stride=stride, subblocks=[(3, 256)], index=3,
freeze_block=freeze3)(x)
if self.use_pooling:
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same',
data_format=data_format, name='block3_pool')(x)
# Block - 4.
x = CNNBlock(repeat=4, stride=stride, subblocks=[(3, 512)], index=4,
freeze_block=freeze4)(x)
if self.use_pooling:
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same',
data_format=data_format, name='block4_pool')(x)
# Block - 5.
x = CNNBlock(repeat=4, stride=stride, subblocks=[(3, 512)], index=5,
freeze_block=freeze5)(x)
else:
raise NotImplementedError('''A VGG with nlayers=%d is not
implemented.''' % self.nlayers)
return x
def rcnn_body(self, x):
'''RCNN body for IVA VGG backbones.'''
if self.roi_pool_2x:
x = TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2), name='classifier_pool'))(x)
# During export, in order to map this node to UFF Flatten Op, we have to
# make sure this layer name has 'flatten' in it. Otherwise, it cannot be
# converted to UFF Flatten Op during pb to UFF conversion.
out = TimeDistributed(Flatten(name='classifier_flatten'),
name='time_distributed_flatten')(x)
out = TimeDistributed(Dense(4096, activation='relu', name='fc1',
kernel_regularizer=self.kernel_reg,
bias_regularizer=None))(out)
if self.dropout_rate > 0:
out = TimeDistributed(Dropout(self.dropout_rate))(out)
out = TimeDistributed(Dense(4096, activation='relu', name='fc2',
kernel_regularizer=self.kernel_reg,
bias_regularizer=None))(out)
if self.dropout_rate > 0:
out = TimeDistributed(Dropout(self.dropout_rate))(out)
return out
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/models/iva_vgg.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DarkNet 19/53 as backbone of Faster-RCNN."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras import layers
from keras.layers import AveragePooling2D, Flatten, TimeDistributed
from nvidia_tao_tf1.core.templates.utils import _leaky_conv, arg_scope
from nvidia_tao_tf1.cv.faster_rcnn.models.model_builder import FrcnnModel
class DarkNet(FrcnnModel):
'''DarkNet backbone for the FasterRCNN model.
This DarkNet class uses FrcnnModel as its base class and applies customizations
specific to the DarkNet backbone. Methods here override the corresponding methods
in the FrcnnModel class.
'''
def __init__(self, nlayers, batch_size_per_gpu,
rpn_stride, regularizer_type,
weight_decay, freeze_bn, freeze_blocks,
dropout_rate, drop_connect_rate,
conv_bn_share_bias, all_projections,
use_pooling, anchor_sizes, anchor_ratios,
roi_pool_size, roi_pool_2x, num_classes,
std_scaling, rpn_pre_nms_top_N, rpn_post_nms_top_N,
rpn_nms_iou_thres, gt_as_roi, rcnn_min_overlap,
rcnn_max_overlap, rcnn_train_bs, rcnn_bbox_std,
rpn_train_bs, lambda_rpn_class, lambda_rpn_regr,
lambda_rcnn_class, lambda_rcnn_regr,
backbone, results_dir, enc_key, lr_config, enable_qat=False,
**kwargs):
'''Initialize the DarkNet FasterRCNN model.
See the docstring in FrcnnModel constructor.
'''
assert nlayers in [19, 53], '''Number of layers for DarkNet can
only be 19, 53, got {}'''.format(nlayers)
super(DarkNet, self).__init__(nlayers, batch_size_per_gpu,
rpn_stride, regularizer_type,
weight_decay, freeze_bn, freeze_blocks,
dropout_rate, drop_connect_rate,
conv_bn_share_bias, all_projections,
use_pooling, anchor_sizes, anchor_ratios,
roi_pool_size, roi_pool_2x, num_classes,
std_scaling, rpn_pre_nms_top_N, rpn_post_nms_top_N,
rpn_nms_iou_thres, gt_as_roi, rcnn_min_overlap,
rcnn_max_overlap, rcnn_train_bs, rcnn_bbox_std,
rpn_train_bs, lambda_rpn_class, lambda_rpn_regr,
lambda_rcnn_class, lambda_rcnn_regr,
backbone, results_dir, enc_key, lr_config, enable_qat,
**kwargs)
def backbone(self, input_images):
'''DarkNet backbone implementation.'''
data_format = 'channels_first'
with arg_scope([_leaky_conv],
use_batch_norm=True,
data_format=data_format,
kernel_regularizer=self.kernel_reg,
bias_regularizer=None,
alpha=0.1,
padding='same',
freeze_bn=self.freeze_bn,
use_bias=not self.conv_bn_share_bias):
x = _leaky_conv(input_images, filters=32, kernel=3, strides=1,
name='conv1', trainable=not(0 in self.freeze_blocks))
if self.nlayers == 53:
x = _leaky_conv(x, filters=64, kernel=3, strides=2, name='conv2',
trainable=not(1 in self.freeze_blocks))
y = _leaky_conv(x, filters=32, kernel=1, strides=1, name='b1_conv1_1',
trainable=not(1 in self.freeze_blocks))
y = _leaky_conv(y, filters=64, kernel=3, strides=1, name='b1_conv1_2',
trainable=not(1 in self.freeze_blocks))
x = layers.Add(name='b1_add1')([x, y])
x = _leaky_conv(x, filters=128, kernel=3, strides=2, name='conv3',
trainable=not(2 in self.freeze_blocks))
for i in range(2):
y = _leaky_conv(x, filters=64, kernel=1, strides=1,
name='b2_conv{}_1'.format(i+1),
trainable=not(2 in self.freeze_blocks))
y = _leaky_conv(y, filters=128, kernel=3, strides=1,
name='b2_conv{}_2'.format(i+1),
trainable=not(2 in self.freeze_blocks))
x = layers.Add(name='b2_add{}'.format(i+1))([x, y])
x = _leaky_conv(x, filters=256, kernel=3, strides=2, name='conv4',
trainable=not(3 in self.freeze_blocks))
for i in range(8):
y = _leaky_conv(x, filters=128, kernel=1, strides=1,
name='b3_conv{}_1'.format(i+1),
trainable=not(3 in self.freeze_blocks))
y = _leaky_conv(y, filters=256, kernel=3, strides=1,
name='b3_conv{}_2'.format(i+1),
trainable=not(3 in self.freeze_blocks))
x = layers.Add(name='b3_add{}'.format(i+1))([x, y])
x = _leaky_conv(x, filters=512, kernel=3, strides=2, name='conv5',
trainable=not(4 in self.freeze_blocks))
for i in range(8):
y = _leaky_conv(x, filters=256, kernel=1, strides=1,
name='b4_conv{}_1'.format(i+1),
trainable=not(4 in self.freeze_blocks))
y = _leaky_conv(y, filters=512, kernel=3, strides=1,
name='b4_conv{}_2'.format(i+1),
trainable=not(4 in self.freeze_blocks))
x = layers.Add(name='b4_add{}'.format(i+1))([x, y])
elif self.nlayers == 19:
x = layers.MaxPooling2D(pool_size=2, strides=2, data_format=data_format,
padding='same', name='maxpool_1')(x)
x = _leaky_conv(x, filters=64, kernel=3, strides=1, name='b1_conv1',
trainable=not(1 in self.freeze_blocks))
x = layers.MaxPooling2D(pool_size=2, strides=2, data_format=data_format,
padding='same', name='maxpool_2')(x)
x = _leaky_conv(x, filters=128, kernel=3, strides=1,
name='b2_conv1', trainable=not(2 in self.freeze_blocks))
x = _leaky_conv(x, filters=64, kernel=1, strides=1,
name='b2_conv2', trainable=not(2 in self.freeze_blocks))
x = _leaky_conv(x, filters=128, kernel=3, strides=1,
name='b2_conv3', trainable=not(2 in self.freeze_blocks))
x = layers.MaxPooling2D(pool_size=2, strides=2, data_format=data_format,
padding='same', name='maxpool_3')(x)
x = _leaky_conv(x, filters=256, kernel=3, strides=1,
name='b3_conv1', trainable=not(3 in self.freeze_blocks))
x = _leaky_conv(x, filters=128, kernel=1, strides=1,
name='b3_conv2', trainable=not(3 in self.freeze_blocks))
x = _leaky_conv(x, filters=256, kernel=3, strides=1,
name='b3_conv3', trainable=not(3 in self.freeze_blocks))
x = layers.MaxPooling2D(pool_size=2, strides=2, data_format=data_format,
padding='same', name='maxpool_4')(x)
x = _leaky_conv(x, filters=512, kernel=3, strides=1,
name='b4_conv1', trainable=not(4 in self.freeze_blocks))
x = _leaky_conv(x, filters=256, kernel=1, strides=1,
name='b4_conv2', trainable=not(4 in self.freeze_blocks))
x = _leaky_conv(x, filters=512, kernel=3, strides=1,
name='b4_conv3', trainable=not(4 in self.freeze_blocks))
x = _leaky_conv(x, filters=256, kernel=1, strides=1,
name='b4_conv4', trainable=not(4 in self.freeze_blocks))
x = _leaky_conv(x, filters=512, kernel=3, strides=1,
name='b4_conv5', trainable=not(4 in self.freeze_blocks))
else:
                raise NotImplementedError(
                    'A DarkNet with nlayers=%d is not implemented.' % self.nlayers)
return x
def rcnn_body(self, x):
'''DarkNet RCNN body.'''
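        # If ROI pooling produced a 2x-sized feature map, stride 2 in the first
        # RCNN conv restores the target spatial size.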
        _stride = 2 if self.roi_pool_2x else 1
with arg_scope([_leaky_conv],
use_batch_norm=True,
data_format='channels_first',
kernel_regularizer=self.kernel_reg,
bias_regularizer=None,
alpha=0.1,
padding='same',
freeze_bn=self.freeze_bn,
use_bias=not self.conv_bn_share_bias,
use_td=True):
if self.nlayers == 53:
x = _leaky_conv(x, filters=1024, kernel=3, strides=_stride, name='conv6',
trainable=not(5 in self.freeze_blocks))
for i in range(4):
y = _leaky_conv(x, filters=512, kernel=1, strides=1,
name='b5_conv{}_1'.format(i+1),
trainable=not(5 in self.freeze_blocks))
y = _leaky_conv(y, filters=1024, kernel=3, strides=1,
name='b5_conv{}_2'.format(i+1),
trainable=not(5 in self.freeze_blocks))
x = layers.Add(name='b5_add{}'.format(i+1))([x, y])
elif self.nlayers == 19:
x = _leaky_conv(x, filters=1024, kernel=3, strides=_stride,
name='b5_conv1', trainable=not(5 in self.freeze_blocks))
x = _leaky_conv(x, filters=512, kernel=1, strides=1,
name='b5_conv2', trainable=not(5 in self.freeze_blocks))
x = _leaky_conv(x, filters=1024, kernel=3, strides=1,
name='b5_conv3', trainable=not(5 in self.freeze_blocks))
x = _leaky_conv(x, filters=512, kernel=1, strides=1,
name='b5_conv4', trainable=not(5 in self.freeze_blocks))
x = _leaky_conv(x, filters=1024, kernel=3, strides=1,
name='b5_conv5', trainable=not(5 in self.freeze_blocks))
else:
                raise NotImplementedError(
                    'A DarkNet with nlayers=%d is not implemented.' % self.nlayers)
x = TimeDistributed(AveragePooling2D(pool_size=(self.roi_pool_size, self.roi_pool_size),
strides=(1, 1), padding='valid',
data_format='channels_first', name='avg_pool'))(x)
# During export, in order to map this node to UFF Flatten Op, we have to
# make sure this layer name has 'flatten' in it. Otherwise, it cannot be
# converted to UFF Flatten Op during pb to UFF conversion.
x = TimeDistributed(Flatten(name='classifier_flatten'), name='time_distributed_flatten')(x)
return x
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/models/darknets.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MobileNet V1 model for FasterRCNN."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import AveragePooling2D, BatchNormalization, \
Conv2D, DepthwiseConv2D, \
Flatten, ReLU, TimeDistributed, \
ZeroPadding2D
from nvidia_tao_tf1.core.templates.utils import add_arg_scope, arg_scope
from nvidia_tao_tf1.cv.faster_rcnn.models.model_builder import FrcnnModel
class MobileNetV1(FrcnnModel):
    '''MobileNet V1 as the backbone of the FasterRCNN model.
    This MobileNet V1 class uses FrcnnModel as its base class and applies some
    customization specific to the MobileNet V1 backbone. Methods here override
    those in the FrcnnModel class.
    '''
def backbone(self, input_images):
'''backbone for MobileNet V1 FasterRCNN.'''
with arg_scope([_conv_block, _depthwise_conv_block],
use_batch_norm=True,
data_format='channels_first',
kernel_regularizer=self.kernel_reg,
bias_regularizer=None,
activation_type='relu',
freeze_bn=self.freeze_bn,
use_bias=not self.conv_bn_share_bias):
x = _conv_block(input_images, 32, 1, strides=(2, 2),
trainable=not (0 in self.freeze_blocks))
x = _depthwise_conv_block(x, 64, 1,
block_id=1,
trainable=not (1 in self.freeze_blocks))
x = _depthwise_conv_block(x, 128, 1,
strides=(2, 2), block_id=2,
trainable=not (2 in self.freeze_blocks))
x = _depthwise_conv_block(x, 128, 1, block_id=3,
trainable=not (3 in self.freeze_blocks))
x = _depthwise_conv_block(x, 256, 1,
strides=(2, 2), block_id=4,
trainable=not (4 in self.freeze_blocks))
x = _depthwise_conv_block(x, 256, 1, block_id=5,
trainable=not (5 in self.freeze_blocks))
x = _depthwise_conv_block(x, 512, 1,
strides=(2, 2), block_id=6,
trainable=not (6 in self.freeze_blocks))
x = _depthwise_conv_block(x, 512, 1, block_id=7,
trainable=not (7 in self.freeze_blocks))
x = _depthwise_conv_block(x, 512, 1, block_id=8,
trainable=not (8 in self.freeze_blocks))
x = _depthwise_conv_block(x, 512, 1, block_id=9,
trainable=not (9 in self.freeze_blocks))
x = _depthwise_conv_block(x, 512, 1, block_id=10,
trainable=not (10 in self.freeze_blocks))
x = _depthwise_conv_block(x, 512, 1, block_id=11,
trainable=not (11 in self.freeze_blocks))
return x
def rcnn_body(self, x):
'''RCNN body for MobileNet V1.'''
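        # roi_pool_2x means ROIs were pooled at twice the target size, so the first
        # block downsamples with stride 2.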
_stride = 2 if self.roi_pool_2x else 1
with arg_scope([_depthwise_conv_block],
use_batch_norm=True,
data_format='channels_first',
kernel_regularizer=self.kernel_reg,
bias_regularizer=None,
activation_type='relu',
freeze_bn=self.freeze_bn,
use_bias=not self.conv_bn_share_bias,
use_td=True):
x = _depthwise_conv_block(x, 1024, 1,
strides=(_stride, _stride), block_id=12)
x = _depthwise_conv_block(x, 1024, 1, block_id=13)
x = TimeDistributed(AveragePooling2D(pool_size=(self.roi_pool_size, self.roi_pool_size),
data_format='channels_first', padding='valid'))(x)
# During export, in order to map this node to UFF Flatten Op, we have to
# make sure this layer name has 'flatten' in it. Otherwise, it cannot be
# converted to UFF Flatten Op during pb to UFF conversion.
x = TimeDistributed(Flatten(name='classifier_flatten'), name='time_distributed_flatten')(x)
return x
@add_arg_scope
def _conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1),
kernel_regularizer=None, bias_regularizer=None,
use_batch_norm=True, activation_type='relu',
data_format='channels_first', freeze_bn=False, use_bias=True,
trainable=True):
"""Adds an initial convolution layer (with batch normalization and relu).
Args:
inputs: Input tensor of shape `(rows, cols, 3)`
(with `channels_last` data format) or
(3, rows, cols) (with `channels_first` data format).
            It should have exactly 3 input channels,
            and width and height should be no smaller than 32.
E.g. `(224, 224, 3)` would be one valid value.
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
alpha: controls the width of the network.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1, default number of filters from the paper
are used at each layer.
kernel: An integer or tuple/list of 2 integers, specifying the
width and height of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution
along the width and height.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
Input shape:
4D tensor with shape:
`(samples, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, rows, cols, channels)` if data_format='channels_last'.
Output shape:
4D tensor with shape:
`(samples, filters, new_rows, new_cols)`
if data_format='channels_first'
or 4D tensor with shape:
`(samples, new_rows, new_cols, filters)`
if data_format='channels_last'.
`rows` and `cols` values might have changed due to stride.
Returns:
Output tensor of block.
"""
channel_axis = 1 if data_format == 'channels_first' else -1
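    # Scale the number of output filters by the width multiplier alpha.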
filters = int(filters * alpha)
    if kernel[0] // 2 > 0:
        x = ZeroPadding2D(padding=(kernel[0] // 2, kernel[0] // 2),
                          name='conv1_pad')(inputs)
    else:
        # 1x1 kernels need no spatial padding; use the inputs directly.
        x = inputs
x = Conv2D(filters, kernel,
padding='valid',
use_bias=use_bias,
strides=strides,
name='conv1',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=trainable)(x)
if use_batch_norm:
if freeze_bn:
x = BatchNormalization(axis=channel_axis,
name='conv1_bn')(x, training=False)
else:
x = BatchNormalization(axis=channel_axis,
name='conv1_bn')(x)
if activation_type == 'relu6':
x = ReLU(6.)(x)
else:
x = ReLU()(x)
return x
@add_arg_scope
def _depthwise_conv_block(inputs, pointwise_conv_filters, alpha,
depth_multiplier=1, strides=(1, 1), block_id=1,
kernel_regularizer=None, bias_regularizer=None,
use_batch_norm=True, activation_type='relu',
data_format='channels_first', freeze_bn=False,
use_bias=True,
trainable=True,
use_td=False):
"""Adds a depthwise convolution block.
A depthwise convolution block consists of a depthwise conv,
batch normalization, relu, pointwise convolution,
batch normalization and relu activation.
Args:
inputs: Input tensor of shape `(rows, cols, channels)`
(with `channels_last` data format) or
(channels, rows, cols) (with `channels_first` data format).
pointwise_conv_filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the pointwise convolution).
alpha: controls the width of the network.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1, default number of filters from the paper
are used at each layer.
depth_multiplier: The number of depthwise convolution output channels
for each input channel.
The total number of depthwise convolution output
channels will be equal to `filters_in * depth_multiplier`.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution
along the width and height.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
block_id: Integer, a unique identification designating
the block number.
Input shape:
4D tensor with shape:
`(batch, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, rows, cols, channels)` if data_format='channels_last'.
Output shape:
4D tensor with shape:
`(batch, filters, new_rows, new_cols)`
if data_format='channels_first'
or 4D tensor with shape:
`(batch, new_rows, new_cols, filters)`
if data_format='channels_last'.
`rows` and `cols` values might have changed due to stride.
Returns:
Output tensor of block.
"""
channel_axis = 1 if data_format == 'channels_first' else -1
pointwise_conv_filters = int(pointwise_conv_filters * alpha)
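    # When use_td is True, every layer is wrapped in TimeDistributed so the block
    # can run on per-ROI feature maps (an extra leading dimension) in the RCNN head.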
layer = ZeroPadding2D((1, 1), name='conv_pad_%d' % block_id)
if use_td:
layer = TimeDistributed(layer)
x = layer(inputs)
layer = DepthwiseConv2D((3, 3),
padding='valid',
depth_multiplier=depth_multiplier,
strides=strides,
use_bias=use_bias,
name='conv_dw_%d' % block_id,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=trainable)
if use_td:
layer = TimeDistributed(layer)
x = layer(x)
if use_batch_norm:
layer = BatchNormalization(axis=channel_axis, name='conv_dw_%d_bn' % block_id)
if use_td:
layer = TimeDistributed(layer)
if freeze_bn:
x = layer(x, training=False)
else:
x = layer(x)
if activation_type == 'relu6':
x = ReLU(6.)(x)
else:
x = ReLU()(x)
layer = Conv2D(pointwise_conv_filters,
(1, 1),
padding='same',
use_bias=use_bias,
strides=(1, 1),
name='conv_pw_%d' % block_id,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=trainable)
if use_td:
layer = TimeDistributed(layer)
x = layer(x)
if use_batch_norm:
layer = BatchNormalization(axis=channel_axis, name='conv_pw_%d_bn' % block_id)
if use_td:
layer = TimeDistributed(layer)
if freeze_bn:
x = layer(x, training=False)
else:
x = layer(x)
if activation_type == 'relu6':
x = ReLU(6.)(x)
else:
x = ReLU()(x)
return x
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/models/mobilenet_v1.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ResNet101 model for FasterRCNN."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras import backend as K
from keras.layers import Activation, Add, AveragePooling2D, BatchNormalization, \
Conv2D, Flatten, \
MaxPooling2D, TimeDistributed, ZeroPadding2D
from nvidia_tao_tf1.cv.faster_rcnn.models.model_builder import FrcnnModel
class ResNet101(FrcnnModel):
    '''ResNet101 as the backbone of the FasterRCNN model.
    This ResNet101 class uses FrcnnModel as its base class and applies some customization
    specific to the ResNet101 backbone. Methods here override those in the FrcnnModel class.
    '''
def backbone(self, input_images):
        '''Backbone of the ResNet101 FasterRCNN model.'''
bn_axis = 1
freeze0 = bool(0 in self.freeze_blocks)
freeze1 = bool(1 in self.freeze_blocks)
freeze2 = bool(2 in self.freeze_blocks)
freeze3 = bool(3 in self.freeze_blocks)
x = ZeroPadding2D(padding=((3, 3), (3, 3)), name='conv1_pad')(input_images)
x = Conv2D(
64, 7, strides=2,
use_bias=not self.conv_bn_share_bias,
kernel_regularizer=self.kernel_reg,
name='conv1_conv',
trainable=not freeze0
)(x)
if self.freeze_bn:
x = BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
name='conv1_bn')(x, training=False)
else:
x = BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
name='conv1_bn')(x)
x = Activation('relu', name='conv1_relu')(x)
x = ZeroPadding2D(padding=((1, 1), (1, 1)), name='pool1_pad')(x)
x = MaxPooling2D(3, strides=2, name='pool1_pool')(x)
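        # ResNet-101 stages: conv2_x (3 blocks), conv3_x (4 blocks), conv4_x (23 blocks).
        # The conv5_x stage (3 blocks) is applied per ROI in rcnn_body.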
x = self.stack1(x, 64, 3, stride1=1, name='conv2',
freeze_bn=self.freeze_bn, freeze=freeze1)
x = self.stack1(x, 128, 4, name='conv3',
freeze_bn=self.freeze_bn, freeze=freeze2)
x = self.stack1(x, 256, 23, name='conv4',
freeze_bn=self.freeze_bn, freeze=freeze3)
return x
def rcnn_body(self, x):
'''RCNN body.'''
_stride = 2 if self.roi_pool_2x else 1
x = self.stack1(x, 512, 3, name='conv5', stride1=_stride,
freeze_bn=self.freeze_bn, use_td=True)
x = TimeDistributed(AveragePooling2D((self.roi_pool_size, self.roi_pool_size),
name='avg_pool'))(x)
# During export, in order to map this node to UFF Flatten Op, we have to
# make sure this layer name has 'flatten' in it. Otherwise, it cannot be
# converted to UFF Flatten Op during pb to UFF conversion.
x = TimeDistributed(Flatten(name='classifier_flatten'),
name='time_distributed_flatten')(x)
return x
def block1(self, x, filters, kernel_size=3, stride=1,
conv_shortcut=True, name=None, freeze_bn=False,
freeze=False, use_td=False):
"""A residual block."""
bn_axis = 3 if K.image_data_format() == 'channels_last' else 1
        if conv_shortcut:
layer = Conv2D(
4 * filters,
1,
strides=stride,
name=name + '_0_conv',
trainable=not freeze,
kernel_regularizer=self.kernel_reg
)
if use_td:
layer = TimeDistributed(layer)
shortcut = layer(x)
layer = BatchNormalization(
axis=bn_axis,
epsilon=1.001e-5,
name=name + '_0_bn',
)
if use_td:
layer = TimeDistributed(layer)
if freeze_bn:
shortcut = layer(shortcut, training=False)
else:
shortcut = layer(shortcut)
else:
shortcut = x
layer = Conv2D(
filters, 1, strides=stride,
name=name + '_1_conv',
trainable=not freeze,
kernel_regularizer=self.kernel_reg
)
if use_td:
layer = TimeDistributed(layer)
x = layer(x)
layer = BatchNormalization(
axis=bn_axis, epsilon=1.001e-5,
name=name + '_1_bn'
)
if use_td:
layer = TimeDistributed(layer)
if freeze_bn:
x = layer(x, training=False)
else:
x = layer(x)
x = Activation('relu', name=name + '_1_relu')(x)
layer = Conv2D(
filters, kernel_size, padding='SAME',
name=name + '_2_conv',
trainable=not freeze,
kernel_regularizer=self.kernel_reg
)
if use_td:
layer = TimeDistributed(layer)
x = layer(x)
layer = BatchNormalization(
axis=bn_axis, epsilon=1.001e-5,
name=name + '_2_bn'
)
if use_td:
layer = TimeDistributed(layer)
if freeze_bn:
x = layer(x, training=False)
else:
x = layer(x)
x = Activation('relu', name=name + '_2_relu')(x)
layer = Conv2D(
4 * filters, 1,
name=name + '_3_conv',
trainable=not freeze,
kernel_regularizer=self.kernel_reg
)
if use_td:
layer = TimeDistributed(layer)
x = layer(x)
layer = BatchNormalization(
axis=bn_axis, epsilon=1.001e-5,
name=name + '_3_bn'
)
if use_td:
layer = TimeDistributed(layer)
if freeze_bn:
x = layer(x, training=False)
else:
x = layer(x)
x = Add(name=name + '_add')([shortcut, x])
x = Activation('relu', name=name + '_out')(x)
return x
def stack1(self, x, filters, blocks, stride1=2, name=None,
freeze_bn=False, freeze=False, use_td=False):
"""A set of stacked residual blocks."""
x = self.block1(x, filters, stride=stride1, name=name + '_block1',
freeze=freeze, freeze_bn=freeze_bn, use_td=use_td)
for i in range(2, blocks + 1):
x = self.block1(x, filters, conv_shortcut=False, name=name + '_block' + str(i),
freeze=freeze, freeze_bn=freeze_bn, use_td=use_td)
return x
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/models/resnet101.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Unit test for FasterRCNN model pruning functionality.'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import keras
from keras.layers import Input
import pytest
from nvidia_tao_tf1.core.pruning.pruning import prune
from nvidia_tao_tf1.cv.faster_rcnn.models.darknets import DarkNet
from nvidia_tao_tf1.cv.faster_rcnn.models.googlenet import GoogleNet
from nvidia_tao_tf1.cv.faster_rcnn.models.iva_vgg import IVAVGG
from nvidia_tao_tf1.cv.faster_rcnn.models.mobilenet_v1 import MobileNetV1
from nvidia_tao_tf1.cv.faster_rcnn.models.mobilenet_v2 import MobileNetV2
from nvidia_tao_tf1.cv.faster_rcnn.models.resnets import ResNet
from nvidia_tao_tf1.cv.faster_rcnn.models.vgg16 import VGG16
from nvidia_tao_tf1.cv.faster_rcnn.spec_loader import spec_loader, spec_wrapper
excluded_layers = ['rpn_out_class',
'rpn_out_regress',
'dense_class_td',
'dense_regress_td']
backbone_configs_all = [
(ResNet, 10, False, False),
(ResNet, 10, True, True),
(ResNet, 10, False, True),
(ResNet, 18, True, False),
(ResNet, 18, False, False),
(ResNet, 18, True, True),
(ResNet, 18, False, True),
(ResNet, 34, True, False),
(ResNet, 34, False, False),
(ResNet, 34, True, True),
(ResNet, 34, False, True),
(ResNet, 50, True, False),
(ResNet, 50, False, False),
(ResNet, 50, True, True),
(ResNet, 50, False, True),
(ResNet, 101, True, False),
(ResNet, 101, False, False),
(ResNet, 101, True, True),
(ResNet, 101, False, True),
(VGG16, None, False, True),
(IVAVGG, 16, False, True),
(IVAVGG, 19, False, True),
(GoogleNet, None, False, True),
(MobileNetV1, None, False, False),
(MobileNetV2, None, False, False),
(MobileNetV2, None, True, False),
(DarkNet, 19, None, None),
(DarkNet, 53, None, None),
]
backbone_configs_subset = [
(ResNet, 18, True, False),
(ResNet, 50, True, False),
]
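# Run the full backbone matrix only at a deeper CI test level; default to a small
# subset to keep the standard test run fast.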
if int(os.getenv("TLT_TF_CI_TEST_LEVEL", "0")) > 0:
backbone_configs = backbone_configs_all
else:
backbone_configs = backbone_configs_subset
keras.backend.set_image_data_format('channels_first')
@pytest.fixture()
def _spec_file():
'''spec file.'''
parent_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
return os.path.join(parent_dir, 'experiment_spec/default_spec_ci.txt')
@pytest.fixture()
def spec(_spec_file):
'''spec.'''
return spec_wrapper.ExperimentSpec(spec_loader.load_experiment_spec(_spec_file))
@pytest.mark.parametrize("model_type, nlayers, all_projections, use_pooling",
backbone_configs)
def test_prune(spec, tmpdir, model_type, nlayers, all_projections, use_pooling):
'''Build the model and prune it.'''
keras.backend.clear_session()
model = model_type(nlayers, spec.batch_size_per_gpu,
spec.rpn_stride, spec.reg_type,
spec.weight_decay, spec.freeze_bn, spec.freeze_blocks,
spec.dropout_rate, spec.drop_connect_rate,
spec.conv_bn_share_bias, all_projections,
use_pooling, spec.anchor_sizes, spec.anchor_ratios,
spec.roi_pool_size, spec.roi_pool_2x, spec.num_classes,
spec.std_scaling, spec.rpn_pre_nms_top_N, spec.rpn_post_nms_top_N,
spec.rpn_nms_iou_thres, spec.gt_as_roi,
spec.rcnn_min_overlap, spec.rcnn_max_overlap, spec.rcnn_train_bs,
spec.rcnn_regr_std, spec.rpn_train_bs, spec.lambda_rpn_class,
spec.lambda_rpn_regr, spec.lambda_cls_class, spec.lambda_cls_regr,
f"frcnn_{spec._backbone.replace(':', '_')}", tmpdir,
spec.enc_key, spec.lr_scheduler)
img_input = Input(shape=spec.input_dims, name='input_image')
gt_cls_input = Input(shape=(None,), name='input_gt_cls')
gt_bbox_input = Input(shape=(None, 4), name='input_gt_bbox')
model.build_keras_model(img_input, gt_cls_input, gt_bbox_input)
prune(model=model.keras_model,
method='min_weight',
normalizer='max',
criterion='L2',
granularity=16,
min_num_filters=16,
threshold=0.7,
equalization_criterion='union',
excluded_layers=excluded_layers)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/models/tests/test_prune.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit test for FasterRCNN model parameter configurations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
# Forcing this test to use GPU 0. For some reason
# after QAT patch, tensorflow seems to be looking for
# GPU id 1.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import keras.backend as K
from keras.layers import Input
import numpy as np
import tensorflow as tf
from nvidia_tao_tf1.cv.common.utils import hvd_keras
from nvidia_tao_tf1.cv.detectnet_v2.proto.regularizer_config_pb2 import RegularizerConfig
from nvidia_tao_tf1.cv.faster_rcnn.models.utils import (
build_inference_model,
select_model_type,
)
from nvidia_tao_tf1.cv.faster_rcnn.spec_loader import spec_loader, spec_wrapper
from nvidia_tao_tf1.cv.faster_rcnn.utils import utils
np.random.seed(42)
tf.set_random_seed(42)
logger = logging.getLogger(__name__)
hvd = None
class TestModelConfig(object):
'''Main class to test model parameter configurations.'''
def _spec_file(self):
'''default spec file.'''
parent_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
return os.path.join(parent_dir, 'experiment_spec/default_spec_ci.txt')
def _spec(self):
'''spec.'''
return spec_wrapper.ExperimentSpec(spec_loader.load_experiment_spec(self._spec_file()))
def config(self):
'''Configuration.'''
K.clear_session()
global hvd # noqa pylint: disable=W0603
hvd = hvd_keras()
hvd.init()
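        # Pin the visible GPU to this process's Horovod local rank and allow memory
        # growth so the test does not reserve the whole device.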
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = str(hvd.local_rank())
K.set_session(tf.Session(config=config))
K.set_image_data_format('channels_first')
self.session = K.get_session()
self.spec = self._spec()
def build_model(self, results_dir, config_override=None):
'''Build a model from spec with possibly outside config for overriding.'''
spec = self.spec
if config_override is None:
config_override = dict()
for k in config_override.keys():
assert hasattr(spec, k)
setattr(spec, k, config_override[k])
self.override_spec = spec
img_input = Input(shape=spec.input_dims, name='input_image')
gt_cls_input = Input(shape=(None,), name='input_gt_cls')
gt_bbox_input = Input(shape=(None, 4), name='input_gt_bbox')
# build the model
model_type = select_model_type(spec._backbone)
self.model = model_type(
spec.nlayers, spec.batch_size_per_gpu,
spec.rpn_stride, spec.reg_type,
spec.weight_decay, spec.freeze_bn, spec.freeze_blocks,
spec.dropout_rate, spec.drop_connect_rate,
spec.conv_bn_share_bias, spec.all_projections,
spec.use_pooling, spec.anchor_sizes, spec.anchor_ratios,
spec.roi_pool_size, spec.roi_pool_2x, spec.num_classes,
spec.std_scaling, spec.rpn_pre_nms_top_N, spec.rpn_post_nms_top_N,
spec.rpn_nms_iou_thres, spec.gt_as_roi,
spec.rcnn_min_overlap, spec.rcnn_max_overlap, spec.rcnn_train_bs,
spec.rcnn_regr_std, spec.rpn_train_bs, spec.lambda_rpn_class,
spec.lambda_rpn_regr, spec.lambda_cls_class, spec.lambda_cls_regr,
"frcnn_"+spec._backbone.replace(":", "_"), results_dir,
spec.enc_key, spec.lr_scheduler,
spec.enable_qat,
activation_type=spec.activation_type
)
self.model.build_keras_model(img_input, gt_cls_input, gt_bbox_input)
return self.model
def test_input_shape(self, tmpdir):
'''Check the model input shape is the same as in spec file.'''
self.config()
K.set_learning_phase(1)
self.build_model(tmpdir)
input_image_shape = self.model.keras_model.get_layer('input_image').output_shape
assert input_image_shape == (None,
self.override_spec.image_c,
self.override_spec.image_h,
self.override_spec.image_w)
input_gt_bbox_shape = self.model.keras_model.get_layer('input_gt_bbox').output_shape
assert input_gt_bbox_shape == (None,
None,
4)
input_cls_shape = self.model.keras_model.get_layer('input_gt_cls').output_shape
assert input_cls_shape == (None,
None)
def test_anchor_boxes(self, tmpdir):
'''Check the anchor boxes.'''
self.config()
K.set_learning_phase(1)
self.build_model(tmpdir)
# check anchors for Proposal layer
proposal = None
for l in self.model.keras_model.layers:
if l.name.startswith('proposal') and not l.name.startswith('proposal_target'):
proposal = l
break
assert proposal is not None
assert proposal.anchor_sizes == self.override_spec.anchor_sizes
ar = [np.sqrt(r) for r in self.override_spec.anchor_ratios]
assert proposal.anchor_ratios == ar
def test_rpn_nms_params(self, tmpdir):
'''Check the RPN NMS parameters.'''
self.config()
K.set_learning_phase(1)
self.build_model(tmpdir)
K.get_session().run(utils.get_init_ops())
proposal = None
for l in self.model.keras_model.layers:
if l.name.startswith('proposal_') and not l.name.startswith('proposal_target_'):
proposal = l
break
assert proposal is not None
        assert proposal.pre_nms_top_N == self.override_spec.rpn_pre_nms_top_N
        assert proposal.post_nms_top_N == self.override_spec.rpn_post_nms_top_N
        assert proposal.nms_iou_thres == self.override_spec.rpn_nms_iou_thres
def test_rcnn_iou_thres(self, tmpdir):
'''Check the RCNN IoU thresholds used to generate the RCNN target tensors.'''
self.config()
K.set_learning_phase(1)
self.build_model(tmpdir)
K.get_session().run(utils.get_init_ops())
pt_layer = None
for l in self.model.keras_model.layers:
if l.name.startswith('proposal_target_'):
pt_layer = l
break
assert pt_layer is not None
assert pt_layer.iou_high_thres == self.override_spec.rcnn_max_overlap
assert pt_layer.iou_low_thres == self.override_spec.rcnn_min_overlap
def test_regularizers(self, tmpdir):
'''Check the regularizers.'''
self.config()
K.set_learning_phase(1)
model = self.build_model(tmpdir)
K.get_session().run(utils.get_init_ops())
mconfig = model.keras_model.get_config()
reg_type = self.override_spec.reg_type
if reg_type == RegularizerConfig.L1:
reg_type = 'l1'
elif reg_type == RegularizerConfig.L2:
reg_type = 'l2'
        else:
            raise ValueError(
                "Should use either L1 or L2 regularizer for test_regularizers."
                " Got {}".format(reg_type)
            )
# Obtain type and scope of the regularizer
for layer, layer_config in zip(model.keras_model.layers, mconfig['layers']):
# Regularizer settings
if hasattr(layer, 'kernel_regularizer'):
assert layer_config['config']['kernel_regularizer']['config'][reg_type] == \
self.override_spec.weight_decay
def test_inference_model_config(self, tmpdir):
'''Check the model config for inference model.'''
self.config()
K.set_learning_phase(1)
model = self.build_model(tmpdir)
K.get_session().run(utils.get_init_ops())
train_model = model.keras_model
config_override = {'pre_nms_top_N': self.override_spec.infer_rpn_pre_nms_top_N,
'post_nms_top_N': self.override_spec.infer_rpn_post_nms_top_N,
'nms_iou_thres': self.override_spec.infer_rpn_nms_iou_thres,
'bs_per_gpu': 1}
infer_model = build_inference_model(train_model, config_override)
proposal = None
for l in infer_model.layers:
if l.name.startswith('proposal_') and not l.name.startswith('proposal_target_'):
proposal = l
break
assert proposal is not None
assert proposal.pre_nms_top_N == self.override_spec.infer_rpn_pre_nms_top_N
assert proposal.post_nms_top_N == self.override_spec.infer_rpn_post_nms_top_N
assert proposal.nms_iou_thres == self.override_spec.infer_rpn_nms_iou_thres
def test_eval_model_config(self, tmpdir):
'''Check the model config for evaluation model.'''
self.config()
K.set_learning_phase(1)
model = self.build_model(tmpdir)
K.get_session().run(utils.get_init_ops())
train_model = model.keras_model
config_override = {'pre_nms_top_N': self.override_spec.eval_rpn_pre_nms_top_N,
'post_nms_top_N': self.override_spec.eval_rpn_post_nms_top_N,
'nms_iou_thres': self.override_spec.eval_rpn_nms_iou_thres,
'bs_per_gpu': 1}
eval_model = build_inference_model(train_model, config_override)
proposal = None
for l in eval_model.layers:
if l.name.startswith('proposal_') and not l.name.startswith('proposal_target_'):
proposal = l
break
assert proposal is not None
assert proposal.pre_nms_top_N == self.override_spec.eval_rpn_pre_nms_top_N
assert proposal.post_nms_top_N == self.override_spec.eval_rpn_post_nms_top_N
assert proposal.nms_iou_thres == self.override_spec.eval_rpn_nms_iou_thres
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/models/tests/test_model_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Unit test for FasterRCNN model pruning functionality with QAT enabled.'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import keras
from keras.layers import Input
from keras.utils.generic_utils import CustomObjectScope
import pytest
from nvidia_tao_tf1.core.pruning.pruning import prune
from nvidia_tao_tf1.cv.common.utils import CUSTOM_OBJS
from nvidia_tao_tf1.cv.faster_rcnn.models.darknets import DarkNet
from nvidia_tao_tf1.cv.faster_rcnn.models.googlenet import GoogleNet
from nvidia_tao_tf1.cv.faster_rcnn.models.iva_vgg import IVAVGG
from nvidia_tao_tf1.cv.faster_rcnn.models.mobilenet_v1 import MobileNetV1
from nvidia_tao_tf1.cv.faster_rcnn.models.mobilenet_v2 import MobileNetV2
from nvidia_tao_tf1.cv.faster_rcnn.models.resnets import ResNet
from nvidia_tao_tf1.cv.faster_rcnn.models.vgg16 import VGG16
from nvidia_tao_tf1.cv.faster_rcnn.spec_loader import spec_loader, spec_wrapper
excluded_layers = ['rpn_out_class',
'rpn_out_regress',
'dense_class_td',
'dense_regress_td']
backbone_configs_all = [
(ResNet, 10, False, False),
(ResNet, 10, True, True),
(ResNet, 10, False, True),
(ResNet, 18, True, False),
(ResNet, 18, False, False),
(ResNet, 18, True, True),
(ResNet, 18, False, True),
(ResNet, 34, True, False),
(ResNet, 34, False, False),
(ResNet, 34, True, True),
(ResNet, 34, False, True),
(ResNet, 50, True, False),
(ResNet, 50, False, False),
(ResNet, 50, True, True),
(ResNet, 50, False, True),
(ResNet, 101, True, False),
(ResNet, 101, False, False),
(ResNet, 101, True, True),
(ResNet, 101, False, True),
(VGG16, None, False, True),
(IVAVGG, 16, False, True),
(IVAVGG, 19, False, True),
(GoogleNet, None, False, True),
(MobileNetV1, None, False, False),
(MobileNetV2, None, False, False),
(MobileNetV2, None, True, False),
(DarkNet, 19, None, None),
(DarkNet, 53, None, None),
]
backbone_configs_subset = [
(ResNet, 18, True, False),
(ResNet, 50, True, False),
]
if int(os.getenv("TLT_TF_CI_TEST_LEVEL", "0")) > 0:
backbone_configs = backbone_configs_all
else:
backbone_configs = backbone_configs_subset
keras.backend.set_image_data_format('channels_first')
@pytest.fixture()
def _spec_file():
'''spec file.'''
parent_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
return os.path.join(parent_dir, 'experiment_spec/default_spec_ci.txt')
@pytest.fixture()
def spec(_spec_file):
'''spec.'''
return spec_wrapper.ExperimentSpec(spec_loader.load_experiment_spec(_spec_file))
@pytest.mark.parametrize("model_type, nlayers, all_projections, use_pooling",
backbone_configs)
def test_prune_qat(spec, tmpdir, model_type, nlayers, all_projections, use_pooling):
'''Build the QAT model and prune it.'''
keras.backend.clear_session()
model = model_type(nlayers, spec.batch_size_per_gpu,
spec.rpn_stride, spec.reg_type,
spec.weight_decay, spec.freeze_bn, spec.freeze_blocks,
spec.dropout_rate, spec.drop_connect_rate,
spec.conv_bn_share_bias, all_projections,
use_pooling, spec.anchor_sizes, spec.anchor_ratios,
spec.roi_pool_size, spec.roi_pool_2x, spec.num_classes,
spec.std_scaling, spec.rpn_pre_nms_top_N, spec.rpn_post_nms_top_N,
spec.rpn_nms_iou_thres, spec.gt_as_roi,
spec.rcnn_min_overlap, spec.rcnn_max_overlap, spec.rcnn_train_bs,
spec.rcnn_regr_std, spec.rpn_train_bs, spec.lambda_rpn_class,
spec.lambda_rpn_regr, spec.lambda_cls_class, spec.lambda_cls_regr,
f"frcnn_{spec._backbone.replace(':', '_')}", tmpdir,
spec.enc_key, spec.lr_scheduler,
enable_qat=True)
img_input = Input(shape=spec.input_dims, name='input_image')
gt_cls_input = Input(shape=(None,), name='input_gt_cls')
gt_bbox_input = Input(shape=(None, 4), name='input_gt_bbox')
model.build_keras_model(img_input, gt_cls_input, gt_bbox_input)
# save the model and reload it to avoid the appended _1 in node names
os_handle, tmp_keras_file = tempfile.mkstemp()
os.close(os_handle)
os.remove(tmp_keras_file)
model.keras_model.save(tmp_keras_file)
keras.backend.clear_session()
with CustomObjectScope(CUSTOM_OBJS):
new_model = keras.models.load_model(tmp_keras_file)
os.remove(tmp_keras_file)
prune(model=new_model,
method='min_weight',
normalizer='max',
criterion='L2',
granularity=16,
min_num_filters=16,
threshold=0.7,
equalization_criterion='union',
excluded_layers=excluded_layers)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/models/tests/test_prune_qat.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorRT inference model builder for FasterRCNN."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from io import open # Python 2/3 compatibility. pylint: disable=W0622
import logging
import os
import numpy as np
import pycuda.autoinit # noqa pylint: disable=W0611
import pycuda.driver as cuda
import tensorrt as trt
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level='INFO')
logger = logging.getLogger(__name__)
BINDING_TO_DTYPE_UFF = {
"input_image": np.float32, "NMS": np.float32, "NMS_1": np.int32,
}
BINDING_TO_DTYPE_ONNX = {
"input_image": np.float32, "nms_out": np.float32, "nms_out_1": np.int32,
}
class CacheCalibrator(trt.IInt8EntropyCalibrator2):
"""Calibrator class that loads a cache file directly.
This inherits from ``trt.IInt8EntropyCalibrator2`` to implement
the calibration interface that TensorRT needs to calibrate the
INT8 quantization factors.
    Args:
        cache_filename (str): path of the calibration cache file to read.
    """
def __init__(self, cache_filename, *args, **kwargs):
"""Init routine."""
super(CacheCalibrator, self).__init__(*args, **kwargs)
self._cache_filename = cache_filename
def get_batch(self, names):
"""Dummy method since we are going to use cache file directly.
Args:
names (list): list of memory bindings names.
"""
return None
def get_batch_size(self):
"""Return batch size."""
return 8
def read_calibration_cache(self):
"""Read calibration from file."""
if os.path.exists(self._cache_filename):
with open(self._cache_filename, "rb") as f:
return f.read()
else:
            raise ValueError(
                'Calibration cache file not found: {}'.format(self._cache_filename))
def write_calibration_cache(self, cache):
"""Do nothing since we already have cache file.
Args:
cache (memoryview): buffer to read calibration data from.
"""
return
class Engine(object):
"""A class to represent a TensorRT engine.
This class provides utility functions for performing inference on
a TensorRT engine.
Args:
engine: the CUDA engine to wrap.
"""
def __init__(self, engine, batch_size, input_width, input_height):
"""Initialization routine."""
self._engine = engine
self._context = None
self._batch_size = batch_size
self._input_width = input_width
self._input_height = input_height
self._is_uff = self._engine.has_implicit_batch_dimension
@contextlib.contextmanager
def _create_context(self):
"""Create an execution context and allocate input/output buffers."""
BINDING_TO_DTYPE = BINDING_TO_DTYPE_UFF if self._is_uff else \
BINDING_TO_DTYPE_ONNX
try:
with self._engine.create_execution_context() as self._context:
# Create stream and events to measure timings.
self._stream = cuda.Stream()
self._start = cuda.Event()
self._end = cuda.Event()
self._device_buffers = []
self._host_buffers = []
self._input_binding_ids = {}
if self._is_uff:
# make sure the infer batch size is no more than
# engine.max_batch_size
assert self._batch_size <= self._engine.max_batch_size, (
f"Error: inference batch size: {self._batch_size} is larger than "
f"engine's max_batch_size: {self._engine.max_batch_size}"
)
infer_batch_size = self._batch_size
for i in range(self._engine.num_bindings):
if len(list(self._engine.get_binding_shape(i))) == 3:
dims = trt.Dims3(self._engine.get_binding_shape(i))
size = trt.volume(dims)
elt_count = size * infer_batch_size
target_shape = (infer_batch_size, dims[0], dims[1], dims[2])
elif len(list(self._engine.get_binding_shape(i))) == 4:
# with explicit batch dim
dims = trt.Dims4(self._engine.get_binding_shape(i))
elt_count = infer_batch_size * dims[1] * dims[2] * dims[3]
target_shape = (infer_batch_size, dims[1], dims[2], dims[3])
else:
                        raise ValueError(
                            'Binding shapes can only have 3 or 4 dimensions, '
                            'got {}.'.format(self._engine.get_binding_shape(i)))
binding_name = self._engine.get_binding_name(i)
dtype = BINDING_TO_DTYPE[binding_name]
if self._engine.binding_is_input(i):
self._context.set_optimization_profile_async(0, self._stream.handle)
self._context.set_binding_shape(i, target_shape)
self._input_binding_ids[binding_name] = i
page_locked_mem = None
else:
page_locked_mem = cuda.pagelocked_empty(elt_count, dtype=dtype)
page_locked_mem = page_locked_mem.reshape(*target_shape)
# Allocate pagelocked memory.
self._host_buffers.append(page_locked_mem)
_mem_alloced = cuda.mem_alloc(elt_count * np.dtype(dtype).itemsize)
self._device_buffers.append(_mem_alloced)
if not self._input_binding_ids:
raise RuntimeError("No input bindings detected.")
yield
finally:
# Release context and allocated memory.
self._release_context()
def _do_infer(self, batch):
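        """Copy inputs to the device, execute the engine, and fetch decoded outputs."""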
bindings = [int(device_buffer) for device_buffer in self._device_buffers]
if not isinstance(batch, dict):
if len(self._input_binding_ids) > 1:
                raise ValueError(
                    'Input node names must be provided in case of multiple inputs. '
                    'Got these inputs: %s' % self._input_binding_ids.keys())
# Single input case.
batch = {list(self._input_binding_ids.keys())[0]: batch}
batch_sizes = {array.shape[0] for array in batch.values()}
if len(batch_sizes) != 1:
            raise ValueError(
                'All arrays must have the same batch size. Got %s.' % repr(batch_sizes))
batch_size = batch_sizes.pop()
assert batch_size == self._batch_size, (
f"Inference data batch size: {batch_size} is not equal to batch size "
f"of the input/output buffers: {self._batch_size}."
)
# Transfer input data to device.
for node_name, array in batch.items():
array = array.astype('float32')
cuda.memcpy_htod_async(self._device_buffers[self._input_binding_ids[node_name]],
array, self._stream)
# Execute model.
self._start.record(self._stream)
if self._is_uff:
self._context.execute_async(batch_size, bindings, self._stream.handle, None)
else:
self._context.execute_async_v2(bindings, self._stream.handle, None)
self._end.record(self._stream)
self._end.synchronize()
# Transfer predictions back.
outputs = dict()
for i in range(self._engine.num_bindings):
if not self._engine.binding_is_input(i):
cuda.memcpy_dtoh_async(self._host_buffers[i], self._device_buffers[i],
self._stream)
out = self._host_buffers[i][:batch_size, ...]
name = self._engine.get_binding_name(i)
outputs[name] = out
# outputs["nms_out"][:, 0, :, 0] is image index, not useful
denormalize = np.array(
[self._input_width, self._input_height,
self._input_width, self._input_height],
dtype=np.float32
)
if self._is_uff:
nms_out_name = "NMS"
nms_out_1_name = "NMS_1"
else:
nms_out_name = "nms_out"
nms_out_1_name = "nms_out_1"
# (x1, y1, x2, y2), shape = (N, 1, R, 4)
nmsed_boxes = outputs[nms_out_name][:, 0, :, 3:7] * denormalize
# convert to (y1, x1, y2, x2) to keep consistent with keras model format
nmsed_boxes = np.take(nmsed_boxes, np.array([1, 0, 3, 2]), axis=2)
# shape = (N, 1, R, 1)
nmsed_scores = outputs[nms_out_name][:, 0, :, 2]
# shape = (N, 1, R, 1)
nmsed_classes = outputs[nms_out_name][:, 0, :, 1]
# shape = (N, 1, 1, 1)
num_dets = outputs[nms_out_1_name][:, 0, 0, 0]
rois_output = None
return [nmsed_boxes, nmsed_scores, nmsed_classes, num_dets, rois_output]
def _release_context(self):
"""Release context and allocated memory."""
        for device_buffer in self._device_buffers:
            device_buffer.free()
        # Drop buffer, event, and stream references so they can be reclaimed.
        del self._device_buffers
        del self._host_buffers
        del self._start
        del self._end
        del self._stream
def infer(self, batch):
"""Perform inference on a Numpy array.
Args:
batch (ndarray): array to perform inference on.
Returns:
A dictionary of outputs where keys are output names
and values are output tensors.
"""
with self._create_context():
outputs = self._do_infer(np.ascontiguousarray(batch))
return outputs
def infer_iterator(self, iterator):
"""Perform inference on an iterator of Numpy arrays.
This method should be preferred to ``infer`` when performing
inference on multiple Numpy arrays since this will re-use
the allocated execution and memory.
Args:
iterator: an iterator that yields Numpy arrays.
Yields:
A dictionary of outputs where keys are output names
and values are output tensors, for each array returned
by the iterator.
Returns:
None.
"""
with self._create_context():
for batch in iterator:
outputs = self._do_infer(batch)
yield outputs
def save(self, filename):
"""Save serialized engine into specified file.
Args:
filename (str): name of file to save engine to.
"""
with open(filename, "wb") as outf:
outf.write(self._engine.serialize())
class TrtModel(object):
    '''A TensorRT model builder for FasterRCNN model inference based on TensorRT.
    The TensorRT model builder builds a TensorRT engine from the engine file produced by
    the tlt-converter and does inference in TensorRT. We use this as a way to verify the
    TensorRT inference functionality of the FasterRCNN model.
    '''
def __init__(self,
trt_engine_file,
batch_size,
input_h,
input_w):
'''Initialize the TensorRT model builder.'''
self._trt_engine_file = trt_engine_file
self._batch_size = batch_size
self._input_w = input_w
self._input_h = input_h
self._trt_logger = trt.Logger(trt.Logger.Severity.WARNING)
trt.init_libnvinfer_plugins(self._trt_logger, "")
def load_trt_engine_file(self):
'''load TensorRT engine file generated by tlt-converter.'''
runtime = trt.Runtime(self._trt_logger)
with open(self._trt_engine_file, 'rb') as f:
_engine = f.read()
logger.info("Loading existing TensorRT engine and "
"ignoring the specified batch size and data type"
" information in spec file.")
self.engine = Engine(runtime.deserialize_cuda_engine(_engine),
self._batch_size,
self._input_w,
self._input_h)
def build_or_load_trt_engine(self):
'''Build engine or load engine depends on whether a trt engine is available.'''
if self._trt_engine_file is not None:
# load engine
            logger.info('Loading TensorRT engine file: {} for inference.'.format(
                self._trt_engine_file))
self.load_trt_engine_file()
else:
            raise ValueError('A TensorRT engine file should be provided '
                             'for TensorRT based inference.')
def predict(self, batch):
'''Do inference with TensorRT engine.'''
return self.engine.infer(batch)
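# A minimal usage sketch (the engine path below is hypothetical; the input
# dimensions are assumed to match those the engine was built with):
#   trt_model = TrtModel('/path/to/frcnn.engine', batch_size=1, input_h=384, input_w=1248)
#   trt_model.build_or_load_trt_engine()
#   images = np.zeros((1, 3, 384, 1248), dtype=np.float32)
#   nmsed_boxes, nmsed_scores, nmsed_classes, num_dets, _ = trt_model.predict(images)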
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/tensorrt_inference/tensorrt_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorRT inference model builder for FasterRCNN."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/tensorrt_inference/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Unit test for FasterRCNN model TensorRT inference functionality.'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import subprocess
import tempfile
import keras
from keras.layers import Input
import numpy as np
import pytest
import tensorflow as tf
from nvidia_tao_tf1.cv.common.utils import encode_from_keras
from nvidia_tao_tf1.cv.faster_rcnn.models.resnets import ResNet
from nvidia_tao_tf1.cv.faster_rcnn.models.utils import build_inference_model
from nvidia_tao_tf1.cv.faster_rcnn.spec_loader import spec_loader, spec_wrapper
from nvidia_tao_tf1.cv.faster_rcnn.tensorrt_inference.tensorrt_model import TrtModel
backbone_configs = [
(ResNet, 10, False, False),
]
keras.backend.set_image_data_format('channels_first')
@pytest.fixture()
def _spec_file():
'''default spec file.'''
parent_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
return os.path.join(parent_dir, 'experiment_spec/default_spec_ci.txt')
@pytest.fixture()
def spec(_spec_file):
'''spec.'''
return spec_wrapper.ExperimentSpec(spec_loader.load_experiment_spec(_spec_file))
@pytest.mark.skipif(os.getenv("RUN_ON_CI", "0") == "1", reason="Cannot be run on CI")
@pytest.mark.script_launch_mode('subprocess')
@pytest.mark.parametrize("model_type, nlayers, all_projections, use_pooling",
backbone_configs)
def test_trt_inference(script_runner, tmpdir, spec, _spec_file,
model_type, nlayers, all_projections,
use_pooling):
'''test to make sure the trt inference works well.'''
keras.backend.clear_session()
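    # Keep TensorFlow off the GPU (device_count GPU=0) and cap its memory fraction
    # so the TensorRT engine built later in this test has the device to itself.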
gpu_options = tf.compat.v1.GPUOptions(
per_process_gpu_memory_fraction=0.33,
allow_growth=True
)
device_count = {'GPU': 0, 'CPU': 1}
session_config = tf.compat.v1.ConfigProto(
gpu_options=gpu_options,
device_count=device_count
)
session = tf.compat.v1.Session(config=session_config)
keras.backend.set_session(session)
model = model_type(nlayers, spec.batch_size_per_gpu,
spec.rpn_stride, spec.reg_type,
spec.weight_decay, spec.freeze_bn, spec.freeze_blocks,
spec.dropout_rate, spec.drop_connect_rate,
spec.conv_bn_share_bias, all_projections,
use_pooling, spec.anchor_sizes, spec.anchor_ratios,
spec.roi_pool_size, spec.roi_pool_2x, spec.num_classes,
spec.std_scaling, spec.rpn_pre_nms_top_N, spec.rpn_post_nms_top_N,
spec.rpn_nms_iou_thres, spec.gt_as_roi,
spec.rcnn_min_overlap, spec.rcnn_max_overlap, spec.rcnn_train_bs,
spec.rcnn_regr_std, spec.rpn_train_bs, spec.lambda_rpn_class,
spec.lambda_rpn_regr, spec.lambda_cls_class, spec.lambda_cls_regr,
f"frcnn_{spec._backbone.replace(':', '_')}", tmpdir,
spec.enc_key, spec.lr_scheduler)
img_input = Input(shape=spec.input_dims, name='input_image')
gt_cls_input = Input(shape=(None,), name='input_gt_cls')
gt_bbox_input = Input(shape=(None, 4), name='input_gt_bbox')
model.build_keras_model(img_input, gt_cls_input, gt_bbox_input)
config_override = {
'pre_nms_top_N': spec.infer_rpn_pre_nms_top_N,
'post_nms_top_N': spec.infer_rpn_post_nms_top_N,
'nms_iou_thres': spec.infer_rpn_nms_iou_thres,
'bs_per_gpu': spec.infer_batch_size
}
os_handle, tmp_keras_model = tempfile.mkstemp()
os.close(os_handle)
# export will convert train model to infer model, so we still encode train
# model here
encode_from_keras(model.keras_model, tmp_keras_model, spec.enc_key.encode())
os_handle, tmp_onnx_model = tempfile.mkstemp(suffix=".onnx")
os.close(os_handle)
os.remove(tmp_onnx_model)
# export
script = 'nvidia_tao_tf1/cv/faster_rcnn/scripts/export.py'
env = os.environ.copy()
os_handle, tmp_engine_file = tempfile.mkstemp()
os.close(os_handle)
os.remove(tmp_engine_file)
args = ['faster_rcnn',
'-m', tmp_keras_model,
'-k', spec.enc_key,
'--experiment_spec', _spec_file,
'-o', tmp_onnx_model,
'--engine_file', tmp_engine_file]
keras.backend.clear_session()
ret = script_runner.run(script, env=env, *args)
try:
        assert ret.success, ret.stdout + ret.stderr
assert os.path.isfile(tmp_onnx_model)
assert os.path.isfile(tmp_engine_file)
if os.path.exists(tmp_keras_model):
os.remove(tmp_keras_model)
except AssertionError:
if os.path.exists(tmp_onnx_model):
os.remove(tmp_onnx_model)
if os.path.exists(tmp_engine_file):
os.remove(tmp_engine_file)
if os.path.exists(tmp_keras_model):
os.remove(tmp_keras_model)
        raise AssertionError(ret.stdout + ret.stderr)
trt_model = TrtModel(
tmp_engine_file,
spec.batch_size_per_gpu,
spec.image_h,
spec.image_w
)
trt_model.build_or_load_trt_engine()
# do prediction
image_shape = (
spec.batch_size_per_gpu,
spec.image_c,
spec.image_h,
spec.image_w
)
# random image
random_input = np.random.random(size=image_shape) * 255.
# due to plugins, we cannot compare keras output with tensorrt output
# instead, we just make sure the inference can work
trt_model.predict(random_input)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/tensorrt_inference/tests/test_trt_infer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FasterRCNN scripts."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/scripts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Export a faster_rcnn model."""
# import build_command_line_parser as this is needed by entrypoint
from nvidia_tao_tf1.cv.common.export.app import build_command_line_parser # noqa pylint: disable=W0611
from nvidia_tao_tf1.cv.common.export.app import launch_export
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.faster_rcnn.export.exporter import FrcnnExporter as Exporter
if __name__ == "__main__":
try:
launch_export(Exporter, backend="onnx")
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Export finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Export was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/scripts/export.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FasterRCNN multi-gpu wrapper for train script."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import subprocess
def parse_args(args_in=None):
"""Argument parser."""
parser = argparse.ArgumentParser(description=('Train or retrain a Faster-RCNN model' +
' using one or more GPUs.'))
parser.add_argument("-e",
"--experiment_spec",
type=str,
required=True,
help="Experiment spec file has all the training params.")
parser.add_argument("-k",
"--enc_key",
type=str,
required=False,
help="TLT encoding key, can override the one in the spec file.")
parser.add_argument("-g",
"--gpus",
type=int,
default=None,
help="Number of GPUs for multi-gpu training.")
return parser.parse_known_args(args_in)[0]
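# Example invocation (hypothetical spec path):
#   python train_multigpu.py -e /workspace/specs/frcnn_train.txt -g 2
# With -g 2, this wrapper shells out to:
#   mpirun -np 2 --oversubscribe --bind-to none python <train.py> -e <spec> [-k <key>]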
def main():
    """Main function for training."""
args = parse_args()
np = args.gpus or 1
# key is optional, we pass it to subprocess only if it exists
if args.enc_key is not None:
key_arg = ['-k', args.enc_key]
else:
key_arg = []
train_script = 'nvidia_tao_tf1/cv/faster_rcnn/scripts/train.py'
    if np > 1:
        # multi-gpu training
        ret = subprocess.run(['mpirun', '-np', str(np),
                              '--oversubscribe',
                              '--bind-to', 'none',
                              'python', train_script,
                              '-e', args.experiment_spec] + key_arg, shell=False).returncode
    elif np == 1:
        # fall back to single-gpu training by default
        ret = subprocess.run(['python', train_script,
                              '-e', args.experiment_spec] + key_arg, shell=False).returncode
    else:
        raise ValueError(
            'Invalid number of GPUs specified: {}, should be a positive integer.'.format(np)
        )
    assert ret == 0, 'Training subprocess failed.'
if __name__ == '__main__':
main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/scripts/train_multigpu.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert KITTI dataset to TFRecords for FasterRCNN TLT model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.detectnet_v2.scripts.dataset_convert import ( # noqa pylint: disable=unused-import
build_command_line_parser,
main,
)
if __name__ == "__main__":
try:
main(sys.argv[1:])
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Dataset convert finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Dataset convert was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/scripts/dataset_convert.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FasterRCNN train script."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import os
import sys
from keras import backend as K
import tensorflow as tf
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.common.utils import hvd_keras
from nvidia_tao_tf1.cv.faster_rcnn.models.utils import build_or_resume_model
from nvidia_tao_tf1.cv.faster_rcnn.spec_loader import spec_loader, spec_wrapper
from nvidia_tao_tf1.cv.faster_rcnn.utils import utils
def build_command_line_parser(parser=None):
"""Build a command line parser for training."""
if parser is None:
parser = argparse.ArgumentParser(description='Train or retrain a Faster-RCNN model.')
parser.add_argument("-e",
"--experiment_spec",
type=str,
required=True,
help="Experiment spec file has all the training params.")
parser.add_argument("-k",
"--enc_key",
type=str,
required=False,
help="TLT encoding key, can override the one in the spec file.")
parser.add_argument("-r",
"--results_dir",
type=str,
default=None,
required=True,
help="Path to the files where the logs are stored.")
return parser
def parse_args(args_in=None):
"""Parser arguments."""
parser = build_command_line_parser()
return parser.parse_known_args(args_in)[0]
def main(args=None):
"""Train or retrain a model."""
options = parse_args(args)
spec = spec_loader.load_experiment_spec(options.experiment_spec)
# enc key in CLI will override the one in the spec file.
if options.enc_key is not None:
spec.enc_key = options.enc_key
spec = spec_wrapper.ExperimentSpec(spec)
hvd = hvd_keras()
hvd.init()
results_dir = options.results_dir
is_master = hvd.rank() == 0
if is_master and not os.path.exists(results_dir):
os.makedirs(results_dir, exist_ok=True)
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=is_master,
verbosity=1,
append=True
)
)
# Horovod: pin GPU to be used to process local rank (one GPU per process)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
# check if model parallelism is enabled or not
if spec.training_config.model_parallelism:
world_size = len(spec.training_config.model_parallelism)
else:
world_size = 1
gpus = list(range(hvd.local_rank() * world_size, (hvd.local_rank() + 1) * world_size))
config.gpu_options.visible_device_list = ','.join([str(x) for x in gpus])
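    # For example, with 2-way model parallelism (world_size = 2) and two
    # Horovod processes, local rank 0 is pinned to GPUs "0,1" and local
    # rank 1 to GPUs "2,3".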
K.set_session(tf.Session(config=config))
K.set_image_data_format('channels_first')
K.set_learning_phase(1)
    utils.set_random_seed(spec.random_seed + hvd.rank())
verbosity = 'INFO'
if spec.verbose:
verbosity = 'DEBUG'
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level=verbosity)
logger = logging.getLogger(__name__)
    # build_or_resume_model also returns the ClearML task (discarded here),
    # in case it needs to be closed.
model, iters_per_epoch, initial_epoch, _ = build_or_resume_model(spec, hvd, logger, results_dir)
if hvd.rank() == 0:
model.summary()
K.get_session().run(utils.get_init_ops())
model.train(spec.epochs, iters_per_epoch, initial_epoch)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Training finished successfully."
)
if __name__ == '__main__':
logger = logging.getLogger(__name__)
try:
main()
logger.info("Training finished successfully.")
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Training was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
logger.info("Training was interrupted.")
except tf.errors.ResourceExhaustedError:
status_logging.get_status_logger().write(
message=(
"Ran out of GPU memory, please lower the batch size, use a smaller input "
"resolution, use a smaller backbone or try model parallelism. See "
"documentation on how to enable model parallelism for FasterRCNN."
),
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
logger.error(
"Ran out of GPU memory, please lower the batch size, use a smaller input "
"resolution, use a smaller backbone or try model parallelism. See "
"documentation on how to enable model parallelism for FasterRCNN."
)
sys.exit(1)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/scripts/train.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to prune the FasterRCNN TLT model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.common.magnet_prune import ( # noqa pylint: disable=unused-import
build_command_line_parser,
main,
)
if __name__ == "__main__":
try:
main(sys.argv[1:])
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Pruning finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Pruning was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/scripts/prune.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FasterRCNN inference script."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import glob
import logging
import os
import sys
import cv2
from keras import backend as K
import numpy as np
from PIL import Image, ImageDraw
import tensorflow as tf
from tqdm import tqdm
try:
import tensorrt as trt # noqa pylint: disable=W0611 pylint: disable=W0611
from nvidia_tao_tf1.cv.faster_rcnn.tensorrt_inference.tensorrt_model import TrtModel
except Exception as e:
logger = logging.getLogger(__name__)
logger.warning(
"Failed to import TensorRT package, exporting TLT to a TensorRT engine "
"will not be available."
)
from nvidia_tao_tf1.cv.common import utils as iva_utils
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.faster_rcnn.models.utils import build_inference_model
from nvidia_tao_tf1.cv.faster_rcnn.spec_loader import spec_loader, spec_wrapper
from nvidia_tao_tf1.cv.faster_rcnn.utils import utils
KERAS_MODEL_EXTENSIONS = ["tlt", "hdf5"]
def build_command_line_parser(parser=None):
"""Build a command line parser for inference."""
if parser is None:
        parser = argparse.ArgumentParser(description='Do inference on the pretrained model '
                                                     'and visualize the results.')
parser.add_argument("-e",
"--experiment_spec",
type=str,
required=True,
help="Experiment spec file has all the training params.")
parser.add_argument("-k",
"--key",
type=str,
required=False,
help="TLT encoding key, can override the one in the spec file.")
parser.add_argument("-m",
"--model_path",
type=str,
required=False,
default=None,
help="Path to the model to be used for inference")
parser.add_argument("-r",
"--results_dir",
type=str,
default=None,
help="Path to the files where the logs are stored.")
return parser
def parse_args(args=None):
    '''Parse arguments.'''
parser = build_command_line_parser()
return parser.parse_known_args(args)[0]
def main(args=None):
"""Do inference on a pretrained model."""
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
K.set_session(tf.Session(config=config))
K.set_image_data_format('channels_first')
K.set_learning_phase(0)
options = parse_args(args)
spec = spec_loader.load_experiment_spec(options.experiment_spec)
# enc key in CLI will override the one in the spec file.
if options.key is not None:
spec.enc_key = options.key
# model in CLI will override the one in the spec file.
if options.model_path is not None:
spec.inference_config.model = options.model_path
spec = spec_wrapper.ExperimentSpec(spec)
# Set up status logging
if options.results_dir:
if not os.path.exists(options.results_dir):
os.makedirs(options.results_dir)
status_file = os.path.join(options.results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=True,
verbosity=1,
append=True
)
)
s_logger = status_logging.get_status_logger()
s_logger.write(
status_level=status_logging.Status.STARTED,
message="Starting FasterRCNN inference."
)
verbosity = 'INFO'
if spec.verbose:
verbosity = 'DEBUG'
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level=verbosity)
logger = logging.getLogger(__name__)
    # set random seed
utils.set_random_seed(spec.random_seed)
img_path = spec.inference_images_dir
class_names = spec.class_to_id.keys()
class_to_color = {v: np.random.randint(0, 255, 3) for v in class_names}
# load model and convert train model to infer model
if spec.inference_trt_config is not None:
# spec.inference_trt_engine will be deprecated, use spec.inference_model
logger.info('Running inference with TensorRT as backend.')
logger.warning(
"`spec.inference_config.trt_inference` is deprecated, "
"please use `spec.inference_config.model` in spec file or provide "
"the model/engine as a command line argument instead."
)
if (spec.image_h == 0 or spec.image_w == 0):
            raise ValueError(
                "TensorRT inference is not supported when using dynamic input shape."
            )
infer_model = TrtModel(spec.inference_trt_engine,
spec.infer_batch_size,
spec.image_h,
spec.image_w)
infer_model.build_or_load_trt_engine()
elif spec.inference_model.split('.')[-1] in KERAS_MODEL_EXTENSIONS:
# spec.inference_model is a TLT model
logger.info('Running inference with TLT as backend.')
# in case of dynamic shape, batch size has to be 1
if (
(spec.image_h == 0 or spec.image_w == 0) and
spec.infer_batch_size != 1
):
            raise ValueError("Only batch size 1 is supported when using dynamic input shapes.")
train_model = iva_utils.decode_to_keras(spec.inference_model,
str.encode(spec.enc_key),
input_model=None,
compile_model=False,
by_name=None)
config_override = {'pre_nms_top_N': spec.infer_rpn_pre_nms_top_N,
'post_nms_top_N': spec.infer_rpn_post_nms_top_N,
'nms_iou_thres': spec.infer_rpn_nms_iou_thres,
'bs_per_gpu': spec.infer_batch_size}
logger.info("Building inference model, may take a while...")
infer_model = build_inference_model(
train_model,
config_override,
create_session=False,
max_box_num=spec.infer_rcnn_post_nms_top_N,
regr_std_scaling=spec.rcnn_regr_std,
iou_thres=spec.infer_rcnn_nms_iou_thres,
score_thres=spec.infer_confidence_thres,
eval_rois=spec.infer_rpn_post_nms_top_N
)
infer_model.summary()
else:
# spec.inference_model is a TensorRT engine
logger.info('Running inference with TensorRT as backend.')
if (spec.image_h == 0 or spec.image_w == 0):
            raise ValueError(
                "TensorRT inference is not supported when using dynamic input shape."
            )
infer_model = TrtModel(spec.inference_model,
spec.infer_batch_size,
spec.image_h,
spec.image_w)
infer_model.build_or_load_trt_engine()
output_dir = spec.inference_output_images_dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
old_files = glob.glob(output_dir+'/*')
for of in old_files:
if os.path.isfile(of):
os.remove(of)
image_set = os.listdir(img_path)
for img_name in image_set:
if not img_name.endswith(('.jpeg', '.jpg', '.png')):
            logger.error('Invalid image found: {}. '
                         'Please ensure the image extension '
                         'is jpg, jpeg or png. Exiting.'.format(img_name))
sys.exit(1)
image_num = len(image_set)
num_iters = (image_num + spec.infer_batch_size - 1) // spec.infer_batch_size
im_type = cv2.IMREAD_COLOR if spec.image_c == 3 else cv2.IMREAD_GRAYSCALE
for idx in tqdm(range(num_iters)):
# the last batch can be smaller
image_batch = image_set[idx*spec.infer_batch_size:(idx+1)*spec.infer_batch_size]
filepaths = [os.path.join(img_path, img_name) for img_name in image_batch]
img_list = [cv2.imread(filepath, im_type) for filepath in filepaths]
X, ratio, orig_shape = utils.preprocess_image_batch(
img_list,
spec.image_h,
spec.image_w,
spec.image_c,
spec.image_min,
spec.image_scaling_factor,
spec.image_mean_values,
spec.image_channel_order
)
        # The Keras model is sensitive to batch size, so we have to pad the
        # last batch if it is smaller than infer_batch_size.
use_pad = False
if X.shape[0] < spec.infer_batch_size:
use_pad = True
X_pad = np.zeros((spec.infer_batch_size,) + X.shape[1:], dtype=X.dtype)
X_pad[0:X.shape[0], ...] = X
else:
X_pad = X
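        # E.g. with infer_batch_size = 4 and only 3 images left, X_pad holds
        # the 3 real images plus one all-zero image; the predictions for the
        # padded slot are dropped again right below.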
nmsed_boxes, nmsed_scores, nmsed_classes, num_dets, _ = \
infer_model.predict(X_pad)
if use_pad:
nmsed_boxes = nmsed_boxes[0:X.shape[0], ...]
nmsed_scores = nmsed_scores[0:X.shape[0], ...]
nmsed_classes = nmsed_classes[0:X.shape[0], ...]
num_dets = num_dets[0:X.shape[0], ...]
for image_idx in range(nmsed_boxes.shape[0]):
img = img_list[image_idx]
# use PIL for TrueType fonts, for better visualization
# openCV: BGR, PIL: RGB
img_pil = Image.fromarray(np.array(img)[:, :, ::-1])
imgd = ImageDraw.Draw(img_pil)
all_dets_dump = []
orig_h, orig_w = orig_shape[image_idx]
for jk in range(num_dets[image_idx]):
new_probs = nmsed_scores[image_idx, jk]
# skip boxes whose confidences are lower than visualize thres
if (new_probs < spec.vis_conf or
nmsed_classes[image_idx, jk] not in spec.id_to_class):
continue
cls_name = spec.id_to_class[nmsed_classes[image_idx, jk]]
y1, x1, y2, x2 = nmsed_boxes[image_idx, jk, :]
(real_x1, real_y1, real_x2, real_y2) = utils.get_original_coordinates(
ratio[image_idx],
x1,
y1,
x2,
y2,
orig_h,
orig_w
)
p1 = (real_x1, real_y1)
p2 = (real_x2, real_y2)
p3 = (int(class_to_color[cls_name][0]),
int(class_to_color[cls_name][1]),
int(class_to_color[cls_name][2]))
# draw bbox and caption
imgd.rectangle([p1, p2], outline=p3)
textLabel = '{}: {:.4f}'.format(cls_name, new_probs)
if spec.inference_config.bbox_caption_on:
text_size = [p1, (real_x1 + 100, real_y1 + 10)]
imgd.rectangle(text_size, outline='white', fill='white')
imgd.text(p1, textLabel, fill='black')
det = {'x1': x1, 'x2': x2, 'y1': y1, 'y2': y2,
'class': cls_name, 'prob': new_probs}
all_dets_dump.append(det)
            if isinstance(ratio[image_idx], tuple):
                ratio_2 = (1.0 / ratio[image_idx][1], 1.0 / ratio[image_idx][0])
            elif isinstance(ratio[image_idx], float):
                ratio_2 = (1.0 / ratio[image_idx], 1.0 / ratio[image_idx])
            else:
                raise TypeError('Invalid data type for ratio.')
utils.dump_kitti_labels(filepaths[image_idx],
all_dets_dump,
ratio_2,
spec.inference_output_labels_dir,
spec.vis_conf)
img_pil.save(os.path.join(output_dir, image_batch[image_idx]))
logger.info("Inference output images directory: {}".format(output_dir))
logger.info("Inference output labels directory: {}".format(spec.inference_output_labels_dir))
if options.results_dir:
s_logger.write(
status_level=status_logging.Status.SUCCESS,
message="Inference finished successfully."
)
if __name__ == '__main__':
try:
main()
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Inference was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
        if isinstance(e, tf.errors.ResourceExhaustedError):
logger = logging.getLogger(__name__)
logger.error(
"Ran out of GPU memory, please lower the batch size, use a smaller input "
"resolution, use a smaller backbone or try model parallelism. See TLT "
"documentation on how to enable model parallelism for FasterRCNN."
)
status_logging.get_status_logger().write(
message="Ran out of GPU memory, please lower the batch size, use a smaller input "
"resolution, use a smaller backbone or try model parallelism. See TLT "
"documentation on how to enable model parallelism for FasterRCNN.",
status_level=status_logging.Status.FAILURE
)
sys.exit(1)
else:
            # re-raise the error as-is if it is not an OOM error
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/scripts/inference.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FasterRCNN evaluation script."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import os
import sys
from keras import backend as K
import numpy as np
import tensorflow as tf
from tqdm import tqdm
try:
import tensorrt as trt # noqa pylint: disable=W0611 pylint: disable=W0611
from nvidia_tao_tf1.cv.faster_rcnn.tensorrt_inference.tensorrt_model import TrtModel
except Exception as e:
logger = logging.getLogger(__name__)
logger.warning(
"Failed to import TensorRT package, exporting TLT to a TensorRT engine "
"will not be available."
)
from nvidia_tao_tf1.cv.common import utils as iva_utils
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.faster_rcnn.data_loader.inputs_loader import InputsLoader
from nvidia_tao_tf1.cv.faster_rcnn.models.utils import build_inference_model
from nvidia_tao_tf1.cv.faster_rcnn.spec_loader import spec_loader, spec_wrapper
from nvidia_tao_tf1.cv.faster_rcnn.utils import utils
KERAS_MODEL_EXTENSIONS = ["tlt", "hdf5"]
def build_command_line_parser(parser=None):
"""Build a command line parser for evaluation."""
if parser is None:
parser = argparse.ArgumentParser(description='Evaluate a Faster-RCNN model.')
parser.add_argument("-e",
"--experiment_spec",
type=str,
required=True,
help="Experiment spec file has all the training params.")
parser.add_argument("-k",
"--key",
type=str,
required=False,
help="TLT encoding key, can override the one in the spec file.")
parser.add_argument("-m",
"--model_path",
type=str,
required=False,
default=None,
help="Path to the model to be used for evaluation")
parser.add_argument("-r",
"--results_dir",
type=str,
default=None,
help="Path to the files where the logs are stored.")
parser.add_argument('-i',
'--image_dir',
type=str,
required=False,
default=None,
help=argparse.SUPPRESS)
parser.add_argument('-l',
'--label_dir',
type=str,
required=False,
help=argparse.SUPPRESS)
parser.add_argument('-b',
'--batch_size',
type=int,
required=False,
default=1,
help=argparse.SUPPRESS)
return parser
def parse_args(args_in=None):
"""Parse arguments."""
parser = build_command_line_parser()
return parser.parse_known_args(args_in)[0]
def main(args=None):
"""Do evaluation on a pretrained model."""
options = parse_args(args)
spec = spec_loader.load_experiment_spec(options.experiment_spec)
# enc key in CLI will override the one in the spec file.
if options.key is not None:
spec.enc_key = options.key
# model in CLI will override the one in the spec file.
if options.model_path is not None:
spec.evaluation_config.model = options.model_path
spec = spec_wrapper.ExperimentSpec(spec)
# Set up status logging
if options.results_dir:
if not os.path.exists(options.results_dir):
os.makedirs(options.results_dir)
status_file = os.path.join(options.results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=True,
verbosity=1,
append=True
)
)
s_logger = status_logging.get_status_logger()
s_logger.write(
status_level=status_logging.Status.STARTED,
message="Starting FasterRCNN evaluation."
)
verbosity = 'INFO'
if spec.verbose:
verbosity = 'DEBUG'
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level=verbosity)
logger = logging.getLogger(__name__)
# setup tf and keras
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
K.set_session(tf.Session(config=config))
K.set_image_data_format('channels_first')
K.set_learning_phase(0)
    # set random seed
utils.set_random_seed(spec.random_seed)
# load model and convert train model to infer model
if spec.eval_trt_config is not None:
# spec.eval_trt_engine will be deprecated, use spec.eval_model
logger.info('Running evaluation with TensorRT as backend.')
logger.warning(
"`spec.evaluation_config.trt_evaluation` is deprecated, "
"please use `spec.evaluation_config.model` in spec file or provide "
"the model/engine as a command line argument instead."
)
infer_model = TrtModel(spec.eval_trt_engine,
spec.eval_batch_size,
spec.image_h,
spec.image_w)
infer_model.build_or_load_trt_engine()
vis_path = os.path.dirname(spec.eval_trt_engine)
elif spec.eval_model.split('.')[-1] in KERAS_MODEL_EXTENSIONS:
# spec.eval_model is a TLT model
logger.info('Running evaluation with TAO Toolkit as backend.')
train_model = iva_utils.decode_to_keras(spec.eval_model,
str.encode(spec.enc_key),
input_model=None,
compile_model=False,
by_name=None)
config_override = {'pre_nms_top_N': spec.eval_rpn_pre_nms_top_N,
'post_nms_top_N': spec.eval_rpn_post_nms_top_N,
'nms_iou_thres': spec.eval_rpn_nms_iou_thres,
'bs_per_gpu': spec.eval_batch_size}
logger.info("Building evaluation model, may take a while...")
infer_model = build_inference_model(
train_model,
config_override,
create_session=True,
max_box_num=spec.eval_rcnn_post_nms_top_N,
regr_std_scaling=spec.rcnn_regr_std,
iou_thres=spec.eval_rcnn_nms_iou_thres,
score_thres=spec.eval_confidence_thres,
eval_rois=spec.eval_rpn_post_nms_top_N
)
infer_model.summary()
vis_path = os.path.dirname(spec.eval_model)
else:
# spec.eval_model is a TRT engine
logger.info('Running evaluation with TensorRT as backend.')
infer_model = TrtModel(spec.eval_model,
spec.eval_batch_size,
spec.image_h,
spec.image_w)
infer_model.build_or_load_trt_engine()
vis_path = os.path.dirname(spec.eval_model)
data_loader = InputsLoader(spec.training_dataset,
spec.data_augmentation,
spec.eval_batch_size,
spec.image_c,
spec.image_mean_values,
spec.image_scaling_factor,
bool(spec.image_channel_order == 'bgr'),
training=False,
max_objs_per_img=spec.max_objs_per_img,
session=K.get_session())
K.get_session().run(utils.get_init_ops())
num_examples = data_loader.num_samples
max_steps = (num_examples + spec.eval_batch_size - 1) // spec.eval_batch_size
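    # ceiling division so the final partial batch is included:
    # e.g. 103 samples with eval_batch_size 8 -> 13 steps.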
prob_thresh = spec.eval_confidence_thres
T = [dict() for _ in spec.eval_gt_matching_iou_list]
P = [dict() for _ in spec.eval_gt_matching_iou_list]
RPN_RECALL = {}
for _ in tqdm(range(max_steps)):
images, gt_class_ids, gt_bboxes, gt_diff = data_loader.get_array_with_diff()
image_h, image_w = images.shape[2:]
        # run inference to get the NMSed detections and the RPN ROIs
nmsed_boxes, nmsed_scores, nmsed_classes, num_dets, rois_output = \
infer_model.predict(images)
        # accumulate per-image detection results for the mAP computation
for image_idx in range(nmsed_boxes.shape[0]):
all_dets = utils.gen_det_boxes(
spec.id_to_class, nmsed_classes,
nmsed_boxes, nmsed_scores,
image_idx, num_dets,
)
# get detection results for each IoU threshold, for each image
utils.get_detection_results(
all_dets, gt_class_ids, gt_bboxes, gt_diff, image_h,
image_w, image_idx, spec.id_to_class, T, P,
spec.eval_gt_matching_iou_list
)
            # calculate RPN recall for each class; this will help debugging.
            # In the TensorRT engine eval case, rois_output is None, so skip this.
if rois_output is not None:
utils.calc_rpn_recall(
RPN_RECALL,
spec.id_to_class,
rois_output[image_idx, ...],
gt_class_ids[image_idx, ...],
gt_bboxes[image_idx, ...]
)
# finally, compute and print all the mAP values
maps = utils.compute_map_list(
T, P, prob_thresh,
spec.use_voc07_metric,
RPN_RECALL,
spec.eval_gt_matching_iou_list,
vis_path if spec.eval_config.visualize_pr_curve else None
)
mAP = np.mean(maps)
if options.results_dir:
s_logger.kpi.update({'mAP': float(mAP)})
s_logger.write(
status_level=status_logging.Status.SUCCESS,
message="Evaluation finished successfully."
)
if __name__ == '__main__':
try:
main()
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Evaluation was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
        if isinstance(e, tf.errors.ResourceExhaustedError):
logger = logging.getLogger(__name__)
logger.error(
"Ran out of GPU memory, please lower the batch size, use a smaller input "
"resolution, use a smaller backbone or try model parallelism. See TLT "
"documentation on how to enable model parallelism for FasterRCNN."
)
status_logging.get_status_logger().write(
message="Ran out of GPU memory, please lower the batch size, use a smaller input "
"resolution, use a smaller backbone or try model parallelism. See TLT "
"documentation on how to enable model parallelism for FasterRCNN.",
status_level=status_logging.Status.FAILURE
)
sys.exit(1)
else:
            # re-raise the error as-is if it is not an OOM error
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/scripts/evaluate.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""TLT command line wrapper to invoke CLI scripts."""
import sys
from nvidia_tao_tf1.cv.common.entrypoint.entrypoint import launch_job
import nvidia_tao_tf1.cv.faster_rcnn.scripts
def main():
"""Function to launch the job."""
launch_job(nvidia_tao_tf1.cv.faster_rcnn.scripts, "faster_rcnn", sys.argv[1:])
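# Example CLI usage (a sketch; subcommands are resolved from the scripts
# package above, e.g. train, evaluate, inference, export, prune):
#   faster_rcnn train -e <experiment_spec> -r <results_dir> -k <key>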
if __name__ == "__main__":
main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/entrypoint/faster_rcnn.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""Entrypoint for TLT FasterRCNN."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/entrypoint/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
converter_functions.py
Conversion Functions for common layers.
Add new functions here with a decorator.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from uff.converters.tensorflow.converter import TensorFlowToUFFConverter as tf2uff
from uff.model.utils import convert_to_str
from uff.model.exceptions import * # noqa pylint: disable = W0401,W0614
import numpy as np
@tf2uff.register(["Placeholder"])
def convert_placeholder(name, tf_node, inputs, uff_graph, **kwargs):
dtype = tf2uff.convert_tf2numpy_dtype(tf_node.attr['dtype'].type)
shape = tf2uff.get_tf_shape_as_int_list(tf_node.attr['shape'])
uff_graph.input(shape, dtype, name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["Identity"])
def convert_identity(name, tf_node, inputs, uff_graph, **kwargs):
uff_graph.identity(inputs[0], name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["Const"])
def convert_const(name, tf_node, inputs, uff_graph, **kwargs):
array = tf2uff.convert_tf2numpy_const_node(tf_node)
uff_node = uff_graph.const(array, name)
uff_node.array = array
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["Add"])
def convert_add(name, tf_node, inputs, uff_graph, **kwargs):
uff_graph.binary(inputs[0], inputs[1], 'add', name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["Sub"])
def convert_sub(name, tf_node, inputs, uff_graph, **kwargs):
uff_graph.binary(inputs[0], inputs[1], 'sub', name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["Mul"])
def convert_mul(name, tf_node, inputs, uff_graph, **kwargs):
uff_graph.binary(inputs[0], inputs[1], 'mul', name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["Div", "RealDiv"])
def convert_div(name, tf_node, inputs, uff_graph, **kwargs):
uff_graph.binary(inputs[0], inputs[1], 'div', name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["Relu"])
def convert_relu(name, tf_node, inputs, uff_graph, **kwargs):
uff_graph.activation(inputs[0], 'relu', name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["Relu6"])
def convert_relu6(name, tf_node, inputs, uff_graph, **kwargs):
uff_graph.activation(inputs[0], 'relu6', name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["LeakyRelu"])
def convert_leaky_relu(name, tf_node, inputs, uff_graph, **kwargs):
uff_graph.leaky_relu(inputs[0], tf_node.attr['alpha'].f, name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["Tanh"])
def convert_tanh(name, tf_node, inputs, uff_graph, **kwargs):
uff_graph.activation(inputs[0], 'tanh', name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["Sigmoid"])
def convert_sigmoid(name, tf_node, inputs, uff_graph, **kwargs):
uff_graph.activation(inputs[0], 'sigmoid', name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["Elu"])
def convert_elu(name, tf_node, inputs, uff_graph, **kwargs):
uff_graph.activation(inputs[0], 'elu', name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["Selu"])
def convert_selu(name, tf_node, inputs, uff_graph, **kwargs):
uff_graph.activation(inputs[0], 'selu', name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["Softsign"])
def convert_softsign(name, tf_node, inputs, uff_graph, **kwargs):
uff_graph.activation(inputs[0], 'softsign', name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["Softplus"])
def convert_softplus(name, tf_node, inputs, uff_graph, **kwargs):
uff_graph.activation(inputs[0], 'softplus', name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["Neg"])
def convert_neg(name, tf_node, inputs, uff_graph, **kwargs):
uff_graph.unary(inputs[0], 'neg', name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["Abs"])
def convert_abs(name, tf_node, inputs, uff_graph, **kwargs):
uff_graph.unary(inputs[0], 'abs', name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["Acos"])
def convert_acos(name, tf_node, inputs, uff_graph, **kwargs):
uff_graph.unary(inputs[0], 'acos', name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["Acosh"])
def convert_acosh(name, tf_node, inputs, uff_graph, **kwargs):
uff_graph.unary(inputs[0], 'acosh', name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["Asin"])
def convert_asin(name, tf_node, inputs, uff_graph, **kwargs):
uff_graph.unary(inputs[0], 'asin', name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["Asinh"])
def convert_asinh(name, tf_node, inputs, uff_graph, **kwargs):
uff_graph.unary(inputs[0], 'asinh', name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["Atan"])
def convert_atan(name, tf_node, inputs, uff_graph, **kwargs):
uff_graph.unary(inputs[0], 'atan', name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["Atanh"])
def convert_atanh(name, tf_node, inputs, uff_graph, **kwargs):
uff_graph.unary(inputs[0], 'atanh', name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["Ceil"])
def convert_ceil(name, tf_node, inputs, uff_graph, **kwargs):
uff_graph.unary(inputs[0], 'ceil', name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["Cos"])
def convert_cos(name, tf_node, inputs, uff_graph, **kwargs):
uff_graph.unary(inputs[0], 'cos', name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["Cosh"])
def convert_cosh(name, tf_node, inputs, uff_graph, **kwargs):
uff_graph.unary(inputs[0], 'cosh', name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["Sin"])
def convert_sin(name, tf_node, inputs, uff_graph, **kwargs):
uff_graph.unary(inputs[0], 'sin', name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["Sinh"])
def convert_sinh(name, tf_node, inputs, uff_graph, **kwargs):
uff_graph.unary(inputs[0], 'sinh', name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["Tan"])
def convert_tan(name, tf_node, inputs, uff_graph, **kwargs):
uff_graph.unary(inputs[0], 'tan', name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["Floor"])
def convert_floor(name, tf_node, inputs, uff_graph, **kwargs):
uff_graph.unary(inputs[0], 'floor', name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["Sqrt"])
def convert_sqrt(name, tf_node, inputs, uff_graph, **kwargs):
uff_graph.unary(inputs[0], 'sqrt', name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["Rsqrt"])
def convert_rsqrt(name, tf_node, inputs, uff_graph, **kwargs):
uff_graph.unary(inputs[0], 'rsqrt', name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["Square"])
def convert_square(name, tf_node, inputs, uff_graph, **kwargs):
uff_graph.unary(inputs[0], 'square', name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["Pow"])
def convert_pow(name, tf_node, inputs, uff_graph, **kwargs):
uff_graph.unary(inputs[0], 'pow', name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["Exp"])
def convert_exp(name, tf_node, inputs, uff_graph, **kwargs):
uff_graph.unary(inputs[0], 'exp', name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["Log"])
def convert_log(name, tf_node, inputs, uff_graph, **kwargs):
uff_graph.unary(inputs[0], 'log', name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
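# NOTE: the Softmax converter below carries the FasterRCNN-specific patch
# described in patched_uff/__init__.py: the data format defaults to "NCHW"
# (stock UFF defaults to "NHWC"), which avoids an unsupported 5D transpose
# when parsing FasterRCNN's 5D Softmax.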
@tf2uff.register(["Softmax"])
def convert_softmax(name, tf_node, inputs, uff_graph, **kwargs):
# Some Softmax ops don't have an axis node.
if len(inputs) > 1:
tf_axis_node = kwargs["tf_nodes"][inputs[-1]]
axis = int(tf2uff.convert_tf2numpy_const_node(tf_axis_node))
inputs = inputs[:-1]
else:
axis = 0
fmt = convert_to_str(tf_node.attr['data_format'].s)
fmt = fmt if fmt else "NCHW"
data_fmt = tf2uff.convert_tf2uff_data_format(fmt)
uff_graph.softmax(inputs[0], axis, data_fmt, name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["Minimum"])
def convert_minimum(name, tf_node, inputs, uff_graph, **kwargs):
uff_graph.binary(inputs[0], inputs[1], 'min', name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["Maximum"])
def convert_maximum(name, tf_node, inputs, uff_graph, **kwargs):
uff_graph.binary(inputs[0], inputs[1], 'max', name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["Shape"])
def convert_shape(name, tf_node, inputs, uff_graph, **kwargs):
uff_graph.shape(inputs[0], name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["ExpandDims"])
def convert_expand_dims(name, tf_node, inputs, uff_graph, **kwargs):
# Retrieve and remove the axis node.
tf_axis_node = kwargs["tf_nodes"][inputs[-1]]
if tf_axis_node.op != "Const":
raise UffException("ExpandDims Axis node has op " + str(tf_axis_node.op) + ", expected Const. The axis must be specified as a Const node.")
axis = int(tf2uff.convert_tf2numpy_const_node(tf_axis_node))
inputs.pop(-1)
# Add the op.
uff_graph.expand_dims(inputs[0], axis, name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["ArgMax"])
def convert_argmax(name, tf_node, inputs, uff_graph, **kwargs):
# Retrieve and remove the axis node.
tf_axis_input_node = kwargs["tf_nodes"][inputs[-1]]
if tf_axis_input_node.op != "Const":
raise UffException("ArgMax Axis node has op " + str(tf_axis_input_node.op) + ", expected Const. The axis must be specified as a Const node.")
axis = int(tf2uff.convert_tf2numpy_const_node(tf_axis_input_node))
inputs.pop(-1)
# Add the op.
uff_graph.argmax(inputs[0], axis, name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["ArgMin"])
def convert_argmin(name, tf_node, inputs, uff_graph, **kwargs):
# Retrieve and remove the axis node.
tf_axis_input_node = kwargs["tf_nodes"][inputs[-1]]
if tf_axis_input_node.op != "Const":
raise UffException("ArgMin Axis node has op " + str(tf_axis_input_node.op) + ", expected Const. The axis must be specified as a Const node.")
axis = int(tf2uff.convert_tf2numpy_const_node(tf_axis_input_node))
inputs.pop(-1)
# Add the op.
uff_graph.argmin(inputs[0], axis, name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["Reshape"])
def convert_reshape(name, tf_node, inputs, uff_graph, **kwargs):
str_name = tf_node.name.split('/')
if len(str_name) > 1 and tf_node.name.split('/')[-2].lower().find('flatten') != -1:
print('DEBUG: convert reshape to flatten node')
uff_graph.flatten(inputs[0], name=name) # flatten axis is ignored here
return [tf2uff.split_node_name_and_output(inputs[0])[0]] # second input of shape is dropped
uff_graph.reshape(inputs[0], inputs[1], name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
# tensorflow does not have a flatten op.
# tensorflow.contrib.slim has a flatten function that combines slice/shape to
# implement flatten. We decided to hack around it by chopping the reshape and
# slice and adding a flatten op, so it is easier to patch with UFF/TensorRT.
#
# @tf2uff.register(["Flatten"])
# def _flatten_helper(name, tf_node, inputs, uff_graph, **kwargs):
# axis = tf2uff.get_tf_int_list(tf_node.attr['axis'])
# uff_graph.flatten(inputs[0], name=name, axis=axis)
# return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["Transpose"])
def convert_transpose(name, tf_node, inputs, uff_graph, **kwargs):
tf_permutation_node = kwargs["tf_nodes"][inputs[1]]
if tf_permutation_node.op != "Const":
raise UffException("Transpose permutation has op " + str(tf_permutation_node.op) + ", expected Const. Only constant permuations are supported in UFF.")
permutation = tf2uff.convert_tf2numpy_const_node(
tf_permutation_node).tolist()
inputs = inputs[:1]
uff_graph.transpose(inputs[0], permutation, name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["Pack"])
def convert_pack(name, tf_node, inputs, uff_graph, **kwargs):
axis = tf_node.attr['axis'].i
uff_graph.stack(inputs, axis, name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["ConcatV2"])
def convert_concatv2(name, tf_node, inputs, uff_graph, **kwargs):
if "axis" in tf_node.attr:
# Handle cases where the axis is not a node, but an attribute instead.
axis = tf_node.attr["axis"].i
else:
tf_axis_node = kwargs["tf_nodes"][inputs[-1]]
if tf_axis_node.op != "Const":
raise UffException("Concat Axis node has op " + str(tf_axis_node.op) + ", expected Const. The axis for a Concat op must be specified as either an attribute, or a Const node.")
axis = int(tf2uff.convert_tf2numpy_const_node(tf_axis_node))
inputs = inputs[:-1]
uff_graph.concat(inputs, axis, name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["MaxPool"])
def convert_maxpool(name, tf_node, inputs, uff_graph, **kwargs):
return _pool_helper(name, tf_node, inputs, uff_graph, func='max', **kwargs)
@tf2uff.register(["AvgPool"])
def convert_avgpool(name, tf_node, inputs, uff_graph, **kwargs):
return _pool_helper(name, tf_node, inputs, uff_graph, func='avg', **kwargs)
def _pool_helper(name, tf_node, inputs, uff_graph, **kwargs):
func = kwargs["func"]
window_size = tf2uff.get_tf_int_list(tf_node.attr['ksize'])
strides = tf2uff.get_tf_int_list(tf_node.attr['strides'])
fmt = convert_to_str(tf_node.attr['data_format'].s)
fmt = fmt if fmt else "NHWC"
inputs, padding, fields = tf2uff.apply_fused_padding(
tf_node, inputs, kwargs["tf_nodes"])
data_format = tf2uff.convert_tf2uff_data_format(fmt)
if fmt == 'NCHW':
window_size = window_size[2:]
strides = strides[2:]
if padding is not None:
padding = padding[2:]
elif fmt == 'NHWC':
window_size = [window_size[1], window_size[2]]
strides = [strides[1], strides[2]]
if padding is not None:
padding = [padding[1], padding[2]]
else:
raise ValueError("Unsupported data format: " + fmt)
uff_graph.pool(
inputs[0], func, window_size, strides, padding,
data_format=data_format, name=name, fields=fields)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["LRN"])
def convert_lrn(name, tf_node, inputs, uff_graph, **kwargs):
lhs = inputs[0]
fmt = convert_to_str(tf_node.attr['data_format'].s)
fmt = fmt if fmt else "NC+"
window_size = tf_node.attr["depth_radius"].i
alpha = tf_node.attr["alpha"].f
beta = tf_node.attr["beta"].f
bias = tf_node.attr["bias"].f
uff_graph.lrn(lhs, window_size, alpha, beta, bias, fmt, name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["MatMul"])
def convert_matmul(name, tf_node, inputs, uff_graph, **kwargs):
lhs, rhs = inputs
trans_a = tf_node.attr['transpose_a'].b
trans_b = tf_node.attr['transpose_b'].b
lhs_fmt = 'CN' if trans_a else 'NC'
rhs_fmt = 'KC' if trans_b else 'CK'
uff_graph.fully_connected(
lhs, rhs, lhs_fmt, rhs_fmt, name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["Conv2D"])
def convert_conv2d(name, tf_node, inputs, uff_graph, **kwargs):
return _conv2d_helper(name, tf_node, inputs, uff_graph, func="conv2d", **kwargs)
@tf2uff.register(["DepthwiseConv2dNative"])
def convert_depthwise_conv2d_native(name, tf_node, inputs, uff_graph,
**kwargs):
return _conv2d_helper(name, tf_node, inputs, uff_graph, func="depthwise", **kwargs)
def _conv2d_helper(name, tf_node, inputs, uff_graph, **kwargs):
func = kwargs["func"]
fmt = convert_to_str(tf_node.attr['data_format'].s)
fmt = fmt if fmt else "NHWC"
strides = tf2uff.get_tf_int_list(tf_node.attr['strides'])
inputs, padding, fields = tf2uff.apply_fused_padding(
tf_node, inputs, kwargs["tf_nodes"])
lhs_fmt = tf2uff.convert_tf2uff_data_format(fmt)
rhs_fmt = '+CK'
if fmt == 'NCHW':
strides = strides[2:]
if padding is not None:
padding = padding[2:]
elif fmt == 'NHWC':
strides = [strides[1], strides[2]]
if padding is not None:
padding = [padding[1], padding[2]]
else:
raise ValueError("Unsupported data format: " + fmt)
if func == "depthwise":
wt = kwargs["tf_nodes"][inputs[1]]
number_groups = int(wt.attr['value'].tensor.tensor_shape.dim[2].size)
else:
number_groups = None
# If this node represents a dilated conv, pull in the dilations.
dilation = None
if "dilations" in tf_node.attr:
if fmt == "NCHW":
dilation = tf2uff.get_tf_int_list(tf_node.attr['dilations'])[2:]
else:
dilation = tf2uff.get_tf_int_list(tf_node.attr['dilations'])[1:3]
# FIXME: Need a better way to check for dilated convs. This just checks if the block_shape input is as expected.
# Ideally we should have a 'get_input_by_name' function. Maybe we can leverage GS here.
# Another possibility is that GS can add these as attributes to the node rather than maintaining them as
# separate const nodes.
tf_block_shape_node = kwargs["tf_nodes"][inputs[1]]
if "block_shape" in tf_block_shape_node.name.split('/')[-1] and tf_block_shape_node.op == "Const":
# Get the second input (block_shape) - of the form [1, dilation_value, dilation_value]
dilation = np.frombuffer(tf_block_shape_node.attr["value"].tensor.tensor_content, dtype=np.int32).tolist()
if len(dilation) > 2:
dilation = [dilation[1], dilation[2]]
inputs.pop(1)
tf_paddings_node = kwargs["tf_nodes"][inputs[1]]
if "paddings" in tf_paddings_node.name.split('/')[-1] and tf_paddings_node.op == "Const":
# Get the second input (paddings, since block_shape is already removed)
paddings_temp = np.frombuffer(tf_paddings_node.attr["value"].tensor.tensor_content, dtype=np.int32).tolist()
inputs.pop(1)
# Get cropping information, but only if paddings is also present.
tf_crops_node = kwargs["tf_nodes"][inputs[1]]
if "crops" in tf_crops_node.name.split('/')[-1] and tf_crops_node.op == "Const":
# Get the second input (crops, since block_shape is already removed)
crops = np.frombuffer(tf_crops_node.attr["value"].tensor.tensor_content, dtype=np.int32)
inputs.pop(1)
paddings_temp = (np.array(paddings_temp) - crops).tolist()
# TF paddings are [[top,bottom], [left,right]], so we need to rearrange.
perm = [0, 2, 1, 3]
# HACK: Sometimes paddings has [0, 0] at the front.
if len(paddings_temp) == 6:
paddings_temp = paddings_temp[2:]
paddings_temp = [paddings_temp[p] for p in perm]
# Symmetric padding ("same")
if paddings_temp[0] == paddings_temp[2] and paddings_temp[1] == paddings_temp[3]:
paddings_temp = paddings_temp[0:2]
padding = paddings_temp if not padding else [p + pt for p, pt in zip(padding, paddings_temp)]
else:
print("Asymmetric padding for dilated convolutions is currently unsupported in the UFF converter.")
uff_graph.conv(
inputs[0], inputs[-1], strides, padding,
dilation=dilation, number_groups=number_groups,
left_format=lhs_fmt, right_format=rhs_fmt,
name=name, fields=fields)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["Conv2DBackpropInput"])
def convert_conv2d_backprop_input(name, tf_node, inputs, uff_graph, **kwargs):
return _conv2d_transpose_helper(name, tf_node, inputs, uff_graph,
func="conv2d_transpose", **kwargs)
def _conv2d_transpose_helper(name, tf_node, inputs, uff_graph, **kwargs):
kwargs.pop("func") # FIXME support depthwise transpose
fmt = convert_to_str(tf_node.attr['data_format'].s)
fmt = fmt if fmt else "NHWC"
strides = tf2uff.get_tf_int_list(tf_node.attr['strides'])
fields = {}
padding = None
number_groups = None
tf_padding = convert_to_str(tf_node.attr['padding'].s)
if tf_padding == "SAME":
fields['implicit_padding'] = "same"
elif tf_padding != "VALID":
raise ValueError("Padding mode %s not supported" % tf_padding)
lhs_fmt = tf2uff.convert_tf2uff_data_format(fmt)
rhs_fmt = '+KC'
if fmt == 'NCHW':
strides = strides[2:]
elif fmt == 'NHWC':
strides = [strides[1], strides[2]]
else:
raise ValueError("Unsupported data format: " + fmt)
uff_graph.conv_transpose(
inputs[2], inputs[1], inputs[0],
strides, padding,
dilation=None, number_groups=number_groups,
left_format=lhs_fmt, right_format=rhs_fmt,
name=name, fields=fields)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["BiasAdd"])
def convert_bias_add(name, tf_node, inputs, uff_graph, **kwargs):
fmt = convert_to_str(tf_node.attr['data_format'].s)
fmt = fmt if fmt else "NHWC"
biases_name = inputs[1]
biases_array = tf2uff.convert_tf2numpy_const_node(
kwargs["tf_nodes"][biases_name])
inputs = inputs[:1]
if fmt == 'NCHW':
ndim = 4
new_shape = [-1] + [1] * (ndim - 2)
biases_array = biases_array.reshape(new_shape)
uff_graph.const(biases_array, biases_name)
uff_graph.binary(inputs[0], biases_name, 'add', name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["FusedBatchNorm"])
def convert_fused_batch_norm(name, tf_node, inputs, uff_graph, **kwargs):
input_node, gamma, beta, mean, variance = inputs
eps = tf_node.attr['epsilon'].f
fmt = convert_to_str(tf_node.attr['data_format'].s)
fmt = fmt if fmt else "NHWC"
data_fmt = tf2uff.convert_tf2uff_data_format(fmt)
uff_graph.batchnorm(input_node, gamma, beta, mean,
variance, eps, data_fmt, name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["StridedSlice"])
def convert_strided_slice(name, tf_node, inputs, uff_graph, **kwargs):
begin_mask = tf_node.attr['begin_mask'].i
end_mask = tf_node.attr['end_mask'].i
shrink_axis_mask = tf_node.attr['shrink_axis_mask'].i
if tf_node.attr['ellipsis_mask'].i != 0:
raise ValueError("ellipsis_mask not supported")
if tf_node.attr['new_axis_mask'].i != 0:
raise ValueError("new_axis_mask not supported")
uff_graph.strided_slice(inputs[0], inputs[1], inputs[2], inputs[3],
begin_mask, end_mask, shrink_axis_mask, name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
def _reduce_helper(name, tf_node, inputs, uff_graph, **kwargs):
func = kwargs.pop("func")
tf_axes_node = kwargs["tf_nodes"][inputs[1]]
array = tf2uff.convert_tf2numpy_const_node(tf_axes_node)
axes = array.tolist()
inputs = inputs[:1]
keepdims = tf_node.attr['keep_dims'].b
print("Warning: keepdims is ignored by the UFF Parser and defaults to True")
uff_graph.reduce(inputs[0], func, axes, keepdims, name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["Sum"])
def convert_sum(name, tf_node, inputs, uff_graph, **kwargs):
return _reduce_helper(name, tf_node, inputs, uff_graph, func="sum", **kwargs)
@tf2uff.register(["Prod"])
def convert_prod(name, tf_node, inputs, uff_graph, **kwargs):
return _reduce_helper(name, tf_node, inputs, uff_graph, func="prod", **kwargs)
@tf2uff.register(["Min"])
def convert_min(name, tf_node, inputs, uff_graph, **kwargs):
return _reduce_helper(name, tf_node, inputs, uff_graph, func="min", **kwargs)
@tf2uff.register(["Max"])
def convert_max(name, tf_node, inputs, uff_graph, **kwargs):
return _reduce_helper(name, tf_node, inputs, uff_graph, func="max", **kwargs)
@tf2uff.register(["Mean"])
def convert_mean(name, tf_node, inputs, uff_graph, **kwargs):
return _reduce_helper(name, tf_node, inputs, uff_graph, func="mean", **kwargs)
@tf2uff.register(["Squeeze"])
def convert_squeeze(name, tf_node, inputs, uff_graph, **kwargs):
axis = tf2uff.get_tf_int_list(tf_node.attr['squeeze_dims'])
uff_graph.squeeze(inputs[0], name=name, axis=axis)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
# TODO: add attributes of MODE / constant_values
@tf2uff.register(["Pad"])
def convert_pad(name, tf_node, inputs, uff_graph, **kwargs):
pad = inputs[1]
uff_graph.pad(inputs[0], pad, name)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["Gather"])
def convert_gather(name, tf_node, inputs, uff_graph, **kwargs):
indices_dtype = tf2uff.convert_tf2numpy_dtype(tf_node.attr['Tindices'].type)
params_dtype = tf2uff.convert_tf2numpy_dtype(tf_node.attr['Tparams'].type)
validate_indices = tf_node.attr['validate_indices'].b
uff_graph.gather(inputs, name, indices_dtype, params_dtype, validate_indices)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["GatherV2"])
def convert_gather_v2(name, tf_node, inputs, uff_graph, **kwargs):
if len(inputs) > 2:
tf_axis_node = kwargs["tf_nodes"][inputs[-1]]
axis = int(tf2uff.convert_tf2numpy_const_node(tf_axis_node))
inputs = inputs[:-1]
else:
axis = 0
indices_dtype = tf2uff.convert_tf2numpy_dtype(tf_node.attr['Tindices'].type)
params_dtype = tf2uff.convert_tf2numpy_dtype(tf_node.attr['Tparams'].type)
uff_graph.gather_v2(inputs, name, axis, indices_dtype, params_dtype)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
@tf2uff.register(["ResourceGather"])
def convert_resource_gather(name, tf_node, inputs, uff_graph, **kwargs):
if len(inputs) > 2:
tf_axis_node = kwargs["tf_nodes"][inputs[-1]]
axis = int(tf2uff.convert_tf2numpy_const_node(tf_axis_node))
inputs = inputs[:-1]
else:
axis = 0
indices_dtype = tf2uff.convert_tf2numpy_dtype(tf_node.attr['Tindices'].type)
params_dtype = tf2uff.convert_tf2numpy_dtype(tf_node.attr['dtype'].type)
uff_graph.gather_v2(inputs, name, axis, indices_dtype, params_dtype)
return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/patched_uff/converter_functions.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Patch UFF only for FasterRCNN.
The background of this patch deserves some explanation. In FasterRCNN models, when exported to
UFF model, the Softmax layer has some issue in UFF converter and cannot be converted successfully.
In the official UFF converter functions, Softmax layer defaults to use axis=0 and
data format = NHWC, i.e, 'N+C', in UFF's notation. While this default settings doesn't work
for FasterRCNN due to the 5D tensor in it. If we dig it deeper, we will found this is due to
some transpose is applied during parsing the Softmax layer and that transpose operation doesn't
support 5D yet in UFF. So to walk around this, we have comed up with a solution. That is, force
the default data format to be NCHW for FasterRCNN model. However, this hack will break the unit
test 'test_3d_softmax' in 'nvdia_tao_tf1/core/export/test_export.py' and the reason is unclear.
Again, to avoid breaking this unit test, we would like to apply this patch not globally,
but rather for FasterRCNN only. For other parts of this repo, they will see the unpatched
UFF package and hence everything goes like before.
The converter_functions.py is copied from the UFF package with some changes of the code in it.
the code style in it is not conforming to the TLT standard. But to avoid confusion, we would
not change the code format in it, and hence we prefer to drop static tests for it in the BUILD file.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import uff as patched_uff
from nvidia_tao_tf1.cv.faster_rcnn.patched_uff import converter_functions
patched_uff.converters.tensorflow.converter_functions = converter_functions
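# Illustrative usage (a sketch; the real call site is the FasterRCNN
# exporter in nvidia_tao_tf1/cv/faster_rcnn/export/exporter.py): importing
# ``patched_uff`` from this package yields the UFF module with the patched
# TensorFlow converter functions installed, e.g.
#
#   from nvidia_tao_tf1.cv.faster_rcnn.patched_uff import patched_uff
#   patched_uff.from_tensorflow(graph_def, output_node_names,
#                               output_filename="model.uff",
#                               text=False, quiet=True)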
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/patched_uff/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FasterRCNN calibrator class based on the tfrecord data loader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import numpy as np
import pycuda.autoinit # noqa pylint: disable=unused-import
import pycuda.driver as cuda
import tensorflow as tf
from nvidia_tao_tf1.cv.common.export.base_calibrator import BaseCalibrator
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.build_dataloader import build_dataloader
from nvidia_tao_tf1.cv.faster_rcnn.utils.utils import get_init_ops
logger = logging.getLogger(__name__)
class FasterRCNNCalibrator(BaseCalibrator):
"""Calibrator class based on data loader."""
def __init__(self, experiment_spec, cache_filename,
n_batches, batch_size,
*args, **kwargs):
"""Init routine.
This inherits from ``nvidia_tao_tf1.cv.common.export.base_calibrator.BaseCalibrator``
to implement the calibration interface that TensorRT needs to
calibrate the INT8 quantization factors. The data source here is assumed
to be the data tensors that are yielded from the dataloader.
Args:
experiment_spec(proto): experiment_spec proto for FasterRCNN.
cache_filename (str): name of calibration file to read/write to.
n_batches (int): number of batches to calibrate over.
batch_size (int): batch size to use for calibration data.
"""
super(FasterRCNNCalibrator, self).__init__(
cache_filename,
n_batches, batch_size,
*args, **kwargs
)
# Instantiate the dataloader.
self.instantiate_data_source(experiment_spec)
# Configure tensorflow before running tensorrt.
self.set_session()
def set_session(self):
"""Simple function to set the tensorflow session."""
# Setting this to minimize the default allocation at import.
gpu_options = tf.compat.v1.GPUOptions(
per_process_gpu_memory_fraction=0.33,
allow_growth=True)
# Configuring tensorflow to use CPU so that it doesn't interfere
# with tensorrt.
device_count = {'GPU': 0, 'CPU': 1}
session_config = tf.compat.v1.ConfigProto(
gpu_options=gpu_options,
device_count=device_count
)
self.session = tf.compat.v1.Session(
config=session_config,
graph=tf.get_default_graph()
)
self.session.run(get_init_ops())
def instantiate_data_source(self, experiment_spec):
"""Simple function to instantiate the data_source of the dataloader.
Args:
experiment_spec: experiment spec proto object.
Returns:
No explicit returns.
"""
dataloader = build_dataloader(
experiment_spec.training_dataset,
experiment_spec.data_augmentation
)
self._data_source, _, num_samples = dataloader.get_dataset_tensors(
self._batch_size,
training=True,
enable_augmentation=False
)
# preprocess images.
self._data_source *= 255.0
image_mean_values = experiment_spec.image_mean_values
if experiment_spec.image_c == 3:
flip_channel = bool(experiment_spec.image_channel_order == 'bgr')
if flip_channel:
perm = tf.constant([2, 1, 0])
self._data_source = tf.gather(self._data_source, perm, axis=1)
image_mean_values = image_mean_values[::-1]
self._data_source -= tf.constant(np.array(image_mean_values).reshape([1, 3, 1, 1]),
dtype=tf.float32)
elif experiment_spec.image_c == 1:
self._data_source -= tf.constant(image_mean_values, dtype=tf.float32)
else:
raise ValueError("Image channel number can only be 1 "
"or 3, got {}.".format(experiment_spec.image_c))
self._data_source /= experiment_spec.image_scaling_factor
logger.info("Number of samples in training dataset: {}".format(num_samples))
def get_data_from_source(self):
"""Simple function to get data from the defined data_source."""
batch = self.session.run(self._data_source)
if batch is None:
raise ValueError(
"Batch wasn't yielded from the data source. You may have run "
"out of batches. Please set the num batches accordingly")
return batch
def get_batch(self, names):
"""Return one batch.
Args:
names (list): list of memory bindings names.
"""
if self._batch_count < self._n_batches:
batch = self.get_data_from_source()
if batch is not None:
if self._data_mem is None:
# 4 bytes per float32.
self._data_mem = cuda.mem_alloc(batch.size * 4)
self._batch_count += 1
# Transfer input data to device.
cuda.memcpy_htod(self._data_mem, np.ascontiguousarray(
batch, dtype=np.float32))
return [int(self._data_mem)]
if self._batch_count >= self._n_batches:
self.session.close()
tf.reset_default_graph()
if self._data_mem is not None:
self._data_mem.free()
return None
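# Illustrative usage (a sketch with hypothetical file names and batch
# settings; BaseCalibrator implements the calibration interface that
# TensorRT drives, as the class docstring above states): during engine
# building TensorRT repeatedly calls get_batch() with the binding names
# until it returns None, roughly:
#
#   calibrator = FasterRCNNCalibrator(experiment_spec, "cal.bin",
#                                     n_batches=10, batch_size=8)
#   while True:
#       bindings = calibrator.get_batch(["input_image"])
#       if bindings is None:
#           break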
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/export/faster_rcnn_calibrator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class to export trained .tlt models to etlt file."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import tempfile
import graphsurgeon as gs
from keras import backend as K
import numpy as np
import onnx
import onnx_graphsurgeon as onnx_gs
import tensorflow as tf
from nvidia_tao_tf1.core.export._onnx import keras_to_onnx
from nvidia_tao_tf1.core.export._uff import keras_to_pb
try:
from nvidia_tao_tf1.cv.common.export.tensorfile_calibrator import TensorfileCalibrator
except Exception as e:
logger = logging.getLogger(__name__)
logger.warning(
"Failed to import TensorRT package, exporting TLT to a TensorRT engine "
"will not be available."
)
from nvidia_tao_tf1.cv.common.export.keras_exporter import KerasExporter as Exporter
from nvidia_tao_tf1.cv.common.utils import decode_to_keras
try:
from nvidia_tao_tf1.cv.faster_rcnn.export.faster_rcnn_calibrator import FasterRCNNCalibrator
except Exception as e:
logger = logging.getLogger(__name__)
logger.warning(
"Failed to import TensorRT package, exporting TLT to a TensorRT engine "
"will not be available."
)
from nvidia_tao_tf1.cv.faster_rcnn.export.utils import (
_delete_td_reshapes,
_onnx_delete_td_reshapes,
_remove_node_input
)
from nvidia_tao_tf1.cv.faster_rcnn.layers.custom_layers import (
CropAndResize, NmsInputs, OutputParser, Proposal,
ProposalTarget, TFReshape,
)
from nvidia_tao_tf1.cv.faster_rcnn.models.utils import build_inference_model
from nvidia_tao_tf1.cv.faster_rcnn.patched_uff import patched_uff
from nvidia_tao_tf1.cv.faster_rcnn.qat._quantized import check_for_quantized_layers, \
process_quantized_layers
from nvidia_tao_tf1.cv.faster_rcnn.spec_loader import spec_loader, spec_wrapper
logger = logging.getLogger(__name__)
class FrcnnExporter(Exporter):
"""Exporter class to export a trained FasterRCNN model."""
def __init__(self,
model_path=None,
key=None,
data_type="fp32",
strict_type=False,
experiment_spec_path="",
backend="uff",
data_format="channels_first",
**kwargs):
"""Instantiate the exporter to export a trained FasterRCNN .tlt model.
Args:
model_path(str): Path to the .tlt model file.
key (str): Key to decode the model.
data_type (str): Backend data-type for the optimized TensorRT engine.
strict_type(bool): Apply TensorRT strict_type_constraints or not for INT8 mode.
experiment_spec_path (str): Path to the experiment spec file.
backend (str): Type of intermediate backend parser to be instantiated.
data_format(str): The keras data format.
"""
super(FrcnnExporter, self).__init__(model_path=model_path,
key=key,
data_type=data_type,
strict_type=strict_type,
backend=backend,
data_format=data_format,
**kwargs)
self.experiment_spec_path = experiment_spec_path
# Exception handling
assert experiment_spec_path is not None, \
"Experiment spec file should not be None when exporting a FasterRCNN model."
assert os.path.isfile(self.experiment_spec_path), \
"Experiment spec file not found at {}".format(self.experiment_spec_path)
assert os.path.isfile(model_path), \
"Model to export is not found at {}".format(model_path)
self.spec = None
self.tensor_scale_dict = None
def load_model(self):
"""Simple function to load the FasterRCNN Keras model."""
spec = \
spec_wrapper.ExperimentSpec(spec_loader.load_experiment_spec(self.experiment_spec_path))
if not(spec.image_h > 0 and spec.image_w > 0):
raise(
ValueError(
"Exporting a FasterRCNN model with dynamic input shape is not supported."
)
)
self.spec = spec
K.clear_session()
K.set_learning_phase(0)
force_batch_size = self.static_batch_size
# get the training model
if isinstance(self.key, str):
enc_key = self.key.encode()
else:
enc_key = self.key
train_model = decode_to_keras(self.model_path, enc_key, compile_model=False)
# convert training model to inference model: remove ProposalTarget layer, etc.
if force_batch_size > 0:
proposal_force_bs = force_batch_size
else:
proposal_force_bs = 1
config_override = {'pre_nms_top_N': spec.infer_rpn_pre_nms_top_N,
'post_nms_top_N': spec.infer_rpn_post_nms_top_N,
'nms_iou_thres': spec.infer_rpn_nms_iou_thres,
'bs_per_gpu': proposal_force_bs}
model = build_inference_model(
train_model,
config_override,
max_box_num=spec.infer_rcnn_post_nms_top_N,
regr_std_scaling=spec.rcnn_regr_std,
iou_thres=spec.infer_rcnn_nms_iou_thres,
score_thres=spec.infer_confidence_thres,
attach_keras_parser=False,
eval_rois=spec.infer_rpn_post_nms_top_N,
force_batch_size=force_batch_size
)
model.summary()
if check_for_quantized_layers(model):
model, self.tensor_scale_dict = process_quantized_layers(model, self.backend)
return model
def save_exported_file(self, model, output_file_name):
"""Save the exported model file.
This routine converts a keras model to onnx/uff model
based on the backend the exporter was initialized with.
Args:
model (keras.model.Model): Decoded keras model to be exported.
output_file_name (str): Path to the output file.
Returns:
tmp_uff_file (str): Path to the temporary uff file.
"""
# find the base feature map (input[0] of CropAndResize)
base_feature_name = None
for kl in model.layers:
if kl.name.startswith('crop_and_resize_'):
inbound_layers = [l for n in kl._inbound_nodes for l in n.inbound_layers]
assert len(inbound_layers) == 3, 'CropAndResize should have exactly 3 inputs.'
if self.backend == "uff":
base_feature_name = inbound_layers[0].output.op.name
else:
base_feature_name = inbound_layers[0].output.name
break
assert (base_feature_name is not None) and (len(base_feature_name) > 0), \
'''Base feature map of the FasterRCNN model cannot be found,
please check if the model is a valid FasterRCNN model.'''
if self.backend == "uff":
os_handle, tmp_pb_file = tempfile.mkstemp()
os.close(os_handle)
custom_objects = {'CropAndResize': CropAndResize,
'TFReshape': TFReshape,
'Proposal': Proposal,
'ProposalTarget': ProposalTarget,
'OutputParser': OutputParser,
"NmsInputs": NmsInputs}
keras_to_pb(model, tmp_pb_file, None, custom_objects=custom_objects)
tf.reset_default_graph()
dynamic_graph = gs.DynamicGraph(tmp_pb_file)
dynamic_graph = self.node_process(dynamic_graph, base_feature_name)
os.remove(tmp_pb_file)
patched_uff.from_tensorflow(dynamic_graph.as_graph_def(),
self.output_node_names,
output_filename=output_file_name,
text=False,
quiet=True)
return output_file_name
if self.backend == "onnx":
os_handle, tmp_onnx_file = tempfile.mkstemp()
os.close(os_handle)
custom_objects = {'CropAndResize': CropAndResize,
'TFReshape': TFReshape,
'Proposal': Proposal,
'ProposalTarget': ProposalTarget,
'OutputParser': OutputParser,
"NmsInputs": NmsInputs}
keras_to_onnx(model,
tmp_onnx_file,
custom_objects=custom_objects,
target_opset=self.target_opset)
tf.reset_default_graph()
onnx_model = onnx.load(tmp_onnx_file)
os.remove(tmp_onnx_file)
new_onnx_model = self.onnx_node_process(
onnx_model,
base_feature_name
)
onnx.save(new_onnx_model, output_file_name)
return output_file_name
raise NotImplementedError("Invalid backend provided. {}".format(self.backend))
def set_input_output_node_names(self):
"""Set input output node names."""
if self.backend == "uff":
self.input_node_names = ["input_image"]
else:
self.input_node_names = [""]
self.output_node_names = ["NMS"]
def _get_node_by_name(self, onnx_graph, node_name):
nodes = [n for n in onnx_graph.nodes if n.name == node_name]
assert len(nodes) == 1, (
"Expect only 1 node of the name: {}, got {}".format(node_name, len(nodes))
)
return nodes[0]
def _get_node_by_output_name(self, onnx_graph, output_name):
nodes = [n for n in onnx_graph.nodes if n.outputs[0].name == output_name]
assert len(nodes) == 1, (
"Expect only 1 node of the name: {}, got {}".format(output_name, len(nodes))
)
return nodes[0]
def _get_node_by_op(self, onnx_graph, op_name):
nodes = [n for n in onnx_graph.nodes if n.op == op_name]
assert len(nodes) == 1, (
"Expect only 1 node of the op: {}, got {}".format(op_name, len(nodes))
)
return nodes[0]
def onnx_node_process(self, onnx_graph, base_feature_name):
"""Manipulating the onnx graph for plugins."""
graph = onnx_gs.import_onnx(onnx_graph)
bfn = self._get_node_by_output_name(graph, base_feature_name).name
self._onnx_process_proposal(graph, self.spec)
self._onnx_process_crop_and_resize(graph, self.spec, bfn)
self._onnx_process_nms(graph, self.spec)
_onnx_delete_td_reshapes(graph)
self._fix_paddings(graph)
graph.cleanup().toposort()
# apply a filter to tensor_scale_dict with processed onnx graph
# in case there are some extra tensor scales not present in the
# onnx model
if self.tensor_scale_dict:
retained_tensor_names = []
for n in graph.nodes:
for n_o in n.outputs:
if n_o.name not in retained_tensor_names:
retained_tensor_names.append(n_o.name)
dict_names = list(self.tensor_scale_dict.keys())
for tn in dict_names:
if tn not in retained_tensor_names:
self.tensor_scale_dict.pop(tn)
return onnx_gs.export_onnx(graph)
def _fix_paddings(self, graph):
"""Fix the paddings in onnx graph so it aligns with the Keras patch."""
# third_party/keras/tensorflow_backend.py patched the semantics of
# SAME padding, the onnx model has to align with it.
for node in graph.nodes:
if node.op == "Conv":
# in case of VALID padding, there is no 'pads' attribute
# simply skip it
if node.attrs["auto_pad"] == "VALID":
continue
k = node.attrs['kernel_shape']
g = node.attrs['group']
d = node.attrs['dilations']
# compute the effective kernel size per axis, accounting for dilation
effective_k = [1 + (k[ki] - 1) * d[ki] for ki in range(len(d))]
# (pad_w // 2 , pad_h // 2) == (pad_left, pad_top)
keras_paddings = tuple((ek - 1) // 2 for ek in effective_k)
# (pad_left, pad_top, pad_right, pad_bottom)
if g == 1:
# if it is not VALID, then it has to be NOTSET,
# to enable explicit paddings below
node.attrs["auto_pad"] = "NOTSET"
# only apply this patch for non-group convolutions
node.attrs['pads'] = keras_paddings * 2
elif node.op in ["AveragePool", "MaxPool"]:
# skip VALID padding case.
if node.attrs["auto_pad"] == "VALID":
continue
k = node.attrs['kernel_shape']
# (pad_w // 2 , pad_h // 2) == (pad_left, pad_top)
keras_paddings = tuple((ek - 1) // 2 for ek in k)
# force it to be NOTSET to enable explicit paddings below
node.attrs["auto_pad"] = "NOTSET"
# (pad_left, pad_top, pad_right, pad_bottom)
node.attrs['pads'] = keras_paddings * 2
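# Worked example of the padding arithmetic above: for a Conv node with
# kernel_shape k = [3, 3] and dilations d = [2, 2], the effective kernel
# is 1 + (3 - 1) * 2 = 5 per axis, so keras_paddings = ((5 - 1) // 2,
# (5 - 1) // 2) = (2, 2) and the node gets pads = (2, 2, 2, 2), i.e. two
# pixels on each side, matching the patched Keras SAME semantics.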
def _onnx_process_proposal(self, graph, spec):
for node in graph.nodes:
if node.name == "proposal_1/packed:0_shape":
roi_shape_0_node = node
continue
if node.name.startswith("proposal_1") or node.name.startswith("_proposal_1"):
node.outputs.clear()
rpn_out_regress_node = self._get_node_by_name(
graph, "rpn_out_regress"
)
# reconnect rois shape[0] (batch size) to an existing node
# e.g., here: rpn_out_regress_node
if self.static_batch_size <= 0:
roi_shape_0_node.inputs = [rpn_out_regress_node.outputs[0]]
rpn_out_class_node = self._get_node_by_name(
graph, "rpn_out_class"
)
proposal_out = onnx_gs.Variable(
"proposal_out",
dtype=np.float32
)
proposal_attrs = dict()
proposal_attrs["input_height"] = int(spec.image_h)
proposal_attrs["input_width"] = int(spec.image_w)
proposal_attrs["rpn_stride"] = int(spec.rpn_stride)
proposal_attrs["roi_min_size"] = 1.0
proposal_attrs["nms_iou_threshold"] = spec.infer_rpn_nms_iou_thres
proposal_attrs["pre_nms_top_n"] = int(spec.infer_rpn_pre_nms_top_N)
proposal_attrs["post_nms_top_n"] = int(spec.infer_rpn_post_nms_top_N)
proposal_attrs["anchor_sizes"] = spec.anchor_sizes
proposal_attrs["anchor_ratios"] = spec.anchor_ratios
Proposal_plugin = onnx_gs.Node(
op="ProposalDynamic",
name="proposal",
inputs=[
rpn_out_class_node.outputs[0],
rpn_out_regress_node.outputs[0]
],
outputs=[proposal_out],
attrs=proposal_attrs
)
roi_reshape_node = self._get_node_by_name(
graph,
"nms_inputs_1/Reshape_reshape"
)
roi_reshape_node.inputs = [Proposal_plugin.outputs[0], roi_reshape_node.inputs[1]]
graph.nodes.append(Proposal_plugin)
graph.cleanup().toposort()
# insert missing Sigmoid node for rpn_out_class
sigmoid_output = onnx_gs.Variable(
"sigmoid_output",
dtype=np.float32
)
rpn_class_sigmoid_node = onnx_gs.Node(
op="Sigmoid",
name="rpn_out_class/Sigmoid",
inputs=rpn_out_class_node.outputs,
outputs=[sigmoid_output]
)
Proposal_plugin.inputs = [sigmoid_output, Proposal_plugin.inputs[1]]
graph.nodes.append(rpn_class_sigmoid_node)
graph.cleanup().toposort()
def _onnx_process_crop_and_resize(self, graph, spec, base_feature_name):
pool_size = spec.roi_pool_size
if spec.roi_pool_2x:
pool_size *= 2
# crop_and_resize plugin
base_feature_node = self._get_node_by_name(graph, base_feature_name)
crop_and_resize_out = onnx_gs.Variable(
"crop_and_resize_out",
dtype=np.float32
)
crop_and_resize_attrs = dict()
crop_and_resize_attrs["crop_height"] = pool_size
crop_and_resize_attrs["crop_width"] = pool_size
Proposal_plugin = self._get_node_by_op(graph, "ProposalDynamic")
CropAndResize_plugin = onnx_gs.Node(
op="CropAndResizeDynamic",
name="CropAndResize_plugin",
inputs=[
base_feature_node.outputs[0],
Proposal_plugin.outputs[0]
],
outputs=[crop_and_resize_out],
attrs=crop_and_resize_attrs
)
graph.nodes.append(CropAndResize_plugin)
crop_and_resize_old_output_node = self._get_node_by_name(
graph,
"crop_and_resize_1/Reshape_1_reshape"
)
CropAndResize_plugin.outputs = crop_and_resize_old_output_node.outputs
for node in graph.nodes:
if (
node.name.startswith("crop_and_resize_1") or
node.name.startswith("_crop_and_resize_1")
):
# fix corner case for googlenet where the next pooling
# somehow has name with crop_and_resize_1 in it
if "pooling" not in node.name:
node.outputs.clear()
graph.cleanup().toposort()
def _onnx_process_nms(self, graph, spec):
prior_data_node = self._get_node_by_name(graph, "nms_inputs_1/prior_data_concat")
loc_data_node = self._get_node_by_name(graph, "nms_inputs_1/loc_data_reshape")
conf_data_node = self._get_node_by_name(graph, "nms_inputs_1/conf_data_reshape")
nms_out = onnx_gs.Variable(
"nms_out",
dtype=np.float32
)
nms_out_1 = onnx_gs.Variable(
"nms_out_1",
dtype=np.float32
)
nms_attrs = dict()
nms_attrs["shareLocation"] = 0
nms_attrs["varianceEncodedInTarget"] = 1
nms_attrs["backgroundLabelId"] = spec.num_classes - 1
nms_attrs["confidenceThreshold"] = self.spec.infer_confidence_thres
nms_attrs["nmsThreshold"] = spec.infer_rcnn_nms_iou_thres
nms_attrs["topK"] = spec.infer_rpn_post_nms_top_N
nms_attrs["codeType"] = 1
nms_attrs["keepTopK"] = spec.infer_rcnn_post_nms_top_N
nms_attrs["numClasses"] = spec.num_classes
nms_attrs["inputOrder"] = [1, 2, 0]
nms_attrs["confSigmoid"] = 0
nms_attrs["isNormalized"] = 1
nms_attrs["scoreBits"] = spec.infer_nms_score_bits
NMS_plugin = onnx_gs.Node(
op="NMSDynamic_TRT",
name="NMS",
inputs=[
prior_data_node.outputs[0],
loc_data_node.outputs[0],
conf_data_node.outputs[0]
],
outputs=[nms_out, nms_out_1],
attrs=nms_attrs
)
graph.nodes.append(NMS_plugin)
# delete reshape op in the TimeDistributed layers
graph.outputs = NMS_plugin.outputs
graph.cleanup().toposort()
def node_process(self, dynamic_graph, base_feature_name):
"""Manipulating the dynamic graph to make it compatible with TRT."""
spec = self.spec
# create TRT plugin nodes
pool_size = spec.roi_pool_size
if spec.roi_pool_2x:
pool_size *= 2
CropAndResize_plugin = \
gs.create_plugin_node(name='roi_pooling_conv_1/CropAndResize_new',
op="CropAndResize",
inputs=[base_feature_name,
'proposal'],
crop_height=pool_size,
crop_width=pool_size)
Proposal_plugin = \
gs.create_plugin_node(name='proposal',
op='Proposal',
inputs=['rpn_out_class/Sigmoid',
'rpn_out_regress/BiasAdd'],
input_height=int(spec.image_h),
input_width=int(spec.image_w),
rpn_stride=int(spec.rpn_stride),
roi_min_size=1.0,
nms_iou_threshold=spec.infer_rpn_nms_iou_thres,
pre_nms_top_n=int(spec.infer_rpn_pre_nms_top_N),
post_nms_top_n=int(spec.infer_rpn_post_nms_top_N),
anchor_sizes=spec.anchor_sizes,
anchor_ratios=spec.anchor_ratios)
# isNormalized is True because the Proposal plugin always normalizes the coordinates
NMS = gs.create_plugin_node(name='NMS', op='NMS_TRT',
inputs=["nms_inputs_1/prior_data",
'nms_inputs_1/loc_data',
'nms_inputs_1/conf_data'],
shareLocation=0,
varianceEncodedInTarget=1,
backgroundLabelId=self.spec.num_classes - 1,
confidenceThreshold=self.spec.infer_confidence_thres,
nmsThreshold=self.spec.infer_rcnn_nms_iou_thres,
topK=self.spec.infer_rpn_post_nms_top_N, # topK as NMS input
codeType=1,
keepTopK=self.spec.infer_rcnn_post_nms_top_N, # NMS output topK
numClasses=self.spec.num_classes,
inputOrder=[1, 2, 0],
confSigmoid=0,
isNormalized=1,
scoreBits=spec.infer_nms_score_bits,
# FasterRCNN takes RoIs as inputs and they
# differ per image, so we should set this to
# False. By default, this parameter is True.
# It was introduced starting from OSS 21.06.
# This issue should only impact UFF, not ONNX.
# see commit: a2b3d3d5cc9cd79c84dffc1b82b5439442cde201
isBatchAgnostic=False)
namespace_plugin_map = {
"crop_and_resize_1" : CropAndResize_plugin,
"proposal_1": Proposal_plugin,
}
# replace Tensorflow op with plugin nodes
dynamic_graph.collapse_namespaces(namespace_plugin_map)
_remove_node_input(dynamic_graph, "roi_pooling_conv_1/CropAndResize_new", 2)
_remove_node_input(dynamic_graph, "proposal", 2)
# delete reshape op in the TimeDistributed layers
_delete_td_reshapes(dynamic_graph)
dynamic_graph.append(NMS)
return dynamic_graph
def set_data_preprocessing_parameters(self, input_dims, image_mean=None):
"""Simple function to set data preprocessing parameters."""
# In FasterRCNN, we have configurable image scaling, means and channel order
# setup image scaling factor and per-channel mean values
image_config = self.spec.model_config.input_image_config
scale = 1.0 / image_config.image_scaling_factor
means = image_config.image_channel_mean
_num_channels = 3 if (image_config.image_type == 0) else 1
if _num_channels == 3:
means = [means['r'], means['g'], means['b']]
else:
means = [means['l']]
if image_config.image_channel_order == 'bgr':
flip_channel = True
means = means[::-1]
else:
flip_channel = False
self.preprocessing_arguments = {"scale": scale,
"means": means,
"flip_channel": flip_channel}
def get_calibrator(self,
calibration_cache,
data_file_name,
n_batches,
batch_size,
input_dims,
calibration_images_dir=None,
image_mean=None):
"""Simple function to get an int8 calibrator.
Args:
calibration_cache (str): Path to store the int8 calibration cache file.
data_file_name (str): Path to the TensorFile. If the tensorfile doesn't exist
at this path, then one is created with n_batches of tensors, either random or
built from the images in calibration_images_dir, of dimensions
(batch_size,) + (input_dims).
n_batches (int): Number of batches to calibrate the model over.
batch_size (int): Number of input tensors per batch.
input_dims (tuple): Tuple of input tensor dimensions in CHW order.
calibration_images_dir (str): Path to a directory of images to generate the
data_file from.
image_mean (list): Image mean values.
Returns:
calibrator(TensorfileCalibrator or FasterRCNNCalibrator):
an INT8 entropy calibrator instance to calibrate the TensorRT engine.
"""
if data_file_name and os.path.exists(data_file_name):
logger.info("Using existing tensor file for INT8 calibration.")
calibrator = TensorfileCalibrator(data_file_name,
calibration_cache,
n_batches,
batch_size)
elif data_file_name:
if (calibration_images_dir and os.path.exists(calibration_images_dir)):
logger.info("Generating tensor file from image directory and"
" then use the tensor file for INT8 calibration.")
self.generate_tensor_file(data_file_name,
calibration_images_dir,
input_dims,
n_batches=n_batches,
batch_size=batch_size)
calibrator = TensorfileCalibrator(data_file_name,
calibration_cache,
n_batches,
batch_size)
else:
logger.info("Using data loader to generate the data for INT8 calibration.")
# default to the data loader if neither a tensorfile nor calibration images are provided
calibrator = FasterRCNNCalibrator(
self.spec,
calibration_cache,
n_batches,
batch_size)
return calibrator
def get_class_labels(self):
"""Get list of class labels to serialize to a labels.txt file."""
if self.spec is None:
raise AttributeError(
"Experiment spec wasn't loaded. To get class labels "
"please provide the experiment spec file using the -e "
"option.")
target_classes = self.spec.class_mapping.values()
target_classes = sorted(set(target_classes))
target_classes.append('background')
return target_classes
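# Illustrative usage of this class (a sketch with hypothetical paths and
# key; in the product, the export script under
# nvidia_tao_tf1/cv/faster_rcnn/scripts/export.py drives the exporter):
#
#   exporter = FrcnnExporter(model_path="model.tlt",
#                            key="nvidia_tlt",
#                            data_type="fp32",
#                            experiment_spec_path="spec.txt",
#                            backend="onnx")
#   exporter.set_input_output_node_names()
#   model = exporter.load_model()
#   exporter.save_exported_file(model, "model.onnx")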
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/export/exporter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Export a trained FasterRCNN model to an ETLT file for deployment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/export/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for FasterRCNN exporter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from graphsurgeon._utils import _generate_iterable_for_search
import tensorflow as tf
def _string_matches_regex(match_string, regex):
'''Check if a string matches a regular expression.'''
# Check for exact matches.
matches_name = regex == match_string
if matches_name:
return True
# Otherwise, treat as a regex
re_matches = re.match(regex, match_string)
# If we find matches...
if re_matches:
return True
return False
def _regex_list_contains_string(regex_list, match_string):
'''Check if a string matches any regex in the regex list.'''
for regex in regex_list:
if _string_matches_regex(match_string, regex):
return True
return False
def _find_nodes_by_name(graph, name):
'''Find the nodes by the given name.'''
def has_name(node, names):
node_name = node.name
return _regex_list_contains_string(names, node_name)
names = _generate_iterable_for_search(name)
return [node for node in graph._internal_graphdef.node if has_name(node, names)]
def _onnx_find_nodes_by_name(graph, name):
'''Find the nodes by the given name.'''
def has_name(node, names):
node_name = node.name
return _regex_list_contains_string(names, node_name)
names = _generate_iterable_for_search(name)
return [node for node in graph.nodes if has_name(node, names)]
def _remove_nodes(graph, name):
"""delete all the nodes that satisfy a name pattern."""
nodes = _find_nodes_by_name(graph, name)
for n in nodes:
graph.remove(n)
def _remove_node_input(graph, node_name, index):
"""Remove an input of a node."""
node = _find_nodes_by_name(graph, node_name)
assert len(node) == 1, (
"Only one node is expected to have the name: {} got {}".format(node_name, len(node))
)
node = node[0]
del node.input[index]
def _connect_at(dynamic_graph, triple):
'''Connect node_a's output with node_b's input at the correct input index.'''
node_a_name, node_b_name, idx = triple
if node_a_name not in dynamic_graph.node_map[node_b_name].input:
dynamic_graph.node_map[node_b_name].input[idx] = node_a_name
def _search_backward(nodes, graph):
'''Map each node to its nearest ancestor (following first inputs) that is not in the node list.'''
ret = dict()
black_list = [b.name for b in nodes]
for n in nodes:
_n = n
while _n.name in black_list:
_n = graph.node_map[_n.input[0]]
ret[n.name] = _n.name
return ret
def _generate_reshape_key(name):
prefix_idx = name.split('/')[0].split('_')[-1]
if prefix_idx.isdigit():
prefix_idx = int(prefix_idx)
else:
prefix_idx = 0
suffix_idx = name.split('/')[1].split('_')[-1]
if suffix_idx.isdigit():
suffix_idx = int(suffix_idx)
else:
suffix_idx = 0
return prefix_idx * 1000000 + suffix_idx
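# Example of the key above: the name "time_distributed_3/Reshape_2" yields
# prefix_idx = 3 and suffix_idx = 2, hence key 3 * 1000000 + 2 = 3000002,
# so names sort first by the layer index prefix and then by the Reshape
# index within that layer.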
def _select_first_and_last_reshape_op(graph, nodes):
node_names = [n.name for n in nodes]
node_names = sorted(node_names, key=_generate_reshape_key)
irregular_names = []
for n in node_names:
if n.split('/')[0] != "time_distributed_1":
irregular_names.append(n)
else:
break
irregular_names = sorted(irregular_names)
node_names = irregular_names + node_names[len(irregular_names):]
names = []
ret_names = []
prefix = node_names[0].split('/')[0]
idx = 0
while idx < len(node_names):
if node_names[idx].split('/')[0] == prefix:
names.append(node_names[idx])
idx += 1
else:
assert len(names) > 1
ret_names.append(names[0])
ret_names.append(names[-1])
names = []
prefix = node_names[idx].split('/')[0]
continue
if idx == len(node_names):
assert len(names) > 1
ret_names.append(names[0])
ret_names.append(names[-1])
break
return [node for node in graph._internal_graphdef.node if node.name in ret_names]
def _delete_td_reshapes(graph):
'''Delete TimeDistributed reshape operators since they are not supported in TensorRT.'''
pattern = ['time_distributed.*/Reshape.*',
'dense_regress_td.*/Reshape.*',
'dense_class_td.*/Reshape.*']
nodes = _find_nodes_by_name(graph, pattern)
excluded_pattern = 'time_distributed_flatten.*/Reshape_*[0-9]*$'
flatten_nodes = _find_nodes_by_name(graph, excluded_pattern)
if len(flatten_nodes):
assert len(flatten_nodes) == 3, 'flatten_nodes number can only be 0 or 3.'
excluded_node = _find_nodes_by_name(graph, 'time_distributed_flatten.*/Reshape_1$')
assert len(excluded_node) == 1, 'Flatten reshape op number can only be 1.'
nodes = [n for n in nodes if n != excluded_node[0]]
# only retain the first and last Reshape op for each name prefix
shape_consts = [n for n in nodes if n.op == 'Const']
reshape_ops = [n for n in nodes if n.op == 'Reshape']
reshape_ops = _select_first_and_last_reshape_op(graph, reshape_ops)
inputs_map = _search_backward(reshape_ops, graph)
for n in graph._internal_graphdef.node:
if n in shape_consts + reshape_ops:
continue
for idx, i in enumerate(n.input):
n_name = i
if n_name not in inputs_map:
continue
while n_name in inputs_map:
n_name = inputs_map[n_name]
_connect_at(graph, (n_name, n.name, idx))
graph.remove(reshape_ops)
def save_graph_to_pb(graph, save_path):
"""Save a graphdef graph to pb for debug."""
with tf.gfile.FastGFile(save_path, mode='wb') as f:
f.write(graph.SerializeToString())
def _onnx_delete_td_reshapes(graph):
'''Delete TimeDistributed reshape operators since they are not supported in TensorRT.'''
pattern = ['time_distributed.*/Reshape.*',
'dense_regress_td.*/Reshape.*',
'dense_class_td.*/Reshape.*']
nodes = _onnx_find_nodes_by_name(graph, pattern)
excluded_pattern = 'time_distributed_flatten.*/Reshape_*[0-9]*$'
flatten_nodes = _onnx_find_nodes_by_name(graph, excluded_pattern)
flatten_nodes = [n for n in flatten_nodes if n.op == "Reshape"]
if len(flatten_nodes):
assert len(flatten_nodes) == 3, (
'flatten_nodes number can only be 0 or 3, got {}'.format(len(flatten_nodes))
)
excluded_node = _onnx_find_nodes_by_name(graph, 'time_distributed_flatten.*/Reshape_1$')
assert len(excluded_node) == 1, 'Flatten reshape op number can only be 1.'
nodes = [n for n in nodes if n != excluded_node[0]]
# shape_consts = [n for n in nodes if n.op == 'Const']
reshape_ops = [n for n in nodes if n.op == 'Reshape']
for n in reshape_ops:
if n.inputs[0].inputs:
prev_node = n.i()
siblings = [_n for _n in prev_node.outputs[0].outputs if _n != n]
for s in siblings:
s.inputs = n.outputs
prev_node.outputs = n.outputs
n.outputs.clear()
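# Sketch of the rewiring above on a minimal chain (layer names are
# hypothetical): given Dense -> Reshape -> Softmax, setting
# ``prev_node.outputs = n.outputs`` makes Dense write directly into the
# tensor Softmax reads, and clearing the Reshape's own outputs leaves it
# dangling so the caller's graph.cleanup() removes it.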
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/export/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Unit test for FasterRCNN model exporter interfaces.'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pytest
from nvidia_tao_tf1.cv.faster_rcnn.export.exporter import FrcnnExporter
@pytest.fixture()
def _spec_file():
'''default spec file.'''
parent_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
return os.path.join(parent_dir, 'experiment_spec/default_spec_ci.txt')
def test_export_args(_spec_file):
'''test to make sure the exporter raises proper errors when some args are missing.'''
# we are just testing the interfaces, so using spec file as model file
# should be OK
with pytest.raises(AssertionError):
FrcnnExporter(experiment_spec_path=None,
model_path=_spec_file,
key='tlt')
with pytest.raises(AssertionError):
FrcnnExporter(experiment_spec_path='',
model_path=_spec_file,
key='tlt')
with pytest.raises(AssertionError):
FrcnnExporter(experiment_spec_path=_spec_file,
model_path='',
key='tlt')
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/export/tests/test_export_args.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Unit test for FasterRCNN model export that uses data loader to generate calibration data.'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import keras
from keras.layers import Input
import pytest
import tensorflow as tf
from nvidia_tao_tf1.cv.common.utils import encode_from_keras
from nvidia_tao_tf1.cv.faster_rcnn.models.resnets import ResNet
from nvidia_tao_tf1.cv.faster_rcnn.spec_loader import spec_loader, spec_wrapper
backbone_configs = [
(ResNet, 10, False, False),
]
keras.backend.set_image_data_format('channels_first')
output_nodes = ['NMS']
@pytest.fixture()
def _spec_file():
'''default spec file.'''
parent_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
return os.path.join(parent_dir, 'experiment_spec/default_spec_ci.txt')
@pytest.fixture()
def spec(_spec_file):
'''spec.'''
return spec_wrapper.ExperimentSpec(spec_loader.load_experiment_spec(_spec_file))
@pytest.mark.skipif(os.getenv("RUN_ON_CI", "0") == "1", reason="Cannot be run on CI")
@pytest.mark.script_launch_mode('subprocess')
@pytest.mark.parametrize("model_type, nlayers, all_projections, use_pooling",
backbone_configs)
def test_export_data_loader(script_runner, tmpdir, spec, _spec_file, model_type,
nlayers, all_projections, use_pooling):
'''test INT8 export with calibration data.'''
keras.backend.clear_session()
gpu_options = tf.compat.v1.GPUOptions(
per_process_gpu_memory_fraction=0.33,
allow_growth=True
)
device_count = {'GPU': 0, 'CPU': 1}
session_config = tf.compat.v1.ConfigProto(
gpu_options=gpu_options,
device_count=device_count
)
session = tf.compat.v1.Session(config=session_config)
keras.backend.set_session(session)
model = model_type(nlayers, spec.batch_size_per_gpu,
spec.rpn_stride, spec.reg_type,
spec.weight_decay, spec.freeze_bn, spec.freeze_blocks,
spec.dropout_rate, spec.drop_connect_rate,
spec.conv_bn_share_bias, all_projections,
use_pooling, spec.anchor_sizes, spec.anchor_ratios,
spec.roi_pool_size, spec.roi_pool_2x, spec.num_classes,
spec.std_scaling, spec.rpn_pre_nms_top_N, spec.rpn_post_nms_top_N,
spec.rpn_nms_iou_thres, spec.gt_as_roi,
spec.rcnn_min_overlap, spec.rcnn_max_overlap, spec.rcnn_train_bs,
spec.rcnn_regr_std, spec.rpn_train_bs, spec.lambda_rpn_class,
spec.lambda_rpn_regr, spec.lambda_cls_class, spec.lambda_cls_regr,
f"frcnn_{spec._backbone.replace(':', '_')}", tmpdir,
spec.enc_key, spec.lr_scheduler)
img_input = Input(shape=spec.input_dims, name='input_image')
gt_cls_input = Input(shape=(None,), name='input_gt_cls')
gt_bbox_input = Input(shape=(None, 4), name='input_gt_bbox')
model.build_keras_model(img_input, gt_cls_input, gt_bbox_input)
os_handle, tmp_keras_model = tempfile.mkstemp()
os.close(os_handle)
encode_from_keras(model.keras_model, tmp_keras_model, spec.enc_key.encode())
os_handle, tmp_onnx_model = tempfile.mkstemp(suffix=".onnx")
os.close(os_handle)
os.remove(tmp_onnx_model)
# export to etlt model
script = 'nvidia_tao_tf1/cv/faster_rcnn/scripts/export.py'
env = os.environ.copy()
# export in INT8 mode with data loader to generate calibration dataset
os_handle, tmp_cache_file = tempfile.mkstemp()
os.close(os_handle)
os.remove(tmp_cache_file)
os_handle, tmp_data_file = tempfile.mkstemp()
os.close(os_handle)
os.remove(tmp_data_file)
args = ['-m', tmp_keras_model,
'-k', spec.enc_key,
'--experiment_spec', _spec_file,
'-o', tmp_onnx_model,
'--data_type', 'int8',
'--batches', '10',
'--batch_size', '8',
'--cal_data_file', tmp_data_file,
'--cal_cache_file', tmp_cache_file]
keras.backend.clear_session()
ret = script_runner.run(script, env=env, *args)
try:
# this is the only export in this test; check that it produced the model file
assert ret.success
assert os.path.isfile(tmp_onnx_model)
if os.path.exists(tmp_cache_file):
os.remove(tmp_cache_file)
except AssertionError:
raise(AssertionError(ret.stdout + ret.stderr))
finally:
if os.path.exists(tmp_onnx_model):
os.remove(tmp_onnx_model)
if os.path.exists(tmp_keras_model):
os.remove(tmp_keras_model)
if os.path.exists(tmp_cache_file):
os.remove(tmp_cache_file)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/export/tests/test_export_data_loader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Unit test for FasterRCNN model export functionality.'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import keras
from keras.layers import Input
import pytest
import tensorflow as tf
from nvidia_tao_tf1.cv.common.utils import encode_from_keras
from nvidia_tao_tf1.cv.faster_rcnn.models.darknets import DarkNet
from nvidia_tao_tf1.cv.faster_rcnn.models.googlenet import GoogleNet
from nvidia_tao_tf1.cv.faster_rcnn.models.iva_vgg import IVAVGG
from nvidia_tao_tf1.cv.faster_rcnn.models.mobilenet_v1 import MobileNetV1
from nvidia_tao_tf1.cv.faster_rcnn.models.mobilenet_v2 import MobileNetV2
from nvidia_tao_tf1.cv.faster_rcnn.models.resnets import ResNet
from nvidia_tao_tf1.cv.faster_rcnn.models.vgg16 import VGG16
from nvidia_tao_tf1.cv.faster_rcnn.spec_loader import spec_loader, spec_wrapper
backbone_configs_all = [
(ResNet, 10, False, False),
(ResNet, 10, True, True),
(ResNet, 10, False, True),
(ResNet, 18, True, False),
(ResNet, 18, False, False),
(ResNet, 18, True, True),
(ResNet, 18, False, True),
(ResNet, 34, True, False),
(ResNet, 34, False, False),
(ResNet, 34, True, True),
(ResNet, 34, False, True),
(ResNet, 50, True, False),
(ResNet, 50, False, False),
(ResNet, 50, True, True),
(ResNet, 50, False, True),
(ResNet, 101, True, False),
(ResNet, 101, False, False),
(ResNet, 101, True, True),
(ResNet, 101, False, True),
(VGG16, None, False, True),
(IVAVGG, 16, False, True),
(IVAVGG, 19, False, True),
(GoogleNet, None, False, True),
(MobileNetV1, None, False, False),
(MobileNetV2, None, False, False),
(MobileNetV2, None, True, False),
(DarkNet, 19, None, None),
(DarkNet, 53, None, None),
]
backbone_configs_subset = [
(ResNet, 18, True, False),
(ResNet, 50, True, False),
]
if int(os.getenv("TLT_TF_CI_TEST_LEVEL", "0")) > 0:
backbone_configs = backbone_configs_all
else:
backbone_configs = backbone_configs_subset
keras.backend.set_image_data_format('channels_first')
@pytest.fixture()
def _spec_file():
'''default spec file.'''
parent_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
return os.path.join(parent_dir, 'experiment_spec/default_spec_ci.txt')
@pytest.fixture()
def spec(_spec_file):
'''spec.'''
return spec_wrapper.ExperimentSpec(spec_loader.load_experiment_spec(_spec_file))
@pytest.mark.skipif(os.getenv("RUN_ON_CI", "0") == "1", reason="Cannot be run on CI")
@pytest.mark.script_launch_mode('subprocess')
@pytest.mark.parametrize("model_type, nlayers, all_projections, use_pooling",
backbone_configs)
def test_export(script_runner, tmpdir, spec, _spec_file, model_type,
nlayers, all_projections, use_pooling):
'''test to make sure the QAT export works and the exported model can be parsed without issues.'''
keras.backend.clear_session()
gpu_options = tf.compat.v1.GPUOptions(
per_process_gpu_memory_fraction=0.33,
allow_growth=True
)
device_count = {'GPU': 0, 'CPU': 1}
session_config = tf.compat.v1.ConfigProto(
gpu_options=gpu_options,
device_count=device_count
)
session = tf.compat.v1.Session(config=session_config)
keras.backend.set_session(session)
model = model_type(nlayers, spec.batch_size_per_gpu,
spec.rpn_stride, spec.reg_type,
spec.weight_decay, spec.freeze_bn, spec.freeze_blocks,
spec.dropout_rate, spec.drop_connect_rate,
spec.conv_bn_share_bias, all_projections,
use_pooling, spec.anchor_sizes, spec.anchor_ratios,
spec.roi_pool_size, spec.roi_pool_2x, spec.num_classes,
spec.std_scaling, spec.rpn_pre_nms_top_N, spec.rpn_post_nms_top_N,
spec.rpn_nms_iou_thres, spec.gt_as_roi,
spec.rcnn_min_overlap, spec.rcnn_max_overlap, spec.rcnn_train_bs,
spec.rcnn_regr_std, spec.rpn_train_bs, spec.lambda_rpn_class,
spec.lambda_rpn_regr, spec.lambda_cls_class, spec.lambda_cls_regr,
f"frcnn_{spec._backbone.replace(':', '_')}", tmpdir,
spec.enc_key, spec.lr_scheduler,
enable_qat=True)
img_input = Input(shape=spec.input_dims, name='input_image')
gt_cls_input = Input(shape=(None,), name='input_gt_cls')
gt_bbox_input = Input(shape=(None, 4), name='input_gt_bbox')
model.build_keras_model(img_input, gt_cls_input, gt_bbox_input)
os_handle, tmp_keras_model = tempfile.mkstemp()
os.close(os_handle)
encode_from_keras(model.keras_model, tmp_keras_model, spec.enc_key.encode())
os_handle, tmp_onnx_model = tempfile.mkstemp(suffix=".onnx")
os.close(os_handle)
os.remove(tmp_onnx_model)
# export to etlt model
script = 'nvidia_tao_tf1/cv/faster_rcnn/scripts/export.py'
env = os.environ.copy()
# 1. export in FP32 mode
args = ['-m', tmp_keras_model,
'-k', spec.enc_key,
'--experiment_spec', _spec_file,
'-o', tmp_onnx_model]
keras.backend.clear_session()
ret = script_runner.run(script, env=env, *args)
# before abort, remove the created temp files when exception raises
try:
assert ret.success
assert os.path.isfile(tmp_onnx_model)
if os.path.exists(tmp_onnx_model):
os.remove(tmp_onnx_model)
except AssertionError:
# if the script runner failed, the tmp_onnx_model may not be created at all
if os.path.exists(tmp_onnx_model):
os.remove(tmp_onnx_model)
os.remove(tmp_keras_model)
raise(AssertionError(ret.stdout + ret.stderr))
# 2. export in FP16 mode
args = ['-m', tmp_keras_model,
'-k', spec.enc_key,
'--experiment_spec', _spec_file,
'-o', tmp_onnx_model,
'--data_type', 'fp16']
keras.backend.clear_session()
ret = script_runner.run(script, env=env, *args)
try:
assert ret.success
assert os.path.isfile(tmp_onnx_model)
if os.path.exists(tmp_onnx_model):
os.remove(tmp_onnx_model)
except AssertionError:
if os.path.exists(tmp_onnx_model):
os.remove(tmp_onnx_model)
os.remove(tmp_keras_model)
raise(AssertionError(ret.stdout + ret.stderr))
finally:
if os.path.exists(tmp_onnx_model):
os.remove(tmp_onnx_model)
if os.path.exists(tmp_keras_model):
os.remove(tmp_keras_model)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/export/tests/test_export_qat.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Unit test for FasterRCNN model export functionality.'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import keras
from keras.layers import Input
import pytest
import tensorflow as tf
from nvidia_tao_tf1.cv.common.utils import encode_from_keras
from nvidia_tao_tf1.cv.faster_rcnn.models.darknets import DarkNet
from nvidia_tao_tf1.cv.faster_rcnn.models.googlenet import GoogleNet
from nvidia_tao_tf1.cv.faster_rcnn.models.iva_vgg import IVAVGG
from nvidia_tao_tf1.cv.faster_rcnn.models.mobilenet_v1 import MobileNetV1
from nvidia_tao_tf1.cv.faster_rcnn.models.mobilenet_v2 import MobileNetV2
from nvidia_tao_tf1.cv.faster_rcnn.models.resnets import ResNet
from nvidia_tao_tf1.cv.faster_rcnn.models.vgg16 import VGG16
from nvidia_tao_tf1.cv.faster_rcnn.spec_loader import spec_loader, spec_wrapper
backbone_configs_all = [
(ResNet, 10, False, False),
(ResNet, 10, True, True),
(ResNet, 10, False, True),
(ResNet, 18, True, False),
(ResNet, 18, False, False),
(ResNet, 18, True, True),
(ResNet, 18, False, True),
(ResNet, 34, True, False),
(ResNet, 34, False, False),
(ResNet, 34, True, True),
(ResNet, 34, False, True),
(ResNet, 50, True, False),
(ResNet, 50, False, False),
(ResNet, 50, True, True),
(ResNet, 50, False, True),
(ResNet, 101, True, False),
(ResNet, 101, False, False),
(ResNet, 101, True, True),
(ResNet, 101, False, True),
(VGG16, None, False, True),
(IVAVGG, 16, False, True),
(IVAVGG, 19, False, True),
(GoogleNet, None, False, True),
(MobileNetV1, None, False, False),
(MobileNetV2, None, False, False),
(MobileNetV2, None, True, False),
(DarkNet, 19, None, None),
(DarkNet, 53, None, None),
]
backbone_configs_subset = [
(ResNet, 18, True, False),
(ResNet, 50, True, False),
]
if int(os.getenv("TLT_TF_CI_TEST_LEVEL", "0")) > 0:
backbone_configs = backbone_configs_all
else:
backbone_configs = backbone_configs_subset
keras.backend.set_image_data_format('channels_first')
output_nodes = ['NMS']
@pytest.fixture()
def _spec_file():
'''default spec file.'''
parent_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
return os.path.join(parent_dir, 'experiment_spec/default_spec_ci.txt')
@pytest.fixture()
def spec(_spec_file):
'''spec.'''
return spec_wrapper.ExperimentSpec(spec_loader.load_experiment_spec(_spec_file))
@pytest.mark.skipif(os.getenv("RUN_ON_CI", "0") == "1", reason="Cannot be run on CI")
@pytest.mark.script_launch_mode('subprocess')
@pytest.mark.parametrize("model_type, nlayers, all_projections, use_pooling",
backbone_configs)
def test_export(script_runner, tmpdir, spec, _spec_file, model_type,
nlayers, all_projections, use_pooling):
'''test export with FP32/FP16.'''
keras.backend.clear_session()
gpu_options = tf.compat.v1.GPUOptions(
per_process_gpu_memory_fraction=0.33,
allow_growth=True
)
device_count = {'GPU': 0, 'CPU': 1}
session_config = tf.compat.v1.ConfigProto(
gpu_options=gpu_options,
device_count=device_count
)
session = tf.compat.v1.Session(config=session_config)
keras.backend.set_session(session)
model = model_type(nlayers, spec.batch_size_per_gpu,
spec.rpn_stride, spec.reg_type,
spec.weight_decay, spec.freeze_bn, spec.freeze_blocks,
spec.dropout_rate, spec.drop_connect_rate,
spec.conv_bn_share_bias, all_projections,
use_pooling, spec.anchor_sizes, spec.anchor_ratios,
spec.roi_pool_size, spec.roi_pool_2x, spec.num_classes,
spec.std_scaling, spec.rpn_pre_nms_top_N, spec.rpn_post_nms_top_N,
spec.rpn_nms_iou_thres, spec.gt_as_roi,
spec.rcnn_min_overlap, spec.rcnn_max_overlap, spec.rcnn_train_bs,
spec.rcnn_regr_std, spec.rpn_train_bs, spec.lambda_rpn_class,
spec.lambda_rpn_regr, spec.lambda_cls_class, spec.lambda_cls_regr,
f"frcnn_{spec._backbone.replace(':', '_')}", tmpdir,
spec.enc_key, spec.lr_scheduler)
img_input = Input(shape=spec.input_dims, name='input_image')
gt_cls_input = Input(shape=(None,), name='input_gt_cls')
gt_bbox_input = Input(shape=(None, 4), name='input_gt_bbox')
model.build_keras_model(img_input, gt_cls_input, gt_bbox_input)
os_handle, tmp_keras_model = tempfile.mkstemp()
os.close(os_handle)
encode_from_keras(model.keras_model, tmp_keras_model, spec.enc_key.encode())
os_handle, tmp_onnx_model = tempfile.mkstemp(suffix=".onnx")
os.close(os_handle)
os.remove(tmp_onnx_model)
# export to etlt model
script = 'nvidia_tao_tf1/cv/faster_rcnn/scripts/export.py'
env = os.environ.copy()
# 1. export in FP32 mode
args = ['-m', tmp_keras_model,
'-k', spec.enc_key,
'--experiment_spec', _spec_file,
'-o', tmp_onnx_model]
keras.backend.clear_session()
ret = script_runner.run(script, env=env, *args)
# before abort, remove the created temp files when exception raises
try:
assert ret.success
assert os.path.isfile(tmp_onnx_model)
if os.path.exists(tmp_onnx_model):
os.remove(tmp_onnx_model)
except AssertionError:
# if the script runner failed, the tmp_onnx_model may not be created at all
if os.path.exists(tmp_onnx_model):
os.remove(tmp_onnx_model)
os.remove(tmp_keras_model)
raise(AssertionError(ret.stdout + ret.stderr))
# 2. export in FP16 mode
args = ['-m', tmp_keras_model,
'-k', spec.enc_key,
'--experiment_spec', _spec_file,
'-o', tmp_onnx_model,
'--data_type', 'fp16']
keras.backend.clear_session()
ret = script_runner.run(script, env=env, *args)
try:
assert ret.success
assert os.path.isfile(tmp_onnx_model)
if os.path.exists(tmp_onnx_model):
os.remove(tmp_onnx_model)
except AssertionError:
if os.path.exists(tmp_onnx_model):
os.remove(tmp_onnx_model)
os.remove(tmp_keras_model)
raise(AssertionError(ret.stdout + ret.stderr))
finally:
if os.path.exists(tmp_onnx_model):
os.remove(tmp_onnx_model)
if os.path.exists(tmp_keras_model):
os.remove(tmp_keras_model)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/export/tests/test_export.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IVA FasterRCNN Inputs loader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from nvidia_tao_tf1.blocks.multi_source_loader.types.bbox_2d_label import Bbox2DLabel
import nvidia_tao_tf1.core as tao_core
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.build_dataloader import build_dataloader
from nvidia_tao_tf1.cv.faster_rcnn.data_augmentation.augmentation import hflip_bboxes, random_hflip
from nvidia_tao_tf1.cv.faster_rcnn.layers.utils import batch_op, calculate_delta_tf, \
compute_rpn_target_np, iou_tf, \
make_anchors, safe_gather, unique_with_inverse, \
unpad_tf
class InputsLoader(object):
"""Data loader class for FasterRCNN.
A data loader for FasterRCNN that can load TFRecord labels along with images.
    The format of the data source is the same as DetectNet_v2. Although ported from the
    DetectNet_v2 data loader, it has some customizations for FasterRCNN, as below:
    1. We append a background class to the class mapping and assign it the largest class ID.
    2. We support BGR, RGB, and grayscale images as the output of the data loader.
    3. The images before mean subtraction and scaling are in the range 0-255 rather than
    (0, 1) as in DetectNet_v2.
    4. We apply a customizable per-channel mean subtraction to the image, then apply a scaling
    to it (also customizable). The mean values and scaling are configurable in the spec file.
    5. We pad the groundtruth boxes per image to a maximum limit (defaults to 100) in order to
    support multi-batch and multi-GPU training in FasterRCNN.
"""
def __init__(self,
dataset_config,
augmentation_config,
batch_size,
image_channel_num,
image_channel_means,
image_scaling_factor,
flip_channel,
max_objs_per_img=100,
training=True,
enable_augmentation=False,
session=None,
map_to_target_class_names=False,
visualizer=None,
rank=0):
"""Initialize data loader.
Args:
dataset_config(proto): the data source proto, the same as in DetectNet_v2.
augmentation_config(proto): the data augmentation proto, also the same as DetectNet_v2.
batch_size(int): the dataset batch size to output for the data loader.
image_channel_num(int): the image channel number, can be 3(color) or 1(grayscale).
image_channel_means(list of float): per channel image mean values for subtraction.
image_scaling_factor(float): A scaling factor for preprocessing after mean subtraction.
            flip_channel(bool): Whether or not to flip the channels of the image
                (RGB to BGR or vice-versa). The original image channel order is RGB in
                DetectNet_v2; if we want BGR ordering, we need a flip.
max_objs_per_img(int): The maximum number of objects in an image. This is used to pad
the groundtruth box numbers to the same number for batched training.
training(bool): Training phase or test phase.
enable_augmentation(bool): Whether or not to enable data augmentation.
session(Keras session): the Keras(TF) session used to generate the numpy array from TF
tensors for evaluation.
            map_to_target_class_names(bool): Whether or not to apply the map from source class
                names to target class names. This is useful when the detectnet_v2 data loader
                does not handle this properly. Defaults to False.
visualizer(object): The visualizer object.
rank(int): Horovod rank.
"""
dataloader = build_dataloader(
dataset_proto=dataset_config,
augmentation_proto=augmentation_config)
self.images, self.ground_truth_labels, self.num_samples = \
dataloader.get_dataset_tensors(batch_size, training=training,
enable_augmentation=enable_augmentation)
if self.num_samples == 0:
return
cls_mapping_dict = dataset_config.target_class_mapping
self.classes = sorted({str(x) for x in cls_mapping_dict.values()})
# append background class so it maps to the largest number
        assert 'background' not in self.classes, \
            'Cannot have class name "background" for ground truth objects.'
self.classes.append('background')
cls_map = tao_core.processors.LookupTable(
keys=self.classes,
values=list(range(len(self.classes))),
default_value=-1
)
cls_map.build()
# use dynamic shape
self.H = tf.shape(self.images)[2]
self.W = tf.shape(self.images)[3]
# preprocess input.
self.images *= 255.0
# save the original images for ease of testing/debugging
self._original_images = self.images
# do data augmentation if using dynamic shape
if augmentation_config.preprocessing.output_image_min > 0:
flipped_images, is_flipped = random_hflip(
self.images[0, ...],
augmentation_config.spatial_augmentation.hflip_probability,
42
)
self.images = tf.expand_dims(flipped_images, axis=0)
# Vis the augmented images in TensorBoard
if rank == 0 and visualizer is not None:
if visualizer.enabled:
# NHWC format
vis_image = tf.transpose(self.images, (0, 2, 3, 1))
if image_channel_num == 3:
if flip_channel:
perm = tf.constant([2, 1, 0])
self.images = tf.gather(self.images, perm, axis=1)
image_channel_means = image_channel_means[::-1]
self.images -= tf.constant(np.array(image_channel_means).reshape([1, 3, 1, 1]),
dtype=tf.float32)
elif image_channel_num == 1:
self.images -= tf.constant(image_channel_means, dtype=tf.float32)
else:
            raise ValueError('Image channel number can only be 1 '
                             'or 3, got {}.'.format(image_channel_num))
self.images /= image_scaling_factor
gt_boxes = []
gt_classes = []
gt_diff = []
if isinstance(self.ground_truth_labels, list):
for l in self.ground_truth_labels:
obj_id = cls_map(l['target/object_class'])
x1 = tf.clip_by_value(tf.cast(tf.round(l['target/coordinates_x1']), tf.int32), 0,
self.W - 1)
x2 = tf.clip_by_value(tf.cast(tf.round(l['target/coordinates_x2']), tf.int32), 0,
self.W - 1)
y1 = tf.clip_by_value(tf.cast(tf.round(l['target/coordinates_y1']), tf.int32), 0,
self.H - 1)
y2 = tf.clip_by_value(tf.cast(tf.round(l['target/coordinates_y2']), tf.int32), 0,
self.H - 1)
# only select valid labels
select = tf.logical_and(tf.not_equal(obj_id, -1),
tf.logical_and(tf.less(x1, x2), tf.less(y1, y2)))
label = tf.stack([y1, x1, y2, x2], axis=1)
label = tf.boolean_mask(label, select)
obj_id = tf.boolean_mask(obj_id, select)
# pad to the same number of boxes for each image
# such that we can concat and work with batch size > 1
obj_count = tf.shape(label)[0]
# Runtime check that objects number does not exceed this limit
assert_op = tf.debugging.assert_less_equal(
obj_count,
max_objs_per_img,
message=('Maximum number of objects in ' +
'image exceeds the limit ' +
'{}'.format(max_objs_per_img)))
with tf.control_dependencies([assert_op]):
num_pad = tf.maximum(max_objs_per_img - obj_count, 0)
gt_classes.append(tf.pad(obj_id, [(0, num_pad)], constant_values=-1))
gt_boxes.append(tf.pad(label, [(0, num_pad), (0, 0)]))
self.frame_ids = [l['frame/id'] for l in self.ground_truth_labels]
elif isinstance(self.ground_truth_labels, Bbox2DLabel):
source_classes = self.ground_truth_labels.object_class
            # apply source class to target class mapping in case the
            # detectnet_v2 data loader did not apply it. Note that applying
            # the mapping twice is harmless, so we are safe to do this.
if map_to_target_class_names:
target_classes_names = self._map_to_model_target_classes(
source_classes.values,
cls_mapping_dict)
mapped_classes = tf.SparseTensor(
values=cls_map(target_classes_names),
indices=source_classes.indices,
dense_shape=source_classes.dense_shape)
else:
mapped_classes = tf.SparseTensor(
values=cls_map(source_classes.values),
indices=source_classes.indices,
dense_shape=source_classes.dense_shape)
mapped_labels = self.ground_truth_labels._replace(object_class=mapped_classes)
valid_indices = tf.not_equal(mapped_classes.values, -1)
filtered_labels = mapped_labels.filter(valid_indices)
filtered_obj_ids = tf.sparse.reshape(filtered_labels.object_class, [batch_size, -1, 1])
filtered_coords = tf.sparse.reshape(filtered_labels.vertices.coordinates,
[batch_size, -1, 4])
filtered_occlusion = tf.sparse.reshape(
filtered_labels.occlusion,
[batch_size, -1, 1]
)
filtered_coords = tf.sparse.SparseTensor(
values=tf.cast(tf.round(filtered_coords.values), tf.int32),
indices=filtered_coords.indices,
dense_shape=filtered_coords.dense_shape)
filtered_occlusion = tf.sparse.SparseTensor(
values=tf.cast(filtered_occlusion.values, tf.int32),
indices=filtered_occlusion.indices,
dense_shape=filtered_occlusion.dense_shape,
)
labels_all = tf.sparse.concat(
axis=-1,
sp_inputs=[filtered_obj_ids, filtered_occlusion, filtered_coords]
)
labels_split = tf.sparse.split(sp_input=labels_all, num_split=batch_size, axis=0)
labels_split = [tf.sparse.reshape(x, [-1, 6]) for x in labels_split]
labels = [tf.sparse.to_dense(self.get_non_empty_rows_2d_sparse(x))
for x in labels_split]
for l in labels:
obj_id = l[:, 0]
# difficult/occlusion flag
occ_diff = l[:, 1]
x1 = tf.clip_by_value(l[:, 2], 0, self.W - 1)
x2 = tf.clip_by_value(l[:, 4], 0, self.W - 1)
y1 = tf.clip_by_value(l[:, 3], 0, self.H - 1)
y2 = tf.clip_by_value(l[:, 5], 0, self.H - 1)
# only select valid labels
select = tf.logical_and(tf.not_equal(obj_id, -1),
tf.logical_and(tf.less(x1, x2), tf.less(y1, y2)))
label = tf.stack([y1, x1, y2, x2], axis=1)
label = tf.boolean_mask(label, select)
obj_id = tf.boolean_mask(obj_id, select)
occ_diff = tf.boolean_mask(occ_diff, select)
# pad to the same number of boxes for each image
# such that we can concat and work with batch size > 1
obj_count = tf.shape(label)[0]
# Runtime check that objects number does not exceed this limit
assert_op = tf.debugging.assert_less_equal(
obj_count,
max_objs_per_img,
message=('Maximum number of objects in ' +
'image exceeds the limit ' +
'{}'.format(max_objs_per_img)))
with tf.control_dependencies([assert_op]):
num_pad = tf.maximum(max_objs_per_img - obj_count, 0)
gt_classes.append(tf.pad(obj_id, [(0, num_pad)], constant_values=-1))
gt_diff.append(tf.pad(occ_diff, [(0, num_pad)], constant_values=0))
gt_boxes.append(tf.pad(label, [(0, num_pad), (0, 0)]))
self.frame_ids = self.ground_truth_labels.frame_id
else:
raise TypeError('Groundtruth labels must be either list or Bbox2DLabel type.')
self.gt_boxes = tf.cast(tf.stack(gt_boxes, axis=0), tf.float32)
# flip bboxes if using dynamic shape
if augmentation_config.preprocessing.output_image_min > 0:
flipped_boxes = tf.cond(
is_flipped,
true_fn=lambda: hflip_bboxes(self.gt_boxes[0], self.W),
false_fn=lambda: self.gt_boxes[0],
)
self.gt_boxes = tf.expand_dims(flipped_boxes, axis=0)
self.gt_classes = tf.stack(gt_classes, axis=0)
# we encode PASCAL VOC difficult objects as KITTI occlusion field >= 1
self.gt_diff = tf.cast(tf.greater_equal(tf.stack(gt_diff, axis=0), 1), tf.int32)
self.max_objs_per_img = max_objs_per_img
self.session = session
if rank == 0 and visualizer is not None:
if visualizer.enabled:
normalizer = tf.cast(
tf.stack([self.H, self.W, self.H, self.W], axis=0),
tf.float32
)
vis_boxes = self.gt_boxes / normalizer
vis_image = tf.cast(
tf.image.draw_bounding_boxes(vis_image, vis_boxes),
tf.uint8
)
visualizer.image("augmented_images", vis_image, data_format="channels_last")
def generate_rpn_targets(self, rpn_target_generator):
'''Generate the RPN target tensors for training RPN.'''
self.rpn_score_tensor, self.rpn_deltas_tensor = rpn_target_generator(self.gt_boxes)
return self.rpn_score_tensor, self.rpn_deltas_tensor
def get_array(self):
        '''Get numpy arrays from the TF tensors for evaluation.'''
return self.session.run([self.images, self.gt_classes, self.gt_boxes])
def get_array_with_diff(self):
        '''Get numpy arrays from the TF tensors for evaluation, with difficult tag.'''
return self.session.run([self.images, self.gt_classes, self.gt_boxes, self.gt_diff])
def get_array_and_frame_ids(self):
        '''Get the arrays and frame IDs for a batch.'''
return self.session.run([self.frame_ids, self.images, self.gt_classes, self.gt_boxes])
def get_array_diff_and_frame_ids(self):
        '''Get the arrays, difficult tag, and frame IDs for a batch.'''
return self.session.run(
[self.frame_ids, self.images, self.gt_classes, self.gt_boxes, self.gt_diff]
)
def get_non_empty_rows_2d_sparse(self, input_tensor):
"""
Helper function to retrieve non-empty rows of a 2d sparse tensor.
Args:
input_tensor (tf.sparse.SparseTensor): must be 2-D
Returns:
output_tensor (tf.sparse.SparseTensor): output tensor with all rows non-empty
"""
cols = input_tensor.dense_shape[1]
empty_tensor = tf.sparse.SparseTensor(
indices=tf.zeros(dtype=tf.int64, shape=[0, 2]),
values=tf.zeros(dtype=input_tensor.dtype, shape=[0]),
dense_shape=[0, cols])
return tf.cond(tf.equal(tf.size(input_tensor.indices), 0),
true_fn=lambda: empty_tensor,
false_fn=lambda: self._get_non_empty_rows_2d_sparse_non_empty(input_tensor))
def _get_non_empty_rows_2d_sparse_non_empty(self, input_tensor):
"""
Helper function to retrieve non-empty rows of a 2d sparse tensor.
Args:
input_tensor (tf.sparse.SparseTensor): must be 2-D and non-empty
Returns:
output_tensor (tf.sparse.SparseTensor): output tensor with all rows non-empty
"""
old_inds = input_tensor.indices
_, new_rows = tf.unique(old_inds[:, 0], out_idx=tf.int64)
num_new_rows = tf.reduce_max(new_rows) + 1
cols = old_inds[:, 1]
out_tensor = tf.sparse.SparseTensor(
indices=tf.stack([new_rows, cols], axis=1),
values=input_tensor.values,
dense_shape=[num_new_rows, input_tensor.dense_shape[1]])
return out_tensor
def _map_to_model_target_classes(self, src_classes, target_class_mapping):
"""Map object classes as they are defined in the data source to the model target classes.
Args:
src_classes(str Tensor): Source class names.
target_class_mapping(Protobuf map): Map from source class names to target
class names.
Returns
A str Tensor contains the mapped class names.
"""
datasource_target_classes = list(target_class_mapping.keys())
if len(datasource_target_classes) > 0:
mapped_target_classes = list(target_class_mapping.values())
default_value = tf.constant('-1')
lookup = tao_core.processors.LookupTable(
keys=datasource_target_classes,
values=mapped_target_classes,
default_value=default_value
)
return lookup(src_classes)
class RPNTargetGenerator(object):
"""Generate RPN Targets for training RPN heads.
    A class used to generate the target tensors for RPN. Basically, this class partitions the
    anchor boxes into two categories: positive anchors and negative anchors. This partition is by
    IoU between anchor boxes and groundtruth boxes. For each anchor box, if the IoU is larger than
    a specific threshold, we regard it as a positive anchor. If an anchor's IoU with every
    groundtruth box is smaller than a specific threshold (different from the previous threshold),
    we regard this anchor as a negative anchor. This is the forward pairing. But to ensure
    each groundtruth box is matched to at least one anchor box, we also need the backward
    pairing. In the backward pairing, for each groundtruth box, we find the largest IoU with all
    the anchor boxes and the corresponding anchor box. As long as this largest IoU is not zero,
    we regard it as a successful pairing. For this successful pairing, we force-assign the
    corresponding anchor as a positive anchor regardless of its best IoU with all the groundtruth
    boxes (the best IoU can be very low, as long as it is not zero). With this kind of bipartite
    pairing, we can ensure each groundtruth box is paired with at least one anchor box, so that
    RPN and RCNN will not miss any objects in the groundtruth, as long as we use a reasonably
    good anchor design.
"""
def __init__(self, image_w, image_h, rpn_w, rpn_h,
rpn_stride, anchor_sizes, anchor_ratios,
bs_per_gpu, iou_high_thres, iou_low_thres,
rpn_train_bs, max_objs_per_image=100):
'''Initialize the RPNTargetGenerator.
Args:
image_w(int/Tensor): the input image width.
image_h(int/Tensor): the input image height.
rpn_w(int/Tensor): the width of the input feature map of RPN.
rpn_h(int/Tensor): the height of the input feature map of RPN.
            rpn_stride(int): the RPN stride relative to the input image (16).
            anchor_sizes(list): the list of anchor sizes, at input image scale.
            anchor_ratios(list): the list of anchor ratios.
            bs_per_gpu(int): the image batch size per GPU.
iou_high_thres(float): the higher threshold above which we regard anchors as positive.
iou_low_thres(float): the lower threshold below which we regard anchors as negative.
rpn_train_bs(int): the anchor batch size used to train the RPN.
max_objs_per_image(int): the maximum number of groundtruth objects in one image.
'''
self.image_w = tf.cast(image_w, tf.float32)
self.image_h = tf.cast(image_h, tf.float32)
self.rpn_w = rpn_w
self.rpn_h = rpn_h
self.rpn_stride = rpn_stride
self.anchor_sizes = anchor_sizes
self.anchor_ratios = [np.sqrt(ar) for ar in anchor_ratios]
self.num_anchors = len(self.anchor_sizes) * len(self.anchor_ratios)
self.num_anc_ratios = len(self.anchor_ratios)
self.bs_per_gpu = bs_per_gpu
self.iou_high_thres = iou_high_thres
self.iou_low_thres = iou_low_thres
self.rpn_train_bs = rpn_train_bs
self.max_objs_per_image = max_objs_per_image
def generate_anchors(self):
'''Generate anchors and use it later as constants.'''
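        # The grid below places len(anchor_sizes) * len(anchor_ratios) boxes at each
        # RPN cell center ((x + 0.5) * stride, (y + 0.5) * stride), stored flattened
        # as (y1, x1, y2, x2) rows at the input image scale.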
anc_x, anc_y = np.meshgrid(np.arange(self.rpn_w), np.arange(self.rpn_h))
ancs = make_anchors(self.anchor_sizes, self.anchor_ratios).reshape(-1, 2)
anc_pos = self.rpn_stride*(np.stack((anc_x, anc_y), axis=-1) + 0.5)
anc_pos = anc_pos.reshape(self.rpn_h, self.rpn_w, 1, 2)
# (H, W, A, 2)
anc_pos = np.broadcast_to(anc_pos, (self.rpn_h, self.rpn_w, ancs.shape[0], 2))
anc_left_top = anc_pos - ancs/2.0
anc_right_bot = anc_pos + ancs/2.0
# (y1, x1, y2, x2)
full_anc = np.concatenate((anc_left_top[:, :, :, ::-1],
anc_right_bot[:, :, :, ::-1]),
axis=-1)
self.full_anc = full_anc.reshape((-1, 4))
def generate_anchors_tf(self):
"""Generate anchors using tensorflow ops in case input image shape is dynamic."""
anc_x, anc_y = tf.meshgrid(tf.range(tf.cast(self.rpn_w, tf.float32)),
tf.range(tf.cast(self.rpn_h, tf.float32)))
ancs = make_anchors(self.anchor_sizes, self.anchor_ratios).reshape(-1, 2)
anc_pos = self.rpn_stride * (tf.stack([anc_x, anc_y], axis=-1) + 0.5)
anc_pos = tf.reshape(anc_pos, [self.rpn_h, self.rpn_w, 1, 2])
anc_pos = tf.broadcast_to(anc_pos, [self.rpn_h, self.rpn_w, ancs.shape[0], 2])
anc_left_top = anc_pos - (ancs / 2.0)
anc_right_bot = anc_pos + (ancs / 2.0)
full_anc = tf.concat([
anc_left_top[:, :, :, ::-1],
anc_right_bot[:, :, :, ::-1]
],
axis=-1)
self.full_anc = tf.reshape(full_anc, [-1, 4])
def build_rpn_target_batch(self, input_gt_boxes):
'''Batched processing of generating RPN target tensors.
Args:
input_gt_boxes(Tensor): the input groundtruth boxes, shape: (N, G, 4).
Returns:
the RPN target tensors: scores target tensor and deltas target tensor.
shape:
                rpn_scores_gt: (N, 2A, H, W).
                rpn_deltas_gt: (N, 8A, H, W).
'''
rpn_scores_gt, rpn_deltas_gt = batch_op([input_gt_boxes],
self.rpn_target_tf,
self.bs_per_gpu)
return rpn_scores_gt, rpn_deltas_gt
def _rpn_target_tf(self, input_gt_boxes):
        '''Generate RPN target tensors for a single image.
        Since we need to do some padding for each image and the padding sizes differ
        across images, we process a single image here and then batch this set of Ops to
        achieve batched processing. This function implements the anchor pairing step with TF
        Ops to generate the positive and negative anchors, and also assigns the groundtruth
        labels and bbox coordinate deltas to them. For positive anchors, we assign both class
        IDs and bbox coordinate deltas, while for negative anchors we only assign class IDs.
        Below is a quick breakdown of the code:
1. Wrap the numpy anchors as TF constants.
2. Calculate the IoU between anchor boxes and groundtruth boxes.
3. Find the positive anchors and negative anchors by some IoU thresholds.
4. Assign class IDs and(or) bbox deltas to each anchor.
5. For each groundtruth box, find the best anchor that has largest IoU with it,
and make sure the IoU is positive. Then force this anchor to be positive anchor
and assign class IDs and bbox deltas to it.
        6. GT format transformation to make the shape and dimension compatible with that of the
RPN loss function.
Args:
input_gt_boxes(Tensor): the input groundtruth boxes, shape: (G, 4).
Returns:
the RPN scores and deltas target tensors for this single image.
'''
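        # Shape summary for the targets assembled at the end of this function
        # (A = anchors per location, H/W = RPN feature map size): the class target
        # is (2A, H, W) -- a valid-anchor mask stacked on the positive/negative
        # labels -- and the bbox target is (8A, H, W) -- the positive mask tiled
        # 4x, stacked on the regression deltas.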
_ahw = tf.reshape(self.rpn_h*self.rpn_w*self.num_anchors, (1,))
# (H, W, A, 4)
if (
isinstance(self.image_h, int) and
isinstance(self.image_w, int) and
isinstance(self.rpn_h, int) and
isinstance(self.rpn_w, int)
):
# if static shape
self.generate_anchors()
anchors_yxyx = tf.constant(self.full_anc, dtype=tf.float32)
else:
# if dynamic shape
self.generate_anchors_tf()
anchors_yxyx = self.full_anc
valid_anc_mask = tf.logical_and(tf.logical_and(anchors_yxyx[:, 0] >= 0.,
anchors_yxyx[:, 1] >= 0.),
tf.logical_and(anchors_yxyx[:, 2] <= self.image_h - 1.,
anchors_yxyx[:, 3] <= self.image_w - 1.))
# unpad gt boxes and class IDs.
input_gt_boxes, _ = unpad_tf(input_gt_boxes)
ious = iou_tf(anchors_yxyx, input_gt_boxes)
# set IoUs for invalid anchors to 0
ious *= tf.expand_dims(tf.cast(valid_anc_mask, tf.float32), axis=1)
iou_max_over_gt = tf.reduce_max(ious, axis=-1)
positive_anchor_idxs = tf.where(iou_max_over_gt >= self.iou_high_thres)[:, 0]
positive_gt_idxs = tf.cond(tf.size(positive_anchor_idxs) > 0,
true_fn=lambda: tf.argmax(safe_gather(ious,
positive_anchor_idxs),
axis=-1),
false_fn=lambda: tf.constant([], dtype=tf.int64))
positive_gt_idxs_unique, _ = tf.unique(positive_gt_idxs)
negative_anchor_idxs = tf.where(tf.logical_and(iou_max_over_gt <= self.iou_low_thres,
valid_anc_mask))[:, 0]
# build outputs
# positive anchors
def lambda_t1():
return tf.scatter_nd(tf.reshape(tf.cast(positive_anchor_idxs, tf.int32),
(-1, 1)),
tf.ones_like(positive_anchor_idxs, dtype=tf.float32),
_ahw)
def lambda_f1():
return tf.zeros(shape=_ahw, dtype=tf.float32)
y_is_bbox_valid = tf.cond(tf.size(positive_anchor_idxs) > 0,
true_fn=lambda_t1,
false_fn=lambda_f1)
def lambda_t2():
return tf.scatter_nd(tf.reshape(tf.cast(positive_anchor_idxs, tf.int32),
(-1, 1)),
tf.ones_like(positive_anchor_idxs, tf.float32),
_ahw)
def lambda_f2():
return tf.zeros(shape=_ahw, dtype=tf.float32)
y_rpn_overlap = tf.cond(tf.size(positive_anchor_idxs) > 0,
true_fn=lambda_t2,
false_fn=lambda_f2)
def lambda_t3():
return calculate_delta_tf(safe_gather(anchors_yxyx, positive_anchor_idxs),
safe_gather(input_gt_boxes, positive_gt_idxs))
def lambda_f3():
return tf.constant([], dtype=tf.float32)
best_regr = tf.cond(tf.size(positive_anchor_idxs) > 0,
true_fn=lambda_t3,
false_fn=lambda_f3)
def lambda_t4():
return tf.scatter_nd(tf.reshape(tf.cast(positive_anchor_idxs, tf.int32),
(-1, 1)),
best_regr,
tf.stack([_ahw[0], 4]))
def lambda_f4():
return tf.zeros(shape=tf.stack([_ahw[0], 4]), dtype=tf.float32)
y_rpn_regr = tf.cond(tf.size(positive_anchor_idxs) > 0,
true_fn=lambda_t4,
false_fn=lambda_f4)
# negative anchors
def lambda_t5():
return tf.scatter_nd(tf.reshape(tf.cast(negative_anchor_idxs, tf.int32),
(-1, 1)),
tf.ones_like(negative_anchor_idxs, tf.float32),
_ahw)
def lambda_f5():
return tf.zeros(shape=_ahw, dtype=tf.float32)
y_is_bbox_valid_n = tf.cond(tf.size(negative_anchor_idxs) > 0,
true_fn=lambda_t5,
false_fn=lambda_f5)
# either positive or negative anchors are valid for training.
y_is_bbox_valid += y_is_bbox_valid_n
# find best match for missed GT boxes.
# scatter_nd requires shape known at graph define time, so we use
# self.max_objs_per_image and then slice to the actual gt box num
def lambda_t6():
return tf.scatter_nd(tf.reshape(tf.cast(positive_gt_idxs_unique,
tf.int32),
(-1, 1)),
tf.ones_like(positive_gt_idxs_unique, tf.float32),
tf.constant([self.max_objs_per_image]))
def lambda_f6():
return tf.zeros(shape=[self.max_objs_per_image], dtype=tf.float32)
num_anchors_for_bbox = tf.cond(tf.size(positive_gt_idxs_unique) > 0,
true_fn=lambda_t6,
false_fn=lambda_f6)
input_gt_boxes_mask = tf.ones_like(input_gt_boxes[:, 0], dtype=tf.float32)
input_gt_boxes_mask_idxs = tf.where(input_gt_boxes_mask > 0.0)[:, 0]
num_anchors_for_bbox = safe_gather(num_anchors_for_bbox, input_gt_boxes_mask_idxs)
iou_max_over_anc = tf.reduce_max(ious, axis=0)
iou_max_best_anc = tf.argmax(ious, axis=0)
best_dx_for_bbox = calculate_delta_tf(safe_gather(anchors_yxyx, iou_max_best_anc),
input_gt_boxes)
iou_max_best_anc = tf.where(iou_max_over_anc <= 0.0,
-1.0 * tf.ones_like(iou_max_best_anc, tf.float32),
tf.cast(iou_max_best_anc, tf.float32))
unmapped_gt_box_idxs_ = tf.where(tf.logical_and(tf.equal(num_anchors_for_bbox, 0.0),
iou_max_over_anc > 0.0))[:, 0]
unmapped_ancs_for_boxes_ = safe_gather(iou_max_best_anc, unmapped_gt_box_idxs_)
        # As more than one GT box can map to the same anchor, we remove duplicates
        # so that each anchor is mapped to a single box; we use the last box (as in V1)
        # at this moment for ease of testing.
        # Find the last occurrence of each mapped anchor.
unmapped_ancs_for_boxes_rev, _, _orig_idx = \
unique_with_inverse(unmapped_ancs_for_boxes_[::-1])
unmapped_ancs_for_boxes = unmapped_ancs_for_boxes_rev[::-1]
bbox_sel_idx = (tf.shape(unmapped_ancs_for_boxes_)[0] - 1 - _orig_idx)[::-1]
unmapped_gt_box_idxs = safe_gather(unmapped_gt_box_idxs_, bbox_sel_idx)
# short alias
uafb = unmapped_ancs_for_boxes
uafb_int32 = tf.cast(uafb, tf.int32)
def lambda_t7():
return tf.scatter_nd(tf.reshape(uafb_int32, (-1, 1)),
tf.ones_like(uafb, tf.float32),
_ahw)
def lambda_f7():
return tf.zeros(shape=_ahw, dtype=tf.float32)
y_is_bbox_valid_patch = tf.cond(tf.size(uafb) > 0,
true_fn=lambda_t7,
false_fn=lambda_f7)
y_is_bbox_valid = tf.where(y_is_bbox_valid_patch > 0.0,
y_is_bbox_valid_patch,
y_is_bbox_valid)
y_rpn_overlap = tf.where(y_is_bbox_valid_patch > 0.0,
y_is_bbox_valid_patch,
y_rpn_overlap)
def lambda_t8():
return tf.scatter_nd(tf.reshape(uafb_int32, (-1, 1)),
safe_gather(best_dx_for_bbox, unmapped_gt_box_idxs),
tf.stack([_ahw[0], 4]))
def lambda_f8():
return tf.zeros(shape=tf.stack([_ahw[0], 4]), dtype=tf.float32)
y_rpn_regr_patch = tf.cond(tf.size(uafb) > 0,
true_fn=lambda_t8,
false_fn=lambda_f8)
y_rpn_regr = tf.where(tf.tile(tf.reshape(y_is_bbox_valid_patch, (-1, 1)), [1, 4]) > 0.0,
y_rpn_regr_patch,
y_rpn_regr)
# (H, W, A) to (A, H, W)
y_rpn_regr = tf.reshape(y_rpn_regr, (self.rpn_h, self.rpn_w, self.num_anchors*4))
y_rpn_regr = tf.transpose(y_rpn_regr, (2, 0, 1))
y_rpn_overlap = tf.reshape(y_rpn_overlap, (self.rpn_h, self.rpn_w, self.num_anchors))
y_rpn_overlap = tf.transpose(y_rpn_overlap, (2, 0, 1))
y_rpn_overlap_save = y_rpn_overlap
y_is_bbox_valid = tf.reshape(y_is_bbox_valid, (self.rpn_h, self.rpn_w, self.num_anchors))
y_is_bbox_valid = tf.transpose(y_is_bbox_valid, (2, 0, 1))
y_is_bbox_valid_save = y_is_bbox_valid
pos_locs = tf.where(tf.logical_and(tf.equal(y_rpn_overlap, 1.0),
tf.equal(y_is_bbox_valid, 1.0)))
neg_locs = tf.where(tf.logical_and(tf.equal(y_rpn_overlap, 0.0),
tf.equal(y_is_bbox_valid, 1.0)))
num_pos = tf.shape(pos_locs)[0]
pos_limit = tf.floor_div(self.rpn_train_bs, 2)
real_pos_num = tf.minimum(num_pos, pos_limit)
num_pos_delete = num_pos - real_pos_num
delete_idxs = tf.random_shuffle(tf.where(tf.reshape(y_rpn_overlap,
(-1,)))[:, 0])[:num_pos_delete]
def lambda_t9():
return tf.scatter_nd(tf.reshape(tf.cast(delete_idxs, tf.int32), (-1, 1)),
tf.ones_like(delete_idxs, tf.float32),
_ahw)
def lambda_f9():
return tf.zeros(shape=_ahw, dtype=tf.float32)
delete_mask_ = tf.cond(tf.size(delete_idxs) > 0,
true_fn=lambda_t9,
false_fn=lambda_f9)
delete_mask = tf.reshape(1.0 - delete_mask_,
(self.num_anchors, self.rpn_h, self.rpn_w))
y_rpn_overlap = y_rpn_overlap * delete_mask
y_is_bbox_valid = y_is_bbox_valid * delete_mask
num_neg = tf.shape(neg_locs)[0]
neg_limit = tf.minimum(self.rpn_train_bs - real_pos_num, num_neg)
num_neg_delete = num_neg - neg_limit
neg_idxs = tf.where(tf.logical_and(tf.equal(tf.reshape(y_rpn_overlap, (-1,)), 0.0),
tf.equal(tf.reshape(y_is_bbox_valid, (-1,)), 1.0)))[:, 0]
neg_delete_idxs = tf.random_shuffle(neg_idxs)[:num_neg_delete]
def lambda_t10():
return tf.scatter_nd(tf.reshape(tf.cast(neg_delete_idxs, tf.int32), (-1, 1)),
tf.ones_like(neg_delete_idxs, tf.float32),
_ahw)
def lambda_f10():
return tf.zeros(shape=_ahw, dtype=tf.float32)
neg_delete_mask_ = tf.cond(tf.size(neg_delete_idxs) > 0,
true_fn=lambda_t10,
false_fn=lambda_f10)
neg_delete_mask = tf.reshape(1.0 - neg_delete_mask_,
(self.num_anchors, self.rpn_h, self.rpn_w))
y_is_bbox_valid *= neg_delete_mask
rpn_class_targets = tf.concat([y_is_bbox_valid, y_rpn_overlap], axis=0)
y_rpn_overlap_tiled = tf.tile(tf.reshape(y_rpn_overlap,
(self.num_anchors, 1, self.rpn_h, self.rpn_w)),
[1, 4, 1, 1])
y_rpn_overlap_tiled = tf.reshape(y_rpn_overlap_tiled,
(self.num_anchors*4, self.rpn_h, self.rpn_w))
rpn_bbox_targets = tf.concat([y_rpn_overlap_tiled, y_rpn_regr], axis=0)
return (rpn_class_targets, rpn_bbox_targets, ious,
positive_gt_idxs, iou_max_best_anc, anchors_yxyx,
unmapped_gt_box_idxs, unmapped_ancs_for_boxes,
y_is_bbox_valid_save, y_rpn_overlap_save)
def rpn_target_tf(self, input_gt_boxes):
'''Wrapper for _rpn_target_tf, to remove ious output as it is for debug and testing.'''
return self._rpn_target_tf(input_gt_boxes)[0:2]
    def rpn_target_py_func(self, input_gt_boxes):
        '''Numpy (py_func) implementation of the RPN target generator.'''
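        # This numpy path mirrors rpn_target_tf and appears to serve mainly as a
        # reference implementation for testing and validation.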
def py_func_core(input_boxes):
rpn_cls, rpn_deltas = \
compute_rpn_target_np(input_boxes, self.anchor_sizes, self.anchor_ratios,
self.rpn_stride, self.rpn_h, self.rpn_w,
self.image_h, self.image_w,
self.rpn_train_bs, self.iou_high_thres,
self.iou_low_thres)
return rpn_cls[0, ...].astype(np.float32), rpn_deltas[0, ...].astype(np.float32)
        return tf.py_func(py_func_core, [input_gt_boxes], (tf.float32, tf.float32))
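# A minimal usage sketch (not part of the original module; shapes and values are
# illustrative only): build the batched RPN target tensors for padded GT boxes.
if __name__ == '__main__':
    gen = RPNTargetGenerator(image_w=1280, image_h=384, rpn_w=80, rpn_h=24,
                             rpn_stride=16, anchor_sizes=[64., 128., 256.],
                             anchor_ratios=[1., .5, 2.], bs_per_gpu=1,
                             iou_high_thres=0.7, iou_low_thres=0.3,
                             rpn_train_bs=256, max_objs_per_image=100)
    gt_boxes = tf.placeholder(tf.float32, shape=(1, 100, 4), name='gt_boxes')
    rpn_scores_gt, rpn_deltas_gt = gen.build_rpn_target_batch(gt_boxes)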
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/data_loader/inputs_loader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Data loader for FasterRCNN.'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/data_loader/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Unit test for data loader of FasterRCNN.'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/data_loader/tests/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Unit test for RPN target generator.'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import keras.backend as K
import mock
import numpy as np
import tensorflow as tf
from nvidia_tao_tf1.cv.faster_rcnn.data_loader.inputs_loader import RPNTargetGenerator
from nvidia_tao_tf1.cv.faster_rcnn.layers.utils import _compute_rpn_target_np
from nvidia_tao_tf1.cv.faster_rcnn.tests.utils import _take_first_k
from nvidia_tao_tf1.cv.faster_rcnn.utils.utils import get_init_ops
np.random.seed(42)
tf.set_random_seed(42)
class TestRPNTarget(unittest.TestCase):
'''Main class for testing the RPN target tensor generator.'''
def init(self):
'''Initialize.'''
self.image_w = 1280
self.image_h = 384
self.rpn_w = self.image_w // 16
self.rpn_h = self.image_h // 16
self.rpn_stride = 16
self.anchor_sizes = [64., 128., 256.]
self.anchor_ratios = [1., .5, 2.]
self.num_anchors = len(self.anchor_sizes) * len(self.anchor_ratios)
self.bs_per_gpu = 1
self.iou_high_thres = 0.7
self.iou_low_thres = 0.3
self.rpn_train_bs = 256
self.max_objs_per_image = 100
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
K.set_session(tf.Session(config=config))
self.session = K.get_session()
self.rpn_target_gen = RPNTargetGenerator(self.image_w,
self.image_h,
self.rpn_w,
self.rpn_h,
self.rpn_stride,
self.anchor_sizes,
self.anchor_ratios,
self.bs_per_gpu,
self.iou_high_thres,
self.iou_low_thres,
self.rpn_train_bs,
self.max_objs_per_image)
self.input_bboxes = tf.placeholder(tf.float32, shape=(None, 4), name='input_bboxes')
def compute_rpn_target_tf(self):
        '''Compute RPN target via tf Ops.'''
(self.rpn_target_tf_class, self.rpn_target_tf_bbox,
self.ious_tf, self.pos_idx, self.best_anc, self.anc_tf,
self.unmapped_box_tf, self.unmapped_anc_tf,
self.y_is_bbox_valid_save_tf, self.y_rpn_overlap_save_tf) = \
self.rpn_target_gen._rpn_target_tf(self.input_bboxes)
def get_boxes(self):
"""generate boxes as test vectors."""
boxes = np.array(
[
[599.41, 156.40, 629.75, 189.25],
[387.63, 181.54, 423.81, 203.12],
[676.60, 163.95, 688.98, 193.93],
[503.89, 169.71, 590.61, 190.13],
[511.35, 174.96, 527.81, 187.45],
[532.37, 176.35, 542.68, 185.27],
[559.62, 175.83, 575.40, 183.15]
]
)
num_boxes = boxes.shape[0]
all_boxes = np.pad(boxes, ((0, self.max_objs_per_image - num_boxes), (0, 0)))
return np.ascontiguousarray(all_boxes.astype(np.float32))
def test_rpn_target(self):
'''Compare the outputs from tf and numpy.'''
self.init()
# build tf graph
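        # tf.random_shuffle is replaced with identity so the TF path keeps the same
        # anchor sampling order as the numpy reference below (which mocks
        # np.random.choice in the same spirit).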
with mock.patch('tensorflow.random_shuffle', side_effect=tf.identity) \
as _: # noqa pylint: disable = W0612
self.compute_rpn_target_tf()
self.session.run(get_init_ops())
# get a batch with GT
gt_bbox = self.get_boxes()
        # compute in numpy
with mock.patch('numpy.random.choice', side_effect=_take_first_k) \
as _: # noqa pylint: disable=F841, W0612
(rpn_class_np, rpn_bbox_np, ious_np, pos_idx_np, best_anc_np, anc_np,
unmapped_box_np, unmapped_anc_np, y_is_bbox_valid_save_np,
y_rpn_overlap_save_np) = \
_compute_rpn_target_np(
gt_bbox,
self.anchor_sizes,
self.rpn_target_gen.anchor_ratios,
self.rpn_stride,
self.rpn_h,
self.rpn_w,
self.image_h,
self.image_w,
self.rpn_train_bs,
self.iou_high_thres,
self.iou_low_thres
)
# compute in tf
(rpn_class_tf, rpn_bbox_tf, ious_tf, pos_idx_tf, best_anc_tf, anc_tf,
unmapped_box_tf, unmapped_anc_tf, y_is_bbox_valid_save_tf,
y_rpn_overlap_save_tf) = \
self.session.run(
[
self.rpn_target_tf_class,
self.rpn_target_tf_bbox,
self.ious_tf,
self.pos_idx,
self.best_anc,
self.anc_tf,
self.unmapped_box_tf,
self.unmapped_anc_tf,
self.y_is_bbox_valid_save_tf,
self.y_rpn_overlap_save_tf
],
feed_dict={'input_bboxes:0': gt_bbox}
)
        # check
assert np.equal(anc_np, anc_tf[:, (1, 0, 3, 2)]).all()
assert np.equal(ious_np, ious_tf).all()
assert np.equal(pos_idx_tf, pos_idx_np).all()
assert np.equal(best_anc_tf, best_anc_np).all(), \
print(np.amax(ious_tf, axis=0), np.amax(ious_np, axis=0))
assert np.equal(unmapped_box_tf, unmapped_box_np).all()
assert np.equal(unmapped_anc_tf, unmapped_anc_np).all()
bbox_valid_diff = np.where(
y_is_bbox_valid_save_np.reshape(-1) -
y_is_bbox_valid_save_tf.reshape(-1))[0]
assert np.equal(y_is_bbox_valid_save_np, y_is_bbox_valid_save_tf).all(), \
print('bbox_valid_diff: {}, {}'.format(
y_is_bbox_valid_save_np.reshape(-1)[bbox_valid_diff],
y_is_bbox_valid_save_tf.reshape(-1)[bbox_valid_diff]))
assert np.equal(y_rpn_overlap_save_np, y_rpn_overlap_save_tf).all()
rpn_class_np_0 = rpn_class_np[0, 0:self.num_anchors, :, :]
rpn_class_tf_0 = rpn_class_tf[0:self.num_anchors, :, :]
class_diff_0 = np.where(rpn_class_np_0.reshape(-1)-rpn_class_tf_0.reshape(-1))[0]
assert np.allclose(rpn_class_np_0, rpn_class_tf_0, atol=1e-6), \
print('class first half diff: {}, {}'.format(
rpn_class_np_0.reshape(-1)[class_diff_0],
rpn_class_tf_0.reshape(-1)[class_diff_0]))
assert np.equal(rpn_class_np[0, ...], rpn_class_tf).all()
assert np.logical_or(np.equal(rpn_class_np, 0.), np.equal(rpn_class_np, 1.0)).all()
assert np.logical_or(np.equal(rpn_class_tf, 0.), np.equal(rpn_class_tf, 1.0)).all()
assert np.logical_or(np.equal(rpn_bbox_np[0, 0:self.num_anchors*4, :, :], 0),
np.equal(rpn_bbox_np[0, 0:self.num_anchors*4, :, :], 1)).all()
assert np.logical_or(np.equal(rpn_bbox_tf[:self.num_anchors*4, :, :], 0),
np.equal(rpn_bbox_tf[:self.num_anchors*4, :, :], 1)).all()
assert np.equal(rpn_bbox_tf[0:self.num_anchors*4, :, :],
rpn_bbox_np[0, 0:self.num_anchors*4, :, :]).all()
assert np.allclose(rpn_bbox_np[0, ...], rpn_bbox_tf, atol=1e-6)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/data_loader/tests/test_rpn_target.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Patch keras.engine.saving so that we can load pretrained weights for TimeDistributed layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import warnings
import keras
from keras import backend as K
from keras.engine.saving import load_attributes_from_hdf5_group, \
preprocess_weights_for_loading
from keras.layers import TimeDistributed
import numpy as np
logger = logging.getLogger(__name__)
def load_weights_from_hdf5_group_by_name(f, layers, skip_mismatch=False,
reshape=False):
"""Implements name-based weight loading.
(instead of topological weight loading).
Layers that have no matching name are skipped.
# Arguments
f: A pointer to a HDF5 group.
layers: A list of target layers.
skip_mismatch: Boolean, whether to skip loading of layers
where there is a mismatch in the number of weights,
or a mismatch in the shape of the weights.
reshape: Reshape weights to fit the layer when the correct number
of values are present but the shape does not match.
# Raises
ValueError: in case of mismatch between provided layers
and weights file and skip_mismatch=False.
"""
if 'keras_version' in f.attrs:
original_keras_version = f.attrs['keras_version'].decode('utf8')
else:
original_keras_version = '1'
if 'backend' in f.attrs:
original_backend = f.attrs['backend'].decode('utf8')
else:
original_backend = None
# New file format.
layer_names = load_attributes_from_hdf5_group(f, 'layer_names')
# Reverse index of layer name to list of layers with name.
index = {}
for layer in layers:
# If it is a TD layer, try to load weights with the inner layer's weights
if type(layer) == TimeDistributed:
if layer.layer.name:
index.setdefault(layer.layer.name, []).append(layer)
if layer.name:
index.setdefault(layer.name, []).append(layer)
# We batch weight value assignments in a single backend call
# which provides a speedup in TensorFlow.
weight_value_tuples = []
for k, name in enumerate(layer_names):
g = f[name]
weight_names = load_attributes_from_hdf5_group(g, 'weight_names')
weight_values = [np.asarray(g[weight_name]) for weight_name in weight_names]
for layer in index.get(name, []):
symbolic_weights = layer.weights
weight_values = preprocess_weights_for_loading(
layer,
weight_values,
original_keras_version,
original_backend,
reshape=reshape)
if len(weight_values) != len(symbolic_weights):
if skip_mismatch: # noqa pylint: disable = R1724
warnings.warn('Skipping loading of weights for '
'layer {}'.format(layer.name) + ' due to mismatch '
'in number of weights ({} vs {}).'.format(
len(symbolic_weights), len(weight_values)))
continue
else: # noqa pylint: disable = R1724
raise ValueError('Layer #' + str(k) +
' (named "' + layer.name +
'") expects ' +
str(len(symbolic_weights)) +
' weight(s), but the saved weights' +
' have ' + str(len(weight_values)) +
' element(s).')
# Set values.
for i in range(len(weight_values)):
symbolic_shape = K.int_shape(symbolic_weights[i])
if symbolic_shape != weight_values[i].shape:
if skip_mismatch: # noqa pylint: disable = R1724
warnings.warn('Skipping loading of weights for '
'layer {}'.format(layer.name) + ' due to '
'mismatch in shape ({} vs {}).'.format(
symbolic_weights[i].shape,
weight_values[i].shape))
continue
else: # noqa pylint: disable = R1724
raise ValueError('Layer #' + str(k) +
' (named "' + layer.name +
'"), weight ' +
str(symbolic_weights[i]) +
' has shape {}'.format(symbolic_shape) +
', but the saved weight has shape ' +
str(weight_values[i].shape) + '.')
else:
weight_value_tuples.append((symbolic_weights[i],
weight_values[i]))
K.batch_set_value(weight_value_tuples)
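# Usage sketch for the function above (illustrative; assumes an open h5py group
# `g` of saved layer weights):
#   with h5py.File('pretrained.h5', 'r') as h5:
#       g = h5['model_weights'] if 'model_weights' in h5 else h5
#       load_weights_from_hdf5_group_by_name(g, model.layers, skip_mismatch=True)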
def _patch(f):
"""Apply the patches to the function."""
name = f.__name__
    logger.debug('Patching %s', name)
keras.engine.saving.__setattr__(name, f)
def patch():
'''Patch this function.'''
_patch(load_weights_from_hdf5_group_by_name)
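# A minimal usage sketch (illustrative): apply the patch before name-based weight
# loading so that TimeDistributed wrappers can also match weights saved under
# their inner layer's name.
#   from nvidia_tao_tf1.cv.faster_rcnn.patched_keras import saving
#   saving.patch()
#   model.load_weights('pretrained_weights.h5', by_name=True)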
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/patched_keras/saving.py |
tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/faster_rcnn/patched_keras/__init__.py |