python_code | repo_name | file_path
---|---|---|
# Copyright (c) 2017 - 2019, NVIDIA CORPORATION. All rights reserved.
"""Simple Stand-alone inference script for gridbox models trained using TAO."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import os
import time
from google.protobuf.json_format import MessageToDict
from PIL import Image
from tqdm import tqdm
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.common.mlops.wandb import check_wandb_logged_in, initialize_wandb
from nvidia_tao_tf1.cv.detectnet_v2.inferencer.build_inferencer import build_inferencer
from nvidia_tao_tf1.cv.detectnet_v2.postprocessor.bbox_handler import BboxHandler
from nvidia_tao_tf1.cv.detectnet_v2.spec_handler.spec_loader import load_experiment_spec
from nvidia_tao_tf1.cv.detectnet_v2.utilities.constants import valid_image_ext
logger = logging.getLogger(__name__)
def build_command_line_parser(parser=None):
'''Build argparse based command line parser for TLT infer.'''
if parser is None:
parser = argparse.ArgumentParser(description='TLT DetectNet_v2 Inference Tool')
parser.add_argument("-e",
"--experiment_spec",
default=None,
type=str,
help="Path to inferencer spec file.",
required=True)
parser.add_argument('-i',
'--image_dir',
help='The directory of input images or a single image for inference.',
type=str,
default=None,
required=True)
parser.add_argument("-k",
"--key",
default="",
help="Key to load the model.",
type=str,
required=False)
parser.add_argument('-r',
'--results_dir',
help='The directory for the output images and labels. '
'The annotated images are saved in results_dir/images_annotated and '
'the KITTI labels in results_dir/labels.',
type=str,
required=True,
default=None)
parser.add_argument('-b',
'--batch_size',
help='Batch size to be used. '
'If not provided, will use value from the spec file',
type=int,
default=None)
parser.add_argument('-m',
'--model_path',
type=str,
help='Path to the DetectNet_v2 model. '
'If not provided, will use the value from the spec file.')
parser.add_argument('-v',
'--verbosity',
action='store_true',
help="Flag to set for more detailed logs.")
return parser
def parse_command_line(cl_args=None):
"""Parse the command line arguments."""
parser = build_command_line_parser(parser=None)
args = vars(parser.parse_args(cl_args))
return args
def inference_wrapper_batch(inf_config, b_config,
inference_image_root=None,
output_root=None,
verbosity=False,
model_path=None,
batch_size=None,
key=None):
"""Wrapper function running batchwise inference on a directory of images using custom handlers.
Input:
inf_config (InferencerConfig Proto): Inferencer config proto object.
b_config (BBoxerConfig Proto): BBoxer config proto object.
output_root (str): Path to where the output would be stored.
verbosity (bool): Flag to set logger verbosity level.
model_path (str): path to the model
batch_size (int): Batch size to use.
key (str): Key to load the model for inference.
Returns:
No explicit returns.
Outputs:
- kitti labels in output_root/labels
- overlain images in output_root/images_annotated
"""
if not os.path.exists(inference_image_root):
raise ValueError('Invalid infer image root {}'.format(inference_image_root))
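# Note: the config flag is inverted here, so this local variable is True when
# overlays should be rendered and saved.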
disable_overlay = not(b_config.disable_overlay)
# If batch size was passed from argument, use that over spec value
if batch_size:
inf_config.batch_size = batch_size
else:
batch_size = inf_config.batch_size
# If model path was passed from argument, use that over spec value
if model_path:
inf_config.tlt_config.model = model_path
if disable_overlay:
logger.info("Overlain images will be saved in the output path.")
framework, model = build_inferencer(inf_config=inf_config,
key=key,
verbose=verbosity)
bboxer = BboxHandler(save_kitti=b_config.kitti_dump,
image_overlay=disable_overlay,
batch_size=batch_size,
frame_height=inf_config.image_height if inf_config.image_height else 544,
frame_width=inf_config.image_width if inf_config.image_width else 960,
target_classes=inf_config.target_classes,
stride=inf_config.stride if inf_config.stride else 16,
postproc_classes=b_config.postproc_classes if b_config.postproc_classes
else inf_config.target_classes,
classwise_cluster_params=b_config.classwise_bbox_handler_config,
framework=framework)
# Initialize the network for inference.
model.network_init()
logger.info("Initialized model")
# Preparing list of inference files.
if os.path.isfile(inference_image_root):
infer_files = [os.path.basename(inference_image_root)]
inference_image_root = os.path.dirname(inference_image_root)
elif os.path.isdir(inference_image_root):
infer_files = [images for images in sorted(os.listdir(inference_image_root))
if os.path.splitext(images)[1].lower() in valid_image_ext]
else:
raise IOError("Invalid input type given for the -i flag. {}".format(inference_image_root))
linewidth = b_config.overlay_linewidth
# Setting up directories for outputs, including crops, labels and annotated images.
output_image_root = os.path.join(output_root, 'images_annotated')
output_label_root = os.path.join(output_root, 'labels')
logger.info('Commencing inference')
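# Split the file list into chunks of batch_size images and run inference chunk by chunk.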
for chunk in tqdm([infer_files[x:x+batch_size] for x in range(0, len(infer_files),
batch_size)]):
pil_list = []
time_start = time.time()
# Preparing the chunk of images for inference
for file_name in chunk:
# By default convert the images to RGB so that the rendered boxes can be
# set to different colors. Input preprocessing is handled in the
# BaseInferencer class.
pil_image = Image.open(os.path.join(inference_image_root,
file_name)).convert("RGB")
pil_list.append(pil_image)
time_end = time.time()
logger.debug("Time lapsed to prepare batch: {}".format(time_end - time_start))
# Predict on a batch of images.
time_start = time.time()
output_inferred, resized_size = model.infer_batch(pil_list)
time_end = time.time()
logger.debug("Time lapsed to infer batch: {}".format(time_end - time_start))
# Post process to obtain detections.
processed_inference = bboxer.bbox_preprocessing(output_inferred)
logger.debug("Preprocessing complete")
classwise_detections = bboxer.cluster_detections(processed_inference)
logger.debug("Classwise_detections")
# Overlaying information after detection.
time_start = time.time()
logger.debug("Postprocessing detections: overlaying, metadata and crops.")
bboxer.render_outputs(classwise_detections,
pil_list,
output_image_root,
output_label_root,
chunk,
resized_size,
linewidth=linewidth)
time_end = time.time()
logger.debug("Time lapsed: {}".format(time_end - time_start))
if framework == "tensorrt":
model.clear_buffers()
model.clear_trt_session()
logger.info("Inference complete")
def main(args=None):
"""Wrapper function for running inference on a single image or collection of images.
Args:
args (list): List of strings used as command-line arguments. If None (default), sys.argv is used.
"""
arguments = parse_command_line(args)
# Setting up logger verbosity.
verbosity = arguments["verbosity"]
info_level = 'INFO'
if verbosity:
info_level = 'DEBUG'
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level=info_level)
results_dir = arguments['results_dir']
if results_dir is not None:
if not os.path.exists(results_dir):
os.makedirs(results_dir)
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=True,
append=False,
verbosity=logger.getEffectiveLevel()
)
)
status_logging.get_status_logger().write(
data=None,
status_level=status_logging.Status.STARTED,
message="Starting DetectNet_v2 Inference"
)
inference_spec = load_experiment_spec(spec_path=arguments['experiment_spec'],
merge_from_default=False,
validation_schema="inference")
inferencer_config = inference_spec.inferencer_config
bbox_handler_config = inference_spec.bbox_handler_config
wandb_logged_in = check_wandb_logged_in()
if bbox_handler_config.HasField("wandb_config"):
wandb_config = bbox_handler_config.wandb_config
wandb_name = f"{wandb_config.name}" if wandb_config.name \
else "detectnet_v2_inference"
wandb_stream_config = MessageToDict(
inference_spec,
preserving_proto_field_name=True,
including_default_value_fields=True
)
initialize_wandb(
project=wandb_config.project if wandb_config.project else None,
entity=wandb_config.entity if wandb_config.entity else None,
config=wandb_stream_config,
notes=wandb_config.notes if wandb_config.notes else None,
tags=wandb_config.tags if wandb_config.tags else None,
sync_tensorboard=False,
save_code=False,
results_dir=results_dir,
wandb_logged_in=wandb_logged_in,
name=wandb_name
)
inference_wrapper_batch(inferencer_config, bbox_handler_config,
inference_image_root=arguments['image_dir'],
output_root=results_dir,
verbosity=verbosity,
model_path=arguments['model_path'],
key=arguments['key'],
batch_size=arguments['batch_size'])
if __name__ == "__main__":
try:
main()
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Inference finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Inference was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/scripts/inference.py |
# Copyright (c) 2017 - 2019, NVIDIA CORPORATION. All rights reserved.
"""Simple standalone script to evaluate a gridbox model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import os
import tensorflow as tf
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.detectnet_v2.evaluation.build_evaluator import (
build_evaluator_for_trained_gridbox
)
from nvidia_tao_tf1.cv.detectnet_v2.model.build_model import get_base_model_config
from nvidia_tao_tf1.cv.detectnet_v2.spec_handler.spec_loader import load_experiment_spec
from nvidia_tao_tf1.cv.detectnet_v2.training.utilities import (
get_singular_monitored_session,
setup_keras_backend
)
from nvidia_tao_tf1.cv.detectnet_v2.utilities.timer import time_function
logger = logging.getLogger(__name__)
def build_command_line_parser(parser=None):
"""Build the command line parser using argparse.
Args:
parser (subparser): Provided from the wrapper script to build a chained
parser mechanism.
Returns:
parser
"""
if parser is None:
parser = argparse.ArgumentParser(
prog='evaluate', description='Evaluate a DetectNet_v2 model.'
)
parser.add_argument(
'-e',
'--experiment_spec',
type=str,
required=True,
help='Absolute path to a single file containing a complete Experiment prototxt.')
parser.add_argument(
'-m',
'--model_path',
type=str,
help='Path to the .tlt model file or tensorrt engine file under evaluation.',
required=True)
parser.add_argument(
'-v',
'--verbose',
action='store_true',
help='Include this flag in command line invocation for verbose logs.'
)
parser.add_argument(
'-r',
'--results_dir',
type=str,
default=None,
help="Path to report the mAP and logs file."
)
parser.add_argument(
'--use_training_set',
action='store_true',
help='Set this flag to evaluate over entire tfrecord and not just validation fold or '
'the validation data source mentioned in the spec file.'
)
parser.add_argument(
'-k',
'--key',
required=False,
help="Key to load the tlt model.",
default=""
)
parser.add_argument(
'-f',
'--framework',
help="The backend framework to be used.",
choices=["tlt", "tensorrt"],
default="tlt"
)
# Dummy arguments for Deploy
parser.add_argument(
'-i',
'--image_dir',
type=str,
required=False,
default=None,
help=argparse.SUPPRESS
)
parser.add_argument(
'-l',
'--label_dir',
type=str,
required=False,
help=argparse.SUPPRESS
)
parser.add_argument(
'-b',
'--batch_size',
type=int,
required=False,
default=1,
help=argparse.SUPPRESS
)
return parser
def parse_command_line(args=None):
"""Simple function to parse command line arguments.
Args:
args (list): List of strings used as command line arguments.
If None, sys.argv is used.
Returns:
args_parsed: Parsed arguments.
"""
parser = build_command_line_parser()
args_parsed = parser.parse_args(args)
return args_parsed
@time_function(__name__)
def main(cl_args=None):
"""
Prepare and run gridbox evaluation process.
Args:
cl_args (list): list of strings used as command-line arguments to the script.
If None (default), arguments will be parsed from sys.argv.
Raises:
IOError if the specified experiment spec file doesn't exist.
"""
args_parsed = parse_command_line(cl_args)
# Setting logger configuration
verbosity = 'INFO'
verbose = args_parsed.verbose
if verbose:
verbosity = "DEBUG"
# Configure logging to get Maglev log messages.
logging.basicConfig(format='%(asctime)s [%(levelname)s] '
'%(name)s: %(message)s',
level=verbosity)
# Defining the results directory.
results_dir = args_parsed.results_dir
if results_dir is not None:
if not os.path.exists(results_dir):
os.makedirs(results_dir)
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=True,
append=False,
verbosity=logger.getEffectiveLevel()
)
)
status_logging.get_status_logger().write(
data=None,
status_level=status_logging.Status.STARTED,
message="Starting DetectNet_v2 Evaluation"
)
# Check that the experiment spec file exists and parse it.
experiment_spec_file = args_parsed.experiment_spec
if not os.path.exists(experiment_spec_file):
raise IOError("The specified experiment file doesn't exist: %s" %
experiment_spec_file)
logger.debug("Setting up experiment from experiment specs.")
experiment_spec = load_experiment_spec(
experiment_spec_file, merge_from_default=False,
validation_schema="train_val")
# Extract core model config, which might be wrapped inside a TemporalModelConfig.
model_config = get_base_model_config(experiment_spec)
# Set up Keras backend with correct computation precision and learning phase.
setup_keras_backend(model_config.training_precision, is_training=False)
# Expand and validate model file argument.
model_path = args_parsed.model_path
# Build the gridbox evaluator for Keras models.
use_training_set = args_parsed.use_training_set
logger.debug("Constructing evaluator.")
framework = args_parsed.framework
evaluator = build_evaluator_for_trained_gridbox(experiment_spec=experiment_spec,
model_path=model_path,
use_training_set=use_training_set,
use_confidence_models=False,
key=args_parsed.key,
framework=framework)
# Run validation.
logger.debug("Running evaluation session.")
with get_singular_monitored_session(evaluator.keras_models,
session_config=evaluator.get_session_config()) as session:
metrics_results, validation_cost, median_inference_time = \
evaluator.evaluate(session.raw_session())
evaluator.print_metrics(
metrics_results, validation_cost, median_inference_time)
logger.info("Evaluation complete.")
if __name__ == "__main__":
try:
main()
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Evaluation finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Evaluation was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
if type(e) == tf.errors.ResourceExhaustedError:
logger.error(
"Ran out of GPU memory, please lower the batch size, use a smaller input "
"resolution, or use a smaller backbone."
)
status_logging.get_status_logger().write(
message="Ran out of GPU memory, please lower the batch size, use a smaller input "
"resolution, or use a smaller backbone.",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
exit(1)
else:
# Re-raise the error as-is if it is not an OOM error.
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/scripts/evaluate.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""Tools to convert datasets into .tfrecords."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/entrypoint/__init__.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""TLT command line wrapper to invoke CLI scripts."""
import sys
from nvidia_tao_tf1.cv.common.entrypoint.entrypoint import launch_job
import nvidia_tao_tf1.cv.detectnet_v2.scripts
def main():
"""Function to launch the job."""
launch_job(nvidia_tao_tf1.cv.detectnet_v2.scripts, "detectnet_v2", sys.argv[1:])
if __name__ == "__main__":
main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/entrypoint/detectnet_v2.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model template definitions. One model per file in this directory."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/model/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GridboxModel class that takes care of constructing, training and validating a model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import logging
from math import ceil
import keras
from keras.layers import Input
from keras.models import Model
import tensorflow as tf
import nvidia_tao_tf1.core
from nvidia_tao_tf1.core.export._quantized import check_for_quantized_layers
from nvidia_tao_tf1.core.models.quantize_keras_model import create_quantized_keras_model
from nvidia_tao_tf1.core.templates.darknet import DarkNet
from nvidia_tao_tf1.core.templates.efficientnet import EfficientNetB0
from nvidia_tao_tf1.core.templates.googlenet import GoogLeNet
from nvidia_tao_tf1.core.templates.mobilenet import MobileNet, MobileNetV2
from nvidia_tao_tf1.core.templates.resnet import ResNet
from nvidia_tao_tf1.core.templates.squeezenet import SqueezeNet
from nvidia_tao_tf1.core.templates.vgg import VggNet
from nvidia_tao_tf1.cv.common.utils import (
encode_from_keras,
get_num_params
)
from nvidia_tao_tf1.cv.detectnet_v2.model.utilities import get_class_predictions
from nvidia_tao_tf1.cv.detectnet_v2.model.utilities import inference_learning_phase
from nvidia_tao_tf1.cv.detectnet_v2.model.utilities import model_io
from nvidia_tao_tf1.cv.detectnet_v2.objectives.objective_set import build_objective_set
from nvidia_tao_tf1.cv.detectnet_v2.visualization.visualizer import \
DetectNetTBVisualizer as Visualizer
logger = logging.getLogger(__name__)
# Setting up supported feature extractor templates.
SUPPORTED_TEMPLATES = ["resnet", "darknet", "mobilenet_v1", "mobilenet_v2",
"squeezenet", "googlenet", "vgg", "helnet",
"efficientnet_b0"]
class GridboxModel(object):
"""GridboxModel class.
GridboxModel contains functionality for constructing and manipulating a Keras based models
with gridbox head, building training and validation graphs for the model, and visualizing
predictions.
"""
def __init__(self, num_layers, use_pooling, use_batch_norm, dropout_rate,
objective_set_config, activation_config, target_class_names,
freeze_pretrained_layers, allow_loaded_model_modification,
template='resnet', all_projections=True, freeze_blocks=None,
freeze_bn=None, enable_qat=False):
"""Init function.
Args:
num_layers (int): Number of layers for scalable feature extractors.
use_pooling (bool): Whether to add pooling layers to the feature extractor.
use_batch_norm (bool): Whether to add batch norm layers.
dropout_rate (float): Fraction of the input units to drop. 0.0 means dropout is
not used.
objective_set_config (ObjectiveSet proto): The ObjectiveSet configuration proto.
target_class_names (list): A list of target class names.
freeze_pretrained_layers (bool): Prevent updates to pretrained layers' parameters.
allow_loaded_model_modification (bool): Allow loaded model modification.
template (str): Model template to use for feature extractor.
"""
self.num_layers = num_layers
self.use_pooling = use_pooling
self.use_batch_norm = use_batch_norm
self.dropout_rate = dropout_rate
self.template = template
self.enable_qat = enable_qat
self.max_batch_size = None
# Note: the order of target_class_names determines the order of classes in network output.
self.target_class_names = target_class_names
self.objective_set_config = objective_set_config
self.activation_config = activation_config
self.freeze_pretrained_layers = freeze_pretrained_layers
self.freeze_blocks = freeze_blocks
self.freeze_bn = freeze_bn
self.allow_loaded_model_modification = allow_loaded_model_modification
self.constructed = False
self.all_projections = all_projections
def construct_model(self, input_shape, kernel_regularizer=None, bias_regularizer=None,
pretrained_weights_file=None, enc_key=None):
"""Create a Keras model with gridbox head.
Args:
input_shape (tuple / list / TensorShape):
model input shape without batch dimension (C, H, W).
kernel_regularizer (keras.regularizers.Regularizer instance):
Regularizer to be applied to convolution kernels.
bias_regularizer (keras.regularizers.Regularizer instance):
Regularizer to be applied to biases.
pretrained_weights_file (str): An optional model weights file to be loaded.
Raises:
NotImplementedError: If pretrained_weights_file is not None.
"""
data_format = 'channels_first'
model = self._construct_feature_extractor(input_shape=input_shape,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer)
# If you have weights you've trained previously, you can load them into this model.
if pretrained_weights_file is not None:
if pretrained_weights_file.endswith(".h5"):
model.load_weights(str(pretrained_weights_file), by_name=True)
else:
loaded_model = model_io(pretrained_weights_file, enc_key=enc_key)
loaded_model_layers = [layer.name for layer in loaded_model.layers]
logger.info("Loading weights from pretrained "
"model file. {}".format(pretrained_weights_file))
for layer in model.layers:
if layer.name in loaded_model_layers:
pretrained_layer = loaded_model.get_layer(layer.name)
weights_pretrained = pretrained_layer.get_weights()
model_layer = model.get_layer(layer.name)
try:
model_layer.set_weights(weights_pretrained)
logger.info(
"Layer {} weights set from pre-trained model.".format(
model_layer.name
)
)
except ValueError:
logger.info("Layer {} weights skipped from pre-trained model.".format(
model_layer.name
)
)
continue
del loaded_model
gc.collect()
model = self._construct_objectives_head(model=model,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer)
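# QAT conversion is skipped for the MobileNet templates; for other templates the model
# is converted to a quantized model only if it has no quantized layers already.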
valid_qat_template = self.template not in [
"mobilenet_v1", "mobilenet_v2"]
no_quantized_layers_in_model = not(check_for_quantized_layers(model))
if self.enable_qat and valid_qat_template:
assert no_quantized_layers_in_model, (
"Model already has quantized layers. Please consider using a non QAT model "
"or set the enable_qat flag in the training config to false."
)
logger.info("Converting the keras model to quantize keras model.")
model = create_quantized_keras_model(model)
self.keras_model = model
self.constructed = True
def _construct_feature_extractor(self, input_shape, data_format, kernel_regularizer=None,
bias_regularizer=None):
"""Generate a keras stride 16 feature extractor model.
Args:
input_shape: model input shape (N,C,H,W). N is ignored.
data_format: Order of the dimensions (C, H, W).
kernel_regularizer: Keras regularizer to be applied to convolution kernels.
bias_regularizer: Keras regularizer to be applied to biases.
pretrained_weights_file: An optional model weights file to be loaded.
Raises:
AssertionError: If the model is already constructed.
Returns:
model (keras.model): The model for feature extraction.
"""
assert not self.constructed, "Model already constructed."
# Define entry points to the model.
assert len(input_shape) == 3
self.input_num_channels = int(input_shape[0])
self.input_height = int(input_shape[1])
self.input_width = int(input_shape[2])
inputs = Input(shape=(self.input_num_channels, self.input_height, self.input_width))
# Set up positional arguments and key word arguments to instantiate feature extractor
# templates.
args = [self.num_layers, inputs]
kwargs = {'use_batch_norm': self.use_batch_norm,
'kernel_regularizer': kernel_regularizer,
'bias_regularizer': bias_regularizer,
'freeze_blocks': self.freeze_blocks,
'freeze_bn': self.freeze_bn}
# Decide feature extractor architecture.
if self.template == "resnet":
model_class = ResNet
kwargs['all_projections'] = self.all_projections
kwargs['use_pooling'] = self.use_pooling
elif self.template == "vgg":
model_class = VggNet
kwargs['use_pooling'] = self.use_pooling
elif self.template == "googlenet":
model_class = GoogLeNet
# Remove nlayers as positional arguments to the googlenet template.
args.pop(0)
elif self.template == "mobilenet_v1":
model_class = MobileNet
kwargs['alpha'] = 1.0
kwargs['depth_multiplier'] = 1
kwargs['dropout'] = self.dropout_rate
kwargs['stride'] = 16
kwargs['add_head'] = False
# Remove nlayers as positional arguments to the googlenet template.
args.pop(0)
elif self.template == "mobilenet_v2":
model_class = MobileNetV2
kwargs['alpha'] = 1.0
kwargs['depth_multiplier'] = 1
kwargs['stride'] = 16
kwargs['add_head'] = False
# Remove nlayers as positional arguments to the googlenet template.
args.pop(0)
elif self.template == "darknet":
model_class = DarkNet
args.pop(1)
kwargs["input_tensor"] = inputs
kwargs['alpha'] = 0.1
elif self.template == "efficientnet_b0":
model_class = EfficientNetB0
kwargs["add_head"] = False
kwargs["input_tensor"] = inputs
kwargs["stride16"] = True
# No positional args are required to generate the
# efficientnet template
while args:
args.pop()
elif self.template == "squeezenet":
model_class = SqueezeNet
kwargs.pop("freeze_bn", None)
kwargs.pop("use_batch_norm", None)
args.pop(0)
else:
error_string = "Unsupported model template: {}.\nPlease choose one" \
"from the following: {}".format(self.template, SUPPORTED_TEMPLATES)
raise NotImplementedError(error_string)
model = model_class(*args, **kwargs)
# Feature extractor output shape.
self.output_height = model.output_shape[2]
self.output_width = model.output_shape[3]
return model
def _construct_objectives_head(self, model, data_format, kernel_regularizer, bias_regularizer):
"""Construct the detector head on top of a feature extractor.
Args:
data_format (str): Order of the dimensions. Set to 'channels_first'.
model (keras.model): Keras model that performs the feature extraction.
kernel_regularizer (keras.regularizers.Regularizer instance):
Regularizer to be applied to convolution kernels.
bias_regularizer (keras.regularizers.Regularizer instance):
Regularizer to be applied to biases.
Returns:
model (keras.model): An end to end keras model, where the gridbox head is
attached to the feature extractor.
"""
# Build the set of objectives (cov, bbox, ...).
self.objective_set = build_objective_set(self.objective_set_config,
self.output_height,
self.output_width,
self.input_height,
self.input_width)
# Construct DNN heads and get their output tensors for predicting the objectives.
num_classes = len(self.target_class_names)
outputs = self.objective_set.construct_outputs(model, num_classes, data_format,
kernel_regularizer, bias_regularizer)
# Construct the complete model.
return Model(inputs=model.inputs, outputs=outputs, name='%s_detectnet_v2' % (model.name))
def predictions_to_dict(self, predictions):
"""Helper for converting Model predictions into a dictionary for easy parsing.
Slices per class predictions to their own dimension.
Args:
predictions: Model predictions list.
Returns:
Dictionary of model predictions indexed objective name.
"""
pred_dict = {}
for objective in self.objective_set.learnable_objectives:
matching_preds = [pred for pred in predictions if 'output_'+objective.name in pred.name]
assert len(matching_preds) < 2, "Ambiguous model predictions %s for objective %s" % \
(matching_preds, objective.name)
assert matching_preds, "Model predictions not found for objective %s" % \
objective.name
# Reshape such that class has its own dimension.
pred = objective.reshape_output(matching_preds[0],
num_classes=len(self.target_class_names))
pred_dict[objective.name] = pred
return pred_dict
def save_model(self, file_name, enc_key=None):
"""Save the model to disk.
Args:
file_name (str): Model file name.
enc_key (str): Key string for encryption.
Raises:
ValueError if postprocessing_config is None but save_metadata is True.
"""
self.keras_model.save(file_name, overwrite=True, include_optimizer=False)
def load_model_weights(self, model_file,
custom_objects=None,
enc_key=None,
input_num_channels=None,
input_height=None,
input_width=None):
"""Load a previously saved TLT model.
Args:
model_file (str): Model file name.
custom_objects (dict): Dictionary for the custom Keras layers in the model.
enc_key (str): Key for decryption.
input_num_channels (int): Number of channels in the input to the model.
input_height (int): Height of the input to the model.
input_width (int): Width of the input to the model.
"""
input_overrides = {input_num_channels, input_height, input_width}
if input_overrides != {None}:
if None in input_overrides:
raise ValueError('GridboxModel.load_model_weights expects either no input / '
'output shape overrides, or all of them to be overridden.')
if model_file.endswith('.h5'):
raise NotImplementedError("Cannot load just weights for a pruned model.")
else:
model = model_io(model_file, enc_key=enc_key)
assert model, "Couldn't load model."
if self.enable_qat:
# Convert loaded gridbox model to a QAT enabled model with
# QuantizedConv2D and QDQ nodes.
assert not(check_for_quantized_layers(model)), (
"The model provided already seems to have quantized layers. Please consider "
"using a non QAT trained model as pretrained_model_file or set the enable_qat "
"flag in training_config to false."
)
model = create_quantized_keras_model(model)
self.keras_model = model
# Set input and output size variables.
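# Derive the network stride from the ratio of input to output spatial dimensions of
# the coverage head; it is used to compute output sizes when the input shape is overridden.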
default_output_shape = self.keras_model.get_layer(
"output_cov").output_shape
model_stride = max(self.keras_model.input_shape[2] // default_output_shape[-2],
self.keras_model.input_shape[3] // default_output_shape[-1])
if input_overrides == {None}:
# Retrieve them from saved model, and assume the user wants to infer using the same
# shapes as those.
self.input_num_channels = self.keras_model.input_shape[1]
self.input_height = self.keras_model.input_shape[2]
self.input_width = self.keras_model.input_shape[3]
# The last two dimensions are height and width.
self.output_height = default_output_shape[-2]
self.output_width = default_output_shape[-1]
else:
self.input_num_channels = input_num_channels
self.input_height = input_height
self.input_width = input_width
self.output_height = int(ceil(input_height / model_stride))
self.output_width = int(ceil(input_width / model_stride))
self.objective_set = build_objective_set(self.objective_set_config,
self.output_height,
self.output_width,
self.input_height,
self.input_width)
self.constructed = True
def update_regularizers(self, kernel_regularizer=None,
bias_regularizer=None):
"""Update regularizers for models that are being loaded."""
model_config = self.keras_model.get_config()
for layer, layer_config in zip(self.keras_model.layers, model_config['layers']):
# Updating regularizer parameters for conv2d, depthwise_conv2d and dense layers.
if type(layer) in [keras.layers.convolutional.Conv2D,
keras.layers.core.Dense,
keras.layers.DepthwiseConv2D]:
if hasattr(layer, 'kernel_regularizer'):
layer_config['config']['kernel_regularizer'] = kernel_regularizer
if hasattr(layer, 'bias_regularizer'):
layer_config['config']['bias_regularizer'] = bias_regularizer
prev_model = self.keras_model
self.keras_model = keras.models.Model.from_config(model_config)
self.keras_model.set_weights(prev_model.get_weights())
@classmethod
def load_model(cls, model_file, objective_set_config, target_class_names):
"""Create a new GridboxModel instance with model metadata.
Args:
model_file: Model file name.
objective_set_config: Loaded objective set config from model metadata.
target_class_names (list): Loaded target class names from model metadata.
Returns:
GridboxModel object.
"""
gridbox_model = cls(num_layers=None,
template=None,
use_pooling=None,
use_batch_norm=None,
dropout_rate=None,
objective_set_config=None,
activation_config=None,
target_class_names=None,
freeze_pretrained_layers=None,
allow_loaded_model_modification=None)
if objective_set_config is not None:
assert gridbox_model.objective_set_config is None, \
"Loaded config would override spec."
gridbox_model.objective_set_config = objective_set_config
if target_class_names is not None:
assert gridbox_model.target_class_names is None, \
"Loaded config would override spec."
gridbox_model.target_class_names = target_class_names
gridbox_model.load_model_weights(model_file)
return gridbox_model
@property
def objective_names(self):
"""Return the objective names this model is outputting.
Returns:
objective_names (set): Set of objective names, each of them a str.
Raises:
RuntimeError: If the model has not been constructed yet (in which case it does not make
sense to ask for objectives).
"""
if not self.constructed:
raise RuntimeError("Objective names cannot be determined before the model has been"
"constructed.")
return set(obj.name for obj in self.objective_set.learnable_objectives)
@property
def output_layer_names(self):
"""Return the model output layer names.
Returns:
output_layer_names (list): List of output layer names, each of them a str.
Raises:
RuntimeError: If the model has not been constructed yet (in which case it does not make
sense to ask for outputs).
"""
if not self.constructed:
raise RuntimeError("Output layer names cannot be determined before the model has been"
"constructed.")
return ['output_' + o.name for o in self.objective_set.learnable_objectives]
def add_missing_outputs(self, kernel_regularizer=None, bias_regularizer=None):
"""Add missing outputs to a loaded model.
Args:
kernel_regularizer: Keras regularizer to be applied to convolution kernels.
bias_regularizer: Keras regularizer to be applied to biases.
Raises:
AssertionError: if the model modification is not allowed and the model does
not contain heads for all learnable objectives.
"""
# If model modification is not allowed, return.
if not self.allow_loaded_model_modification:
# Model should be good to go as is. Assert that the model heads are in place.
for objective in self.objective_set.learnable_objectives:
assert any([objective.name in o for o in self.keras_model.output_names]), \
"Objective head is missing from model, and model modification is not allowed."
return
# Construct outputs. In case the loaded model does not contain outputs for all
# objectives, we need to construct the associated model heads.
outputs = self.objective_set.construct_outputs(model=self.keras_model,
num_classes=len(self.target_class_names),
data_format='channels_first',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer)
self.keras_model = Model(inputs=self.keras_model.inputs,
outputs=outputs,
name=self.keras_model.name)
def _print_model_summary_recurse(self, model):
"""Print model summary recursively.
Helper function for printing nested models (ie. models that have models as layers).
Args:
model: Keras model to print.
"""
model.summary()
for l in model.layers:
if isinstance(l, keras.engine.training.Model):
print('where %s is' % l.name)
self._print_model_summary_recurse(l)
def print_model_summary(self):
"""Print model summary."""
self._print_model_summary_recurse(self.keras_model)
def get_model_name(self):
"""Return model name."""
return self.keras_model.name
def _cost_func(self, target_classes, cost_combiner_func, ground_truth_tensors_dict,
pred_tensors_dict, loss_masks=None):
"""Model cost function.
Args:
target_classes (list): A list of TargetClass instances.
cost_combiner_func: A function that takes in a dictionary of objective costs,
and total cost by computing a weighted sum of the objective costs.
ground_truth_tensors_dict (dict): Maps from [target_class_name][objective_name] to
rasterized ground truth tensors.
pred_tensors_dict (dict): Maps from [target_class_name][objective_name] to DNN
prediction tensors.
loss_masks (nested dict): [target_class_name][objective_name]. The leaf values are the
corresponding loss masks (tf.Tensor) for a batch of frames.
Returns:
total_cost: Scalar cost.
"""
# Compute per target class per objective costs.
component_costs = self.objective_set.compute_component_costs(ground_truth_tensors_dict,
pred_tensors_dict,
target_classes, loss_masks)
# Use external cost_combiner_func to compute total cost.
return cost_combiner_func(component_costs)
def build_training_graph(self, inputs, ground_truth_tensors, optimizer, target_classes,
cost_combiner_func, train_op_generator, loss_masks=None):
"""Build a training graph.
Args:
inputs: Dataset input tensors to be used for training.
ground_truth_tensors (dict): [target_class_name][objective_name] -> tf.Tensor in model
output space.
optimizer: Optimizer to be used for updating model weights.
target_classes (list): A list of TargetClass instances.
cost_combiner_func: A function that takes in a dictionary of objective costs,
and returns the total cost by computing a weighted sum of the objective costs.
train_op_generator: Object that creates TF op for one training step.
loss_masks (nested dict): [target_class_name][objective_name]. The leaf values are the
corresponding loss masks (tf.Tensor) for a batch of frames.
Raises:
AssertionError: If the model hasn't been constructed yet.
"""
assert self.constructed, "Construct the model before build_training_graph."
model_name = self.get_model_name()
inputs = Input(tensor=inputs, name="input_images")
predictions = self.keras_model(inputs)
# Build a training model that connects input tensors to model.
self.keras_training_model = keras.models.Model(inputs=inputs,
outputs=predictions,
name=model_name)
# Convert the network predictions to float32 for accurate cost computation.
predictions = self._convert_to_fp32(predictions)
output_dict = self.predictions_to_dict(predictions)
pred_dict = get_class_predictions(output_dict, self.target_class_names)
# Compute task cost.
task_cost = self._cost_func(target_classes, cost_combiner_func, ground_truth_tensors,
pred_dict, loss_masks)
tf.summary.scalar('task_cost', task_cost)
# Compute regularization cost.
regularization_cost = tf.reduce_sum(self.keras_training_model.losses)
tf.summary.scalar('regularization_cost', regularization_cost)
# Compute total cost.
self.total_cost = task_cost + regularization_cost
tf.summary.scalar('total_cost', self.total_cost)
# Create training op and apply dynamic cost scaling if enabled in spec.
self.train_op = train_op_generator.get_train_op(optimizer=optimizer,
total_cost=self.total_cost,
var_list=self.keras_model.trainable_weights)
if Visualizer.enabled:
# Set histogram plot collection.
histogram_collections = [nvidia_tao_tf1.core.hooks.utils.INFREQUENT_SUMMARY_KEY]
# Add weight histogram to tf summary.
Visualizer.keras_model_weight_histogram(
self.keras_training_model,
collections=histogram_collections
)
@staticmethod
def _convert_to_fp32(tensor_list):
"""Convert a list of TF tensors to float32 TF tensors.
Args:
tensor_list: A list of TF tensors of any numeric data type.
Returns:
A list of float32 TF tensors.
"""
# Cast operation must maintain the name of the input tensor
return [tf.cast(tensor, dtype=tf.float32, name=tensor.name.split(':')[0] + '/cast_to_32')
for tensor in tensor_list]
def get_total_cost(self):
"""Return total cost."""
return self.total_cost
def get_train_op(self):
"""Return train op."""
return self.train_op
def get_keras_training_model(self):
"""Return Keras training model."""
return self.keras_training_model
def get_ground_truth_labels(self, ground_truth_labels):
"""Get ground truth labels.
For the base GridboxModel class, this is a pass-through.
Args:
ground_truth_labels (list): Each element is a dict of target features.
Returns:
ground_truth_labels (list): Unchanged.
"""
return ground_truth_labels
def generate_ground_truth_tensors(self, bbox_rasterizer, batch_labels):
"""Generate ground truth tensors.
Args:
bbox_rasterizer (BboxRasterizer): Instance of the BboxRasterizer class that will handle
label-to-rasterizer-arg translation and provide the target_gradient() methods with
the necessary inputs, as well as perform the final call to the SDK's rasterizer.
batch_labels (list): Each element is a dict of target features (each a tf.Tensor).
Returns:
target_tensors (dict): [target_class_name][objective_name] rasterizer ground truth
tensor.
"""
target_tensors = \
self.objective_set.generate_ground_truth_tensors(bbox_rasterizer, batch_labels)
return target_tensors
@inference_learning_phase
def build_inference_graph(self, inputs):
"""Set up the model for pure inference.
Args:
inputs: Input tensors of shape (N, 3, H, W). Can come from keras.layers.Input or be
some tf.Tensor / placeholder.
Returns:
raw_predictions: pure output from the keras model. This is a list of output tensors for
each objective.
class_predictions: (dict) [target_class_name][output_name] = tensor of shape
[N, obj_depth, output_height, output_width]
"""
assert self.constructed, "Construct the model before build_inference_graph."
raw_predictions = self.keras_model(inputs)
# Convert the network predictions to float32 for accurate cost computation.
raw_predictions = self._convert_to_fp32(raw_predictions)
output_dict = self.predictions_to_dict(raw_predictions)
raw_predictions = get_class_predictions(output_dict, self.target_class_names)
input_space_predictions = {}
# Convert predictions to input image space (e.g. to absolute bbox coordinates)
absolute_predictions = self.objective_set.predictions_to_absolute(output_dict)
input_space_predictions = self.objective_set.transform_predictions(absolute_predictions)
# Get the predictions per class.
input_space_predictions = self.get_class_predictions(input_space_predictions)
# Return inference outputs
return raw_predictions, input_space_predictions
def get_class_predictions(self, predictions):
"""Converting predictions dictionary to be indexed by class names.
Args:
predictions (dict): Dictionary of model predictions indexed by objective name.
Returns:
pred_dict: Dictionary of model predictions indexed by
target class name and objective name.
"""
pred_dict = get_class_predictions(predictions, self.target_class_names)
return pred_dict
@inference_learning_phase
def build_validation_graph(self, inputs, ground_truth_tensors, target_classes,
cost_combiner_func, loss_masks=None):
"""Set up the model for validation.
Args:
inputs: Dataset input tensors to be used for validation.
ground_truth_tensors (dict): [target_class_name][objective_name] -> tf.Tensor.
target_classes (list): A list of TargetClass instances.
cost_combiner_func: A function that takes in a dictionary of objective costs,
and returns the total cost by computing a weighted sum of the objective costs.
loss_masks (nested dict): [target_class_name][objective_name]. The leaf values are the
corresponding loss masks (tf.Tensor) for a batch of frames.
Raises:
AssertionError: If the model hasn't been constructed yet.
"""
assert self.constructed, "Construct the model before build_validation_graph."
class_predictions, self.validation_predictions = \
self.build_inference_graph(inputs)
# Compute validation cost using model raw predictions.
# Disable visualization during validation cost computation to avoid Tensorboard clutter.
with Visualizer.disable():
self.validation_cost = self._cost_func(target_classes, cost_combiner_func,
ground_truth_tensors,
class_predictions, loss_masks)
def get_validation_tensors(self):
"""Get a list of tensors for validating/evaluating the model."""
return [self.validation_predictions, self.validation_cost]
def get_model_weights(self):
"""Return model weights as numpy arrays."""
return self.keras_model.get_weights()
@property
def num_params(self):
"""Get the number of parameters in the keras model."""
if not self.constructed:
raise RuntimeError(
"Model parameter count cannot be derived unless the"
"GridBox Model class sets self.constructed to True"
)
return get_num_params(self.keras_model)
def set_model_weights(self, weights):
"""Set model weights from numpy arrays.
Args:
weights: Model weights as numpy arrays.
"""
return self.keras_model.set_weights(weights)
def get_target_class_names(self):
"""Return a list of model target classes."""
return self.target_class_names
@staticmethod
def get_session_config():
"""Retrieve a TensorFlow session config.
Returns:
config (tf.compat.v1.ConfigProto): TensorFlow config
with GPU options set.
"""
gpu_options = tf.compat.v1.GPUOptions(
allow_growth=True
)
config = tf.compat.v1.ConfigProto(gpu_options=gpu_options)
return config
def visualize_predictions(self):
"""Visualize bboxes predicted by the model."""
if Visualizer.enabled is False or Visualizer.num_images <= 0:
return
# Compute the number of images to visualize as the minimum of the user
# parameter and the actual minibatch size.
batch_size = self.keras_training_model.inputs[0].shape[0]
batch_size = min(Visualizer.num_images, batch_size)
# We're visualizing only a part of the minibatch.
inputs = self.keras_training_model.inputs[0][0:batch_size, :3]
raw_outputs = self.keras_training_model.outputs
# For visualization, float32 input and output is required
inputs = tf.cast(inputs, dtype=tf.float32)
raw_outputs = self._convert_to_fp32(raw_outputs)
predictions = self.predictions_to_dict(raw_outputs)
predictions = {output_name: tensor[0:batch_size] for output_name, tensor in
predictions.items()}
abs_predictions = self.objective_set.predictions_to_absolute(predictions)
Visualizer.visualize_elliptical_bboxes(self.target_class_names, inputs,
abs_predictions['cov'],
abs_predictions['bbox'])
Visualizer.visualize_rectangular_bboxes(self.target_class_names, inputs,
abs_predictions['cov'],
abs_predictions['bbox'])
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/model/detectnet_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build a DetectNet V2 model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nvidia_tao_tf1.cv.detectnet_v2.model.detectnet_model import GridboxModel
from nvidia_tao_tf1.cv.detectnet_v2.model.tensorrt_detectnet_model import TensorRTGridboxModel
from nvidia_tao_tf1.cv.detectnet_v2.proto.model_config_pb2 import ModelConfig
def select_model_proto(experiment_spec):
"""Select the model proto depending on type defined in the spec.
Args:
experiment_spec: nvidia_tao_tf1.cv.detectnet_v2.proto.experiment proto message.
Returns:
model_proto (ModelConfig):
Raises:
ValueError: If model_config_type is not valid.
"""
return experiment_spec.model_config
def get_base_model_config(experiment_spec):
"""Get the model config from the experiment spec.
Args:
experiment_spec: nvidia_tao_tf1.cv.detectnet_v2.proto.experiment proto message.
Returns:
model_config (ModelConfig): Model configuration proto.
Raises:
ValueError: If model config proto of the given experiment spec is of unknown type.
"""
model_config = select_model_proto(experiment_spec)
if isinstance(model_config, ModelConfig):
return model_config
raise ValueError("Model config is of unknown type.")
def build_model(m_config, target_class_names, enable_qat=False, framework="tlt"):
"""Build a DetectNet V2 model.
The model is a GridboxModel or a TensorRTGridboxModel instance.
Arguments:
m_config (ModelConfig): Model configuration proto.
target_class_names (list): A list of target class names.
enable_qat (bool): Flag to enable tlt model to qat model conversion.
framework (str): Model backend framework.
Choices: ["tlt", "tensorrt"]. Default "tlt".
Returns:
A DetectNet V2 model. By default, a GridboxModel instance with resnet feature extractor
is returned.
"""
# model_config.num_layers is checked during GridboxModel.construct_model. Only certain values
# are supported.
# Initial dictionary of the arguments for building the model.
model_constructor_arguments = dict()
assert isinstance(m_config, ModelConfig),\
"Unsupported model_proto message."
# Check sanity of the parameters.
if m_config.dropout_rate < 0.0 or m_config.dropout_rate > 1.0:
raise ValueError("ModelConfig.dropout_rate must be >= 0 and <= 1")
if target_class_names is None or not target_class_names:
raise ValueError("target_class_names must contain at least one class")
if m_config.freeze_pretrained_layers:
assert m_config.pretrained_model_file, \
"Freezing layers makes only sense if pretrained model is loaded."
if m_config.freeze_blocks:
assert m_config.pretrained_model_file, \
"Freeze blocks is only possible if a pretrained model file is provided."
assert framework in ["tlt", "tensorrt"], (
"Detectnet model only supports either tlt or tensorrt frameworks."
"Unsupported framework '{}' encountered.".format(framework)
)
# Common model building arguments for all model types.
args = {'num_layers': m_config.num_layers if m_config.num_layers else 18,
'use_pooling': m_config.use_pooling,
'use_batch_norm': m_config.use_batch_norm,
'dropout_rate': m_config.dropout_rate if m_config.dropout_rate else 0.0,
'objective_set_config': m_config.objective_set,
'activation_config': m_config.activation,
'target_class_names': target_class_names,
'freeze_pretrained_layers': m_config.freeze_pretrained_layers,
'freeze_blocks': m_config.freeze_blocks if m_config.freeze_blocks else None,
'freeze_bn': m_config.freeze_bn,
'allow_loaded_model_modification': m_config.allow_loaded_model_modification,
'all_projections': m_config.all_projections,
'enable_qat': enable_qat}
# Use the feature extractor template from the spec if provided; otherwise keep the default.
if m_config.arch:
args['template'] = m_config.arch
model_constructor_arguments.update(args)
# Defining model instance class.
model_class = GridboxModel
if framework == "tensorrt":
model_class = TensorRTGridboxModel
return model_class(**model_constructor_arguments)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/model/build_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for different DetectNet V2 models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import tempfile
from zipfile import BadZipFile, ZipFile
from keras import backend as K
import six
from nvidia_tao_tf1.cv.common.utils import decode_to_keras, load_keras_model
from nvidia_tao_tf1.encoding import encoding
logger = logging.getLogger(__name__)
def inference_learning_phase(fn):
"""Decorator that sets the learning phase to 0 temporarily before switching it back."""
def _fn_wrapper(*args, **kwargs):
old_learning_phase = K.learning_phase()
try:
K.set_learning_phase(0)
return fn(*args, **kwargs)
finally:
# After everything is done, restore old learning phase.
K.set_learning_phase(old_learning_phase)
return _fn_wrapper
def get_class_predictions(predictions, target_class_names):
"""Helper for converting predictions dictionary to be indexed by class names.
Args:
predictions (dict): Dictionary of model predictions indexed by objective name.
target_class_names (list): Model target class names as a list of strings.
Returns:
pred_dict: Dictionary of model predictions indexed by target class name and objective name.
"""
pred_dict = {}
num_target_classes = len(target_class_names)
for target_class_index, target_class_name in enumerate(target_class_names):
pred_dict[target_class_name] = {}
for output_name, pred in six.iteritems(predictions):
num_predicted_classes = int(pred.shape[1])
assert num_predicted_classes == num_target_classes, \
"Mismatch in the number of predicted (%d) and requested (%d) target_classes." % \
(num_predicted_classes, num_target_classes)
pred_dict[target_class_name][output_name] = pred[:, target_class_index]
return pred_dict
def get_pretrained_model_path(model_file):
"""Get pretrained model file name and check it exists.
If the supplied model file path is not absolute, it is resolved relative to
the current working directory.
Args:
model_file (string): Name of the stored model file (.hdf5).
Returns:
Absolute path to the model file if the input model_file is not an
empty string. Else None.
Raises:
AssertionError if the model file does not exist.
"""
if model_file:
if not os.path.isabs(model_file):
model_file = os.path.join(os.getcwd(),
model_file)
assert os.path.isfile(model_file), "Pretrained model file not found: %s" % model_file
else:
model_file = None
return model_file
def model_io(model_path, enc_key=None):
"""Simple utility to handle model file based on file extensions.
Args:
model_path (str): Path to the model file.
enc_key (str): Key to load tlt file.
Returns:
model (keras.models.Model): Loaded keras model.
"""
assert os.path.exists(model_path), "Pretrained model not found at {}".format(model_path)
if model_path.endswith('.tlt'):
assert enc_key is not None, "Key must be provided to load the model."
model = decode_to_keras(str(model_path), bytes(enc_key, 'utf-8'))
elif model_path.endswith('.hdf5'):
model = load_keras_model(str(model_path), compile=False)
else:
raise NotImplementedError("Invalid model file extension. {}".format(model_path))
return model
def extract_checkpoint_file(tmp_zip_file):
"""Simple function to extract a checkpoint file.
Args:
tmp_zip_file (str): Path to the extracted zip file.
Returns:
tmp_checkpoint_path (str): Path to the extracted checkpoint.
"""
# Set-up the temporary directory.
temp_checkpoint_path = tempfile.mkdtemp()
try:
with ZipFile(tmp_zip_file, 'r') as zip_object:
for member in zip_object.namelist():
zip_object.extract(member, path=temp_checkpoint_path)
except BadZipFile:
raise ValueError(
"The zipfile extracted was corrupt. Please check your key "
"or delete the latest `*.ckzip` and re-run the command."
)
except Exception:
raise IOError(
"The last checkpoint file is not saved properly. "
"Please delete it and rerun the script."
)
return temp_checkpoint_path
def get_tf_ckpt(ckzip_path, enc_key, latest_step):
"""Simple function to extract and get a trainable checkpoint.
Args:
        ckzip_path (str): Path to the encrypted checkpoint.
        enc_key (str): Key to decrypt the checkpoint.
        latest_step (int): Step number of the latest saved checkpoint.
    Returns:
        tf_ckpt_path (str): Path to the decrypted tf checkpoint.
"""
os_handle, temp_zip_path = tempfile.mkstemp()
os.close(os_handle)
# Decrypt the checkpoint file.
try:
# Try reading a checkpoint file directly.
temp_checkpoint_path = extract_checkpoint_file(ckzip_path)
except ValueError:
# Decrypt and load checkpoints for TAO < 5.0
with open(ckzip_path, 'rb') as encoded_file, open(temp_zip_path, 'wb') as tmp_zip_file:
encoding.decode(encoded_file, tmp_zip_file, bytes(enc_key, 'utf-8'))
            # Both files are closed automatically on exiting the `with` block.
# Load zip file and extract members to a tmp_directory.
temp_checkpoint_path = extract_checkpoint_file(temp_zip_path)
# Removing the temporary zip path.
os.remove(temp_zip_path)
return os.path.join(temp_checkpoint_path,
"model.ckpt-{}".format(latest_step))
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/model/utilities.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A wrapper for a TensorRT (TRT) DriveNet engine.
The engine can be a FP32, FP16, or a calibrated INT8 engine.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
import tensorflow as tf
from nvidia_tao_tf1.core.export import load_tensorrt_engine
from nvidia_tao_tf1.cv.detectnet_v2.model.detectnet_model import GridboxModel
from nvidia_tao_tf1.cv.detectnet_v2.objectives.objective_set import build_objective_set
def unravel_dimensions(dims):
"""Unravel dimensions to c,h,w."""
if len(dims) == 3:
height, width = (dims[1], dims[2])
elif len(dims) == 4 and dims[0] not in [-1, None]:
height, width = (dims[2], dims[3])
else:
raise NotImplementedError(
"Unhandled shape: {shape} for dimensions {dims}".format(
shape=len(dims), dims=dims)
)
return height, width
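# --- Illustrative example (addition, not part of the original module) ---
# A quick sketch of `unravel_dimensions` for the two supported binding layouts;
# the dimensions used are hypothetical.
def _example_unravel_dimensions():
    # CHW binding shape from an implicit-batch engine.
    assert unravel_dimensions((3, 544, 960)) == (544, 960)
    # NCHW binding shape with an explicit, fixed batch dimension.
    assert unravel_dimensions((8, 3, 544, 960)) == (544, 960)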
class TensorRTGridboxModel(GridboxModel):
"""A wrapper class for a TensorRT (TRT) DriveNet engine.
Provides interfaces for evaluation that match those of the GridboxModel class.
This allows the TRT engine to be used with the standard evaluation and inference scripts.
"""
def load_model_weights(self, model_file, **kwargs):
"""Load a TensorRT engine for inference.
Args:
model_file (str): TensorRT engine filename.
"""
self._engine = load_tensorrt_engine(model_file)
for binding in self._engine._engine:
if self._engine._engine.binding_is_input(binding):
input_dims = self._engine._engine.get_binding_shape(binding)
else:
output_dims = self._engine._engine.get_binding_shape(binding)
self.max_batch_size = self._engine._engine.max_batch_size
self.input_height, self.input_width = unravel_dimensions(input_dims)
self.output_height, self.output_width = unravel_dimensions(output_dims)
self.num_output_classes = len(self.target_class_names)
self.objective_set = build_objective_set(self.objective_set_config,
self.output_height,
self.output_width,
self.input_height,
self.input_width)
self.constructed = True
self._prediction_placeholders = None
self.ground_truth_placeholders = None
def load_model(self, *args, **kwargs):
"""Not implemented."""
raise NotImplementedError("Loading TensorRT engine with metadata is not implemented.")
def build_training_graph(self, *args, **kwargs):
"""Not implemented."""
raise NotImplementedError("Training a TensorRT engine is not implemented.")
@property
def num_params(self):
"""Get number of parameters from TensorRT evaluate."""
# TODO: @vpraveen Need to figure out how to get the number of
# params from a TensorRT engine if at all possible.
return 0
def _get_prediction_placeholders(self, inputs):
"""Create placeholders for the prediction tensors.
        Args:
inputs: Dataset input tensors to be used for validation.
Returns:
predictions (list): Each element is a tf.placeholder of the correct dtype and shape.
"""
predictions = []
batch_size = int(inputs.shape[0])
# Create a placeholder for each output of the TensorRT engine.
for objective in self.objective_set.learnable_objectives:
shape = [batch_size, self.num_output_classes, objective.num_channels,
self.output_height, self.output_width]
predictions.append(tf.compat.v1.placeholder(dtype=tf.float32, shape=shape,
name='output_'+objective.name))
return predictions
def build_validation_graph(self, inputs, ground_truth_tensors,
target_classes, cost_combiner_func):
"""Set up the TensorRT engine for validation.
Args:
inputs: Dataset input tensors to be used for validation.
ground_truth_tensors (dict): [target_class_name][objective_name] -> tf.Tensor.
target_classes (list): A list of TargetClass instances.
            cost_combiner_func: A function that takes in a dictionary of objective costs
                and computes the total cost as a weighted sum of the objective costs.
"""
# Predictions are done outside the TensorFlow graph using the TensorRT engine. For this
# reason, replace prediction and ground truth tensors with placeholders. They will be
# fed after doing inference using the engine. Otherwise, construct the validation graph
# normally using the GridboxModel code.
self._prediction_placeholders = self._get_prediction_placeholders(inputs)
# Replace model predictions with the prediction placeholders.
self.keras_model = lambda x: self._prediction_placeholders
# Build the validation graph using the code from GridboxModel.
super(TensorRTGridboxModel, self).build_validation_graph(
inputs,
ground_truth_tensors,
target_classes,
cost_combiner_func)
def print_model_summary(self):
"""Print a summary of the TensorRT engine."""
outputs = [o.name for o in self.objective_set.learnable_objectives]
print('Outputs of the TensorRT engine are:', outputs)
def prune(self, *args, **kwargs):
"""Not implemented."""
raise NotImplementedError("Pruning a TensorRT engine is not implemented.")
def _reshape_trt_predictions(self, predictions):
"""TRT flattens the prediction arrays, reshape to the correct shape.
Args:
predictions: Dictionary of numpy arrays containing TRT inference results. Keys are
output names of the Caffe/UFF model and values are the prediction numpy arrays.
Returns:
reshaped_predictions: A list of reshaped numpy arrays.
"""
reshaped_predictions = []
for objective in self.objective_set.learnable_objectives:
# Map TRT output names to output names used in the graph.
trt_output = objective.name.replace('output_', '')
prediction = next(value
for key, value in six.iteritems(predictions) if trt_output in key)
batch_size = np.shape(prediction)[0]
expected_shape = [batch_size, self.num_output_classes, objective.num_channels,
self.output_height, self.output_width]
reshaped_predictions.append(prediction.reshape(expected_shape))
return reshaped_predictions
def get_predictions_feed_dict(self, images):
"""Get a prediction tensors dictionary for validating/evaluating the model."""
# Get one batch of images and ground truths for validation.
predictions = self._engine.infer(images)
# Reshape TRT predictions to the shape expected by evaluation code.
predictions = self._reshape_trt_predictions(predictions)
# Match numeric predictions and ground truth labels with the corresponding tensors to
# run the validation graph.
predictions_feed_dict = dict()
for tensor, prediction in zip(self._prediction_placeholders, predictions):
predictions_feed_dict[tensor] = prediction
return predictions_feed_dict
@staticmethod
def get_session_config():
"""Retrieve a TensorFlow session config.
Returns:
            config (tf.compat.v1.ConfigProto): TensorFlow session config
                with GPU options set.
"""
gpu_options = tf.compat.v1.GPUOptions(
per_process_gpu_memory_fraction=0.33
)
config = tf.compat.v1.ConfigProto(
gpu_options=gpu_options,
device_count={'GPU': 0, 'CPU': 1}
)
return config
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/model/tensorrt_detectnet_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test DetectNet V2 model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import keras
from keras import backend as K
import numpy as np
import pytest
from six.moves import zip
import tensorflow as tf
from nvidia_tao_tf1.cv.detectnet_v2.cost_function.cost_function_parameters import (
get_target_class_names
)
from nvidia_tao_tf1.cv.detectnet_v2.model.build_model import build_model
from nvidia_tao_tf1.cv.detectnet_v2.proto.model_config_pb2 import ModelConfig
from nvidia_tao_tf1.cv.detectnet_v2.spec_handler.spec_loader import load_experiment_spec
from nvidia_tao_tf1.cv.detectnet_v2.training.training_proto_utilities import (
build_optimizer,
build_regularizer,
build_train_op_generator
)
from nvidia_tao_tf1.cv.detectnet_v2.training.utilities import setup_keras_backend
from nvidia_tao_tf1.cv.detectnet_v2.visualization.visualizer import \
DetectNetTBVisualizer as Visualizer
update_regularizers_test_cases = [
# (regularization_type, regularization_weight)
("L1", 0.25),
("L2", 0.5),
("NO_REG", 0.99)]
def check_layer_weights(test_layer, ref_layer):
"""Helper for checking the weights of two layers match.
Args:
test_layer, ref_layer (Keras.Layer): Layers to be checked.
Raises:
AssertionError if the layers weights are not the same.
"""
test_weights = test_layer.get_weights()
ref_weights = ref_layer.get_weights()
assert len(test_weights) == len(ref_weights), \
"Wrong length of loaded model weights at layer %s." % test_layer
for test_weight, ref_weight in zip(test_weights, ref_weights):
np.testing.assert_almost_equal(test_weight, ref_weight)
class TestGridbox:
"""Test DetectNet V2 model methods, e.g. loading and updating regularizers."""
@pytest.fixture(scope='class')
def model_file_path(self):
return 'dummy_model.hdf5'
@pytest.fixture(scope='class', params=[
ModelConfig.TrainingPrecision.FLOAT16,
ModelConfig.TrainingPrecision.FLOAT32])
def experiment_spec(self, request):
spec_proto = load_experiment_spec()
spec_proto.model_config.num_layers = 10
spec_proto.model_config.training_precision.backend_floatx = request.param
return spec_proto
@pytest.fixture(scope='class')
def gridbox_model(self, experiment_spec):
K.clear_session()
# Prepare model initialization parameters.
width = 224
height = 224
shape = (3, width, height)
# Setup the backend with correct computation precision
setup_keras_backend(experiment_spec.model_config.training_precision, is_training=True)
# Set up regularization.
kernel_regularizer, bias_regularizer = build_regularizer(
experiment_spec.training_config.regularizer)
# Construct a gridbox_model.
target_class_names = get_target_class_names(experiment_spec.cost_function_config)
gridbox_model = build_model(experiment_spec.model_config, target_class_names)
gridbox_model.construct_model(shape, kernel_regularizer, bias_regularizer,
pretrained_weights_file=None)
return gridbox_model
def test_load_model_weights(self, gridbox_model, experiment_spec, model_file_path):
"""Test loading weights of a saved DetectNet V2 model."""
gridbox_model.keras_model.save(model_file_path)
# Load the gridbox_model from file.
target_class_names = get_target_class_names(experiment_spec.cost_function_config)
gridbox_model_reloaded = build_model(experiment_spec.model_config, target_class_names)
gridbox_model_reloaded.load_model_weights(model_file_path)
# Give gridbox_model a new name for readability.
gridbox_model_expected = gridbox_model
assert gridbox_model_expected.num_layers == gridbox_model_reloaded.num_layers
assert gridbox_model_expected.input_height == gridbox_model_reloaded.input_height
assert gridbox_model_expected.input_width == gridbox_model_reloaded.input_width
assert gridbox_model_expected.input_num_channels == \
gridbox_model_reloaded.input_num_channels
assert gridbox_model_expected.output_height == gridbox_model_reloaded.output_height
assert gridbox_model_expected.output_width == gridbox_model_reloaded.output_width
        # Test that the loaded model's layer weights match the original ones.
assert len(gridbox_model_expected.keras_model.layers) \
== len(gridbox_model_reloaded.keras_model.layers)
for ref_layer in gridbox_model_expected.keras_model.layers:
test_layer = gridbox_model_reloaded.keras_model.get_layer(name=ref_layer.name)
check_layer_weights(test_layer, ref_layer)
# Remove the temporary file.
os.remove(model_file_path)
def test_update_regularizer(self, gridbox_model, experiment_spec):
"""Test update regularizer function to override the previous model regs."""
previous_model = gridbox_model.keras_model
regularizer_config = experiment_spec.training_config.regularizer
kernel_regularizer, bias_regularizer = build_regularizer(regularizer_config)
gridbox_model.update_regularizers(kernel_regularizer, bias_regularizer)
model_config = gridbox_model.keras_model.get_config()
for r_layer, l_config in zip(gridbox_model.keras_model.layers, model_config['layers']):
            if isinstance(r_layer, (keras.layers.convolutional.Conv2D,
                                    keras.layers.core.Dense,
                                    keras.layers.DepthwiseConv2D)):
if hasattr(r_layer, 'kernel_regularizer'):
assert l_config['config']['kernel_regularizer'] == kernel_regularizer
if hasattr(r_layer, 'bias_regularizer'):
assert l_config['config']['bias_regularizer'] == bias_regularizer
test_layer = previous_model.get_layer(name=r_layer.name)
check_layer_weights(test_layer, r_layer)
def test_model_regularizers(self, gridbox_model, experiment_spec):
"""Test that the regularizers specified in spec are present in the DetectNet V2 model."""
# Set up regularization.
kernel_regularizer, bias_regularizer = build_regularizer(
experiment_spec.training_config.regularizer)
for layer in gridbox_model.keras_model.layers:
if 'kernel_regularizer' in layer.get_config():
assert layer.kernel_regularizer.l1 == kernel_regularizer.l1
assert layer.kernel_regularizer.l2 == kernel_regularizer.l2
if 'bias_regularizer' in layer.get_config():
assert layer.bias_regularizer.l1 == bias_regularizer.l1
assert layer.bias_regularizer.l2 == bias_regularizer.l2
def test_build_training_graph(self, gridbox_model, experiment_spec, model_file_path, mocker):
"""Test building training graph."""
# Set up optimizer.
optimizer = build_optimizer(experiment_spec.training_config.optimizer, 1.0)
train_op_generator = build_train_op_generator(experiment_spec.training_config.cost_scaling)
# Build the Visualizer.
visualizer_config = experiment_spec.training_config.visualizer
visualizer_config.enabled = True
Visualizer.build_from_config(visualizer_config)
# Build training graph.
K.set_learning_phase(0)
dummy_input = tf.zeros((1, 3, gridbox_model.input_height, gridbox_model.input_width),
dtype=K.floatx())
# Mock gradient computation to be able to check how it was called.
wrapped = optimizer.compute_gradients
compute_gradients_mock = mocker.patch.object(tf.train.AdamOptimizer, 'compute_gradients',
wraps=wrapped)
# Set one layer as non-trainable to check optimizer behavior.
gridbox_model.keras_model.layers[1].trainable = False
gridbox_model.build_training_graph(inputs=dummy_input, ground_truth_tensors=None,
optimizer=optimizer, target_classes=[],
cost_combiner_func=lambda *args: 0.0,
train_op_generator=train_op_generator)
# Check that optimizer is called with correct variables.
var_list = []
for call_arg in compute_gradients_mock.call_args:
if 'var_list' in call_arg:
var_list = call_arg['var_list']
for layer in gridbox_model.keras_model.layers:
for weight in layer.weights:
# All trainable layers params should be mentioned, except for BN moving averages.
if layer.trainable and 'moving_' not in weight.name:
assert weight in var_list, "Trainable weight was not passed to optimizer."
else:
assert weight not in var_list, "Non-trainable weight was passed to optimizer."
def test_objective_names(self, gridbox_model):
"""Test objective_names."""
assert gridbox_model.objective_names == set(['bbox', 'cov'])
def test_output_layer_names(self, gridbox_model):
"""Test output_layer_names."""
assert gridbox_model.output_layer_names == ['output_bbox', 'output_cov']
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/model/tests/test_gridbox_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""File containing constants for the spec handling."""
from nvidia_tao_tf1.cv.common.spec_validator import ValueChecker
TRAINVAL_OPTIONAL_CHECK_DICT = {
"max_objective_weight": ValueChecker(">=", 0.0),
"min_objective_weight": ValueChecker(">=", 0.0),
"checkpoint_interval": ValueChecker(">", 0.0),
"num_images": ValueChecker(">", 0),
"scales": ValueChecker("!=", ""),
"steps": ValueChecker("!=", ""),
"offsets": ValueChecker("!=", "")
}
TRAINVAL_VALUE_CHECK_DICT = {
# model config parameters.
"arch": [ValueChecker("!=", ""),
ValueChecker("in", ["resnet",
"vgg",
"darknet",
"mobilenet_v1",
"mobilenet_v2",
"squeezenet",
"googlenet",
"efficientnet_b0"])],
"num_layers": [ValueChecker(">=", 0)],
"scale": [ValueChecker(">", 0)],
"offset": [ValueChecker(">=", 0)],
# bbox rasterizer parameters.
"cov_center_x": [ValueChecker(">=", 0), ValueChecker("<=", 1.0)],
"cov_center_y": [ValueChecker(">=", 0), ValueChecker("<=", 1.0)],
"cov_radius_x": [ValueChecker(">=", 0), ValueChecker("<=", 1.0)],
"cov_radius_y": [ValueChecker(">=", 0), ValueChecker("<=", 1.0)],
"bbox_min_radius": [ValueChecker(">=", 0), ValueChecker("<=", 1.0)],
"deadzone_radius": [ValueChecker(">=", 0), ValueChecker("<=", 1.0)],
# Training config.
"batch_size_per_gpu": [ValueChecker(">", 0)],
"num_epochs": [ValueChecker(">", 0)],
"min_learning_rate": [ValueChecker(">", 0)],
"max_learning_rate": [ValueChecker(">", 0)],
"soft_start": [ValueChecker(">=", 0), ValueChecker("<", 1.0)],
"annealing": [ValueChecker(">=", 0), ValueChecker("<", 1.0)],
# evaluation config parameters.
"validation_period_during_training": [ValueChecker(">", 0)],
"first_validation_epoch": [ValueChecker(">", 0)],
"minimum_height": [ValueChecker(">=", 0)],
"minimum_width": [ValueChecker(">=", 0)],
"maximum_height": [ValueChecker(">", 0)],
"maximum_width": [ValueChecker(">", 0)],
"batch_size": [ValueChecker(">", 0)],
# regularizer
"weight": [ValueChecker(">=", 0.0)],
# Postprocessing config.
"coverage_threshold": [ValueChecker(">=", 0.), ValueChecker("<=", 1.0)],
"minimum_bounding_box_height": [ValueChecker(">=", 0.)],
"confidence_threshold": [ValueChecker(">=", 0),
ValueChecker("<=", 1.0)],
"nms_iou_threshold": [ValueChecker(">=", 0), ValueChecker("<=", 1.0)],
"nms_confidence_threshold": [ValueChecker(">=", 0), ValueChecker("<=", 1.0)],
"dbscan_eps": [ValueChecker(">=", 0), ValueChecker("<=", 1.0)],
"dbscan_min_samples": [ValueChecker(">=", 0), ValueChecker("<=", 1.0)],
"neighborhood_size": [ValueChecker(">=", 0), ValueChecker("<=", 1.0)],
"dbscan_confidence_threshold": [ValueChecker(">=", 0), ValueChecker("<=", 1.0)],
# augmentation_config
"min_bbox_width": [ValueChecker(">=", 0.0)],
"min_bbox_height": [ValueChecker(">=", 0.0)],
"output_image_width": [ValueChecker(">", 0), ValueChecker("%", 16)],
"output_image_height": [ValueChecker(">", 0), ValueChecker("%", 16)],
"output_channel": [ValueChecker("in", [1, 3])],
# spatial augmentation config
"hflip_probability": [ValueChecker(">=", 0), ValueChecker("<=", 1.0)],
"vflip_probability": [ValueChecker(">=", 0), ValueChecker("<=", 1.0)],
"zoom_min": [ValueChecker(">=", 0)],
"zoom_max": [ValueChecker(">=", 0)],
"translate_max_x": [ValueChecker(">=", 0)],
"translate_max_y": [ValueChecker(">=", 0)],
# color augmentation parameters
"color_shift_stddev": [ValueChecker(">=", 0.0), ValueChecker("<=", 1.0)],
"hue_rotation_max": [ValueChecker(">=", 0)],
"saturation_shift_max": [ValueChecker(">=", 0), ValueChecker("<=", 1.0)],
"contrast_scale_max": [ValueChecker(">=", 0), ValueChecker("<=", 1.0)],
"contrast_center": [ValueChecker(">=", 0), ValueChecker("<=", 1.0)],
"tfrecords_path": [ValueChecker("!=", "")],
"image_directory_path": [ValueChecker("!=", "")],
    # cost scaling config.
"initial_exponent": [ValueChecker(">", 0.0)],
"increment": [ValueChecker(">", 0.0)],
"decrement": [ValueChecker(">", 0.0)],
# optimizer config
"epsilon": [ValueChecker(">", 0.0)],
"beta1": [ValueChecker(">", 0.0)],
"beta2": [ValueChecker(">", 0.0)],
# Cost function config.
"name": [ValueChecker("!=", "")],
"class_weight": [ValueChecker(">=", 0.0)],
"coverage_foreground_weight": [ValueChecker(">=", 0.0)],
"initial_weight": [ValueChecker(">=", 0.0)],
"weight_target": [ValueChecker(">=", 0.0)],
}
TRAINVAL_EXP_REQUIRED_MSG = ["model_config", "training_config", "evaluation_config",
"cost_function_config", "augmentation_config",
"bbox_rasterizer_config", "postprocessing_config",
"dataset_config"]
EVALUATION_EXP_REQUIRED_MSG = ["model_config", "training_config", "evaluation_config",
"augmentation_config", "postprocessing_config", "dataset_config",
"cost_function_config", "bbox_rasterizer_config"]
INFERENCE_EXP_REQUIRED_MSG = ["inferencer_config", "bbox_handler_config"]
INFERENCE_REQUIRED_MSG_DICT = {
"inferencer_config": [
"model_config_type", "batch_size",
"image_height", "image_width", "image_channels",
"target_classes"
],
"tlt_config": ["model"],
"calibrator_config": [
"calibration_cache"
],
"bbox_handler_config": [
"classwise_bbox_handler_config",
"confidence_model",
"output_map",
"bbox_color",
"clustering_config"
]
}
TRAINVAL_REQUIRED_MSG_DICT = {
# Required parameter for augmentation config.
"augmentation_config": ["preprocessing"],
# Required parameter for bbox rasterizer config.
"bbox_rasterizer_config": [
"target_class_config",
"dead_zone_radius"
],
# Required parameters for the target_class_config.
"target_class_config": [
"cov_center_x", "cov_center_y",
"cov_radius_x", "cov_radius_y",
"bbox_min_radius"
],
# Required parameter of the training_config.
"training_config": [
"num_epochs",
"learning_rate",
"regularizer",
"optimizer",
"cost_scaling"
],
"optimizer": ["adam"],
"adam": ["epsilon", "beta", "gamma"],
"cost_scaling": ["initial_exponent", "increment, decrement"],
# Required parameters for the evaluation config.
"evaluation_config": ["minimum_detection_ground_truth_overlap",
"evaluation_box_config"],
# Required parameters for the cost_function_config.
"cost_function_config": ["target_classes"],
# Required parameters for the cost_function_config, target_classes
"target_classes": [
"name",
"class_weights",
"coverage_foreground_weight",
"objectives"
],
"objectives": [
"name"
],
"postprocessing_config": ["target_class_config"],
"clustering_config": [
"coverage_threshold"
],
"soft_start_annealing_schedule": [
"min_learning_rate",
"max_learning_rate",
"soft_start",
"annealing"
],
"early_stopping_annealing_schedule": [
"min_learning_rate",
"max_learning_rate",
"soft_start_epochs",
"annealing_epochs",
"patience_steps"
],
"dataset_config": ["data_sources", "target_class_mapping"],
"data_sources": ["tfrecords_path", "image_directory_path"],
# model_config
"model_config": ["objective_set", "arch"],
"objective_set": ["bbox", "cov"],
"bbox": ["scale", "offset"],
}
INFERENCE_VALUE_CHECK_DICT = {
# inferencer config
"batch_size": [ValueChecker(">", 0)],
"image_height": [ValueChecker(">", 0)],
"image_width": [ValueChecker(">", 0)],
"image_channels": [ValueChecker("in", [1, 3])],
# calibrator_config
"calibration_cache": [ValueChecker("!=", "")],
"coverage_threshold": [ValueChecker(">=", 0.), ValueChecker("<=", 1.0)],
"minimum_bounding_box_height": [ValueChecker(">=", 0.)],
}
INFERENCE_OPTIONAL_CHECK_DICT = {
"calibration_tensorfile": [ValueChecker("!=", "")],
"n_batches": [ValueChecker(">", 0)],
"etlt_model": [ValueChecker("!=", "")],
"caffemodel": [ValueChecker("!=", "")],
"prototxt": [ValueChecker("!=", "")],
"uff_model": [ValueChecker("!=", "")],
"trt_engine": [ValueChecker("!=", "")],
"nms_iou_threshold": [ValueChecker(">=", 0), ValueChecker("<=", 1.0)],
"nms_confidence_threshold": [ValueChecker(">=", 0), ValueChecker("<=", 1.0)],
"dbscan_eps": [ValueChecker(">=", 0), ValueChecker("<=", 1.0)],
"dbscan_min_samples": [ValueChecker(">=", 0), ValueChecker("<=", 1.0)],
"neighborhood_size": [ValueChecker(">=", 0), ValueChecker("<=", 1.0)],
"dbscan_confidence_threshold": [ValueChecker(">=", 0), ValueChecker("<=", 1.0)],
}
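# --- Illustrative note (addition, not part of the original module) ---
# Each entry above pairs a spec field name with one or more ValueChecker(op, value)
# constraints. A rough sketch of the intended semantics (the actual SpecValidator
# implementation may differ): for "confidence_threshold" the pair
#     [ValueChecker(">=", 0), ValueChecker("<=", 1.0)]
# requires 0 <= confidence_threshold <= 1.0, the "%" operator (e.g. on
# output_image_width) requires divisibility by the given value, and "in" requires
# membership in the listed values.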
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/spec_handler/constants.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Folder defining spec handling module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/spec_handler/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Load an experiment spec file to run GridBox training, evaluation, pruning."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import sys
from google.protobuf.text_format import Merge as merge_text_proto
from nvidia_tao_tf1.cv.common.spec_validator import SpecValidator
import nvidia_tao_tf1.cv.detectnet_v2.proto.experiment_pb2 as experiment_pb2
import nvidia_tao_tf1.cv.detectnet_v2.proto.inference_pb2 as inference_pb2
from nvidia_tao_tf1.cv.detectnet_v2.spec_handler.constants import (
INFERENCE_EXP_REQUIRED_MSG,
INFERENCE_OPTIONAL_CHECK_DICT,
INFERENCE_REQUIRED_MSG_DICT,
INFERENCE_VALUE_CHECK_DICT,
TRAINVAL_EXP_REQUIRED_MSG,
TRAINVAL_OPTIONAL_CHECK_DICT,
TRAINVAL_REQUIRED_MSG_DICT,
TRAINVAL_VALUE_CHECK_DICT
)
logger = logging.getLogger(__name__)
VALIDATION_SCHEMA = {
"train_val": {
"required_msg_dict": TRAINVAL_REQUIRED_MSG_DICT,
"value_checker_dict": TRAINVAL_VALUE_CHECK_DICT,
"required_msg": TRAINVAL_EXP_REQUIRED_MSG,
"optional_check_dict": TRAINVAL_OPTIONAL_CHECK_DICT,
"proto": experiment_pb2.Experiment(),
"default_spec": "experiment_specs/default_spec.txt"
},
"inference": {
"required_msg_dict": INFERENCE_REQUIRED_MSG_DICT,
"value_checker_dict": INFERENCE_VALUE_CHECK_DICT,
"required_msg": INFERENCE_EXP_REQUIRED_MSG,
"optional_check_dict": INFERENCE_OPTIONAL_CHECK_DICT,
"proto": inference_pb2.Inference(),
"default_spec": "experiment_specs/inferencer_spec_etlt.prototxt"
}
}
def validate_spec(spec, validation_schema="train_val"):
"""Validate the loaded experiment spec file."""
assert validation_schema in list(VALIDATION_SCHEMA.keys()), (
"Invalidation specification file schema: {}".format(validation_schema)
)
schema = VALIDATION_SCHEMA[validation_schema]
if schema["required_msg"] is None:
schema["required_msg"] = []
spec_validator = SpecValidator(required_msg_dict=schema["required_msg_dict"],
value_checker_dict=schema["value_checker_dict"])
try:
spec_validator.validate(spec, schema["required_msg"])
except AssertionError as e:
logger.info(
"Spec file validation failed.\n{}".format(e)
)
sys.exit(1)
def load_proto(spec_path, proto_buffer, default_spec_path=None, merge_from_default=True):
"""Load spec from file and merge with given proto_buffer instance.
Args:
spec_path (str): location of a file containing the custom spec proto.
        proto_buffer (pb2): protocol buffer instance to be loaded.
        default_spec_path (str): location of the default spec to use if merge_from_default is True.
        merge_from_default (bool): whether to merge the default spec first. If False, spec_path must be set.
Returns:
proto_buffer(pb2): protocol buffer instance updated with spec.
"""
def _load_from_file(filename, pb2):
if not os.path.exists(filename):
raise IOError("Specfile not found at: {}".format(filename))
with open(filename, "r") as f:
merge_text_proto(f.read(), pb2)
# Setting this flag false prevents concatenating repeated-fields
if merge_from_default:
assert default_spec_path, \
"default spec path has to be defined if merge_from_default is enabled"
# Load the default spec
_load_from_file(default_spec_path, proto_buffer)
else:
assert spec_path, "spec_path has to be defined, if merge_from_default is disabled"
# Merge a custom proto on top of the default spec, if given
if spec_path:
logger.info("Merging specification from %s", spec_path)
_load_from_file(spec_path, proto_buffer)
return proto_buffer
def load_experiment_spec(spec_path=None, merge_from_default=True, validation_schema="train_val"):
"""Load experiment spec from a .txt file and return an experiment_pb2.Experiment object.
Args:
spec_path (str): location of a file containing the custom experiment spec proto.
        merge_from_default (bool): whether to merge the default spec first. If False, spec_path must be set.
        validation_schema (str): validation schema to use, either "train_val" or "inference".
Returns:
experiment_spec: protocol buffer instance of type experiment_pb2.Experiment.
"""
experiment_spec = VALIDATION_SCHEMA[validation_schema]["proto"]
file_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
default_spec_path = os.path.join(
file_path,
VALIDATION_SCHEMA[validation_schema]["default_spec"]
)
experiment_spec = load_proto(spec_path, experiment_spec, default_spec_path,
merge_from_default)
validate_spec(experiment_spec, validation_schema=validation_schema)
return experiment_spec
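# Illustrative usage (addition, not part of the original module); the spec path below
# is a hypothetical placeholder:
#     spec = load_experiment_spec(spec_path="/workspace/specs/detectnet_v2_train.txt",
#                                 merge_from_default=False,
#                                 validation_schema="train_val")
#     print(spec.training_config.num_epochs)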
def load_inference_spec(spec_path=None, merge_from_default=True):
"""Simple function to load an inference spec file.
Args:
spec_path (str): Path to the inference spec file.
Returns:
inference_spec: protocol buffer instance of type inference_pb2.Inference()
"""
inference_spec = inference_pb2.Inference()
file_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
default_spec_path = os.path.join(file_path, 'experiment_spec/inferencer_spec_etlt.prototxt')
inference_spec = load_proto(spec_path, inference_spec, default_spec_path,
merge_from_default)
validate_spec(inference_spec, validation_schema="inference")
return inference_spec
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/spec_handler/spec_loader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test detectnet_v2 spec file loader and validator."""
import os
from google.protobuf.text_format import ParseError
import pytest
from nvidia_tao_tf1.cv.detectnet_v2.spec_handler.spec_loader import load_experiment_spec
detectnet_root = os.path.dirname(
os.path.dirname(
os.path.dirname(os.path.realpath(__file__))
)
)
gt_training_spec = os.path.join(
detectnet_root, "experiment_specs/default_spec.txt"
)
gt_inference_spec = os.path.join(
detectnet_root, "experiment_specs/inferencer_spec_etlt.prototxt"
)
topologies = [
("train_val", gt_training_spec),
("inference", gt_inference_spec),
("train_val", gt_inference_spec),
("inference", gt_training_spec),
]
class TestDetectnetSpecloader():
"""Simple class to test the specification file loader."""
@pytest.mark.parametrize(
"schema_validation, spec_file_path", # noqa: E501
topologies
)
def test_spec_loader(self, schema_validation, spec_file_path):
"""Load and check if the spec file validator throws an error."""
error_raises = False
if schema_validation == "train_val":
if os.path.basename(spec_file_path) == "inferencer_spec_etlt.prototxt":
error_raises = True
elif schema_validation == "inference":
if os.path.basename(spec_file_path) == "default_spec.txt":
error_raises = True
if error_raises:
with pytest.raises((AssertionError, ParseError)):
load_experiment_spec(spec_path=spec_file_path,
merge_from_default=False,
validation_schema=schema_validation)
else:
load_experiment_spec(
spec_path=spec_file_path,
merge_from_default=False,
validation_schema=schema_validation)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/spec_handler/tests/test_spec_loader.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""EvaluationConfig class that holds evaluation parameters."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def _build_evaluation_box_config(evaluation_box_config_proto, target_classes):
"""Build EvaluationBoxConfig from proto.
Args:
        evaluation_box_config_proto: evaluation_config.evaluation_box_config message.
        target_classes (list): List of target class names (str).
Returns:
        A dict of EvaluationBoxConfig instances indexed by target class names.
"""
evaluation_box_configs = {}
for key in target_classes:
# Check if key is present in the evaluation config.
if key not in evaluation_box_config_proto.keys():
raise ValueError("Evaluation box config is missing for {}".format(key))
config = evaluation_box_config_proto[key]
evaluation_box_configs[key] = EvaluationBoxConfig(config.minimum_height,
config.maximum_height,
config.minimum_width,
config.maximum_width)
return evaluation_box_configs
def build_evaluation_config(evaluation_config_proto, target_classes):
"""Build EvaluationConfig from proto.
Args:
        evaluation_config_proto: evaluation_config message.
        target_classes (list): List of target class names (str).
Returns:
EvaluationConfig object.
"""
# Get validation_period_during_training.
validation_period_during_training = evaluation_config_proto.validation_period_during_training
# Get first_validation_epoch.
first_validation_epoch = evaluation_config_proto.first_validation_epoch
# Create minimum_detection_ground_truth_overlap dict from evaluation_config_proto.
minimum_detection_ground_truth_overlaps = {}
for key in target_classes:
if key not in evaluation_config_proto.minimum_detection_ground_truth_overlap.keys():
raise ValueError("Cannot find a min overlap threshold for {}".format(key))
minimum_detection_ground_truth_overlaps[key] = evaluation_config_proto.\
minimum_detection_ground_truth_overlap[key]
# Build EvaluationBoxConfig from evaluation_config_proto.
evaluation_box_configs = \
_build_evaluation_box_config(
evaluation_config_proto.evaluation_box_config,
target_classes)
average_precision_mode = evaluation_config_proto.average_precision_mode
# Build EvaluationConfig object.
evaluation_config = EvaluationConfig(validation_period_during_training,
first_validation_epoch,
minimum_detection_ground_truth_overlaps,
evaluation_box_configs,
average_precision_mode)
return evaluation_config
class EvaluationBoxConfig(object):
"""Holds parameters for EvaluationBoxConfig."""
def __init__(self, minimum_height, maximum_height, minimum_width,
maximum_width):
"""Constructor.
        Evaluation box configs are used to filter objects based on their height and width.
Args:
minimum_height (int): Ground truths with height below this value are ignored.
maximum_height (int): Ground truths with height above this value are ignored.
minimum_width (int): Ground truths with width below this value are ignored.
maximum_width (int): Ground truths with width above this value are ignored.
"""
self.minimum_height = minimum_height
self.maximum_height = maximum_height
self.minimum_width = minimum_width
self.maximum_width = maximum_width
class EvaluationConfig(object):
"""Holds parameters for EvaluationConfig."""
def __init__(self, validation_period_during_training, first_validation_epoch,
minimum_detection_ground_truth_overlap, evaluation_box_configs,
average_precision_mode):
"""Constructor.
EvaluationConfig is a class definition for specifying parameters to
evaluate DriveNet detections against ground truth labels.
Allows the user to specify:
- Minimum overlap between a detection and ground truth to count as a true positive.
        - Evaluation box configs that each specify e.g. minimum and maximum object height.
- Weights for computing weighted metrics (e.g. weighted AP).
Args:
validation_period_during_training (int): The frequency for model validation during
training (in epochs).
first_validation_epoch (int): The first validation epoch. After this, validation is done
on epochs first_validation_epoch + i*validation_period_during_training.
minimum_detection_ground_truth_overlap (dict): Minimum overlap of a ground truth and a
detection bbox to consider the detection to be a true positive.
            evaluation_box_configs (dict): dict in which keys are class names and
                values are EvaluationBoxConfig objects containing
                parameters such as minimum and maximum bbox height.
            average_precision_mode: Mode used for computing average precision.
"""
self.validation_period_during_training = validation_period_during_training
self.first_validation_epoch = first_validation_epoch
self.minimum_detection_ground_truth_overlap = minimum_detection_ground_truth_overlap
self.evaluation_box_configs = evaluation_box_configs
self.average_precision_mode = average_precision_mode
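# --- Illustrative example (addition, not part of the original module) ---
# A minimal sketch of constructing the evaluation configuration directly; the class
# name, thresholds, and mode value are hypothetical.
def _example_evaluation_config():
    box_configs = {
        'car': EvaluationBoxConfig(minimum_height=10, maximum_height=9999,
                                   minimum_width=10, maximum_width=9999),
    }
    overlaps = {'car': 0.7}
    return EvaluationConfig(validation_period_during_training=10,
                            first_validation_epoch=30,
                            minimum_detection_ground_truth_overlap=overlaps,
                            evaluation_box_configs=box_configs,
                            average_precision_mode=0)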
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/evaluation/evaluation_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A class to evaluate a DriveNet TensorRT engine."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from timeit import default_timer
import six
from six.moves import range as xrange
import tensorflow as tf
from nvidia_tao_tf1.blocks.multi_source_loader.types import Bbox2DLabel
from nvidia_tao_tf1.blocks.multi_source_loader.types import Coordinates2D
import nvidia_tao_tf1.core
from nvidia_tao_tf1.cv.detectnet_v2.evaluation.evaluation import Evaluator
Canvas2D = nvidia_tao_tf1.core.types.Canvas2D
logger = logging.getLogger(__name__)
class TensorRTEvaluator(Evaluator):
"""Class for running evaluation using a TensorRT (TRT) engine."""
def __init__(self,
postprocessing_config,
evaluation_config,
gridbox_model,
images,
ground_truth_labels,
steps,
confidence_models=None,
target_class_mapping=None,
sqlite_paths=None):
"""Constructor.
Args:
postprocessing_config (PostProcessingConfig): Object holding postprocessing parameters.
evaluation_config: evaluation_config_pb2.EvaluationConfig object
gridbox_model (GridboxModel): A GridboxModel instance.
images: Dataset input tensors to be used for validation.
ground_truth_labels (list): Each element is a dict of target features (each
a tf.Tensor).
steps (int): Number of minibatches to loop the validation dataset once.
confidence_models (dict): A dict of ConfidenceModel instances, indexed by
target class name. Can be None.
target_class_mapping (dict): Maps from source class to target class (both str). Defaults
to ``None``. If provided, forwards the information to dnn_metrics.
sqlite_paths (list): If provided, is expected to be a list of paths (str) to HumanLoop
                sqlite exports. The reason this exists is to support the use case where one has
access to a sqlite file that contains labels for both detection ('BOX') and
lanes ('POLYLINE').
"""
super(TensorRTEvaluator, self).__init__(postprocessing_config=postprocessing_config,
evaluation_config=evaluation_config,
gridbox_model=gridbox_model,
images=images,
ground_truth_labels=ground_truth_labels,
steps=steps,
confidence_models=confidence_models,
target_class_mapping=target_class_mapping,
sqlite_paths=sqlite_paths)
self._ground_truth_labels_placeholders = self._get_ground_truth_labels_placeholders()
@property
def keras_models(self):
"""Point to Keras Model objects with which to initialize the weights.
Since the underlying models for TensorRT are not Keras models, return None.
"""
return None
def _get_ground_truth_labels_placeholders(self):
"""Create a placeholder for each ground truth tensor.
Returns:
placeholders (list or Bbox2DLabel): If self._ground_truth_labels is a list: List of
dicts of tf.placeholders for ground truth labels. Each key in each dict is a
ground truth label feature.
If self._ground_truth_labels is a Bbox2DLabel: A Bbox2DLabel with placeholders.
"""
if isinstance(self._ground_truth_labels, list):
placeholders = []
for frame_labels in self._ground_truth_labels:
framewise_placeholders = dict()
for label_name, tensor in six.iteritems(frame_labels):
framewise_placeholders[label_name] = tf.compat.v1.placeholder(
tensor.dtype)
placeholders.append(framewise_placeholders)
elif isinstance(self._ground_truth_labels, Bbox2DLabel):
kwargs_bbox2dlabel = dict()
labels_as_dict = self._ground_truth_labels._asdict()
for field_name in Bbox2DLabel._fields:
field_value = labels_as_dict[field_name]
if field_name == 'vertices':
placeholder = Coordinates2D(
coordinates=tf.compat.v1.sparse.placeholder(
field_value.coordinates.values.dtype),
canvas_shape=Canvas2D(
height=tf.compat.v1.placeholder(
field_value.canvas_shape.height.dtype),
width=tf.compat.v1.placeholder(
field_value.canvas_shape.width.dtype)
))
elif isinstance(field_value, tf.SparseTensor):
placeholder = tf.compat.v1.sparse.placeholder(
field_value.values.dtype)
elif isinstance(field_value, tf.Tensor):
placeholder = tf.compat.v1.placeholder(field_value.dtype)
else:
raise TypeError("Unknown ground truth label field type")
kwargs_bbox2dlabel[field_name] = placeholder
placeholders = Bbox2DLabel(**kwargs_bbox2dlabel)
else:
raise TypeError("Unknown ground truth label type")
return placeholders
def _get_ground_truth_labels_feed_dict(self, ground_truth_labels):
"""Construct a feed dict for ground truth placeholders given a list of dicts of labels.
Returns:
feed_dict (dict): Keys are placeholders for the ground truth label features, values
are the corresponding feature values.
"""
feed_dict = dict()
if isinstance(ground_truth_labels, list):
for values, tensors in zip(ground_truth_labels, self._ground_truth_labels_placeholders):
for name, tensor in six.iteritems(tensors):
feed_dict[tensor] = values[name]
elif isinstance(ground_truth_labels, Bbox2DLabel):
label_dict = ground_truth_labels._asdict()
placeholders_dict = self._ground_truth_labels_placeholders._asdict()
for field_name in Bbox2DLabel._fields:
placeholder = placeholders_dict[field_name]
field_value = label_dict[field_name]
if field_name == 'vertices':
feed_dict[placeholder.coordinates] = field_value.coordinates
feed_dict[placeholder.canvas_shape.width] = field_value.canvas_shape.width
feed_dict[placeholder.canvas_shape.height] = field_value.canvas_shape.height
else:
feed_dict[placeholder] = field_value
else:
raise TypeError("Unknown ground truth label type")
return feed_dict
@property
def ground_truth_labels(self):
"""Return labels placeholders to be used for validation."""
return self._ground_truth_labels_placeholders
@staticmethod
def get_session_config():
"""Constrain TensorFlow to use CPU to avoid conflicting with TensorRT on the GPU.
Returns:
Tensorflow session config.
"""
gpu_options = tf.compat.v1.GPUOptions(
per_process_gpu_memory_fraction=0.33)
session_config = tf.compat.v1.ConfigProto(
gpu_options=gpu_options,
device_count={'GPU': 0, 'CPU': 1})
return session_config
def _get_validation_iterator(self, session, dataset_percentage=100.0):
"""Generator that yields batch predictions, labels, and cost.
Args:
session (tf.Session): Session to be used for evaluation.
dataset_percentage (float): % of the dataset to evaluate.
Returns:
predictions_batch: Raw predictions for current batch.
gt_batch: List of ground truth labels dicts for current batch.
batch_val_cost: Validation cost for current batch.
inference_time (float): Inference time for one image
"""
num_steps = int(self._steps * dataset_percentage / 100.0)
log_steps = 10
prev_start = default_timer()
for step in xrange(num_steps):
im_batch, gt_batch = \
session.run([self._images, self._ground_truth_labels])
feed_dict = dict()
ground_truth_labels_feed_dict = \
self._get_ground_truth_labels_feed_dict(
gt_batch)
predictions_feed_dict = self.gridbox.get_predictions_feed_dict(
im_batch)
feed_dict.update(ground_truth_labels_feed_dict)
feed_dict.update(predictions_feed_dict)
start = default_timer()
if (step % log_steps) == 0:
logger.info("step %d / %d, %.2fs/step" %
(step, num_steps, (start-prev_start)/log_steps))
prev_start = start
predictions_batch, batch_val_cost = \
session.run(self.gridbox.get_validation_tensors(),
feed_dict=feed_dict)
end = default_timer()
batch_size = len(list(predictions_batch.values())[0]['bbox'])
inference_time = (end - start) / batch_size
yield predictions_batch, gt_batch, batch_val_cost,\
inference_time
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/evaluation/tensorrt_evaluator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Gather camera metadata for evaluation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import warnings
import numpy as np
from nvidia_tao_tf1.blocks.multi_source_loader.types import Bbox2DLabel
FRAME_ID_KEY = 'frame/id'
CAMERA_LOCATION_KEY = 'frame/camera_location'
IMAGE_DIMENSIONS_KEY = 'frame/image_dimensions'
VALID_METADATA_KEYS = {FRAME_ID_KEY, CAMERA_LOCATION_KEY, IMAGE_DIMENSIONS_KEY}
Frame = namedtuple("Frame", [
'frame_id',
'dimensions',
'camera',
])
def get_metadata_from_batch_ground_truth(batch_data, num_frames):
"""Parse a batch of metadata.
Args:
batch_data: A list of dict of lists containing the ground truth and possible metadata.
num_frames (int): Number of frames seen so far. The frame number is set as the frame id.
Return:
metadata: Metadata for the current minibatch. Keys are frame number (integer) and values
are tuple (frame_id, camera_location, image_dimension). Values for camera_location and
image_dimension are None if not defined, frame_id is frame_idx when not defined.
"""
if isinstance(batch_data, list):
missing_keys = VALID_METADATA_KEYS - set(batch_data[0].keys())
if len(missing_keys) > 0:
warnings.warn("One or more metadata field(s) are missing from ground_truth batch_data, "
"and will be replaced with defaults: %s" % list(missing_keys))
metadata = []
for frame_idx, frame_data in enumerate(batch_data, num_frames):
frame_id = frame_data.get(FRAME_ID_KEY, (frame_idx,))[0]
camera_location = frame_data.get(CAMERA_LOCATION_KEY, (None,))[0]
image_dimensions = tuple(frame_data[IMAGE_DIMENSIONS_KEY][0]) \
if IMAGE_DIMENSIONS_KEY in frame_data else None
metadata.append((frame_id, camera_location, image_dimensions))
elif isinstance(batch_data, Bbox2DLabel):
metadata = [Frame(
np.squeeze(batch_data.frame_id[i]).flatten()[
0].decode().replace("/", "_"),
(batch_data.vertices.canvas_shape.width[i].size,
batch_data.vertices.canvas_shape.height[i].size),
None)
for i in range(batch_data.object_class.dense_shape[0])]
else:
raise NotImplementedError("Unhandled batch data of type: {}".format(type(Bbox2DLabel)))
return metadata
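# --- Illustrative example (addition, not part of the original module) ---
# A minimal sketch of the list-of-dicts input path; the frame id, camera location,
# and image dimensions below are hypothetical.
def _example_get_metadata_from_batch_ground_truth():
    batch_data = [{
        'frame/id': ('000123',),
        'frame/camera_location': ('front',),
        'frame/image_dimensions': ((960, 544),),
    }]
    metadata = get_metadata_from_batch_ground_truth(batch_data, num_frames=0)
    # One (frame_id, camera_location, image_dimensions) tuple per frame.
    assert metadata == [('000123', 'front', (960, 544))]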
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/evaluation/metadata.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A class to evaluate a DetectNet V2 model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import logging
import sys
from timeit import default_timer
import numpy as np
from six.moves import range
from tabulate import tabulate
import tensorflow as tf
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.detectnet_v2.evaluation.compute_metrics import ComputeMetrics
from nvidia_tao_tf1.cv.detectnet_v2.evaluation.ground_truth import process_batch_ground_truth
from nvidia_tao_tf1.cv.detectnet_v2.postprocessor.postprocessing import PostProcessor
logger = logging.getLogger(__name__)
class Evaluator(object):
'''
    Computes detection metrics for a gridbox model.
    Computes the metrics suite of mAP and per-class average precision (VOC 2009),
    with a default IoU threshold of 0.5.
'''
def __init__(self,
postprocessing_config,
evaluation_config,
gridbox_model,
images,
ground_truth_labels,
steps,
confidence_models=None,
target_class_mapping=None,
sqlite_paths=None):
'''
Init function.
Arguments:
            postprocessing_config (PostProcessingConfig): Object holding postprocessing parameters.
            evaluation_config: evaluation_config_pb2.EvaluationConfig object.
            gridbox_model (GridboxModel): A GridboxModel instance.
            images: Dataset input tensors to be used for validation.
            ground_truth_labels: Ground truth label tensors.
            steps (int): Number of minibatches to loop the validation dataset once.
            confidence_models (dict): Optional dict of ConfidenceModel instances (e.g. an MLP
                confidence regressor), indexed by target class name.
            target_class_mapping (dict): Maps from source class to target class (both str).
            sqlite_paths (list): Optional list of paths to HumanLoop sqlite exports.
self._postprocessing_config = postprocessing_config
self._evaluation_config = evaluation_config
self._images = images
self._ground_truth_labels = ground_truth_labels
self._steps = steps
self._target_class_mapping = target_class_mapping
self._target_class_names = gridbox_model.get_target_class_names()
self._confidence_models = None
self.confidence_models = {}
self.gridbox = gridbox_model
self._postprocessor = PostProcessor(
postprocessing_config=self._postprocessing_config,
confidence_models=self._confidence_models,
image_size=(self.gridbox.input_width, self.gridbox.input_height))
self._sqlite_paths = None
@property
def keras_models(self):
"""Return list of Keras models to be loaded in current session."""
self._keras_models = [self.gridbox.keras_model]
self._keras_models.extend([conf_model.keras_model for conf_model
in self.confidence_models.values()])
return self._keras_models
@property
def ground_truth_labels(self):
"""Wrap labels to be used for validation.
Child classes may override this if needed. This base class does nothing.
"""
return self._ground_truth_labels
@staticmethod
def get_session_config():
"""Return session configuration specific to this Evaluator."""
gpu_options = tf.compat.v1.GPUOptions(
allow_growth=True
)
config = tf.compat.v1.ConfigProto(gpu_options=gpu_options)
return config
def _get_validation_iterator(self, session, dataset_percentage):
"""Generator that yields batch predictions, labels, and cost.
Args:
session (tf.Session): Session to be used for evaluation.
dataset_percentage (float): % of the dataset to evaluate.
Returns:
batch_predictions: Raw predictions for current batch.
batch_ground_truth_labels: List of ground truth labels dicts for current batch.
batch_mean_validation_cost (float): Mean validation cost for current batch.
inference_time (float): Inference time for one image.
"""
num_steps = int(self._steps * dataset_percentage / 100.0)
prev_start = default_timer()
log_steps = 10
for step in range(num_steps):
start = default_timer()
if (step % log_steps) == 0:
logger.info("step %d / %d, %.2fs/step" %
(step, num_steps, (start-prev_start)/log_steps))
prev_start = start
batch_predictions, batch_validation_cost, batch_ground_truth_labels = \
session.run(self.gridbox.get_validation_tensors() +
[self.ground_truth_labels])
end = default_timer()
batch_size = len(list(batch_predictions.values())[0]['bbox'])
inference_time = (end - start) / batch_size
batch_mean_validation_cost = batch_validation_cost / num_steps
yield batch_predictions, batch_ground_truth_labels, batch_mean_validation_cost,\
inference_time
def evaluate(self, session, dataset_percentage=100.0):
"""Evaluate a DetectNet V2 model.
Make predictions using the model and convert prediction and ground truth arrays to
Detection and GroundTruth objects, respectively. Also, compile frame metadata for
metrics computation, if available.
Arguments:
session (tf.Session): Session to be used for evaluation.
dataset_percentage (float): % of the dataset to evaluate.
Return:
metrics_results: DetectionResults object from the metrics library.
metrics_results_with_confidence: DetectionResults with confidence models applied.
validation_cost: Validation cost value.
inference_time: Median inference time for one image in seconds.
"""
target_class_names = self._target_class_names
# The ordering of these elements is used to determine the order in which to print them
# during print_metrics.
clustered_detections = collections.OrderedDict(
(target_class, []) for target_class in target_class_names)
ground_truths = []
frame_metadata = []
inference_times = []
validation_cost = 0.
batch_count = 0
for batch_predictions, batch_ground_truth_labels, batch_mean_validation_cost, \
inference_time in self._get_validation_iterator(
session, dataset_percentage):
validation_cost += batch_mean_validation_cost
inference_times.append(inference_time)
# Append detections, ground truths and metadata from this batch to the data structures
# passed to metrics computation.
batch_detections = \
self._postprocessor.cluster_predictions(
predictions=batch_predictions)
for target_class in target_class_names:
clustered_detections[target_class] += batch_detections[target_class]
# Process groundtruth tensors to get GroundTruth objects and frame_metadata needed by
# metrics.
batch_ground_truth_objects, batch_metadata = \
process_batch_ground_truth(
batch_ground_truth_labels, len(frame_metadata))
ground_truths += batch_ground_truth_objects
frame_metadata += batch_metadata
batch_count += 1
cdm = ComputeMetrics(clustered_detections, ground_truths,
self.gridbox.input_width,
self.gridbox.input_height,
self._target_class_names,
self._evaluation_config)
metrics_results = cdm(num_recall_points=11, ignore_neutral_boxes=False)
# Use median instead of average to be more robust against outliers.
inference_time = np.median(inference_times)
return metrics_results, validation_cost, inference_time
@staticmethod
def print_metrics(metrics_results, validation_cost, median_inference_time):
""""Print a consolidated metrics table along with validation cost.
Args:
metrics_results (dict): a DetectionResults dict.
validation_cost (float): calculated validation cost.
median_inference_time (float): Median inference time.
"""
print()
print('Validation cost: %f' % validation_cost)
print('Mean average_precision (in %): {:0.4f}'.format(metrics_results['mAP'] * 100.))
print()
headers = ['class name', 'average precision (in %)']
        # Sort the per-class average precisions by class name and convert them to percentages.
data = sorted([(k, v * 100)
for k, v in list(metrics_results['average_precisions'].items())])
print(tabulate(data, headers=headers, tablefmt="pretty"))
# Flush to make the output look sequential.
print('\nMedian Inference Time: %f' % median_inference_time)
kpi_data = {
"validation cost": round(validation_cost, 8),
"mean average precision": round(metrics_results['mAP'] * 100, 4)
}
categorical_data = {
"average_precision": {
k: round(v * 100, 4) for k, v in list(
metrics_results['average_precisions'].items()
)
}
}
s_logger = status_logging.get_status_logger()
if isinstance(s_logger, status_logging.StatusLogger):
s_logger.categorical = categorical_data
s_logger.kpi = kpi_data
s_logger.write(
status_level=status_logging.Status.RUNNING,
message="Evaluation metrics generated."
)
sys.stdout.flush()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/evaluation/evaluation.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Datastructures and functions for detection ground truths."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
from itertools import repeat
import numpy as np
from six.moves import zip
from nvidia_tao_tf1.blocks.multi_source_loader.types import Bbox2DLabel
from nvidia_tao_tf1.cv.detectnet_v2.evaluation.metadata import get_metadata_from_batch_ground_truth
# Python 2 vs Python 3 string.
try:
# Python 2.
unicode_func = unicode
except NameError:
def unicode_func(x):
"""Convert a string to unicode."""
if isinstance(x, str):
return x
return x.decode()
GroundTruth = namedtuple('GroundTruth', [
'class_name', # e.g. 'car'
'bbox', # (x1, y1, x2, y2)
'truncation', # float in KITTI
'truncation_type', # int (0 or 1) in Cyclops
'occlusion', # int in KITTI
'is_cvip', # boolean
'world_bbox_z', # float
'front', # float.
'back', # float.
'orientation', # float.
])
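# Illustrative sketch (not used by the evaluation pipeline): how a single GroundTruth entry
# for a fully visible, non-truncated object might be built. The literal values below are
# hypothetical placeholders.
def _example_ground_truth():
    """Return a sample GroundTruth instance, for documentation purposes only."""
    return GroundTruth(class_name=unicode_func('car'), bbox=(10.0, 20.0, 110.0, 220.0),
                       truncation=0.0, truncation_type=0, occlusion=0, is_cvip=False,
                       world_bbox_z=0.0, front=-1.0, back=-1.0, orientation=-1.0)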
DONT_CARE_CLASS_NAME = 'dontcare'
def _populate_ground_truths(object_data):
"""Helper function to populate the GroundTruth instances from dataloader labels.
Args:
        object_data (iterator): Each element must contain the fields in the order unpacked
            below: (object_class, bbox, occlusion, truncation, truncation_type, is_cvip,
            world_bbox_z, non_facing, front, back, orientation).
Returns:
ground_truths (list): List of GroundTruth instances populated appropriately.
"""
ground_truths = []
for object_class, bbox, occlusion, truncation, truncation_type, \
is_cvip, world_bbox_z, non_facing, front, back, orientation in object_data:
if non_facing:
            # TODO(@williamz): This mapping exists because of the metrics code; it should
            # properly be handled differently.
# Map non-facing road signs to dontcare class.
object_class = DONT_CARE_CLASS_NAME
ground_truth = GroundTruth(class_name=unicode_func(object_class), bbox=bbox,
truncation=truncation, truncation_type=truncation_type,
occlusion=occlusion, is_cvip=bool(is_cvip),
world_bbox_z=world_bbox_z, front=front, back=back,
orientation=orientation)
ground_truths.append(ground_truth)
return ground_truths
def get_ground_truth_objects_from_batch_ground_truth(batch_data):
"""Parse a batch of ground truth dictionaries to GroundTruth objects.
Args:
batch_data: Ground truth data parsed from tfrecords as a list of dicts. Each dict
represents ground truth values such as bbox coordinates for one frame.
Returns:
ground_truths: List of list of GroundTruth objects parsed from this minibatch.
"""
ground_truths = []
for frame_data in batch_data:
frame_ground_truths = []
object_data = zip(frame_data['target/object_class'],
frame_data['target/bbox_coordinates'],
# Optional.
frame_data.get('target/occlusion', repeat(0)),
# Optional.
frame_data.get('target/truncation', repeat(0.0)),
# Optional.
frame_data.get('target/truncation_type', repeat(0)),
# Optional.
frame_data.get('target/is_cvip', repeat(False)),
# Optional.
frame_data.get('target/world_bbox_z', repeat(0.0)),
# Optional.
frame_data.get('target/non_facing', repeat(False)),
# Optional.
frame_data.get('target/front', repeat(-1.0)),
# Optional.
frame_data.get('target/back', repeat(-1.0)),
frame_data.get('target/orientation', repeat(-1.0))
)
frame_ground_truths = _populate_ground_truths(object_data)
ground_truths.append(frame_ground_truths)
return ground_truths
def _get_features_from_bbox_2d_label(bbox_label, feature_name, start_idx, end_idx, default_value):
"""Helper function to extract relevant values in a Bbox2DLabel.
Args:
bbox_label (Bbox2DLabel): Label containing all the features for a minibatch.
feature_name (str): Name of the field to look for in ``bbox_label``. These should be one
of the fields of the ``Bbox2DLabel`` namedtuple.
start_idx (int): Start index of the values.
end_idx (int): End index of the values.
default_value (variable): If the field is not "present" in ``bbox_label`` (e.g. an optional
field such as 'front' or 'back' marker), the ``default_value`` iterator to return
instead.
Returns:
If the ``feature_name`` is properly populated in the ``bbox_label``, then the values
corresponding to the indices provided are returned. Otherwise, an iterator with
``default_value`` is returned.
"""
feature_values = getattr(bbox_label, feature_name, [])
if hasattr(feature_values, "values"):
if isinstance(feature_values.values, np.ndarray) and feature_values.values.size > 0:
return feature_values.values[start_idx:end_idx]
# TODO(@williamz): consider removing this as only unit tests would realistically populate
# the fields with lists instead of arrays.
if type(feature_values.values) == list and len(feature_values.values) > 0:
return feature_values.values[start_idx:end_idx]
return repeat(default_value)
def get_ground_truth_objects_from_bbox_label(bbox_label):
"""Parse a Bbox2DLabel to GroundTruth objects.
Args:
bbox_label (Bbox2DLabel): Contains all the features for a minibatch.
Returns:
ground_truths: List of list of GroundTruth objects parsed from this minibatch.
"""
ground_truths = []
batch_size = bbox_label.vertices.coordinates.dense_shape[0]
# Because the last frame(s) may very well be devoid of any labels, we need to make sure
# the bincount still has ``batch_size`` entries.
num_ground_truths_per_image = \
np.bincount(
bbox_label.object_class.indices[:, 0], minlength=batch_size)
# The leading [0] here is to start the cumulative sum at 0 and not
# num_ground_truths_per_image[0].
start_end_indices = \
np.cumsum(np.concatenate(([0], num_ground_truths_per_image)))
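    # Worked example: if num_ground_truths_per_image is [2, 0, 3], start_end_indices becomes
    # [0, 2, 2, 5], so frame 0 owns label rows [0, 2), frame 1 owns the empty slice [2, 2),
    # and frame 2 owns rows [2, 5).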
bbox_coords = np.reshape(bbox_label.vertices.coordinates.values, (-1, 4))
for batch_idx in range(batch_size):
start_idx = start_end_indices[batch_idx]
end_idx = start_end_indices[batch_idx+1]
object_data = zip(
bbox_label.object_class.values[start_idx:end_idx],
bbox_coords[start_idx:end_idx, :],
_get_features_from_bbox_2d_label(
bbox_label, 'occlusion', start_idx, end_idx, 0),
_get_features_from_bbox_2d_label(
bbox_label, 'truncation', start_idx, end_idx, 0.0),
_get_features_from_bbox_2d_label(
bbox_label, 'truncation_type', start_idx, end_idx, 0),
_get_features_from_bbox_2d_label(
bbox_label, 'is_cvip', start_idx, end_idx, False),
_get_features_from_bbox_2d_label(
bbox_label, 'world_bbox_z', start_idx, end_idx, 0.0),
_get_features_from_bbox_2d_label(
bbox_label, 'non_facing', start_idx, end_idx, False),
_get_features_from_bbox_2d_label(
bbox_label, 'front', start_idx, end_idx, -1.0),
_get_features_from_bbox_2d_label(
bbox_label, 'back', start_idx, end_idx, -1.0),
_get_features_from_bbox_2d_label(
bbox_label, 'orientation', start_idx, end_idx, -1.0),
)
frame_ground_truths = _populate_ground_truths(object_data)
ground_truths.append(frame_ground_truths)
return ground_truths
def process_batch_ground_truth(batch_data, num_frames):
"""
Process the batch ground truth dicts to get GroundTruth objects and frame_metadata dict.
Args:
batch_data: Ground truth data parsed from tfrecords as a list of dicts. Each dict
represents ground truth values such as bbox coordinates for one frame.
num_frames: Number of frames seen so far. Used as index for frame_metadata.
Returns:
ground_truths: List of list of GroundTruth objects parsed from this minibatch.
frame_metadata: Metadata for the current minibatch. Keys are frame number (integer) and
values are tuple (frame_identifier, camera_location, image_dimension). camera_location
and image_dimension are None if not available.
"""
if isinstance(batch_data, list):
batch_groundtruth_objects = get_ground_truth_objects_from_batch_ground_truth(
batch_data)
elif isinstance(batch_data, Bbox2DLabel):
batch_groundtruth_objects = get_ground_truth_objects_from_bbox_label(
batch_data)
else:
raise NotImplementedError("Unhandled batch data of type: {}".format(type(batch_data)))
frame_metadata = get_metadata_from_batch_ground_truth(
batch_data, num_frames)
return batch_groundtruth_objects, frame_metadata
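# Minimal usage sketch, assuming `batch_data` comes straight from the dataloader and the caller
# keeps a running frame counter (as evaluation.py does):
#
#     batch_objects, batch_metadata = process_batch_ground_truth(batch_data, num_frames_seen)
#     num_frames_seen += len(batch_objects)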
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/evaluation/ground_truth.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A class to compute detection metrics on the gridbox model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import math
import sys
import numpy as np
from tqdm import trange
logger = logging.getLogger(__name__)
def iou(boxes1, boxes2, border_pixels='half'):
'''
numpy version of element-wise iou.
Computes the intersection-over-union similarity (also known as Jaccard similarity)
of two sets of axis-aligned 2D rectangular boxes.
    Let `boxes1` and `boxes2` contain `m` and `n` boxes, respectively. The IoU is computed
    element-wise, so the shapes of `boxes1` and `boxes2` must be broadcast-compatible
    (typically `m == n`, or one of the two arrays contains a single box).
Arguments:
        boxes1 (array): Either a 1D Numpy array of shape `(4, )` containing the coordinates
            for one box in the format `(xmin, ymin, xmax, ymax)`, or a 2D Numpy array of
            shape `(m, 4)` containing the coordinates for `m` boxes. The shape must be
            broadcast-compatible with `boxes2`.
        boxes2 (array): Either a 1D Numpy array of shape `(4, )` containing the coordinates
            for one box in the format `(xmin, ymin, xmax, ymax)`, or a 2D Numpy array of
            shape `(n, 4)` containing the coordinates for `n` boxes. The shape must be
            broadcast-compatible with `boxes1`.
border_pixels (str, optional): How to treat the border pixels of the bounding boxes.
Can be 'include', 'exclude', or 'half'. If 'include', the border pixels belong
to the boxes. If 'exclude', the border pixels do not belong to the boxes.
If 'half', then one of each of the two horizontal and vertical borders belong
            to the boxes, but not the other.
    Returns:
        A 1D Numpy array of dtype float containing values in [0,1], the Jaccard similarity of
        the boxes in `boxes1` and `boxes2`. 0 means there is no overlap between two given
        boxes, 1 means their coordinates are identical.
'''
# Make sure the boxes have the right shapes.
if boxes1.ndim > 2:
raise ValueError("boxes1 must have rank either 1 or 2, but has rank {}."
.format(boxes1.ndim))
if boxes2.ndim > 2:
raise ValueError("boxes2 must have rank either 1 or 2, but has rank {}."
.format(boxes2.ndim))
if boxes1.ndim == 1:
boxes1 = np.expand_dims(boxes1, axis=0)
if boxes2.ndim == 1:
boxes2 = np.expand_dims(boxes2, axis=0)
if not (boxes1.shape[1] == boxes2.shape[1] == 4):
raise ValueError("Boxes list last dim should be 4 but got shape {} and {}, respectively."
.format(boxes1.shape, boxes2.shape))
# Set the correct coordinate indices for the respective formats.
xmin = 0
ymin = 1
xmax = 2
ymax = 3
# Compute the union areas.
    if border_pixels == 'half':
        d = 0
    elif border_pixels == 'include':
        d = 1
    elif border_pixels == 'exclude':
        d = -1
    else:
        raise ValueError("`border_pixels` must be one of 'half', 'include' or 'exclude', "
                         "but got '{}'.".format(border_pixels))
# Compute the IoU.
min_xy = np.maximum(boxes1[:, [xmin, ymin]], boxes2[:, [xmin, ymin]])
max_xy = np.minimum(boxes1[:, [xmax, ymax]], boxes2[:, [xmax, ymax]])
# Compute the side lengths of the intersection rectangles.
side_lengths = np.maximum(0, max_xy - min_xy + d)
intersection_areas = side_lengths[:, 0] * side_lengths[:, 1]
boxes1_areas = (boxes1[:, xmax] - boxes1[:, xmin] + d) * \
(boxes1[:, ymax] - boxes1[:, ymin] + d)
boxes2_areas = (boxes2[:, xmax] - boxes2[:, xmin] + d) * \
(boxes2[:, ymax] - boxes2[:, ymin] + d)
union_areas = boxes1_areas + boxes2_areas - intersection_areas
return intersection_areas / union_areas
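# A minimal sanity check of the element-wise IoU above (illustrative only; the helper name
# below is hypothetical and is not referenced elsewhere in this module). Identical boxes yield
# an IoU of 1.0 and disjoint boxes yield 0.0.
def _example_iou_sanity_check():
    """Run a tiny self-check of ``iou`` on hand-crafted boxes."""
    boxes_a = np.array([[0., 0., 10., 10.], [0., 0., 10., 10.]])
    boxes_b = np.array([[0., 0., 10., 10.], [20., 20., 30., 30.]])
    overlaps = iou(boxes_a, boxes_b, border_pixels='half')
    assert np.isclose(overlaps[0], 1.0)
    assert np.isclose(overlaps[1], 0.0)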
class ComputeMetrics(object):
'''
Simple class to compute metrics for a detection model.
Returns VOC 2009 metrics: mAP, precision, recall.
'''
def __init__(self,
clustered_detections, ground_truth_labels,
image_width, image_height, target_class_names,
evaluation_config):
'''
Init function.
Arguments:
clustered_detections: postprocessed final detection tensor
ground_truth_labels: tensor containing ground truth
target_class_names: mapping of target class names
evaluation config: for getting iou thresholds, min/max box config for evaluations
'''
self.target_class_names = target_class_names
self.n_classes = len(self.target_class_names)
gt_format = {'class_id': 0, 'xmin': 1,
'ymin': 2, 'xmax': 3, 'ymax': 4}
self.gt_format = gt_format
self.prediction_results = None
self.num_gt_per_class = None
self.true_positives = None
self.false_positives = None
self.cumulative_true_positives = None
self.cumulative_false_positives = None
self.valid_evaluation_modes = {0: "sample",
1: "integrate"}
# "Cumulative" means that the i-th element in each list represents the precision for the
        # first i highest confidence predictions for that class.
self.cumulative_precisions = None
# "Cumulative" means that the i-th element in each list represents the recall for the first
        # i highest confidence predictions for that class.
self.cumulative_recalls = None
self.average_precisions = None
self.mean_average_precision = None
self.image_ids = []
self.image_labels = {}
self.image_height = image_height
self.image_width = image_width
# specifications from evaluation config proto
self.min_iou_thresholds = \
evaluation_config.minimum_detection_ground_truth_overlap
ap_key = evaluation_config.average_precision_mode
self.average_precision_mode = self.valid_evaluation_modes[ap_key]
self.detection_spec_for_gtruth_matching = \
evaluation_config.evaluation_box_configs
# call preparation of gtruth and detection config
self._prepare_internal_structures(
clustered_detections, ground_truth_labels)
def __call__(self,
round_confidences=False,
border_pixels='include',
sorting_algorithm='quicksort',
num_recall_points=11,
ignore_neutral_boxes=True,
verbose=True):
'''
        Computes the mean average precision of the detection model on the given dataset.
Optionally also returns the averages precisions, precisions, and recalls.
All the individual steps of the overall evaluation algorithm can also be called separately
(check out the other methods of this class) but this runs the overall algorithm all at once.
        Arguments:
            round_confidences (int, optional): `False` or an integer that is the number of
                decimals that the prediction confidences will be rounded to. If `False`, the
                confidences will not be rounded.
            border_pixels (str, optional): How to treat the border pixels of the bounding boxes.
                Can be 'include', 'exclude', or 'half'. If 'include', the border pixels belong
                to the boxes. If 'exclude', the border pixels do not belong to the boxes.
                If 'half', then one of each of the two horizontal and vertical borders belong
                to the boxes, but not the other.
            sorting_algorithm (str, optional): Which sorting algorithm the matching algorithm
                should use. This argument accepts any valid sorting algorithm for Numpy's
                `argsort()` function. You will usually want to choose between 'quicksort'
                (fastest and most memory efficient, but not stable) and 'mergesort' (slightly
                slower and less memory efficient, but stable). The official Matlab evaluation
                algorithm uses a stable sorting algorithm, so this algorithm is only guaranteed
                to behave identically if you choose 'mergesort', but it will almost always
                behave identically even if you choose 'quicksort' (but no guarantees).
            num_recall_points (int, optional): The number of points to sample from the
                precision-recall curve to compute the average precisions. In other words, this
                is the number of equidistant recall values for which the resulting precision
                will be computed. 11 points is the value used in the official Pascal VOC 2007
                detection evaluation algorithm.
            ignore_neutral_boxes (bool, optional): In case the data generator provides
                annotations indicating whether a ground truth bounding box is supposed to either
                count or be neutral for the evaluation, this argument decides what to do with
                these annotations. If `False`, even boxes that are annotated as neutral will be
                counted into the evaluation. If `True`, neutral boxes will be ignored for the
                evaluation. An example for evaluation-neutrality are the ground truth boxes
                annotated as "difficult" in the Pascal VOC datasets, which are usually treated
                as neutral for the evaluation.
            verbose (bool, optional): If `True`, will print out the progress during runtime.
        Note:
            The per-class matching IoU thresholds and the average precision mode ('sample' or
            'integrate') are taken from the evaluation config passed to the constructor.
        Returns:
            A dict with keys 'mAP' (the mean average precision as a float) and
            'average_precisions' (a dict mapping each target class name to its average
            precision).
'''
############################################################################################
# Get the total number of ground truth boxes for each class.
############################################################################################
self.get_num_gt_per_class(ignore_neutral_boxes=ignore_neutral_boxes,
verbose=False,
ret=False)
############################################################################################
# Match predictions to ground truth boxes for all classes.
############################################################################################
self.match_predictions(ignore_neutral_boxes=ignore_neutral_boxes,
matching_iou_threshold=self.min_iou_thresholds,
border_pixels=border_pixels,
sorting_algorithm=sorting_algorithm,
verbose=verbose,
ret=False)
############################################################################################
# Compute the cumulative precision and recall for all classes.
############################################################################################
self.compute_precision_recall(verbose=verbose, ret=False)
############################################################################################
# Compute the average precision for this class.
############################################################################################
self.compute_average_precisions(mode=self.average_precision_mode,
num_recall_points=num_recall_points,
verbose=verbose,
ret=False)
############################################################################################
# Compute the mean average precision.
############################################################################################
mean_average_precision = self.compute_mean_average_precision(ret=True)
############################################################################################
# Compile the returns.
dict_average_precisions = {}
for idx, t in enumerate(self.target_class_names):
dict_average_precisions[t] = self.average_precisions[idx]
composite_metrics = {}
composite_metrics['mAP'] = mean_average_precision
composite_metrics['average_precisions'] = dict_average_precisions
return composite_metrics
def _check_if_bbox_is_valid(self, bbox, class_name):
'''
Checks if a box is valid based on evaluation config.
Arguments:
bbox - [x1, y1, x2, y2]
class name - class name (string)
        return: returns "True" if the box meets the spec standards
'''
w = bbox[2] - bbox[0]
h = bbox[3] - bbox[1]
if h < self.detection_spec_for_gtruth_matching[class_name].minimum_height:
return False
if w < self.detection_spec_for_gtruth_matching[class_name].minimum_width:
return False
if h > self.detection_spec_for_gtruth_matching[class_name].maximum_height:
return False
if w > self.detection_spec_for_gtruth_matching[class_name].maximum_width:
return False
return True
def _prepare_internal_structures(self, clustered_detections,
ground_truth_labels):
'''
configures detections and ground truth tensor to right format.
Also fills up the gtruth tensor
Arguments:
clustered_detections: tensor containing postprocessed detections
ground_truth_labels: tensor containing ground truth
'''
logger.debug("Preparing internal datastructures")
target_class_names = self.target_class_names
results = [list() for _ in range(len(target_class_names))]
self.image_ids = []
for frame_index, frame_ground_truths in enumerate(ground_truth_labels):
gtruth_per_frame = []
self.image_ids.append(frame_index)
for target_class_id, target_class in enumerate(target_class_names):
for box_struct in clustered_detections[target_class][frame_index]:
bbox = box_struct.bbox
if (self._check_if_bbox_is_valid(bbox, target_class)):
confidence = box_struct.confidence
prediction = (int(frame_index), confidence,
round(bbox[0]), round(bbox[1]),
round(bbox[2]), round(bbox[3]))
results[target_class_id].append(prediction)
# build gtruth
for box_struct in frame_ground_truths:
bbox = box_struct.bbox
cid = box_struct.class_name
if cid == '-1':
continue
if not (self._check_if_bbox_is_valid(bbox, cid)):
continue
cid = target_class_names.index(cid)
gtruth = (int(cid),
round(bbox[0]), round(bbox[1]),
round(bbox[2]), round(bbox[3]))
gtruth_per_frame.append(gtruth)
self.image_labels[frame_index] = gtruth_per_frame
self.prediction_results = results
logger.debug("Internal datastructure prepared.")
def write_predictions_to_txt(self,
classes=None,
out_file_prefix='comp3_det_test_',
verbose=True):
'''
Writes the predictions for all classes to txt according to the Pascal VOC results format.
Arguments:
classes (list, optional): `None` or a list of strings containing the class names of all
classes in the dataset, including some arbitrary name for the background class. This
list will be used to name the output text files. The ordering of the names in the
list represents the ordering of the classes as they are predicted by the model,
i.e. the element with index 3 in this list should correspond to the class with class
ID 3 in the model's predictions. If `None`, the output text files will be named by
their class IDs.
out_file_prefix (str, optional): A prefix for the output text file names. The suffix to
each output text file name will be the respective class name followed by the `.txt`
file extension. This string is also how you specify the directory in which the
results are to be saved.
verbose (bool, optional): If `True`, will print out the progress during runtime.
Returns:
None.
'''
if self.prediction_results is None:
raise ValueError("There are no prediction results. You must run `predict_on_dataset()` \
before calling this method.")
# We generate a separate results file for each class.
for class_id in range(self.n_classes):
logger.debug("Writing results file for class {}/{}.".format(class_id+1,
self.n_classes+1))
if classes is None:
class_suffix = '{:04d}'.format(class_id)
else:
class_suffix = classes[class_id]
results_file = open('{}{}.txt'.format(
out_file_prefix, class_suffix), 'w')
logger.debug("Print out the file of results path: {}".format(results_file))
for prediction in self.prediction_results[class_id]:
prediction_list = list(prediction)
prediction_list[0] = '{:06d}'.format(int(prediction_list[0]))
prediction_list[1] = round(prediction_list[1], 4)
prediction_txt = ' '.join(map(str, prediction_list)) + '\n'
results_file.write(prediction_txt)
results_file.close()
logger.debug("All results files saved.")
def get_num_gt_per_class(self,
ignore_neutral_boxes=True,
verbose=True,
ret=False):
'''
Counts the number of ground truth boxes for each class across the dataset.
Arguments:
ignore_neutral_boxes (bool, optional): In case the data generator provides annotations
indicating whether a ground truth bounding box is supposed to either count or be
neutral for the evaluation, this argument decides what to do with these annotations.
If `True`, only non-neutral ground truth boxes will be counted, otherwise all ground
truth boxes will be counted.
verbose (bool, optional): If `True`, will print out the progress during runtime.
ret (bool, optional): If `True`, returns the list of counts.
Returns:
None by default. Optionally, a list containing a count of the number of ground truth
boxes for each class across the entire dataset.
'''
if self.image_labels is None:
raise ValueError("Computing the number of ground truth boxes per class not possible, \
no ground truth given.")
num_gt_per_class = np.zeros(shape=(self.n_classes), dtype=np.int)
class_id_index = self.gt_format['class_id']
ground_truth = self.image_labels
if verbose:
logger.debug('Computing the number of positive ground truth boxes per class.')
tr = trange(len(ground_truth), file=sys.stdout)
else:
tr = range(len(ground_truth))
# Iterate over the ground truth for all images in the dataset.
for i in tr:
boxes = np.asarray(ground_truth[i])
# Iterate over all ground truth boxes for the current image.
for j in range(boxes.shape[0]):
# If there is no such thing as evaluation-neutral boxes for
# our dataset, always increment the counter for the respective
# class ID.
class_id = int(boxes[j, class_id_index])
num_gt_per_class[class_id] += 1
self.num_gt_per_class = num_gt_per_class
if ret:
return num_gt_per_class
return None
def match_predictions(self,
matching_iou_threshold,
ignore_neutral_boxes=True,
border_pixels='include',
sorting_algorithm='quicksort',
verbose=True,
ret=False):
'''
Matches predictions to ground truth boxes.
Note that `predict_on_dataset()` must be called before calling this method.
Arguments:
ignore_neutral_boxes (bool, optional): In case the data generator provides annotations
indicating whether a ground truth bounding box is supposed to either count or be
neutral for the evaluation, this argument decides what to do with these annotations.
If `False`, even boxes that are annotated as neutral will be counted into the
evaluation. If `True`, neutral boxes will be ignored for the evaluation. An example
for evaluation-neutrality are the ground truth boxes annotated as "difficult" in the
Pascal VOC datasets, which are usually treated as neutral for the evaluation.
matching_iou_threshold (per class threshold dict): A prediction will be considered true
positive if it has a Jaccard overlap of at least `matching_iou_threshold` with any
ground truth bounding box of the same class.
border_pixels (str, optional): How to treat the border pixels of the bounding boxes.
Can be 'include', 'exclude', or 'half'. If 'include', the border pixels belong
to the boxes. If 'exclude', the border pixels do not belong to the boxes.
If 'half', then one of each of the two horizontal and vertical borders belong
                to the boxes, but not the other.
sorting_algorithm (str, optional): Which sorting algorithm the matching algorithm should
use. This argument accepts any valid sorting algorithm for Numpy's `argsort()`
function. You will usually want to choose between 'quicksort' (fastest and most
                memory efficient, but not stable) and 'mergesort' (slightly slower and less memory
efficient, but stable). The official Matlab evaluation algorithm uses a stable
sorting algorithm, so this algorithm is only guaranteed to behave identically if you
choose 'mergesort' as the sorting algorithm, but it will almost always behave
identically even if you choose 'quicksort' (but no guarantees).
verbose (bool, optional): If `True`, will print out the progress during runtime.
ret (bool, optional): If `True`, returns the true and false positives.
Returns:
None by default. Optionally, four nested lists containing the true positives, false
positives, cumulative true positives, and cumulative false positives for each class.
'''
if self.image_labels is None:
raise ValueError("Matching predictions to ground truth boxes not possible, no ground \
truth given.")
if self.prediction_results is None:
raise ValueError("There are no prediction results. You must run `predict_on_dataset()` \
before calling this method.")
class_id_gt = self.gt_format['class_id']
xmin_gt = self.gt_format['xmin']
ymin_gt = self.gt_format['ymin']
xmax_gt = self.gt_format['xmax']
ymax_gt = self.gt_format['ymax']
# Convert the ground truth to a more efficient format for what we need
# to do, which is access ground truth by image ID repeatedly.
ground_truth = {}
# Whether or not we have annotations to decide whether ground truth boxes should be neutral
# or not.
eval_neutral_available = False
for i in range(len(self.image_ids)):
image_id = self.image_ids[i]
labels = self.image_labels[i]
ground_truth[image_id] = np.asarray(labels)
        # The true positives for each class, sorted by descending confidence.
        true_positives = []
        # The false positives for each class, sorted by descending confidence.
        false_positives = []
cumulative_true_positives = []
cumulative_false_positives = []
# Iterate over all classes.
for class_id in range(self.n_classes):
if not self.target_class_names[class_id] in self.min_iou_thresholds:
raise ValueError("class {}, not in spec file for minimum overlap thresh"
.format(self.target_class_names[class_id]))
matching_iou_threshold = self.min_iou_thresholds[self.target_class_names[class_id]]
predictions = self.prediction_results[class_id]
# Store the matching results in these lists:
# 1 for every prediction that is a true positive, 0 otherwise
true_pos = np.zeros(len(predictions), dtype=np.int)
# 1 for every prediction that is a false positive, 0 otherwise
false_pos = np.zeros(len(predictions), dtype=np.int)
# In case there are no predictions at all for this class, we're done here.
if len(predictions) == 0:
logger.debug("No predictions for class {}/{}".format(class_id + 1,
self.n_classes))
true_positives.append(true_pos)
false_positives.append(false_pos)
cumulative_true_positives.append(np.cumsum(true_pos))
cumulative_false_positives.append(np.cumsum(false_pos))
continue
# Convert the predictions list for this class into a structured array so that we can
# sort it by confidence.
# Get the number of characters needed to store the image ID strings in the structured
# array.
# Create the data type for the structured array.
preds_data_type = np.dtype([('image_id', 'int'),
('confidence', 'f4'),
('xmin', 'f4'),
('ymin', 'f4'),
('xmax', 'f4'),
('ymax', 'f4')])
# Create the structured array
predictions = np.array(predictions, dtype=preds_data_type)
# Sort the detections by decreasing confidence.
descending_indices = np.argsort(-predictions['confidence'], kind=sorting_algorithm)
predictions_sorted = predictions[descending_indices]
if verbose:
tr = trange(len(predictions), file=sys.stdout)
tr.set_description("Matching predictions to ground truth, class {}/{}."
.format(class_id + 1, self.n_classes))
else:
                tr = range(len(predictions))
# Keep track of which ground truth boxes were already matched to a detection.
gt_matched = {}
# Iterate over all predictions.
for i in tr:
prediction = predictions_sorted[i]
image_id = prediction['image_id']
# Convert the structured array element to a regular array.
pred_box = np.asarray(list(prediction[['xmin', 'ymin', 'xmax', 'ymax']]))
# Get the relevant ground truth boxes for this prediction,
# i.e. all ground truth boxes that match the prediction's
# image ID and class ID.
# The ground truth could either be a tuple with
# `(ground_truth_boxes, eval_neutral_boxes)` or only `ground_truth_boxes`.
if ignore_neutral_boxes and eval_neutral_available:
gt, eval_neutral = ground_truth[image_id]
else:
gt = ground_truth[image_id]
gt = np.asarray(gt)
if gt.size == 0:
# If the image doesn't contain any objects of this class,
# the prediction becomes a false positive.
false_pos[i] = 1
continue
else:
class_mask = gt[:, class_id_gt] == class_id
gt = gt[class_mask]
if gt.size == 0:
# If the image doesn't contain any objects of this class,
# the prediction becomes a false positive.
false_pos[i] = 1
continue
if ignore_neutral_boxes and eval_neutral_available:
eval_neutral = eval_neutral[class_mask]
# Compute the IoU of this prediction with all ground truth boxes of the same class.
overlaps = iou(boxes1=gt[:, [xmin_gt, ymin_gt, xmax_gt, ymax_gt]],
boxes2=pred_box,
border_pixels=border_pixels)
# For each detection, match the ground truth box with the highest overlap.
# It's possible that the same ground truth box will be matched to multiple
# detections.
gt_match_index = np.argmax(overlaps)
gt_match_overlap = overlaps[gt_match_index]
if gt_match_overlap < matching_iou_threshold:
# False positive, IoU threshold violated:
# Those predictions whose matched overlap is below the threshold become
# false positives.
false_pos[i] = 1
else:
if (not (ignore_neutral_boxes and eval_neutral_available) or
(eval_neutral[gt_match_index] is False)):
# If this is not a ground truth that is supposed to be evaluation-neutral
# (i.e. should be skipped for the evaluation) or if we don't even have the
# concept of neutral boxes.
if not (image_id in gt_matched):
# True positive:
# If the matched ground truth box for this prediction hasn't been
# matched to a different prediction already, we have a true positive.
true_pos[i] = 1
gt_matched[image_id] = np.zeros(
shape=(gt.shape[0]), dtype=np.bool)
gt_matched[image_id][gt_match_index] = True
elif not gt_matched[image_id][gt_match_index]:
# True positive:
# If the matched ground truth box for this prediction hasn't been
# matched to a different prediction already, we have a true positive.
true_pos[i] = 1
gt_matched[image_id][gt_match_index] = True
else:
# False positive, duplicate detection:
# If the matched ground truth box for this prediction has already been
# matched to a different prediction previously, it is a duplicate
# detection for an already detected object, which counts as a false
# positive.
false_pos[i] = 1
true_positives.append(true_pos)
false_positives.append(false_pos)
# Cumulative sums of the true positives
cumulative_true_pos = np.cumsum(true_pos)
# Cumulative sums of the false positives
cumulative_false_pos = np.cumsum(false_pos)
cumulative_true_positives.append(cumulative_true_pos)
cumulative_false_positives.append(cumulative_false_pos)
self.true_positives = true_positives
self.false_positives = false_positives
self.cumulative_true_positives = cumulative_true_positives
self.cumulative_false_positives = cumulative_false_positives
if ret:
return true_positives, false_positives, cumulative_true_positives, \
cumulative_false_positives
return None
def compute_precision_recall(self, verbose=True, ret=False):
'''
Computes the precisions and recalls for all classes.
Note that `match_predictions()` must be called before calling this method.
Arguments:
verbose (bool, optional): If `True`, will print out the progress during runtime.
ret (bool, optional): If `True`, returns the precisions and recalls.
Returns:
None by default. Optionally, two nested lists containing the cumulative precisions and
recalls for each class.
'''
if (self.cumulative_true_positives is None) or (self.cumulative_false_positives is None):
raise ValueError("True and false positives not available. You must run \
`match_predictions()` before you call this method.")
if (self.num_gt_per_class is None):
raise ValueError("Number of ground truth boxes per class not available. You must run \
`get_num_gt_per_class()` before you call this method.")
cumulative_precisions = []
cumulative_recalls = []
# Iterate over all classes.
for class_id in range(self.n_classes):
logger.debug("Computing precisions and recalls, class {}/{}".format(class_id + 1,
self.n_classes))
tp = self.cumulative_true_positives[class_id]
fp = self.cumulative_false_positives[class_id]
# 1D array with shape `(num_predictions,)`
cumulative_precision = np.where(tp + fp > 0, tp / (tp + fp), 0)
# 1D array with shape `(num_predictions,)`
cumulative_recall = tp / self.num_gt_per_class[class_id]
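            # Worked example: with cumulative tp = [1, 1, 2], fp = [0, 1, 1] and 4 ground truth
            # boxes for the class, the cumulative precisions are [1.0, 0.5, 0.667] and the
            # cumulative recalls are [0.25, 0.25, 0.5].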
cumulative_precisions.append(cumulative_precision)
cumulative_recalls.append(cumulative_recall)
self.cumulative_precisions = cumulative_precisions
self.cumulative_recalls = cumulative_recalls
if ret:
return cumulative_precisions, cumulative_recalls
return None
def compute_average_precisions(self, mode='sample', num_recall_points=11,
verbose=True, ret=False):
'''
Computes the average precision for each class.
Can compute the Pascal-VOC-style average precision in both the pre-2010 (k-point sampling)
and post-2010 (integration) algorithm versions.
Note that `compute_precision_recall()` must be called before calling this method.
Arguments:
mode (str, optional): Can be either 'sample' or 'integrate'. In the case of 'sample',
the average precision will be computed according to the Pascal VOC formula that was
used up until VOC 2009, where the precision will be sampled for `num_recall_points`
recall values. In the case of 'integrate', the average precision will be computed
according to the Pascal VOC formula that was used from VOC 2010 onward, where the
average precision will be computed by numerically integrating over the whole
                precision-recall curve instead of sampling individual points from it. 'integrate'
mode is basically just the limit case of 'sample' mode as the number of sample
points increases. For details, see the references below.
num_recall_points (int, optional): Only relevant if mode is 'sample'. The number of
points to sample from the precision-recall-curve to compute the average precisions.
In other words, this is the number of equidistant recall values for which the
resulting precision will be computed. 11 points is the value used in the official
Pascal VOC pre-2010 detection evaluation algorithm.
verbose (bool, optional): If `True`, will print out the progress during runtime.
ret (bool, optional): If `True`, returns the average precisions.
Returns:
None by default. Optionally, a list containing average precision for each class.
References:
http://host.robots.ox.ac.uk/pascal/VOC/voc2012/htmldoc/devkit_doc.html#sec:ap
'''
if (self.cumulative_precisions is None) or (self.cumulative_recalls is None):
raise ValueError("Precisions and recalls not available. You must run \
`compute_precision_recall()` before you call this method.")
if not (mode in {'sample', 'integrate'}):
raise ValueError("`mode` can be either 'sample' or 'integrate', but received '{}'"
.format(mode))
average_precisions = []
# Iterate over all classes.
for class_id in range(self.n_classes):
logger.debug("Computing average precision, class {}/{}".format(class_id + 1,
self.n_classes))
cumulative_precision = self.cumulative_precisions[class_id]
cumulative_recall = self.cumulative_recalls[class_id]
average_precision = 0.0
if mode == 'sample':
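                # With num_recall_points=11, t sweeps 0.0, 0.1, ..., 1.0 and the average
                # precision is the mean of the best precision achieved at any recall >= t
                # (the classic VOC 2007 11-point rule).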
for t in np.linspace(start=0, stop=1, num=num_recall_points, endpoint=True):
cum_prec_recall_greater_t = cumulative_precision[cumulative_recall >= t]
if cum_prec_recall_greater_t.size == 0:
precision = 0.0
else:
precision = np.amax(cum_prec_recall_greater_t)
average_precision += precision
average_precision /= num_recall_points
elif mode == 'integrate':
# We will compute the precision at all unique recall values.
unique_recalls, unique_recall_indices, _ \
= np.unique(cumulative_recall, return_index=True,
return_counts=True)
                # Store the maximal precision for each recall value and the absolute difference
                # between any two unique recall values in the lists below. The products of these
                # two numbers constitute the rectangular areas whose sum will be our numerical
                # integral.
maximal_precisions = np.zeros(unique_recalls.shape, unique_recalls.dtype)
recall_deltas = np.zeros(unique_recalls.shape, unique_recalls.dtype)
                # Iterate over all unique recall values in reverse order. This saves a lot of
                # computation: For each unique recall value `r`, we want to get the maximal
                # precision value obtained for any recall value `r* >= r`. Once we know the
                # maximal precision for the last `k` recall values after a given iteration, then
                # in the next iteration, in order to compute the maximal precisions for the last
                # `l > k` recall values, we only need to compute the maximal precision for
                # `l - k` recall values and then take the maximum between that and the
                # previously computed maximum, instead of computing the maximum over all `l`
                # values. We skip the very last recall value, since the precision between the
                # last recall value and recall 1.0 is defined to be zero.
for i in range(len(unique_recalls)-2, -1, -1):
begin = unique_recall_indices[i]
end = unique_recall_indices[i + 1]
# When computing the maximal precisions, use the maximum of the previous
# iteration to avoid unnecessary repeated computation over the same precision
# values. The maximal precisions are the heights of the rectangle areas of our
# integral under the precision-recall curve.
maximal_precisions[i] = np.maximum(np.amax(cumulative_precision[begin:end]),
maximal_precisions[i + 1])
# The differences between two adjacent recall values are the widths of our
# rectangle areas.
recall_deltas[i] = unique_recalls[i + 1] - \
unique_recalls[i]
average_precision = np.sum(maximal_precisions * recall_deltas)
average_precisions.append(average_precision)
self.average_precisions = [val if not math.isnan(val) else 0 for val in average_precisions]
if ret:
return average_precisions
return None
def compute_mean_average_precision(self, ret=True):
'''
Computes the mean average precision over all classes.
Note that `compute_average_precisions()` must be called before calling this method.
Arguments:
ret (bool, optional): If `True`, returns the mean average precision.
Returns:
A float, the mean average precision, by default. Optionally, None.
'''
if self.average_precisions is None:
raise ValueError("Average precisions not available. You must run \
`compute_average_precisions()` before you call this method.")
        # Average over all target classes; there is no separate background class entry.
mean_average_precision = np.average(self.average_precisions)
self.mean_average_precision = mean_average_precision
if ret:
return mean_average_precision
return None
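# Minimal usage sketch (the detections, labels, image size, class list and evaluation config
# below are placeholders; evaluation.py builds the real inputs):
#
#     cdm = ComputeMetrics(detections, labels, 960, 544, ['car', 'pedestrian'], eval_config)
#     results = cdm(num_recall_points=11, ignore_neutral_boxes=False)
#     print(results['mAP'], results['average_precisions'])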
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/evaluation/compute_metrics.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""All functions and classes related to DetectNet V2 model evaluation is added here."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/evaluation/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A builder for DetectNet V2 Evaluator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import keras
import tensorflow as tf
from nvidia_tao_tf1.core.utils import set_random_seed
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.common.utils import get_model_file_size
from nvidia_tao_tf1.cv.detectnet_v2.cost_function.cost_auto_weight_hook import (
build_cost_auto_weight_hook
)
from nvidia_tao_tf1.cv.detectnet_v2.cost_function.cost_function_parameters import (
build_target_class_list,
get_target_class_names
)
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.build_dataloader import build_dataloader
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.build_dataloader import select_dataset_proto
from nvidia_tao_tf1.cv.detectnet_v2.evaluation.evaluation import Evaluator
from nvidia_tao_tf1.cv.detectnet_v2.evaluation.evaluation_config import build_evaluation_config
from nvidia_tao_tf1.cv.detectnet_v2.evaluation.tensorrt_evaluator import TensorRTEvaluator
from nvidia_tao_tf1.cv.detectnet_v2.model.build_model import build_model
from nvidia_tao_tf1.cv.detectnet_v2.model.build_model import select_model_proto
from nvidia_tao_tf1.cv.detectnet_v2.postprocessor.postprocessing_config import (
build_postprocessing_config
)
from nvidia_tao_tf1.cv.detectnet_v2.rasterizers.bbox_rasterizer import BboxRasterizer
from nvidia_tao_tf1.cv.detectnet_v2.rasterizers.build_bbox_rasterizer_config import (
build_bbox_rasterizer_config
)
from nvidia_tao_tf1.cv.detectnet_v2.visualization.visualizer import \
DetectNetTBVisualizer as Visualizer
logger = logging.getLogger(__name__)
EVALUATOR_CLASS = {
"tlt": Evaluator,
"tensorrt": TensorRTEvaluator
}
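# The `framework` argument of the builder below indexes into this mapping: 'tlt' evaluates a
# Keras/TLT checkpoint, while 'tensorrt' is assumed to evaluate an exported TensorRT engine
# through TensorRTEvaluator.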
def build_evaluator_for_trained_gridbox(experiment_spec,
model_path,
use_training_set,
use_confidence_models,
key=None,
framework="tlt"):
"""Load a trained DetectNet V2 model and data for evaluation.
Args:
experiment_spec: experiment_pb2.Experiment object.
model_path (str): Absolute path to a model file.
model_type (str): Model type: 'keras' or 'tensorrt'.
use_training_set (bool): If True, evaluate training set, else evaluate validation set.
use_confidence_models (bool): If True, load confidence models.
key (str): Key to load tlt model file.
framework (str): Backend framework for the evaluator.
Choices: ['tlt', 'tensorrt'].
Returns:
Evaluator instance.
Raises:
ValueError: if the model type is unsupported for evaluation.
"""
# Set testing phase.
keras.backend.set_learning_phase(0)
# Load the trained model.
Visualizer.build_from_config(experiment_spec.training_config.visualizer)
target_class_names = get_target_class_names(
experiment_spec.cost_function_config)
target_classes = build_target_class_list(
experiment_spec.cost_function_config)
# Select the model config, which might have ModelConfig / TemporalModelConfig type.
model_config = select_model_proto(experiment_spec)
gridbox_model = build_model(m_config=model_config,
target_class_names=target_class_names,
framework=framework)
config = gridbox_model.get_session_config()
keras.backend.set_session(tf.Session(config=config))
constructor_kwargs = {}
if framework == "tlt":
assert key is not None, (
"The key to load the model must be provided when using the tlt framework "
"to evaluate."
)
constructor_kwargs['enc_key'] = key
logging.info("Loading model weights.")
gridbox_model.load_model_weights(model_path, **constructor_kwargs)
if framework == "tensorrt":
gridbox_model.print_model_summary()
# Set Maglev random seed.
set_random_seed(experiment_spec.random_seed)
# For now, use e.g. batch_size from the training parameters.
if gridbox_model.max_batch_size:
batch_size = gridbox_model.max_batch_size
else:
batch_size = experiment_spec.training_config.batch_size_per_gpu
dataset_proto = select_dataset_proto(experiment_spec)
target_class_mapping = dict(dataset_proto.target_class_mapping)
# Build a dataloader.
logging.info("Building dataloader.")
dataloader = build_dataloader(
dataset_proto=dataset_proto,
augmentation_proto=experiment_spec.augmentation_config)
# Note that repeat is set to true, or we will not be able to get records into a
# fixed-shaped list.
images, ground_truth_labels, num_samples = dataloader.get_dataset_tensors(
batch_size, training=use_training_set, enable_augmentation=False, repeat=True)
logger.info("Found %d samples in validation set", num_samples)
# Note: this rounds up. If num_samples is not a multiple of batch_size, the last batch will
# be duplicate work. However, this is masked from metrics and is correct.
steps = (num_samples + batch_size - 1) // batch_size
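    # For example, 103 validation samples with a batch size of 4 give ceil(103 / 4) = 26 steps;
    # the final batch re-reads one sample, which, as noted above, is masked from the metrics.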
postprocessing_config = build_postprocessing_config(
experiment_spec.postprocessing_config)
evaluation_config = build_evaluation_config(
experiment_spec.evaluation_config, target_class_names)
confidence_models = None
evaluator = EVALUATOR_CLASS[framework](
postprocessing_config, evaluation_config, gridbox_model, images,
ground_truth_labels, steps, confidence_models
)
# Setup the cost function.
cost_auto_weight_hook =\
build_cost_auto_weight_hook(
experiment_spec.cost_function_config, steps)
# Get a BboxRasterizer.
bbox_rasterizer_config = \
build_bbox_rasterizer_config(experiment_spec.bbox_rasterizer_config)
bbox_rasterizer = BboxRasterizer(input_width=gridbox_model.input_width,
input_height=gridbox_model.input_height,
output_width=gridbox_model.output_width,
output_height=gridbox_model.output_height,
target_class_names=target_class_names,
bbox_rasterizer_config=bbox_rasterizer_config,
target_class_mapping=target_class_mapping)
# Build ops for doing validation.
    # NOTE: because of potential specificity in how labels are fed to the DetectNet V2 (or
    # child class) object, we use the labels held by the Evaluator object's wrapper around it.
ground_truth_tensors = gridbox_model.generate_ground_truth_tensors(
bbox_rasterizer=bbox_rasterizer,
batch_labels=evaluator.ground_truth_labels)
gridbox_model.build_validation_graph(images, ground_truth_tensors, target_classes,
cost_auto_weight_hook.cost_combiner_func)
gridbox_model.print_model_summary()
model_metadata = {
"size": get_model_file_size(model_path),
"param_count": gridbox_model.num_params
}
status_logging.get_status_logger().write(
data=model_metadata,
message="Model constructed."
)
return evaluator
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/evaluation/build_evaluator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the early stoping hook."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import pytest
import tensorflow as tf
from nvidia_tao_tf1.core.distribution.distribution import Distributor, hvd
from nvidia_tao_tf1.cv.detectnet_v2.tfhooks.early_stopping_hook import (
get_variable_softstart_annealing_learning_rate,
LRAnnealingEarlyStoppingHook,
)
@pytest.fixture(autouse=True)
def reset_graph():
tf.reset_default_graph()
class TestLRAnnealingEarlyStoppingHook:
"""Tests for LRAnnealingEarlyStoppingHook."""
@pytest.fixture(scope="class", autouse=True)
def set_up(self):
"""Need to initialize horovod once."""
hvd().init()
def get_stopping_hook(self, validation_period=1, first_validation_epoch=1, last_epoch=123,
steps_per_epoch=1, results_dir="results", num_validation_steps=2,
num_patience_steps=1, max_learning_rate=5e-4, min_learning_rate=5e-6,
num_soft_start_epochs=1, num_annealing_epochs=1):
"""Create an early stopping hook instance."""
# Reset default graph to start fresh.
validation_cost = tf.Variable(0.42)
return LRAnnealingEarlyStoppingHook(
validation_period,
last_epoch,
steps_per_epoch,
results_dir,
first_validation_epoch,
num_validation_steps,
num_patience_steps,
max_learning_rate,
min_learning_rate,
num_soft_start_epochs,
num_annealing_epochs,
validation_cost,
)
@pytest.mark.skipif(os.getenv("RUN_ON_CI", "0") == "1", reason="Cannot be run on CI")
def test_validate_master(self, mocker):
"""Test that validate calls Evaluator.evaluate and tensorboard update."""
stopping_hook = self.get_stopping_hook(validation_period=1, first_validation_epoch=1,
steps_per_epoch=1, num_patience_steps=2,
num_soft_start_epochs=1, num_annealing_epochs=1)
# Make sure we are master.
rank_mock = mocker.patch.object(Distributor, "is_master")
rank_mock.return_value = True
broadcast_op = mocker.patch.object(LRAnnealingEarlyStoppingHook, "broadcast_state")
# Initialize session and hook.
inc_step = tf.assign_add(tf.train.get_or_create_global_step(), 1)
session = tf.train.SingularMonitoredSession(hooks=[stopping_hook])
        # Note: we use raw sessions and call the hook's validate method directly to be more
        # flexible. BaseValidationHook only calls the hook when
        # (global_step_value + 1) % steps_per_epoch == 0, which also makes things confusing.
raw_session = session.raw_session()
run_context = tf.train.SessionRunContext(None, session.raw_session())
# Step 0, check initialization.
assert raw_session.run(stopping_hook._should_continue)
assert not raw_session.run(stopping_hook._in_annealing_phase)
# Check the broadcast op is being called and validation loss is computed.
stopping_hook.validate(run_context)
assert np.isclose(stopping_hook._min_cost, 0.42)
broadcast_op.assert_called()
# Step 1 + 2, check we are in soft start.
# Adjusting annealing learning rate depends on steps,
# hence step increase is needed in addition to epoch counter addition
raw_session.run(inc_step)
stopping_hook.epoch_counter += 1
stopping_hook.validate(run_context)
assert raw_session.run(stopping_hook._should_continue)
assert not raw_session.run(stopping_hook._in_annealing_phase)
raw_session.run(inc_step)
stopping_hook.epoch_counter += 1
stopping_hook.validate(run_context)
assert raw_session.run(stopping_hook._should_continue)
assert not raw_session.run(stopping_hook._in_annealing_phase)
# Step 3, we initiate annealing.
raw_session.run(inc_step)
stopping_hook.epoch_counter += 1
stopping_hook.validate(run_context)
assert raw_session.run(stopping_hook._should_continue)
assert raw_session.run(stopping_hook._in_annealing_phase)
# Step 4 + 5, check we should continue and are in annealing.
raw_session.run(inc_step)
stopping_hook.epoch_counter += 1
stopping_hook.validate(run_context)
assert raw_session.run(stopping_hook._should_continue)
assert raw_session.run(stopping_hook._in_annealing_phase)
raw_session.run(inc_step)
stopping_hook.epoch_counter += 1
stopping_hook.validate(run_context)
assert raw_session.run(stopping_hook._should_continue)
assert raw_session.run(stopping_hook._in_annealing_phase)
# Step 6, we're done.
raw_session.run(inc_step)
stopping_hook.epoch_counter += 1
stopping_hook.validate(run_context)
assert not raw_session.run(stopping_hook._should_continue)
assert raw_session.run(stopping_hook._in_annealing_phase)
assert run_context._stop_requested
@pytest.mark.skipif(os.getenv("RUN_ON_CI", "0") == "1", reason="Cannot be run on CI")
def test_hook_master(self, mocker):
"""Test that validate calls Evaluator.evaluate and tensorboard update."""
stopping_hook = self.get_stopping_hook(validation_period=1, first_validation_epoch=1,
steps_per_epoch=1, num_patience_steps=2,
num_soft_start_epochs=1, num_annealing_epochs=1)
# Make sure we are master.
rank_mock = mocker.patch.object(Distributor, "is_master")
rank_mock.return_value = True
broadcast_op = mocker.patch.object(LRAnnealingEarlyStoppingHook, "broadcast_state")
# Initialize session and hook.
inc_step = tf.assign_add(tf.train.get_or_create_global_step(), 1)
session = tf.train.SingularMonitoredSession(hooks=[stopping_hook])
raw_session = session.raw_session()
run_context = tf.train.SessionRunContext(None, session.raw_session())
# Step 0, check initialization.
assert raw_session.run(stopping_hook._should_continue)
assert not raw_session.run(stopping_hook._in_annealing_phase)
# Check the broadcast op is being called and validation loss is computed.
session.run(inc_step)
stopping_hook.validate(run_context)
assert np.isclose(stopping_hook._min_cost, 0.42)
broadcast_op.assert_called()
# Step 1 + 2, check we are in soft start.
# Adjusting annealing learning rate depends on steps,
# hence step increase is needed in addition to epoch counter addition
raw_session.run(inc_step)
stopping_hook.epoch_counter += 1
stopping_hook.validate(run_context)
assert raw_session.run(stopping_hook._should_continue)
assert not raw_session.run(stopping_hook._in_annealing_phase)
raw_session.run(inc_step)
stopping_hook.epoch_counter += 1
stopping_hook.validate(run_context)
assert raw_session.run(stopping_hook._should_continue)
assert not raw_session.run(stopping_hook._in_annealing_phase)
# Step 3, we initiate annealing.
raw_session.run(inc_step)
stopping_hook.epoch_counter += 1
stopping_hook.validate(run_context)
assert raw_session.run(stopping_hook._should_continue)
assert raw_session.run(stopping_hook._in_annealing_phase)
# Step 4 + 5, check we should continue and are in annealing.
raw_session.run(inc_step)
stopping_hook.epoch_counter += 1
stopping_hook.validate(run_context)
assert raw_session.run(stopping_hook._should_continue)
assert raw_session.run(stopping_hook._in_annealing_phase)
raw_session.run(inc_step)
stopping_hook.epoch_counter += 1
stopping_hook.validate(run_context)
assert raw_session.run(stopping_hook._should_continue)
assert raw_session.run(stopping_hook._in_annealing_phase)
# Step 6, we're done.
raw_session.run(inc_step)
stopping_hook.epoch_counter += 1
stopping_hook.validate(run_context)
assert not raw_session.run(stopping_hook._should_continue)
assert raw_session.run(stopping_hook._in_annealing_phase)
assert run_context._stop_requested
def test_validate_worker(self, mocker):
"""Test that validate calls Evaluator.evaluate and tensorboard update."""
stopping_hook = self.get_stopping_hook()
# Make sure we aren't master.
rank_mock = mocker.patch.object(Distributor, "is_master")
rank_mock.return_value = False
broadcast_op = mocker.patch.object(LRAnnealingEarlyStoppingHook, "broadcast_state")
validation_compute = mocker.patch.object(
LRAnnealingEarlyStoppingHook, "_compute_validation_cost"
)
# Initialize session and hook.
session = tf.train.SingularMonitoredSession(hooks=[stopping_hook])
run_context = tf.train.SessionRunContext(None, session.raw_session())
# Check initialization.
assert session.run(stopping_hook._should_continue)
assert not session.run(stopping_hook._in_annealing_phase)
# Check only broadcast op is being called.
stopping_hook.validate(run_context)
broadcast_op.assert_called()
validation_compute.assert_not_called()
@pytest.mark.parametrize(
"soft_start_steps, plateau_steps, annealing_steps",
[(10, 10, 10), (100, 100, 100), (0, 1, 1), (1, 0, 0), (0, 1, 0),
(1, 1, 1), (40000, 10000, 40000)]
)
def test_variable_softstart_annealing_learning_rate(
soft_start_steps, plateau_steps, annealing_steps, base_lr=0.1, min_lr=0.001
):
"""Test learning rates with different soft_start and annealing_steps values."""
def expected_lr(step, soft_start_steps, annealing_steps, is_annealing):
if is_annealing:
if annealing_steps > 0:
progress = 1 - float(step) / annealing_steps
else:
progress = 1.0
progress = max(0.0, progress)
else:
if soft_start_steps > 0:
progress = float(step) / soft_start_steps
else:
progress = 1.0
progress = min(1.0, progress)
lr = np.exp(np.log(min_lr) + progress * (np.log(base_lr) - np.log(min_lr)))
return lr
def computed_lr(step, soft_start_steps, annealing_steps, is_annealing):
return get_variable_softstart_annealing_learning_rate(
lr_step=step,
soft_start_steps=soft_start_steps,
annealing_steps=annealing_steps,
start_annealing=is_annealing,
base_lr=base_lr,
min_lr=min_lr,
)
# Check across various phases and steps that computed is ~ expected lr.
with tf.Session() as session:
# Spread equally across all steps + some slack at the end.
total_steps = soft_start_steps + plateau_steps + annealing_steps + 10
steps = np.linspace(0, total_steps, 50, dtype=np.uint32)
is_annealing = [step > soft_start_steps + plateau_steps for step in steps]
tf_steps = [tf.constant(step) for step in steps]
lrs = [
computed_lr(step, soft_start_steps, annealing_steps, anneal)
for step, anneal in zip(tf_steps, is_annealing)
]
lrs = session.run(lrs)
expected_lrs = [
expected_lr(step, soft_start_steps, annealing_steps, anneal)
for step, anneal in zip(steps, is_annealing)
]
# Default relative tolerance of 1e-07 seems to be too small.
np.testing.assert_allclose(lrs, expected_lrs, rtol=1e-06)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/evaluation/tests/test_early_stopping_hook.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ground truths."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pytest
import tensorflow as tf
from nvidia_tao_tf1.blocks.multi_source_loader.types import Bbox2DLabel
from nvidia_tao_tf1.blocks.multi_source_loader.types import Coordinates2D
from nvidia_tao_tf1.cv.detectnet_v2.evaluation.ground_truth import (
get_ground_truth_objects_from_batch_ground_truth,
get_ground_truth_objects_from_bbox_label,
GroundTruth
)
ground_truth1 = GroundTruth(class_name='car',
bbox=np.array([1., 1., 2., 2.]),
truncation=0.1,
truncation_type=0,
occlusion=1,
is_cvip=False,
world_bbox_z=0.0,
front=0.0,
back=0.0,
orientation=None)
ground_truth2 = GroundTruth(class_name='car',
bbox=np.array([3., 3., 4., 4.]),
truncation=0.2,
truncation_type=0,
occlusion=2,
is_cvip=True,
world_bbox_z=0.0,
front=0.0,
back=0.0,
orientation=None)
ground_truth3 = GroundTruth(class_name='car',
bbox=np.array([1., 1., 2., 2.]),
truncation=0.0,
truncation_type=1,
occlusion=1,
is_cvip=False,
world_bbox_z=0.0,
front=0.0,
back=0.0,
orientation=None)
ground_truth4 = GroundTruth(class_name='dontcare',
bbox=np.array([1., 1., 2., 2.]),
truncation=0.1,
truncation_type=0,
occlusion=1,
is_cvip=False,
world_bbox_z=0.0,
front=0.0,
back=0.0,
orientation=None)
ground_truth5 = GroundTruth(class_name='some_facing_object',
bbox=np.array([1., 1., 2., 2.]),
truncation=0.1,
truncation_type=0,
occlusion=1,
is_cvip=False,
world_bbox_z=0.0,
front=0.0,
back=0.0,
orientation=None)
no_objects = [{'target/object_class': [],
'target/bbox_coordinates': [],
'target/truncation': [],
'target/occlusion': []}]
no_objects_expected = [[]]
single_object = [{'target/object_class': ['car'],
'target/bbox_coordinates': [[1., 1., 2., 2.]],
'target/truncation': [0.1],
'target/occlusion': [1],
'target/non_facing': [0]},
{'target/object_class': ['some_non_facing_object'],
'target/bbox_coordinates': [[1., 1., 2., 2.]],
'target/truncation': [0.1],
'target/occlusion': [1],
'target/non_facing': [1]},
{'target/object_class': ['some_facing_object'],
'target/bbox_coordinates': [[1., 1., 2., 2.]],
'target/truncation': [0.1],
'target/occlusion': [1],
'target/non_facing': [0]}]
single_object_expected = [[ground_truth1], [ground_truth4], [ground_truth5]]
two_frames = no_objects + [{'target/object_class': ['car', 'car'],
'target/bbox_coordinates': [[1., 1., 2., 2.], [3., 3., 4., 4.]],
'target/truncation': [0.1, 0.2],
'target/occlusion': [1, 2],
'target/is_cvip': [False, True]}]
two_frames_expected = [[], [ground_truth1, ground_truth2]]
single_object_truncation_type = [{'target/object_class': ['car'],
'target/bbox_coordinates': [[1., 1., 2., 2.]],
'target/truncation_type': [1],
'target/occlusion': [1]}]
single_object_truncation_type_expected = [[ground_truth3]]
def compare_ground_truths(a, b):
"""Compare GroundTruths by their attributes."""
assert a.class_name == b.class_name
np.testing.assert_array_almost_equal(a.bbox, b.bbox)
np.testing.assert_almost_equal(a.truncation, b.truncation)
np.testing.assert_almost_equal(a.truncation_type, b.truncation_type)
np.testing.assert_almost_equal(a.occlusion, b.occlusion)
assert a.is_cvip == b.is_cvip
return True
@pytest.mark.parametrize("batch_ground_truth,expected_ground_truths",
[(no_objects, no_objects_expected),
(single_object, single_object_expected),
(two_frames, two_frames_expected),
(single_object_truncation_type, single_object_truncation_type_expected)])
def test_get_ground_truth_objects_from_batch_ground_truth(monkeypatch, batch_ground_truth,
expected_ground_truths):
"""Test generation of GroundTruth objects from tensors."""
ground_truths = get_ground_truth_objects_from_batch_ground_truth(
batch_ground_truth)
# Compare objects by their attributes, not ids
monkeypatch.setattr(GroundTruth, '__eq__', compare_ground_truths)
assert ground_truths == expected_ground_truths
# The following lines mimic the above test but using Bbox2DLabel (that have been eval'ed).
def _get_empty_bbox_2d_label_kwargs():
return {field_name: [] for field_name in Bbox2DLabel._fields}
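# The sparse tensors below are indexed as [frame, object, attribute]
# ([frame, object, vertex, coordinate] for vertices), matching their dense_shape.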
no_objects_kwargs = _get_empty_bbox_2d_label_kwargs()
no_objects_kwargs.update({
'vertices': Coordinates2D(
coordinates=tf.compat.v1.SparseTensorValue(
values=[], dense_shape=[1, 0, 0, 0], indices=[]),
canvas_shape=None),
'object_class': tf.compat.v1.SparseTensorValue(
values=[], dense_shape=[1, 0, 0],
indices=np.reshape(np.array([], dtype=np.int64), [0, 3]))})
no_objects_bis = Bbox2DLabel(**no_objects_kwargs)
single_object_kwargs = _get_empty_bbox_2d_label_kwargs()
single_object_kwargs.update({
'vertices': Coordinates2D(
coordinates=tf.compat.v1.SparseTensorValue(
values=[1., 1., 2., 2., 1., 1., 2., 2., 1., 1., 2., 2.],
dense_shape=[3, 1, 2, 2],
indices=np.array(
[[i, 0, j, k] for i in range(3) for j in range(2) for k in range(2)])),
canvas_shape=None),
'object_class': tf.compat.v1.SparseTensorValue(
values=['car', 'some_non_facing_object', 'some_facing_object'],
dense_shape=[3, 1, 1],
indices=np.array([[i, 0, 0] for i in range(3)])),
'truncation': tf.compat.v1.SparseTensorValue(
values=[0.1, 0.1, 0.1],
dense_shape=[3, 1, 1],
indices=np.array([[i, 0, 0] for i in range(3)])),
'occlusion': tf.compat.v1.SparseTensorValue(
values=[1, 1, 1],
dense_shape=[3, 1, 1],
indices=np.array([[i, 0, 0] for i in range(3)])),
'non_facing': tf.compat.v1.SparseTensorValue(
values=[0, 1, 0],
dense_shape=[3, 1, 1],
indices=np.array([[i, 0, 0] for i in range(3)]))})
single_object_bis = Bbox2DLabel(**single_object_kwargs)
two_frames_kwargs = _get_empty_bbox_2d_label_kwargs()
two_frames_kwargs.update({
'vertices': Coordinates2D(
coordinates=tf.compat.v1.SparseTensorValue(
values=[1., 1., 2., 2., 3., 3., 4., 4.],
dense_shape=[2, 2, 2, 2],
indices=np.array(
[[1, i, j, k] for i in range(2) for j in range(2) for k in range(2)])),
canvas_shape=None),
'object_class': tf.compat.v1.SparseTensorValue(
values=['car', 'car'],
dense_shape=[2, 2, 1],
indices=np.array([[1, i, 0] for i in range(2)])),
'truncation': tf.compat.v1.SparseTensorValue(
values=[0.1, 0.2],
dense_shape=[2, 2, 1],
indices=np.array([[1, i, 0] for i in range(2)])),
'occlusion': tf.compat.v1.SparseTensorValue(
values=[1, 2],
dense_shape=[2, 2, 1],
indices=np.array([[1, i, 0] for i in range(2)])),
'is_cvip': tf.compat.v1.SparseTensorValue(
values=[False, True],
dense_shape=[2, 2, 1],
indices=np.array([[1, i, 0] for i in range(2)]))})
two_frames_bis = Bbox2DLabel(**two_frames_kwargs)
single_object_truncation_type_kwargs = _get_empty_bbox_2d_label_kwargs()
single_object_truncation_type_kwargs.update({
'vertices': Coordinates2D(
coordinates=tf.compat.v1.SparseTensorValue(
values=[1., 1., 2., 2.],
dense_shape=[1, 1, 2, 2],
indices=np.array([[0, 0, i, j] for i in range(2) for j in range(2)])),
canvas_shape=None),
'object_class': tf.compat.v1.SparseTensorValue(
values=['car'],
dense_shape=[1, 1, 1],
indices=np.array([[0, 0, 0]])),
'truncation_type': tf.compat.v1.SparseTensorValue(
values=[1],
dense_shape=[1, 1, 1],
indices=np.array([[0, 0, 0]])),
'occlusion': tf.compat.v1.SparseTensorValue(
values=[1],
dense_shape=[1, 1, 1],
indices=np.array([[0, 0, 0]])),
# Add an empty sparse tensor to test that the code properly handles it.
'world_bbox_z': tf.compat.v1.SparseTensorValue(
values=[],
dense_shape=[1, 1, 1],
indices=[],
)
})
single_object_truncation_type_bis = Bbox2DLabel(
**single_object_truncation_type_kwargs)
@pytest.mark.parametrize("bbox_2d_label,expected_ground_truths",
[(no_objects_bis, no_objects_expected),
(single_object_bis, single_object_expected),
(two_frames_bis, two_frames_expected),
(single_object_truncation_type_bis,
single_object_truncation_type_expected)])
def test_get_ground_truth_objects_from_bbox_label(monkeypatch, bbox_2d_label,
expected_ground_truths):
"""Test generation of GroundTruth objects from Bbox2DLabel."""
ground_truths = get_ground_truth_objects_from_bbox_label(bbox_2d_label)
# Compare objects by their attributes, not ids
monkeypatch.setattr(GroundTruth, '__eq__', compare_ground_truths)
assert ground_truths == expected_ground_truths
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/evaluation/tests/test_ground_truth.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple inference handler for TLT trained DetectNet_v2 models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import keras
from keras import backend as K
import numpy as np
import tensorflow as tf
from nvidia_tao_tf1.cv.detectnet_v2.inferencer.base_inferencer import Inferencer
from nvidia_tao_tf1.cv.detectnet_v2.model.utilities import model_io
logger = logging.getLogger(__name__)
class TLTInferencer(Inferencer):
"""Network handler for inference tool."""
def __init__(self, framework='tlt', target_classes=None,
image_height=544, image_width=960, image_channels=3,
enc_key=None, tlt_model=None, gpu_set=0,
batch_size=1):
"""Setting up handler class for TLT DetectNet_v2 model.
Args:
framework (str): The framework in which the model under inference was serialized.
target_classes (list): List of target classes in order of the network output.
image_height (int): Height of the image at inference.
image_width (int): Width of the image at inference.
image_channels (int): Number of channels in the input image.
enc_key (str): Key to decode tlt model.
tlt_model (str): Path to the .tlt model file generated post training.
gpu_set (int): Id of the GPU in which inference will be run.
batch_size (int): Number of images per batch when inferred.
"""
# Initialize base class.
super(TLTInferencer, self).__init__(target_classes=target_classes,
image_height=image_height,
image_width=image_width,
image_channels=image_channels,
gpu_set=gpu_set,
batch_size=batch_size)
self._key = enc_key
self._model = tlt_model
self.framework = framework
# Initializing the input output nodes.
self._set_input_output_nodes()
for node in self.output_nodes:
if "cov" in node:
self.cov_blob = node
elif "bbox" in node:
self.bbox_blob = node
else:
raise ValueError("Invalid output blobs mentioned.")
self.constructed = False
def _set_input_output_nodes(self):
"""Set the input output nodes of the inferencer."""
self.input_node = "input_1"
self.output_nodes = ["output_bbox", "output_cov"]
def network_init(self):
"""Initializing the keras model and compiling it for inference.
Args:
None
Returns:
No explicit returns. Sets the self.mdl attribute to the initialized
Keras model.
"""
# Limit keras to using only 1 gpu of gpu id.
gpu_id = str(self.gpu_set)
# Restricting the number of GPU's to be used.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = gpu_id
K.set_session(tf.Session(config=config))
logger.info("Loading model from {}:".format(self._model))
model = model_io(self._model, enc_key=self._key)
# Check for model encapsulation.
layer_types = {type(layer) for layer in model.layers}
if keras.engine.training.Model in layer_types:
# Model in model case.
if layer_types != set([keras.engine.topology.InputLayer, keras.engine.training.Model]):
raise NotImplementedError("Model encapsulation is only supported if outer model "
"consists of input layers")
# Extracting only model.
model = [l for l in model.layers if (type(l) == keras.engine.training.Model)][0]
# Setting data format for loaded model. This can be derived from the last layer
# since all the layers in a DNv2 model follow the same channel order.
self.data_format = model.get_layer(self.cov_blob).data_format
assert self.data_format == "channels_first", "Only channels first supported"
self.num_channels = model.layers[0].input_shape[1]
input_shape = (self.num_channels, self.image_height, self.image_width)
# Reshaping input to inference shape defined in the clusterfile and
# encapsulating a new model.
# Peeling out reshape layers.
intermediate_outputs = [model.get_layer(self.cov_blob).output,
model.get_layer(self.bbox_blob).output]
model = keras.models.Model(inputs=model.inputs, outputs=intermediate_outputs)
logger.debug("Reshaping inputs to clusterfile dimensions")
inputs = keras.layers.Input(shape=input_shape)
model = keras.models.Model(inputs=inputs, outputs=model(inputs))
model.summary()
self.mdl = model
self.constructed = True
def infer_batch(self, chunk):
"""Function to infer a batch of images using trained keras model.
Args:
chunk (array): list of images in the batch to infer.
Returns:
infer_out: raw_predictions from model.predict.
resized: resized size of the batch.
"""
if not self.constructed:
raise ValueError("Cannot run inference. Run Inferencer.network_init() first.")
infer_shape = (len(chunk),) + self.mdl.layers[0].input_shape[1:]
logger.debug("Inference shape per batch: {}".format(infer_shape))
infer_input = np.zeros(infer_shape)
# Prepare image batches.
logger.debug("Inferring images")
for idx, image in enumerate(chunk):
input_image, resized = self.input_preprocessing(image)
infer_input[idx, :, :, :] = input_image
# Infer on image batches.
output = self.mdl.predict(infer_input, batch_size=len(chunk))
infer_dict = self.predictions_to_dict(output)
logger.debug("Inferred_outputs: {}".format(len(output)))
infer_out = self.keras_output_map(infer_dict)
return infer_out, resized
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/inferencer/tlt_inferencer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DetectNet V2 model modules to handle standalone inference."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/inferencer/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple inference handler for TLT trained DetectNet_v2 models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from addict import Dict
import numpy as np
from PIL import Image
from six.moves import range
logger = logging.getLogger(__name__)
class Inferencer(object):
"""Base inference handler for TLT generated models."""
def __init__(self,
target_classes=None,
image_height=544,
image_width=960,
image_channels=3,
gpu_set=0,
batch_size=1):
"""Setting up init for the base inference module.
Args:
target_classes (list): List of target classes in order of the network output.
image_height (int): Height of the image at inference.
image_width (int): Width of the image under inference.
gpu_set (int): Id of the GPU in which inference will be run.
batch_size (int): Number of images per batch when inferred.
"""
self.gpu_set = gpu_set
self.batch_size = batch_size
self.num_channels = None
self.target_classes = target_classes
self.image_height = image_height
self.image_width = image_width
self.num_channels = image_channels
assert self.num_channels in [1, 3], (
"Number of channels in the input: {}".format(self.num_channels)
)
def _set_input_output_nodes(self):
"""Set the input output nodes of the inferencer."""
raise NotImplementedError("Implemented in the derived classes.")
def network_init(self):
"""Initializing the keras model and compiling it for inference.
Args:
None
Returns:
No explicit returns. Sets the self.mdl attribute to the initialized
Keras model.
"""
raise NotImplementedError("Implemented in derived classes.")
def infer_batch(self, chunk):
"""Function to infer a batch of images using trained keras model.
Args:
chunk (array): list of images in the batch to infer.
Returns:
infer_out: raw_predictions from model.predict.
resized: resized size of the batch.
"""
raise NotImplementedError("Implemented in derived classes.")
def predictions_to_dict(self, outputs):
"""Function to convert raw predictions into a dictionary.
Args:
outputs (array): Raw outputs from keras model.predict.
Returns:
out_dict (Dictionary): Output predictions in a dictionary of coverages and bboxes.
"""
out_dict = {}
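# Blobs are identified by channel count: one channel per target class for coverage,
# four channels per target class for the bbox coordinates.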
for out in outputs:
if out.shape[1] == len(self.target_classes):
out_dict["cov"] = out
if out.shape[1] == len(self.target_classes) * 4:
out_dict["bbox"] = out
return out_dict
def input_preprocessing(self, image):
"""Pre processing an image before preparing the batch."""
mdl_size = (self.image_width, self.image_height)
im = image.resize(mdl_size, Image.ANTIALIAS)
if self.num_channels == 1:
logger.debug("Converting image from RGB to Grayscale")
if im.mode in ('RGBA', 'LA') or (im.mode == 'P' and 'transparency' in im.info):
bg_colour = (255, 255, 255)
# Need to convert to RGBA if LA format due to a bug in PIL
alpha = im.convert('RGBA').split()[-1]
# Create a new background image of our matt color.
# Must be RGBA because paste requires both images have the same format
bg = Image.new("RGBA", im.size, bg_colour + (255,))
bg.paste(im, mask=alpha)
# Convert image to grayscale.
im = im.convert('L')
keras_input = np.asarray(im).astype(np.float32)
keras_input = keras_input[:, :, np.newaxis]
elif self.num_channels == 3:
keras_input = np.asarray(im).astype(np.float32)
else:
raise NotImplementedError("Inference can only be run for 1 or 3 channels. "
"Did you forget to run Inference.network_init(). "
"Number of channels: {}".format(self.num_channels))
keras_input = keras_input.transpose(2, 0, 1) / 255.0
keras_input.shape = (1, ) + keras_input.shape
return keras_input, im.size
def keras_output_map(self, output):
"""Function to map outputs from a cov and bbox to classwise dictionary.
Realigns outputs from coverage and bbox dictionary to class-wise dictionary of
coverage and bbox blobs. So now the output dictionary looks like:
{'class': {'cov': coverage output blob of shape [n, 1, output_height, output_width],
'bbox': bbox rects output blob of shape [n, 4, output_height, output_width]}
}
Args:
output (dict): from predictions to dict member function
Returns:
out2cluster (dict): output dictionary for bbox post processing
"""
out2cluster = Dict()
blobs = list(output.keys())
target_classes = self.target_classes
# Separating and reshaping keras outputs blobs to classwise outputs.
for blob in blobs:
if 'cov' in blob:
output_meta_cov = output[blob].transpose(0, 1, 3, 2)
elif 'bbox' in blob:
output_meta_bbox = output[blob].transpose(0, 1, 3, 2)
else:
raise ValueError('Invalid output blob: cov and bbox expected in output blob names')
# Remapping output to a nested dictionary.
for i in range(len(target_classes)):
key = target_classes[i]
classwise = Dict()
for blob_name in blobs:
if 'cov' in blob_name:
classwise['cov'] = output_meta_cov[:, i, :, :]
elif 'bbox' in blob_name:
classwise['bbox'] = output_meta_bbox[:, 4*i: 4*i+4, :, :]
out2cluster[key] = classwise
return out2cluster
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/inferencer/base_inferencer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple script to build inferencer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from nvidia_tao_tf1.cv.detectnet_v2.inferencer.tlt_inferencer import TLTInferencer
from nvidia_tao_tf1.cv.detectnet_v2.inferencer.trt_inferencer import DEFAULT_MAX_WORKSPACE_SIZE
from nvidia_tao_tf1.cv.detectnet_v2.inferencer.trt_inferencer import TRTInferencer
SUPPORTED_INFERENCERS = {'tlt': TLTInferencer,
'tensorrt': TRTInferencer}
TRT_PARSERS = {0: 'etlt',
1: 'uff',
2: 'caffe'}
TRT_BACKEND_DATATYPE = {0: "fp32",
1: "fp16",
2: "int8"}
logger = logging.getLogger(__name__)
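# Illustrative usage (assumes `inf_config` is an InferencerConfig proto parsed from an
# inference spec file; the key string is a placeholder):
#   framework, inferencer = build_inferencer(inf_config=inf_config, verbose=True, key="<model key>")
#   inferencer.network_init()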
def build_inferencer(inf_config=None, verbose=True, key=None):
"""Simple function to build inferencer.
The function looks at the inference framework mentioned and then calls the right
inferencer.
Args:
inf_config (InferencerConfig protobuf): Config container parameters to configure
the inferencer
verbose (bool): Flag to define the verbosity of the logger.
key (str): Key to load the model.
Returns:
model(tlt_inferencer/trt_inferencer object): The inferencer object for the respective
framework.
Raises:
NotImplementedError for the wrong frameworks.
"""
if key is None:
raise ValueError("The key to load a model cannot be set to None.")
# Setting up common constructor arguments
constructor_kwargs = {'batch_size': inf_config.batch_size if inf_config.batch_size else 1,
'gpu_set': inf_config.gpu_index if inf_config.gpu_index else 0,
'target_classes': inf_config.target_classes if inf_config.target_classes
else None,
'image_height': inf_config.image_height,
'image_width': inf_config.image_width,
'image_channels': inf_config.image_channels}
# Extracting framework specific inferencer parameters.
model_config_type = inf_config.WhichOneof('model_config_type')
config = getattr(inf_config, model_config_type)
if model_config_type == 'tlt_config':
# Setting up tlt inferencer based on the config file parameters
logger.debug("Initializing TLT inferencer.")
framework = 'tlt'
constructor_kwargs.update({'tlt_model': config.model,
'enc_key': key,
'framework': framework})
elif model_config_type == 'tensorrt_config':
# Setting up tensorrt inferencer based on the config file parameters.
logger.debug("Initializing Tensorrt inferencer.")
framework = 'tensorrt'
constructor_kwargs.update({'framework': framework,
'uff_model': config.uff_model if config.uff_model else None,
'caffemodel': config.caffemodel if config.caffemodel else None,
'prototxt': config.prototxt if config.prototxt else None,
'etlt_model': config.etlt_model if config.etlt_model else None,
'etlt_key': key,
'parser': TRT_PARSERS[config.parser],
'verbose': verbose,
'max_workspace_size': DEFAULT_MAX_WORKSPACE_SIZE,
'data_type': TRT_BACKEND_DATATYPE[config.backend_data_type],
'trt_engine': config.trt_engine if config.trt_engine else None,
'save_engine': config.save_engine})
# Setting up calibrator if calibrator specific parameters are present.
if TRT_BACKEND_DATATYPE[config.backend_data_type] == "int8":
assert hasattr(config, "calibrator_config"), "Please instantiate an calibrator config "\
"when running in int8 mode."
calib_conf = getattr(config, "calibrator_config")
# Set calibrator config arguments.
n_batches = 1
if calib_conf.n_batches:
n_batches = calib_conf.n_batches
calibration_cache = None
if calib_conf.calibration_cache:
calibration_cache = calib_conf.calibration_cache
calibration_tensorfile = None
if calib_conf.calibration_tensorfile:
calibration_tensorfile = calib_conf.calibration_tensorfile
constructor_kwargs.update({'calib_tensorfile': calibration_tensorfile,
'n_batches': n_batches,
'calib_file': calibration_cache})
else:
raise NotImplementedError("Unsupported framework: {}".format(model_config_type))
logger.info("Constructing inferencer")
return framework, SUPPORTED_INFERENCERS[framework](**constructor_kwargs)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/inferencer/build_inferencer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for different DetectNet_v2 Models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import numpy as np
import pycuda.autoinit # noqa pylint: disable=unused-import
import pycuda.driver as cuda
try:
import tensorrt as trt # noqa pylint: disable=W0611 pylint: disable=W0611
trt_available = True
from nvidia_tao_tf1.core.export.data import TensorFile
except Exception as e:
logger = logging.getLogger(__name__)
logger.warning(
"Failed to import TensorRT package, exporting TLT to a TensorRT engine "
"will not be available."
)
trt_available = False
logger = logging.getLogger(__name__)
# Simple helper class to define host, device buffers.
class HostDeviceMem(object):
"""Clean data structure to handle host/device memory."""
def __init__(self, host_mem, device_mem, name, npshape):
"""Initialize a HostDeviceMem data structure.
Args:
host_mem (cuda.pagelocked_empty): A cuda.pagelocked_empty memory buffer.
device_mem (cuda.mem_alloc): Allocated memory pointer to the buffer in the GPU.
name (str): Name of the binding blob in TensorRT.
npshape (tuple): Shape of the input dimensions.
Returns:
HostDeviceMem instance.
"""
self.host = host_mem
self.device = device_mem
self.numpy_shape = npshape
self.name = name
def __str__(self):
"""String containing pointers to the TRT Memory."""
return "Host:\n" + str(self.host) + "\nDevice:\n" + str(self.device)
def __repr__(self):
"""Return the canonical string representation of the object."""
return self.__str__()
if trt_available:
# Simple helper class for calibration.
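# During engine building TensorRT calls get_batch() repeatedly until it returns None,
# then stores the computed scales via write_calibration_cache(); an existing cache
# returned by read_calibration_cache() lets the builder skip calibration entirely.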
class Calibrator(trt.IInt8EntropyCalibrator2):
"""Calibrator class."""
def __init__(self, data_filename, cache_filename,
n_batches, batch_size,
*args, **kwargs):
"""Init routine.
This inherits from ``trt.IInt8EntropyCalibrator2`` to implement
the calibration interface that TensorRT needs to calibrate the
INT8 quantization factors.
Args:
data_filename (str): ``TensorFile`` data file to use.
cache_filename (str): name of calibration file to read/write to.
n_batches (int): number of batches for calibrate for.
batch_size (int): batch size to use for calibration (this must be
smaller or equal to the batch size of the provided data).
"""
super(Calibrator, self).__init__(*args, **kwargs)
self._data_file = None
if data_filename is not None and os.path.exists(data_filename):
self._data_file = TensorFile(data_filename, 'r')
else:
logger.info("A valid tensorfile doesn't exist at {}. "
"The calibrator will attempt to read from a cache file "
"if provided.".format(self._data_file))
self._cache_filename = cache_filename
self._batch_size = batch_size
self._n_batches = n_batches
self._batch_count = 0
self._data_mem = None
def get_batch(self, names):
"""Return one batch.
Args:
names (list): list of memory bindings names.
"""
if self._batch_count < self._n_batches:
batch = np.array(self._data_file.read())
if batch is not None:
# Adding a pylint error disable check due to a pylint issue
# TODO: <vpraveen> Remove this after pylint/issues/3139
batch_size = batch.shape[0] # pylint: disable=E1136
if batch_size < self._batch_size:
raise ValueError("Data file batch size (%d) < request batch size (%d)" %
(batch_size, self._batch_size))
batch = batch[:self._batch_size]
if self._data_mem is None:
self._data_mem = cuda.mem_alloc(batch.size * 4) # 4 bytes per float32.
self._batch_count += 1
# Transfer input data to device.
cuda.memcpy_htod(self._data_mem, np.ascontiguousarray(batch, dtype=np.float32))
return [int(self._data_mem)]
if self._data_mem is not None:
self._data_mem.free()
return None
def get_batch_size(self):
"""Return batch size."""
return self._batch_size
def read_calibration_cache(self):
"""Read calibration from file."""
if os.path.isfile(self._cache_filename):
logger.warning("Calibration file exists at {}."
" Reading this cache.".format(self._cache_filename))
with open(self._cache_filename, "rb") as cal_file:
return cal_file.read()
return None
def write_calibration_cache(self, cache):
"""Write calibration to file.
Args:
cache (memoryview): buffer to read calibration data from.
"""
logger.info("Saving calibration cache (size %d) to %s",
len(cache), self._cache_filename)
with open(self._cache_filename, 'wb') as f:
f.write(cache)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/inferencer/utilities.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple inference handler for maglev trained DetectNet_v2 models serialized to TRT."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import struct
import sys
import tempfile
import traceback
import numpy as np
import pycuda.autoinit # noqa pylint: disable=unused-import
import pycuda.driver as cuda
from six.moves import range
try:
import tensorrt as trt # noqa pylint: disable=W0611 pylint: disable=W0611
from nvidia_tao_tf1.cv.detectnet_v2.inferencer.utilities import Calibrator
except Exception as e:
logger = logging.getLogger(__name__)
logger.warning(
"Failed to import TensorRT package, exporting TLT to a TensorRT engine "
"will not be available."
)
from nvidia_tao_tf1.cv.detectnet_v2.inferencer.base_inferencer import Inferencer
from nvidia_tao_tf1.cv.detectnet_v2.inferencer.utilities import HostDeviceMem
from nvidia_tao_tf1.encoding import encoding
logger = logging.getLogger(__name__)
trt_loggers = []
# TensorRT default params.
DEFAULT_MAX_WORKSPACE_SIZE = 1 << 30  # amounts to 1GB of workspace.
def _create_tensorrt_logger(verbose=False):
"""Create a TensorRT logger.
Args:
verbose(bool): Flag to set logger as verbose or not.
Return:
tensorrt_logger(trt.infer.ConsoleLogger): TensorRT console logger object.
"""
if verbose:
trt_verbosity = trt.Logger.INFO
else:
trt_verbosity = trt.Logger.WARNING
tensorrt_logger = trt.Logger(trt_verbosity)
trt_loggers.append(tensorrt_logger)
return tensorrt_logger
def _exception_check(check_case, fail_string):
"""Simple function for exception handling and traceback print.
Args:
check_case: item to check exception for.
fail_string (str): String to be printed at traceback error.
Returns:
No explicit returns.
Raises:
Prints out traceback and raises AssertionError with line number and
error text.
"""
try:
assert check_case
except AssertionError:
logger.error("Fail string")
_, _, tb = sys.exc_info()
traceback.print_tb(tb) # Fixed format
tb_info = traceback.extract_tb(tb)
_, line, _, text = tb_info[-1]
raise AssertionError('Failed in {} in statement {}'.format(line,
text))
class TRTInferencer(Inferencer):
"""Network handler for inference tool."""
def __init__(self, target_classes=None, framework="tensorrt",
image_height=544, image_width=960, image_channels=3,
uff_model=None, caffemodel=None,
etlt_model=None, etlt_key=None, prototxt=None, parser="caffe",
calib_tensorfile=None, n_batches=None, input_nodes=None,
output_nodes=None, max_workspace_size=1 << 30,
data_type="fp32", calib_file=None, trt_engine=None, gpu_set=0, batch_size=1,
save_engine=False, verbose=False):
"""Setting up handler class for tensorrt exported DetectNet_v2 model.
Args:
target_classes (list): List of target classes the model will detect.
This is in order of the network output, and therefore must be taken from
the costfunction_config of the spec file.
framework (str): The inference backend framework being used.
image_height (int): Vertical dimension at which the model will run inference.
image_width (int): Horizontal dimension at which the model will run inference.
uff_model (str): Path to the TRT Model uff file.
caffemodel (str): Path to the caffemodel file for exported Caffe model.
prototxt (str): Path to the prototxt file for exported Caffe model.
parser (str): Type of TRT parser to be used.
calib_tensorfile (str): Path to the calibration tensorfile.
n_batches (int): No. of batches to calibrate the network when running on int8 mode.
input_nodes (list): List of input nodes to the graph.
output_nodes (list): List of output nodes in the graph.
max_workspace_size (int): Max size of the TRT workspace to be set (Default: 1GB)
data_type (int): TensorRT backend datatype.
calib_file (str): Path to save the calibration cache file.
trt_engine (str): Path to save the TensorRT engine file.
gpu_set (int): Index of the GPU to be used for inference.
batch_size (int): Number of images per batch at inference.
save_engine (bool): Flag to save optimized TensorRT engine or not.
verbose (bool): Whether or not to log with debug details.
Returns:
Initialized TRTInferencer object.
"""
super(TRTInferencer, self).__init__(target_classes=target_classes,
image_height=image_height,
image_width=image_width,
image_channels=image_channels,
gpu_set=gpu_set,
batch_size=batch_size)
self.framework = framework
self._uff_model = uff_model
self._caffemodel = caffemodel
self._prototxt = prototxt
self._etlt_model = etlt_model
self._etlt_key = etlt_key
self._parser_kind = parser
self._trt_logger = _create_tensorrt_logger(verbose)
self._calib_tensorfile = calib_tensorfile
self.n_batches = n_batches
self.max_workspace_size = max_workspace_size
self._data_type = data_type
self._calib_file = calib_file
self._engine_file = trt_engine
self._save_engine = save_engine
# Initializing variables that will be used in subsequent steps.
self.builder = None
self.calibrator = None
self.network = None
self.context = None
self.runtime = None
self.stream = None
self._set_input_output_nodes()
if self._data_type == "int8":
# Check if the correct file combinations are present. Either a
# tensorfile must be present, or a valid cache file.
check_tensorfile_exists = self._calib_tensorfile is not None and \
os.path.exists(self._calib_tensorfile)
check_int8_cache_exists = self._calib_file is not None and \
os.path.exists(self._calib_file)
error_string = "Either a valid tensorfile must be present or a cache file."
assert check_tensorfile_exists or check_int8_cache_exists, error_string
self.input_dims = (self.num_channels,
self.image_height,
self.image_width)
self.constructed = False
def _set_input_output_nodes(self):
"""Set the input output nodes in the TensorRTInferencer."""
self.input_node = ["input_1"]
if self._parser_kind == "caffe":
self.output_nodes = ["output_bbox", "output_cov/Sigmoid"]
elif self._parser_kind in ["uff", "etlt"]:
self.output_nodes = ["output_bbox/BiasAdd", "output_cov/Sigmoid"]
else:
raise NotImplementedError("Parser kind not supported.")
def _platform_compatibility_check(self):
"""Check for builder compatibility.
Return:
None:
Raises:
AttributeError: Whether configuration is compatible or not.
"""
if self._dtype == trt.DataType.HALF and not self.builder.platform_has_fast_fp16:
logger.error("Specified FP16 but not supported on platform.")
raise AttributeError("Specified FP16 but not supported on platform.")
if self._dtype == trt.DataType.INT8 and not self.builder.platform_has_fast_int8:
logger.error("Specified INT8 but not supported on platform.")
raise AttributeError("Specified INT8 but not supported on platform.")
if self._dtype == trt.DataType.INT8 and self.calibrator is None:
logger.error("Specified INT8 but no calibrator provided.")
raise AttributeError("Specified INT8 but no calibrator provided.")
def _parse_caffe_model(self):
"""Simple function to parse a caffe model.
Args:
None.
Returns
None.
Raises:
Assertion error for network creation.
"""
self.parser = trt.CaffeParser()
assert os.path.isfile(self._caffemodel), "{} not found.".format(self._caffemodel)
assert os.path.isfile(self._prototxt), "{} not found.".format(self._prototxt)
self.blob_name_to_tensor = self.parser.parse(self._prototxt,
self._caffemodel,
self.network,
trt.float32)
_exception_check(self.blob_name_to_tensor,
"Failed to parse caffe model")
# Mark output blobs.
for l in self.output_nodes:
logger.info("Marking {} as output layer".format(l))
t = self.blob_name_to_tensor.find(str(l))
_exception_check(t, "Failed to find output layer")
self.network.mark_output(t)
def _parse_uff_model(self):
"""Simple function to parse a uff model.
Args:
None.
Returns
None.
Raises:
Assertion error for network creation.
"""
self.parser = trt.UffParser()
assert os.path.isfile(self._uff_model), "{} not found.".format(self._uff_model)
# Register input blob
for blob in self.input_node:
self.parser.register_input(blob.encode(), self.input_dims)
# Register the output blobs
for blob in self.output_nodes:
self.parser.register_output(blob.encode())
_exception_check(self.parser.parse(self._uff_model,
self.network,
trt.float32),
"Failed to parse UFF model")
def _parse_etlt_model(self):
"""Simple function to parse an etlt model.
Args:
None.
Returns
None.
Raises:
Assertion error for network creation.
"""
if not os.path.exists(self._etlt_model):
raise ValueError("Cannot find etlt file.")
os_handle, tmp_uff_file = tempfile.mkstemp()
os.close(os_handle)
# Unpack etlt file.
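# Layout: a little-endian int32 giving the length of the input node name, the name
# itself, and then the encrypted UFF payload.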
with open(self._etlt_model, "rb") as efile:
num_chars = efile.read(4)
num_chars = struct.unpack("<i", num_chars)[0]
input_node = str(efile.read(num_chars))
with open(tmp_uff_file, "wb") as tfile:
encoding.decode(efile, tfile, self._etlt_key.encode())
self._uff_model = tmp_uff_file
self._input_node = [input_node]
# Parse the decoded UFF file.
self._parse_uff_model()
os.remove(self._uff_model)
logger.debug("Parsed ETLT model file.")
def _set_dtype(self):
"""Simple function to set backend datatype.
Args:
None.
Returns
None.
Raises:
ValueError for unsupported datatype.
"""
if self._data_type == 'int8':
self._dtype = trt.int8
elif self._data_type == 'fp16':
self._dtype = trt.float16
elif self._data_type == 'fp32':
self._dtype = trt.float32
else:
raise ValueError("Unsupported data type: %s" % self._data_type)
def network_init(self):
"""Initializing the keras model and compiling it for inference.
Args:
None
Returns:
No explicit returns. Defines the self.mdl attribute to the intialized
keras model.
"""
# Creating a runtime handler.
self.runtime = trt.Runtime(self._trt_logger)
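# If no serialized engine exists at the given path, optimize the parsed network with
# the TensorRT builder (calibrating first when int8 is requested); otherwise
# deserialize the saved engine directly.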
if not os.path.isfile(self._engine_file):
logger.info("Engine file not found at {}".format(self._engine_file))
logger.info("Using TensorRT to optimize model and generate an engine.")
# Set backend tensorrt data type.
self._set_dtype()
# Instantiate a builder.
self.builder = trt.Builder(self._trt_logger)
self.calibrator = None
# Set up calibrator
if self._data_type == "int8":
logger.info("Initializing int8 calibration table.")
# TODO:<vpraveen> Update to use custom calibrator when the repo
# moves to TRT 5.1.
self.calibrator = Calibrator(self._calib_tensorfile,
self._calib_file,
self.n_batches,
self.batch_size)
# Check if platform is compatible for the configuration of TRT engine
# that will be created.
self._platform_compatibility_check()
# Instantiate the network.
self.network = self.builder.create_network()
builder_config = self.builder.create_builder_config()
# Parse the model using caffe / uff parser.
if self._parser_kind == "caffe":
self._parse_caffe_model()
elif self._parser_kind == "uff":
self._parse_uff_model()
elif self._parser_kind == "etlt":
self._parse_etlt_model()
else:
raise NotImplementedError("{} parser is not supported".format(self._parser_kind))
# set context information batch size and workspace for trt backend.
self.builder.max_batch_size = self.batch_size
builder_config.max_workspace_size = self.max_workspace_size
# Set fp16 or int 8 mode based on inference.
if self._dtype == trt.float16:
builder_config.set_flag(trt.BuilderFlag.FP16)
# Setting the engine builder to create int8 engine and calibrate the
# graph.
if self._dtype == trt.int8:
logger.debug("Setting trt calibrator")
builder_config.set_flag(trt.BuilderFlag.INT8)
builder_config.int8_calibrator = self.calibrator
# Sometimes TensorRT may choose non int8 implementations of
# layers for discrete Volta GPU setup since Volta GPU's don't
# have tensor core. Therefore it may be best to force the build
# restrictions, to choose int8 kernels for GPU's without
# int8 tensor cores.
builder_config.set_flag(trt.BuilderFlag.STRICT_TYPES)
# Build tensorrt engine.
self.engine = self.builder.build_engine(self.network, builder_config)
logger.debug("Number of bindings {}".format(self.engine.num_bindings))
logger.debug("TensorRT engine built")
# Serialize and save the tensorrt engine for future use.
if self._save_engine:
logger.info("Saving engine to {} for further use".format(self._engine_file))
with open(self._engine_file, "wb") as ef:
ef.write(self.engine.serialize())
ef.closed
del self.builder
del self.network
else:
# Reading from a pre serialized engine file if one exists.
logger.info("Reading from engine file at: {}".format(self._engine_file))
with open(self._engine_file, "rb") as ef:
self.engine = self.runtime.deserialize_cuda_engine(ef.read())
ef.closed
# Create an execution context to enqueue operations to.
self.context = self.engine.create_execution_context()
logger.debug("Generated TRT execution context.")
# Create pycuda execution stream.
self.stream = cuda.Stream()
self.allocate_buffers()
self.constructed = True
def allocate_buffers(self):
"""Simple function to allocate CPU-GPU buffers.
Engine bindings are iterated over and memory buffers are allocated based
on the binding dimensions.
Args:
self(TRTInferencer object): all required arguments are class members.
Returns:
No explicit returns.
"""
self.inputs = []
self.outputs = []
self.bindings = []
for binding in range(self.engine.num_bindings):
size = self.engine.get_binding_shape(binding)
npshape = size
binding_name = self.engine.get_binding_name(binding)
logger.debug("Binding name: {}, size: {}".format(binding_name,
trt.volume(size)))
num_elements = trt.volume(size) * self.batch_size
dtype = trt.nptype(self.engine.get_binding_dtype(binding))
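# Pair a page-locked host buffer with a device allocation for this binding so the
# async copies in infer_batch() can overlap with execution on the CUDA stream.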
host_mem = cuda.pagelocked_empty(num_elements, dtype)
device_mem = cuda.mem_alloc(host_mem.nbytes)
self.bindings.append(int(device_mem))
if self.engine.binding_is_input(binding):
self.inputs.append(HostDeviceMem(host_mem, device_mem, binding_name, npshape))
else:
self.outputs.append(HostDeviceMem(host_mem, device_mem, binding_name, npshape))
def infer_batch(self, chunk):
"""Function to infer a batch of images using trained keras model.
Args:
chunk (array): list of images in the batch to infer.
Returns:
infer_out: raw_predictions from model.predict.
resized: resized size of the batch.
"""
if not self.constructed:
raise ValueError("Cannot run inference. Run Inferencer.network_init() first.")
infer_shape = (self.batch_size,) + (self.num_channels, self.image_height, self.image_width)
infer_input = np.zeros(infer_shape)
# Prepare image batches.
logger.debug("Inferring images")
for idx, image in enumerate(chunk):
input_image, resized = self.input_preprocessing(image)
infer_input[idx, :, :, :] = input_image
# Infer on image batches.
logger.debug("Number of input blobs {}".format(len(self.inputs)))
# copy buffers to GPU.
np.copyto(self.inputs[0].host, infer_input.ravel())
for inp in self.inputs:
cuda.memcpy_htod_async(inp.device, inp.host, self.stream)
# Enqueue inference context.
self.context.execute_async(stream_handle=self.stream.handle,
bindings=self.bindings,
batch_size=self.batch_size)
# Copy inference back from the GPU to host.
for out in self.outputs:
cuda.memcpy_dtoh_async(out.host, out.device, self.stream)
# Synchronize cuda stream events.
self.stream.synchronize()
output = self.get_reshaped_outputs()
infer_dict = self.predictions_to_dict(output)
logger.debug("Inferred_outputs: {}".format(len(output)))
infer_out = self.keras_output_map(infer_dict)
return infer_out, resized
def get_reshaped_outputs(self):
"""Function to collate outputs and get results in NCHW formatself.
Args:
self(TRTInferencer object): all required arguments are class members.
Returns:
output (list): list of reshaped np arrays
"""
# Collate results.
output = [out.host for out in self.outputs]
logger.debug("Number of outputs: {}".format(len(output)))
for idx, out in enumerate(output):
logger.debug("Output shape: {}, {}".format(out.shape,
self.outputs[idx].numpy_shape))
out_shape = (self.batch_size,) + tuple(self.outputs[idx].numpy_shape)
output[idx] = np.reshape(output[idx], out_shape)
logger.debug("Coverage blob shape: {}".format(output[0].shape))
return output
def clear_buffers(self):
"""Simple function to free input, output buffers allocated earlier.
Args:
No explicit arguments. Inputs and outputs are member variables.
Returns:
No explicit returns.
Raises:
ValueError if buffers not found.
"""
# Loop through inputs and free inputs.
logger.info("Clearing input buffers.")
for inp in self.inputs:
inp.device.free()
# Loop through outputs and free them.
logger.info("Clearing output buffers.")
for out in self.outputs:
out.device.free()
def clear_trt_session(self):
"""Simple function to free destroy tensorrt handlers.
Args:
No explicit arguments. Destroys context, runtime and engine.
Returns:
No explicit returns.
Raises:
ValueError if buffers not found.
"""
if self.runtime:
logger.info("Clearing tensorrt runtime.")
del self.runtime
if self.context:
logger.info("Clearing tensorrt context.")
del self.context
if self.engine:
logger.info("Clearing tensorrt engine.")
del self.engine
del self.stream
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/inferencer/trt_inferencer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple inference handler for TLT trained gridbox models serialized to TRT."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import logging
import tempfile
import traceback
import struct
import numpy as np
import keras
from keras import backend as K
import tensorflow as tf
from PIL import Image
import pytest
import pycuda.autoinit
import pycuda.driver as cuda
from nvidia_tao_tf1.core.export._uff import keras_to_uff
from nvidia_tao_tf1.cv.detectnet_v2.cost_function.cost_function_parameters import get_target_class_names
from nvidia_tao_tf1.cv.detectnet_v2.inferencer.build_inferencer import build_inferencer
from nvidia_tao_tf1.cv.detectnet_v2.model.build_model import build_model
from nvidia_tao_tf1.cv.detectnet_v2.spec_handler.spec_loader import load_experiment_spec
from nvidia_tao_tf1.encoding import encoding
logger = logging.getLogger(__name__)
# Todo: <vpraveen> Use GB Feature extractor constructor to construct GB model and export
# to TRT serializable format for inference
detectnet_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
caffe_inferencer_spec = os.path.join(detectnet_root,
"experiment_specs/inferencer_spec_caffe.prototxt")
etlt_inferencer_spec = os.path.join(detectnet_root,
"experiment_specs/inferencer_spec_etlt.prototxt")
training_spec = os.path.join(detectnet_root,
"experiment_specs/default_spec.txt")
DEFAULT_MAX_WORKSPACE_SIZE = 1 << 30
ENC_KEY = 'tlt_encode'
topologies = [(etlt_inferencer_spec, 1, True, "resnet", 18, (3, 544, 960)),
(etlt_inferencer_spec, 1, False, "resnet", 10, (3, 544, 960)),
(etlt_inferencer_spec, 1, False, "vgg", 16, (3, 544, 960)),
(etlt_inferencer_spec, 1, False, "efficientnet_b0", 16, (3, 544, 960))]
# Restricting the number of GPUs to be used by tensorflow to 0.
gpu_options = tf.compat.v1.GPUOptions(
per_process_gpu_memory_fraction=0.33,
allow_growth=True
)
device_count = {'GPU': 0, 'CPU': 1}
config = tf.compat.v1.ConfigProto(
gpu_options=gpu_options,
device_count=device_count
)
K.set_session(tf.Session(config=config))
def get_gridbox_tlt_model(arch, num_layers=None, input_shape=(3, 544, 960)):
"""Simple function to generate a TLT model."""
experiment_spec = load_experiment_spec(training_spec)
if hasattr(experiment_spec, "model_config"):
model_config = experiment_spec.model_config
else:
raise ValueError("Invalid spec file without model_config at {}".format(training_spec))
if hasattr(experiment_spec, "cost_function_config"):
cost_function_config = experiment_spec.cost_function_config
else:
raise ValueError("Invalid spec without costfunction config at {}".format(training_spec))
target_class_names = get_target_class_names(cost_function_config)
model_config.arch = arch
model_config.num_layers = num_layers
gridbox_model = build_model(model_config, target_class_names)
gridbox_model.construct_model(input_shape=input_shape,
kernel_regularizer=None,
bias_regularizer=None,
pretrained_weights_file=None,
enc_key=ENC_KEY)
return gridbox_model
def convert_to_tlt(model,
output_node_names="output_bbox/BiasAdd,output_cov/Sigmoid"):
"""Simple function to generate etlt file from tlt file."""
os_handle, tmp_uff_file_name = tempfile.mkstemp()
os.close(os_handle)
    os_handle, tmp_etlt_file_name = tempfile.mkstemp()
    os.close(os_handle)
# Convert keras to uff
output_node_names = output_node_names.split(',')
in_tensor_name, out_tensor_names, _ = keras_to_uff(model,
tmp_uff_file_name,
output_node_names=output_node_names)
# We only support models with a single input tensor.
if isinstance(in_tensor_name, list):
in_tensor_name = in_tensor_name[0]
K.clear_session()
# Encode temporary uff to output file
with open(tmp_uff_file_name, "rb") as open_temp_file, open(tmp_etlt_file_name,
"wb") as open_encoded_file:
open_encoded_file.write(struct.pack("<i", len(in_tensor_name)))
open_encoded_file.write(in_tensor_name.encode())
encoding.encode(open_temp_file, open_encoded_file, ENC_KEY)
os.remove(tmp_uff_file_name)
return tmp_etlt_file_name
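# Illustrative usage sketch (comments only; the model is assumed to come from
# get_gridbox_tlt_model above):
#
#   gridbox_model = get_gridbox_tlt_model("resnet", num_layers=18)
#   etlt_path = convert_to_tlt(gridbox_model.keras_model)
#   # etlt_path now points to an encoded UFF file with the input tensor name
#   # prepended, encrypted with ENC_KEY.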
def get_inferencer_input(input_shape):
"""Simple function to get an input array.
Args:
input_shape (tuple): shape of the input array.
Return:
pil_input (pil.Image): A pil image object.
"""
c = input_shape[0]
h = input_shape[1]
w = input_shape[2]
np_input = np.random.random((h, w, c)) * 255
pil_input = Image.fromarray(np_input.astype(np.uint8))
return pil_input
def check_output(keras_output, trt_output, dtype='fp32', parser="caffe"):
"""Check keras and tensorrt inputs."""
assert len(keras_output.keys()) == len(trt_output.keys())
# ToDo <vpraveen> Check for output nodes of TensorRT and fine corresponding
# Uff nodes that do match.
if dtype == "fp32":
for tclass in list(keras_output.keys()):
np.array_equal(keras_output[tclass]['cov'],
trt_output[tclass]['cov'])
np.array_equal(keras_output[tclass]['bbox'],
trt_output[tclass]['bbox'])
def get_keras_inferences(input_chunk, trt_inferencer, keras_model_path):
"""Get keras inferences for the current model.
Args:
input_chunk (list): list of PIL.Image objects to run inference on.
trt_inferencer (nvidia_tao_tf1.cv.gridbox.inferencer.TRTInferencer): TRTInferencer object
to run.
Returns:
"""
# Setting up inputs.
input_shape = (3, trt_inferencer.image_height,
trt_inferencer.image_width)
batch_size = len(input_chunk)
infer_shape = (batch_size, ) + input_shape
infer_input = np.zeros(infer_shape)
assert os.path.exists(keras_model_path)
keras_model = keras.models.load_model(keras_model_path, compile=False)
graph = tf.get_default_graph()
# preprocessing inputs
for idx, image in enumerate(input_chunk):
input_image, resized = trt_inferencer.input_preprocessing(image)
infer_input[idx, :, :, :] = input_image
# Inferring on the keras model.
with graph.as_default():
output = keras_model.predict(infer_input, batch_size=batch_size)
infer_dict = trt_inferencer.predictions_to_dict(output)
infer_out = trt_inferencer.keras_output_map(infer_dict)
return infer_out, resized
def set_logger(verbose=False):
    """Configure the logging format and verbosity level."""
    info_level = 'INFO'
    if verbose:
        info_level = 'DEBUG'
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level=info_level)
def prepare_test_input(arch, num_layers, input_shape):
    """Build a gridbox model, save the Keras model and convert it to an etlt file."""
gridbox_model = get_gridbox_tlt_model(arch, num_layers=num_layers, input_shape=input_shape)
tlt_model = gridbox_model.keras_model
os_handle, tmp_tlt_path = tempfile.mkstemp()
os.close(os_handle)
tlt_model.save(tmp_tlt_path)
etlt_model_path = convert_to_tlt(tlt_model)
return tmp_tlt_path, etlt_model_path
@pytest.mark.skipif(os.getenv("RUN_ON_CI", "0") == "1", reason="Cannot be run on CI")
@pytest.mark.parametrize("spec, batch_size, save_engine, arch, num_layers, input_shape", topologies)
def test_trt_inferencer(spec,
batch_size,
save_engine,
arch,
num_layers,
input_shape,
gpu_set=0):
"""Simple function to test trt inferencer engine.
    This function reads in a model template, creates an instance of the TRT inferencer,
    generates a TRT engine and then runs the model.
Args:
spec (string): Path to an inferencer spec file.
batch_size (int): Number of images per batch of inference.
save_engine (bool): Flag to save engine.
gpu_set (int): Gpu device id to run inference under.
arch (str): The architecture of the model under test.
num_layers (int): Depth of the network if scalable.
input_shape (tuple(ints)): Shape of the input in (C, H, W) format.
Return:
No explicit returns.
"""
verbose = False
n_batches = 1
set_logger(verbose)
tlt_model_path, etlt_model_path = prepare_test_input(arch, num_layers, input_shape)
inference_spec = load_experiment_spec(spec, merge_from_default=False, validation_schema="inference")
if hasattr(inference_spec, 'inferencer_config'):
inferencer_config = inference_spec.inferencer_config
else:
raise ValueError("Invalid spec file provided at {}".format(spec))
inferencer_config.tensorrt_config.etlt_model = etlt_model_path
inferencer_config.image_height = input_shape[1]
inferencer_config.image_width = input_shape[2]
inferencer_config.batch_size = batch_size
# Setup trt inferencer based on the test case topology.
_, trt_inferencer = build_inferencer(inf_config=inferencer_config,
verbose=True,
key=ENC_KEY)
# Generate random inputs.
infer_chunk = []
for idx in range(batch_size):
infer_chunk.append(get_inferencer_input(input_shape))
# Generating inference for the keras model.
keras_output, resized = get_keras_inferences(infer_chunk, trt_inferencer, tlt_model_path)
K.clear_session()
# Setup trt session and allocate buffers.
trt_inferencer.network_init()
trt_engine = trt_inferencer._engine_file
# Run inference using TRT inferencer.
trt_output, resized = trt_inferencer.infer_batch(infer_chunk)
check_output(keras_output, trt_output, parser='etlt')
if os.path.isfile(trt_engine):
os.remove(trt_engine)
# Free up session and buffers.
trt_inferencer.clear_buffers()
trt_inferencer.clear_trt_session()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/inferencer/tests/test_trt_inferencer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class to export trained .tlt models to etlt file."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
try:
import tensorrt as trt # noqa pylint: disable=W0611 pylint: disable=W0611
from nvidia_tao_tf1.cv.common.export.tensorfile_calibrator import TensorfileCalibrator
from nvidia_tao_tf1.cv.detectnet_v2.export.detectnet_calibrator import DetectNetCalibrator
except Exception as e:
logger = logging.getLogger(__name__)
logger.warning(
"Failed to import TensorRT package, exporting TLT to a TensorRT engine "
"will not be available."
)
from nvidia_tao_tf1.cv.common.export.keras_exporter import KerasExporter as Exporter
from nvidia_tao_tf1.cv.detectnet_v2.spec_handler.spec_loader import load_experiment_spec
logger = logging.getLogger(__name__)
CUSTOM_OBJS = None
class DetectNetExporter(Exporter):
"""Define an exporter for trained DetectNet_v2 models."""
def __init__(self, model_path=None,
key=None,
data_type="fp32",
strict_type=False,
experiment_spec_path=None,
backend="uff",
data_format="channels_first",
onnx_route="keras2onnx",
**kwargs):
"""Initialize the DetectNet_v2 exporter.
Args:
model_path (str): Path to the model file.
key (str): Key to load the model.
            data_type (str): TensorRT backend data type.
            strict_type(bool): Apply TensorRT strict_type_constraints or not for INT8 mode.
            backend (str): TensorRT parser to be used.
            experiment_spec_path (str): Path to the experiment spec file.
            data_format (str): Format of the input_channels.
            onnx_route (str): Package used to convert the Keras model to ONNX
                ("keras2onnx" or "tf2onnx").
Returns:
None.
"""
super(DetectNetExporter, self).__init__(model_path=model_path,
key=key,
data_type=data_type,
strict_type=strict_type,
backend=backend,
data_format=data_format,
onnx_route=onnx_route,
**kwargs)
if experiment_spec_path is not None:
            assert os.path.exists(experiment_spec_path), (
                "Experiment spec file is not found at: {}".format(experiment_spec_path)
            )
self.experiment_spec = load_experiment_spec(
spec_path=experiment_spec_path,
merge_from_default=False,
validation_schema="train_val")
def get_class_labels(self):
"""Get list of class labels to serialize to a labels.txt file."""
if self.experiment_spec is None:
raise AttributeError(
"Experiment spec wasn't loaded. To get class labels "
"please provide the experiment spec file using the -e "
"option.")
if not self.experiment_spec.HasField("cost_function_config"):
raise AttributeError(
"cost_function_config not defined in the experiment spec file."
)
cf_config = self.experiment_spec.cost_function_config
target_classes = [
target_class.name for target_class in cf_config.target_classes
]
return target_classes
def set_input_output_node_names(self):
"""Set input output node names."""
self.output_node_names = ["output_cov/Sigmoid", "output_bbox/BiasAdd"]
self.input_node_names = ["input_1"]
def set_data_preprocessing_parameters(self, input_dims, image_mean=None):
"""Set data pre-processing parameters for the int8 calibration."""
logger.debug("Input dimensions: {}".format(input_dims))
num_channels = input_dims[0]
scale = 1.0/255.0
if num_channels == 3:
means = [0., 0., 0.]
elif num_channels == 1:
means = [0]
else:
raise NotImplementedError("Invalid number of dimensions {}.".format(num_channels))
self.preprocessing_arguments = {"scale": scale,
"means": means,
"flip_channel": False}
def get_calibrator(self,
calibration_cache,
data_file_name,
n_batches,
batch_size,
input_dims,
calibration_images_dir=None,
image_mean=None):
"""Simple function to get an int8 calibrator.
Args:
calibration_cache (str): Path to store the int8 calibration cache file.
            data_file_name (str): Path to the TensorFile. If the tensorfile doesn't exist
                at this path, one is created with n_batches of tensors of dimensions
                (batch_size,) + (input_dims), drawn either from random data or from the
                images in calibration_images_dir.
n_batches (int): Number of batches to calibrate the model over.
batch_size (int): Number of input tensors per batch.
input_dims (tuple): Tuple of input tensor dimensions in CHW order.
calibration_images_dir (str): Path to a directory of images to generate the
data_file from.
image_mean (tuple): Pixel mean for channel-wise mean subtraction.
Returns:
calibrator (nvidia_tao_tf1.cv.common.export.base_calibrator.TensorfileCalibrator):
TRTEntropyCalibrator2 instance to calibrate the TensorRT engine.
"""
if self.experiment_spec is not None:
# Get calibrator based on the detectnet dataloader.
calibrator = DetectNetCalibrator(
self.experiment_spec,
calibration_cache,
n_batches,
batch_size)
else:
if not os.path.exists(data_file_name):
self.generate_tensor_file(data_file_name,
calibration_images_dir,
input_dims,
n_batches=n_batches,
batch_size=batch_size)
calibrator = TensorfileCalibrator(data_file_name,
calibration_cache,
n_batches,
batch_size)
return calibrator
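# Illustrative usage sketch (comments only; the paths below are placeholders):
#
#   exporter = DetectNetExporter(model_path="model.hdf5",
#                                key="tlt_encode",
#                                data_type="int8",
#                                experiment_spec_path="default_spec.txt",
#                                backend="uff")
#   # With an experiment spec loaded, get_calibrator() returns a DetectNetCalibrator
#   # that draws calibration batches from the training dataloader; otherwise a
#   # TensorfileCalibrator backed by a (possibly generated) tensorfile is used.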
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/export/exporter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to export a trained DetectNet_v2 model to an ETLT file for deployment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/export/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DetectNet_v2 calibrator class for TensorRT INT8 Calibration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import numpy as np
import pycuda.autoinit # noqa pylint: disable=unused-import
import pycuda.driver as cuda
import tensorflow as tf
# Simple helper class for calibration.
from nvidia_tao_tf1.cv.common.export.base_calibrator import BaseCalibrator
from nvidia_tao_tf1.cv.detectnet_v2.common.graph import get_init_ops
# Import Detectnet_v2 dataloader.
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.build_dataloader import build_dataloader
logger = logging.getLogger(__name__)
class DetectNetCalibrator(BaseCalibrator):
"""Detectnet_v2 calibrator class."""
def __init__(self, experiment_spec, cache_filename,
n_batches, batch_size,
*args, **kwargs):
"""Init routine.
This inherits from ``nvidia_tao_tf1.cv.common.export.base_calibrator.BaseCalibrator``
to implement the calibration interface that TensorRT needs to
calibrate the INT8 quantization factors. The data source here is assumed
to be the data tensors that are yielded from the DetectNet_v2 dataloader.
Args:
            experiment_spec (nvidia_tao_tf1.cv.detectnet_v2.proto.experiment_pb2): DetectNet_v2
                experiment spec proto object used to build the calibration dataloader.
            cache_filename (str): name of calibration file to read/write to.
            n_batches (int): number of batches to calibrate for.
batch_size (int): batch size to use for calibration (this must be
smaller or equal to the batch size of the provided data).
"""
super(DetectNetCalibrator, self).__init__(
cache_filename,
n_batches, batch_size,
*args, **kwargs
)
self._data_source = None
# Instantiate the dataloader.
self.instantiate_data_source(experiment_spec)
# Configure tensorflow before running tensorrt.
self.set_session()
def set_session(self):
"""Simple function to set the tensorflow session."""
# Setting this to minimize the default allocation at import.
gpu_options = tf.compat.v1.GPUOptions(
per_process_gpu_memory_fraction=0.33,
allow_growth=True)
        # Configuring tensorflow to use CPU so that it doesn't interfere
        # with tensorrt.
device_count = {'GPU': 0, 'CPU': 1}
session_config = tf.compat.v1.ConfigProto(
gpu_options=gpu_options,
device_count=device_count
)
self.session = tf.compat.v1.Session(
config=session_config,
graph=tf.get_default_graph()
)
self.session.run(get_init_ops())
def instantiate_data_source(self, experiment_spec):
"""Simple function to instantiate the data_source of the dataloader.
Args:
experiment_spec (nvidia_tao_tf1.cv.detectnet_v2.proto.experiment_pb2): Detectnet_v2
experiment spec proto object.
Returns:
No explicit returns.
"""
        if not (hasattr(experiment_spec, 'dataset_config') and
                hasattr(experiment_spec, 'augmentation_config')):
            raise ValueError(
                "Experiment spec doesn't have dataset_config or "
                "augmentation_config. Please make sure the dataset_config "
                "and augmentation_config are both present in the experiment_spec "
                "file provided.")
dataset_config = experiment_spec.dataset_config
augmentation_config = experiment_spec.augmentation_config
dataloader = build_dataloader(dataset_config, augmentation_config)
self._data_source, _, num_samples = dataloader.get_dataset_tensors(
self._batch_size,
training=True,
enable_augmentation=False
)
logger.info("Number of samples from the dataloader: {}".format(num_samples))
def get_data_from_source(self):
"""Simple function to get data from the defined data_source."""
batch = self.session.run(self._data_source)
if batch is None:
raise ValueError(
"Batch wasn't yielded from the data source. You may have run "
"out of batches. Please set the num batches accordingly")
return batch
def get_batch(self, names):
"""Return one batch.
Args:
names (list): list of memory bindings names.
"""
if self._batch_count < self._n_batches:
batch = self.get_data_from_source()
if batch is not None:
if self._data_mem is None:
# 4 bytes per float32.
self._data_mem = cuda.mem_alloc(batch.size * 4)
self._batch_count += 1
# Transfer input data to device.
cuda.memcpy_htod(self._data_mem, np.ascontiguousarray(
batch, dtype=np.float32))
return [int(self._data_mem)]
if self._batch_count >= self._n_batches:
self.session.close()
tf.reset_default_graph()
if self._data_mem is not None:
self._data_mem.free()
return None
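# Illustrative usage sketch (comments only; `builder_config` is assumed to be a
# tensorrt.IBuilderConfig created by the export pipeline):
#
#   calibrator = DetectNetCalibrator(experiment_spec, "cal.bin",
#                                    n_batches=10, batch_size=4)
#   builder_config.int8_calibrator = calibrator
#   # While building the INT8 engine, TensorRT repeatedly calls get_batch(); after
#   # n_batches have been served it returns None, the TF session is closed and the
#   # device memory is freed.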
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/export/detectnet_calibrator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test detectnet exporter to generate model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import tempfile
from keras import backend as K
import pytest
import tensorflow as tf
from nvidia_tao_tf1.cv.common.export.app import (
DEFAULT_MAX_BATCH_SIZE,
DEFAULT_MAX_WORKSPACE_SIZE,
run_export
)
from nvidia_tao_tf1.cv.detectnet_v2.cost_function.cost_function_parameters import (
get_target_class_names
)
from nvidia_tao_tf1.cv.detectnet_v2.export.exporter import DetectNetExporter as Exporter
from nvidia_tao_tf1.cv.detectnet_v2.model.build_model import build_model
from nvidia_tao_tf1.cv.detectnet_v2.spec_handler.spec_loader import load_experiment_spec
# Todo: <vpraveen> Use GB Feature extractor constructor to construct GB model and export
# to TRT serializable format for inference
detectnet_root = os.path.dirname(os.path.dirname(
os.path.dirname(os.path.realpath(__file__))))
training_spec = os.path.join(detectnet_root,
"experiment_specs/default_spec.txt")
ENC_KEY = 'tlt_encode'
logging.basicConfig(format='%(asctime)s [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="INFO")
# Restricting the number of GPUs to be used.
gpu_options = tf.compat.v1.GPUOptions(
per_process_gpu_memory_fraction=0.33,
allow_growth=True
)
device_count = {'GPU': 0, 'CPU': 1}
config = tf.compat.v1.ConfigProto(
gpu_options=gpu_options,
device_count=device_count
)
K.set_session(tf.Session(config=config))
topologies = [
("resnet", 18, 'channels_first', 16, "fp32", (3, 960, 544), False, False, False, "uff"),
# ("resnet", 10, 'channels_first', 2, "int8", (3, 480, 272), True, False, False, "onnx"),
# ("vgg", 16, 'channels_first', 2, "int8", (3, 480, 272), True, True, True, "onnx"),
("resnet", 18, 'channels_first', 8, "int8", (3, 480, 272), False, False, False, "uff"),
("resnet", 10, 'channels_first', 8, "int8", (3, 480, 272), False, True, False, "uff"),
("efficientnet_b0", 10, 'channels_first', 8, "int8", (3, 272, 480), False, True, False, "onnx"),
("efficientnet_b0", 10, 'channels_first', 8, "int8", (3, 272, 480), False, True, False, "uff"),
]
def get_tmp_file(suffix=None):
"""Simple wrapper to get a temp file with a suffix.
Args:
suffix (str): String suffix to end the temp file path.
Return:
tmpfile_path (str): Path to the tmp file.
"""
os_handle, temp_file = tempfile.mkstemp(suffix=suffix)
os.close(os_handle)
os.unlink(temp_file)
return temp_file
class TestDetectnetExporter(object):
"""Class to test DetectNet exporter."""
def _setup_gridbox_model_instance(self, enable_qat):
"""Simple function to generate a test bench for TRT Export."""
experiment_spec = load_experiment_spec(training_spec, validation_schema="train_val")
if hasattr(experiment_spec, "model_config"):
model_config = experiment_spec.model_config
else:
raise ValueError(
"Invalid spec file without model_config at {}".format(training_spec))
if hasattr(experiment_spec, "cost_function_config"):
cost_function_config = experiment_spec.cost_function_config
else:
raise ValueError(
"Invalid spec without costfunction config at {}".format(training_spec))
target_class_names = get_target_class_names(cost_function_config)
self.gridbox_model = build_model(model_config, target_class_names, enable_qat=enable_qat)
def _generate_keras_model(self, arch, num_layers, input_shape):
"""Simple function to construct a detectnet_v2 model."""
self.gridbox_model.template = arch
self.gridbox_model.num_layers = num_layers
self.gridbox_model.construct_model(input_shape=input_shape,
kernel_regularizer=None,
bias_regularizer=None,
pretrained_weights_file=None,
enc_key=ENC_KEY)
keras_model_file = get_tmp_file(suffix=".hdf5")
# save keras model to a temp file.
self.gridbox_model.save_model(keras_model_file, enc_key=ENC_KEY)
self.gridbox_model.keras_model.summary()
tf.reset_default_graph()
K.clear_session()
return keras_model_file
def _common(self, exporter_args, backend):
"""Simple function to run common exporter test routines.
Args:
exporter_args (dict): Arguments of exporter.
Returns:
No explicit returns
"""
run_export(Exporter, exporter_args, backend=backend)
output_file = exporter_args["output_file"]
calibration_cache_file = exporter_args["cal_cache_file"]
data_type = exporter_args["data_type"]
engine_file = exporter_args["engine_file"]
gen_ds_config = exporter_args["gen_ds_config"]
# Check if etlt file was written
assert os.path.isfile(output_file), (
"etlt file was not written."
)
assert os.path.isfile(engine_file), (
"Engine file was not generated."
)
# Check if int8 calibration file was written.
if data_type == "int8":
assert os.path.isfile(calibration_cache_file), (
"Calibration cache file wasn't written."
)
if gen_ds_config:
output_root = os.path.dirname(output_file)
output_ds_file = os.path.join(output_root, "nvinfer_config.txt")
assert os.path.isfile(output_ds_file), (
"DS config file wasn't generated"
)
def clear_previous_files(self):
"""Clear previously generated files."""
removable_extensions = [
".tlt", ".json", ".etlt", ".bin",
".trt", ".json", ".txt", ".onnx",
".uff", ".hdf5"
]
for item in os.listdir("/tmp"):
for ext in removable_extensions:
if item.endswith(ext):
os.remove(os.path.join("/tmp", item))
@pytest.mark.skipif(os.getenv("RUN_ON_CI", "0") == "1", reason="Cannot be run on CI")
@pytest.mark.parametrize(
"arch, num_layers, data_format, batch_size, data_type, input_shape, enable_qat, from_spec, gen_ds_config, backend", # noqa: E501
topologies
)
def test_detectnet_v2_exporter(self,
arch,
num_layers,
data_format,
batch_size,
data_type,
input_shape,
enable_qat,
from_spec,
gen_ds_config,
backend):
"""Simple function to test the DetectNet_v2 exporter."""
# Parsing command line arguments.
self._setup_gridbox_model_instance(enable_qat)
model_path = self._generate_keras_model(arch,
num_layers,
input_shape)
cal_cache_file = get_tmp_file(suffix=".bin")
output_file = get_tmp_file(suffix=f".{backend}")
engine_file = get_tmp_file(suffix=".trt")
tensorfile_path = get_tmp_file(suffix=".tensorfile")
cal_json_file = ""
if enable_qat:
cal_json_file = get_tmp_file(suffix=".json")
exporter_args = {
'model': model_path,
'export_module': "detectnet_v2",
'key': ENC_KEY,
"cal_cache_file": cal_cache_file,
"cal_image_dir": "",
"cal_data_file": tensorfile_path,
"batch_size": batch_size,
"batches": 2,
"data_type": data_type,
"output_file": output_file,
"max_workspace_size": DEFAULT_MAX_WORKSPACE_SIZE,
"max_batch_size": DEFAULT_MAX_BATCH_SIZE,
"verbose": False,
"engine_file": engine_file,
"strict_type_constraints": True,
"static_batch_size": -1,
"force_ptq": False,
"gen_ds_config": gen_ds_config,
"min_batch_size": batch_size,
"opt_batch_size": batch_size,
"target_opset": 12,
"cal_json_file": cal_json_file
}
if backend == "onnx":
exporter_args["onnx_route"] = "tf2onnx"
# Choose whether to calibrate from the spec file or not.
if from_spec:
exporter_args["experiment_spec"] = training_spec
else:
exporter_args["experiment_spec"] = None
try:
self._common(exporter_args, backend)
except AssertionError:
raise AssertionError("Exporter failed.")
finally:
self.clear_previous_files()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/export/tests/test_exporter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test detectnet exporter to generate model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import numpy as np
import pycuda.autoinit # noqa pylint: disable=unused-import
import pycuda.driver as cuda
import pytest
import tensorrt as trt
from nvidia_tao_tf1.cv.detectnet_v2.export.detectnet_calibrator import DetectNetCalibrator
from nvidia_tao_tf1.cv.detectnet_v2.inferencer.utilities import HostDeviceMem
from nvidia_tao_tf1.cv.detectnet_v2.spec_handler.spec_loader import load_experiment_spec
detectnet_root = os.path.dirname(os.path.dirname(
os.path.dirname(os.path.realpath(__file__))))
training_spec = os.path.join(detectnet_root,
"experiment_specs/default_spec.txt")
topologies = [
(1, 2), # case 1.
(10, 6)
]
class TestCalibrator(object):
"""Simple class to test the int8 calibrator."""
def _setup_calibrator_instance(self, n_batches, batch_size):
"""Simple function to instantiate a Detectnetv2 calibrator."""
self._experiment_spec = load_experiment_spec(
training_spec,
merge_from_default=False,
validation_schema="train_val")
os_handle, calibration_cachefile = tempfile.mkstemp(suffix=".bin")
os.close(os_handle)
self.calibrator = DetectNetCalibrator(
self._experiment_spec,
calibration_cachefile,
n_batches,
batch_size)
self._batch_count = 0
    def allocate_io_memory(self, n_batches, batch_size):
        """Allocate page-locked host and device buffers matching the dataloader output shape."""
assert hasattr(self._experiment_spec, "augmentation_config"), (
"Augmentation config is required to get the data shape."
)
preprocessing = self._experiment_spec.augmentation_config.preprocessing
input_shape = (
batch_size,
preprocessing.output_image_height,
preprocessing.output_image_width,
preprocessing.output_image_channel
)
num_elements = input_shape[0] * input_shape[1] * \
input_shape[2] * input_shape[3]
# Set up array to receive tf data.
self.tf_data = cuda.pagelocked_empty(
num_elements, trt.nptype(trt.float32)
)
# Set up arrays to mimic tensorrt data in the GPU.
host_mem = cuda.pagelocked_empty(
num_elements, trt.nptype(trt.float32)
)
device_mem = cuda.mem_alloc(host_mem.nbytes)
self.tensorrt_data = HostDeviceMem(
host_mem, device_mem,
"tensorrt_input", input_shape)
def common(self, n_batches):
"""Common function to yield batches and check tensorrt cuda transfer and back."""
while self._batch_count < n_batches:
self._batch_count += 1
batch = self.calibrator.get_data_from_source()
np.copyto(self.tf_data, batch.ravel())
# Copy data from host to device
cuda.memcpy_htod(self.tensorrt_data.device, self.tf_data)
# Copy data from device to host
cuda.memcpy_dtoh(self.tensorrt_data.host, self.tensorrt_data.device)
data_under_test = np.reshape(self.tensorrt_data.host, batch.shape)
assert np.array_equal(batch, data_under_test), (
"The roundtrip from CPU to GPU and back failed."
)
# first test case
@pytest.mark.skipif(os.getenv("RUN_ON_CI", "0") == "1", reason="Cannot be run on CI")
@pytest.mark.parametrize(
"n_batches, batch_size",
topologies
)
def test_calibrator(self,
n_batches,
batch_size):
self._setup_calibrator_instance(n_batches, batch_size)
assert self.calibrator, (
"Calibrator was not created."
)
self.allocate_io_memory(n_batches, batch_size)
try:
self.common(n_batches)
finally:
# Freeing up the GPU memory at the end of the
# test, irrespective of whether it passes or fails.
self.tensorrt_data.device.free()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/export/tests/test_calibrator.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""Base label filter class that defines the interface for label filtering."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nvidia_tao_tf1.cv.detectnet_v2.label_filter.base_label_filter import BaseLabelFilter
class SourceClassLabelFilter(BaseLabelFilter):
"""Label filter that selects only those ground truth objects matching certain names."""
def __init__(self, source_class_names=None):
"""Constructor.
Args:
source_class_names (list of str): Original class names to which this filter will be
applied. If None, the filter is a no-op / not applied.
"""
self.source_class_names = \
set(source_class_names) if source_class_names is not None else None
def is_criterion_satisfied_dict(self, frame_ground_truth_labels):
"""Method that implements the filter criterion as TF.ops.
        Selects only ground truth objects whose name is in self.source_class_names.
Args:
frame_ground_truth_labels (dict of Tensors): contains the labels for a single frame.
Returns:
filtered_indices (bool tf.Tensor): follows indexing in <frame_ground_truth_labels> and,
for each element, is True if it satisfies the criterion.
"""
source_classes = frame_ground_truth_labels['target/object_class']
if self.source_class_names is None:
# This means 'pass-through'.
filtered_indices = \
super(SourceClassLabelFilter, self).\
is_criterion_satisfied_dict(frame_ground_truth_labels)
else:
# Initialize to all False.
filtered_indices = tf.zeros_like(source_classes, dtype=tf.bool)
# Now check individual classes.
for object_class_name in self.source_class_names:
filtered_indices = \
tf.logical_or(filtered_indices,
tf.equal(source_classes, tf.constant(object_class_name)))
return filtered_indices
def is_criterion_satisfied_bbox_2d_label(self, ground_truth_labels):
"""Method that implements the filter criterion as TF.ops.
        Selects only ground truth objects whose name is in self.source_class_names.
Args:
ground_truth_labels (Bbox2DLabel): Contains the labels for all
frames within a batch.
Returns:
filtered_indices (bool tf.Tensor): follows indexing in <ground_truth_labels> and,
for each element, is True if it satisfies the criterion.
"""
source_classes = ground_truth_labels.object_class
if self.source_class_names is None:
# This means 'pass-through'.
filtered_indices = \
super(SourceClassLabelFilter, self).\
is_criterion_satisfied_bbox_2d_label(ground_truth_labels)
else:
# Initialize to all False.
filtered_indices = tf.zeros_like(
source_classes.values, dtype=tf.bool)
# Now check individual classes.
for object_class_name in self.source_class_names:
filtered_indices = \
tf.logical_or(filtered_indices,
tf.equal(source_classes.values, tf.constant(object_class_name)))
return filtered_indices
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/label_filter/source_class_label_filter.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""Base label filter class that defines the interface for label filtering."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import tensorflow as tf
from nvidia_tao_tf1.blocks.multi_source_loader.types.bbox_2d_label import Bbox2DLabel
class BaseLabelFilter(object):
"""Label filter base class defining the interface for selection / filtering."""
def __init__(self):
"""Constructor."""
def is_criterion_satisfied_bbox_2d_label(self, ground_truth_labels):
"""Method that implements the filter criterion as TF.ops.
The base class's filter is a pass-through layer.
Args:
ground_truth_labels (Bbox2DLabel): Contains the labels for all
frames within a batch.
Returns:
filtered_indices (bool tf.Tensor): follows indexing in <ground_truth_labels> and,
for each element, is True if it satisfies the criterion.
"""
source_classes = ground_truth_labels.object_class
filtered_indices = tf.ones_like(source_classes.values, dtype=tf.bool)
return filtered_indices
def is_criterion_satisfied_dict(self, frame_ground_truth_labels):
"""Method that implements the filter criterion as TF.ops.
The base class's filter is a pass-through layer.
Args:
frame_ground_truth_labels (dict of Tensors): contains the labels for a single frame.
Returns:
filtered_indices (bool tf.Tensor): follows indexing in <frame_ground_truth_labels> and,
for each element, is True if it satisfies the criterion.
"""
filtered_indices = \
tf.ones_like(
frame_ground_truth_labels['target/object_class'], dtype=tf.bool)
return filtered_indices
def is_criterion_satisfied(self, frame_ground_truth_labels):
"""Method that implements the filter criterion as TF.ops.
The base class's filter is a pass-through layer.
Args:
frame_ground_truth_labels (dict of Tensors): contains the labels for a single frame.
Returns:
filtered_indices (bool tf.Tensor): follows indexing in <frame_ground_truth_labels> and,
for each element, is True if it satisfies the criterion.
"""
filtered_indices = None
if isinstance(frame_ground_truth_labels, dict):
filtered_indices = self.is_criterion_satisfied_dict(
frame_ground_truth_labels)
elif isinstance(frame_ground_truth_labels, Bbox2DLabel):
filtered_indices = self.is_criterion_satisfied_bbox_2d_label(
frame_ground_truth_labels)
else:
raise ValueError("Unsupported type.")
return filtered_indices
def filter_labels(ground_truth_labels, indices):
"""Filter ground truth labels according to indices indicating criterion satisfaction.
Args:
        ground_truth_labels (dict of Tensors or Bbox2DLabel): contains the labels for a single
            frame (dict) or for all frames in a batch (Bbox2DLabel).
        indices (bool tf.Tensor): follows indexing of <ground_truth_labels> and indicates
            for each element whether the criterion has been met.
    Returns:
        filtered_ground_truth_labels (dict of Tensors or Bbox2DLabel): contains the same
            fields as the input, but keeps only those examples that satisfy the criterion.
"""
filtered_ground_truth_labels = {}
if isinstance(ground_truth_labels, dict):
filtered_ground_truth_labels = filter_labels_dict(
ground_truth_labels, indices)
elif isinstance(ground_truth_labels, Bbox2DLabel):
filtered_ground_truth_labels = ground_truth_labels.filter(indices)
else:
raise ValueError("Unsupported type.")
return filtered_ground_truth_labels
def filter_labels_dict(frame_ground_truth_labels, indices):
"""Filter ground truth labels according to indices indicating criterion satisfaction.
Args:
frame_ground_truth_labels (dict of Tensors): contains the labels for a single frame.
indices (bool tf.Tensor): follows indexing of <frame_ground_truth_labels> and indicates
for each element whether the criterion has been met.
Returns:
filtered_ground_truth_labels (dict of Tensors): contains the same fields as the input,
but keeps only those examples that satisfy the criterion.
"""
coordinate_mask = None
filtered_ground_truth_labels = dict()
# Filter target features based on the valid indices. Other features are left as is.
for feature_name, feature_tensor in six.iteritems(frame_ground_truth_labels):
# Features which are mapped by index need a gathered mask.
if feature_name.startswith('target/coordinates/'):
coordinate_mask = coordinate_mask if coordinate_mask is not None else \
tf.gather(
indices, frame_ground_truth_labels['target/coordinates/index'])
if feature_name == 'target/coordinates/index':
# Broadcast the boolean mask to the index of all polygons, then ensure that the
# index contains no ordinal greater than its count.
masked_tensor = tf.unique(tf.boolean_mask(
feature_tensor, coordinate_mask))[1]
else:
# Broadcast the boolean mask to the vertices of all polygons.
masked_tensor = tf.boolean_mask(
feature_tensor, coordinate_mask)
filtered_ground_truth_labels[feature_name] = masked_tensor
elif feature_name.startswith('target/'):
# TODO(@williamz): when TF >= 1.5, use the 'axis' kwarg.
filtered_ground_truth_labels[feature_name] = \
tf.boolean_mask(feature_tensor, indices)
else:
filtered_ground_truth_labels[feature_name] = feature_tensor
return filtered_ground_truth_labels
def get_chained_filters_indices(label_filters, frame_ground_truth_labels, mode=None):
"""Helper function that returns the boolean mask of the filters via logical-or or logical-and.
Args:
label_filters (list): Each element is an instance of any of BaseLabelFilter's child classes.
frame_ground_truth_labels (dict of Tensors): contains the labels for a single frame.
mode (str): How to chain all the elements in <label_filters>. Supported modes are 'or'
and 'and'. Note that it is ignored when there is only one item in <label_filters>.
Returns:
filtered_indices (bool tf.Tensor): Follows indexing in <frame_ground_truth_labels> and,
for each element, is True if it satisfies one of the criteria in <label_filters>.
Raises:
AssertionError: If <label_filters> is empty.
ValueError: If <label_filters> has more than 1 filters but mode parameter is not valid.
"""
assert label_filters, "Please provide at least one filter."
# Get a list where each element is a tf.Tensor of bool values for the corresponding entry in
# in <label_filters>.
filtered_indices_list = [
x.is_criterion_satisfied(frame_ground_truth_labels) for x in label_filters]
if len(label_filters) == 1:
# No logical operation needed.
filtered_indices = filtered_indices_list[0]
elif mode == 'or':
# Apply them as a logical-or.
filtered_indices = tf.reduce_any(tf.stack(filtered_indices_list), axis=0)
elif mode == 'and':
# Apply them as a logical-and.
filtered_indices = tf.reduce_all(tf.stack(filtered_indices_list), axis=0)
else:
# When using multiple filters, mode parameter is necessary.
raise ValueError("Mode should be either 'or' or 'and' when filter number > 1.")
return filtered_indices
def apply_label_filters(label_filters, ground_truth_labels, mode=None):
"""Apply multiple label filters via using user-specified mode to labels.
Args:
label_filters (list): Each element is an instance of any of BaseLabelFilter's child classes.
ground_truth_labels (dict of Tensors or Bbox2DLabel):
dict of Tensors: contains the labels for a single frame.
Bbox2DLabel: contains bboxes for all frames in a batch.
mode (str): How to chain all the elements in <label_filters>. Supported modes are 'or'
and 'and'. Note that it is ignored when there is only one item in <label_filters>.
Returns:
        filtered_ground_truth_labels (dict of Tensors or Bbox2DLabel): Contains the same fields
            as the input, but keeps only those examples that satisfy the criterion.
"""
# Get filtered indices.
filtered_indices = get_chained_filters_indices(label_filters, ground_truth_labels, mode)
# Now apply the boolean mask.
filtered_ground_truth_labels = filter_labels(ground_truth_labels, filtered_indices)
return filtered_ground_truth_labels
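# Illustrative usage sketch (comments only; `frame_labels` stands for a labels dict
# produced by the dataloader, and the filters are defined in the sibling
# label_filter modules):
#
#   filters = [SourceClassLabelFilter(source_class_names=['car', 'person']),
#              BboxDimensionsLabelFilter(min_width=4.0, min_height=4.0)]
#   kept_labels = apply_label_filters(filters, frame_labels, mode='and')
#   # Only 'car'/'person' targets whose boxes are at least ~4 pixels wide and tall
#   # are kept; all other target/* features are masked consistently.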
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/label_filter/base_label_filter.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""Crop label filter. Filters ground truth objects based on cropping area."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nvidia_tao_tf1.cv.detectnet_v2.label_filter.base_label_filter import BaseLabelFilter
class BboxCropLabelFilter(BaseLabelFilter):
"""Filter labels based on crop coordinates and bbox coordinates."""
def __init__(self,
crop_left=None,
crop_right=None,
crop_top=None,
crop_bottom=None):
"""Constructor.
Args:
crop_left/crop_right/crop_top/crop_bottom (int32): crop rectangle coordinates.
Check if the given crop coordinates constitute a valid crop rectangle. If
any of them is None, the filter does not remove any label. If all of them
are 0, the filter does not remove any label.
Raises:
ValueError: if crop_left > crop_right, or crop_top > crop_bottom, raise error.
"""
super(BboxCropLabelFilter, self).__init__()
# Check if the given crop coordinates constitute a valid crop rectangle.
if any(item is None for item in [crop_left, crop_right, crop_top, crop_bottom]):
self._valid_crop = False
elif all(item == 0 for item in [crop_left, crop_right, crop_top, crop_bottom]):
self._valid_crop = False
elif crop_left < crop_right and crop_top < crop_bottom:
self._valid_crop = True
else:
raise ValueError("crop_right/crop_bottom should be greater than crop_left/crop_right.")
self._crop_left = crop_left
self._crop_right = crop_right
self._crop_top = crop_top
self._crop_bottom = crop_bottom
def is_criterion_satisfied_dict(self, frame_ground_truth_labels):
"""Method that implements the filter criterion as TF.ops.
        Only keeps those labels whose bounding boxes overlap with the crop region.
Args:
frame_ground_truth_labels (dict of Tensors): contains the labels for a single frame.
Returns:
filtered_indices (bool tf.Tensor): follows indexing in <frame_ground_truth_labels> and,
for each element, is True if it satisfies the criterion.
"""
filtered_indices = \
super(BboxCropLabelFilter, self).\
is_criterion_satisfied_dict(frame_ground_truth_labels)
if self._valid_crop:
crop_left = tf.cast(self._crop_left, tf.float32)
crop_right = tf.cast(self._crop_right, tf.float32)
crop_top = tf.cast(self._crop_top, tf.float32)
crop_bottom = tf.cast(self._crop_bottom, tf.float32)
# Retrieve bbox coordinates.
x1, y1, x2, y2 = tf.unstack(frame_ground_truth_labels['target/bbox_coordinates'],
axis=1)
filtered_indices = \
tf.logical_and(filtered_indices, tf.less(x1, crop_right))
filtered_indices = \
tf.logical_and(filtered_indices, tf.greater(x2, crop_left))
filtered_indices = \
tf.logical_and(filtered_indices, tf.less(y1, crop_bottom))
filtered_indices = \
tf.logical_and(filtered_indices, tf.greater(y2, crop_top))
return filtered_indices
def is_criterion_satisfied_bbox_2d_label(self, ground_truth_labels):
"""Method that implements the filter criterion as TF.ops.
        Only keeps those labels whose bounding boxes overlap with the crop region.
Args:
ground_truth_labels (Bbox2DLabel): Contains the labels for all
frames within a batch.
Returns:
filtered_indices (bool tf.Tensor): follows indexing in <ground_truth_labels> and,
for each element, is True if it satisfies the criterion.
"""
filtered_indices = \
super(BboxCropLabelFilter, self).\
is_criterion_satisfied_bbox_2d_label(ground_truth_labels)
if self._valid_crop:
crop_left = tf.cast(self._crop_left, tf.float32)
crop_right = tf.cast(self._crop_right, tf.float32)
crop_top = tf.cast(self._crop_top, tf.float32)
crop_bottom = tf.cast(self._crop_bottom, tf.float32)
# Retrieve bbox coordinates.
x1 = ground_truth_labels.vertices.coordinates.values[::4]
y1 = ground_truth_labels.vertices.coordinates.values[1::4]
x2 = ground_truth_labels.vertices.coordinates.values[2::4]
y2 = ground_truth_labels.vertices.coordinates.values[3::4]
filtered_indices = \
tf.logical_and(filtered_indices, tf.less(x1, crop_right))
filtered_indices = \
tf.logical_and(filtered_indices, tf.greater(x2, crop_left))
filtered_indices = \
tf.logical_and(filtered_indices, tf.less(y1, crop_bottom))
filtered_indices = \
tf.logical_and(filtered_indices, tf.greater(y2, crop_top))
return filtered_indices
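# Illustrative usage sketch (comments only; the crop window values are placeholders):
#
#   crop_filter = BboxCropLabelFilter(crop_left=0, crop_right=960,
#                                     crop_top=0, crop_bottom=544)
#   # Targets whose boxes fall completely outside the 960x544 crop window are dropped;
#   # passing all zeros (or leaving any coordinate as None) makes the filter a no-op.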
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/label_filter/bbox_crop_label_filter.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""Builder for BaseLabelFilter and child classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nvidia_tao_tf1.cv.detectnet_v2.label_filter.bbox_crop_label_filter import BboxCropLabelFilter
from nvidia_tao_tf1.cv.detectnet_v2.label_filter.bbox_dimensions_label_filter import (
BboxDimensionsLabelFilter
)
from nvidia_tao_tf1.cv.detectnet_v2.label_filter.source_class_label_filter import (
SourceClassLabelFilter
)
ONE_OF_KEY = "label_filter_params"
FILTER_MAPPING = {
"bbox_dimensions_label_filter": BboxDimensionsLabelFilter,
"bbox_crop_label_filter": BboxCropLabelFilter,
"source_class_label_filter": SourceClassLabelFilter,
}
def build_label_filter(label_filter_config):
"""Build the necessary filter.
Args:
label_filter_config: nvidia_tao_tf1.cv.detectnet_v2.proto.LabelFilter proto message.
Returns:
label_filter: BaseLabelFilter or child class.
"""
# First, determine what kind of label filter the config pertains to.
one_of_name = label_filter_config.WhichOneof(ONE_OF_KEY)
# Then, extract the required kwargs.
label_filter_kwargs = {
# Here you notice proto_field.name has to be the same as the kwarg for the __init__
# of the <XYZ>LabelFilter. Note that .ListFields() only lists fields in the message
# which are not empty.
proto_field.name: arg_value for proto_field, arg_value in
getattr(label_filter_config, one_of_name).ListFields()
}
# Return appropriate class and kwargs.
return FILTER_MAPPING[one_of_name](**label_filter_kwargs)
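# Illustrative usage sketch (comments only; `label_filter_config` is assumed to be a
# nvidia_tao_tf1.cv.detectnet_v2.proto LabelFilter message parsed from a spec file):
#
#   label_filter = build_label_filter(label_filter_config)
#   # e.g. a config setting source_class_label_filter.source_class_names yields a
#   # SourceClassLabelFilter(source_class_names=[...]) instance; only the fields that
#   # are set in the one-of message are forwarded as keyword arguments.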
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/label_filter/build_label_filter.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""Defines functions and classes for filtering labels for DetectNet V2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/label_filter/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test ground truth label filters."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pytest
import tensorflow as tf
from nvidia_tao_tf1.blocks.multi_source_loader import types
import nvidia_tao_tf1.core
from nvidia_tao_tf1.cv.detectnet_v2.label_filter.base_label_filter import apply_label_filters
from nvidia_tao_tf1.cv.detectnet_v2.label_filter.base_label_filter import BaseLabelFilter
from nvidia_tao_tf1.cv.detectnet_v2.label_filter.bbox_crop_label_filter import BboxCropLabelFilter
from nvidia_tao_tf1.cv.detectnet_v2.label_filter.bbox_dimensions_label_filter import (
BboxDimensionsLabelFilter
)
from nvidia_tao_tf1.cv.detectnet_v2.label_filter.source_class_label_filter import (
SourceClassLabelFilter
)
Canvas2D = nvidia_tao_tf1.core.types.Canvas2D
# TODO(@williamz): Similar things appear in dataloader/default_dataloader.py. If we need constants,
# surely there is a better place / way to have them?
OBJECT_CLASS_KEY = 'target/object_class'
def get_dummy_labels(source_class_names, other_attributes=None):
"""Create a set of TF constant variables that will act as labels.
Args:
source_class_names (list of str): List of object class names to be used for the target
labels.
other_attributes (dict): Key value pairs with which to update the label examples. e.g.
{'target/coordinates_x1': ...}
Returns:
frame_ground_truth_labels (dict): Contains a single key 'target/object_class' to which
a tf.constant tensor with the input <source_class_names> is mapped.
"""
frame_ground_truth_labels = dict()
frame_ground_truth_labels[OBJECT_CLASS_KEY] = tf.constant(
source_class_names)
if other_attributes is not None:
for attribute_name, attribute_values in other_attributes.items():
frame_ground_truth_labels[attribute_name] = tf.constant(
attribute_values)
return frame_ground_truth_labels
def _get_bbox_2d_labels():
"""Bbox2DLabel for test preparation."""
frame_indices = [0, 1, 2, 3, 3]
object_class = tf.constant(
['pedestrian', 'unmapped', 'automobile', 'truck', 'truck'])
bbox_coordinates = tf.constant(
[7.0, 6.0, 8.0, 9.0,
2.0, 3.0, 4.0, 5.0,
0.0, 0.0, 3.0, 4.0,
1.2, 3.4, 5.6, 7.8,
4.0, 4.0, 10.0, 10.0])
world_bbox_z = tf.constant([1.0, 2.0, 3.0, -1.0, -2.0])
front = tf.constant([0.5, 1.0, -0.5, -1.0, 0.5])
back = tf.constant([-1.0, 0.0, 0.0, 0.63, -1.0])
canvas_shape = Canvas2D(height=tf.ones([1, 12]), width=tf.ones([1, 12]))
sparse_coordinates = tf.SparseTensor(
values=bbox_coordinates,
dense_shape=[5, 5, 2, 2],
indices=[[f, 0, j, k]
for f in frame_indices
for j in range(2)
for k in range(2)])
sparse_object_class = tf.SparseTensor(
values=object_class,
dense_shape=[5, 5, 1],
indices=[[f, 0, 0]
for f in frame_indices])
sparse_world_bbox_z = tf.SparseTensor(
values=world_bbox_z,
dense_shape=[5, 5, 1],
indices=[[f, 0, 0]
for f in frame_indices])
sparse_front = tf.SparseTensor(
values=front,
dense_shape=[5, 5, 1],
indices=[[f, 0, 0]
for f in frame_indices])
sparse_back = tf.SparseTensor(
values=back,
dense_shape=[5, 5, 1],
indices=[[f, 0, 0]
for f in frame_indices])
source_weight = [tf.constant(2.0, tf.float32)]
# Initialize all fields to empty lists (to signify 'optional' fields).
bbox_2d_label_kwargs = {field_name: []
for field_name in types.Bbox2DLabel._fields}
bbox_2d_label_kwargs.update({
'frame_id': tf.constant('bogus'),
'object_class': sparse_object_class,
'vertices': types.Coordinates2D(
coordinates=sparse_coordinates, canvas_shape=canvas_shape),
'world_bbox_z': sparse_world_bbox_z,
'front': sparse_front,
'back': sparse_back,
'source_weight': source_weight})
return types.Bbox2DLabel(**bbox_2d_label_kwargs)
class TestBaseLabelFilter:
def test_no_op_dict(self):
"""Test that the base filter acts as no-op with default settings."""
base_label_filter = BaseLabelFilter()
original_labels = get_dummy_labels(['class_1', 'class_2', 'class_3'])
filtered_labels = apply_label_filters(
[base_label_filter], original_labels)
with tf.compat.v1.Session() as sess:
for feature in original_labels:
original_feature, filtered_feature = sess.run([original_labels[feature],
filtered_labels[feature]])
assert np.all(original_feature == filtered_feature)
def test_no_op_bbox_2d_label(self):
"""Test that the base filter acts as no-op with default settings with bbox 2d labels."""
base_label_filter = BaseLabelFilter()
original_labels = _get_bbox_2d_labels()
filtered_labels = apply_label_filters(
[base_label_filter], original_labels)
original_source_shape = tf.shape(
input=original_labels.object_class.values)
filtered_source_shape = tf.shape(
input=filtered_labels.object_class.values)
with tf.compat.v1.Session() as sess:
output_original_source_shape = sess.run(original_source_shape)
output_filtered_source_shape = sess.run(filtered_source_shape)
np.testing.assert_equal(
output_original_source_shape, output_filtered_source_shape)
class TestSourceClassLabelFilter:
def test_class_filtering_dict(self):
"""Test that supplying target class names only keeps those labels in the base filter."""
source_class_names = ['class_1', 'class_2', 'class_3', 'class_4']
with tf.compat.v1.Session() as sess:
for i, _ in enumerate(source_class_names):
# Exclude one class.
remaining_class_names = source_class_names[:i] + \
source_class_names[i+1:]
# Get base filter that only applies to the remaining classes.
label_filter = SourceClassLabelFilter(
source_class_names=remaining_class_names)
# Duplicate an entry, for good measure.
remaining_class_names = [
remaining_class_names[0]] + remaining_class_names
# Get dummy labels.
original_labels = get_dummy_labels(
remaining_class_names,
other_attributes={'target/object_class': remaining_class_names})
filtered_labels = apply_label_filters(
[label_filter], original_labels)
filtered_class_labels = sess.run(
[filtered_labels[OBJECT_CLASS_KEY]])[0]
# Check that the filter worked.
assert np.all(remaining_class_names ==
filtered_class_labels.astype(str))
@pytest.mark.parametrize(
"filtered_class_names,exp_filtered_class_names",
[
(['truck', 'automobile'], [b'automobile', b'truck', b'truck']),
(['pedestrian', 'automobile'], [b'pedestrian', b'automobile']),
]
)
def test_class_filtering_bbox_2d_label(self, filtered_class_names, exp_filtered_class_names):
"""Test source class label filter could filter label correctly with bbox 2d labels.
Args:
filtered_class_names (list of str): Source classes to filter.
exp_filtered_class_names (list of str): Expected class names returned
after label filter.
Raises:
AssertionError: If the filtering did not behave as expected.
"""
original_labels = _get_bbox_2d_labels()
label_filter = SourceClassLabelFilter(
source_class_names=filtered_class_names)
filtered_labels = apply_label_filters([label_filter], original_labels)
filtered_source_classes = filtered_labels.object_class.values
with tf.compat.v1.Session() as sess:
output_filtered_source_classes = sess.run(filtered_source_classes)
for i in range(len(exp_filtered_class_names)):
assert output_filtered_source_classes[i] == exp_filtered_class_names[i]
def _get_dummy_bbox_labels(source_class_names, heights, widths, other_attributes=None):
"""Generate some dummy labels with given dimensions.
Args:
source_class_names (list of str): List of object class names for the target labels.
heights (list of float): Follows indexing of <source_class_names> and has the corresponding
height.
widths (list of float): Likewise but for width.
other_attributes (dict): Key value pairs with which to update the label examples. e.g.
{'target/coordinates_x1': ...}
Returns:
frame_ground_truth_labels (dict): Contains the keys 'target/object_class' and the
coordinates' keys.
"""
num_targets = len(source_class_names)
assert len(source_class_names) == len(heights) == len(
widths), "Inputs of different lengths"
if other_attributes is None:
other_attributes = dict()
# TODO(@williamz): Again, these keys should really be better defined than hardcoded in every
# file that needs them.
x1 = np.random.uniform(low=-50.0, high=1000.0, size=num_targets)
x2 = x1 + widths
y1 = np.random.uniform(low=-1000.0, high=-200.0, size=num_targets)
y2 = y1 + heights
other_attributes['target/bbox_coordinates'] = np.stack(
[x1, y1, x2, y2], axis=1)
if 'target/object_class' not in other_attributes:
other_attributes['target/object_class'] = source_class_names
return get_dummy_labels(source_class_names, other_attributes=other_attributes)
def check_filtered_labels(original_labels, filtered_labels, kept_indices):
"""Assert that the filtering was done as expected.
Args:
original_labels (dict of tf.Tensors): pre-filtering ground truth labels.
filtered_labels (dict of tf.Tensors): post-filtering ground truth labels.
kept_indices (list of ints): Indicates which indices should have been kept after applying
the filter(s).
Raises:
AssertionError: if the filtering did not behave as expected.
"""
for feature_name in original_labels:
original_feature = original_labels[feature_name]
filtered_feature = filtered_labels[feature_name]
# Now check that only the ones that should have been kept are kept.
assert len(filtered_feature) == len(kept_indices)
# Check the values.
if len(kept_indices) == 0: # This means there should be nothing left.
assert filtered_feature.size == 0
else:
for i, original_kept_index in enumerate(kept_indices):
np.testing.assert_equal(
filtered_feature[i], original_feature[original_kept_index])
class TestBboxDimensionsLabelFilter:
@pytest.mark.parametrize(
"min_width,min_height,max_width,max_height,is_valid",
[
(-10.0, 0.5, 10.0, 1.0, True),
(-10.0, 0.5, -11.1, 0.6, False),
(-10.0, 0.0, -11.0, 0.0, False),
]
)
def test_bbox_dimensions_ranges(self, min_width, min_height, max_width, max_height, is_valid):
"""Test that the BboxDimensionsLabelFilter makes the necessary checks.
Args:
min/max_width/height (float): Thresholds for bbox dimensions.
is_valid (bool): If True, the instantiation should happen gracefully. If False, an
AssertionError is expected.
"""
if is_valid:
BboxDimensionsLabelFilter(min_width=min_width,
min_height=min_height,
max_width=max_width,
max_height=max_height)
else:
with pytest.raises(AssertionError):
BboxDimensionsLabelFilter(min_width=min_width,
min_height=min_height,
max_width=max_width,
max_height=max_height)
@pytest.mark.parametrize(
"source_class_names,heights,widths,params,kept_indices",
[
# Since there are no bounds, everything should be kept.
(["class_1", "class_2", "class_3"], [123.4, 567.8, 90.1], [23., 456., 78.9],
dict(), range(3)),
# Now min_width should be in effect.
(["class_4", "class_5", "class_6"], [123.4, 567.8, 90.1], [23., 456., 78.9],
{'min_width': np.float64(24.0)}, [1, 2]), # By default TF casts stuff as float32.
# Similar test case for min_height.
(["class_7", "class_8", "class_9"], [123.4, 567.8, 90.1], [23., 456., 78.9],
{'min_height': np.float64(124.0)}, [1]),
# Try multiple bounds.
(["class_7", "class_8", "class_9"], [123.4, 567.8, 90.1], [23., 456., 78.9],
{'min_height': np.float64(124.0), 'max_width': np.float64(501.0)}, [1]),
(["class_7", "class_8", "class_9"], [123.4, 567.8, 90.1], [23., 456., 78.9],
{'max_height': np.float64(124.0), 'min_width': np.float64(401.0)}, []),
(["class_7", "class_8", "class_9"], [123.4, 567.8, 90.1], [23., 456., 78.9],
{'max_height': np.float64(124.0), 'min_width': np.float64(55.2)}, [2])
]
)
def test_bbox_dimensions_label_filter_dict(self,
source_class_names,
heights,
widths,
params,
kept_indices):
"""Test the BboxDimensionsLabelFilter.
Args:
source_class_names (list of str): List of object class names to be used for the target
labels.
heights (list of float): Follows indexing of <source_class_names> and has the
corresponding height.
widths (list of float): Likewise but for width.
params (dict): Contains the keyword arguments with which the BboxDimensionsLabelFilter
will be instantiated.
kept_indices (list): Contains the indices in the input (<source_class_names>, <heights>,
<width>) that should be kept after the filtering happens.
Raises:
AssertionError: if the filtering did not behave as expected.
"""
original_labels = _get_dummy_bbox_labels(
source_class_names, heights, widths)
bbox_dimensions_label_filter = BboxDimensionsLabelFilter(**params)
filtered_labels = apply_label_filters(
[bbox_dimensions_label_filter], original_labels)
# First, check that all keys are kept.
assert set(original_labels.keys()) == set(filtered_labels.keys())
with tf.compat.v1.Session() as sess:
np_original_labels, np_filtered_labels = sess.run(
[original_labels, filtered_labels])
check_filtered_labels(np_original_labels,
np_filtered_labels, kept_indices)
@pytest.mark.parametrize(
"min_width,min_height,max_width,max_height,exp_class_names",
[
# Case 1: normal case.
(2.0, 2.0, 5.0, 4.0,
[b'unmapped', b'automobile']),
# Case 2: None input - by default, if part of params are set to None,
# it loosens the constraint.
(None, 2.0, 5.0, 4.0,
[b'pedestrian', b'unmapped', b'automobile'])
]
)
def test_bbox_dimensions_label_filter_bbox_2d_label(self,
min_width,
min_height,
max_width,
max_height,
exp_class_names):
"""Test the BboxDimensionsLabelFilter with bbox 2d labels.
Args:
min/max_width/height (float): Thresholds above/below which to keep bounding
box objects. If None, the corresponding threshold is not used.
exp_class_names (list of str): expected output class names from filtered labels.
Raises:
AssertionError: If the filtering did not behave as expected.
"""
original_labels = _get_bbox_2d_labels()
label_filter = BboxDimensionsLabelFilter(min_width=min_width,
min_height=min_height,
max_width=max_width,
max_height=max_height)
filtered_labels = apply_label_filters([label_filter], original_labels)
filtered_source_classes = filtered_labels.object_class.values
with tf.compat.v1.Session() as sess:
output_filtered_source_classes = sess.run(filtered_source_classes)
for i in range(len(exp_class_names)):
assert output_filtered_source_classes[i] == exp_class_names[i]
class TestBboxCropLabelFilter:
@pytest.mark.parametrize(
"crop_left,crop_right,crop_top,crop_bottom,is_valid",
[
(0, 0, 0, 0, True),
(0, 10, 0, 10, True),
(20, 10, 10, 20, False),
(None, None, None, None, True),
]
)
def test_bbox_crop_ranges(self, crop_left, crop_right, crop_top, crop_bottom, is_valid):
"""Test that the BboxCropLabelFilter makes the necessary checks.
Args:
crop_left/crop_right/crop_top/crop_bottom: crop coordinates.
is_valid (bool): If True, the instantiation should happen gracefully. If False, an
AssertionError is expected.
"""
if is_valid:
BboxCropLabelFilter(crop_left=crop_left,
crop_right=crop_right,
crop_top=crop_top,
crop_bottom=crop_bottom)
else:
with pytest.raises(ValueError):
BboxCropLabelFilter(crop_left=crop_left,
crop_right=crop_right,
crop_top=crop_top,
crop_bottom=crop_bottom)
@pytest.mark.parametrize(
"x1,x2,y1,y2,params,kept_indices",
[
# Try multiple bounds.
([0., 20., 40.], [10., 30., 50.], [0., 20., 40.], [10., 30., 50.],
{'crop_left': None, 'crop_right': None,
'crop_top': None, 'crop_bottom': None}, [0, 1, 2]),
([0., 20., 40.], [10., 30., 50.], [0., 20., 40.], [10., 30., 50.],
{'crop_left': np.int32(0), 'crop_right': np.int32(0),
'crop_top': np.int32(0), 'crop_bottom': np.int32(0)}, [0, 1, 2]),
([0., 20., 40.], [10., 30., 50.], [0., 20., 40.], [10., 30., 50.],
{'crop_left': np.int32(25), 'crop_right': np.int32(50),
'crop_top': np.int32(25), 'crop_bottom': np.int32(50)}, [1, 2]),
([0., 20., 5.], [10., 25., 15.], [0., 20., 5.], [10., 25., 15.],
{'crop_left': np.int32(25), 'crop_right': np.int32(50),
'crop_top': np.int32(25), 'crop_bottom': np.int32(50)}, []),
([25., 20., 40.], [30., 30., 50.], [25., 20., 40.], [30., 30., 50.],
{'crop_left': np.int32(25), 'crop_right': np.int32(50),
'crop_top': np.int32(25), 'crop_bottom': np.int32(50)}, [0, 1, 2]),
([0., 20., 40.], [10., 30., 50.], [0., 20., 40.], [10., 30., 50.],
{'crop_left': np.int32(5), 'crop_right': np.int32(20),
'crop_top': np.int32(5), 'crop_bottom': np.int32(20)}, [0]),
]
)
def test_bbox_crop_label_filter_dict(self, x1, x2, y1, y2, params, kept_indices):
"""Test the BboxCropLabelFilter.
Args:
x1/x2/y1/y2: bbox coordinates.
params (dict): Contains the keyword arguments with which the BboxCropLabelFilter
will be instantiated.
kept_indices (list): Contains the indices in the input
that should be kept after the filtering happens.
Raises:
AssertionError: if the filtering did not behave as expected.
"""
original_labels = dict()
original_labels['target/object_class'] = tf.constant(
['class_1', 'class_2', 'class_3'])
original_labels['target/bbox_coordinates'] = tf.stack(
[x1, y1, x2, y2], axis=1)
bbox_crop_label_filter = BboxCropLabelFilter(**params)
filtered_labels = apply_label_filters(
[bbox_crop_label_filter], original_labels)
# First, check that all keys are kept.
assert set(original_labels.keys()) == set(filtered_labels.keys())
with tf.compat.v1.Session() as sess:
np_original_labels, np_filtered_labels = sess.run(
[original_labels, filtered_labels])
check_filtered_labels(np_original_labels,
np_filtered_labels, kept_indices)
@pytest.mark.parametrize(
"crop_left,crop_right,crop_top,crop_bottom,exp_class_names",
[
# Case 1: normal case.
(2.0, 3.6, 2.0, 3.8,
[b'unmapped', b'automobile', b'truck']),
            # Case 2: None input - by default, it passes through by returning all Trues.
(2.0, 3.6, 2.0, None,
[b'pedestrian', b'unmapped', b'automobile', b'truck', b'truck'])
]
)
def test_bbox_crop_label_filter_bbox_2d_label(self,
crop_left,
crop_right,
crop_top,
crop_bottom,
exp_class_names):
"""Test the BboxCropLabelFilter with bbox 2d labels.
Args:
crop_left/right/top/bottom: bbox coordinates.
exp_class_names (list of str): expected output class names from filtered labels.
Raises:
AssertionError: if the filtering did not behave as expected.
"""
original_labels = _get_bbox_2d_labels()
label_filter = BboxCropLabelFilter(crop_left=crop_left,
crop_right=crop_right,
crop_top=crop_top,
crop_bottom=crop_bottom)
filtered_labels = apply_label_filters([label_filter], original_labels)
filtered_source_classes = filtered_labels.object_class.values
with tf.compat.v1.Session() as sess:
output_filtered_source_classes = sess.run(filtered_source_classes)
for i in range(len(exp_class_names)):
assert output_filtered_source_classes[i] == exp_class_names[i]
class TestChainedFilters:
@pytest.mark.parametrize(
"source_class_names,heights,widths,bbox_params,object_class_params,mode,kept_indices",
[
# No-op.
(["class_1", "class_2", "class_3"], [123.4, 567.8, 90.1], [23., 456., 78.9],
dict(), dict(), 'or', range(3)),
# Check logical-or when filters are chained.
(["class_1", "class_2", "class_3"], [123.4, 567.8, 90.1], [23., 456., 78.9],
dict(), dict(source_class_names=["class_1", "class_2"]), 'or', range(3)),
# Check logical-and when filters are chained.
(["class_1", "class_2", "class_3"], [123.4, 567.8, 90.1], [23., 456., 78.9],
dict(), dict(source_class_names=["class_1", "class_2"]), 'and', range(2)),
# Now do some actual filtering.
(["class_1", "class_2", "class_3"], [123.4, 567.8, 90.1], [23., 456., 78.9],
# Bbox filter: the only bbox within max_height is the last one.
dict(max_height=np.float64(100.0)),
dict(source_class_names=["class_3"]), 'or', [2]),
# Another 'concrete' filtering.
(["class_1", "class_2", "class_3"], [123.4, 567.8, 90.1], [23., 456., 78.9],
# Bbox filter: only keep the first 2 labels.
dict(min_height=np.float64(100.0)),
# Object class filter: keep 1st and 2nd class.
dict(source_class_names=["class_1", "class_2"]), 'or', [0, 1]),
# A 'concrete' filtering for logical-and.
(["class_1", "class_2", "class_3"], [123.4, 567.8, 90.1], [23., 456., 78.9],
# Bbox filter: only keep the first 2 labels.
dict(min_height=np.float64(100.0)),
# Object class filter: keep 1st and 2nd class.
dict(source_class_names=["class_1", "class_3"]), 'and', [0]),
# Do some width filtering.
(["class_1", "class_2", "class_3"], [123.4, 567.8, 90.1], [23., 456., 78.9],
# Bbox filter: only 2nd label satisfies this.
dict(min_width=np.float64(100.0)),
            # Object class filter: keeps the 2nd and 3rd, so logical-or results in [1, 2] being kept.
dict(source_class_names=["class_2", "class_3"]), 'or', [1, 2]),
]
)
def test_bbox_and_object_class_label_filters_dict(self,
source_class_names,
heights,
widths,
bbox_params,
object_class_params,
mode,
kept_indices):
original_labels = \
_get_dummy_bbox_labels(source_class_names, heights, widths)
# Get the filters.
bbox_dimensions_label_filter = BboxDimensionsLabelFilter(**bbox_params)
source_class_label_filter = SourceClassLabelFilter(
**object_class_params)
# Chain them.
filtered_labels = apply_label_filters([bbox_dimensions_label_filter,
source_class_label_filter],
original_labels, mode)
# First, check that all keys are kept.
assert set(original_labels.keys()) == set(filtered_labels.keys())
with tf.compat.v1.Session() as sess:
np_original_labels, np_filtered_labels = sess.run(
[original_labels, filtered_labels])
check_filtered_labels(np_original_labels,
np_filtered_labels, kept_indices)
@pytest.mark.parametrize(
"mode,exp_class_names",
[
('or', [b'unmapped', b'automobile', b'truck', b'truck']),
('and', [b'automobile'])
]
)
def test_bbox_and_object_class_label_filters_bbox_2d_label(self, mode, exp_class_names):
"""Test chain logic works correctly for multiple label filters with bbox 2d label.
Args:
mode (str): The chain mode, which should be 'or' or 'and'.
Raises:
AssertionError: if the chain did not behave as expected.
"""
original_labels = _get_bbox_2d_labels()
bbox_dimensions_label_filter = BboxDimensionsLabelFilter(min_width=2.0,
min_height=2.0,
max_width=5.0,
max_height=4.0)
source_class_label_filter = \
SourceClassLabelFilter(source_class_names=['truck', 'automobile'])
filtered_labels = apply_label_filters([bbox_dimensions_label_filter,
source_class_label_filter],
original_labels, mode=mode)
filtered_class_names = filtered_labels.object_class.values
sess = tf.compat.v1.Session()
output_filtered_labels = sess.run(filtered_class_names)
for i in range(len(exp_class_names)):
assert output_filtered_labels[i] == exp_class_names[i]
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/label_filter/test_label_filter.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""Height label filter. Filters ground truth objects based on their height."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nvidia_tao_tf1.cv.detectnet_v2.label_filter.base_label_filter import BaseLabelFilter
class BboxDimensionsLabelFilter(BaseLabelFilter):
"""Filter labels based on bounding box dimension thresholds."""
def __init__(self,
min_width=None,
min_height=None,
max_width=None,
max_height=None):
"""Constructor.
Args:
min/max_width/height (float): Thresholds above/below which to keep bounding box objects.
If None, the corresponding threshold is not used.
"""
super(BboxDimensionsLabelFilter, self).__init__()
self.min_width = min_width
self.min_height = min_height
self.max_width = max_width
self.max_height = max_height
# Check bounds if necessary.
if self.min_width is not None and self.max_width is not None:
assert self.min_width < self.max_width, "max_width should be greater than min_width."
if self.min_height is not None and self.max_height is not None:
assert self.min_height < self.max_height, \
"max_height should be greater than min_height."
def is_criterion_satisfied_dict(self, frame_ground_truth_labels):
"""Method that implements the filter criterion as TF.ops.
Only keeps those labels whose bounding boxes' width is in [self.min_width, self.max_width]
and height is in [self.min_height, self.max_height].
Args:
frame_ground_truth_labels (dict of Tensors): contains the labels for a single frame.
Returns:
filtered_indices (bool tf.Tensor): follows indexing in <frame_ground_truth_labels> and,
for each element, is True if it satisfies the criterion.
"""
filtered_indices = \
super(BboxDimensionsLabelFilter, self).\
is_criterion_satisfied_dict(frame_ground_truth_labels)
if {self.min_width, self.min_height, self.max_width, self.max_height} != {None}:
# Retrieve labels' width and height.
x1, y1, x2, y2 = tf.unstack(frame_ground_truth_labels['target/bbox_coordinates'],
axis=1)
width = x2 - x1
height = y2 - y1
# Chain the constraints.
if self.min_width is not None:
filtered_indices = \
tf.logical_and(filtered_indices,
tf.greater_equal(width, tf.constant(self.min_width)))
if self.max_width is not None:
filtered_indices = tf.logical_and(filtered_indices,
tf.less_equal(width, tf.constant(self.max_width)))
if self.min_height is not None:
filtered_indices = tf.logical_and(filtered_indices,
tf.greater_equal(height,
tf.constant(self.min_height)))
if self.max_height is not None:
filtered_indices = \
tf.logical_and(filtered_indices,
tf.less_equal(height, tf.constant(self.max_height)))
return filtered_indices
def is_criterion_satisfied_bbox_2d_label(self, ground_truth_labels):
"""Method that implements the filter criterion as TF.ops.
Only keeps those labels whose bounding boxes' width is in [self.min_width, self.max_width]
and height is in [self.min_height, self.max_height].
Args:
ground_truth_labels (Bbox2DLabel): Contains the labels for all
frames within a batch.
Returns:
filtered_indices (bool tf.Tensor): follows indexing in <ground_truth_labels> and,
for each element, is True if it satisfies the criterion.
"""
filtered_indices = \
super(BboxDimensionsLabelFilter, self).\
is_criterion_satisfied_bbox_2d_label(ground_truth_labels)
if {self.min_width, self.min_height, self.max_width, self.max_height} != {None}:
# Retrieve labels' width and height.
x1 = ground_truth_labels.vertices.coordinates.values[::4]
y1 = ground_truth_labels.vertices.coordinates.values[1::4]
x2 = ground_truth_labels.vertices.coordinates.values[2::4]
y2 = ground_truth_labels.vertices.coordinates.values[3::4]
width = x2 - x1
height = y2 - y1
# Chain the constraints.
if self.min_width is not None:
filtered_indices = \
tf.logical_and(filtered_indices,
tf.greater_equal(width, tf.constant(self.min_width)))
if self.max_width is not None:
filtered_indices = tf.logical_and(filtered_indices,
tf.less_equal(width, tf.constant(self.max_width)))
if self.min_height is not None:
filtered_indices = tf.logical_and(filtered_indices,
tf.greater_equal(height,
tf.constant(self.min_height)))
if self.max_height is not None:
filtered_indices = \
tf.logical_and(filtered_indices,
tf.less_equal(height, tf.constant(self.max_height)))
return filtered_indices
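# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the original module. It mirrors the
# dict-style labels exercised by the unit tests; the feature keys and the
# example values below are assumptions made purely for demonstration.
if __name__ == "__main__":
    from nvidia_tao_tf1.cv.detectnet_v2.label_filter.base_label_filter import (
        apply_label_filters
    )
    labels = {
        'target/object_class': tf.constant(['car', 'person']),
        # Boxes are (x1, y1, x2, y2); the second box is only 5 x 5 pixels.
        'target/bbox_coordinates': tf.constant([[0., 0., 40., 30.],
                                                [0., 0., 5., 5.]])
    }
    bbox_filter = BboxDimensionsLabelFilter(min_width=10.0, min_height=10.0)
    filtered = apply_label_filters([bbox_filter], labels)
    with tf.compat.v1.Session() as sess:
        # Only the first box survives the minimum-size thresholds.
        print(sess.run(filtered['target/object_class']))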
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/label_filter/bbox_dimensions_label_filter.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""
Dataloader base class defining the interface to data loading.
All data loader classes are expected to conform to the interface defined here.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABCMeta, abstractmethod
import six
class BaseDataloader(six.with_metaclass(ABCMeta, object)):
"""Dataloader base class defining the interface to data loading."""
@abstractmethod
def __init__(self,
training_data_source_list,
augmentation_config=None,
validation_fold=None,
validation_data_source_list=None):
"""Instantiate the dataloader.
Args:
training_data_source_list (list): List of tuples (tfrecord_file_pattern,
image_directory_path) to use for training.
augmentation_config (nvidia_tao_tf1.cv.detectnet_v2.dataloader.augmentation_config.
AugmentationConfig): Holds the parameters for augmentation and preprocessing.
validation_fold (int): Indicates which fold from the training data to use as validation.
Can be None.
validation_data_source_list (list): List of tuples (tfrecord_file_pattern,
image_directory_path) to use for validation. Can be None.
"""
pass
@abstractmethod
def get_dataset_tensors(self, batch_size, training, enable_augmentation, repeat=True):
"""Interface for getting tensors for training and validation.
Args:
batch_size (int): Minibatch size.
training (bool): Get samples from the training (True) or validation (False) set.
enable_augmentation (bool): Whether to augment input images and labels.
repeat (bool): Whether the dataset can be looped over multiple times or only once.
"""
pass
@abstractmethod
def get_data_tensor_shape(self):
"""Interface for querying data tensor shape.
Returns:
Data tensor shape as a tuple without the batch dimension.
"""
pass
@abstractmethod
def get_num_samples(self, training):
"""Get number of dataset samples.
Args:
training (bool): Get number of samples in the training (true) or
validation (false) set.
Returns:
Number of samples in the chosen set.
"""
pass
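# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: a toy subclass showing
# which methods a concrete dataloader has to implement. The shapes and counts
# below are arbitrary example values, not real dataset properties.
if __name__ == "__main__":
    class _ToyDataloader(BaseDataloader):
        def __init__(self, training_data_source_list, augmentation_config=None,
                     validation_fold=None, validation_data_source_list=None):
            self._sources = training_data_source_list
        def get_dataset_tensors(self, batch_size, training, enable_augmentation, repeat=True):
            raise NotImplementedError("Toy sketch: no real data to load.")
        def get_data_tensor_shape(self):
            # (C, H, W) without the batch dimension.
            return (3, 544, 960)
        def get_num_samples(self, training):
            return len(self._sources)
    loader = _ToyDataloader(training_data_source_list=[("train*.tfrecord", "/images")])
    print(loader.get_data_tensor_shape(), loader.get_num_samples(training=True))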
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/dataloader/base_dataloader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build a Dataloader from proto."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.augmentation.build_augmentation_config \
import build_augmentation_config
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.data_source_config import DataSourceConfig
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.default_dataloader import DefaultDataloader
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.drivenet_dataloader import DriveNetDataloader
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.legacy_dataloader import LegacyDataloader
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.utilities import get_absolute_data_path
from nvidia_tao_tf1.cv.detectnet_v2.proto.dataset_config_pb2 import DatasetConfig
FOLD_STRING = "fold-{:03d}-of-"
DATALOADER = {
0: DriveNetDataloader,
1: LegacyDataloader,
2: DefaultDataloader
}
SAMPLING_MODE = {
0: "user_defined",
1: "proportional",
2: "uniform"
}
def select_dataset_proto(experiment_spec):
"""Select the dataset proto depending on type defined in the spec.
Args:
experiment_spec: nvidia_tao_tf1.cv.detectnet_v2.proto.experiment proto message.
Returns:
A dataset_proto depending on type defined in the spec.
"""
if hasattr(experiment_spec, 'dataset_config'):
return experiment_spec.dataset_config
raise ValueError("Invalid experiment spec file. Dataset config not mentioned.")
def _pattern_to_files(pattern):
"""Convert a file pattern to a list of absolute file paths.
Args:
pattern (str): File pattern.
Returns:
A list of absolute file paths.
"""
abs_pattern = get_absolute_data_path(pattern)
# Convert pattern to list of files.
files = glob.glob(abs_pattern)
assert len(files) > 0, \
"No files match pattern {}.".format(abs_pattern)
return files
def build_data_source_lists(dataset_proto):
"""Build training and validation data source lists from proto.
Args:
dataset_proto (nvidia_tao_tf1.cv.detectnet_v2.proto.dataset_config proto message)
Returns:
training_data_source_list (list): List of tuples (tfrecord_file_pattern,
image_directory_path) to use for training.
validation_data_source_list (list): List of tuples (tfrecord_file_pattern,
image_directory_path) to use for validation. Can be None.
validation_fold (int): Validation fold number (0-based). Indicates which fold from the
training data to use as validation. Can be None.
"""
# Determine how we are getting validation data sources.
dataset_split_type = dataset_proto.WhichOneof('dataset_split_type')
training_data_source_list = []
validation_data_source_list = []
validation_fold = None
for data_source_proto in dataset_proto.data_sources:
source_weight = data_source_proto.source_weight
images_path = get_absolute_data_path(
str(data_source_proto.image_directory_path)
)
tfrecords_path = str(data_source_proto.tfrecords_path)
tfrecords_files = _pattern_to_files(tfrecords_path)
# Filter out files based on validation fold only if validation fold specified.
if dataset_split_type == "validation_fold":
# Defining the fold number for the glob pattern.
fold_identifier = FOLD_STRING.format(dataset_proto.validation_fold)
validation_fold = dataset_proto.validation_fold
# Take all .tfrecords files except the one matching the validation fold.
training_tfrecords_files = [filename for filename in tfrecords_files
if fold_identifier not in filename]
# Take only the file matching the validation fold.
validation_tfrecords_files = [filename for filename in tfrecords_files
if fold_identifier in filename]
validation_data_source_list.append(DataSourceConfig(
dataset_type='tfrecord',
dataset_files=validation_tfrecords_files,
images_path=images_path,
export_format=None,
split_db_path=None,
split_tags=None,
source_weight=source_weight))
else:
training_tfrecords_files = tfrecords_files
training_data_source_list.append(DataSourceConfig(
dataset_type='tfrecord',
dataset_files=training_tfrecords_files,
images_path=images_path,
export_format=None,
split_db_path=None,
split_tags=None,
source_weight=source_weight))
# Get validation data sources, if available.
if dataset_split_type == "validation_data_source":
data_source_proto = dataset_proto.validation_data_source
source_weight = data_source_proto.source_weight
images_path = get_absolute_data_path(
str(data_source_proto.image_directory_path)
)
tfrecords_path = str(data_source_proto.tfrecords_path)
tfrecords_files = _pattern_to_files(tfrecords_path)
validation_data_source_list.append(DataSourceConfig(
dataset_type='tfrecord',
dataset_files=tfrecords_files,
images_path=images_path,
export_format=None,
split_db_path=None,
split_tags=None,
source_weight=source_weight))
return training_data_source_list, validation_data_source_list, validation_fold
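# Illustrative example, not part of the original module: with validation_fold = 0
# and shard files named e.g. <name>-fold-000-of-002-* and <name>-fold-001-of-002-*,
# FOLD_STRING yields the identifier "fold-000-of-", so the fold-000 shards are routed
# to the validation source list while every other shard stays in training.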
def build_legacy_data_source_lists(dataset_proto):
"""Build training and validation data source lists from proto.
Args:
dataset_proto (nvidia_tao_tf1.cv.detectnet_v2.proto.dataset_config proto message)
Returns:
training_data_source_list (list): List of tuples (tfrecord_file_pattern,
image_directory_path) to use for training.
validation_data_source_list (list): List of tuples (tfrecord_file_pattern,
image_directory_path) to use for validation. Can be None.
validation_fold (int): Validation fold number (0-based). Indicates which fold from the
training data to use as validation. Can be None.
"""
# Determine how we are getting validation data sources.
dataset_split_type = dataset_proto.WhichOneof('dataset_split_type')
def build_data_source_list(data_source_proto):
"""Build a list of data sources from a DataSource proto.
Args:
data_source_proto (nvidia_tao_tf1.cv.detectnet_v2.proto.DataSource).
Returns:
Tuple (tfrecord_file_pattern, image_directory_path).
"""
return str(data_source_proto.tfrecords_path), str(data_source_proto.image_directory_path)
training_data_source_list = map(
build_data_source_list, dataset_proto.data_sources)
# Get the validation data sources, depending on the scenario.
if dataset_split_type == "validation_fold":
validation_fold = dataset_proto.validation_fold
validation_data_source_list = None
elif dataset_split_type == "validation_data_source":
validation_fold = None
validation_data_source_list = [build_data_source_list(
dataset_proto.validation_data_source)]
else:
validation_fold = None
validation_data_source_list = None
return training_data_source_list, validation_data_source_list, validation_fold
def build_dataloader(dataset_proto, augmentation_proto):
"""Build a Dataloader from a proto.
Args:
dataset_proto (nvidia_tao_tf1.cv.detectnet_v2.proto.dataset_config.DatasetConfig)
augmentation_proto (nvidia_tao_tf1.cv.detectnet_v2.proto.augmentation_config.
AugmentationConfig)
Returns:
dataloader (nvidia_tao_tf1.cv.detectnet_v2.dataloader.default_dataloader.DefaultDataloader).
"""
if isinstance(dataset_proto, DatasetConfig):
dataset_config = dataset_proto
else:
raise ValueError('Unsupported dataset_proto message.')
# First, build the augmentation related parameters.
augmentation_config = build_augmentation_config(augmentation_proto)
# Now, get the class mapping.
source_to_target_class_mapping = dict(dataset_config.target_class_mapping)
# Image file encoding.
image_file_encoding = dataset_config.image_extension
# Get the data source lists.
training_data_source_list, validation_data_source_list, validation_fold = \
build_data_source_lists(dataset_config)
try:
assert isinstance(dataset_proto, DatasetConfig)
    except AssertionError:
raise NotImplementedError("Unsupported dataloader called for.")
dataloader_mode = dataset_config.dataloader_mode
sampling_mode = dataset_config.sampling_mode
dataloader_kwargs = dict(
training_data_source_list=training_data_source_list,
image_file_encoding=image_file_encoding,
augmentation_config=augmentation_config
)
if dataloader_mode == 1:
# Generate the legacy dataloader from TLT v1.0.
training_data_source_list, validation_data_source_list, validation_fold = \
build_legacy_data_source_lists(dataset_config)
dataloader_kwargs = dict(
training_data_source_list=training_data_source_list,
target_class_mapping=source_to_target_class_mapping,
image_file_encoding=image_file_encoding,
augmentation_config=augmentation_config,
validation_fold=validation_fold,
validation_data_source_list=validation_data_source_list)
elif dataloader_mode == 2:
# Generate default dataloader with dict features.
dataloader_kwargs.update(dict(
validation_fold=validation_fold,
target_class_mapping=source_to_target_class_mapping,
validation_data_source_list=validation_data_source_list
))
else:
# Generate the multisource dataloader with sampling.
dataloader_kwargs.update(dict(
validation_data_source_list=validation_data_source_list,
target_class_mapping=source_to_target_class_mapping,
auto_resize=augmentation_proto.preprocessing.enable_auto_resize,
sampling_mode=SAMPLING_MODE[sampling_mode]
))
return DATALOADER[dataloader_mode](**dataloader_kwargs)
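# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the original module. The spec path and
# the augmentation_config field name are assumptions; the sketch follows the
# pattern used by the DetectNet_v2 entry points, which parse the experiment
# spec before building the dataloader.
#
#     from nvidia_tao_tf1.cv.detectnet_v2.spec_handler.spec_loader import load_experiment_spec
#     experiment_spec = load_experiment_spec("detectnet_v2_train.txt")
#     dataset_proto = select_dataset_proto(experiment_spec)
#     dataloader = build_dataloader(
#         dataset_proto=dataset_proto,
#         augmentation_proto=experiment_spec.augmentation_config)
#     images, labels, num_samples = dataloader.get_dataset_tensors(
#         batch_size=4, training=True, enable_augmentation=True)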
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/dataloader/build_dataloader.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""Default dataloader for DetectNet V2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
import glob
import logging
from keras import backend as K
import nvidia_tao_tf1.core
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.augment import (
apply_all_transformations_to_image,
apply_spatial_transformations_to_bboxes,
get_all_transformations_matrices,
get_transformation_ops
)
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.base_dataloader import BaseDataloader
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.process_markers import augment_orientation_labels
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.process_markers import INVALID_ORIENTATION
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.process_markers import map_markers_to_orientations
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.utilities import extract_tfrecords_features
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.utilities import get_absolute_data_path
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.utilities import get_num_samples
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.utilities import get_tfrecords_iterator
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.utilities import process_image_for_dnn_input
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.utilities import read_image
from nvidia_tao_tf1.cv.detectnet_v2.label_filter.base_label_filter import apply_label_filters
from nvidia_tao_tf1.cv.detectnet_v2.label_filter.bbox_crop_label_filter import BboxCropLabelFilter
from nvidia_tao_tf1.cv.detectnet_v2.label_filter.bbox_dimensions_label_filter import (
BboxDimensionsLabelFilter
)
import six
import tensorflow as tf
# Constants used for reading required features from tfrecords.
FRAME_ID_KEY = 'frame/id'
HEIGHT_KEY = 'frame/height'
WIDTH_KEY = 'frame/width'
UNKNOWN_CLASS = '-1'
FOLD_STRING = "fold-{:03d}-of-"
logger = logging.getLogger(__name__)
class LegacyDataloader(BaseDataloader):
"""Legacy dataloader for object detection datasets such as KITTI and Cyclops.
Implements a data loader that reads labels and frame id from .tfrecords files and compiles
image and ground truth tensors used in training and validation.
"""
def __init__(self,
training_data_source_list,
target_class_mapping,
image_file_encoding,
augmentation_config,
validation_fold=None,
validation_data_source_list=None):
"""Instantiate the dataloader.
Args:
training_data_source_list (list): List of tuples (tfrecord_file_pattern,
image_directory_path) to use for training.
target_class_mapping (dict): maps from source class to target class (both str).
image_file_encoding (str): How the images to be produced by the dataset are encoded.
Can be e.g. "jpg", "fp16", "png".
augmentation_config (nvidia_tao_tf1.cv.detectnet_v2.dataloader.augmentation_config.
AugmentationConfig): Holds the parameters for augmentation and preprocessing.
validation_fold (int): Validation fold number (0-based). Indicates which fold from the
training data to use as validation. Can be None.
validation_data_source_list (list): List of tuples (tfrecord_file_pattern,
image_directory_path) to use for validation. Can be None.
"""
self.target_class_mapping = target_class_mapping
self.image_file_encoding = image_file_encoding
self.augmentation_config = augmentation_config
self.validation_fold = validation_fold
# Get training data sources.
self.training_data_sources = \
self.get_data_sources(data_source_list=training_data_source_list,
validation_fold=self.validation_fold, training=True)
# Now, potentially, get the validation data sources.
self.validation_data_sources = []
if self.validation_fold is not None:
# Take one fold from training data as validation.
self.validation_data_sources.extend(
self.get_data_sources(data_source_list=training_data_source_list,
validation_fold=self.validation_fold, training=False))
if validation_data_source_list is not None:
# TODO(@williamz): This "training=True" part is really confusing.
self.validation_data_sources.extend(
self.get_data_sources(data_source_list=validation_data_source_list,
validation_fold=None, training=True))
if augmentation_config is None:
self.num_input_channels = 3
else:
self.num_input_channels = self.augmentation_config.preprocessing.output_image_channel
assert(self.num_input_channels in [1, 3]), "Set the output_image_channel param to 1 " \
"or 3 in the augmentation config."
# TODO(@williamz): Why do some tests supply None as augmentation_config? Should we make
# a kwarg?
if augmentation_config is None:
bbox_dimensions_label_filter_kwargs = dict()
bbox_crop_label_filter_kwargs = dict()
else:
bbox_dimensions_label_filter_kwargs = {
'min_width': self.augmentation_config.preprocessing.min_bbox_width,
'min_height': self.augmentation_config.preprocessing.min_bbox_height}
bbox_crop_label_filter_kwargs = {
'crop_left': 0,
'crop_right': self.augmentation_config.preprocessing.output_image_width,
'crop_top': 0,
'crop_bottom': self.augmentation_config.preprocessing.output_image_height}
self._bbox_dimensions_label_filter = \
BboxDimensionsLabelFilter(**bbox_dimensions_label_filter_kwargs)
self._bbox_crop_label_filter = \
BboxCropLabelFilter(**bbox_crop_label_filter_kwargs)
self.target_class_to_source_classes_mapping = defaultdict(list)
for source_class_name, target_class_name in \
six.iteritems(self.target_class_mapping):
# TODO: @vpraveen Update this from lower to sentence case for running with
# JSON based records.
self.target_class_to_source_classes_mapping[target_class_name].\
append(source_class_name.lower())
self.target_class_to_source_classes_mapping = \
dict(self.target_class_to_source_classes_mapping)
# Get the transformation ops.
self._stm_op, self._ctm_op = get_transformation_ops()
def get_data_tensor_shape(self):
"""Interface for querying data tensor shape.
Returns:
Data tensor shape as a (C,H,W) tuple without the batch dimension.
"""
return (self.num_input_channels,
self.augmentation_config.preprocessing.output_image_height,
self.augmentation_config.preprocessing.output_image_width)
def get_num_samples(self, training):
"""Get number of dataset samples.
Args:
training (bool): Get number of samples in the training (true) or
validation (false) set.
Returns:
Number of samples in the chosen set.
"""
data_sources = self.training_data_sources if training else self.validation_data_sources
# In case file list is empty, don't load anything.
if len(data_sources) == 0:
return 0
return get_num_samples(data_sources, training)
def get_dataset_tensors(self, batch_size, training, enable_augmentation, repeat=True):
"""Get input images and ground truth labels as tensors for training and validation.
Returns also the number of minibatches required to loop over the dataset once.
Args:
batch_size (int): Minibatch size.
training (bool): Get samples from the training (True) or validation (False) set.
enable_augmentation (bool): Whether to augment input images and labels.
repeat (bool): Whether the dataset can be looped over multiple times or only once.
Returns:
images (Tensor of shape (batch, channels, height, width)): Input images with values
in the [0, 1] range.
ground_truth_labels (list of dicts of Tensors): Each element in this list corresponds
to the augmented and filtered labels in a frame.
num_samples (int): Total number of samples found in the dataset.
"""
data_sources = self.training_data_sources if training else self.validation_data_sources
# In case file list is empty, don't load anything
if len(data_sources) == 0:
return None, None, 0
# Get the location of .tfrecords files for each validation fold and the location of images
tfrecords_iterator, num_samples = get_tfrecords_iterator(data_sources,
batch_size,
training=training,
repeat=repeat)
# Extract features from a sample tfrecords file. These features are then read from all
# tfrecords files.
tfrecords_file = None
# Find the first tfrecord file.
for tfrecords_file, _ in data_sources:
if tfrecords_file:
tfrecords_file = tfrecords_file[0]
break
assert tfrecords_file, "No valid tfrecords files found in %s" % data_sources
self.extracted_features = extract_tfrecords_features(tfrecords_file)
# Generate augmented input images and ground truth labels.
images, ground_truth_labels =\
self._generate_images_and_ground_truth_labels(tfrecords_iterator,
enable_augmentation)
# DNN input data type has to match the computation precision.
images = tf.cast(images, dtype=K.floatx())
return images, ground_truth_labels, num_samples
def _generate_images_and_ground_truth_labels(self, tfrecords_iterator,
enable_augmentation=False):
"""Return generators for input image and output target tensors.
Args:
tfrecords_iterator (TFRecordsIterator): Iterator for dataset .tfrecords files.
enable_augmentation (bool): Augment input images and ground truths.
Returns:
images (Tensor of shape (batch, channels, height, width)): Input images with values
in the [0, 1] range.
ground_truth_labels (list of dicts of Tensors): Each dict contains e.g. tensors for
the augmented bbox coordinates, their class name, etc.
"""
# Create the proto parser.
parse_example_proto_layer = self._get_parse_example_proto()
# We first yield our tfrecords, by calling the processor we created earlier.
# This will return a tuple - list of individual samples, that all contain 1 record
# and a list of image directory paths, 1 for each record.
records, img_dirs, source_weights = tfrecords_iterator()
# Loop over each record, and deserialize the example proto. This will yield the tensors.
# Both the number of records and the loop's length are the same as the batch size.
# We are repeating the same operation for each item in the batch. Our batch size is hence
# fixed.
images = []
ground_truth_labels = []
for record, img_dir, source_weight in zip(records, img_dirs, source_weights):
# Deserialize the record. It will yield a dictionary of items in this proto.
# Inside this (now deserialized) Example is the image, label, metadata, etc.
example = parse_example_proto_layer(record) # Returns a dict.
# Load network input image tensors.
image = self._load_input_tensors(example, img_dir)
# Map target classes in the datasource to target classes of the model.
example = self._map_to_model_target_classes(
example, self.target_class_mapping)
# Now get additional labels.
additional_labels = self._translate_additional_labels(example)
# Retrieve the augmentation matrices.
sm, cm = get_all_transformations_matrices(self.augmentation_config,
enable_augmentation)
# Apply augmentations to input image tensors.
image, rmat = self._apply_augmentations_to_input_tensors(
image, sm, cm, example)
# Apply augmentations to ground truth labels.
labels = self._apply_augmentations_to_ground_truth_labels(
example, sm, rmat, tf.shape(image))
# Apply augmentations to additional labels.
additional_labels = self._apply_augmentations_to_additional_labels(
additional_labels, sm)
labels.update(additional_labels)
# Do possible label filtering.
labels = apply_label_filters(
label_filters=[self._bbox_dimensions_label_filter,
self._bbox_crop_label_filter],
ground_truth_labels=labels, mode='and')
labels["source_weight"] = source_weight
images.append(image)
ground_truth_labels.append(labels)
# Zip together the results as extracted on a per-sample basis to one entire batch.
# What happened beforehand, on a per-image basis, happened in parallel
# for each sample individually. From this point on, we are working with batches.
images = tf.stack(images, axis=0)
return images, ground_truth_labels
def _get_parse_example_proto(self):
"""Get the maglev example proto parser.
Returns:
nvidia_tao_tf1.core.processors.ParseExampleProto object to parse example(s).
"""
return nvidia_tao_tf1.core.processors.ParseExampleProto(
features=self.extracted_features, single=True)
def _apply_augmentations_to_input_tensors(self, input_tensors, sm, cm, example):
"""
Apply augmentations to input image tensors.
Args:
input_tensors (3-D Tensor, HWC): Input image tensors.
sm (2-D Tensor): 3x3 spatial transformation/augmentation matrix.
cm (2-D Tensor): 3x3 color augmentation matrix.
example: tf.train.Example protobuf message. (Unused here but used in subclasses.)
Returns:
image (Tensor, CHW): Augmented input tensor. The values are scaled between [0, 1].
rmat: Matrix that transforms from augmented space to the original image space.
"""
# Apply cropping, zero padding, resizing, and color and spatial augmentations to images.
image, rmat = apply_all_transformations_to_image(self.augmentation_config.
preprocessing.output_image_height,
self.augmentation_config.
preprocessing.output_image_width,
self._stm_op, self._ctm_op,
sm, cm, input_tensors,
self.num_input_channels)
        # Convert the augmented image from HWC (channels_last) to CHW for DNN input.
image = process_image_for_dnn_input(image)
return image, rmat
def _apply_augmentations_to_ground_truth_labels(self, example, sm, rmat, image_shape):
"""
Apply augmentations to ground truth labels.
Args:
example: tf.train.Example protobuf message.
sm (2-D Tensor): 3x3 spatial transformation/augmentation matrix.
rmat (Tensor): 3x3 matrix that transforms from augmented space to the original
image space.
image_shape (Tensor): Image shape.
Returns:
augmented_labels (dict): Ground truth labels for the frame, after preprocessing and /
or augmentation have been applied.
"""
augmented_labels = dict()
# if not self.augmentation_config.preprocessing.input_mono:
xmin, ymin, xmax, ymax = \
apply_spatial_transformations_to_bboxes(
sm, example['target/coordinates_x1'], example['target/coordinates_y1'],
example['target/coordinates_x2'], example['target/coordinates_y2'])
augmented_labels['target/bbox_coordinates'] = tf.stack(
[xmin, ymin, xmax, ymax], axis=1)
# TODO(@williamz): Remove the need for this redundancy.
augmented_labels['target/coordinates_x1'] = xmin
augmented_labels['target/coordinates_y1'] = ymin
augmented_labels['target/coordinates_x2'] = xmax
augmented_labels['target/coordinates_y2'] = ymax
# Used as a frame metadata in evaluation.
image_dimensions = tf.stack([image_shape[1:][::-1]])
# Compile ground truth data to a list of dicts used in training and validation.
augmented_labels['frame/augmented_to_input_matrices'] = rmat
augmented_labels['frame/image_dimensions'] = image_dimensions
# For anything that is unaffected by augmentation or preprocessing, forward it through.
for feature_name, feature_tensor in six.iteritems(example):
if feature_name not in augmented_labels:
augmented_labels[feature_name] = feature_tensor
# Update bbox and truncation info in example.
# Clip cropped coordinates to image boundary.
image_height = self.augmentation_config.preprocessing.output_image_height
image_width = self.augmentation_config.preprocessing.output_image_width
# if not self.augmentation_config.preprocessing.input_mono:
augmented_labels = self._update_example_after_crop(crop_left=0,
crop_right=image_width, crop_top=0,
crop_bottom=image_height,
example=augmented_labels)
return augmented_labels
def _load_input_tensors(self, example, file_dir):
"""
Return a generator for the input image tensors.
Args:
example: tf.train.Example protobuf message.
file_dir (string): Dataset input image directory.
Returns:
image (3-D Tensor, HWC): The image.
"""
# Reshape image_path to have rank 0 as expected by TensorFlow's ReadFile.
image_path = tf.string_join([file_dir, example[FRAME_ID_KEY]])
image_path = tf.reshape(image_path, [])
height, width = tf.reshape(example[HEIGHT_KEY], []), \
tf.reshape(example[WIDTH_KEY], [])
image_path = tf.string_join(
[image_path, '.' + self.image_file_encoding])
image = read_image(image_path, self.image_file_encoding, self.num_input_channels,
width, height)
return image
def _map_to_model_target_classes(self, example, target_class_mapping):
"""Map object classes as they are defined in the data source to the model target classes.
Args:
example (tf.train.Example): Labels for one sample.
target_class_mapping: Protobuf map.
Returns
example (tf.train.Example): Labels where data source target classes are mapped to
model target classes. If target_class_mapping is not defined, then example is
unchanged.
"""
datasource_target_classes = list(target_class_mapping.keys())
if len(datasource_target_classes) > 0:
mapped_target_classes = list(target_class_mapping.values())
default_value = tf.constant(UNKNOWN_CLASS)
lookup = nvidia_tao_tf1.core.processors.LookupTable(keys=datasource_target_classes,
values=mapped_target_classes,
default_value=default_value)
# Retain source class.
example['target/source_class'] = example['target/object_class']
# Overwrite 'object_class' with mapped target class.
new_target_classes = lookup(example['target/object_class'])
example['target/object_class'] = new_target_classes
return example
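    # Illustrative mapping example, not part of the original module: with
    # target_class_mapping = {'automobile': 'car', 'van': 'car'}, source labels
    # ['automobile', 'van', 'person'] become ['car', 'car', '-1'], where '-1' is
    # the UNKNOWN_CLASS fallback for unmapped classes and the original names are
    # kept under 'target/source_class'.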
@staticmethod
def _update_example_after_crop(crop_left, crop_right, crop_top, crop_bottom, example):
"""Update bbox and truncation_type according to cropping preprocess.
Args:
crop_left/crop_right/crop_top/crop_bottom (int): crop rectangle coordinates.
example (tf.train.Example): Labels for one sample.
Returns
example (tf.train.Example): Labels where bbox and truncation_type are updated according
to crop preprocess.
Raises:
ValueError: if crop_left > crop_right, or crop_top > crop_bottom, raise error.
"""
if all(item == 0 for item in [crop_left, crop_right, crop_top, crop_bottom]):
return example
if crop_left > crop_right or crop_top > crop_bottom:
raise ValueError(
"crop_right/crop_bottom should be larger than crop_left/crop_top.")
crop_left = tf.cast(crop_left, tf.float32)
crop_right = tf.cast(crop_right, tf.float32)
crop_top = tf.cast(crop_top, tf.float32)
crop_bottom = tf.cast(crop_bottom, tf.float32)
# The coordinates have their origin as (0, 0) in the image.
x1, y1, x2, y2 = tf.unstack(example['target/bbox_coordinates'], axis=1)
if 'target/truncation_type' in example:
# Update Truncation Type of truncated objects.
overlap = tf.ones_like(
example['target/object_class'], dtype=tf.bool)
overlap = tf.logical_and(overlap, tf.less(x1, crop_right))
overlap = tf.logical_and(overlap, tf.greater(x2, crop_left))
overlap = tf.logical_and(overlap, tf.less(y1, crop_bottom))
overlap = tf.logical_and(overlap, tf.greater(y2, crop_top))
truncated = tf.zeros_like(
example['target/truncation_type'], dtype=tf.bool)
truncated = tf.logical_or(truncated,
tf.logical_and(overlap, tf.less(x1, crop_left)))
truncated = tf.logical_or(truncated,
tf.logical_and(overlap, tf.greater(x2, crop_right)))
truncated = tf.logical_or(truncated,
tf.logical_and(overlap, tf.less(y1, crop_top)))
truncated = tf.logical_or(truncated,
tf.logical_and(overlap, tf.greater(y2, crop_bottom)))
truncation_type =\
tf.logical_or(truncated, tf.cast(
example['target/truncation_type'], dtype=tf.bool))
example['target/truncation_type'] = tf.cast(
truncation_type, dtype=tf.int32)
elif 'target/truncation' in example:
logger.debug("target/truncation is not updated to match the crop area "
"if the dataset contains target/truncation.")
# Update bbox coordinates.
new_x1 = tf.maximum(x1, crop_left)
new_x2 = tf.minimum(x2, crop_right)
new_y1 = tf.maximum(y1, crop_top)
new_y2 = tf.minimum(y2, crop_bottom)
new_augmented_coordinates = tf.stack(
[new_x1, new_y1, new_x2, new_y2], axis=1)
example.update({'target/bbox_coordinates': new_augmented_coordinates,
'target/coordinates_x1': new_x1,
'target/coordinates_x2': new_x2,
'target/coordinates_y1': new_y1,
'target/coordinates_y2': new_y2})
return example
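    # Illustrative example, not part of the original module: with a crop rect of
    # (left, top, right, bottom) = (0, 0, 960, 544) and a box (x1, y1, x2, y2) =
    # (-20., 10., 100., 600.), the clipped box becomes (0., 10., 100., 544.) and,
    # when 'target/truncation_type' is present, the object is flagged as truncated
    # because it extends past the left and bottom crop boundaries.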
def _translate_additional_labels(self, labels):
"""Translate additional labels if required.
This private helper takes care of parsing labels on top of those needed for 'bare 2D'
detection, and translating them to the domain expected by the model.
E.g. This can translate (front, back) markers to an orientation value.
Args:
labels (dict): Keys are label feature names, values the corresponding tf.Tensor.
Returns:
additional_labels (dict): Keys are label feature names produced from the translation,
the values the corresponding tf.Tensor.
"""
additional_labels = dict()
if 'target/orientation' not in labels:
if 'target/front' in labels and 'target/back' in labels:
orientation = \
map_markers_to_orientations(
front_markers=labels['target/front'],
back_markers=labels['target/back'])
additional_labels['target/orientation'] = orientation
else:
additional_labels['target/orientation'] = \
tf.ones(
tf.shape(labels['target/object_class'])) * INVALID_ORIENTATION
return additional_labels
def _apply_augmentations_to_additional_labels(self, additional_labels, stm):
"""Apply augmentations to additional labels.
This private helper applies augmentations (currently only spatial augmentations) to those
labels produced by _translate_additional_labels().
Args:
additional_labels (dict): Keys are (additional) label feature names, values the
corresponding tf.Tensor.
stm (tf.Tensor): 3x3 Spatial transformation matrix.
Returns:
augmented_additional_labels (dict): Keys are the same as <additional_labels>, values the
corresponding tf.Tensor with augmentation applied to them.
"""
augmented_additional_labels = dict()
if 'target/orientation' in additional_labels:
augmented_orientation_labels = \
augment_orientation_labels(
additional_labels['target/orientation'], stm)
augmented_additional_labels['target/orientation'] = augmented_orientation_labels
return augmented_additional_labels
@staticmethod
def get_data_sources(data_source_list, validation_fold, training):
"""Get data sources.
Args:
data_source_list: (list) List of tuples (tfrecord_file_pattern, image_directory_path).
validation_fold: (int) Validation fold number (0-based), can be None.
training: (bool) Whether or not this call pertains to building a training set.
Returns:
data_sources: (list) List of tuples (list_of_tfrecord_files, image_directory_path).
Raises:
AssertionError: If specified data sources were not found.
"""
# No validation fold specified and training False means no validation data.
if validation_fold is None and not training:
return [([], [])]
data_sources = []
for tfrecords_pattern, image_dir_path in data_source_list:
# Convert both to absolute paths.
abs_tfrecords_pattern = get_absolute_data_path(tfrecords_pattern)
abs_image_dir_path = get_absolute_data_path(image_dir_path)
# Convert pattern to list of files.
tfrecords_paths = glob.glob(abs_tfrecords_pattern)
assert len(tfrecords_paths) > 0, \
"No tfrecord files match pattern {}.".format(
abs_tfrecords_pattern)
# Filter out files based on validation fold only if validation fold specified.
if validation_fold is not None:
fold_identifier = FOLD_STRING.format(validation_fold)
if training:
                    # Take all .tfrecords files except the one matching the validation fold.
tfrecords_paths = [filename for filename in tfrecords_paths
if fold_identifier not in filename]
else:
                    # Take only the file matching the validation fold.
tfrecords_paths = [filename for filename in tfrecords_paths
if fold_identifier in filename]
                    assert len(tfrecords_paths) != 0, "Cannot find val tfrecords for fold {} "\
"for tfrecord: {}. Please check the validation fold number and retry".\
format(validation_fold, tfrecords_pattern)
data_sources.append((tfrecords_paths, abs_image_dir_path))
return data_sources
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/dataloader/legacy_dataloader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data source config class for DriveNet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class DataSourceConfig(object):
"""Hold all data source related parameters."""
def __init__(self,
dataset_type,
dataset_files,
images_path,
export_format,
split_db_path,
split_tags,
source_weight=1.0,
minimum_target_class_imbalance=None,
num_duplicates=0,
skip_empty_frames=False,
ignored_classifiers_for_skip=None,
additional_conditions=None):
"""Constructor.
Args:
dataset_type (string): Currently only 'tfrecord' and 'sqlite' are supported.
dataset_files (list): A list of absolute paths to dataset files. In case of
tfrecords, a list of absolute paths to .tfrecord files.
images_path (string): Absolute path to images directory.
export_format (string): (SQL only) Image format name.
split_db_path (string): (SQL only) Path to split database.
split_tags (list of strings): (SQL only) A list of split tags (eg. ['train'] or
['val0', 'val1']).
source_weight (float): Value by which to weight the loss for samples
coming from this DataSource.
minimum_target_class_imbalance (map<string, float>): Minimum ratio
(#dominant_class_instances/#target_class_instances) criteria for duplication
of frames. The string is the non-dominant class name and the float is the
ratio for duplication.
num_duplicates (int): Number of duplicates of frames to be added, if the frame
                satisfies the minimum_target_class_imbalance.
skip_empty_frames (bool): Whether to ignore empty frames (i.e frames without relevant
                features). By default, False, i.e. all frames are returned.
ignored_classifiers_for_skip (set): Names of classifiers to ignore when
                considering if a frame is empty, i.e. if a frame only has these classes, it is still
regarded as empty.
additional_conditions (list): List of additional sql conditions for a 'where' clause.
It's only for SqliteDataSource, and other data sources will ignore it.
"""
self.dataset_type = dataset_type
self.dataset_files = dataset_files
self.images_path = images_path
self.export_format = export_format
self.split_db_path = split_db_path
self.split_tags = split_tags
if source_weight < 0.0:
raise ValueError("source_weight cannot be negative value")
elif source_weight == 0.0:
# Assume it was meant to be 1.0.
self.source_weight = 1.0
else:
self.source_weight = source_weight
self.minimum_target_class_imbalance = minimum_target_class_imbalance
self.num_duplicates = num_duplicates
self.skip_empty_frames = skip_empty_frames
self.ignored_classifiers_for_skip = ignored_classifiers_for_skip
self.additional_conditions = additional_conditions
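# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: a tfrecord-backed source,
# mirroring how build_dataloader assembles DataSourceConfig objects. The file
# paths are placeholders; the SQL-only fields are None for this dataset type.
if __name__ == "__main__":
    source = DataSourceConfig(
        dataset_type='tfrecord',
        dataset_files=['/data/tfrecords/kitti-fold-000-of-002'],
        images_path='/data/training/images',
        export_format=None,
        split_db_path=None,
        split_tags=None,
        source_weight=2.0)  # Loss from samples of this source is weighted by 2.
    print(source.dataset_type, source.source_weight)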
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/dataloader/data_source_config.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""Defines functions and classes for the DetectNet V2 dataloader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/dataloader/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data augmentation for DetectNet V2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nvidia_tao_tf1.core.processors import ColorTransform
from nvidia_tao_tf1.core.processors import SpatialTransform
from nvidia_tao_tf1.core.processors.augment import spatial
from nvidia_tao_tf1.core.processors.augment.color import get_random_color_transformation_matrix
from nvidia_tao_tf1.core.processors.augment.spatial import get_random_spatial_transformation_matrix
import tensorflow as tf
def _matrix3(values):
return tf.reshape(tf.stack(tf.cast(values, tf.float32)), [3, 3])
def _matrix4(values):
return tf.reshape(tf.stack(tf.cast(values, tf.float32)), [4, 4])
def _crop_image(crop_left, crop_top, crop_right, crop_bottom):
"""
Compute spatial transformation matrix and output image size for cropping.
All the crop positions specified are in the original image space.
Args:
crop_left: (int) left boundary of the cropped part.
crop_top: (int) top boundary of the cropped part.
crop_right: (int) right boundary of the cropped part.
crop_bottom: (int) bottom boundary of the cropped part.
Returns:
stm: (tf.Tensor) spatial transformation matrix
output_width: (int) width of the resulting cropped image
output_height: (int) height of the resulting cropped image
"""
# Compute spatial transformation matrix (translation).
crop_matrix = _matrix3([1., 0., 0.,
0., 1., 0.,
-crop_left, -crop_top, 1.])
# Compute output image dimensions.
output_width = crop_right - crop_left
output_height = crop_bottom - crop_top
return crop_matrix, output_width, output_height
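# Worked example (illustrative sketch, not part of the module): cropping the
# region with left=10, top=20, right=110, bottom=220 yields a pure translation
# in the row-vector convention used throughout this module, plus the cropped
# output size:
#
#     crop_matrix, out_w, out_h = _crop_image(10., 20., 110., 220.)
#     # crop_matrix == [[  1.,   0., 0.],
#     #                 [  0.,   1., 0.],
#     #                 [-10., -20., 1.]]
#     # out_w == 100.0, out_h == 200.0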
def get_transformation_ops():
"""Generate ops which will apply spatial / color transformations.
Returns:
stm_op: spatial transformation op.
ctm_op: color transformation op.
"""
# Set up spatial transform op.
stm_op = SpatialTransform(method='bilinear', background_value=0.0, data_format='channels_last')
# Set up color transform op.
# Note that its output is always normalized to [0,1] range.
ctm_op = ColorTransform(min_clip=0.0, max_clip=1.0, data_format='channels_last')
return stm_op, ctm_op
def get_spatial_preprocessing_matrix(crop_left, crop_top, crop_right, crop_bottom,
scale_width, scale_height):
"""Generate a spatial preprocessing matrix.
Args:
crop_left (int): Left boundary of a crop to extract in original image space.
crop_top (int): Top boundary of a crop to extract in original image space.
crop_right (int): Right boundary of a crop to extract in original image space.
crop_bottom (int): Bottom boundary of a crop to extract in original image space.
scale_width (float): Resize factor used for scaling width.
scale_height (float): Resize factor used for scaling height.
Returns:
sm (tf.Tensor): matrix that transforms from original image space to augmented space.
"""
# Start from an identity spatial transformation matrix.
sm = tf.eye(3)
# Cropping happens if the crop rectangle has width > 0 and height > 0. Otherwise
# cropping is considered to be disabled.
if crop_right > crop_left and crop_bottom > crop_top:
crop_matrix, _, _ = _crop_image(crop_left, crop_top, crop_right, crop_bottom)
sm = crop_matrix
# Image is resized if valid scale factors are provided for width and height.
if scale_width > 0 and scale_height > 0:
scale_stm = spatial.zoom_matrix((scale_width, scale_height))
sm = tf.matmul(scale_stm, sm)
return sm
def get_spatial_transformations_matrix(preprocessing_config,
spatial_augmentation_config):
"""
Generate a spatial transformations matrix that applies both preprocessing and augmentations.
Args:
preprocessing_config: AugmentationConfig.Preprocessing object.
spatial_augmentation_config: AugmentationConfig.SpatialAugmentation object. Can be None,
which disables random spatial augmentations.
Returns:
sm: matrix that transforms from original image space to augmented space.
"""
width = preprocessing_config.output_image_width
height = preprocessing_config.output_image_height
# Get spatial transformation matrix corresponding to preprocessing ops.
sm = get_spatial_preprocessing_matrix(
crop_left=preprocessing_config.crop_left,
crop_top=preprocessing_config.crop_top,
crop_right=preprocessing_config.crop_right,
crop_bottom=preprocessing_config.crop_bottom,
scale_width=preprocessing_config.scale_width,
scale_height=preprocessing_config.scale_height)
# Apply resizing and spatial augmentation ops to spatial transformation matrix.
if spatial_augmentation_config is not None:
sm = get_random_spatial_transformation_matrix(
width, height,
stm=sm,
flip_lr_prob=spatial_augmentation_config.hflip_probability,
translate_max_x=int(spatial_augmentation_config.translate_max_x),
translate_max_y=int(spatial_augmentation_config.translate_max_y),
zoom_ratio_min=spatial_augmentation_config.zoom_min,
zoom_ratio_max=spatial_augmentation_config.zoom_max,
rotate_rad_max=spatial_augmentation_config.rotate_rad_max)
return sm
def get_color_augmentation_matrix(color_augmentation_config):
"""
Generate a color transformations matrix applying augmentations.
Args:
color_augmentation_config: AugmentationConfig.ColorAugmentation object. Can be None, which
disables random color transformations.
Returns:
Matrix describing the color transformation to be applied.
"""
cm = tf.eye(4, dtype=tf.float32)
if color_augmentation_config is not None:
# Compute color transformation matrix.
cm = get_random_color_transformation_matrix(
ctm=cm,
hue_rotation_max=color_augmentation_config.hue_rotation_max,
saturation_shift_max=color_augmentation_config.saturation_shift_max,
contrast_scale_max=color_augmentation_config.contrast_scale_max,
contrast_center=color_augmentation_config.contrast_center,
brightness_scale_max=color_augmentation_config.color_shift_stddev * 2.0,
brightness_uniform_across_channels=False)
return cm
def get_all_transformations_matrices(augmentation_config, enable_augmentation):
"""
Generate all the color and spatial transformations as defined in augmentation_config.
Input image values are assumed to be in the [0, 1] range.
Args:
augmentation_config: AugmentationConfig object.
enable_augmentation (bool): Toggle for enabling/disabling augmentations.
Returns:
sm: matrix that transforms from original image space to augmented space.
cm: color transformation matrix.
"""
if enable_augmentation:
spatial_augmentation_config = augmentation_config.spatial_augmentation
color_augmentation_config = augmentation_config.color_augmentation
else:
spatial_augmentation_config = None
color_augmentation_config = None
# Compute spatial transformation matrix (preprocessing + augmentation).
sm = get_spatial_transformations_matrix(
preprocessing_config=augmentation_config.preprocessing,
spatial_augmentation_config=spatial_augmentation_config)
# Compute color transformation matrix.
cm = get_color_augmentation_matrix(color_augmentation_config=color_augmentation_config)
return sm, cm
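# Illustrative usage sketch (assumes `augmentation_config` is a parsed
# AugmentationConfig proto; not part of the original module):
#
#     sm, cm = get_all_transformations_matrices(augmentation_config,
#                                               enable_augmentation=True)
#     # sm is a 3x3 spatial matrix, cm a 4x4 color matrix. With
#     # enable_augmentation=False only the deterministic preprocessing
#     # (crop / scale) ends up in sm, and cm stays the identity.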
def apply_spatial_transformations_to_polygons(sm, vertices_x, vertices_y):
"""Apply spatial transformations to polygons.
Args:
sm: spatial transform.
vertices_x: vector of x-coordinates.
vertices_y: vector of y-coordinates.
Returns:
vectors of transformed polygon coordinates in the augmented space.
"""
# TODO(@drendleman) Should we use the maglev PolygonTransform processor here?
ones = tf.ones(shape=[tf.size(input=vertices_x)])
transformed_coords = tf.transpose(
a=tf.matmul(tf.transpose(a=[vertices_x, vertices_y, ones]), sm))
return transformed_coords[0], transformed_coords[1]
def apply_spatial_transformations_to_bboxes(sm, x1, y1, x2, y2):
"""Apply spatial transformations to bboxes.
Transform top-left and bottom-right bbox coordinates by the matrix and
compute new bbox coordinates. Note that the code below assumes that the
matrix contains just scaling, mirroring, and 90 degree rotations.
TODO(jrasanen) do we need to allow generic transformations? In that case we'd need to
transform all four corners of the bbox and recompute a new axis aligned
box. This will be overly pessimistic for elliptical ground truth, so
it would be better to compute a tight fit around a transformed ellipse.
This will be used only for legacy dataloader if needed.
Args:
sm: spatial transform.
x1: vector of left edge coordinates of the input bboxes. Coordinates
are in input image scale.
y1: vector of top edge coordinates of the input bboxes.
x2: vector of right edge coordinates of the input bboxes.
y2: vector of bottom edge coordinates of the input bboxes.
Returns:
vectors of transformed bbox coordinates in the augmented space.
"""
one = tf.ones(shape=[tf.size(x1)])
top_left = tf.transpose(tf.matmul(tf.transpose([x1, y1, one]), sm))
bottom_right = tf.transpose(tf.matmul(tf.transpose([x2, y2, one]), sm))
# The following lines are to be able to return the bounding box coordinates in the expected
# L, T, R, B order when (potential) horizontal flips happen.
x1 = tf.minimum(top_left[0], bottom_right[0])
y1 = tf.minimum(top_left[1], bottom_right[1])
x2 = tf.maximum(top_left[0], bottom_right[0])
y2 = tf.maximum(top_left[1], bottom_right[1])
return x1, y1, x2, y2
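# Worked example (illustrative, not part of the module): a horizontal flip of
# an image of width 100 corresponds, in this row-vector convention, to
# sm = [[-1, 0, 0], [0, 1, 0], [100, 0, 1]]. A box (x1, y1, x2, y2) =
# (10, 5, 30, 25) maps its corners to x = 90 and x = 70, so the min/max
# reordering above returns (70, 5, 90, 25), keeping the L, T, R, B order.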
def apply_color_transformations(image, ctm_op, cm):
"""
Apply color transformations to an image.
Args:
image: input image of shape (height, width, 3) with values in [0, 1].
ctm_op: instance of ColorTransform processor.
cm: color transform matrix.
Returns:
image: transformed image of shape (height, width, 3) with values in [0, 1].
"""
# Note 1: color matrix needs to be reshaped into a batch of one 4x4 matrix
# Note 2: input colors must be unnormalized, i.e. in the [0, 255] range, with 3 channels
# Note 3: output colors are always normalized to [0,1] range, 3 channels
# TODO it would be faster to batch transform the images. Not sure if it makes a difference.
# TODO the fastest implementation would combine spatial and color transforms into a single op.
return ctm_op(images=image, ctms=tf.stack([cm]))
def apply_spatial_transformations_to_image(image, height, width, stm_op, sm):
"""
Apply spatial transformations to an image.
Spatial transform op maps destination image pixel P into source image location Q
by matrix M: Q = P M. Here we first compute a forward mapping Q M^-1 = P, and
finally invert the matrix.
Args:
image (tf.Tensor): Input image of shape (height, width, 3) with values in [0, 1].
height (int): Height of the output image.
width (int): Width of the output image.
stm_op (SpatialTransform): Instance of SpatialTransform processor.
sm (tf.Tensor): 3x3 spatial transformation matrix.
Returns:
image: Transformed image of shape (height, width, 3) with values in [0, 1].
dm: Matrix that transforms from augmented space to the original image space.
"""
dm = tf.matrix_inverse(sm)
# Convert image to float if needed (stm_op requirement)
if image.dtype != tf.float32:
image = tf.cast(image, tf.float32)
# TODO undo image gamma? Probably doesn't make a difference.
# Apply spatial transformations
# Note 1: spatial transform op currently only supports 3 channel input images
# Note 2: since the input image sizes may vary, this op can't be batched
# Note 3: image and matrix need to be reshaped into a batch of one for this op
image = stm_op(images=tf.stack([image]), stms=tf.stack([dm]), shape=[height, width])
return image, dm
def apply_all_transformations_to_image(height, width, stm_op, ctm_op, sm, cm, image, num_channels):
"""Apply spatial and color transformations to an image.
Args:
height: height of the output image.
width: width of the output image.
stm_op: instance of SpatialTransform processor.
ctm_op: instance of ColorTransform processor.
sm: spatial transform matrix.
cm: color transform matrix.
image: input image of shape (height, width, 3). Values are assumed to be in [0, 1].
Returns:
transformed image of shape (height, width, 3) with values in [0, 1].
matrix that transforms from augmented space to the original image space.
"""
image, dm = apply_spatial_transformations_to_image(image, height, width, stm_op, sm)
if num_channels == 3:
image = apply_color_transformations(image, ctm_op, cm)
# Reshape into a single image
return tf.reshape(image, [height, width, num_channels]), dm
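# End-to-end usage sketch for this module (illustrative only; `image` is
# assumed to be an HWC float tensor in [0, 1] and `augmentation_config` a
# parsed AugmentationConfig proto):
#
#     stm_op, ctm_op = get_transformation_ops()
#     sm, cm = get_all_transformations_matrices(augmentation_config,
#                                               enable_augmentation=True)
#     out_h = augmentation_config.preprocessing.output_image_height
#     out_w = augmentation_config.preprocessing.output_image_width
#     augmented_image, dm = apply_all_transformations_to_image(
#         out_h, out_w, stm_op, ctm_op, sm, cm, image, num_channels=3)
#     # `dm` maps from the augmented space back to the original image space,
#     # e.g. for mapping predictions back onto the source frame.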
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/dataloader/augment.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataloader for DriveNet based on dlav common dataloader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from keras import backend as K
import nvidia_tao_tf1.core
from nvidia_tao_tf1.blocks.multi_source_loader import CHANNELS_FIRST
from nvidia_tao_tf1.blocks.multi_source_loader.data_loader import DataLoader
from nvidia_tao_tf1.blocks.multi_source_loader.processors import (
BboxClipper,
Crop,
Pipeline,
RandomBrightness,
RandomContrast,
RandomFlip,
RandomHueSaturation,
RandomRotation,
RandomTranslation,
RandomZoom,
Scale,
)
from nvidia_tao_tf1.blocks.multi_source_loader.sources import (
TFRecordsDataSource,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types import (
Bbox2DLabel,
Coordinates2D,
FEATURE_CAMERA,
filter_bbox_label_based_on_minimum_dims,
Images2DReference,
LABEL_OBJECT,
SequenceExample,
set_auto_resize,
set_image_channels,
set_max_side,
set_min_side,
sparsify_dense_coordinates,
vector_and_counts_to_sparse_tensor,
)
from nvidia_tao_tf1.core import distribution
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.default_dataloader import DefaultDataloader
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.default_dataloader import FRAME_ID_KEY
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.default_dataloader import HEIGHT_KEY
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.default_dataloader import UNKNOWN_CLASS
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.default_dataloader import WIDTH_KEY
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.utilities import extract_tfrecords_features
from nvidia_tao_tf1.cv.detectnet_v2.label_filter.base_label_filter import filter_labels
import tensorflow as tf
Canvas2D = nvidia_tao_tf1.core.types.Canvas2D
BW_POLY_COEFF1_60FC = 0.000545421498827636
logger = logging.getLogger()
class DriveNetTFRecordsParser(object):
"""Parse tf.train.Example protos into DriveNet Examples."""
def __init__(
self, tfrecord_path, image_dir, extension,
channels, source_weight=1.0, auto_resize=False
):
"""Construct a parser for drivenet labels.
Args:
tfrecord_path (list): List of paths to tfrecords file.
image_dir (str): Path to the directory where images are contained.
extension (str): Extension for images that get loaded (
".fp16", ".png", ".jpg" or ".jpeg").
channels (int): Number of channels in each image.
source_weight (float): Value by which to weight the loss for samples
coming from this data source.
auto_resize(bool): Flag to enable automatic image resize or not.
Raises:
ValueError: If the number of input channels is unsupported (i.e. not 1 or 3).
"""
if channels not in [1, 3]:
raise ValueError("DriveNetTFRecordsParser: unsupported number of channels %d." %
channels)
self._image_file_extension = extension
# These will be set once all data sources have been instantiated and the common
# maximum image size is known.
self._output_height = None
self._output_width = None
self._output_min = None
self._output_max = None
self._num_input_channels = channels
set_image_channels(self._num_input_channels)
self._image_dir = image_dir
if not self._image_dir.endswith('/'):
self._image_dir += '/'
self._tfrecord_path = tfrecord_path
# Delay the actual definition to call time.
self._parse_example = None
# Set the source_weight.
self.source_weight = source_weight
# auto_resize
self.auto_resize = auto_resize
set_auto_resize(self.auto_resize)
def _get_parse_example(self):
if self._parse_example is None:
extracted_features = extract_tfrecords_features(self._tfrecord_path[0])
self._parse_example = nvidia_tao_tf1.core.processors.ParseExampleProto(
features=extracted_features,
single=True
)
return self._parse_example
def set_target_size(self, height, width, min_side=None, max_side=None):
"""Set size for target image.
Args:
height (int): Target image height.
width (int): Target image width.
min_side(int): Target minimal side of the image(either width or height)
max_side(int): The larger side of the image(the one other than min_side).
"""
self._output_height = height
self._output_width = width
self._output_min = min_side
self._output_max = max_side
set_min_side(min_side)
set_max_side(max_side)
def __call__(self, tfrecord):
"""Parse a tfrecord.
Args:
tfrecord (tensor): a serialized example proto.
Returns:
(Example) Example compatible with Processors.
"""
example = self._get_parse_example()(tfrecord)
example = DefaultDataloader.translate_bbox_to_polygon(example)
# Reshape to have rank 0.
height = tf.cast(tf.reshape(example[HEIGHT_KEY], []), dtype=tf.int32)
width = tf.cast(tf.reshape(example[WIDTH_KEY], []), dtype=tf.int32)
example[HEIGHT_KEY] = height
example[WIDTH_KEY] = width
# Reshape image_path to have rank 0 as expected by TensorFlow's ReadFile.
image_path = tf.strings.join([self._image_dir, example[FRAME_ID_KEY]])
image_path = tf.reshape(image_path, [])
extension = tf.convert_to_tensor(value=self._image_file_extension)
image_path = tf.strings.join([image_path, extension])
labels = self._extract_bbox_labels(example)
# @vpraveen: This is the point where the image datastructure is populated. The loading
# and decoding functions are defined as member variables in Images2DReference.
return SequenceExample(
instances={
FEATURE_CAMERA: Images2DReference(
path=image_path,
extension=extension,
canvas_shape=Canvas2D(
height=tf.ones([self._output_height]),
width=tf.ones([self._output_width])),
input_height=height,
input_width=width
),
# TODO(@williamz): This is where FEATURE_SESSION: Session() would be populated
# if we ever went down that path.
},
labels={LABEL_OBJECT: labels}
)
def _extract_depth(self, example):
"""Extract depth label.
Args:
example (dict): Maps from feature name (str) to tf.Tensor.
Returns:
depth (tf.Tensor): depth values with possible scale adjustments.
"""
depth = example['target/world_bbox_z']
# Use the ratio of the first order backward polynomial coefficients as the scaling factor.
# Default camera is 60 degree camera, and this is the first order bw-poly coeff of it.
if 'frame/bw_poly_coeff1' in example:
scale_factor = example['frame/bw_poly_coeff1'] / \
BW_POLY_COEFF1_60FC
else:
scale_factor = 1.0
depth *= scale_factor
return depth
def _extract_bbox_labels(self, example):
"""Extract relevant features from labels.
Args:
example (dict): Maps from feature name (str) to tf.Tensor.
Returns:
bbox_label (Bbox2DLabel): Named tuple containing all the feature in tf.SparseTensor
form.
"""
# Cast polygons to rectangles. For polygon support, use SQLite.
coord_x = example['target/coordinates/x']
coord_y = example['target/coordinates/y']
coord_idx = example['target/coordinates/index']
xmin = tf.math.segment_min(coord_x, coord_idx)
xmax = tf.math.segment_max(coord_x, coord_idx)
ymin = tf.math.segment_min(coord_y, coord_idx)
ymax = tf.math.segment_max(coord_y, coord_idx)
# Massage the above to get a [N, 2] tensor. N refers to the number of vertices, so 2
# per bounding box, and always in (x, y) order.
dense_coordinates = tf.reshape(
tf.stack([xmin, ymin, xmax, ymax], axis=1),
(-1, 2))
# scale coordinates if resize and keep AR
if self._output_min > 0:
height = tf.cast(tf.reshape(example[HEIGHT_KEY], []), dtype=tf.int32)
width = tf.cast(tf.reshape(example[WIDTH_KEY], []), dtype=tf.int32)
dense_coordinates = self._scale_coordinates(
dense_coordinates, height,
width, self._output_min,
self._output_max
)
else:
# resize coordinates to target size without keeping aspect ratio
if self.auto_resize:
height = tf.cast(tf.reshape(example[HEIGHT_KEY], []), dtype=tf.int32)
width = tf.cast(tf.reshape(example[WIDTH_KEY], []), dtype=tf.int32)
dense_coordinates = self._resize_coordinates(
dense_coordinates, height,
width, self._output_height,
self._output_width
)
counts = tf.ones_like(example['target/object_class'], dtype=tf.int64)
# 2 vertices per bounding box (since we can infer the other 2 using just these).
vertex_counts_per_polygon = 2 * counts
sparse_coordinates = \
sparsify_dense_coordinates(dense_coordinates, vertex_counts_per_polygon)
# This will be used to instantiate the namedtuple Bbox2DLabel.
bbox_2d_label_kwargs = dict()
bbox_2d_label_kwargs['vertices'] = Coordinates2D(
coordinates=sparse_coordinates,
canvas_shape=Canvas2D(
height=tf.ones([self._output_height]),
width=tf.ones([self._output_width]))
)
bbox_2d_label_kwargs['frame_id'] = tf.reshape(example[FRAME_ID_KEY], [])
# Take care of all other possible target features.
for feature_name in Bbox2DLabel._fields:
if feature_name in {'vertices', 'frame_id'}:
continue
if 'target/' + feature_name in example:
if feature_name == 'world_bbox_z':
sparse_feature_tensor = vector_and_counts_to_sparse_tensor(
vector=self._extract_depth(example),
counts=counts)
else:
sparse_feature_tensor = vector_and_counts_to_sparse_tensor(
vector=example['target/' + feature_name],
counts=counts)
else:
# TODO(@williamz): Is there a better way to handle optional labels?
sparse_feature_tensor = []
bbox_2d_label_kwargs[feature_name] = sparse_feature_tensor
# Assign source_weight.
bbox_2d_label_kwargs['source_weight'] = [tf.constant(self.source_weight, tf.float32)]
bbox_label = Bbox2DLabel(**bbox_2d_label_kwargs)
# Filter out labels whose dimensions are too small. NOTE: this is mostly for historical
# reasons (the DefaultDataloader has such a mechanism by default), and due to the fact
# that labels are actually not enforced to have x2 > x1 and y2 > y1.
bbox_label = filter_bbox_label_based_on_minimum_dims(
bbox_2d_label=bbox_label, min_height=1.0, min_width=1.0)
return bbox_label
def _scale_coordinates(
self,
dense_coordinates,
height,
width,
min_side,
max_side
):
"""Scale coordinates for resize and keep AR."""
scaling = self._calculate_scale(height, width, min_side, max_side)
return dense_coordinates * scaling
def _calculate_scale(self, height, width, min_side, max_side):
"""Calculate the scaling factor for resize and keep aspect ratio."""
scale_factor = tf.cond(
tf.less_equal(height, width),
true_fn=lambda: tf.cast(min_side / height, tf.float32),
false_fn=lambda: tf.cast(min_side / width, tf.float32)
)
# If the scale factor results in the image's larger side exceeding max_side,
# then recompute the scale factor such that the larger side is scaled to
# max_side instead.
scale_factor2 = tf.cond(
tf.less_equal(height, width),
true_fn=lambda: tf.cast(max_side / width, tf.float32),
false_fn=lambda: tf.cast(max_side / height, tf.float32)
)
# take the smaller scale factor, which ensures the scaled image size is
# no bigger than min_side x max_side
scale_factor = tf.minimum(scale_factor, scale_factor2)
return scale_factor
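# Worked example (illustrative, not part of the class): for a 1280x720 frame
# (width x height) with min_side=600 and max_side=1000,
#     scale_factor  = 600 / 720  ~= 0.833   (short side scaled to min_side)
#     scale_factor2 = 1000 / 1280 = 0.781   (long side capped at max_side)
# The smaller factor wins, so the frame ends up roughly 1000 x 562 and never
# exceeds the min_side x max_side budget.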
def _resize_coordinates(
self,
dense_coordinates,
height,
width,
target_height,
target_width
):
"""Resize coordinates to target size, do not keep AR."""
scale_x = tf.cast(target_width / width, tf.float32)
scale_y = tf.cast(target_height / height, tf.float32)
scale_xy = tf.reshape(tf.stack([scale_x, scale_y]), (-1, 2))
return dense_coordinates * scale_xy
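# Worked example (illustrative, not part of the class): resizing labels of a
# 1280x720 (width x height) frame to a 960x544 target gives
# scale_x = 960 / 1280 = 0.75 and scale_y = 544 / 720 ~= 0.756; `scale_xy` has
# shape (1, 2), so it broadcasts over the (N, 2) coordinate tensor and scales
# the x and y columns independently.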
# TODO(@williamz): Should the `set_target_size` be upstreamed to TFRecordsDataSource?
class DriveNetTFRecordsDataSource(TFRecordsDataSource):
"""DataSource for reading examples from TFRecords files."""
def __init__(self, tfrecord_path, image_dir, extension,
height, width, channels, subset_size,
preprocessing, sample_ratio=1.0,
source_weight=1.0, min_side=None,
max_side=None, auto_resize=False):
"""Construct a DriveNetTFRecordsDataSource.
Args:
tfrecord_path (str): Path, or a list of paths to tfrecords file(s).
image_dir (str): Path to directory where images referenced by examples are stored.
extension (str): Extension of image files.
height (int): Output image height.
width (int): Output image width.
channels (int): Number of channels for images stored in this dataset.
subset_size (int): Number of images from tfrecord_path to use.
preprocessing (Pipeline): Preprocessing processors specific to this dataset.
sample_ratio (float): probability at which a sample from this data source is picked
for inclusion in a batch.
source_weight (float): Value by which to weight the loss for samples
coming from this DataSource.
min_side(int): Minimal side of the image.
max_side(int): Maximal side of the image.
auto_resize(bool): Flag to enable automatic resize or not.
"""
super(DriveNetTFRecordsDataSource, self).__init__(
tfrecord_path=tfrecord_path,
image_dir=image_dir,
extension=extension,
height=height,
width=width,
channels=channels,
subset_size=subset_size,
preprocessing=preprocessing,
sample_ratio=sample_ratio
)
self._parser = None
if self.tfrecord_path:
self._parser = DriveNetTFRecordsParser(
tfrecord_path=self.tfrecord_path,
image_dir=image_dir,
extension=extension,
channels=channels,
source_weight=source_weight,
auto_resize=auto_resize)
self.num_samples = sum([sum(1 for _ in tf.compat.v1.python_io.tf_record_iterator(filename))
for filename in self.tfrecord_path])
self.min_side = min_side
self.max_side = max_side
self.max_image_width, self.max_image_height = self._get_max_image_size()
# Set the target size for the parser.
self.set_target_size(height=self.max_image_height,
width=self.max_image_width,
min_side=self.min_side,
max_side=self.max_side)
@property
def parse_example(self):
"""Parser for labels in TFRecords used by DriveNet."""
return lambda dataset: dataset.map(self._parser)
def set_target_size(self, height, width, min_side=None, max_side=None):
"""Set size for target image .
Args:
height (int): Target image height.
width (int): Target image width.
min_side(int): Minimal side of the image.
max_side(int): Maximal side of the image.
"""
if self._parser:
self._parser.set_target_size(
height=height,
width=width,
min_side=min_side,
max_side=max_side
)
def _get_max_image_size(self):
"""Scan for the maximum image size of this data source.
Returns:
(int) max_image_width, max_image_height.
"""
max_image_height = 0
max_image_width = 0
for path in self.tfrecord_path:
for record in tf.compat.v1.python_io.tf_record_iterator(path):
example = tf.train.Example()
example.ParseFromString(record)
height = int(str(example.features.feature[HEIGHT_KEY].int64_list.value[0]))
width = int(str(example.features.feature[WIDTH_KEY].int64_list.value[0]))
max_image_height = max(max_image_height, height)
max_image_width = max(max_image_width, width)
return max_image_width, max_image_height
class DriveNetDataloader(DefaultDataloader):
"""Dataloader for object detection datasets such as KITTI and Cyclops.
Implements a data loader that reads labels and frame id from datasets and compiles
image and ground truth tensors used in training and validation.
"""
def __init__(self,
training_data_source_list,
image_file_encoding,
augmentation_config,
validation_data_source_list=None,
data_sequence_length_in_frames=None,
target_class_mapping=None,
auto_resize=False,
sampling_mode="user_defined"):
"""Instantiate the dataloader.
Args:
training_data_source_list (list): List of DataSourceConfigs specifying training set.
image_file_encoding (str): How the images to be produced by the dataset are encoded.
Can be e.g. "jpg", "fp16", "png".
augmentation_config (dlav.drivenet.common.dataloader.augmentation_config.
AugmentationConfig): Holds the parameters for augmentation and preprocessing.
validation_data_source_list (list): List of DataSourceConfigs specifying validation
set. Can be None.
data_sequence_length_in_frames (int): Number of frames in each sequence. If not None,
the output images will be 5D tensors with additional temporal dimension.
target_class_mapping (dict): source to target class mapper from the ModelConfig proto.
auto_resize(bool): Flag to enable automatic resize or not.
sampling_mode (str): Sampling mode across multiple data sources. One of
"user_defined", "proportional" or "uniform".
"""
super(DriveNetDataloader, self).__init__(
training_data_source_list=training_data_source_list,
image_file_encoding=image_file_encoding,
augmentation_config=augmentation_config,
validation_data_source_list=validation_data_source_list,
target_class_mapping=target_class_mapping)
self._min_image_side = self.augmentation_config.preprocessing.output_image_min
self._max_image_side = self.augmentation_config.preprocessing.output_image_max
self._sequence_length_in_frames = data_sequence_length_in_frames
self.auto_resize = auto_resize
self.training_sources, self.num_training_samples =\
self._construct_data_sources(self.training_data_sources)
if validation_data_source_list is not None:
self.validation_sources, self.num_validation_samples =\
self._construct_data_sources(self.validation_data_sources)
else:
self.validation_sources = None
self.num_validation_samples = 0
# Set up a look up table for class mapping.
self._target_class_lookup = None
if self.target_class_mapping is not None:
self._target_class_lookup = nvidia_tao_tf1.core.processors.LookupTable(
keys=list(self.target_class_mapping.keys()),
values=list(self.target_class_mapping.values()),
default_value=tf.constant(UNKNOWN_CLASS)
)
if sampling_mode not in ["user_defined", "proportional", "uniform"]:
raise NotImplementedError(
f"Sampling mode: {sampling_mode} requested wasn't implemented."
)
self.sampling_mode = sampling_mode
logger.info(
"Sampling mode of the dataloader was set to {sample_mode}.".format(
sample_mode=self.sampling_mode
)
)
def _construct_data_sources(self, data_source_list):
"""Instantiate data sources.
Args:
data_source_list (list): List of DataSourceConfigs.
Returns:
data_sources (list): A list of DataSource instances.
num_samples (int): Sum of the number of samples in the above data sources.
Raises:
ValueError: If an unknown dataset type was encountered.
"""
data_sources = []
for data_source_config in data_source_list:
if data_source_config.dataset_type == 'tfrecord':
data_source =\
DriveNetTFRecordsDataSource(
tfrecord_path=data_source_config.dataset_files,
image_dir=data_source_config.images_path,
extension='.' + self.image_file_encoding,
height=0,
width=0,
channels=self.num_input_channels,
subset_size=0, # TODO(jrasanen) use this.
sample_ratio=1.0, # TODO(jrasanen) use this.
preprocessing=[],
source_weight=data_source_config.source_weight,
min_side=self._min_image_side,
max_side=self._max_image_side,
auto_resize=self.auto_resize
)
else:
raise ValueError("Unknown dataset type \'%s\'" % data_source_config.dataset_type)
data_sources.append(data_source)
if self.auto_resize:
# Use specified target image size in augmentation_config
self._max_image_height = self.augmentation_config.preprocessing.output_image_height
self._max_image_width = self.augmentation_config.preprocessing.output_image_width
else:
# Scan through all data sources and compute the maximum image size. Needed so that we
# can pad all images to the same size for minibatching.
max_image_height = 0
max_image_width = 0
for data_source in data_sources:
max_image_height = max(data_source.max_image_height, max_image_height)
max_image_width = max(data_source.max_image_width, max_image_width)
max_image_height = max(
max_image_height, self.augmentation_config.preprocessing.output_image_height)
max_image_width = max(
max_image_width, self.augmentation_config.preprocessing.output_image_width)
self._max_image_height = max_image_height
self._max_image_width = max_image_width
num_samples = 0
for data_source in data_sources:
# TODO(@williamz): There should be some API at the DataSource ABC level to allow
# these "batchability" mechanics.
if isinstance(data_source, DriveNetTFRecordsDataSource):
data_source.set_target_size(
height=self._max_image_height,
width=self._max_image_width,
min_side=self._min_image_side,
max_side=self._max_image_side
)
source_samples = len(data_source)
num_samples += source_samples
# This is to be consistent with the DefaultDataloader's concatenation behavior.
# Note that it doesn't functionally reproduce concatenating multiple sources into one,
# but statistically should lead to the samples being seen the same amount of times.
data_source.sample_ratio = source_samples
return data_sources, num_samples
def get_num_samples(self, training):
"""Get number of dataset samples.
Args:
training (bool): Get number of samples in the training (true) or
validation (false) set.
Returns:
Number of samples in the chosen set.
"""
if training:
return self.num_training_samples
return self.num_validation_samples
def get_dataset_tensors(self, batch_size, training, enable_augmentation, repeat=True):
"""Get input images and ground truth labels as tensors for training and validation.
Args:
batch_size (int): Minibatch size.
training (bool): Get samples from the training (True) or validation (False) set.
enable_augmentation (bool): Whether to augment input images and labels.
repeat (bool): Whether the dataset can be looped over multiple times or only once.
Returns:
images (Tensor of shape (batch, channels, height, width)): Input images with values
in the [0, 1] range.
labels (Bbox2DLabel): Contains labels corresponding to ``images``.
num_samples (int): Total number of samples found in the dataset.
"""
# TODO(jrasanen) Need to support repeat in dlav/common data loader? Looks like we
# currently have repeat=True everywhere, so could actually remove the arg.
assert repeat is True
data_sources = self.training_sources if training else self.validation_sources
# Construct data source independent augmentation pipeline.
if self._min_image_side == 0:
augmentation_pipeline = _get_augmentation_pipeline(
augmentation_config=self.augmentation_config,
max_image_height=self._max_image_height,
max_image_width=self._max_image_width,
enable_augmentation=enable_augmentation,
)
preprocessors = []
if training:
num_gpus = distribution.get_distributor().size()
local_gpu = distribution.get_distributor().rank()
else:
# We want data to be unsharded during evaluation because currently only single-GPU
# evaluation is enabled.
num_gpus = 1
local_gpu = 0
data_loader = DataLoader(data_sources=data_sources,
augmentation_pipeline=[],
batch_size=batch_size * num_gpus,
shuffle=training,
sampling=self.sampling_mode,
preprocessing=preprocessors,
pipeline_dtype=tf.float16) # Use fp16 image processing.
data_loader.set_shard(shard_count=num_gpus, shard_index=local_gpu)
# Instantiate the data loader pipeline.
sequence_example = data_loader()
if self._min_image_side == 0:
# Compute augmentation transform matrices.
# TODO(@williamz/@jrasanen): Can this also be moved back up to the `DataLoader`?
transformed_example = augmentation_pipeline(sequence_example)
# Apply augmentations and cast to model dtype.
sequence_example = transformed_example(output_image_dtype=K.floatx())
# Since TransformedExample only lazily captures augmentations but does not apply them,
# the BboxClipper processor has to be applied outside of the augmentation_pipeline,
# as it expects to deal with transformed labels. Hence, it is not included in the above
# ``augmentation_pipeline``.
# DriveNet quirks: update truncation_type, throw out labels outside the crop,
# and clip the coordinates of those that are partially outside.
bbox_clipper = BboxClipper(
crop_left=self.augmentation_config.preprocessing.crop_left,
crop_top=self.augmentation_config.preprocessing.crop_top,
crop_right=self.augmentation_config.preprocessing.crop_right,
crop_bottom=self.augmentation_config.preprocessing.crop_bottom)
sequence_example = bbox_clipper.process(sequence_example)
images = sequence_example.instances[FEATURE_CAMERA].images
if images.dtype != tf.float32:
images = tf.cast(images, dtype=tf.float32)
labels = sequence_example.labels[LABEL_OBJECT]
if self._sequence_length_in_frames is None:
images = images[:, 0, ...]
if self.target_class_mapping is not None:
labels = self._map_to_model_target_classes(labels)
return images, labels, len(data_loader)
def _map_to_model_target_classes(self, labels):
"""Map object classes in the data source to the target classes in the dataset_config.
Args:
labels(BBox2DLabel): Input data label.
Returns:
filtered_labels (Bbox2DLabel): Output labels with mapped class names.
"""
source_classes = labels.object_class
mapped_classes = tf.SparseTensor(
values=self._target_class_lookup(source_classes.values),
indices=source_classes.indices,
dense_shape=source_classes.dense_shape)
mapped_labels = labels._replace(object_class=mapped_classes)
valid_indices = tf.not_equal(mapped_classes.values, UNKNOWN_CLASS)
return filter_labels(mapped_labels, valid_indices)
def _get_augmentation_pipeline(
augmentation_config,
max_image_height,
max_image_width,
enable_augmentation=False,
):
"""Define an augmentation (+preprocessing) pipeline.
Args:
augmentation_config (AugmentationConfig)
max_image_height (int)
max_image_width (int)
enable_augmentation (bool): Whether to enable augmentation or not.
Returns:
pipeline (Pipeline): Augmentation pipeline.
"""
# Note: once / if we are able to move to a common spec / builder, this should be removed.
processors = []
# So our lines aren't too long.
spatial_config = augmentation_config.spatial_augmentation
color_config = augmentation_config.color_augmentation
num_channels = augmentation_config.preprocessing.output_image_channel
# Preprocessing: scaling and cropping.
# First: scaling (e.g. downscale 0.5 for side camera models).
scale_width = augmentation_config.preprocessing.scale_width
scale_height = augmentation_config.preprocessing.scale_height
if scale_width != 0. or scale_height != 0.:
if scale_height != 0.:
scaled_height = scale_height * max_image_height
else:
scaled_height = max_image_height
if scale_width != 0.:
scaled_width = scale_width * max_image_width
else:
scaled_width = max_image_width
processors.append(Scale(height=scaled_height, width=scaled_width))
# Then: cropping. Note that we're adding Crop unconditionally so that we're guaranteed to
# end up with a non-empty pipeline.
crop_left, crop_top, crop_right, crop_bottom = \
augmentation_config.preprocessing.crop_left, \
augmentation_config.preprocessing.crop_top, \
augmentation_config.preprocessing.crop_right, \
augmentation_config.preprocessing.crop_bottom
if {crop_left, crop_top, crop_right, crop_bottom} == {0}:
crop_left = 0
crop_right = augmentation_config.preprocessing.output_image_width
crop_top = 0
crop_bottom = augmentation_config.preprocessing.output_image_height
processors.append(Crop(
left=crop_left,
top=crop_top,
right=crop_right,
bottom=crop_bottom))
# Spatial and color augmentation.
if enable_augmentation:
processors.append(RandomFlip(horizontal_probability=spatial_config.hflip_probability))
processors.append(RandomTranslation(
max_x=int(spatial_config.translate_max_x),
max_y=int(spatial_config.translate_max_y)))
processors.append(RandomZoom(
ratio_min=spatial_config.zoom_min,
ratio_max=spatial_config.zoom_max,
probability=1.0))
processors.append(RandomRotation(
min_angle=-spatial_config.rotate_rad_max,
max_angle=spatial_config.rotate_rad_max,
probability=spatial_config.rotate_probability
))
# Color augmentation.
if num_channels == 3:
processors.append(RandomBrightness(
scale_max=color_config.color_shift_stddev * 2.0,
uniform_across_channels=True))
processors.append(RandomHueSaturation(
hue_rotation_max=color_config.hue_rotation_max,
saturation_shift_max=color_config.saturation_shift_max))
processors.append(RandomContrast(
scale_max=color_config.contrast_scale_max,
center=color_config.contrast_center))
augmentation_pipeline = Pipeline(
processors=processors,
input_data_format=CHANNELS_FIRST,
output_data_format=CHANNELS_FIRST)
return augmentation_pipeline
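# Illustrative usage sketch (assumes `augmentation_config` is a parsed
# AugmentationConfig proto; the sizes below are hypothetical and not part of
# the original module):
#
#     pipeline = _get_augmentation_pipeline(
#         augmentation_config=augmentation_config,
#         max_image_height=544,
#         max_image_width=960,
#         enable_augmentation=True)
#     # The resulting Pipeline is applied to the SequenceExample produced by
#     # the DataLoader, as done in DriveNetDataloader.get_dataset_tensors()
#     # when output_image_min is 0 (i.e. fixed-size resizing).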
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/dataloader/drivenet_dataloader.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""Default dataloader for DetectNet V2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
import logging
from nvidia_tao_tf1.blocks.multi_source_loader.types.bbox_2d_label import \
augment_marker_labels
import nvidia_tao_tf1.core
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.augment import (
apply_all_transformations_to_image,
apply_spatial_transformations_to_polygons,
get_all_transformations_matrices,
get_transformation_ops
)
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.base_dataloader import BaseDataloader
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.utilities import (
extract_tfrecords_features,
get_num_samples,
get_tfrecords_iterator,
process_image_for_dnn_input,
read_image
)
from nvidia_tao_tf1.cv.detectnet_v2.label_filter.base_label_filter import apply_label_filters
from nvidia_tao_tf1.cv.detectnet_v2.label_filter.bbox_crop_label_filter import BboxCropLabelFilter
from nvidia_tao_tf1.cv.detectnet_v2.label_filter.bbox_dimensions_label_filter import (
BboxDimensionsLabelFilter
)
import six
from six.moves import zip
import tensorflow as tf
# Constants used for reading required features from tfrecords.
BW_POLY_COEFF1_60FC = 0.000545421498827636
FRAME_ID_KEY = 'frame/id'
HEIGHT_KEY = 'frame/height'
WIDTH_KEY = 'frame/width'
UNKNOWN_CLASS = '-1'
UNKNOWN_ORIENTATION = 0.0
UNKNOWN_DISTANCE = 0.0
logger = logging.getLogger(__name__)
class DefaultDataloader(BaseDataloader):
"""Default dataloader for object detection datasets such as KITTI and Cyclops.
Implements a data loader that reads labels and frame id from .tfrecords files and compiles
image and ground truth tensors used in training and validation.
"""
def __init__(self,
training_data_source_list,
image_file_encoding,
augmentation_config,
validation_fold=None,
validation_data_source_list=None,
target_class_mapping=None):
"""Instantiate the dataloader.
Args:
training_data_source_list (list): List of tuples (tfrecord_file_pattern,
image_directory_path) to use for training.
target_class_mapping (dict): maps from source class to target class (both str).
image_file_encoding (str): How the images to be produced by the dataset are encoded.
Can be e.g. "jpg", "fp16", "png".
augmentation_config (nvidia_tao_tf1.cv.detectnet_v2.dataloader.augmentation_config.
AugmentationConfig): Holds the parameters for augmentation and preprocessing.
validation_fold (int): Validation fold number (0-based). Indicates which fold from the
training data to use as validation. Can be None.
validation_data_source_list (list): List of tuples (tfrecord_file_pattern,
image_directory_path) to use for validation. Can be None.
"""
self.target_class_mapping = target_class_mapping
self.image_file_encoding = image_file_encoding
self.augmentation_config = augmentation_config
self.validation_fold = validation_fold
# Get training data sources.
self.training_data_sources = training_data_source_list
# Now, potentially, get the validation data sources.
self.validation_data_sources = validation_data_source_list
if augmentation_config is None:
self.num_input_channels = 3
else:
self.num_input_channels = self.augmentation_config.preprocessing.output_image_channel
assert(self.num_input_channels in [1, 3]), "Set the output_image_channel param to 1 " \
"or 3 in the augmentation config."
# TODO(@williamz): Why do some tests supply None as augmentation_config? Should we make
# a kwarg?
if augmentation_config is None:
bbox_dimensions_label_filter_kwargs = dict()
bbox_crop_label_filter_kwargs = dict()
else:
bbox_dimensions_label_filter_kwargs = {
'min_width': self.augmentation_config.preprocessing.min_bbox_width,
'min_height': self.augmentation_config.preprocessing.min_bbox_height}
bbox_crop_label_filter_kwargs = {
'crop_left': 0,
'crop_right': self.augmentation_config.preprocessing.output_image_width,
'crop_top': 0,
'crop_bottom': self.augmentation_config.preprocessing.output_image_height}
self._bbox_dimensions_label_filter = \
BboxDimensionsLabelFilter(**bbox_dimensions_label_filter_kwargs)
self._bbox_crop_label_filter = \
BboxCropLabelFilter(**bbox_crop_label_filter_kwargs)
self.target_class_to_source_classes_mapping = defaultdict(list)
if self.target_class_mapping is not None:
for source_class_name, target_class_name in \
six.iteritems(self.target_class_mapping):
self.target_class_to_source_classes_mapping[
target_class_name].append(source_class_name.lower())
self.target_class_to_source_classes_mapping = \
dict(self.target_class_to_source_classes_mapping)
# Get the transformation ops.
self._stm_op, self._ctm_op = get_transformation_ops()
# For parsing TF records.
self._extracted_features = dict()
def get_data_tensor_shape(self):
"""Interface for querying data tensor shape.
Returns:
Data tensor shape as a (C,H,W) tuple without the batch dimension.
"""
return (self.num_input_channels,
self.augmentation_config.preprocessing.output_image_height,
self.augmentation_config.preprocessing.output_image_width)
def get_num_samples(self, training):
"""Get number of dataset samples.
Args:
training (bool): Get number of samples in the training (true) or
validation (false) set.
Returns:
Number of samples in the chosen set.
"""
data_sources = self.training_data_sources if training else self.validation_data_sources
# In case file list is empty, don't load anything.
if not data_sources:
return 0
return get_num_samples(data_sources, training)
def get_dataset_tensors(self, batch_size, training, enable_augmentation, repeat=True):
"""Get input images and ground truth labels as tensors for training and validation.
Also returns the total number of samples in the dataset.
Args:
batch_size (int): Minibatch size.
training (bool): Get samples from the training (True) or validation (False) set.
enable_augmentation (bool): Whether to augment input images and labels.
repeat (bool): Whether the dataset can be looped over multiple times or only once.
Returns:
images (Tensor of shape (batch, channels, height, width)): Input images with values
in the [0, 1] range.
ground_truth_labels (list of dicts of Tensors): Each element in this list corresponds
to the augmented and filtered labels in a frame.
num_samples (int): Total number of samples found in the dataset.
"""
data_sources = self.training_data_sources if training else self.validation_data_sources
# In case file list is empty, don't load anything
if not data_sources:
return None, None, 0
# Get the location of .tfrecords files for each validation fold and the location of images
tfrecords_iterator, num_samples = get_tfrecords_iterator(data_sources,
batch_size,
training=training,
repeat=repeat)
# Extract features from a sample tfrecords file. These features are then read from all
# tfrecords files.
tfrecords_file = None
# Find the first tfrecord file.
at_least_one_tfrecord = False
for data_source_config in data_sources:
if data_source_config.dataset_files:
at_least_one_tfrecord = True
# Assume all tfrecords in a single source will have the same schema.
tfrecords_file = data_source_config.dataset_files[0]
self._extracted_features.update(extract_tfrecords_features(tfrecords_file))
assert at_least_one_tfrecord, "No valid tfrecords files found in %s" % data_sources
# Generate augmented input images and ground truth labels.
images, ground_truth_labels =\
self._generate_images_and_ground_truth_labels(tfrecords_iterator,
enable_augmentation)
return images, ground_truth_labels, num_samples
def _generate_images_and_ground_truth_labels(self, tfrecords_iterator,
enable_augmentation=False):
"""Return generators for input image and output target tensors.
Args:
tfrecords_iterator (TFRecordsIterator): Iterator for dataset .tfrecords files.
enable_augmentation (bool): Augment input images and ground truths.
Returns:
images (Tensor of shape (batch, channels, height, width)): Input images with values
in the [0, 1] range.
ground_truth_labels (list of dicts of Tensors): Each dict contains e.g. tensors for
the augmented bbox coordinates, their class name, etc.
"""
# Create the proto parser.
parse_example_proto_layer = self._get_parse_example_proto()
# We first yield our tfrecords, by calling the processor we created earlier.
# This will return a tuple - list of individual samples, that all contain 1 record
# and a list of image directory paths, 1 for each record.
records, img_dirs, source_weights = tfrecords_iterator()
# Loop over each record, and deserialize the example proto. This will yield the tensors.
# Both the number of records and the loop's length are the same as the batch size.
# We are repeating the same operation for each item in the batch. Our batch size is hence
# fixed.
images = []
ground_truth_labels = []
for record, img_dir, source_weight in zip(records, img_dirs, source_weights):
# Deserialize the record. It will yield a dictionary of items in this proto.
# Inside this (now deserialized) Example is the image, label, metadata, etc.
example = parse_example_proto_layer(record) # Returns a dict.
# Map target classes in the datasource to target classes of the model.
if self.target_class_mapping is not None:
example = self._map_to_model_target_classes(
example, self.target_class_mapping)
# Load network input image tensors.
image = self._load_input_tensors(example, img_dir)
labels, image = self._augment(example, image, enable_augmentation)
labels["source_weight"] = source_weight
images.append(image)
ground_truth_labels.append(labels)
# Zip together the results as extracted on a per-sample basis to one entire batch.
# What happened beforehand, on a per-image basis, happened in parallel
# for each sample individually. From this point on, we are working with batches.
images = tf.stack(images, axis=0)
return images, ground_truth_labels
def _augment(self, example, input_tensors, enable_augmentation):
"""Apply augmentation operations to example.
Args:
example: tf.train.Example protobuf message.
input_tensors (3-D Tensor, HWC): Input tensors.
enable_augmentation (boolean): True if random augmentations are enabled.
Returns:
labels (dict): Augmented labels.
input_tensors (3-D Tensor, CHW): Augmented input tensors.
"""
# Retrieve the augmentation matrices.
sm, cm = get_all_transformations_matrices(self.augmentation_config,
enable_augmentation)
# Get additional labels.
additional_labels = self._translate_additional_labels(example)
# The old format for bbox labels in TFRecords was 'target/coordinates_x1',
# 'target/coordinates_x2', 'target/coordinates_y1', 'target/coordinates_y2'.
# The new format has 'target/coordinates/x', 'target/coordinates/y',
# and 'target/coordinates/index' which more closely resembles how arbitrary polygons
# might be specified. The following call ensures the old format gets translated to the new
# one.
example = self.translate_bbox_to_polygon(example)
# Apply augmentations to input tensors.
input_tensors, rmat = self._apply_augmentations_to_input_tensors(
input_tensors, sm, cm, example)
# Apply augmentations to ground truth labels.
labels = self._apply_augmentations_to_ground_truth_labels(
example, sm, rmat)
# Apply augmentations to additional labels.
additional_labels = self._apply_augmentations_to_additional_labels(
additional_labels, sm)
labels.update(additional_labels)
# Do possible label filtering.
labels = apply_label_filters(
label_filters=[self._bbox_dimensions_label_filter, self._bbox_crop_label_filter],
ground_truth_labels=labels,
mode='and')
return labels, input_tensors
@staticmethod
def translate_bbox_to_polygon(example):
"""Cast all bounding box coordinates to polygon coordinates, if they exist.
Args:
example (dict): Labels for one sample.
Returns:
new_example (dict): Where bounding box labels, if any, have been cast to polygon
coordinates.
"""
needed_bbox_features = {'target/coordinates_x1', 'target/coordinates_x2',
'target/coordinates_y1', 'target/coordinates_y2'}
is_bbox_present = all(f in example for f in needed_bbox_features)
needed_polygon_features = {'target/coordinates/x', 'target/coordinates/y',
'target/coordinates/index'}
is_polygon_present = all(f in example for f in needed_polygon_features)
if is_polygon_present and not is_bbox_present:
# No need to convert.
return example
if is_polygon_present and is_bbox_present:
logger.warning('The data sources, combined, have both old-style bbox coordinates, '
'and new-style polygon vertices. Translating the old ones to new ones '
'where applicable')
elif is_bbox_present:
# The tfrecord is guaranteed to have bbox features. Convert them to polygon features.
logger.info("Bounding box coordinates were detected in the input specification! Bboxes"
" will be automatically converted to polygon coordinates.")
x1, x2, y1, y2 = (example['target/coordinates_x1'], example['target/coordinates_x2'],
example['target/coordinates_y1'], example['target/coordinates_y2'])
# Create an index like 0,0,0,0, 1,1,1,1, ..., N-1,N-1,N-1,N-1, where N is the number of
# bboxes.
num_bboxes = tf.size(x1)
coordinates_index = tf.cast(tf.floor(tf.range(num_bboxes, delta=0.25)), dtype=tf.int64)
# Construct polygon bounding boxes with the coordinate ordering TL, TR, BR, BL.
coordinates_x = tf.reshape(tf.stack([x1, x2, x2, x1], axis=1), shape=(-1,))
coordinates_y = tf.reshape(tf.stack([y1, y1, y2, y2], axis=1), shape=(-1,))
if is_polygon_present and is_bbox_present:
# If these are empty, replace the coordinates with the polygon vertices.
is_empty = tf.equal(num_bboxes, 0)
coordinates_x = tf.cond(is_empty,
lambda: example['target/coordinates/x'],
lambda: coordinates_x)
coordinates_y = tf.cond(is_empty,
lambda: example['target/coordinates/y'],
lambda: coordinates_y)
coordinates_index = tf.cond(is_empty,
lambda: example['target/coordinates/index'],
lambda: coordinates_index)
new_example = {
k: v
for k, v in six.iteritems(example)
if k not in needed_bbox_features
}
new_example.update({
'target/coordinates/x': coordinates_x,
'target/coordinates/y': coordinates_y,
'target/coordinates/index': coordinates_index
})
return new_example
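# Worked example (illustrative, not part of the class): for two legacy boxes
# with x1=[0, 10], x2=[4, 20], y1=[1, 11], y2=[5, 25] the expressions above
# produce
#     target/coordinates/index = floor(range(2, delta=0.25)) = [0, 0, 0, 0, 1, 1, 1, 1]
#     target/coordinates/x     = [0, 4, 4, 0, 10, 20, 20, 10]
#     target/coordinates/y     = [1, 1, 5, 5, 11, 11, 25, 25]
# i.e. each box becomes four vertices in TL, TR, BR, BL order that share one
# polygon index.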
def _get_parse_example_proto(self):
"""Get the maglev example proto parser.
Returns:
nvidia_tao_tf1.core.processors.ParseExampleProto object to parse example(s).
"""
return nvidia_tao_tf1.core.processors.ParseExampleProto(
features=self._extracted_features, single=True)
def _apply_augmentations_to_input_tensors(self, input_tensors, sm, cm, example):
"""
Apply augmentations to input image tensors.
Args:
input_tensors (3-D Tensor, HWC): Input image tensors.
sm (2-D Tensor): 3x3 spatial transformation/augmentation matrix.
cm (2-D Tensor): 3x3 color augmentation matrix.
example: tf.train.Example protobuf message. (Unused here but used in subclasses.)
Returns:
image (Tensor, CHW): Augmented input tensor. The values are scaled between [0, 1].
rmat: Matrix that transforms from augmented space to the original image space.
"""
# Apply cropping, zero padding, resizing, and color and spatial augmentations to images.
image, rmat = \
apply_all_transformations_to_image(
self.augmentation_config.preprocessing.output_image_height,
self.augmentation_config.preprocessing.output_image_width,
self._stm_op, self._ctm_op, sm, cm, input_tensors, self.num_input_channels)
# Apply cropping, zero padding, resizing, and color and spatial augmentations to images.
# HWC -> CHW
image = process_image_for_dnn_input(image)
return image, rmat
def _apply_augmentations_to_ground_truth_labels(self, example, sm, rmat):
"""
Apply augmentations to ground truth labels.
Args:
example: tf.train.Example protobuf message.
sm (2-D Tensor): 3x3 spatial transformation/augmentation matrix.
rmat (Tensor): 3x3 matrix that transforms from augmented space to the original
image space.
Returns:
augmented_labels (dict): Ground truth labels for the frame, after preprocessing and /
or augmentation have been applied.
"""
augmented_labels = dict()
augmented_x, augmented_y = apply_spatial_transformations_to_polygons(
sm, example['target/coordinates/x'], example['target/coordinates/y'])
augmented_labels['target/coordinates/x'] = augmented_x
augmented_labels['target/coordinates/y'] = augmented_y
self._update_bbox_from_polygon_coords(example)
# Used as a frame metadata in evaluation.
image_height = self.augmentation_config.preprocessing.output_image_height
image_width = self.augmentation_config.preprocessing.output_image_width
image_dimensions = tf.constant([[image_width, image_height]])
# Compile ground truth data to a list of dicts used in training and validation.
augmented_labels['frame/augmented_to_input_matrices'] = rmat
augmented_labels['frame/image_dimensions'] = image_dimensions
if 'target/front' in example and 'target/back' in example:
augmented_front_labels = \
augment_marker_labels(example['target/front'], sm)
augmented_labels['target/front'] = augmented_front_labels
augmented_back_labels = \
augment_marker_labels(example['target/back'], sm)
augmented_labels['target/back'] = augmented_back_labels
# For anything that is unaffected by augmentation or preprocessing, forward it through.
for feature_name, feature_tensor in six.iteritems(example):
if feature_name not in augmented_labels:
augmented_labels[feature_name] = feature_tensor
# Update bbox and truncation info in example.
# Clip cropped coordinates to image boundary.
augmented_labels = self._update_example_after_crop(crop_left=0, crop_right=image_width,
crop_top=0, crop_bottom=image_height,
example=augmented_labels)
return augmented_labels
def _load_input_tensors(self, example, file_dir):
"""
Return a generator for the input image tensors.
Args:
example: tf.train.Example protobuf message.
file_dir (string): Dataset input image directory.
Returns:
image (3-D Tensor, HWC): The image.
"""
# Reshape image_path to have rank 0 as expected by TensorFlow's ReadFile.
image_path = tf.string_join([file_dir, example[FRAME_ID_KEY]])
image_path = tf.reshape(image_path, [])
height, width = tf.reshape(example[HEIGHT_KEY], []), \
tf.reshape(example[WIDTH_KEY], [])
image_path = tf.string_join([image_path, '.' + self.image_file_encoding])
image = read_image(image_path, self.image_file_encoding, self.num_input_channels,
width, height)
return image
@staticmethod
def _update_bbox_from_polygon_coords(example):
"""Update the non-rotated bounding rectangle of a polygon from its coordinates.
Args:
example (dict): Labels for one sample.
Returns:
example (dict): A reference to the now modified example.
"""
coord_x = example['target/coordinates/x']
coord_y = example['target/coordinates/y']
coord_idx = example['target/coordinates/index']
xmin = tf.math.segment_min(coord_x, coord_idx)
xmax = tf.math.segment_max(coord_x, coord_idx)
ymin = tf.math.segment_min(coord_y, coord_idx)
ymax = tf.math.segment_max(coord_y, coord_idx)
example['target/bbox_coordinates'] = tf.stack([xmin, ymin, xmax, ymax], axis=1)
return example
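    # Illustrative note (added, not from the original source): tf.math.segment_min/max reduce the
    # flat per-vertex coordinate vectors to one value per polygon, keyed by
    # 'target/coordinates/index'. For example, coord_x = [20, 24, 24, 20, 12, 16, 16, 12] with
    # coord_idx = [0, 0, 0, 0, 1, 1, 1, 1] yields xmin = [20, 12] and xmax = [24, 16], i.e. one
    # axis-aligned bound per target.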
@classmethod
def _update_example_after_crop(cls, crop_left, crop_right, crop_top, crop_bottom, example):
"""Update bbox and truncation_type according to cropping preprocess.
Args:
crop_left/crop_right/crop_top/crop_bottom (int): crop rectangle coordinates.
example (tf.train.Example): Labels for one sample.
        Returns:
example (tf.train.Example): Labels where bbox and truncation_type are updated according
to crop preprocess.
Raises:
ValueError: if crop_left > crop_right, or crop_top > crop_bottom, raise error.
"""
coord_x = example['target/coordinates/x']
coord_y = example['target/coordinates/y']
coord_idx = example['target/coordinates/index']
# TODO(@drendleman) Use the Maglev ClipPolygon transformer here?
if all(item == 0 for item in [crop_left, crop_right, crop_top, crop_bottom]):
cls._update_bbox_from_polygon_coords(example)
return example
if crop_left > crop_right or crop_top > crop_bottom:
raise ValueError("crop_right/crop_bottom should be larger than crop_left/crop_top.")
crop_left = tf.cast(crop_left, tf.float32)
crop_right = tf.cast(crop_right, tf.float32)
crop_top = tf.cast(crop_top, tf.float32)
crop_bottom = tf.cast(crop_bottom, tf.float32)
# The coordinates have their origin as (0, 0) in the image.
if 'target/truncation_type' in example:
# Update Truncation Type of truncated objects.
            # Overlap: is any single vertex of each polygon inside the crop region?
overlap = tf.ones_like(coord_idx, dtype=tf.bool)
overlap = tf.logical_and(overlap, tf.less(coord_x, crop_right))
overlap = tf.logical_and(overlap, tf.greater(coord_x, crop_left))
overlap = tf.logical_and(overlap, tf.less(coord_y, crop_bottom))
overlap = tf.logical_and(overlap, tf.greater(coord_y, crop_top))
# Logical OR together all overlapped coordinate statuses for each polygon.
overlap = tf.math.segment_max(tf.cast(overlap, dtype=tf.int32), coord_idx)
            # Truncated: is any single vertex of each polygon outside the crop region?
truncated = tf.zeros_like(coord_idx, dtype=tf.bool)
truncated = tf.logical_or(truncated, tf.less(coord_x, crop_left))
truncated = tf.logical_or(truncated, tf.greater(coord_x, crop_right))
truncated = tf.logical_or(truncated, tf.less(coord_y, crop_top))
truncated = tf.logical_or(truncated, tf.greater(coord_y, crop_bottom))
# Logical OR all truncated coordinate statuses for each polygon.
truncated = tf.math.segment_max(tf.cast(truncated, dtype=tf.int32), coord_idx)
# Ensure an object is still truncated if it was originally truncated.
truncation_type = \
tf.logical_and(tf.cast(truncated, dtype=tf.bool), tf.cast(overlap, dtype=tf.bool))
truncation_type = \
tf.logical_or(tf.cast(example['target/truncation_type'], dtype=tf.bool),
truncation_type)
example['target/truncation_type'] = tf.cast(truncation_type, dtype=tf.int32)
elif 'target/truncation' in example:
logger.debug("target/truncation is not updated to match the crop area "
"if the dataset contains target/truncation.")
# Update bbox coordinates.
# TODO(@drendleman) We can't use clip_by_value here because of a tensorflow bug when both
# the tensor and the clip values are empty.
truncated_x = tf.minimum(tf.maximum(example['target/coordinates/x'], crop_left), crop_right)
truncated_y = tf.minimum(tf.maximum(example['target/coordinates/y'], crop_top), crop_bottom)
example.update({'target/coordinates/x': truncated_x,
'target/coordinates/y': truncated_y})
cls._update_bbox_from_polygon_coords(example)
return example
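    # Illustrative note (added, not from the original source): a target is flagged as truncated by
    # the logic above when at least one of its vertices lies outside the crop rectangle while at
    # least one other vertex remains inside it; targets that fall entirely outside keep their
    # original truncation flag, and all coordinates are clamped to the crop boundary by the
    # tf.minimum/tf.maximum pair before the bbox is recomputed.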
def _map_to_model_target_classes(self, example, target_class_mapping):
"""Map object classes as they are defined in the data source to the model target classes.
Args:
example (tf.train.Example): Labels for one sample.
target_class_mapping: Protobuf map.
        Returns:
example (tf.train.Example): Labels where data source target classes are mapped to
model target classes. If target_class_mapping is not defined, then example is
unchanged.
"""
datasource_target_classes = list(target_class_mapping.keys())
if datasource_target_classes:
mapped_target_classes = list(target_class_mapping.values())
default_value = tf.constant(UNKNOWN_CLASS)
lookup = nvidia_tao_tf1.core.processors.LookupTable(keys=datasource_target_classes,
values=mapped_target_classes,
default_value=default_value)
# Retain source class.
example['target/source_class'] = example['target/object_class']
# Overwrite 'object_class' with mapped target class.
new_target_classes = lookup(example['target/object_class'])
example['target/object_class'] = new_target_classes
return example
def _translate_additional_labels(self, labels):
"""Translate additional labels if required.
This private helper takes care of parsing labels on top of those needed for 'bare 2D'
detection, and translating them to the domain expected by the model.
E.g. This can translate (front, back) markers to an orientation value.
Args:
labels (dict): Keys are label feature names, values the corresponding tf.Tensor.
Returns:
additional_labels (dict): Keys are label feature names produced from the translation,
the values the corresponding tf.Tensor.
"""
additional_labels = dict()
depth_name = 'target/world_bbox_z'
if depth_name in labels:
additional_labels[depth_name] = labels[depth_name]
# Now adjust to camera if the information is present.
if 'frame/bw_poly_coeff1' in labels:
# Use the ratio of the first order backward polynomial coefficients as the scaling
# factor. Default camera is 60FOV, and this is its first order bw-poly coeff.
scale_factor = labels['frame/bw_poly_coeff1'] / \
BW_POLY_COEFF1_60FC
additional_labels[depth_name] *= scale_factor
else:
additional_labels[depth_name] = \
tf.ones(tf.shape(input=labels['target/object_class'])) * UNKNOWN_DISTANCE
return additional_labels
def _apply_augmentations_to_additional_labels(self, additional_labels, stm):
"""Apply augmentations to additional labels.
This private helper applies augmentations (currently only spatial augmentations) to those
labels produced by _translate_additional_labels().
Args:
additional_labels (dict): Keys are (additional) label feature names, values the
corresponding tf.Tensor.
stm (tf.Tensor): 3x3 Spatial transformation matrix.
Returns:
augmented_additional_labels (dict): Keys are the same as <additional_labels>, values the
corresponding tf.Tensor with augmentation applied to them.
"""
augmented_additional_labels = dict()
augmented_additional_labels.update(additional_labels)
if 'target/world_bbox_z' in additional_labels:
# Zoom factor is the square root of the inverse of the determinant of the left-top 2x2
# corner of the spatial transformation matrix.
abs_determinant = tf.abs(tf.linalg.det(stm[:2, :2]))
            # Although in practice the spatial transformation matrix should always be invertible,
# add a runtime check here.
with tf.control_dependencies([tf.compat.v1.assert_greater(abs_determinant, 0.001)]):
scale_factor = 1. / tf.sqrt(abs_determinant)
augmented_additional_labels['target/world_bbox_z'] *= scale_factor
return augmented_additional_labels
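# Worked example (added, not from the original source): if the spatial transform scales both image
# axes by a factor of 2, the top-left 2x2 block of `stm` has determinant 4, so the depth label
# 'target/world_bbox_z' is multiplied by 1 / sqrt(4) = 0.5; zooming in makes an object appear as if
# it were at half its original distance.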
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/dataloader/default_dataloader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utility functions for data loading.
These can be used independently of the dataloader classes.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from keras import backend as K
import nvidia_tao_tf1.core
import six
from six.moves import zip
import tensorflow as tf
def get_data_root():
"""Get path to dataset base directory."""
# In case DATA_BASE_PATH environment variable is not set, use ~/datasets
FALLBACK_DIR = os.path.join(os.path.expanduser('~'), 'datasets')
data_root = os.getenv('DATA_BASE_PATH', FALLBACK_DIR)
return data_root
def get_absolute_data_path(path):
"""Get absolute path from path or paths relative to DATA_BASE_PATH."""
dataset_path = get_data_root()
# Note that os.path.join ignores dataset_path if tf_records_path or image_directory_path
# are absolute.
if isinstance(path, list):
return [os.path.join(dataset_path, p) for p in path]
return os.path.join(dataset_path, path)
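# Example (added, illustrative): with DATA_BASE_PATH=/datasets, calling
# get_absolute_data_path('kitti/train') returns '/datasets/kitti/train'; an absolute input such
# as '/data/kitti' is returned unchanged because os.path.join discards the base path when the
# second argument is absolute.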
class ImageTFRecordsIterator(nvidia_tao_tf1.core.processors.TFRecordsIterator):
"""
Processor that sets up a TFRecordsDataset and yields (value, image_dir) tuples from the input.
Extends nvidia_tao_tf1.core.processors.TFRecordsIterator.
"""
def call(self):
"""call method.
Returns:
tuple (records, image_dirs) where
records: a list or dense tensor (depending on the value of ``batch_as_list``) containing
the next batch as yielded from the `TFRecordsDataset`. Each new call will pull a
fresh batch of samples. The set of input records cannot be depleted, as the records
will wrap around to the next epoch as required.
                If the iterator reaches the end of the dataset, it is reinitialized.
image_dirs: a list of tf.string Tensors specifying image directory path for each of the
records.
"""
records, image_dirs, source_weights = self.iterator.get_next()
records = self.process_records(records)
image_dirs = self.process_records(image_dirs)
source_weights = self.process_records(source_weights)
return (records, image_dirs, source_weights)
def _create_tf_dataset(file_list, img_dir_list, source_weights_list):
"""
Helper function to create a TFRecordDataset to be passed to the iterator.
Args:
file_list: (list) List of lists containing the files for each data source.
        img_dir_list: (list) List of image directory paths, one per data source.
        source_weights_list: (list) List of per-source weights, one per data source.
Returns:
datasets: (tf.data.TFRecordDataset) a concatenation of possibly multiple datasets.
"""
# Prepare the dataset to be passed to the iterator.
    assert len(file_list) == len(img_dir_list),\
        "Lengths of lists don't match! " \
        "`file_list` and `img_dir_list` should have the same lengths, but instead " \
        "are {} and {}.".format(len(file_list), len(img_dir_list))
datasets = None
for files, image_dir, data_weight in zip(file_list, img_dir_list, source_weights_list):
dataset = tf.data.TFRecordDataset(files)
if not image_dir.endswith('/'):
image_dir += '/'
def add_image_dir(serialized_example, img_dir=image_dir, source_weight=data_weight):
return (serialized_example, tf.constant(img_dir),
tf.constant(source_weight, tf.float32))
dataset = dataset.map(add_image_dir)
if datasets is None:
datasets = dataset
else:
datasets = datasets.concatenate(dataset)
return datasets
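# Illustrative note (added, not from the original source): after the map() above, every element of
# the concatenated dataset is a 3-tuple (serialized_example, image_dir, source_weight), so records
# from different data sources can be mixed in one pipeline while retaining their image directory
# and per-source weight.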
def get_num_samples(data_sources, training):
"""Get number of samples in the training/validation dataset.
Args:
data_sources: (list) List of tuples (list_of_tfrecord_files, image_directory_path)
training: (boolean)
Returns:
num_samples: Number of samples in the training/validation dataset.
"""
# file_list is actually a list of lists
for data_source in data_sources:
if not isinstance(data_source, tuple):
assert data_source.dataset_type == "tfrecord"
dataset_types = {type(data_source) for data_source in data_sources}
assert len(dataset_types) == 1, (
"All the data source types should either of the same type. Here we got {}".
format(dataset_types)
)
if tuple in dataset_types:
file_list = [data_source[0] for data_source in data_sources]
else:
file_list = [data_source.dataset_files for data_source in data_sources]
num_samples = sum([sum(1 for _ in tf.compat.v1.python_io.tf_record_iterator(filename))
for tfrecords_files in file_list for filename in tfrecords_files])
return num_samples
def get_tfrecords_iterator(data_sources, batch_size, training, repeat):
"""Get TFRecordsIterator for given .tfrecords files.
Args:
data_sources: (list) List of tuples (list_of_tfrecord_files, image_directory_path)
batch_size: (int)
training: (boolean)
repeat (boolean): Allow looping over the dataset multiple times.
Returns:
        tfrecords_iterator: ImageTFRecordsIterator over the given data sources.
num_samples: Number of samples in the training/validation dataset.
"""
# file_list is actually a list of lists
for data_source in data_sources:
if not isinstance(data_source, tuple):
assert data_source.dataset_type == "tfrecord"
dataset_types = {type(data_source) for data_source in data_sources}
assert len(dataset_types) == 1, (
"All the data source types should either of the same type. Here we got {}".
format(dataset_types)
)
if tuple in dataset_types:
file_list = [data_source[0] for data_source in data_sources]
img_dir_list = [data_source[1] for data_source in data_sources]
source_weight_list = [1.0 for data_source in data_sources]
else:
file_list = [data_source.dataset_files for data_source in data_sources]
img_dir_list = [data_source.images_path for data_source in data_sources]
source_weight_list = [data_source.source_weight for data_source in data_sources]
# Shuffle samples if training.
shuffle = training
num_samples = get_num_samples(data_sources, training)
# Samples in the buffer can be shuffled. However, memory consumption increases with larger
# buffer size.
shuffle_buffer_size = num_samples if shuffle else 0
tfrecords_iterator = ImageTFRecordsIterator(file_list=None, # Using our own dataset object.
shuffle_buffer_size=shuffle_buffer_size,
batch_size=batch_size,
batch_as_list=True,
sequence_length=0,
repeat=repeat,
shuffle=shuffle
)
# Pass in our own dataset, instead of using the default one from a file list.
dataset = _create_tf_dataset(file_list, img_dir_list, source_weight_list)
tfrecords_iterator.build(dataset=dataset)
return tfrecords_iterator, num_samples
def process_image_for_dnn_input(image):
"""
Process the image in such a way that a DNN expects.
This is needed because augmentation ops expect HWC, but DNN models expect CHW.
Args:
image: A representation of the image in HWC order.
Returns:
transposed: The input image in CHW order.
"""
# DNN input data type has to match the computation precision.
# A more appropriate place for this would be in the read_image function but
    # currently our spatial augmentations require and return FP32 images, hence this needs
    # to be done after augmentation.
image = tf.cast(image, dtype=K.floatx())
return tf.transpose(image, (2, 0, 1))
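# Illustrative note (added, not from the original source): the transpose above turns an HWC image
# tensor, e.g. of shape (8, 16, 3), into the CHW layout (3, 8, 16) expected by the DNN, after
# casting to the Keras compute dtype (float32 by default).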
def extract_tfrecords_features(tfrecords_file):
"""Extract features in a tfrecords file for parsing a series of tfrecords files."""
tfrecords_iterator = tf.compat.v1.python_io.tf_record_iterator(tfrecords_file)
# Each record is assumed to contain all the features even if the features would contain no
# values.
record = next(tfrecords_iterator)
extracted_features = dict()
example = tf.train.Example()
example.ParseFromString(record)
features = example.features.feature
for feature_name, feature in six.iteritems(features):
if feature.HasField('int64_list'):
feature_dtype = tf.int64
elif feature.HasField('float_list'):
feature_dtype = tf.float32
elif feature.HasField('bytes_list'):
feature_dtype = tf.string
else:
raise RuntimeError("Unknown tfrecords feature kind: %s" % feature)
# The tf.train.Example does not contain information about the FixedLen/VarLen
        # distinction. Use VarLenFeature even for features that are actually fixed-length.
extracted_features[feature_name] = tf.io.VarLenFeature(dtype=feature_dtype)
return extracted_features
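# Example (added, illustrative): for a record written with features
#     {'frame/id': bytes_list, 'frame/width': int64_list, 'target/coordinates/x': float_list}
# extract_tfrecords_features returns
#     {'frame/id': tf.io.VarLenFeature(tf.string),
#      'frame/width': tf.io.VarLenFeature(tf.int64),
#      'target/coordinates/x': tf.io.VarLenFeature(tf.float32)}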
# TODO(@williamz): add a test for this sucker and find out all the places that use it.
def get_target_class_to_source_classes_mapping(source_to_target_class_mapping):
"""Generate the mapping from target class names to a list of source class names.
Args:
source_to_target_class_mapping (dict): from source to target class names.
Returns:
target_class_to_source_classes_mapping (dict): maps from a target class name (str) to
a list of source classes (str).
"""
# Convert unicode strings to python strings for class mapping
_source_to_target_class_mapping = \
{str(k): str(v) for k, v in source_to_target_class_mapping.items()}
target_class_names = set(_source_to_target_class_mapping.values())
# Initialize empty lists.
target_class_to_source_classes_mapping = \
{target_class_name: [] for target_class_name in target_class_names}
# Now populate them.
for source_class_name, target_class_name in six.iteritems(_source_to_target_class_mapping):
target_class_to_source_classes_mapping[target_class_name].append(source_class_name)
return target_class_to_source_classes_mapping
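# Example (added, illustrative): the source-to-target mapping {'automobile': 'car', 'truck': 'car',
# 'person': 'pedestrian'} is inverted to {'car': ['automobile', 'truck'], 'pedestrian': ['person']}
# (list ordering follows dict iteration order).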
def get_file_list_from_directory(path):
"""Finds all files from path.
Args:
path (str): Path to directory, where files are searched.
Returns:
Sorted list of all files from path. The files are sorted so that this function could be
used in parallel tasks and each task takes the files corresponding to its
index (given by MAGLEV_WORKFLOW_STEP_INSTANCE_INDEX).
"""
return sorted([f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))])
def read_image(image_path, image_file_encoding, num_input_channels=3,
width=None, height=None, data_format="channels_last"):
"""
Set up image tensor reading pipeline.
Args:
        image_path: A representation of the path of the image (e.g. str or tf.string).
        image_file_encoding: (str) e.g. 'jpg', 'png', or 'fp16'. See nvidia_tao_tf1.core.processors
            for more info.
        num_input_channels: (int) Number of channels in the input image.
        width: A representation of the image width (e.g. int or tf.int32). Only required for
            'fp16' images.
        height: Likewise, but for the image height.
        data_format (str): One of "channels_first" or "channels_last". Defaults to "channels_last".
    Returns:
        image: (tf.Tensor) Tensor containing the loaded image, shape (H, W, C) for "channels_last",
            scaled to the [0, 1] range.
"""
file_loader = nvidia_tao_tf1.core.processors.LoadFile()
# JPG and PNG images are in [0, 255], while .fp16 images are in [0, 1] range. Normalize all
# image types to [0, 1].
normalize = 1.0 if image_file_encoding == 'fp16' else 255.
image_decoder = nvidia_tao_tf1.core.processors.DecodeImage(
encoding=image_file_encoding,
channels=num_input_channels,
normalize=normalize,
data_format=data_format
)
data = file_loader(image_path)
image = image_decoder(data)
if image_file_encoding == 'fp16':
assert width is not None and height is not None
image_shape = tf.stack([num_input_channels, height, width])
image_shape = tf.cast(image_shape, dtype=tf.int32)
image = tf.reshape(image, image_shape)
# CHW -> HWC if required. Note that this is only needed here because the `fp16` case
# returns a 1-D vector, as opposed to a 3-D vector for JPEG / PNGs.
if data_format == 'channels_last':
image = tf.transpose(a=image, perm=[1, 2, 0])
return image
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/dataloader/utilities.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to process front / back markers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import tensorflow as tf
REFERENCE_ANGLE = 0.0
INVALID_ORIENTATION = 0.0
FRONT_BACK_TOLERANCE = 5.0 * math.pi / 180.
SIDE_ONLY_TOLERANCE = 2.0 * math.pi / 180.
def _round_marker_tf(markers, tolerance=0.01):
"""Round markers to account for potential labeling errors.
Args:
markers (tf.Tensor): Either front or back marker values.
tolerance (float): The tolerance within which we start rounding values.
Returns:
rounded_markers (tf.Tensor): <markers> that have been rounded.
"""
# First, round values close to 0.0.
rounded_markers = \
tf.where(tf.logical_and(tf.less(markers, tolerance),
tf.greater_equal(markers, 0.0)),
tf.zeros_like(markers), # If condition is True.
markers)
# Then, round values close to 1.0.
rounded_markers = \
tf.where(tf.greater(rounded_markers, 1.0 - tolerance),
tf.ones_like(rounded_markers),
rounded_markers)
return rounded_markers
def _minus_pi_plus_pi(orientations):
"""Puts orientations values in [-pi; pi[ range.
Args:
orientations (tf.Tensor): Contains values for orientation in radians. Shape is (N,).
Returns:
new_orientations (tf.Tensor): Same values as <orientations> but in [-pi; pi[ range.
"""
new_orientations = tf.mod(orientations, 2. * math.pi)
new_orientations = \
tf.where(new_orientations > math.pi,
new_orientations - 2. * math.pi,
new_orientations)
return new_orientations
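# Example (added, illustrative): an input of 3*pi/2 is unchanged by the modulo (it is already in
# [0, 2*pi)), and since it is greater than pi it is shifted down by 2*pi to -pi/2.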
def map_markers_to_orientations(front_markers,
back_markers,
invalid_orientation=INVALID_ORIENTATION,
ref_angle=REFERENCE_ANGLE,
tolerance=0.01,
clockwise=False):
"""Map front / back markers to orientation values.
An angle of 0.0 corresponds to the scenario where an object is in the same direction as the ego
camera, its back facing towards the camera. Outputs radian values in the ]-pi; pi] range.
Args:
front_markers (tf.Tensor): Denotes the front marker of target objects. Shape is (N,),
where N is the number of targets in a frame.
back_markers (tf.Tensor): Likewise, but for the back marker.
        invalid_orientation (float): Value assigned to entries corresponding to invalid
            (<front_markers>, <back_markers>) combinations.
        ref_angle (float): Reference angle corresponding to the scenario where a vehicle is right
            in front of the camera with its back facing towards the camera. The default is
            REFERENCE_ANGLE (0.0).
        tolerance (float): Tolerance within which marker values are rounded to 0.0 or 1.0.
clockwise (bool): Whether to count clockwise angles as positive values. False would
correspond to trigonometric convention.
Returns:
orientations (tf.Tensor): Shape (N,) tensor containing the angle corresponding to
(<front_markers>, <back_markers>). Values are radians.
Raises:
ValueError: If parameters are outside accepted ranges.
"""
if not (0.0 < tolerance < 1.0):
raise ValueError("map_markers_to_orientations accepts a tolerance in ]0.; 1.[ range only.")
if not (-math.pi <= ref_angle < math.pi):
raise ValueError("map_markers_to_orientations accepts a ref_angle in [-pi; pi[ range only.")
# First, round the markers.
rounded_front_markers = _round_marker_tf(front_markers)
rounded_back_markers = _round_marker_tf(back_markers)
ones = tf.ones_like(rounded_front_markers) # Used for constants and what not.
orientations = tf.zeros_like(front_markers)
# Back only.
is_back_only = tf.logical_and(
tf.equal(rounded_front_markers, -1.0),
tf.logical_or(tf.equal(rounded_back_markers, 0.0),
tf.equal(rounded_back_markers, 1.0)))
orientations = \
tf.where(is_back_only,
tf.zeros_like(rounded_front_markers),
orientations)
# Front only.
is_front_only = tf.logical_and(
tf.equal(rounded_back_markers, -1.0),
tf.logical_or(tf.equal(rounded_front_markers, 0.0),
tf.equal(rounded_front_markers, 1.0)))
orientations = \
tf.where(is_front_only, math.pi * ones, orientations)
# Front and right.
is_front_and_right = \
tf.logical_and(tf.logical_and(tf.greater(rounded_front_markers, 0.0),
tf.less(rounded_front_markers, 1.0)),
tf.equal(rounded_back_markers, 0.0))
orientations = \
tf.where(is_front_and_right,
-(math.pi / 2.0) * (2.0 * ones - rounded_front_markers),
orientations)
# Front and left.
is_front_and_left = \
tf.logical_and(tf.logical_and(tf.greater(rounded_front_markers, 0.0),
tf.less(rounded_front_markers, 1.0)),
tf.equal(rounded_back_markers, 1.0))
orientations = \
tf.where(is_front_and_left,
(math.pi / 2.0) * (ones + rounded_front_markers),
orientations)
# Back + right or left.
is_back_and_side = \
tf.logical_and(tf.logical_or(tf.equal(rounded_front_markers, 0.0), # Left side.
tf.equal(rounded_front_markers, 1.0)), # Right side.
tf.logical_and(tf.greater(rounded_back_markers, 0.0),
tf.less(rounded_back_markers, 1.0)))
orientations = \
tf.where(is_back_and_side,
(math.pi / 2.0) * (rounded_back_markers - rounded_front_markers),
orientations)
# Finally, only one of the sides is visible (when either (0.0, 1.0) or (1.0, 0.0)).
is_side_only = tf.logical_or(
tf.logical_and(tf.equal(rounded_front_markers, 0.0), tf.equal(rounded_back_markers, 1.0)),
tf.logical_and(tf.equal(rounded_front_markers, 1.0), tf.equal(rounded_back_markers, 0.0)))
orientations = \
tf.where(is_side_only,
(math.pi / 2.0) * (rounded_back_markers - rounded_front_markers),
orientations)
# Shift and scale.
if clockwise:
orientations = -orientations
orientations = orientations + ref_angle
# Keep things in [-pi; pi[ range.
orientations = _minus_pi_plus_pi(orientations)
# Finally, if none of the cases had hit, set the entries to <invalid_orientation>.
all_scenarios = tf.stack([is_back_only,
is_front_only,
is_front_and_right,
is_front_and_left,
is_back_and_side,
is_side_only])
is_any_scenario = tf.reduce_any(all_scenarios, axis=0)
orientations = tf.where(is_any_scenario,
orientations, # Keep as is.
invalid_orientation * ones)
return orientations
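# Example (added, illustrative, assuming the default ref_angle=0.0 and clockwise=False):
#   (front, back) = (-1.0, 0.0) -> back only, orientation 0.0
#   (front, back) = (0.0, -1.0) -> front only, orientation pi
#   (front, back) = (1.0, 0.5)  -> back + right side, orientation (pi/2) * (0.5 - 1.0) = -pi/4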
def augment_orientation_labels(orientation_labels, stm, ref_angle=REFERENCE_ANGLE):
"""Augment orientation labels.
    Why is the below check enough? For Gridbox, all STMs start out as 3x3 identity matrices M.
    In determining the final STM, input STMs are right-multiplied sequentially with a
flip LR STM, and a combination of translation/zoom STMs which use the same underlying
representation. A quick matrix multiply will show you that applying both a translate and a
zoom STM is pretty much the same as applying one such STM with different parameters.
Furthermore, given that the parameters passed to get the translate and scale STMs are always
positive, the end result R of multiplying the initial STM M by the flip LR STM x
translate/zoom STM shows that R[0, 0] is positive if and only if no flip LR STM was applied.
NOTE: If rotations are introduced, this reasoning is no longer sufficient.
Args:
orientation_labels (tf.Tensor): Contains the orientation values for the targets in a single
frame.
stm (tf.Tensor): 3x3 spatial transformation matrix.
ref_angle (float): Reference angle corresponding to the scenario where a vehicle is right
            in front of the camera with its back facing towards the camera. The default is
            REFERENCE_ANGLE (0.0).
Returns:
augmented_orientation_labels (tf.Tensor): Contains the orientation values with the spatial
transformations encapsulated by <stm> applied to them.
Raises:
ValueError: If ref_angle is outside accepted range.
"""
if not (-math.pi <= ref_angle < math.pi):
raise ValueError("augment_orientation_labels accepts a ref_angle in [-pi; pi[ range only.")
# Define the callables for tf.cond.
def no_flip(): return orientation_labels
def flip(): return _minus_pi_plus_pi(2. * ref_angle - orientation_labels)
with tf.control_dependencies([tf.assert_equal(stm[0, 1], 0.),
tf.assert_equal(stm[1, 0], 0.)]):
        augmented_orientation_labels = \
            tf.cond(stm[0, 0] < 0.0, flip, no_flip)
    return augmented_orientation_labels
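# Illustrative note (added, not from the original source): with the default ref_angle of 0.0, a
# horizontal flip (stm[0, 0] < 0) maps an orientation theta to -theta (wrapped back into range),
# e.g. a target seen at +pi/4 ends up at -pi/4 in the flipped image.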
def _round_marker(marker, epsilon=0.05):
"""Helper function to round a marker value to either 0.0 or 1.0.
Args:
marker (float): Marker value. Expected to be in [0.0, 1.0] range.
epsilon (float): Value within which to round.
Returns:
rounded_marker (float): <marker> rounded to either 0.0 or 1.0 if it is within epsilon of
one or the other.
"""
rounded_marker = marker
if abs(marker) < epsilon:
rounded_marker = 0.0
elif abs(marker - 1.0) < epsilon:
rounded_marker = 1.0
return rounded_marker
def map_orientation_to_markers(
orientation,
ref_angle=REFERENCE_ANGLE,
clockwise=False,
front_back_tolerance=FRONT_BACK_TOLERANCE,
side_only_tolerance=SIDE_ONLY_TOLERANCE):
"""Map orientation value to (front, back) marker values.
Args:
orientation (float): Orientation value in radians. Values are expected to be in [-pi; pi[.
ref_angle (float): Reference angle corresponding to the scenario where a vehicle is right
            in front of the camera with its back facing towards the camera. The default is
            REFERENCE_ANGLE (0.0).
clockwise (bool): Whether to count clockwise angles as positive values. False would
correspond to trigonometric convention.
front_back_tolerance (float): Radian tolerance within which we consider <orientation> to be
equal to that of a front- / back-only scenarios.
side_only_tolerance (float): Likewise, but for either of the side-only scenarios.
Returns:
front (float): Corresponding front marker value.
back (float): Idem, but for back marker value.
Raises:
ValueError: If ref_angle is outside accepted range.
"""
if not (-math.pi <= ref_angle < math.pi):
raise ValueError("augment_orientation_labels accepts a ref_angle in [-pi; pi[ range only.")
# Adjust orientation coordinate system if need be.
_orientation = orientation - ref_angle
if clockwise:
_orientation *= -1.
# Put in [-pi, pi[ range.
_orientation = _orientation % (2. * math.pi)
_orientation = _orientation - 2. * math.pi if _orientation > math.pi else _orientation
front = 0.
back = 0.
radian_factor = 2. / math.pi
# For the following scenarios, we allow a certain tolerance on the orientation value:
# - front or back only: if within <front_back_tolerance> of the exact value.
# - side only: if within <side_only_tolerance> of the exact value.
# As such, their corresponding checks will appear first in the following if / elif clause.
if abs(_orientation) < front_back_tolerance:
# Back only.
front = -1.0
back = 0.0
elif abs(_orientation - math.pi) < front_back_tolerance or \
abs(_orientation + math.pi) < front_back_tolerance:
# Front only.
front = 0.0
back = -1.0
elif abs(_orientation - math.pi / 2.0) < side_only_tolerance:
# Left only.
front = 0.0
back = 1.0
elif abs(_orientation + math.pi / 2.0) < side_only_tolerance:
# Right only.
front = 1.0
back = 0.0
elif (-math.pi / 2. < _orientation <= 0.0):
# ]-pi/2; 0] - back + right.
front = 1.0
back = radian_factor * _orientation + 1.
elif (-math.pi < _orientation <= -math.pi / 2.):
# ]-pi; -pi/2] - front + right.
front = radian_factor * _orientation + 2.
back = 0.
elif (0. < _orientation <= math.pi / 2.):
# ]0; pi/2] - back + left.
front = 0.
back = radian_factor * _orientation
elif (math.pi / 2. < _orientation <= math.pi):
# ]pi/2; pi]. - front + left.
front = radian_factor * _orientation - 1
back = 1.
# Additional rounding. This is to be able to hard classify certain examples as side only, etc.
front = _round_marker(front)
back = _round_marker(back)
return front, back
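# Minimal usage sketch (added, not part of the original module): a quick look at the
# orientation -> marker mapping with the default REFERENCE_ANGLE of 0.0 and the
# counter-clockwise (trigonometric) convention.
if __name__ == '__main__':
    for angle in (0.0, math.pi / 4., -math.pi / 2., math.pi):
        front, back = map_orientation_to_markers(angle)
        print("orientation %+.3f rad -> front marker %+.2f, back marker %+.2f"
              % (angle, front, back))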
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/dataloader/process_markers.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""Build AugmentationConfig for DetectNet V2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.augmentation.augmentation_config import AugmentationConfig
def build_preprocessing_config(preprocessing_proto):
"""Build a Preprocessing object from a proto.
Args:
preprocessing_proto (nvidia_tao_tf1.cv.detectnet_v2.proto.augmentation_config.
AugmentationConfig.Preprocessing proto message).
Returns:
nvidia_tao_tf1.cv.detectnet_v2.dataloader.augmentation_config.Preprocessing object.
"""
return AugmentationConfig.Preprocessing(
output_image_width=preprocessing_proto.output_image_width,
output_image_height=preprocessing_proto.output_image_height,
crop_left=preprocessing_proto.crop_left,
crop_top=preprocessing_proto.crop_top,
crop_right=preprocessing_proto.crop_right,
crop_bottom=preprocessing_proto.crop_bottom,
min_bbox_width=preprocessing_proto.min_bbox_width,
min_bbox_height=preprocessing_proto.min_bbox_height,
scale_width=preprocessing_proto.scale_width,
scale_height=preprocessing_proto.scale_height,
output_image_channel=preprocessing_proto.output_image_channel,
output_image_min=preprocessing_proto.output_image_min,
output_image_max=preprocessing_proto.output_image_max
)
def build_spatial_augmentation_config(spatial_augmentation_proto):
"""Build a SpatialAugmentation object from a proto.
Args:
spatial_augmentation_proto (nvidia_tao_tf1.cv.detectnet_v2.dataloader.proto.augmentation_config.
AugmentationConfig.SpatialAugmentation proto message).
Returns:
nvidia_tao_tf1.cv.detectnet_v2.dataloader.augmentation_config.SpatialAugmentation object.
"""
return AugmentationConfig.SpatialAugmentation(
hflip_probability=spatial_augmentation_proto.hflip_probability,
vflip_probability=spatial_augmentation_proto.vflip_probability,
zoom_min=spatial_augmentation_proto.zoom_min,
zoom_max=spatial_augmentation_proto.zoom_max,
translate_max_x=spatial_augmentation_proto.translate_max_x,
translate_max_y=spatial_augmentation_proto.translate_max_y,
rotate_rad_max=spatial_augmentation_proto.rotate_rad_max,
rotate_probability=spatial_augmentation_proto.rotate_probability,
)
def build_color_augmentation_config(color_augmentation_proto):
"""Build a ColorAugmentation object from a proto.
Args:
color_augmentation_proto (nvidia_tao_tf1.cv.detectnet_v2.dataloader.proto.augmentation_config.
AugmentationConfig.ColorAugmentation proto message).
Returns:
nvidia_tao_tf1.cv.detectnet_v2.dataloader.augmentation_config.ColorAugmentation object.
"""
return AugmentationConfig.ColorAugmentation(
color_shift_stddev=color_augmentation_proto.color_shift_stddev,
hue_rotation_max=color_augmentation_proto.hue_rotation_max,
saturation_shift_max=color_augmentation_proto.saturation_shift_max,
contrast_scale_max=color_augmentation_proto.contrast_scale_max,
contrast_center=color_augmentation_proto.contrast_center
)
def build_augmentation_config(augmentation_proto):
"""Build an AugmentationConfig object from a proto.
Args:
augmentation_proto (nvidia_tao_tf1.cv.detectnet_v2.dataloader.proto.augmentation_config.
AugmentationConfig proto message).
Returns:
nvidia_tao_tf1.cv.detectnet_v2.dataloader.augmentation_config.AugmentationConfig object.
"""
preprocessing = build_preprocessing_config(augmentation_proto.preprocessing)
spatial_augmentation = \
build_spatial_augmentation_config(augmentation_proto.spatial_augmentation)
color_augmentation = build_color_augmentation_config(augmentation_proto.color_augmentation)
return AugmentationConfig(
preprocessing=preprocessing,
spatial_augmentation=spatial_augmentation,
color_augmentation=color_augmentation
)
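# Usage sketch (added, illustrative): given a parsed experiment spec `experiment_spec` that carries
# an AugmentationConfig proto message, the dataloader-side config object is typically built as
#     augmentation_config = build_augmentation_config(experiment_spec.augmentation_config)
# where the exact field name on the spec proto is an assumption here.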
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/dataloader/augmentation/build_augmentation_config.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""Defines functions and classes for data augmentation for the DetectNet V2 dataloader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/dataloader/augmentation/__init__.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""Augmentation config classes for DetectNet V2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class AugmentationConfig(object):
"""Hold all preprocessing and augmentation related parameters."""
def __init__(self,
preprocessing,
spatial_augmentation,
color_augmentation):
"""Constructor.
Args:
preprocessing (AugmentationConfig.Preprocessing):
spatial_augmentation (AugmentationConfig.SpatialAugmentation):
color_augmentation (AugmentationConfig.ColorAugmentation):
"""
self.preprocessing = preprocessing
self.spatial_augmentation = spatial_augmentation
self.color_augmentation = color_augmentation
# Define inner classes.
class Preprocessing(object):
"""Hold the preprocessing parameters."""
def __init__(self,
output_image_width,
output_image_height,
output_image_channel,
output_image_min,
output_image_max,
crop_left,
crop_top,
crop_right,
crop_bottom,
min_bbox_width,
min_bbox_height,
scale_width,
scale_height):
"""Constructor.
Args:
output_image_width/height (int): Dimensions of the image after preprocessing has
been applied.
                output_image_min/max (int): Smaller-side and larger-side sizes of the output image
                    when dynamic shapes are used.
                crop_left/top (int): How much to crop from the left/top edge of an image.
                crop_right/bottom (int): Where to crop to. E.g. if the full image is 1000 pixels
                    in width but we only want it up to the 900th pixel, <crop_right> should be set
                    to 900.
min_bbox_width/height (float): Minimum bbox dimensions.
scale_width/height (float): Scaling factor for resizing width/height after crop.
Raises:
ValueError: if the provided values are not in expected ranges.
"""
if output_image_min == 0:
if output_image_width <= 0 or output_image_height <= 0:
raise ValueError("Preprocessing.output_image_height/width must be > 0.")
if crop_left > crop_right or crop_top > crop_bottom:
raise ValueError("Preprocessing crop parameters lead to null output dim(s).")
if scale_width < 0 or scale_height < 0:
raise ValueError("Preprocessing.scale_width/height must be positive.")
self.output_image_width = output_image_width
self.output_image_height = output_image_height
self.crop_left = crop_left
self.crop_top = crop_top
self.crop_right = crop_right
self.crop_bottom = crop_bottom
self.min_bbox_width = min_bbox_width
self.min_bbox_height = min_bbox_height
self.scale_width = scale_width
self.scale_height = scale_height
self.output_image_channel = output_image_channel
self.output_image_min = output_image_min
self.output_image_max = output_image_max
class SpatialAugmentation(object):
"""Hold the spatial augmentation parameters."""
def __init__(self,
hflip_probability,
vflip_probability,
zoom_min,
zoom_max,
translate_max_x,
translate_max_y,
rotate_rad_max,
rotate_probability):
"""Constructor.
Args:
hflip_probability (float): Probability value for flipping an image horizontally
(i.e. from left to right).
vflip_probability (float): Same but for vertical axis.
zoom_min/max (float): Minimum/maximum zoom ratios. Set min = max = 1 to keep
original size.
                translate_max_x/y (float): Maximum translation along the x/y axis in pixel values.
                rotate_rad_max (float): Maximum rotation angle in radians.
                rotate_probability (float): Probability of applying a rotation augmentation.
Raises:
ValueError: if the provided values are not in expected ranges.
"""
if not 0.0 <= hflip_probability <= 1.0:
raise ValueError("hflip_probability should be in [0., 1.] range.")
if not 0.0 <= vflip_probability <= 1.0:
raise ValueError("vflip_probability should be in [0., 1.] range.")
if zoom_min > zoom_max:
raise ValueError("zoom_min must be <= zoom_max.")
if zoom_min <= 0.0:
raise ValueError("zoom_min must be > 0.0")
if translate_max_x < 0.0 or translate_max_y < 0.0:
raise ValueError("translate_max_x/y must be >= 0.0.")
self.hflip_probability = hflip_probability
self.vflip_probability = vflip_probability
self.zoom_min = zoom_min
self.zoom_max = zoom_max
self.translate_max_x = translate_max_x
self.translate_max_y = translate_max_y
self.rotate_rad_max = rotate_rad_max if rotate_rad_max else 0.0
self.rotate_probability = rotate_probability if rotate_probability else 1.0
class ColorAugmentation(object):
"""Hold the color augmentation parameters."""
def __init__(self,
color_shift_stddev,
hue_rotation_max,
saturation_shift_max,
contrast_scale_max,
contrast_center):
"""Constructor.
Args:
color_shift_stddev (float): Standard deviation for color shift augmentation.
hue_rotation_max (float): Maximum hue rotation, in degrees.
saturation_shift_max (float): Maximum value for saturation shift.
                contrast_scale_max (float): Maximum scale shift for contrast augmentation. Set to
0.0 to disable.
contrast_center (float): Center point for contrast augmentation. Set to 0.5 to
disable.
"""
self.color_shift_stddev = color_shift_stddev
self.hue_rotation_max = hue_rotation_max
self.saturation_shift_max = saturation_shift_max
self.contrast_scale_max = contrast_scale_max
self.contrast_center = contrast_center
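# Minimal construction sketch (added, not part of the original module): building an
# AugmentationConfig directly, mirroring the dummy-config helper used by the dataloader tests.
if __name__ == '__main__':
    _preprocessing = AugmentationConfig.Preprocessing(
        output_image_width=16, output_image_height=8, output_image_channel=3,
        output_image_min=0, output_image_max=0, crop_left=0, crop_top=0,
        crop_right=16, crop_bottom=8, min_bbox_width=0., min_bbox_height=0.,
        scale_width=0., scale_height=0.)
    _spatial = AugmentationConfig.SpatialAugmentation(
        hflip_probability=0.5, vflip_probability=0., zoom_min=1.0, zoom_max=1.0,
        translate_max_x=0., translate_max_y=0., rotate_rad_max=0., rotate_probability=1.0)
    _color = AugmentationConfig.ColorAugmentation(
        color_shift_stddev=0., hue_rotation_max=0., saturation_shift_max=0.,
        contrast_scale_max=0., contrast_center=0.5)
    _config = AugmentationConfig(_preprocessing, _spatial, _color)
    print(_config.preprocessing.output_image_width)
    print(_config.spatial_augmentation.hflip_probability)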
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/dataloader/augmentation/augmentation_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test data loading."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import Counter
import os
import numpy as np
import pytest
import tensorflow as tf
from nvidia_tao_tf1.core.utils import set_random_seed
from nvidia_tao_tf1.cv.detectnet_v2.common.graph import get_init_ops
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.augmentation.augmentation_config import (
AugmentationConfig
)
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.build_dataloader import build_data_source_lists
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.data_source_config import DataSourceConfig
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.default_dataloader import DefaultDataloader
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.default_dataloader import FRAME_ID_KEY
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.drivenet_dataloader import BW_POLY_COEFF1_60FC
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.drivenet_dataloader import DriveNetDataloader
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.tests.utilities.data_generation import (
generate_dummy_dataset,
generate_dummy_images,
generate_dummy_labels
)
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.utilities import extract_tfrecords_features
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.utilities import get_num_samples
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.utilities import get_tfrecords_iterator
from nvidia_tao_tf1.cv.detectnet_v2.dataio.kitti_converter_lib import (
_bytes_feature,
_float_feature,
_int64_feature
)
from nvidia_tao_tf1.cv.detectnet_v2.proto.dataset_config_pb2 import DatasetConfig
from nvidia_tao_tf1.cv.detectnet_v2.proto.dataset_config_pb2 import DataSource
@pytest.fixture(scope='module')
def test_paths(tmpdir_factory):
"""Test paths."""
# Generate three folds with 1, 2, and 3 samples, respectively.
tmpdir = tmpdir_factory.mktemp('test_data')
folds = [str(tmpdir.join("data.tfrecords-fold-00%d-of-003" % i))
for i in range(3)]
for num_examples, path in enumerate(folds, 1):
generate_dummy_labels(path=path, num_samples=num_examples)
return folds
def _get_dataloader(dataloader_class, training_data_source_list,
validation_data_source_list, augmentation_config,
image_file_encoding):
"""Helper function for creating a dataloader class."""
if dataloader_class == 'DefaultDataloader':
dataloader = DefaultDataloader(
training_data_source_list=training_data_source_list,
image_file_encoding=image_file_encoding,
validation_data_source_list=validation_data_source_list,
augmentation_config=augmentation_config
)
elif dataloader_class == 'DriveNetDataloader':
dataloader = DriveNetDataloader(
training_data_source_list=training_data_source_list,
image_file_encoding=image_file_encoding,
validation_data_source_list=validation_data_source_list,
augmentation_config=augmentation_config
)
return dataloader
def _get_dummy_augmentation_config(width=16, height=8, channel=3, min_side=0,
max_side=0, crop_right=16, crop_bottom=8,
scale_width=0.0, scale_height=0.0):
"""Construct a dummy augmentation config."""
preprocessing = AugmentationConfig.Preprocessing(
output_image_width=width, output_image_height=height,
output_image_channel=channel, output_image_min=min_side,
output_image_max=max_side, crop_left=0, crop_top=0,
crop_right=crop_right, crop_bottom=crop_bottom,
min_bbox_width=0., min_bbox_height=0.,
scale_width=scale_width, scale_height=scale_height)
spatial = AugmentationConfig.SpatialAugmentation(0., 0., 1.0, 1.0, 0., 0., 0., 0.)
color = AugmentationConfig.ColorAugmentation(0.0, 0.0, 0.0, 0.0, 0.5,)
return AugmentationConfig(preprocessing, spatial, color)
@pytest.mark.parametrize("dataloader_class,validation_fold,expected_num_samples",
[('DefaultDataloader', 0, (5, 1)),
('DefaultDataloader', 1, (4, 2)),
('DefaultDataloader', None, (6, 0)),
('DriveNetDataloader', 0, (5, 1)),
('DriveNetDataloader', 1, (4, 2)),
('DriveNetDataloader', None, (6, 0))])
def test_dataloader_kfold_split(test_paths, dataloader_class,
validation_fold, expected_num_samples):
"""Test Dataloader returning correct values when doing kfold validation split."""
dataset_proto = DatasetConfig()
data_sources_proto = DataSource()
data_sources_proto.tfrecords_path = os.path.join(
os.path.dirname(test_paths[0]), "*")
data_sources_proto.image_directory_path = os.path.dirname(test_paths[0])
dataset_proto.data_sources.extend([data_sources_proto])
if validation_fold is not None:
dataset_proto.validation_fold = validation_fold
training_data_source_list, validation_data_source_list, _ = \
build_data_source_lists(dataset_proto)
image_file_encoding = "fp16"
dataloader = _get_dataloader(dataloader_class=dataloader_class,
training_data_source_list=training_data_source_list,
validation_data_source_list=validation_data_source_list,
augmentation_config=_get_dummy_augmentation_config(),
image_file_encoding=image_file_encoding)
# Check that the data source lists have the expected number of elements.
if validation_fold is not None:
expected_num_training_sources, expected_num_validation_sources = 1, 1
else:
expected_num_training_sources, expected_num_validation_sources = 1, 0
assert len(dataloader.training_data_sources) == expected_num_training_sources
assert len(
dataloader.validation_data_sources) == expected_num_validation_sources
num_training_samples = dataloader.get_num_samples(True)
num_validation_samples = dataloader.get_num_samples(False)
expected_num_training_samples, expected_num_validation_samples = expected_num_samples
assert num_training_samples == expected_num_training_samples
assert num_validation_samples == expected_num_validation_samples
@pytest.mark.parametrize("dataloader_class,validation_data_used,expected_num_samples",
[('DefaultDataloader', True, (6, 1)),
('DefaultDataloader', False, (6, 0)),
('DriveNetDataloader', True, (6, 1)),
('DriveNetDataloader', False, (6, 0))])
def test_dataloader_path_split(test_paths, dataloader_class,
validation_data_used, expected_num_samples):
"""Test Dataloader returning correct values when specifying path to validation files."""
dataset_proto = DatasetConfig()
for fold in test_paths:
training_data_sources_proto = DataSource()
training_data_sources_proto.tfrecords_path = fold
training_data_sources_proto.image_directory_path = os.path.dirname(
fold)
dataset_proto.data_sources.extend([training_data_sources_proto])
if validation_data_used:
dataset_proto.validation_data_source.tfrecords_path = test_paths[0]
dataset_proto.validation_data_source.image_directory_path = os.path.dirname(
test_paths[0])
training_data_source_list, validation_data_source_list, _ = \
build_data_source_lists(dataset_proto)
num_paths = len(test_paths)
expected_num_training_sources = num_paths
if validation_data_used:
expected_num_validation_sources = 1
else:
expected_num_validation_sources = 0
image_file_encoding = "fp16"
dataloader = _get_dataloader(dataloader_class=dataloader_class,
training_data_source_list=training_data_source_list,
validation_data_source_list=validation_data_source_list,
augmentation_config=_get_dummy_augmentation_config(),
image_file_encoding=image_file_encoding)
# Check that the data source lists have the expected number of elements.
assert len(dataloader.training_data_sources) == expected_num_training_sources
assert len(
dataloader.validation_data_sources) == expected_num_validation_sources
# Check that data tensor shape matches AugmentationConfig.
assert dataloader.get_data_tensor_shape() == (3, 8, 16)
num_training_samples = dataloader.get_num_samples(True)
num_validation_samples = dataloader.get_num_samples(False)
expected_num_training_samples, expected_num_validation_samples = expected_num_samples
assert num_training_samples == expected_num_training_samples
assert num_validation_samples == expected_num_validation_samples
def _create_dummy_example():
example = dict()
example['target/object_class'] = tf.constant(
['car', 'pedestrian', 'cyclist'])
return example
features1 = {'bytes': _bytes_feature('test')}
expected_features1 = {'bytes': tf.io.VarLenFeature(dtype=tf.string)}
features2 = {'float': _float_feature(1.0)}
expected_features2 = {'float': tf.io.VarLenFeature(dtype=tf.float32)}
features3 = {'int64': _int64_feature(1)}
expected_features3 = {'int64': tf.io.VarLenFeature(dtype=tf.int64)}
features4 = {'bytes1': _bytes_feature('test1'), 'bytes2': _bytes_feature('test2'),
'float1': _float_feature(1.0), 'float2': _float_feature(2.0),
'int64_1': _int64_feature(1), 'int64_2': _int64_feature(2)}
expected_features4 = {'bytes1': tf.io.VarLenFeature(dtype=tf.string),
'bytes2': tf.io.VarLenFeature(dtype=tf.string),
'float1': tf.io.VarLenFeature(dtype=tf.float32),
'float2': tf.io.VarLenFeature(dtype=tf.float32),
'int64_1': tf.io.VarLenFeature(dtype=tf.int64),
'int64_2': tf.io.VarLenFeature(dtype=tf.int64)}
test_cases = [(features1, expected_features1), (features2, expected_features2),
(features3, expected_features3), (features4, expected_features4)]
@pytest.mark.parametrize("features,expected_features", test_cases)
def test_extract_tfrecords_features(tmpdir_factory, features, expected_features):
"""Test that tfrecords features are extracted correctly from a sample file."""
# Generate a sample tfrecords file with the given features.
tffile = str(tmpdir_factory.mktemp('test_data').join("test.tfrecords"))
generate_dummy_labels(path=tffile, num_samples=1, labels=[features])
extracted_features = extract_tfrecords_features(tffile)
assert extracted_features == expected_features
@pytest.mark.parametrize("dataloader_class", [('DefaultDataloader'),
('DriveNetDataloader')])
def test_get_dataset_tensors(tmpdir_factory, dataloader_class):
"""Test dataloader.get_dataset_tensors."""
def get_frame_id(label):
"""Extract frame id from label.
Args:
label: Dataset label.
Returns:
Frame ID (str).
"""
if isinstance(label, list):
return label[0][FRAME_ID_KEY][0]
return np.squeeze(label.frame_id).flatten()[0]
result_dir = tmpdir_factory.mktemp('test_dataloader_dataset')
# Generate a tfrecords file for the test.
image_width = 16
image_height = 12
num_dataset_samples = 3 # Must be > 2.
tfrecords_path, image_directory_path =\
generate_dummy_dataset(tmpdir=result_dir, num_samples=num_dataset_samples,
width=image_width, height=image_height)
dataset_proto = DatasetConfig()
training_data_sources_proto = DataSource()
training_data_sources_proto.tfrecords_path =\
tfrecords_path.replace('*', 'dummy-fold-000-of-002')
training_data_sources_proto.image_directory_path = image_directory_path
dataset_proto.data_sources.extend([training_data_sources_proto])
dataset_proto.validation_data_source.tfrecords_path =\
tfrecords_path.replace('*', 'dummy-fold-001-of-002')
dataset_proto.validation_data_source.image_directory_path = image_directory_path
training_data_source_list, validation_data_source_list, _ = \
build_data_source_lists(dataset_proto)
# Instantiate a dataloader.
augmentation_config = _get_dummy_augmentation_config(width=image_width,
height=image_height,
crop_right=image_width,
crop_bottom=image_height)
dataloader = _get_dataloader(dataloader_class=dataloader_class,
training_data_source_list=training_data_source_list,
validation_data_source_list=validation_data_source_list,
augmentation_config=augmentation_config,
image_file_encoding="png")
# Test training set iteration.
training_images, training_labels, num_training_samples =\
dataloader.get_dataset_tensors(1, True, False)
assert num_training_samples == num_dataset_samples
assert dataloader.get_num_samples(True) == num_dataset_samples
expected_image_shape = (1, 3, image_height, image_width)
# Note that get_data_tensor_shape does not include the batch dimension.
assert dataloader.get_data_tensor_shape() == expected_image_shape[1:]
with tf.compat.v1.Session() as session:
session.run(get_init_ops())
# Loop training set twice to test repeat.
for _ in range(2):
# Loop over training set once, storing the frame IDs.
training_set = set()
for _ in range(num_training_samples):
image, label = session.run([training_images, training_labels])
assert image.shape == expected_image_shape
training_set.add(get_frame_id(label))
# Assert that each frame id is present. Note that the order of the IDs is random.
assert {str(i).encode()
for i in range(num_training_samples)} == training_set
# Test validation set iteration.
validation_images, validation_labels, num_validation_samples =\
dataloader.get_dataset_tensors(1, False, False)
assert num_validation_samples == num_dataset_samples
assert dataloader.get_num_samples(False) == num_dataset_samples
with tf.compat.v1.Session() as session:
session.run(get_init_ops())
# Loop over the validation set twice.
for i in range(num_validation_samples):
image, label = session.run([validation_images, validation_labels])
assert image.shape == expected_image_shape
assert get_frame_id(label) == str(
i % num_validation_samples).encode()
X_COORDS = [20, 24, 24, 20, 12, 16, 16, 12, 16, 20, 20, 16]
Y_COORDS = [16, 16, 26, 26, 26, 26, 36, 36, 20, 20, 22, 22]
COORDINATE_FORMATS = [
{'target/coordinates/x': _float_feature(*X_COORDS),
'target/coordinates/y': _float_feature(*Y_COORDS),
'target/coordinates/index': _int64_feature(0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2)},
{'target/coordinates_x1': _float_feature(20, 12, 16),
'target/coordinates_x2': _float_feature(24, 16, 20),
'target/coordinates_y1': _float_feature(16, 26, 20),
'target/coordinates_y2': _float_feature(26, 36, 22)}
]
@pytest.fixture(params=COORDINATE_FORMATS)
def label_filters_test_data(request):
image_width = 16
image_height = 12
input_labels = [{
'frame/id': _bytes_feature("0"),
'frame/height': _int64_feature(image_height),
'frame/width': _int64_feature(image_width),
'target/object_class': _bytes_feature("car", "car", "car")
}]
input_labels[0].update(request.param)
# After applying stm, bbox1 should be clipped to crop region, bbox2 should be filtered out as
# it is completely outside the crop region, bbox 3 is retained.
expected_output = [
{'frame/id': [b"0"], 'frame/height': [image_height], 'frame/width': [image_width],
'target/object_class': [b"car", b"car"],
'target/coordinates/x': [10, 12, 12, 10, 8, 10, 10, 8],
'target/coordinates/y': [8, 8, 12, 12, 10, 10, 11, 11],
'target/coordinates/index': [0, 0, 0, 0, 1, 1, 1, 1],
'target/bbox_coordinates': [[10., 8., 12., 12.],
[8., 10., 10., 11.]]}]
fixture = {'image_width': image_width, 'image_height': image_height,
'input_labels': input_labels, 'expected_output': expected_output}
return fixture
# TODO(@williamz): How to adapt this test to DriveNetDataloader?
@pytest.mark.parametrize("dataloader_class", ['DefaultDataloader'])
def test_get_dataset_tensors_filters(tmpdir, label_filters_test_data, dataloader_class):
"""Test that get_dataset_tensors() applies label filters correctly."""
# Generate a tfrecords file for the test.
tfrecords_path = str(tmpdir.mkdir("labels"))
image_directory_path = str(tmpdir.mkdir("images"))
training_tfrecords_path = tfrecords_path + '/dummy-fold-000-of-002'
image_width = label_filters_test_data['image_width']
image_height = label_filters_test_data['image_height']
generate_dummy_images(directory=image_directory_path,
num_samples=len(
label_filters_test_data['input_labels']),
height=image_height, width=image_width,
num_channels=3)
generate_dummy_labels(path=training_tfrecords_path,
num_samples=len(
label_filters_test_data['input_labels']),
labels=label_filters_test_data['input_labels'])
dataset_proto = DatasetConfig()
training_data_sources_proto = DataSource()
training_data_sources_proto.tfrecords_path = training_tfrecords_path
training_data_sources_proto.image_directory_path = image_directory_path
dataset_proto.data_sources.extend([training_data_sources_proto])
training_data_source_list, validation_data_source_list, _ = \
build_data_source_lists(dataset_proto)
# Instantiate a dataloader.
augmentation_config = _get_dummy_augmentation_config(width=image_width,
height=image_height,
crop_right=image_width//2,
crop_bottom=image_height//2,
scale_width=0.5,
scale_height=0.5)
dataloader = _get_dataloader(dataloader_class=dataloader_class,
training_data_source_list=training_data_source_list,
validation_data_source_list=validation_data_source_list,
augmentation_config=augmentation_config,
image_file_encoding="png")
_, training_labels, _ = dataloader.get_dataset_tensors(1, True, False)
with tf.compat.v1.Session() as session:
session.run(get_init_ops())
label = session.run(training_labels)
for key, value in label_filters_test_data['expected_output'][0].items():
assert np.array_equal(value, label[0][key])
def test_get_tfrecords_iterator(test_paths):
"""Test that samples from all data sources are iterated over."""
data_sources = []
for i, test_path in enumerate(test_paths):
data_sources.append(
DataSourceConfig(dataset_type='tfrecord',
dataset_files=[test_path],
images_path=str(i),
export_format=None,
split_db_path=None,
split_tags=None))
num_samples = get_num_samples(data_sources, training=False)
tfrecords_iterator, num_samples2 = get_tfrecords_iterator(
data_sources=data_sources,
batch_size=1,
training=False,
repeat=True)
assert num_samples == num_samples2
# Initialize the iterator.
with tf.compat.v1.Session() as session:
session.run(get_init_ops())
# Loop once through the entire dataset, and check that all sources have been iterated over.
samples_per_source = Counter()
for _ in range(num_samples):
sample = session.run(tfrecords_iterator())
img_dir = sample[1][0].decode()
samples_per_source[img_dir] += 1
# Given the test_paths fixture puts 1, 2 and 3 samples in each source, check that this is
# correct.
for i in range(len(test_paths)):
assert samples_per_source[str(i) + '/'] == i + 1
crop_rect = [{'left': 5, 'right': 45, 'top': 5, 'bottom': 45},
{'left': 0, 'right': 0, 'top': 0, 'bottom': 0}]
coordinates = {
'x': [0., 10., 10., 0., 20., 30., 30., 20., 40., 50., 50., 40.],
'y': [0., 0., 10., 10., 20., 20., 30., 30., 40., 40., 50., 50.],
'idx': [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2]
}
truncation_type = [1, 0, 0]
expected_bboxes = [[[5., 20., 40.],
[10., 30., 45.],
[5., 20., 40.],
[10., 30., 45.]],
[[0., 20., 40.],
[10., 30., 50.],
[0., 20., 40.],
[10., 30., 50.]]]
expected_truncation_type = [[1, 0, 1], [1, 0, 0]]
test_cases = [(crop_rect[0], coordinates, truncation_type,
expected_bboxes[0], expected_truncation_type[0]),
(crop_rect[1], coordinates, truncation_type,
expected_bboxes[1], expected_truncation_type[1])]
@pytest.mark.parametrize(
"crop_rect,coordinates,truncation_type,expected_bboxes,expected_truncation_type",
test_cases)
def test_update_example_after_crop(crop_rect, coordinates, truncation_type,
expected_bboxes, expected_truncation_type):
"""Test expected updated bbox and truncation_type can be obtained."""
# Create an example.
example = _create_dummy_example()
example['target/coordinates/x'] = tf.constant(coordinates['x'])
example['target/coordinates/y'] = tf.constant(coordinates['y'])
example['target/coordinates/index'] = tf.constant(coordinates['idx'])
example['target/truncation_type'] = tf.constant(truncation_type)
# Calculate preprocessed truncation.
example = DefaultDataloader._update_example_after_crop(crop_rect['left'],
crop_rect['right'],
crop_rect['top'],
crop_rect['bottom'],
example)
# Compute and compare results.
with tf.compat.v1.Session() as session:
session.run(get_init_ops())
result_coords, result_truncation_type = \
session.run([example['target/bbox_coordinates'],
example['target/truncation_type']])
result_x1, result_y1, result_x2, result_y2 = np.split(
result_coords, 4, axis=1)
# Check the correctness of bbox coordinates.
result_bboxes = [result_x1.tolist(), result_x2.tolist(),
result_y1.tolist(), result_y2.tolist()]
for result, expected in zip(result_bboxes, expected_bboxes):
for x, y in zip(result, expected):
assert np.isclose(x, y)
# Check the correctness of truncation_type.
for x, y in zip(result_truncation_type, expected_truncation_type):
assert x == y
def get_hflip_augmentation_config(hflip_probability):
"""Get an AugmentationConfig for testing horizontal flips."""
assert hflip_probability in {0., 1.0}
# Define some augmentation configurables.
width, height = 12, 34
preprocessing = AugmentationConfig.Preprocessing(
output_image_width=width,
output_image_height=height,
output_image_channel=3,
output_image_min=0,
output_image_max=0,
crop_left=0,
crop_top=0,
crop_right=width,
crop_bottom=height,
min_bbox_width=0.0,
min_bbox_height=0.0,
scale_width=0.0,
scale_height=0.0)
spatial_augmentation = AugmentationConfig.SpatialAugmentation(
hflip_probability=hflip_probability,
vflip_probability=0.0,
zoom_min=0.9,
zoom_max=1.2,
translate_max_x=2.0,
translate_max_y=3.0,
rotate_rad_max=0.0,
rotate_probability=1.0)
return AugmentationConfig(preprocessing, spatial_augmentation, None)
@pytest.mark.parametrize(
"hflip_probability,expected_front_marker,expected_back_marker",
[(0.0, 0.0, 0.5), (1.0, 1.0, 0.5)]
)
@pytest.mark.parametrize("dataloader_class,num_preceding_frames",
[('DefaultDataloader', 0)])
def test_markers_are_augmented(tmpdir,
dataloader_class,
num_preceding_frames,
hflip_probability,
expected_front_marker,
expected_back_marker):
"""Test that get_dataset_tensors produces the expected front / back markers."""
set_random_seed(123)
num_samples, height, width = 1, 10, 20
tfrecords_path, image_directory_path = \
generate_dummy_dataset(tmpdir=tmpdir, num_samples=num_samples,
height=height, width=width)
dataset_proto = DatasetConfig()
training_data_sources_proto = DataSource()
training_data_sources_proto.tfrecords_path = tfrecords_path
training_data_sources_proto.image_directory_path = image_directory_path
dataset_proto.data_sources.extend([training_data_sources_proto])
training_data_source_list, validation_data_source_list, _ = \
build_data_source_lists(dataset_proto)
augmentation_config = get_hflip_augmentation_config(
hflip_probability=hflip_probability)
dataloader = _get_dataloader(
dataloader_class=dataloader_class,
training_data_source_list=training_data_source_list,
image_file_encoding='png',
validation_data_source_list=validation_data_source_list,
augmentation_config=augmentation_config)
labels_tensors = dataloader.get_dataset_tensors(
batch_size=num_samples,
training=True,
enable_augmentation=True)[1]
with tf.compat.v1.Session() as sess:
sess.run(get_init_ops())
labels = sess.run(labels_tensors)
for frame_labels in labels:
assert np.allclose(frame_labels['target/front'], expected_front_marker)
assert np.allclose(frame_labels['target/back'], expected_back_marker)
# Augmentation matrices for depth augmentation test.
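# Depth is expected to scale inversely with the zoom factor encoded in the spatial
# transform (zooming in brings objects closer), while a pure rotation leaves it unchanged.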
identity_stm = np.eye(3, dtype=np.float32)
zoom_out_stm = [[0.5, 0.0, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.0, 1.0]]
zoom_in_stm = [[2.0, 0.0, 0.0],
[0.0, 2.0, 0.0],
[0.0, 0.0, 1.0]]
isq2 = np.sqrt(2.0) / 2.0
rotation_stm = [[isq2, -isq2, 0.0],
[isq2, isq2, 0.0],
[0.0, 0.0, 1.0]]
@pytest.mark.parametrize("label_depth,stm,bw_poly_coeff1,expected_depth",
[
# Regular depth.
([12.34], identity_stm,
BW_POLY_COEFF1_60FC, [12.34]),
# Zoom in 2x.
([8.0], zoom_in_stm,
BW_POLY_COEFF1_60FC, [4.0]),
# Zoom out 2x.
([8.0], zoom_out_stm,
BW_POLY_COEFF1_60FC, [16.0]),
# Rotation 45 degrees.
([8.0], rotation_stm,
BW_POLY_COEFF1_60FC, [8.0]),
# Different FOV camera.
([8.0], identity_stm, 0.001, [14.6675553]),
])
def test_depth_is_augmented(label_depth, stm, bw_poly_coeff1, expected_depth):
"""Test that deth is augmented properly."""
set_random_seed(123)
augmentation_config = _get_dummy_augmentation_config()
dataloader = DefaultDataloader(
training_data_source_list=[],
image_file_encoding='png',
validation_data_source_list=[],
augmentation_config=augmentation_config)
labels_tensors = {
'target/world_bbox_z': tf.constant(label_depth),
'frame/bw_poly_coeff1': tf.constant(bw_poly_coeff1)}
labels_tensors = dataloader._translate_additional_labels(labels_tensors)
labels_tensors = dataloader._apply_augmentations_to_additional_labels(
additional_labels=labels_tensors,
stm=tf.constant(stm))
with tf.compat.v1.Session() as sess:
sess.run(get_init_ops())
labels = sess.run(labels_tensors)
assert np.allclose(labels['target/world_bbox_z'], expected_depth)
class TestMultipleTFRecordsFormats(object):
"""
Test that the Dataloader is able to handle both old and new TFRecords formats, when BOTH
are supplied at the same time.
"""
@pytest.fixture(scope='function')
def dataloader(self, tmpdir):
"""Define a Dataloader instance for this test's purpose."""
set_random_seed(123)
num_samples, height, width = 1, 40, 40
def get_common_labels(i):
return {
'target/object_class': _bytes_feature('car', 'cat', 'cart'),
'target/truncation': _float_feature(0.0, 0.0, 0.0),
'target/occlusion': _int64_feature(0, 0, 0),
'target/front': _float_feature(0.0, 0.0, 0.0),
'target/back': _float_feature(0.5, 0.5, 0.5),
'frame/id': _bytes_feature(str(i)),
'frame/height': _int64_feature(height),
'frame/width': _int64_feature(width)}
# Generate one tfrecord for each type of format.
dataset_proto = DatasetConfig()
for i, coordinate_format in enumerate(COORDINATE_FORMATS):
labels = get_common_labels(i)
labels.update(coordinate_format)
tfrecords_path, image_directory_path = \
generate_dummy_dataset(tmpdir=tmpdir.mkdir("dir%d" % i), num_samples=num_samples,
height=height, width=width,
labels=[labels])
training_data_sources_proto = DataSource()
training_data_sources_proto.tfrecords_path = tfrecords_path
training_data_sources_proto.image_directory_path = image_directory_path
dataset_proto.data_sources.extend([training_data_sources_proto])
training_data_source_list, validation_data_source_list, _ = \
build_data_source_lists(dataset_proto)
augmentation_config = _get_dummy_augmentation_config(
width=width, height=height, crop_right=width, crop_bottom=height)
dataloader = DefaultDataloader(
training_data_source_list=training_data_source_list,
image_file_encoding='png',
validation_data_source_list=validation_data_source_list,
augmentation_config=augmentation_config)
return dataloader
def test_multiple_formats(self, dataloader):
"""Test that the labels coming out of the dataloader are as expected."""
total_num_samples = dataloader.get_num_samples(training=True)
labels_tensors = dataloader.get_dataset_tensors(
batch_size=1, training=True, enable_augmentation=True)[1]
frame_ids = set()
with tf.compat.v1.Session() as sess:
sess.run(get_init_ops())
# 1 sample from each source.
for _ in range(total_num_samples):
batch_labels = sess.run(labels_tensors)
for frame_labels in batch_labels:
frame_ids.add(frame_labels["frame/id"][0])
np.testing.assert_allclose(
frame_labels["target/coordinates/x"],
np.array(X_COORDS, dtype=np.float32))
np.testing.assert_allclose(
frame_labels["target/coordinates/y"],
np.array(Y_COORDS, dtype=np.float32))
assert frame_ids == {b"0", b"1"}
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/dataloader/tests/test_dataloader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test data loading."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import Counter
import os
import numpy as np
import pytest
from six.moves import range
from six.moves import zip
import tensorflow as tf
from nvidia_tao_tf1.core.utils import set_random_seed
from nvidia_tao_tf1.cv.detectnet_v2.common.dataio.converter_lib import (
_bytes_feature,
_float_feature
)
from nvidia_tao_tf1.cv.detectnet_v2.common.dataio.converter_lib import _int64_feature
from nvidia_tao_tf1.cv.detectnet_v2.common.graph import get_init_ops
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.augmentation.augmentation_config import (
AugmentationConfig
)
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.legacy_dataloader import FRAME_ID_KEY
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.legacy_dataloader import LegacyDataloader as \
DefaultDataloader
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.tests.utilities.data_generation import (
generate_dummy_dataset,
generate_dummy_images,
generate_dummy_labels
)
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.utilities import extract_tfrecords_features
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.utilities import get_num_samples
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.utilities import get_tfrecords_iterator
@pytest.fixture(scope='module')
def test_paths(tmpdir_factory):
"""Test paths."""
# Generate three folds with 1, 2, and 3 samples, respectively.
tmpdir = tmpdir_factory.mktemp('test_data')
folds = [str(tmpdir.join("data.tfrecords-fold-00%d-of-003" % i)) for i in range(3)]
for num_examples, path in enumerate(folds, 1):
generate_dummy_labels(path, num_examples)
return folds
@pytest.fixture
def mock_generate_tensors(mocker):
"""Skip the generation of input images and ground truth label tensors."""
def make_mock(dataloader):
mock = mocker.patch.object(dataloader, '_generate_images_and_ground_truth_labels')
dummy_tensor = tf.constant([0.0])
mock.return_value = dummy_tensor, dummy_tensor
return mock
return make_mock
def _get_dataloader(dataloader_class, training_data_source_list, target_class_mapping,
validation_fold, validation_data_source_list, augmentation_config):
"""Helper function for creating a dataloader class."""
if dataloader_class == 'DefaultDataloader':
dataloader = DefaultDataloader(
training_data_source_list=training_data_source_list,
target_class_mapping=target_class_mapping,
image_file_encoding="fp16",
validation_fold=validation_fold,
validation_data_source_list=validation_data_source_list,
augmentation_config=augmentation_config
)
    # TODO: add assertions covering the other dataloader types (temporal and point cloud).
return dataloader
@pytest.mark.parametrize("dataloader_class,validation_fold,expected_num_samples",
[('DefaultDataloader', 0, (5, 1)),
('DefaultDataloader', 1, (4, 2)),
('DefaultDataloader', None, (6, 0))])
def test_dataloader_kfold_split(mock_generate_tensors, test_paths, dataloader_class,
validation_fold, expected_num_samples):
"""Test Dataloader returning correct values when doing kfold validation split."""
training_data_source_list = [(os.path.join(os.path.dirname(test_paths[0]), "*"), "")]
target_class_mapping = dict()
validation_data_source_list = None
augmentation_config = None
dataloader = _get_dataloader(dataloader_class, training_data_source_list, target_class_mapping,
validation_fold, validation_data_source_list, augmentation_config)
mock_generate_tensors(dataloader)
# Check that the data source lists have the expected number of elements.
if validation_fold is not None:
expected_num_training_sources, expected_num_validation_sources = 1, 1
else:
expected_num_training_sources, expected_num_validation_sources = 1, 0
assert len(dataloader.training_data_sources) == expected_num_training_sources
assert len(dataloader.validation_data_sources) == expected_num_validation_sources
_, _, num_training_samples = dataloader.get_dataset_tensors(10, True, False)
_, _, num_validation_samples = dataloader.get_dataset_tensors(10, False, False)
expected_num_training_samples, expected_num_validation_samples = expected_num_samples
assert num_training_samples == expected_num_training_samples
assert num_validation_samples == expected_num_validation_samples
@pytest.mark.parametrize("dataloader_class,validation_data_used,expected_num_samples,num_channels",
[('DefaultDataloader', True, (6, 1), 1),
('DefaultDataloader', True, (6, 1), 3),
('DefaultDataloader', False, (6, 0), 1),
('DefaultDataloader', False, (6, 0), 3)])
def test_dataloader_path_split(mock_generate_tensors, test_paths, dataloader_class,
validation_data_used, expected_num_samples, num_channels):
"""Test Dataloader returning correct values when specifying path to validation files."""
training_data_source_list = [(fold, "") for fold in test_paths]
num_paths = len(test_paths)
expected_num_training_sources = num_paths
if validation_data_used:
validation_data_source_list = [(test_paths[0], "")]
expected_num_validation_sources = 1
else:
validation_data_source_list = None
expected_num_validation_sources = 0
preprocessing = AugmentationConfig.Preprocessing(
output_image_width=16, output_image_height=8,
output_image_channel=num_channels,
output_image_min=0, output_image_max=0,
crop_left=0, crop_top=0, crop_right=16, crop_bottom=8,
min_bbox_width=0., min_bbox_height=0., scale_width=0., scale_height=0.)
augmentation_config = AugmentationConfig(preprocessing, None, None)
target_class_mapping = dict()
validation_fold = None
dataloader = _get_dataloader(dataloader_class, training_data_source_list, target_class_mapping,
validation_fold, validation_data_source_list, augmentation_config)
mock_generate_tensors(dataloader)
# Check that the data source lists have the expected number of elements.
assert len(dataloader.training_data_sources) == expected_num_training_sources
assert len(dataloader.validation_data_sources) == expected_num_validation_sources
# Check that data tensor shape matches AugmentationConfig.
assert dataloader.get_data_tensor_shape() == \
(augmentation_config.preprocessing.output_image_channel, 8, 16)
num_training_samples = dataloader.get_num_samples(True)
num_validation_samples = dataloader.get_num_samples(False)
_, _, num_training_samples2 = dataloader.get_dataset_tensors(10, True, False)
_, _, num_validation_samples2 = dataloader.get_dataset_tensors(10, False, False)
# Check that get_num_samples and get_dataset_tensors return the same number of samples.
assert num_training_samples == num_training_samples2
assert num_validation_samples == num_validation_samples2
expected_num_training_samples, expected_num_validation_samples = expected_num_samples
assert num_training_samples == expected_num_training_samples
assert num_validation_samples == expected_num_validation_samples
def _create_dummy_example():
example = dict()
example['target/object_class'] = tf.constant(['car', 'pedestrian', 'cyclist'])
return example
@pytest.mark.parametrize("dataloader_class", [('DefaultDataloader')])
def test_target_class_mapping(test_paths, dataloader_class):
"""Target class mapping."""
training_data_source_list = [(fold, "") for fold in test_paths]
target_class_mapping = {b'car': b'car', b'cyclist': b'cyclist', b'pedestrian': b'cyclist'}
validation_fold = None
validation_data_source_list = None
augmentation_config = None
dataloader = _get_dataloader(dataloader_class, training_data_source_list, target_class_mapping,
validation_fold, validation_data_source_list, augmentation_config)
example = _create_dummy_example()
example = dataloader._map_to_model_target_classes(example, target_class_mapping)
with tf.Session() as session:
session.run(get_init_ops())
example = session.run(example)
object_classes = example['target/object_class']
assert set(object_classes) == set(target_class_mapping.values())
features1 = {'bytes': _bytes_feature('test')}
expected_features1 = {'bytes': tf.VarLenFeature(dtype=tf.string)}
features2 = {'float': _float_feature(1.0)}
expected_features2 = {'float': tf.VarLenFeature(dtype=tf.float32)}
features3 = {'int64': _int64_feature(1)}
expected_features3 = {'int64': tf.VarLenFeature(dtype=tf.int64)}
features4 = {'bytes1': _bytes_feature('test1'), 'bytes2': _bytes_feature('test2'),
'float1': _float_feature(1.0), 'float2': _float_feature(2.0),
'int64_1': _int64_feature(1), 'int64_2': _int64_feature(2)}
expected_features4 = {'bytes1': tf.VarLenFeature(dtype=tf.string),
'bytes2': tf.VarLenFeature(dtype=tf.string),
'float1': tf.VarLenFeature(dtype=tf.float32),
'float2': tf.VarLenFeature(dtype=tf.float32),
'int64_1': tf.VarLenFeature(dtype=tf.int64),
'int64_2': tf.VarLenFeature(dtype=tf.int64)}
test_cases = [(features1, expected_features1), (features2, expected_features2),
(features3, expected_features3), (features4, expected_features4)]
@pytest.mark.parametrize("features,expected_features", test_cases)
def test_extract_tfrecords_features(tmpdir_factory, features, expected_features):
"""Test that tfrecords features are extracted correctly from a sample file."""
# Generate a sample tfrecords file with the given features.
tffile = str(tmpdir_factory.mktemp('test_data').join("test.tfrecords"))
generate_dummy_labels(tffile, 1, labels=[features])
extracted_features = extract_tfrecords_features(tffile)
assert extracted_features == expected_features
@pytest.mark.parametrize("num_channels", [1, 3])
def test_get_dataset_tensors(tmpdir_factory, num_channels):
"""Test dataloader.get_dataset_tensors."""
def get_frame_id(label):
"""Extract frame id from label.
Args:
label: Dataset label.
Returns:
Frame ID (str).
"""
return label[0][FRAME_ID_KEY][0]
result_dir = tmpdir_factory.mktemp('test_dataloader_dataset')
# Generate a tfrecords file for the test.
image_width = 16
image_height = 12
num_dataset_samples = 3 # Must be > 2.
tfrecords_path, image_directory_path = generate_dummy_dataset(result_dir, num_dataset_samples,
image_width, image_height,
labels=None,
num_channels=num_channels)
training_tfrecords_path = tfrecords_path.replace('*', 'dummy-fold-000-of-002')
training_data_source_list = [(training_tfrecords_path, image_directory_path)]
validation_tfrecords_path = tfrecords_path.replace('*', 'dummy-fold-001-of-002')
validation_data_source_list = [(validation_tfrecords_path, image_directory_path)]
# Instantiate a DefaultDataloader.
preprocessing = AugmentationConfig.Preprocessing(
output_image_width=image_width,
output_image_height=image_height,
output_image_channel=num_channels,
output_image_min=0,
output_image_max=0,
crop_left=0, crop_top=0,
crop_right=image_width, crop_bottom=image_height,
min_bbox_width=0., min_bbox_height=0., scale_width=0., scale_height=0.)
augmentation_config = AugmentationConfig(preprocessing=preprocessing,
spatial_augmentation=None,
color_augmentation=None)
dataloader = DefaultDataloader(
training_data_source_list=training_data_source_list,
target_class_mapping=dict(),
image_file_encoding="png",
validation_fold=None,
validation_data_source_list=validation_data_source_list,
augmentation_config=augmentation_config
)
# Test training set iteration.
training_images, training_labels, num_training_samples =\
dataloader.get_dataset_tensors(1, True, False)
assert num_training_samples == num_dataset_samples
expected_image_shape = (1, augmentation_config.preprocessing.output_image_channel, image_height,
image_width)
# Note that get_data_tensor_shape does not include the batch dimension.
assert dataloader.get_data_tensor_shape() == expected_image_shape[1:]
with tf.Session() as session:
session.run(get_init_ops())
# Loop training set twice to test repeat.
for _ in range(2):
# Loop over training set once, storing the frame IDs.
training_set = set()
for _ in range(num_training_samples):
image, label = session.run([training_images, training_labels])
assert image.shape == expected_image_shape
training_set.add(get_frame_id(label))
# Assert that each frame id is present. Note that the order of the IDs is random.
assert set(bytes(str(i), 'utf-8') for i in range(num_training_samples)) == training_set
# Test validation set iteration.
validation_images, validation_labels, num_validation_samples =\
dataloader.get_dataset_tensors(1, False, False)
assert num_validation_samples == num_dataset_samples
with tf.Session() as session:
session.run(get_init_ops())
# Loop over the validation set twice.
for i in range(num_validation_samples):
image, label = session.run([validation_images, validation_labels])
assert image.shape == expected_image_shape
assert get_frame_id(label) == bytes(str(i % num_validation_samples), 'utf-8')
@pytest.fixture
def label_filters_test_data():
image_width = 16
image_height = 12
input_labels = [
{'frame/id': _bytes_feature("xx"), 'frame/height': _int64_feature(image_height),
'frame/width': _int64_feature(image_width),
'target/object_class': _bytes_feature("car", "car", "car"),
'target/coordinates_x1': _float_feature(20, 12, 16),
'target/coordinates_x2': _float_feature(24, 16, 20),
'target/coordinates_y1': _float_feature(16, 26, 20),
'target/coordinates_y2': _float_feature(26, 36, 22)}]
# After applying stm, bbox1 should be clipped to crop region, bbox2 should be filtered out as
# it is completely outside the crop region, bbox 3 is retained.
expected_output = [
{'frame/id': [b"xx"], 'frame/height': [image_height], 'frame/width': [image_width],
'target/object_class': [b"car", b"car"], 'target/coordinates_x1': [10., 8.],
'target/coordinates_x2':[12., 10.], 'target/coordinates_y1': [8., 10.],
'target/coordinates_y2': [12., 11.],
'target/bbox_coordinates': [[10., 8., 12., 12.],
[8., 10., 10., 11.]]}]
fixture = {'image_width': image_width, 'image_height': image_height,
'input_labels': input_labels, 'expected_output': expected_output
}
return fixture
@pytest.mark.parametrize("num_channels", [1, 3])
def test_get_dataset_tensors_filters(tmpdir, label_filters_test_data, num_channels):
"""Test that get_dataset_tensors() applies label filters correctly."""
# Generate a tfrecords file for the test.
tfrecords_path = str(tmpdir.mkdir("labels"))
image_directory_path = str(tmpdir.mkdir("images"))
training_tfrecords_path = tfrecords_path + '/dummy-fold-000-of-002'
image_width = label_filters_test_data['image_width']
image_height = label_filters_test_data['image_height']
generate_dummy_images(image_directory_path, len(label_filters_test_data['input_labels']),
image_width, image_height, num_channels)
generate_dummy_labels(training_tfrecords_path, len(label_filters_test_data['input_labels']),
labels=label_filters_test_data['input_labels'])
training_data_source_list = [(training_tfrecords_path, image_directory_path)]
# Instantiate a DefaultDataloader.
preprocessing = AugmentationConfig.Preprocessing(
output_image_width=image_width,
output_image_height=image_height,
output_image_channel=num_channels,
output_image_min=0,
output_image_max=0,
crop_left=0, crop_top=0,
crop_right=image_width//2, crop_bottom=image_height//2,
min_bbox_width=0., min_bbox_height=0., scale_width=0.5, scale_height=0.5)
augmentation_config = AugmentationConfig(preprocessing=preprocessing,
spatial_augmentation=None,
color_augmentation=None)
dataloader = DefaultDataloader(
training_data_source_list=training_data_source_list,
target_class_mapping=dict(),
image_file_encoding="png",
validation_fold=None,
validation_data_source_list=None,
augmentation_config=augmentation_config
)
_, training_labels, _ = dataloader.get_dataset_tensors(1, True, False)
with tf.Session() as session:
session.run(get_init_ops())
label = session.run([training_labels])
for key, value in label_filters_test_data['expected_output'][0].items():
assert np.array_equal(value, label[0][0][key])
def test_get_tfrecords_iterator(mock_generate_tensors, test_paths):
"""Test that samples from all data sources are iterated over."""
data_sources = []
for i, test_path in enumerate(test_paths):
data_source = ([test_path], str(i))
data_sources.append(data_source)
num_samples = get_num_samples(data_sources, training=False)
tfrecords_iterator, num_samples2 = get_tfrecords_iterator(
data_sources=data_sources,
batch_size=1,
training=False,
repeat=True)
assert num_samples == num_samples2
# Initialize the iterator.
with tf.Session() as session:
session.run(get_init_ops())
# Loop once through the entire dataset, and check that all sources have been iterated over.
samples_per_source = Counter()
for _ in range(num_samples):
sample = session.run(tfrecords_iterator())
img_dir = sample[1][0]
samples_per_source[img_dir] += 1
# Given the test_paths fixture puts 1, 2 and 3 samples in each source, check that this is
# correct.
for i in range(len(test_paths)):
assert samples_per_source[bytes(str(i), 'utf-8') + b'/'] == i + 1
crop_rect = [{'left': 5, 'right': 45, 'top': 5, 'bottom': 45},
{'left': 0, 'right': 0, 'top': 0, 'bottom': 0}]
bboxes = {'x1': [0., 20., 40.],
'x2': [10., 30., 50.],
'y1': [0., 20., 40.],
'y2': [10., 30., 50.]}
truncation_type = [1, 0, 0]
expected_bboxes = [[[5., 20., 40.],
[10., 30., 45.],
[5., 20., 40.],
[10., 30., 45.]],
[[0., 20., 40.],
[10., 30., 50.],
[0., 20., 40.],
[10., 30., 50.]]]
expected_truncation_type = [[1, 0, 1], [1, 0, 0]]
test_cases = [(crop_rect[0], bboxes, truncation_type,
expected_bboxes[0], expected_truncation_type[0]),
(crop_rect[1], bboxes, truncation_type,
expected_bboxes[1], expected_truncation_type[1])]
@pytest.mark.parametrize(
"crop_rect,bboxes,truncation_type,expected_bboxes,expected_truncation_type",
test_cases)
def test_update_example_after_crop(crop_rect, bboxes, truncation_type,
expected_bboxes, expected_truncation_type):
"""Test expected updated bbox and truncation_type can be obtained."""
# Create an example.
example = _create_dummy_example()
example['target/bbox_coordinates'] = tf.stack([bboxes['x1'], bboxes['y1'], bboxes['x2'],
bboxes['y2']], axis=1)
example['target/truncation_type'] = tf.constant(truncation_type)
# Calculate preprocessed truncation.
example = DefaultDataloader._update_example_after_crop(crop_rect['left'],
crop_rect['right'],
crop_rect['top'],
crop_rect['bottom'],
example)
# Compute and compare results.
with tf.Session() as session:
session.run(get_init_ops())
result_coords, result_truncation_type = \
session.run([example['target/bbox_coordinates'],
example['target/truncation_type']])
result_x1, result_y1, result_x2, result_y2 = np.split(result_coords, 4, axis=1)
# Check the correctness of bbox coordinates.
result_bboxes = [result_x1.tolist(), result_x2.tolist(),
result_y1.tolist(), result_y2.tolist()]
for result, expected in zip(result_bboxes, expected_bboxes):
for x, y in zip(result, expected):
assert np.isclose(x, y)
# Check the correctness of truncation_type.
for x, y in zip(result_truncation_type, expected_truncation_type):
assert x == y
def get_hflip_augmentation_config(hflip_probability, num_channels):
"""Get an AugmentationConfig for testing horizontal flips."""
assert hflip_probability in {0., 1.0}
# Define some augmentation configurables.
    width, height = 12, 34
preprocessing = AugmentationConfig.Preprocessing(
output_image_width=width,
output_image_height=height,
output_image_channel=num_channels,
output_image_min=0,
output_image_max=0,
crop_left=0,
crop_top=0,
crop_right=width,
crop_bottom=height,
min_bbox_width=0.0,
min_bbox_height=0.0,
scale_width=0.0,
scale_height=0.0)
spatial_augmentation = AugmentationConfig.SpatialAugmentation(
hflip_probability=hflip_probability,
vflip_probability=0.0,
zoom_min=0.9,
zoom_max=1.2,
translate_max_x=2.0,
translate_max_y=3.0,
rotate_rad_max=0.0,
rotate_probability=1.0)
return AugmentationConfig(preprocessing, spatial_augmentation, None)
@pytest.mark.parametrize(
"hflip_probability,expected_orientation,num_channels", [(0.0, np.pi / 4., 1),
(0.0, np.pi / 4., 3),
(1.0, -np.pi / 4., 1),
(1.0, -np.pi / 4., 3)]
)
def test_get_dataset_tensors_produces_additional_labels(tmpdir,
hflip_probability,
expected_orientation,
num_channels):
"""Test that get_dataset_tensors produces the expected additional tensors."""
set_random_seed(123)
    num_samples, height, width = 1, 10, 20
tfrecords_path, image_directory_path = \
generate_dummy_dataset(tmpdir=tmpdir,
num_samples=num_samples,
height=height,
width=width,
labels=None,
num_channels=num_channels)
training_data_source_list = [(tfrecords_path, image_directory_path)]
target_class_mapping = dict()
validation_data_source_list = None
validation_fold = 1
augmentation_config = get_hflip_augmentation_config(hflip_probability=hflip_probability,
num_channels=num_channels)
dataloader = DefaultDataloader(
training_data_source_list=training_data_source_list,
target_class_mapping=target_class_mapping,
image_file_encoding='png',
validation_fold=validation_fold,
validation_data_source_list=validation_data_source_list,
augmentation_config=augmentation_config)
labels_tensors = dataloader.get_dataset_tensors(
batch_size=num_samples,
training=True,
enable_augmentation=True)[1]
with tf.Session() as sess:
sess.run(get_init_ops())
labels = sess.run(labels_tensors)
for frame_labels in labels:
assert np.allclose(frame_labels['target/orientation'], expected_orientation)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/dataloader/tests/test_legacy_dataloader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions to generate data for tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from PIL import Image
from six.moves import range
import tensorflow as tf
from nvidia_tao_tf1.cv.detectnet_v2.common.dataio.converter_lib import _bytes_feature
from nvidia_tao_tf1.cv.detectnet_v2.common.dataio.converter_lib import _float_feature
from nvidia_tao_tf1.cv.detectnet_v2.common.dataio.converter_lib import _int64_feature
def generate_dummy_images(directory, num_samples, height, width, num_channels):
"""Generate num_samples dummy images of given shape and store them to the given directory.
Images will have the shape [H, W, 3] and the values are in [0, 255]. The images will be
stored as 0.png, 1.png, etc.
Args:
directory: Directory where images are stored.
num_samples: Number of images to generate.
height, width: Image shape.
"""
for i in range(num_samples):
img_array = np.random.rand(height, width, 3) * 255
im = Image.fromarray(img_array.astype('uint8')).convert('RGBA')
if num_channels == 1:
if im.mode in ('RGBA', 'LA') or (im.mode == 'P' and 'transparency' in im.info):
bg_colour = (255, 255, 255)
# Need to convert to RGBA if LA format due to a bug in PIL
alpha = im.convert('RGBA').split()[-1]
                # Create a new background image of our matte color.
                # Must be RGBA because paste requires both images to have the same format.
                bg = Image.new("RGBA", im.size, bg_colour + (255,))
                bg.paste(im, mask=alpha)
                im = bg
            im = im.convert('L')
img_path = os.path.join(str(directory), '%d.png' % i)
im.save(img_path)
def generate_dummy_labels(path, num_samples, height=0, width=0, labels=None):
"""Generate num_samples dummy labels and store them to a tfrecords file in the given path.
Args:
path: Path to the generated tfrecords file.
num_samples: Labels will be generated for this many images.
height, width: Optional image shape.
labels: Optional, list of custom labels to write into the tfrecords file. The user is
expected to provide a label for each sample. Each label is dictionary with the label
name as the key and value as the corresponding tf.train.Feature.
"""
if labels is None:
labels = [{'target/object_class': _bytes_feature('car'),
'target/coordinates_x1': _float_feature(1.0),
'target/coordinates_y1': _float_feature(1.0),
'target/coordinates_x2': _float_feature(1.0),
'target/coordinates_y2': _float_feature(1.0),
'target/truncation': _float_feature(0.0),
'target/occlusion': _int64_feature(0),
'target/front': _float_feature(0.0),
'target/back': _float_feature(0.5),
'frame/id': _bytes_feature(str(i)),
'frame/height': _int64_feature(height),
'frame/width': _int64_feature(width)} for i in range(num_samples)]
else:
num_custom_labels = len(labels)
assert num_custom_labels == num_samples, \
"Expected %d custom labels, got %d." % (num_samples, num_custom_labels)
writer = tf.python_io.TFRecordWriter(str(path))
for label in labels:
features = tf.train.Features(feature=label)
example = tf.train.Example(features=features)
writer.write(example.SerializeToString())
writer.close()
def generate_dummy_dataset(tmpdir, num_samples, height, width, labels=None, num_channels=3):
"""Construct a dummy dataset and return paths to it.
Args:
tmpdir (pytest tmpdir): Temporary folder under which the dummy dataset will be created.
num_samples (int): Number of samples to use for the dataset.
height (int): Height of the dummy images.
width (int): Width for the dummy images.
labels: Optional, list of custom labels to write into the tfrecords file. The user is
expected to provide a label for each sample. Each label is a dictionary with the label
name as the key and value as the corresponding tf.train.Feature.
Returns:
tfrecords_path (str): Path to generated tfrecords.
image_directory_path (str): Path to generated images.
"""
images_path = tmpdir.mkdir("images")
labels_path = tmpdir.mkdir("labels")
validation_tfrecords = str(labels_path.join('dummy-fold-000-of-002'))
training_tfrecords = str(labels_path.join('dummy-fold-001-of-002'))
generate_dummy_images(images_path, num_samples, height, width, num_channels)
generate_dummy_labels(training_tfrecords, num_samples, height, width, labels=labels)
generate_dummy_labels(validation_tfrecords, num_samples, height, width, labels=labels)
tfrecords_path = str(labels_path.join('*'))
image_directory_path = str(images_path)
return tfrecords_path, image_directory_path
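# A minimal usage sketch of these helpers (illustrative only; `tmpdir` is assumed to be a
# pytest tmpdir fixture and the shapes are arbitrary):
#
#   def test_something(tmpdir):
#       tfrecords_path, image_dir = generate_dummy_dataset(
#           tmpdir=tmpdir, num_samples=2, height=12, width=16, num_channels=3)
#       # tfrecords_path is a glob ('<labels>/*') matching the two generated folds, and
#       # image_dir contains 0.png and 1.png.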
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/dataloader/tests/utilities/data_generation.py |
"""IVA EfficientDet."""
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Weighted fusion layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
class WeightedFusion(tf.keras.layers.Layer):
"""Weighted Fusion Layer."""
def __init__(self, epsilon=1e-4, **kwargs):
"""Init."""
super(WeightedFusion, self).__init__(**kwargs)
self.epsilon = epsilon
def build(self, input_shape):
"""Build."""
num_in = len(input_shape)
self.w = self.add_weight(name=self.name,
shape=(num_in,),
initializer=tf.keras.initializers.constant(1 / num_in),
trainable=True,
dtype=tf.float32)
def call(self, inputs, **kwargs):
"""Call."""
w = tf.keras.activations.relu(self.w)
x = tf.reduce_sum([w[i] * inputs[i] for i in range(len(inputs))], axis=0)
x = x / (tf.reduce_sum(w) + self.epsilon)
return x
def compute_output_shape(self, input_shape):
"""Compute output shape."""
return input_shape[0]
def get_config(self):
"""Config."""
config = super(WeightedFusion, self).get_config()
config.update({
'epsilon': self.epsilon
})
return config
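# A minimal usage sketch (illustrative only; the shapes and layer name below are arbitrary).
# Two same-shaped feature maps are fused into a single map:
#
#   p4 = tf.keras.Input(shape=(32, 32, 64))
#   p5 = tf.keras.Input(shape=(32, 32, 64))
#   fused = WeightedFusion(name='weighted_fusion_1')([p4, p5])
#   model = tf.keras.Model(inputs=[p4, p5], outputs=fused)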
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/layers/weighted_fusion_layer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientDet custom layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow import keras
class ImageResizeLayer(keras.layers.Layer):
    '''A Keras layer to wrap the tf.compat.v1.image.resize_nearest_neighbor function.'''
def __init__(self,
target_height=128,
target_width=128,
**kwargs):
'''Init function.'''
self.height = target_height
self.width = target_width
super(ImageResizeLayer, self).__init__(**kwargs)
def call(self, inputs):
"""Resize."""
return tf.cast(tf.compat.v1.image.resize_nearest_neighbor(
tf.cast(inputs, tf.float32), [self.height, self.width]), dtype=inputs.dtype)
def get_config(self):
'''Keras layer get config.'''
config = {
'target_height': self.height,
'target_width': self.width,
}
base_config = super(ImageResizeLayer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
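# A minimal usage sketch (illustrative only; the NHWC shape below is arbitrary). The layer
# resizes a feature map to a fixed spatial size with nearest-neighbor interpolation:
#
#   feat = tf.keras.Input(shape=(64, 64, 40))
#   upsampled = ImageResizeLayer(target_height=128, target_width=128)(feat)
#   # upsampled has static shape (None, 128, 128, 40)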
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/layers/image_resize_layer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IVA EfficientDet layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/layers/__init__.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/efficientdet/proto/training_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/efficientdet/proto/training_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n:nvidia_tao_tf1/cv/efficientdet/proto/training_config.proto\"\xf9\x04\n\x0eTrainingConfig\x12\x18\n\x10train_batch_size\x18\x01 \x01(\r\x12\x1b\n\x13iterations_per_loop\x18\x02 \x01(\r\x12\x0f\n\x07use_xla\x18\x03 \x01(\x08\x12\x17\n\x0f\x64isable_logging\x18\x04 \x01(\x08\x12\x12\n\ncheckpoint\x18\x05 \x01(\t\x12\x15\n\rstop_at_epoch\x18\x06 \x01(\r\x12\x0e\n\x06resume\x18\x07 \x01(\x08\x12\x19\n\x11\x63heckpoint_period\x18\x08 \x01(\r\x12\x1b\n\x13keep_checkpoint_max\x18\t \x01(\r\x12\x1e\n\x16num_examples_per_epoch\x18\n \x01(\r\x12\x12\n\nnum_epochs\x18\x0b \x01(\r\x12!\n\x19skip_checkpoint_variables\x18\x0c \x01(\t\x12\x1a\n\x12profile_skip_steps\x18\r \x01(\r\x12\x16\n\x0etf_random_seed\x18\x0e \x01(\r\x12\x1c\n\x14moving_average_decay\x18\x0f \x01(\x02\x12\x17\n\x0flr_warmup_epoch\x18\x10 \x01(\x02\x12\x16\n\x0elr_warmup_init\x18\x11 \x01(\x02\x12\x15\n\rlearning_rate\x18\x12 \x01(\x02\x12\x0b\n\x03\x61mp\x18\x13 \x01(\x08\x12\x17\n\x0fl2_weight_decay\x18\x14 \x01(\x02\x12\x17\n\x0fl1_weight_decay\x18\x15 \x01(\x02\x12\x19\n\x11pruned_model_path\x18\x16 \x01(\t\x12\x1b\n\x13\x63lip_gradients_norm\x18\x17 \x01(\x02\x12\x10\n\x08momentum\x18\x18 \x01(\x02\x12\x19\n\x11logging_frequency\x18\x19 \x01(\rb\x06proto3')
)
_TRAININGCONFIG = _descriptor.Descriptor(
name='TrainingConfig',
full_name='TrainingConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='train_batch_size', full_name='TrainingConfig.train_batch_size', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='iterations_per_loop', full_name='TrainingConfig.iterations_per_loop', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='use_xla', full_name='TrainingConfig.use_xla', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='disable_logging', full_name='TrainingConfig.disable_logging', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='checkpoint', full_name='TrainingConfig.checkpoint', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stop_at_epoch', full_name='TrainingConfig.stop_at_epoch', index=5,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='resume', full_name='TrainingConfig.resume', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='checkpoint_period', full_name='TrainingConfig.checkpoint_period', index=7,
number=8, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='keep_checkpoint_max', full_name='TrainingConfig.keep_checkpoint_max', index=8,
number=9, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_examples_per_epoch', full_name='TrainingConfig.num_examples_per_epoch', index=9,
number=10, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_epochs', full_name='TrainingConfig.num_epochs', index=10,
number=11, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='skip_checkpoint_variables', full_name='TrainingConfig.skip_checkpoint_variables', index=11,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='profile_skip_steps', full_name='TrainingConfig.profile_skip_steps', index=12,
number=13, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tf_random_seed', full_name='TrainingConfig.tf_random_seed', index=13,
number=14, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='moving_average_decay', full_name='TrainingConfig.moving_average_decay', index=14,
number=15, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='lr_warmup_epoch', full_name='TrainingConfig.lr_warmup_epoch', index=15,
number=16, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='lr_warmup_init', full_name='TrainingConfig.lr_warmup_init', index=16,
number=17, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='learning_rate', full_name='TrainingConfig.learning_rate', index=17,
number=18, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='amp', full_name='TrainingConfig.amp', index=18,
number=19, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='l2_weight_decay', full_name='TrainingConfig.l2_weight_decay', index=19,
number=20, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='l1_weight_decay', full_name='TrainingConfig.l1_weight_decay', index=20,
number=21, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pruned_model_path', full_name='TrainingConfig.pruned_model_path', index=21,
number=22, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='clip_gradients_norm', full_name='TrainingConfig.clip_gradients_norm', index=22,
number=23, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='momentum', full_name='TrainingConfig.momentum', index=23,
number=24, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='logging_frequency', full_name='TrainingConfig.logging_frequency', index=24,
number=25, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=63,
serialized_end=696,
)
DESCRIPTOR.message_types_by_name['TrainingConfig'] = _TRAININGCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TrainingConfig = _reflection.GeneratedProtocolMessageType('TrainingConfig', (_message.Message,), dict(
DESCRIPTOR = _TRAININGCONFIG,
__module__ = 'nvidia_tao_tf1.cv.efficientdet.proto.training_config_pb2'
# @@protoc_insertion_point(class_scope:TrainingConfig)
))
_sym_db.RegisterMessage(TrainingConfig)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/proto/training_config_pb2.py |
tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/proto/__init__.py |
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/efficientdet/proto/experiment.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from nvidia_tao_tf1.cv.efficientdet.proto import aug_config_pb2 as nvidia__tao__tf1_dot_cv_dot_efficientdet_dot_proto_dot_aug__config__pb2
from nvidia_tao_tf1.cv.efficientdet.proto import dataset_config_pb2 as nvidia__tao__tf1_dot_cv_dot_efficientdet_dot_proto_dot_dataset__config__pb2
from nvidia_tao_tf1.cv.efficientdet.proto import eval_config_pb2 as nvidia__tao__tf1_dot_cv_dot_efficientdet_dot_proto_dot_eval__config__pb2
from nvidia_tao_tf1.cv.efficientdet.proto import model_config_pb2 as nvidia__tao__tf1_dot_cv_dot_efficientdet_dot_proto_dot_model__config__pb2
from nvidia_tao_tf1.cv.efficientdet.proto import training_config_pb2 as nvidia__tao__tf1_dot_cv_dot_efficientdet_dot_proto_dot_training__config__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/efficientdet/proto/experiment.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n5nvidia_tao_tf1/cv/efficientdet/proto/experiment.proto\x1a\x35nvidia_tao_tf1/cv/efficientdet/proto/aug_config.proto\x1a\x39nvidia_tao_tf1/cv/efficientdet/proto/dataset_config.proto\x1a\x36nvidia_tao_tf1/cv/efficientdet/proto/eval_config.proto\x1a\x37nvidia_tao_tf1/cv/efficientdet/proto/model_config.proto\x1a:nvidia_tao_tf1/cv/efficientdet/proto/training_config.proto\"\xcd\x01\n\nExperiment\x12&\n\x0e\x64\x61taset_config\x18\x01 \x01(\x0b\x32\x0e.DatasetConfig\x12(\n\x0ftraining_config\x18\x02 \x01(\x0b\x32\x0f.TrainingConfig\x12 \n\x0b\x65val_config\x18\x03 \x01(\x0b\x32\x0b.EvalConfig\x12\'\n\x13\x61ugmentation_config\x18\x04 \x01(\x0b\x32\n.AugConfig\x12\"\n\x0cmodel_config\x18\x05 \x01(\x0b\x32\x0c.ModelConfigb\x06proto3')
,
dependencies=[nvidia__tao__tf1_dot_cv_dot_efficientdet_dot_proto_dot_aug__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_efficientdet_dot_proto_dot_dataset__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_efficientdet_dot_proto_dot_eval__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_efficientdet_dot_proto_dot_model__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_efficientdet_dot_proto_dot_training__config__pb2.DESCRIPTOR,])
_EXPERIMENT = _descriptor.Descriptor(
name='Experiment',
full_name='Experiment',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dataset_config', full_name='Experiment.dataset_config', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='training_config', full_name='Experiment.training_config', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='eval_config', full_name='Experiment.eval_config', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='augmentation_config', full_name='Experiment.augmentation_config', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='model_config', full_name='Experiment.model_config', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=345,
serialized_end=550,
)
_EXPERIMENT.fields_by_name['dataset_config'].message_type = nvidia__tao__tf1_dot_cv_dot_efficientdet_dot_proto_dot_dataset__config__pb2._DATASETCONFIG
_EXPERIMENT.fields_by_name['training_config'].message_type = nvidia__tao__tf1_dot_cv_dot_efficientdet_dot_proto_dot_training__config__pb2._TRAININGCONFIG
_EXPERIMENT.fields_by_name['eval_config'].message_type = nvidia__tao__tf1_dot_cv_dot_efficientdet_dot_proto_dot_eval__config__pb2._EVALCONFIG
_EXPERIMENT.fields_by_name['augmentation_config'].message_type = nvidia__tao__tf1_dot_cv_dot_efficientdet_dot_proto_dot_aug__config__pb2._AUGCONFIG
_EXPERIMENT.fields_by_name['model_config'].message_type = nvidia__tao__tf1_dot_cv_dot_efficientdet_dot_proto_dot_model__config__pb2._MODELCONFIG
DESCRIPTOR.message_types_by_name['Experiment'] = _EXPERIMENT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Experiment = _reflection.GeneratedProtocolMessageType('Experiment', (_message.Message,), dict(
DESCRIPTOR = _EXPERIMENT,
__module__ = 'nvidia_tao_tf1.cv.efficientdet.proto.experiment_pb2'
# @@protoc_insertion_point(class_scope:Experiment)
))
_sym_db.RegisterMessage(Experiment)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/proto/experiment_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/efficientdet/proto/eval_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/efficientdet/proto/eval_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n6nvidia_tao_tf1/cv/efficientdet/proto/eval_config.proto\"\xdf\x01\n\nEvalConfig\x12\x19\n\x11min_eval_interval\x18\x01 \x01(\r\x12\x14\n\x0c\x65val_timeout\x18\x02 \x01(\r\x12\x17\n\x0f\x65val_batch_size\x18\x03 \x01(\r\x12\x18\n\x10\x65val_epoch_cycle\x18\x04 \x01(\r\x12\x1b\n\x13\x65val_after_training\x18\x05 \x01(\x08\x12\x14\n\x0c\x65val_samples\x18\x06 \x01(\r\x12\x18\n\x10min_score_thresh\x18\x07 \x01(\x02\x12 \n\x18max_detections_per_image\x18\x08 \x01(\rb\x06proto3')
)
_EVALCONFIG = _descriptor.Descriptor(
name='EvalConfig',
full_name='EvalConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='min_eval_interval', full_name='EvalConfig.min_eval_interval', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='eval_timeout', full_name='EvalConfig.eval_timeout', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='eval_batch_size', full_name='EvalConfig.eval_batch_size', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='eval_epoch_cycle', full_name='EvalConfig.eval_epoch_cycle', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='eval_after_training', full_name='EvalConfig.eval_after_training', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='eval_samples', full_name='EvalConfig.eval_samples', index=5,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_score_thresh', full_name='EvalConfig.min_score_thresh', index=6,
number=7, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_detections_per_image', full_name='EvalConfig.max_detections_per_image', index=7,
number=8, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=59,
serialized_end=282,
)
DESCRIPTOR.message_types_by_name['EvalConfig'] = _EVALCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
EvalConfig = _reflection.GeneratedProtocolMessageType('EvalConfig', (_message.Message,), dict(
DESCRIPTOR = _EVALCONFIG,
__module__ = 'nvidia_tao_tf1.cv.efficientdet.proto.eval_config_pb2'
# @@protoc_insertion_point(class_scope:EvalConfig)
))
_sym_db.RegisterMessage(EvalConfig)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/proto/eval_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/efficientdet/proto/model_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/efficientdet/proto/model_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n7nvidia_tao_tf1/cv/efficientdet/proto/model_config.proto\"\xb2\x01\n\x0bModelConfig\x12\x12\n\nmodel_name\x18\x01 \x01(\t\x12\x11\n\tfreeze_bn\x18\x02 \x01(\x08\x12\x15\n\rfreeze_blocks\x18\x03 \x01(\t\x12\x15\n\raspect_ratios\x18\x04 \x01(\t\x12\x14\n\x0c\x61nchor_scale\x18\x05 \x01(\x02\x12\x11\n\tmin_level\x18\x06 \x01(\r\x12\x11\n\tmax_level\x18\x07 \x01(\r\x12\x12\n\nnum_scales\x18\x08 \x01(\rb\x06proto3')
)
_MODELCONFIG = _descriptor.Descriptor(
name='ModelConfig',
full_name='ModelConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='model_name', full_name='ModelConfig.model_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='freeze_bn', full_name='ModelConfig.freeze_bn', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='freeze_blocks', full_name='ModelConfig.freeze_blocks', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='aspect_ratios', full_name='ModelConfig.aspect_ratios', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='anchor_scale', full_name='ModelConfig.anchor_scale', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_level', full_name='ModelConfig.min_level', index=5,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_level', full_name='ModelConfig.max_level', index=6,
number=7, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_scales', full_name='ModelConfig.num_scales', index=7,
number=8, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=60,
serialized_end=238,
)
DESCRIPTOR.message_types_by_name['ModelConfig'] = _MODELCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ModelConfig = _reflection.GeneratedProtocolMessageType('ModelConfig', (_message.Message,), dict(
DESCRIPTOR = _MODELCONFIG,
__module__ = 'nvidia_tao_tf1.cv.efficientdet.proto.model_config_pb2'
# @@protoc_insertion_point(class_scope:ModelConfig)
))
_sym_db.RegisterMessage(ModelConfig)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/proto/model_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/efficientdet/proto/aug_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/efficientdet/proto/aug_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n5nvidia_tao_tf1/cv/efficientdet/proto/aug_config.proto\"]\n\tAugConfig\x12\x12\n\nrand_hflip\x18\x01 \x01(\x08\x12\x1d\n\x15random_crop_min_scale\x18\x02 \x01(\x02\x12\x1d\n\x15random_crop_max_scale\x18\x03 \x01(\x02\x62\x06proto3')
)
_AUGCONFIG = _descriptor.Descriptor(
name='AugConfig',
full_name='AugConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='rand_hflip', full_name='AugConfig.rand_hflip', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='random_crop_min_scale', full_name='AugConfig.random_crop_min_scale', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='random_crop_max_scale', full_name='AugConfig.random_crop_max_scale', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=57,
serialized_end=150,
)
DESCRIPTOR.message_types_by_name['AugConfig'] = _AUGCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AugConfig = _reflection.GeneratedProtocolMessageType('AugConfig', (_message.Message,), dict(
DESCRIPTOR = _AUGCONFIG,
__module__ = 'nvidia_tao_tf1.cv.efficientdet.proto.aug_config_pb2'
# @@protoc_insertion_point(class_scope:AugConfig)
))
_sym_db.RegisterMessage(AugConfig)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/proto/aug_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/efficientdet/proto/dataset_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/efficientdet/proto/dataset_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n9nvidia_tao_tf1/cv/efficientdet/proto/dataset_config.proto\"\x87\x02\n\rDatasetConfig\x12\x1d\n\x15training_file_pattern\x18\x01 \x01(\t\x12\x1f\n\x17validation_file_pattern\x18\x02 \x01(\t\x12\x1c\n\x14validation_json_file\x18\x03 \x01(\t\x12\x13\n\x0btestdev_dir\x18\x04 \x01(\t\x12\x13\n\x0bnum_classes\x18\x05 \x01(\r\x12\x12\n\nimage_size\x18\x06 \x01(\t\x12\x15\n\ruse_fake_data\x18\x07 \x01(\x08\x12\x1f\n\x17max_instances_per_image\x18\x08 \x01(\r\x12\"\n\x1askip_crowd_during_training\x18\t \x01(\x08\x62\x06proto3')
)
_DATASETCONFIG = _descriptor.Descriptor(
name='DatasetConfig',
full_name='DatasetConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='training_file_pattern', full_name='DatasetConfig.training_file_pattern', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='validation_file_pattern', full_name='DatasetConfig.validation_file_pattern', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='validation_json_file', full_name='DatasetConfig.validation_json_file', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='testdev_dir', full_name='DatasetConfig.testdev_dir', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_classes', full_name='DatasetConfig.num_classes', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='image_size', full_name='DatasetConfig.image_size', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='use_fake_data', full_name='DatasetConfig.use_fake_data', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_instances_per_image', full_name='DatasetConfig.max_instances_per_image', index=7,
number=8, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='skip_crowd_during_training', full_name='DatasetConfig.skip_crowd_during_training', index=8,
number=9, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=62,
serialized_end=325,
)
DESCRIPTOR.message_types_by_name['DatasetConfig'] = _DATASETCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DatasetConfig = _reflection.GeneratedProtocolMessageType('DatasetConfig', (_message.Message,), dict(
DESCRIPTOR = _DATASETCONFIG,
__module__ = 'nvidia_tao_tf1.cv.efficientdet.proto.dataset_config_pb2'
# @@protoc_insertion_point(class_scope:DatasetConfig)
))
_sym_db.RegisterMessage(DatasetConfig)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/proto/dataset_config_pb2.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IVA EfficientDet augment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/aug/__init__.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AutoAugment.
[1] Barret Zoph, et al. Learning Data Augmentation Strategies for Object Detection.
Arxiv: https://arxiv.org/abs/1906.11172
"""
from __future__ import absolute_import
from __future__ import division
# gtype import
from __future__ import print_function
import inspect
import logging
import math
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.contrib import image as image_ops # pylint: disable=g-import-not-at-top
import tensorflow_probability as tfp
from nvidia_tao_tf1.cv.efficientdet.utils import hparams_config
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level='INFO')
logger = logging.getLogger(__name__)
# This signifies the max integer that the controller RNN could predict for the
# augmentation scheme.
_MAX_LEVEL = 10.
# Represents an invalid bounding box that is used for checking for padding
# lists of bounding box coordinates for a few augmentation operations
_INVALID_BOX = [[-1.0, -1.0, -1.0, -1.0]]
def policy_v0():
"""Autoaugment policy that was used in AutoAugment Detection Paper."""
# Each tuple is an augmentation operation of the form
# (operation, probability, magnitude). Each element in policy is a
# sub-policy that will be applied sequentially on the image.
policy = [
[('TranslateX_BBox', 0.6, 4), ('Equalize', 0.8, 10)],
[('TranslateY_Only_BBoxes', 0.2, 2), ('Cutout', 0.8, 8)],
[('Sharpness', 0.0, 8), ('ShearX_BBox', 0.4, 0)],
[('ShearY_BBox', 1.0, 2), ('TranslateY_Only_BBoxes', 0.6, 6)],
[('Rotate_BBox', 0.6, 10), ('Color', 1.0, 6)],
]
return policy
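# Illustrative reading of the policies above (editorial example, hedged): a
# tuple such as ('TranslateX_BBox', 0.6, 4) means "apply TranslateX_BBox with
# probability 0.6 at magnitude level 4", where levels run from 0 to _MAX_LEVEL
# (10) and are assumed to be mapped to concrete pixel/angle arguments elsewhere
# in the augmentation pipeline. In the AutoAugment scheme one sub-policy (one
# inner list) is typically sampled per image and its operations applied in order.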
def policy_v1():
"""Autoaugment policy that was used in AutoAugment Detection Paper."""
# Each tuple is an augmentation operation of the form
# (operation, probability, magnitude). Each element in policy is a
# sub-policy that will be applied sequentially on the image.
policy = [
[('TranslateX_BBox', 0.6, 4), ('Equalize', 0.8, 10)],
[('TranslateY_Only_BBoxes', 0.2, 2), ('Cutout', 0.8, 8)],
[('Sharpness', 0.0, 8), ('ShearX_BBox', 0.4, 0)],
[('ShearY_BBox', 1.0, 2), ('TranslateY_Only_BBoxes', 0.6, 6)],
[('Rotate_BBox', 0.6, 10), ('Color', 1.0, 6)],
[('Color', 0.0, 0), ('ShearX_Only_BBoxes', 0.8, 4)],
[('ShearY_Only_BBoxes', 0.8, 2), ('Flip_Only_BBoxes', 0.0, 10)],
[('Equalize', 0.6, 10), ('TranslateX_BBox', 0.2, 2)],
[('Color', 1.0, 10), ('TranslateY_Only_BBoxes', 0.4, 6)],
[('Rotate_BBox', 0.8, 10), ('Contrast', 0.0, 10)],
[('Cutout', 0.2, 2), ('Brightness', 0.8, 10)],
[('Color', 1.0, 6), ('Equalize', 1.0, 2)],
[('Cutout_Only_BBoxes', 0.4, 6), ('TranslateY_Only_BBoxes', 0.8, 2)],
[('Color', 0.2, 8), ('Rotate_BBox', 0.8, 10)],
[('Sharpness', 0.4, 4), ('TranslateY_Only_BBoxes', 0.0, 4)],
[('Sharpness', 1.0, 4), ('SolarizeAdd', 0.4, 4)],
[('Rotate_BBox', 1.0, 8), ('Sharpness', 0.2, 8)],
[('ShearY_BBox', 0.6, 10), ('Equalize_Only_BBoxes', 0.6, 8)],
[('ShearX_BBox', 0.2, 6), ('TranslateY_Only_BBoxes', 0.2, 10)],
[('SolarizeAdd', 0.6, 8), ('Brightness', 0.8, 10)],
]
return policy
def policy_vtest():
"""Autoaugment test policy for debugging."""
# Each tuple is an augmentation operation of the form
# (operation, probability, magnitude). Each element in policy is a
# sub-policy that will be applied sequentially on the image.
policy = [
[('TranslateX_BBox', 1.0, 4), ('Equalize', 1.0, 10)],
]
return policy
def policy_v2():
"""Additional policy that performs well on object detection."""
# Each tuple is an augmentation operation of the form
# (operation, probability, magnitude). Each element in policy is a
# sub-policy that will be applied sequentially on the image.
policy = [
[('Color', 0.0, 6), ('Cutout', 0.6, 8), ('Sharpness', 0.4, 8)],
[('Rotate_BBox', 0.4, 8), ('Sharpness', 0.4, 2),
('Rotate_BBox', 0.8, 10)],
[('TranslateY_BBox', 1.0, 8), ('AutoContrast', 0.8, 2)],
[('AutoContrast', 0.4, 6), ('ShearX_BBox', 0.8, 8),
('Brightness', 0.0, 10)],
[('SolarizeAdd', 0.2, 6), ('Contrast', 0.0, 10),
('AutoContrast', 0.6, 0)],
[('Cutout', 0.2, 0), ('Solarize', 0.8, 8), ('Color', 1.0, 4)],
[('TranslateY_BBox', 0.0, 4), ('Equalize', 0.6, 8),
('Solarize', 0.0, 10)],
[('TranslateY_BBox', 0.2, 2), ('ShearY_BBox', 0.8, 8),
('Rotate_BBox', 0.8, 8)],
[('Cutout', 0.8, 8), ('Brightness', 0.8, 8), ('Cutout', 0.2, 2)],
[('Color', 0.8, 4), ('TranslateY_BBox', 1.0, 6), ('Rotate_BBox', 0.6, 6)],
[('Rotate_BBox', 0.6, 10), ('BBox_Cutout', 1.0, 4), ('Cutout', 0.2, 8)],
[('Rotate_BBox', 0.0, 0), ('Equalize', 0.6, 6), ('ShearY_BBox', 0.6, 8)],
[('Brightness', 0.8, 8), ('AutoContrast', 0.4, 2),
('Brightness', 0.2, 2)],
[('TranslateY_BBox', 0.4, 8), ('Solarize', 0.4, 6),
('SolarizeAdd', 0.2, 10)],
[('Contrast', 1.0, 10), ('SolarizeAdd', 0.2, 8), ('Equalize', 0.2, 4)],
]
return policy
def policy_v3():
""""Additional policy that performs well on object detection."""
# Each tuple is an augmentation operation of the form
# (operation, probability, magnitude). Each element in policy is a
# sub-policy that will be applied sequentially on the image.
policy = [
[('Posterize', 0.8, 2), ('TranslateX_BBox', 1.0, 8)],
[('BBox_Cutout', 0.2, 10), ('Sharpness', 1.0, 8)],
[('Rotate_BBox', 0.6, 8), ('Rotate_BBox', 0.8, 10)],
[('Equalize', 0.8, 10), ('AutoContrast', 0.2, 10)],
[('SolarizeAdd', 0.2, 2), ('TranslateY_BBox', 0.2, 8)],
[('Sharpness', 0.0, 2), ('Color', 0.4, 8)],
[('Equalize', 1.0, 8), ('TranslateY_BBox', 1.0, 8)],
[('Posterize', 0.6, 2), ('Rotate_BBox', 0.0, 10)],
[('AutoContrast', 0.6, 0), ('Rotate_BBox', 1.0, 6)],
[('Equalize', 0.0, 4), ('Cutout', 0.8, 10)],
[('Brightness', 1.0, 2), ('TranslateY_BBox', 1.0, 6)],
[('Contrast', 0.0, 2), ('ShearY_BBox', 0.8, 0)],
[('AutoContrast', 0.8, 10), ('Contrast', 0.2, 10)],
[('Rotate_BBox', 1.0, 10), ('Cutout', 1.0, 10)],
[('SolarizeAdd', 0.8, 6), ('Equalize', 0.8, 8)],
]
return policy
def blend(image1, image2, factor):
"""Blend image1 and image2 using 'factor'.
Factor can be above 0.0. A value of 0.0 means only image1 is used.
A value of 1.0 means only image2 is used. A value between 0.0 and
1.0 means we linearly interpolate the pixel values between the two
images. A value greater than 1.0 "extrapolates" the difference
between the two pixel values, and we clip the results to values
between 0 and 255.
Args:
image1: An image Tensor of type uint8.
image2: An image Tensor of type uint8.
factor: A floating point value above 0.0.
Returns:
A blended image Tensor of type uint8.
"""
if factor == 0.0:
return tf.convert_to_tensor(image1)
if factor == 1.0:
return tf.convert_to_tensor(image2)
image1 = tf.to_float(image1)
image2 = tf.to_float(image2)
difference = image2 - image1
scaled = factor * difference
# Do addition in float.
temp = tf.to_float(image1) + scaled
# Interpolate
if 1.0 > factor > 0.0:
# Interpolation means we always stay within 0 and 255.
return tf.cast(temp, tf.uint8)
# Extrapolate:
#
# We need to clip and then cast.
return tf.cast(tf.clip_by_value(temp, 0.0, 255.0), tf.uint8)
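# Quick numeric sketch of blend() (illustrative only, values are hypothetical):
# with an image1 pixel of 100 and an image2 pixel of 220,
#   factor=0.5 -> 100 + 0.5 * (220 - 100) = 160   (plain interpolation)
#   factor=1.5 -> 100 + 1.5 * (220 - 100) = 280   -> clipped to 255 (extrapolation)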
def cutout(image, pad_size, replace=0):
"""Apply CutOut (https://arxiv.org/abs/1708.04552) to image.
This operation applies a (2*pad_size x 2*pad_size) mask of zeros to
  a random location within `image`. The pixel values filled in will be of the
  value `replace`. The location where the mask will be applied is randomly
chosen uniformly over the whole image.
Args:
image: An image Tensor of type uint8.
    pad_size: Specifies how big the zero mask that is applied to the image
      will be. The mask will be of size
(2*pad_size x 2*pad_size).
replace: What pixel value to fill in the image in the area that has
the cutout mask applied to it.
Returns:
An image Tensor that is of type uint8.
"""
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
# Sample the center location in the image where the zero mask will be applied.
cutout_center_height = tf.random_uniform(
shape=[], minval=0, maxval=image_height,
dtype=tf.int32)
cutout_center_width = tf.random_uniform(
shape=[], minval=0, maxval=image_width,
dtype=tf.int32)
lower_pad = tf.maximum(0, cutout_center_height - pad_size)
upper_pad = tf.maximum(0, image_height - cutout_center_height - pad_size)
left_pad = tf.maximum(0, cutout_center_width - pad_size)
right_pad = tf.maximum(0, image_width - cutout_center_width - pad_size)
cutout_shape = [image_height - (lower_pad + upper_pad),
image_width - (left_pad + right_pad)]
padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]]
mask = tf.pad(
tf.zeros(cutout_shape, dtype=image.dtype),
padding_dims, constant_values=1)
mask = tf.expand_dims(mask, -1)
mask = tf.tile(mask, [1, 1, 3])
image = tf.where(
tf.equal(mask, 0),
tf.ones_like(image, dtype=image.dtype) * replace,
image)
return image
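# Geometry sketch for cutout() (illustrative only): with pad_size=8 the mask is
# at most 16x16 pixels, centred on a uniformly sampled pixel. Near the image
# border the window is clipped, e.g. a centre sampled at row 0 leaves only the
# first 8 rows of the 16-row window filled with `replace`.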
def solarize(image, threshold=128):
"""Apply Solarize augmentation."""
# For each pixel in the image, select the pixel
# if the value is less than the threshold.
# Otherwise, subtract 255 from the pixel.
return tf.where(image < threshold, image, 255 - image)
def solarize_add(image, addition=0, threshold=128):
"""Apply Solarize Add augmentation."""
# For each pixel in the image less than threshold
# we add 'addition' amount to it and then clip the
# pixel value to be between 0 and 255. The value
# of 'addition' is between -128 and 128.
added_image = tf.cast(image, tf.int64) + addition
added_image = tf.cast(tf.clip_by_value(added_image, 0, 255), tf.uint8)
return tf.where(image < threshold, added_image, image)
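# Illustrative example of solarize_add(): with threshold=128 and addition=64, a
# pixel of 100 (< threshold) becomes min(100 + 64, 255) = 164, while a pixel of
# 200 (>= threshold) is left unchanged.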
def color(image, factor):
"""Equivalent of PIL Color."""
degenerate = tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(image))
return blend(degenerate, image, factor)
def contrast(image, factor):
"""Equivalent of PIL Contrast."""
degenerate = tf.image.rgb_to_grayscale(image)
# Cast before calling tf.histogram.
degenerate = tf.cast(degenerate, tf.int32)
# Compute the grayscale histogram, then compute the mean pixel value,
# and create a constant image size of that value. Use that as the
# blending degenerate target of the original image.
hist = tf.histogram_fixed_width(degenerate, [0, 255], nbins=256)
mean = tf.reduce_sum(tf.cast(hist, tf.float32)) / 256.0
degenerate = tf.ones_like(degenerate, dtype=tf.float32) * mean
degenerate = tf.clip_by_value(degenerate, 0.0, 255.0)
degenerate = tf.image.grayscale_to_rgb(tf.cast(degenerate, tf.uint8))
return blend(degenerate, image, factor)
def brightness(image, factor):
"""Equivalent of PIL Brightness."""
degenerate = tf.zeros_like(image)
return blend(degenerate, image, factor)
def posterize(image, bits):
"""Equivalent of PIL Posterize."""
shift = 8 - bits
return tf.bitwise.left_shift(tf.bitwise.right_shift(image, shift), shift)
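# Illustrative example of posterize(): with bits=4 the shift is 8 - 4 = 4, so a
# uint8 pixel of 173 (0b10101101) becomes (173 >> 4) << 4 = 160 (0b10100000),
# i.e. only the 4 most significant bits are kept.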
def rotate(image, degrees, replace):
"""Rotates the image by degrees either clockwise or counterclockwise.
Args:
image: An image Tensor of type uint8.
degrees: Float, a scalar angle in degrees to rotate all images by. If
      degrees is positive, the image will be rotated clockwise; otherwise it will
be rotated counterclockwise.
replace: A one or three value 1D tensor to fill empty pixels caused by
the rotate operation.
Returns:
The rotated version of image.
"""
# Convert from degrees to radians.
degrees_to_radians = math.pi / 180.0
radians = degrees * degrees_to_radians
# In practice, we should randomize the rotation degrees by flipping
# it negatively half the time, but that's done on 'degrees' outside
# of the function.
image = image_ops.rotate(wrap(image), radians)
return unwrap(image, replace)
def random_shift_bbox(image, bbox, pixel_scaling, replace,
new_min_bbox_coords=None):
"""Move the bbox and the image content to a slightly new random location.
Args:
image: 3D uint8 Tensor.
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
The potential values for the new min corner of the bbox will be between
[old_min - pixel_scaling * bbox_height/2,
       old_min + pixel_scaling * bbox_height/2].
pixel_scaling: A float between 0 and 1 that specifies the pixel range
that the new bbox location will be sampled from.
replace: A one or three value 1D tensor to fill empty pixels.
new_min_bbox_coords: If not None, then this is a tuple that specifies the
(min_y, min_x) coordinates of the new bbox. Normally this is randomly
specified, but this allows it to be manually set. The coordinates are
the absolute coordinates between 0 and image height/width and are int32.
Returns:
The new image that will have the shifted bbox location in it along with
the new bbox that contains the new coordinates.
"""
# Obtains image height and width and create helper clip functions.
image_height = tf.to_float(tf.shape(image)[0])
image_width = tf.to_float(tf.shape(image)[1])
def clip_y(val):
return tf.clip_by_value(val, 0, tf.to_int32(image_height) - 1)
def clip_x(val):
return tf.clip_by_value(val, 0, tf.to_int32(image_width) - 1)
# Convert bbox to pixel coordinates.
min_y = tf.to_int32(image_height * bbox[0])
min_x = tf.to_int32(image_width * bbox[1])
max_y = clip_y(tf.to_int32(image_height * bbox[2]))
max_x = clip_x(tf.to_int32(image_width * bbox[3]))
bbox_height, bbox_width = (max_y - min_y + 1, max_x - min_x + 1)
image_height = tf.to_int32(image_height)
image_width = tf.to_int32(image_width)
# Select the new min/max bbox ranges that are used for sampling the
# new min x/y coordinates of the shifted bbox.
minval_y = clip_y(
min_y - tf.to_int32(pixel_scaling * tf.to_float(bbox_height) / 2.0))
maxval_y = clip_y(
min_y + tf.to_int32(pixel_scaling * tf.to_float(bbox_height) / 2.0))
minval_x = clip_x(
min_x - tf.to_int32(pixel_scaling * tf.to_float(bbox_width) / 2.0))
maxval_x = clip_x(
min_x + tf.to_int32(pixel_scaling * tf.to_float(bbox_width) / 2.0))
# Sample and calculate the new unclipped min/max coordinates of the new bbox.
if new_min_bbox_coords is None:
unclipped_new_min_y = tf.random_uniform(
shape=[], minval=minval_y, maxval=maxval_y,
dtype=tf.int32)
unclipped_new_min_x = tf.random_uniform(
shape=[], minval=minval_x, maxval=maxval_x,
dtype=tf.int32)
else:
unclipped_new_min_y, unclipped_new_min_x = (
clip_y(new_min_bbox_coords[0]), clip_x(new_min_bbox_coords[1]))
unclipped_new_max_y = unclipped_new_min_y + bbox_height - 1
unclipped_new_max_x = unclipped_new_min_x + bbox_width - 1
# Determine if any of the new bbox was shifted outside the current image.
# This is used for determining if any of the original bbox content should be
# discarded.
new_min_y, new_min_x, new_max_y, new_max_x = (
clip_y(unclipped_new_min_y), clip_x(unclipped_new_min_x),
clip_y(unclipped_new_max_y), clip_x(unclipped_new_max_x))
shifted_min_y = (new_min_y - unclipped_new_min_y) + min_y
shifted_max_y = max_y - (unclipped_new_max_y - new_max_y)
shifted_min_x = (new_min_x - unclipped_new_min_x) + min_x
shifted_max_x = max_x - (unclipped_new_max_x - new_max_x)
# Create the new bbox tensor by converting pixel integer values to floats.
new_bbox = tf.stack([
tf.to_float(new_min_y) / tf.to_float(image_height),
tf.to_float(new_min_x) / tf.to_float(image_width),
tf.to_float(new_max_y) / tf.to_float(image_height),
tf.to_float(new_max_x) / tf.to_float(image_width)])
# Copy the contents in the bbox and fill the old bbox location
# with gray (128).
bbox_content = image[shifted_min_y:shifted_max_y + 1,
shifted_min_x:shifted_max_x + 1, :]
def mask_and_add_image(
min_y_, min_x_, max_y_, max_x_, mask, content_tensor, image_):
"""Applies mask to bbox region in image then adds content_tensor to it."""
mask = tf.pad(mask,
[[min_y_, (image_height - 1) - max_y_],
[min_x_, (image_width - 1) - max_x_],
[0, 0]], constant_values=1)
content_tensor = tf.pad(
content_tensor,
[[min_y_, (image_height - 1) - max_y_],
[min_x_, (image_width - 1) - max_x_],
[0, 0]], constant_values=0)
return image_ * mask + content_tensor
# Zero out original bbox location.
mask = tf.zeros_like(image)[min_y:max_y+1, min_x:max_x+1, :]
grey_tensor = tf.zeros_like(mask) + replace[0]
image = mask_and_add_image(
min_y, min_x, max_y, max_x, mask, grey_tensor, image)
# Fill in bbox content to new bbox location.
mask = tf.zeros_like(bbox_content)
image = mask_and_add_image(
new_min_y, new_min_x, new_max_y, new_max_x, mask,
bbox_content, image)
return image, new_bbox
def _clip_bbox(min_y, min_x, max_y, max_x):
"""Clip bounding box coordinates between 0 and 1.
Args:
min_y: Normalized bbox coordinate of type float between 0 and 1.
min_x: Normalized bbox coordinate of type float between 0 and 1.
max_y: Normalized bbox coordinate of type float between 0 and 1.
max_x: Normalized bbox coordinate of type float between 0 and 1.
Returns:
Clipped coordinate values between 0 and 1.
"""
min_y = tf.clip_by_value(min_y, 0.0, 1.0)
min_x = tf.clip_by_value(min_x, 0.0, 1.0)
max_y = tf.clip_by_value(max_y, 0.0, 1.0)
max_x = tf.clip_by_value(max_x, 0.0, 1.0)
return min_y, min_x, max_y, max_x
def _check_bbox_area(min_y, min_x, max_y, max_x, delta=0.05):
"""Adjusts bbox coordinates to make sure the area is > 0.
Args:
min_y: Normalized bbox coordinate of type float between 0 and 1.
min_x: Normalized bbox coordinate of type float between 0 and 1.
max_y: Normalized bbox coordinate of type float between 0 and 1.
max_x: Normalized bbox coordinate of type float between 0 and 1.
delta: Float, this is used to create a gap of size 2 * delta between
bbox min/max coordinates that are the same on the boundary.
This prevents the bbox from having an area of zero.
Returns:
Tuple of new bbox coordinates between 0 and 1 that will now have a
guaranteed area > 0.
"""
height = max_y - min_y
width = max_x - min_x
def _adjust_bbox_boundaries(min_coord, max_coord):
# Make sure max is never 0 and min is never 1.
max_coord = tf.maximum(max_coord, 0.0 + delta)
min_coord = tf.minimum(min_coord, 1.0 - delta)
return min_coord, max_coord
min_y, max_y = tf.cond(tf.equal(height, 0.0),
lambda: _adjust_bbox_boundaries(min_y, max_y),
lambda: (min_y, max_y))
min_x, max_x = tf.cond(tf.equal(width, 0.0),
lambda: _adjust_bbox_boundaries(min_x, max_x),
lambda: (min_x, max_x))
return min_y, min_x, max_y, max_x
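# Illustrative example of _check_bbox_area(): with the default delta=0.05 a
# degenerate box whose min_y == max_y == 1.0 becomes (min_y, max_y) = (0.95, 1.0),
# and one pinned at 0.0 becomes (0.0, 0.05), so boxes that collapse onto the
# image boundary keep a non-zero height/width.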
def _scale_bbox_only_op_probability(prob):
"""Reduce the probability of the bbox-only operation.
Probability is reduced so that we do not distort the content of too many
  bounding boxes that are close to each other. The value of 3.0 is a
  hyperparameter that was chosen when designing the autoaugment algorithm and
  that we found empirically to work well.
Args:
prob: Float that is the probability of applying the bbox-only operation.
Returns:
Reduced probability.
"""
return prob / 3.0
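# E.g. a configured probability of 0.6 is applied to each individual bbox with
# probability 0.6 / 3.0 = 0.2 by the *_only_bboxes helpers below.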
def _apply_bbox_augmentation(image, bbox, augmentation_func, *args):
"""Applies augmentation_func to the subsection of image indicated by bbox.
Args:
image: 3D uint8 Tensor.
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
augmentation_func: Augmentation function that will be applied to the
subsection of image.
*args: Additional parameters that will be passed into augmentation_func
when it is called.
Returns:
A modified version of image, where the bbox location in the image will
    have `augmentation_func` applied to it.
"""
image_height = tf.to_float(tf.shape(image)[0])
image_width = tf.to_float(tf.shape(image)[1])
min_y = tf.to_int32(image_height * bbox[0])
min_x = tf.to_int32(image_width * bbox[1])
max_y = tf.to_int32(image_height * bbox[2])
max_x = tf.to_int32(image_width * bbox[3])
image_height = tf.to_int32(image_height)
image_width = tf.to_int32(image_width)
# Clip to be sure the max values do not fall out of range.
max_y = tf.minimum(max_y, image_height - 1)
max_x = tf.minimum(max_x, image_width - 1)
# Get the sub-tensor that is the image within the bounding box region.
bbox_content = image[min_y:max_y + 1, min_x:max_x + 1, :]
# Apply the augmentation function to the bbox portion of the image.
augmented_bbox_content = augmentation_func(bbox_content, *args)
# Pad the augmented_bbox_content and the mask to match the shape of original
# image.
augmented_bbox_content = tf.pad(
augmented_bbox_content,
[[min_y, (image_height - 1) - max_y],
[min_x, (image_width - 1) - max_x],
[0, 0]])
# Create a mask that will be used to zero out a part of the original image.
mask_tensor = tf.zeros_like(bbox_content)
mask_tensor = tf.pad(
mask_tensor,
[[min_y, (image_height - 1) - max_y],
[min_x, (image_width - 1) - max_x],
[0, 0]],
constant_values=1)
# Replace the old bbox content with the new augmented content.
image = image * mask_tensor + augmented_bbox_content
return image
def _concat_bbox(bbox, bboxes):
"""Helper function that concats bbox to bboxes along the first dimension."""
# Note if all elements in bboxes are -1 (_INVALID_BOX), then this means
# we discard bboxes and start the bboxes Tensor with the current bbox.
bboxes_sum_check = tf.reduce_sum(bboxes)
bbox = tf.expand_dims(bbox, 0)
# This check will be true when it is an _INVALID_BOX
bboxes = tf.cond(
tf.equal(bboxes_sum_check, -4.0),
lambda: bbox,
lambda: tf.concat([bboxes, bbox], 0))
return bboxes
def _apply_bbox_augmentation_wrapper(image, bbox, new_bboxes, prob,
augmentation_func, func_changes_bbox,
*args):
"""Applies _apply_bbox_augmentation with probability prob.
Args:
image: 3D uint8 Tensor.
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
new_bboxes: 2D Tensor that is a list of the bboxes in the image after they
have been altered by aug_func. These will only be changed when
func_changes_bbox is set to true. Each bbox has 4 elements
(min_y, min_x, max_y, max_x) of type float that are the normalized
bbox coordinates between 0 and 1.
prob: Float that is the probability of applying _apply_bbox_augmentation.
augmentation_func: Augmentation function that will be applied to the
subsection of image.
func_changes_bbox: Boolean. Does augmentation_func return bbox in addition
to image.
*args: Additional parameters that will be passed into augmentation_func
when it is called.
Returns:
    A tuple. First element is a modified version of image, where the bbox
location in the image will have augmentation_func applied to it if it is
chosen to be called with probability `prob`. The second element is a
Tensor of Tensors of length 4 that will contain the altered bbox after
applying augmentation_func.
"""
should_apply_op = tf.cast(
tf.floor(tf.random_uniform([], dtype=tf.float32) + prob), tf.bool)
if func_changes_bbox:
augmented_image, bbox = tf.cond(
should_apply_op,
lambda: augmentation_func(image, bbox, *args),
lambda: (image, bbox))
else:
augmented_image = tf.cond(
should_apply_op,
lambda: _apply_bbox_augmentation(image, bbox, augmentation_func, *args),
lambda: image)
new_bboxes = _concat_bbox(bbox, new_bboxes)
return augmented_image, new_bboxes
def _apply_multi_bbox_augmentation(image, bboxes, prob, aug_func, func_changes_bbox, *args):
"""Applies aug_func to the image for each bbox in bboxes.
Args:
image: 3D uint8 Tensor.
bboxes: 2D Tensor that is a list of the bboxes in the image. Each bbox
has 4 elements (min_y, min_x, max_y, max_x) of type float.
prob: Float that is the probability of applying aug_func to a specific
bounding box within the image.
aug_func: Augmentation function that will be applied to the
subsections of image indicated by the bbox values in bboxes.
func_changes_bbox: Boolean. Does augmentation_func return bbox in addition
to image.
*args: Additional parameters that will be passed into augmentation_func
when it is called.
Returns:
A modified version of image, where each bbox location in the image will
have augmentation_func applied to it if it is chosen to be called with
probability prob independently across all bboxes. Also the final
bboxes are returned that will be unchanged if func_changes_bbox is set to
false and if true, the new altered ones will be returned.
"""
# Will keep track of the new altered bboxes after aug_func is repeatedly
  # applied. The -1 values are dummy values and this first Tensor will be
# removed upon appending the first real bbox.
new_bboxes = tf.constant(_INVALID_BOX)
# If the bboxes are empty, then just give it _INVALID_BOX. The result
# will be thrown away.
bboxes = tf.cond(
tf.equal(tf.shape(bboxes)[0], 0),
lambda: tf.constant(_INVALID_BOX),
lambda: bboxes)
bboxes = tf.ensure_shape(bboxes, (None, 4))
def wrapped_aug_func(_image, bbox, _new_bboxes):
return _apply_bbox_augmentation_wrapper(
_image, bbox, _new_bboxes, prob, aug_func, func_changes_bbox, *args)
# Setup the while_loop.
num_bboxes = tf.shape(bboxes)[0] # We loop until we go over all bboxes.
idx = tf.constant(0) # Counter for the while loop.
  # Condition function that ends the loop once all bboxes have been processed.
  # _images_and_bboxes contains (_image, _new_bboxes).
def cond(_idx, _images_and_bboxes):
return tf.less(_idx, num_bboxes)
# Shuffle the bboxes so that the augmentation order is not deterministic if
# we are not changing the bboxes with aug_func.
if not func_changes_bbox:
loop_bboxes = tf.random.shuffle(bboxes)
else:
loop_bboxes = bboxes
# Main function of while_loop where we repeatedly apply augmentation on the
# bboxes in the image.
def body(_idx, _images_and_bboxes):
return [
_idx + 1, wrapped_aug_func(
_images_and_bboxes[0],
loop_bboxes[_idx],
_images_and_bboxes[1])]
_, (image, new_bboxes) = tf.while_loop(
cond, body, [idx, (image, new_bboxes)],
shape_invariants=[
idx.get_shape(),
(image.get_shape(), tf.TensorShape([None, 4]))])
# Either return the altered bboxes or the original ones depending on if
# we altered them in anyway.
if func_changes_bbox:
final_bboxes = new_bboxes
else:
final_bboxes = bboxes
return image, final_bboxes
def _apply_multi_bbox_augmentation_wrapper(image, bboxes, prob, aug_func,
func_changes_bbox, *args):
"""Checks to be sure num bboxes > 0 before calling inner function."""
num_bboxes = tf.shape(bboxes)[0]
image, bboxes = tf.cond(
tf.equal(num_bboxes, 0),
lambda: (image, bboxes),
# pylint:disable=g-long-lambda
lambda: _apply_multi_bbox_augmentation(
image, bboxes, prob, aug_func, func_changes_bbox, *args))
# pylint:enable=g-long-lambda
return image, bboxes
def rotate_only_bboxes(image, bboxes, prob, degrees, replace):
"""Apply rotate to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, rotate, func_changes_bbox, degrees, replace)
def shear_x_only_bboxes(image, bboxes, prob, level, replace):
"""Apply shear_x to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, shear_x, func_changes_bbox, level, replace)
def shear_y_only_bboxes(image, bboxes, prob, level, replace):
"""Apply shear_y to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, shear_y, func_changes_bbox, level, replace)
def translate_x_only_bboxes(image, bboxes, prob, pixels, replace):
"""Apply translate_x to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, translate_x, func_changes_bbox, pixels, replace)
def translate_y_only_bboxes(image, bboxes, prob, pixels, replace):
"""Apply translate_y to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, translate_y, func_changes_bbox, pixels, replace)
def flip_only_bboxes(image, bboxes, prob):
"""Apply flip_lr to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, tf.image.flip_left_right, func_changes_bbox)
def solarize_only_bboxes(image, bboxes, prob, threshold):
"""Apply solarize to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, solarize, func_changes_bbox, threshold)
def equalize_only_bboxes(image, bboxes, prob):
"""Apply equalize to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, equalize, func_changes_bbox)
def cutout_only_bboxes(image, bboxes, prob, pad_size, replace):
"""Apply cutout to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, cutout, func_changes_bbox, pad_size, replace)
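# Illustrative usage of the *_only_bboxes helpers above (values are hypothetical):
#   image, bboxes = cutout_only_bboxes(image, bboxes, prob=0.9, pad_size=25,
#                                      replace=128)
# applies cutout independently inside each bbox region with an effective
# probability of 0.9 / 3.0 = 0.3; the bbox coordinates themselves are returned
# unchanged because cutout does not move the boxes.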
def _rotate_bbox(bbox, image_height, image_width, degrees):
"""Rotates the bbox coordinated by degrees.
Args:
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
image_height: Int, height of the image.
    image_width: Int, width of the image.
degrees: Float, a scalar angle in degrees to rotate all images by. If
      degrees is positive, the image will be rotated clockwise; otherwise it will
be rotated counterclockwise.
Returns:
A tensor of the same shape as bbox, but now with the rotated coordinates.
"""
image_height, image_width = (
tf.to_float(image_height), tf.to_float(image_width))
# Convert from degrees to radians.
degrees_to_radians = math.pi / 180.0
radians = degrees * degrees_to_radians
# Translate the bbox to the center of the image and turn the normalized 0-1
# coordinates to absolute pixel locations.
# Y coordinates are made negative as the y axis of images goes down with
# increasing pixel values, so we negate to make sure x axis and y axis points
# are in the traditionally positive direction.
min_y = -tf.to_int32(image_height * (bbox[0] - 0.5))
min_x = tf.to_int32(image_width * (bbox[1] - 0.5))
max_y = -tf.to_int32(image_height * (bbox[2] - 0.5))
max_x = tf.to_int32(image_width * (bbox[3] - 0.5))
coordinates = tf.stack(
[[min_y, min_x], [min_y, max_x], [max_y, min_x], [max_y, max_x]])
coordinates = tf.cast(coordinates, tf.float32)
# Rotate the coordinates according to the rotation matrix clockwise if
# radians is positive, else negative
rotation_matrix = tf.stack(
[[tf.cos(radians), tf.sin(radians)],
[-tf.sin(radians), tf.cos(radians)]])
new_coords = tf.cast(
tf.matmul(rotation_matrix, tf.transpose(coordinates)), tf.int32)
# Find min/max values and convert them back to normalized 0-1 floats.
min_y = -(tf.to_float(tf.reduce_max(new_coords[0, :])) / image_height - 0.5)
min_x = tf.to_float(tf.reduce_min(new_coords[1, :])) / image_width + 0.5
max_y = -(tf.to_float(tf.reduce_min(new_coords[0, :])) / image_height - 0.5)
max_x = tf.to_float(tf.reduce_max(new_coords[1, :])) / image_width + 0.5
  # Clip the bboxes to be sure they fall between [0, 1].
min_y, min_x, max_y, max_x = _clip_bbox(min_y, min_x, max_y, max_x)
min_y, min_x, max_y, max_x = _check_bbox_area(min_y, min_x, max_y, max_x)
return tf.stack([min_y, min_x, max_y, max_x])
def rotate_with_bboxes(image, bboxes, degrees, replace):
"""Equivalent of PIL Rotate that rotates the image and bbox.
Args:
image: 3D uint8 Tensor.
bboxes: 2D Tensor that is a list of the bboxes in the image. Each bbox
has 4 elements (min_y, min_x, max_y, max_x) of type float.
degrees: Float, a scalar angle in degrees to rotate all images by. If
      degrees is positive, the image will be rotated clockwise; otherwise it will
be rotated counterclockwise.
replace: A one or three value 1D tensor to fill empty pixels.
Returns:
A tuple containing a 3D uint8 Tensor that will be the result of rotating
image by degrees. The second element of the tuple is bboxes, where now
the coordinates will be shifted to reflect the rotated image.
"""
# Rotate the image.
image = rotate(image, degrees, replace)
# Convert bbox coordinates to pixel values.
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
def wrapped_rotate_bbox(bbox):
return _rotate_bbox(
bbox, image_height, image_width, degrees)
bboxes = tf.map_fn(wrapped_rotate_bbox, bboxes)
return image, bboxes
def translate_x(image, pixels, replace):
"""Equivalent of PIL Translate in X dimension."""
image = image_ops.translate(wrap(image), [-pixels, 0])
return unwrap(image, replace)
def translate_y(image, pixels, replace):
"""Equivalent of PIL Translate in Y dimension."""
image = image_ops.translate(wrap(image), [0, -pixels])
return unwrap(image, replace)
def _shift_bbox(bbox, image_height, image_width, pixels, shift_horizontal):
"""Shifts the bbox coordinates by pixels.
Args:
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
image_height: Int, height of the image.
image_width: Int, width of the image.
pixels: An int. How many pixels to shift the bbox.
shift_horizontal: Boolean. If true then shift in X dimension else shift in
Y dimension.
Returns:
A tensor of the same shape as bbox, but now with the shifted coordinates.
"""
pixels = tf.to_int32(pixels)
# Convert bbox to integer pixel locations.
min_y = tf.to_int32(tf.to_float(image_height) * bbox[0])
min_x = tf.to_int32(tf.to_float(image_width) * bbox[1])
max_y = tf.to_int32(tf.to_float(image_height) * bbox[2])
max_x = tf.to_int32(tf.to_float(image_width) * bbox[3])
if shift_horizontal:
min_x = tf.maximum(0, min_x - pixels)
max_x = tf.minimum(image_width, max_x - pixels)
else:
min_y = tf.maximum(0, min_y - pixels)
max_y = tf.minimum(image_height, max_y - pixels)
# Convert bbox back to floats.
min_y = tf.to_float(min_y) / tf.to_float(image_height)
min_x = tf.to_float(min_x) / tf.to_float(image_width)
max_y = tf.to_float(max_y) / tf.to_float(image_height)
max_x = tf.to_float(max_x) / tf.to_float(image_width)
  # Clip the bboxes to be sure they fall between [0, 1].
min_y, min_x, max_y, max_x = _clip_bbox(min_y, min_x, max_y, max_x)
min_y, min_x, max_y, max_x = _check_bbox_area(min_y, min_x, max_y, max_x)
return tf.stack([min_y, min_x, max_y, max_x])
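# Numeric sketch of _shift_bbox() (illustrative only): on a 200-pixel-wide
# image, pixels=20 and shift_horizontal=True move a normalized min_x of 0.5
# (pixel 100) to (100 - 20) / 200 = 0.4, matching translate_x() above, which
# (under the tf.contrib.image.translate convention) shifts the image content
# left by `pixels` pixels via a [-pixels, 0] offset.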
def translate_bbox(image, bboxes, pixels, replace, shift_horizontal):
"""Equivalent of PIL Translate in X/Y dimension that shifts image and bbox.
Args:
image: 3D uint8 Tensor.
bboxes: 2D Tensor that is a list of the bboxes in the image. Each bbox
has 4 elements (min_y, min_x, max_y, max_x) of type float with values
between [0, 1].
pixels: An int. How many pixels to shift the image and bboxes
replace: A one or three value 1D tensor to fill empty pixels.
shift_horizontal: Boolean. If true then shift in X dimension else shift in
Y dimension.
Returns:
A tuple containing a 3D uint8 Tensor that will be the result of translating
image by pixels. The second element of the tuple is bboxes, where now
the coordinates will be shifted to reflect the shifted image.
"""
if shift_horizontal:
image = translate_x(image, pixels, replace)
else:
image = translate_y(image, pixels, replace)
# Convert bbox coordinates to pixel values.
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
def wrapped_shift_bbox(bbox):
return _shift_bbox(
bbox, image_height, image_width, pixels, shift_horizontal)
bboxes = tf.map_fn(wrapped_shift_bbox, bboxes)
return image, bboxes
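# Illustrative usage (not part of the original module): shift an image 40
# pixels along x while keeping its boxes consistent. `image` is a uint8
# HxWx3 tensor, `boxes` is an [N, 4] float tensor of normalized
# (min_y, min_x, max_y, max_x) coordinates, and [128, 128, 128] fills the
# pixels exposed by the shift, mirroring the replace_value used later in
# this file.
#
#   shifted_image, shifted_boxes = translate_bbox(
#       image, boxes, pixels=40, replace=[128, 128, 128],
#       shift_horizontal=True)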
def shear_x(image, level, replace):
"""Equivalent of PIL Shearing in X dimension."""
# Shear parallel to x axis is a projective transform
# with a matrix form of:
# [1 level
# 0 1].
image = image_ops.transform(
wrap(image), [1., level, 0., 0., 1., 0., 0., 0.])
return unwrap(image, replace)
def shear_y(image, level, replace):
"""Equivalent of PIL Shearing in Y dimension."""
# Shear parallel to y axis is a projective transform
# with a matrix form of:
# [1 0
# level 1].
image = image_ops.transform(
wrap(image), [1., 0., 0., level, 1., 0., 0., 0.])
return unwrap(image, replace)
def _shear_bbox(bbox, image_height, image_width, level, shear_horizontal):
"""Shifts the bbox according to how the image was sheared.
Args:
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
image_height: Int, height of the image.
    image_width: Int, width of the image.
level: Float. How much to shear the image.
shear_horizontal: If true then shear in X dimension else shear in
the Y dimension.
Returns:
A tensor of the same shape as bbox, but now with the shifted coordinates.
"""
image_height, image_width = (
tf.to_float(image_height), tf.to_float(image_width))
# Change bbox coordinates to be pixels.
min_y = tf.to_int32(image_height * bbox[0])
min_x = tf.to_int32(image_width * bbox[1])
max_y = tf.to_int32(image_height * bbox[2])
max_x = tf.to_int32(image_width * bbox[3])
coordinates = tf.stack(
[[min_y, min_x], [min_y, max_x], [max_y, min_x], [max_y, max_x]])
coordinates = tf.cast(coordinates, tf.float32)
# Shear the coordinates according to the translation matrix.
if shear_horizontal:
translation_matrix = tf.stack(
[[1, 0], [-level, 1]])
else:
translation_matrix = tf.stack(
[[1, -level], [0, 1]])
translation_matrix = tf.cast(translation_matrix, tf.float32)
new_coords = tf.cast(
tf.matmul(translation_matrix, tf.transpose(coordinates)), tf.int32)
# Find min/max values and convert them back to floats.
min_y = tf.to_float(tf.reduce_min(new_coords[0, :])) / image_height
min_x = tf.to_float(tf.reduce_min(new_coords[1, :])) / image_width
max_y = tf.to_float(tf.reduce_max(new_coords[0, :])) / image_height
max_x = tf.to_float(tf.reduce_max(new_coords[1, :])) / image_width
  # Clip the bboxes to be sure they fall between [0, 1].
min_y, min_x, max_y, max_x = _clip_bbox(min_y, min_x, max_y, max_x)
min_y, min_x, max_y, max_x = _check_bbox_area(min_y, min_x, max_y, max_x)
return tf.stack([min_y, min_x, max_y, max_x])
def shear_with_bboxes(image, bboxes, level, replace, shear_horizontal):
"""Applies Shear Transformation to the image and shifts the bboxes.
Args:
image: 3D uint8 Tensor.
bboxes: 2D Tensor that is a list of the bboxes in the image. Each bbox
has 4 elements (min_y, min_x, max_y, max_x) of type float with values
between [0, 1].
level: Float. How much to shear the image. This value will be between
-0.3 to 0.3.
replace: A one or three value 1D tensor to fill empty pixels.
shear_horizontal: Boolean. If true then shear in X dimension else shear in
the Y dimension.
Returns:
A tuple containing a 3D uint8 Tensor that will be the result of shearing
image by level. The second element of the tuple is bboxes, where now
the coordinates will be shifted to reflect the sheared image.
"""
if shear_horizontal:
image = shear_x(image, level, replace)
else:
image = shear_y(image, level, replace)
# Convert bbox coordinates to pixel values.
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
def wrapped_shear_bbox(bbox):
return _shear_bbox(
bbox, image_height, image_width, level, shear_horizontal)
bboxes = tf.map_fn(wrapped_shear_bbox, bboxes)
return image, bboxes
def autocontrast(image):
"""Implements Autocontrast function from PIL using TF ops.
Args:
image: A 3D uint8 tensor.
Returns:
The image after it has had autocontrast applied to it and will be of type
uint8.
"""
def scale_channel(image):
"""Scale the 2D image using the autocontrast rule."""
    # A possibly cheaper version can be done using cumsum/unique_with_counts
    # over the histogram values, rather than iterating over the entire image
    # to compute mins and maxes.
lo = tf.to_float(tf.reduce_min(image))
hi = tf.to_float(tf.reduce_max(image))
# Scale the image, making the lowest value 0 and the highest value 255.
def scale_values(im):
scale = 255.0 / (hi - lo)
offset = -lo * scale
im = tf.to_float(im) * scale + offset
im = tf.clip_by_value(im, 0.0, 255.0)
return tf.cast(im, tf.uint8)
result = tf.cond(hi > lo, lambda: scale_values(image), lambda: image)
return result
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
s1 = scale_channel(image[:, :, 0])
s2 = scale_channel(image[:, :, 1])
s3 = scale_channel(image[:, :, 2])
image = tf.stack([s1, s2, s3], 2)
return image
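# Worked example of the per-channel rule above (illustrative numbers): if a
# channel spans [50, 200], then scale = 255 / (200 - 50) = 1.7 and
# offset = -50 * 1.7 = -85, so 50 maps to 0, 200 maps to 255, and values in
# between stretch linearly over the full [0, 255] range.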
def sharpness(image, factor):
"""Implements Sharpness function from PIL using TF ops."""
orig_image = image
image = tf.cast(image, tf.float32)
# Make image 4D for conv operation.
image = tf.expand_dims(image, 0)
# SMOOTH PIL Kernel.
kernel = tf.constant(
[[1, 1, 1], [1, 5, 1], [1, 1, 1]], dtype=tf.float32,
shape=[3, 3, 1, 1]) / 13.
# Tile across channel dimension.
kernel = tf.tile(kernel, [1, 1, 3, 1])
strides = [1, 1, 1, 1]
degenerate = tf.nn.depthwise_conv2d(
image, kernel, strides, padding='VALID', rate=[1, 1])
degenerate = tf.clip_by_value(degenerate, 0.0, 255.0)
degenerate = tf.squeeze(tf.cast(degenerate, tf.uint8), [0])
# For the borders of the resulting image, fill in the values of the
# original image.
mask = tf.ones_like(degenerate)
padded_mask = tf.pad(mask, [[1, 1], [1, 1], [0, 0]])
padded_degenerate = tf.pad(degenerate, [[1, 1], [1, 1], [0, 0]])
result = tf.where(tf.equal(padded_mask, 1), padded_degenerate, orig_image)
# Blend the final result.
return blend(result, orig_image, factor)
def equalize(image):
"""Implements Equalize function from PIL using TF ops."""
def scale_channel(im, c):
"""Scale the data in the channel to implement equalize."""
im = tf.cast(im[:, :, c], tf.int32)
# Compute the histogram of the image channel.
histo = tf.histogram_fixed_width(im, [0, 255], nbins=256)
# For the purposes of computing the step, filter out the nonzeros.
nonzero = tf.where(tf.not_equal(histo, 0))
nonzero_histo = tf.reshape(tf.gather(histo, nonzero), [-1])
step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255
def build_lut(histo, step):
# Compute the cumulative sum, shifting by step // 2
# and then normalization by step.
lut = (tf.cumsum(histo) + (step // 2)) // step
# Shift lut, prepending with 0.
lut = tf.concat([[0], lut[:-1]], 0)
# Clip the counts to be in range. This is done
# in the C code for image.point.
return tf.clip_by_value(lut, 0, 255)
# If step is zero, return the original image. Otherwise, build
# lut from the full histogram and step and then index from it.
result = tf.cond(
tf.equal(step, 0),
lambda: im,
lambda: tf.gather(build_lut(histo, step), im))
return tf.cast(result, tf.uint8)
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
s1 = scale_channel(image, 0)
s2 = scale_channel(image, 1)
s3 = scale_channel(image, 2)
image = tf.stack([s1, s2, s3], 2)
return image
def wrap(image):
"""Returns 'image' with an extra channel set to all 1s."""
shape = tf.shape(image)
extended_channel = tf.ones([shape[0], shape[1], 1], image.dtype)
extended = tf.concat([image, extended_channel], 2)
return extended
def unwrap(image, replace):
"""Unwraps an image produced by wrap.
Where there is a 0 in the last channel for every spatial position,
the rest of the three channels in that spatial dimension are grayed
(set to 128). Operations like translate and shear on a wrapped
Tensor will leave 0s in empty locations. Some transformations look
at the intensity of values to do preprocessing, and we want these
empty pixels to assume the 'average' value, rather than pure black.
Args:
image: A 3D Image Tensor with 4 channels.
replace: A one or three value 1D tensor to fill empty pixels.
Returns:
image: A 3D image Tensor with 3 channels.
"""
image_shape = tf.shape(image)
# Flatten the spatial dimensions.
flattened_image = tf.reshape(image, [-1, image_shape[2]])
# Find all pixels where the last channel is zero.
alpha_channel = flattened_image[:, 3]
replace = tf.concat([replace, tf.ones([1], image.dtype)], 0)
# Where they are zero, fill them in with 'replace'.
flattened_image = tf.where(
tf.equal(alpha_channel, 0),
tf.ones_like(flattened_image, dtype=image.dtype) * replace,
flattened_image)
image = tf.reshape(flattened_image, image_shape)
image = tf.slice(image, [0, 0, 0], [image_shape[0], image_shape[1], 3])
return image
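# Illustrative sketch of the wrap/unwrap round trip (not part of the
# original module): the all-ones channel added by `wrap` acts as a validity
# mask, so pixels that a geometric op pulls in from outside the image carry
# a zero there and are overwritten with `replace` by `unwrap`.
#
#   wrapped = wrap(image)                          # HxWx4
#   shifted = image_ops.translate(wrapped, [20, 0])
#   restored = unwrap(shifted, [128, 128, 128])    # HxWx3, holes filled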
def _cutout_inside_bbox(image, bbox, pad_fraction):
"""Generates cutout mask and the mean pixel value of the bbox.
First a location is randomly chosen within the image as the center where the
cutout mask will be applied. Note this can be towards the boundaries of the
image, so the full cutout mask may not be applied.
Args:
image: 3D uint8 Tensor.
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
    pad_fraction: Float that specifies how large the cutout mask should be in
      reference to the size of the original bbox. If pad_fraction is 0.25,
then the cutout mask will be of shape
(0.25 * bbox height, 0.25 * bbox width).
Returns:
    A tuple. First element is a tensor of the same shape as image where each
element is either a 1 or 0 that is used to determine where the image
will have cutout applied. The second element is the mean of the pixels
in the image where the bbox is located.
"""
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
# Transform from shape [1, 4] to [4].
bbox = tf.squeeze(bbox)
min_y = tf.to_int32(tf.to_float(image_height) * bbox[0])
min_x = tf.to_int32(tf.to_float(image_width) * bbox[1])
max_y = tf.to_int32(tf.to_float(image_height) * bbox[2])
max_x = tf.to_int32(tf.to_float(image_width) * bbox[3])
# Calculate the mean pixel values in the bounding box, which will be used
# to fill the cutout region.
mean = tf.reduce_mean(
image[min_y:max_y + 1, min_x:max_x + 1],
reduction_indices=[0, 1])
  # Cutout mask will be size pad_size_height * 2 by pad_size_width * 2 if the
# region lies entirely within the bbox.
box_height = max_y - min_y + 1
box_width = max_x - min_x + 1
pad_size_height = tf.to_int32(pad_fraction * (box_height / 2))
pad_size_width = tf.to_int32(pad_fraction * (box_width / 2))
# Sample the center location in the image where the zero mask will be applied.
cutout_center_height = tf.random_uniform(
shape=[], minval=min_y, maxval=max_y+1,
dtype=tf.int32)
cutout_center_width = tf.random_uniform(
shape=[], minval=min_x, maxval=max_x+1,
dtype=tf.int32)
lower_pad = tf.maximum(
0, cutout_center_height - pad_size_height)
upper_pad = tf.maximum(
0, image_height - cutout_center_height - pad_size_height)
left_pad = tf.maximum(
0, cutout_center_width - pad_size_width)
right_pad = tf.maximum(
0, image_width - cutout_center_width - pad_size_width)
cutout_shape = [image_height - (lower_pad + upper_pad),
image_width - (left_pad + right_pad)]
padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]]
mask = tf.pad(
tf.zeros(cutout_shape, dtype=image.dtype),
padding_dims, constant_values=1)
mask = tf.expand_dims(mask, 2)
mask = tf.tile(mask, [1, 1, 3])
return mask, mean
def bbox_cutout(image, bboxes, pad_fraction, replace_with_mean):
"""Applies cutout to the image according to bbox information.
  This is a cutout variant that uses bbox information to make more informed
decisions on where to place the cutout mask.
Args:
image: 3D uint8 Tensor.
bboxes: 2D Tensor that is a list of the bboxes in the image. Each bbox
has 4 elements (min_y, min_x, max_y, max_x) of type float with values
between [0, 1].
    pad_fraction: Float that specifies how large the cutout mask should be in
      reference to the size of the original bbox. If pad_fraction is 0.25,
then the cutout mask will be of shape
(0.25 * bbox height, 0.25 * bbox width).
    replace_with_mean: Boolean that specifies what value should be filled in
where the cutout mask is applied. Since the incoming image will be of
uint8 and will not have had any mean normalization applied, by default
we set the value to be 128. If replace_with_mean is True then we find
the mean pixel values across the channel dimension and use those to fill
in where the cutout mask is applied.
Returns:
A tuple. First element is a tensor of the same shape as image that has
cutout applied to it. Second element is the bboxes that were passed in
that will be unchanged.
"""
def apply_bbox_cutout(image, bboxes, pad_fraction):
"""Applies cutout to a single bounding box within image."""
# Choose a single bounding box to apply cutout to.
random_index = tf.random_uniform(
shape=[], maxval=tf.shape(bboxes)[0], dtype=tf.int32)
# Select the corresponding bbox and apply cutout.
chosen_bbox = tf.gather(bboxes, random_index)
mask, mean = _cutout_inside_bbox(image, chosen_bbox, pad_fraction)
# When applying cutout we either set the pixel value to 128 or to the mean
# value inside the bbox.
replace = mean if replace_with_mean else 128
# Apply the cutout mask to the image. Where the mask is 0 we fill it with
# `replace`.
image = tf.where(
tf.equal(mask, 0),
tf.cast(tf.ones_like(image, dtype=image.dtype) * replace,
dtype=image.dtype),
image)
return image
# Check to see if there are boxes, if so then apply boxcutout.
image = tf.cond(tf.equal(tf.shape(bboxes)[0], 0), lambda: image,
lambda: apply_bbox_cutout(image, bboxes, pad_fraction))
return image, bboxes
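# Illustrative usage (assumed values): cut out a region roughly half the
# size of one randomly chosen box and fill it with that box's mean pixel
# value.
#
#   image, bboxes = bbox_cutout(image, bboxes, pad_fraction=0.5,
#                               replace_with_mean=True)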
NAME_TO_FUNC = {
'AutoContrast': autocontrast,
'Equalize': equalize,
'Posterize': posterize,
'Solarize': solarize,
'SolarizeAdd': solarize_add,
'Color': color,
'Contrast': contrast,
'Brightness': brightness,
'Sharpness': sharpness,
'Cutout': cutout,
'BBox_Cutout': bbox_cutout,
'Rotate_BBox': rotate_with_bboxes,
# pylint:disable=g-long-lambda
'TranslateX_BBox': lambda image, bboxes, pixels, replace: translate_bbox(
image, bboxes, pixels, replace, shift_horizontal=True),
'TranslateY_BBox': lambda image, bboxes, pixels, replace: translate_bbox(
image, bboxes, pixels, replace, shift_horizontal=False),
'ShearX_BBox': lambda image, bboxes, level, replace: shear_with_bboxes(
image, bboxes, level, replace, shear_horizontal=True),
'ShearY_BBox': lambda image, bboxes, level, replace: shear_with_bboxes(
image, bboxes, level, replace, shear_horizontal=False),
# pylint:enable=g-long-lambda
'Rotate_Only_BBoxes': rotate_only_bboxes,
'ShearX_Only_BBoxes': shear_x_only_bboxes,
'ShearY_Only_BBoxes': shear_y_only_bboxes,
'TranslateX_Only_BBoxes': translate_x_only_bboxes,
'TranslateY_Only_BBoxes': translate_y_only_bboxes,
'Flip_Only_BBoxes': flip_only_bboxes,
'Solarize_Only_BBoxes': solarize_only_bboxes,
'Equalize_Only_BBoxes': equalize_only_bboxes,
'Cutout_Only_BBoxes': cutout_only_bboxes,
}
def _randomly_negate_tensor(tensor):
"""With 50% prob turn the tensor negative."""
should_flip = tf.cast(tf.floor(tf.random_uniform([]) + 0.5), tf.bool)
final_tensor = tf.cond(should_flip, lambda: tensor, lambda: -tensor)
return final_tensor
def _rotate_level_to_arg(level):
level = (level/_MAX_LEVEL) * 30.
level = _randomly_negate_tensor(level)
return (level,)
def _shrink_level_to_arg(level):
"""Converts level to ratio by which we shrink the image content."""
if level == 0:
return (1.0,) # if level is zero, do not shrink the image
# Maximum shrinking ratio is 2.9.
level = 2. / (_MAX_LEVEL / level) + 0.9
return (level,)
def _enhance_level_to_arg(level):
return ((level/_MAX_LEVEL) * 1.8 + 0.1,)
def _shear_level_to_arg(level):
level = (level/_MAX_LEVEL) * 0.3
# Flip level to negative with 50% chance.
level = _randomly_negate_tensor(level)
return (level,)
def _translate_level_to_arg(level, translate_const):
level = (level/_MAX_LEVEL) * float(translate_const)
# Flip level to negative with 50% chance.
level = _randomly_negate_tensor(level)
return (level,)
def _bbox_cutout_level_to_arg(level, hparams):
cutout_pad_fraction = (level/_MAX_LEVEL) * hparams.cutout_max_pad_fraction
return (cutout_pad_fraction,
hparams.cutout_bbox_replace_with_mean)
def level_to_arg(hparams):
"""Convert augmentation style to dict of func."""
return {
'AutoContrast': lambda level: (),
'Equalize': lambda level: (),
'Posterize': lambda level: (int((level/_MAX_LEVEL) * 4),),
'Solarize': lambda level: (int((level/_MAX_LEVEL) * 256),),
'SolarizeAdd': lambda level: (int((level/_MAX_LEVEL) * 110),),
'Color': _enhance_level_to_arg,
'Contrast': _enhance_level_to_arg,
'Brightness': _enhance_level_to_arg,
'Sharpness': _enhance_level_to_arg,
'Cutout': lambda level: (int((level/_MAX_LEVEL) * hparams.cutout_const),),
# pylint:disable=g-long-lambda
'BBox_Cutout': lambda level: _bbox_cutout_level_to_arg(
level, hparams),
'TranslateX_BBox': lambda level: _translate_level_to_arg(
level, hparams.translate_const),
'TranslateY_BBox': lambda level: _translate_level_to_arg(
level, hparams.translate_const),
# pylint:enable=g-long-lambda
'ShearX_BBox': _shear_level_to_arg,
'ShearY_BBox': _shear_level_to_arg,
'Rotate_BBox': _rotate_level_to_arg,
'Rotate_Only_BBoxes': _rotate_level_to_arg,
'ShearX_Only_BBoxes': _shear_level_to_arg,
'ShearY_Only_BBoxes': _shear_level_to_arg,
# pylint:disable=g-long-lambda
'TranslateX_Only_BBoxes': lambda level: _translate_level_to_arg(
level, hparams.translate_bbox_const),
'TranslateY_Only_BBoxes': lambda level: _translate_level_to_arg(
level, hparams.translate_bbox_const),
# pylint:enable=g-long-lambda
'Flip_Only_BBoxes': lambda level: (),
'Solarize_Only_BBoxes': lambda level: (int((level/_MAX_LEVEL) * 256),),
'Equalize_Only_BBoxes': lambda level: (),
# pylint:disable=g-long-lambda
'Cutout_Only_BBoxes': lambda level: (
int((level/_MAX_LEVEL) * hparams.cutout_bbox_const),),
# pylint:enable=g-long-lambda
}
def bbox_wrapper(func):
"""Adds a bboxes function argument to func and returns unchanged bboxes."""
def wrapper(images, bboxes, *args, **kwargs):
return (func(images, *args, **kwargs), bboxes)
return wrapper
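# Illustrative example (not part of the original module): `sharpness` only
# takes (image, factor), so wrapping it lets the policy machinery call it
# with bboxes and receive them back unchanged.
#
#   sharpness_with_bboxes = bbox_wrapper(sharpness)
#   image, bboxes = sharpness_with_bboxes(image, bboxes, 1.7)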
def _parse_policy_info(name, prob, level, replace_value, augmentation_hparams):
"""Return the function that corresponds to `name` and update `level` param."""
func = NAME_TO_FUNC[name]
args = level_to_arg(augmentation_hparams)[name](level)
# Check to see if prob is passed into function. This is used for operations
# where we alter bboxes independently.
# pytype:disable=wrong-arg-types
if 'prob' in inspect.getfullargspec(func)[0]:
args = tuple([prob] + list(args))
# pytype:enable=wrong-arg-types
# Add in replace arg if it is required for the function that is being called.
if 'replace' in inspect.getfullargspec(func)[0]:
# Make sure replace is the final argument
assert 'replace' == inspect.getfullargspec(func)[0][-1]
args = tuple(list(args) + [replace_value])
# Add bboxes as the second positional argument for the function if it does
# not already exist.
if 'bboxes' not in inspect.getfullargspec(func)[0]:
func = bbox_wrapper(func)
return (func, prob, args)
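# Illustrative example (assumed values): a policy entry such as
# ('TranslateX_BBox', 0.6, 4) resolves to the TranslateX_BBox lambda above,
# the probability 0.6, and an args tuple holding the level-scaled pixel
# offset with the [128, 128, 128] replace value appended, since 'replace'
# is the lambda's final argument.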
def _apply_func_with_prob(func, image, args, prob, bboxes):
"""Apply `func` to image w/ `args` as input with probability `prob`."""
assert isinstance(args, tuple)
assert 'bboxes' == inspect.getfullargspec(func)[0][1]
# If prob is a function argument, then this randomness is being handled
# inside the function, so make sure it is always called.
if 'prob' in inspect.getfullargspec(func)[0]:
prob = 1.0
# Apply the function with probability `prob`.
should_apply_op = tf.cast(
tf.floor(tf.random_uniform([], dtype=tf.float32) + prob), tf.bool)
augmented_image, augmented_bboxes = tf.cond(
should_apply_op,
lambda: func(image, bboxes, *args),
lambda: (image, bboxes))
return augmented_image, augmented_bboxes
def select_and_apply_random_policy(policies, image, bboxes):
"""Select a random policy from `policies` and apply it to `image`."""
policy_to_select = tf.random_uniform([], maxval=len(policies), dtype=tf.int32)
# Note that using tf.case instead of tf.conds would result in significantly
# larger graphs and would even break export for some larger policies.
for (i, policy) in enumerate(policies):
image, bboxes = tf.cond(
tf.equal(i, policy_to_select),
lambda selected_policy=policy: selected_policy(image, bboxes),
lambda: (image, bboxes))
return (image, bboxes)
def select_and_apply_random_policy_augmix(policies,
image,
bboxes,
mixture_width=3,
mixture_depth=-1,
alpha=1):
"""Select a random policy from `policies` and apply it to `image`."""
policy_to_select = tf.random_uniform([], maxval=len(policies), dtype=tf.int32)
# Note that using tf.case instead of tf.conds would result in significantly
# larger graphs and would even break export for some larger policies.
ws = tfp.distributions.Dirichlet([alpha] * mixture_width).sample()
m = tfp.distributions.Beta(alpha, alpha).sample()
mix = tf.zeros_like(image, dtype=tf.float32)
for j in range(mixture_width):
aug_image = image
depth = mixture_depth if mixture_depth > 0 else np.random.randint(1, 4)
for _ in range(depth):
for (i, policy) in enumerate(policies):
aug_image, bboxes = tf.cond(
tf.equal(i, policy_to_select),
lambda policy_fn=policy, img=aug_image: policy_fn(img, bboxes),
lambda img=aug_image: (img, bboxes))
mix += ws[j] * tf.cast(aug_image, tf.float32)
mixed = tf.cast((1 - m) * tf.cast(image, tf.float32) + m * mix, tf.uint8)
return (mixed, bboxes)
def build_and_apply_nas_policy(policies, image, bboxes,
augmentation_hparams, use_augmix=False,
mixture_width=3, mixture_depth=-1, alpha=1):
"""Build a policy from the given policies passed in and apply to image.
Args:
policies: list of lists of tuples in the form `(func, prob, level)`, `func`
is a string name of the augmentation function, `prob` is the probability
of applying the `func` operation, `level` is the input argument for
`func`.
image: tf.Tensor that the resulting policy will be applied to.
bboxes: tf.Tensor of shape [N, 4] representing ground truth boxes that are
normalized between [0, 1].
augmentation_hparams: Hparams associated with the NAS learned policy.
    use_augmix: whether to use AugMix [https://arxiv.org/pdf/1912.02781.pdf].
mixture_width: Width of augmentation chain
mixture_depth: Depth of augmentation chain. -1 enables stochastic depth
uniformly from [1, 3].
alpha: Probability coefficient for Beta and Dirichlet distributions.
Returns:
A version of image that now has data augmentation applied to it based on
    the `policies` passed into the function. Additionally, returns bboxes if
a value for them is passed in that is not None
"""
replace_value = [128, 128, 128]
# func is the string name of the augmentation function, prob is the
# probability of applying the operation and level is the parameter associated
# with the tf op.
# tf_policies are functions that take in an image and return an augmented
# image.
tf_policies = []
for policy in policies:
tf_policy = []
# Link string name to the correct python function and make sure the correct
# argument is passed into that function.
for policy_info in policy:
policy_info = list(policy_info) + [replace_value, augmentation_hparams]
tf_policy.append(_parse_policy_info(*policy_info))
    # Now build the tf policy that will apply the augmentation procedure
# on image.
def make_final_policy(tf_policy_):
def final_policy(image_, bboxes_):
for func, prob, args in tf_policy_:
image_, bboxes_ = _apply_func_with_prob(
func, image_, args, prob, bboxes_)
return image_, bboxes_
return final_policy
tf_policies.append(make_final_policy(tf_policy))
if use_augmix:
augmented_images, augmented_bboxes = select_and_apply_random_policy_augmix(
tf_policies, image, bboxes, mixture_width, mixture_depth, alpha)
else:
augmented_images, augmented_bboxes = select_and_apply_random_policy(
tf_policies, image, bboxes)
# If no bounding boxes were specified, then just return the images.
return (augmented_images, augmented_bboxes)
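# Illustrative call (assumed toy policy, mirroring the docstring layout):
# each sub-policy is a list of (name, prob, level) tuples and one sub-policy
# is selected at random per image.
#
#   toy_policy = [[('Color', 0.4, 5), ('Rotate_BBox', 0.6, 6)],
#                 [('Equalize', 0.8, 10), ('TranslateY_BBox', 0.6, 4)]]
#   image, bboxes = build_and_apply_nas_policy(
#       toy_policy, image, bboxes, augmentation_hparams)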
def distort_image_with_autoaugment(image,
bboxes,
augmentation_name,
use_augmix=False,
mixture_width=3,
mixture_depth=-1,
alpha=1):
"""Applies the AutoAugment policy to `image` and `bboxes`.
Args:
image: `Tensor` of shape [height, width, 3] representing an image.
bboxes: `Tensor` of shape [N, 4] representing ground truth boxes that are
normalized between [0, 1].
augmentation_name: The name of the AutoAugment policy to use. The available
options are `v0`, `v1`, `v2`, `v3` and `test`. `v0` is the policy used for
all of the results in the paper and was found to achieve the best results
on the COCO dataset. `v1`, `v2` and `v3` are additional good policies
found on the COCO dataset that have slight variation in what operations
were used during the search procedure along with how many operations are
applied in parallel to a single image (2 vs 3).
    use_augmix: whether to use AugMix [https://arxiv.org/pdf/1912.02781.pdf].
mixture_width: Width of augmentation chain
mixture_depth: Depth of augmentation chain. -1 enables stochastic depth
uniformly from [1, 3].
alpha: Probability coefficient for Beta and Dirichlet distributions.
Returns:
A tuple containing the augmented versions of `image` and `bboxes`.
"""
  logger.info('Using autoaugmentation policy: %s', augmentation_name)
available_policies = {
'v0': policy_v0, 'v1': policy_v1, 'v2': policy_v2,
'v3': policy_v3, 'test': policy_vtest}
if augmentation_name not in available_policies:
raise ValueError('Invalid augmentation_name: {}'.format(augmentation_name))
policy = available_policies[augmentation_name]()
# Hparams that will be used for AutoAugment.
augmentation_hparams = hparams_config.Config(dict(
cutout_max_pad_fraction=0.75,
cutout_bbox_replace_with_mean=False,
cutout_const=100,
translate_const=250,
cutout_bbox_const=50,
translate_bbox_const=120))
with tf.device('/cpu:0'):
return build_and_apply_nas_policy(
policy, image, bboxes,
augmentation_hparams, use_augmix,
mixture_width, mixture_depth, alpha)
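# Illustrative end-to-end usage (not part of the original module): apply the
# learned `v0` policy to a decoded uint8 image and its normalized [N, 4]
# boxes inside an input pipeline.
#
#   aug_image, aug_boxes = distort_image_with_autoaugment(
#       image, boxes, augmentation_name='v0')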
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/aug/autoaugment.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tool to inspect a model."""
from __future__ import absolute_import
from __future__ import division
# gtype import
from __future__ import print_function
import os
import time
from typing import List, Text, Tuple
from absl import flags
from absl import logging
import numpy as np
from PIL import Image
import tensorflow.compat.v1 as tf
from tensorflow.python.client import timeline # pylint: disable=g-direct-tensorflow-import
from nvidia_tao_tf1.cv.efficientdet.inferencer import inference
from nvidia_tao_tf1.cv.efficientdet.utils import hparams_config
from nvidia_tao_tf1.cv.efficientdet.utils import utils
# inspect on 1 GPU
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
flags.DEFINE_string('model_name', 'efficientdet-d0', 'Model.')
flags.DEFINE_string('logdir', '/tmp/deff/', 'log directory.')
flags.DEFINE_string('runmode', 'dry', 'Run mode: {freeze, bm, dry}')
flags.DEFINE_string('trace_filename', None, 'Trace file name.')
flags.DEFINE_integer('threads', 0, 'Number of threads.')
flags.DEFINE_integer('bm_runs', 10, 'Number of benchmark runs.')
flags.DEFINE_string('tensorrt', None, 'TensorRT mode: {None, FP32, FP16, INT8}')
flags.DEFINE_bool('delete_logdir', True, 'Whether to delete logdir.')
flags.DEFINE_bool('freeze', False, 'Freeze graph.')
flags.DEFINE_bool('xla', False, 'Run with xla optimization.')
flags.DEFINE_integer('batch_size', 1, 'Batch size for inference.')
flags.DEFINE_string('ckpt_path', None, 'checkpoint dir used for eval.')
flags.DEFINE_string('export_ckpt', None, 'Path for exporting new models.')
flags.DEFINE_string(
'hparams', '', 'Comma separated k=v pairs of hyperparameters or a module'
' containing attributes to use as hyperparameters.')
flags.DEFINE_string('input_image', None, 'Input image path for inference.')
flags.DEFINE_string('output_image_dir', None, 'Output dir for inference.')
# For video.
flags.DEFINE_string('input_video', None, 'Input video path for inference.')
flags.DEFINE_string('output_video', None, 'Output video path. If None, play it online instead.')
# For visualization.
flags.DEFINE_integer('line_thickness', None, 'Line thickness for box.')
flags.DEFINE_integer('max_boxes_to_draw', None, 'Max number of boxes to draw.')
flags.DEFINE_float('min_score_thresh', None, 'Score threshold to show box.')
# For saved model.
flags.DEFINE_string('saved_model_dir', '/tmp/saved_model',
'Folder path for saved model.')
flags.DEFINE_string('tflite_path', None, 'Path for exporting tflite file.')
FLAGS = flags.FLAGS
class ModelInspector(object):
"""A simple helper class for inspecting a model."""
def __init__(self,
model_name: Text,
logdir: Text,
tensorrt: Text = False,
use_xla: bool = False,
ckpt_path: Text = None,
export_ckpt: Text = None,
saved_model_dir: Text = None,
tflite_path: Text = None,
batch_size: int = 1,
hparams: Text = ''):
"""Init."""
self.model_name = model_name
self.logdir = logdir
self.tensorrt = tensorrt
self.use_xla = use_xla
self.ckpt_path = ckpt_path
self.export_ckpt = export_ckpt
self.saved_model_dir = saved_model_dir
self.tflite_path = tflite_path
model_config = hparams_config.get_detection_config(model_name)
model_config.override(hparams) # Add custom overrides
model_config.is_training_bn = False
model_config.image_size = utils.parse_image_size(model_config.image_size)
# If batch size is 0, then build a graph with dynamic batch size.
self.batch_size = batch_size or None
self.labels_shape = [batch_size, model_config.num_classes]
height, width = model_config.image_size
if model_config.data_format == 'channels_first':
self.inputs_shape = [batch_size, 3, height, width]
else:
self.inputs_shape = [batch_size, height, width, 3]
self.model_config = model_config
def build_model(self, inputs: tf.Tensor) -> List[tf.Tensor]:
"""Build model with inputs and labels and print out model stats."""
logging.info('start building model')
cls_outputs, box_outputs = inference.build_model(
self.model_name,
inputs,
**self.model_config)
# Write to tfevent for tensorboard.
train_writer = tf.summary.FileWriter(self.logdir)
train_writer.add_graph(tf.get_default_graph())
train_writer.flush()
all_outputs = list(cls_outputs.values()) + list(box_outputs.values())
return all_outputs
def export_saved_model(self, **kwargs):
"""Export a saved model for inference."""
tf.enable_resource_variables()
driver = inference.ServingDriver(
self.model_name,
self.ckpt_path,
batch_size=self.batch_size,
use_xla=self.use_xla,
model_params=self.model_config.as_dict(),
**kwargs)
driver.build()
driver.export(self.saved_model_dir, self.tflite_path, self.tensorrt)
def saved_model_inference(self, image_path_pattern, output_dir, **kwargs):
"""Perform inference for the given saved model."""
driver = inference.ServingDriver(
self.model_name,
self.ckpt_path,
batch_size=self.batch_size,
use_xla=self.use_xla,
model_params=self.model_config.as_dict(),
**kwargs)
driver.load(self.saved_model_dir)
# Serving time batch size should be fixed.
batch_size = self.batch_size or 1
all_files = list(tf.io.gfile.glob(image_path_pattern))
print('all_files=', all_files)
num_batches = (len(all_files) + batch_size - 1) // batch_size
for i in range(num_batches):
batch_files = all_files[i * batch_size:(i + 1) * batch_size]
height, width = self.model_config.image_size
images = [Image.open(f) for f in batch_files]
if len({m.size for m in images}) > 1:
# Resize only if images in the same batch have different sizes.
        images = [m.resize((width, height)) for m in images]
raw_images = [np.array(m) for m in images]
size_before_pad = len(raw_images)
if size_before_pad < batch_size:
padding_size = batch_size - size_before_pad
raw_images += [np.zeros_like(raw_images[0])] * padding_size
detections_bs = driver.serve_images(raw_images)
for j in range(size_before_pad):
img = driver.visualize(raw_images[j], detections_bs[j], **kwargs)
img_id = str(i * batch_size + j)
output_image_path = os.path.join(output_dir, img_id + '.jpg')
Image.fromarray(img).save(output_image_path)
logging.info('writing file to %s', output_image_path)
def saved_model_benchmark(self,
image_path_pattern,
trace_filename=None,
**kwargs):
"""Perform inference for the given saved model."""
driver = inference.ServingDriver(
self.model_name,
self.ckpt_path,
batch_size=self.batch_size,
use_xla=self.use_xla,
model_params=self.model_config.as_dict(),
**kwargs)
driver.load(self.saved_model_dir)
raw_images = []
all_files = list(tf.io.gfile.glob(image_path_pattern))
if len(all_files) < self.batch_size:
all_files = all_files * (self.batch_size // len(all_files) + 1)
raw_images = [np.array(Image.open(f)) for f in all_files[:self.batch_size]]
driver.benchmark(raw_images, trace_filename)
def saved_model_video(self, video_path: Text, output_video: Text, **kwargs):
"""Perform video inference for the given saved model."""
import cv2 # pylint: disable=g-import-not-at-top
driver = inference.ServingDriver(
self.model_name,
self.ckpt_path,
batch_size=1,
use_xla=self.use_xla,
model_params=self.model_config.as_dict())
driver.load(self.saved_model_dir)
cap = cv2.VideoCapture(video_path)
if not cap.isOpened():
print('Error opening input video: {}'.format(video_path))
out_ptr = None
if output_video:
frame_width, frame_height = int(cap.get(3)), int(cap.get(4))
out_ptr = cv2.VideoWriter(
output_video,
cv2.VideoWriter_fourcc('m', 'p', '4', 'v'), 25,
(frame_width, frame_height))
while cap.isOpened():
# Capture frame-by-frame
ret, frame = cap.read()
if not ret:
break
raw_frames = [np.array(frame)]
detections_bs = driver.serve_images(raw_frames)
new_frame = driver.visualize(raw_frames[0], detections_bs[0], **kwargs)
if out_ptr:
# write frame into output file.
out_ptr.write(new_frame)
else:
# show the frame online, mainly used for real-time speed test.
cv2.imshow('Frame', new_frame)
# Press Q on keyboard to exit
if cv2.waitKey(1) & 0xFF == ord('q'):
break
  def inference_single_image(self, image_path, output_dir, **kwargs):
    """Run inference with one image."""
    driver = inference.InferenceDriver(
        self.model_name,
        self.ckpt_path,
        self.model_config.as_dict())
    driver.inference(image_path, output_dir, **kwargs)
def build_and_save_model(self):
"""Build and save the model into self.logdir."""
with tf.Graph().as_default(), tf.Session() as sess:
# Build model with inputs and labels.
inputs = tf.placeholder(tf.float32, name='input', shape=self.inputs_shape)
outputs = self.build_model(inputs)
# Run the model
inputs_val = np.random.rand(*self.inputs_shape).astype(float)
labels_val = np.zeros(self.labels_shape).astype(np.int64)
labels_val[:, 0] = 1
if self.ckpt_path:
# Load the true weights if available.
inference.restore_ckpt(
sess, self.ckpt_path,
self.model_config.moving_average_decay,
self.export_ckpt)
else:
sess.run(tf.global_variables_initializer())
      # Run a single forward pass to verify the graph.
sess.run(outputs, feed_dict={inputs: inputs_val})
all_saver = tf.train.Saver(save_relative_paths=True)
all_saver.save(sess, os.path.join(self.logdir, self.model_name))
tf_graph = os.path.join(self.logdir, self.model_name + '_train.pb')
with tf.io.gfile.GFile(tf_graph, 'wb') as f:
f.write(sess.graph_def.SerializeToString())
def eval_ckpt(self):
"""build and save the model into self.logdir."""
with tf.Graph().as_default(), tf.Session() as sess:
# Build model with inputs and labels.
inputs = tf.placeholder(tf.float32, name='input', shape=self.inputs_shape)
self.build_model(inputs)
inference.restore_ckpt(
sess, self.ckpt_path,
self.model_config.moving_average_decay,
self.export_ckpt)
  def freeze_model(self):
    """Freeze the model into a constant GraphDef and write it to logdir."""
with tf.Graph().as_default(), tf.Session() as sess:
inputs = tf.placeholder(tf.float32, name='input', shape=self.inputs_shape)
outputs = self.build_model(inputs)
if self.ckpt_path:
# Load the true weights if available.
inference.restore_ckpt(
sess, self.ckpt_path,
self.model_config.moving_average_decay,
self.export_ckpt)
else:
        # Load random weights if a checkpoint is not available.
self.build_and_save_model()
checkpoint = tf.train.latest_checkpoint(self.logdir)
logging.info('Loading checkpoint: %s', checkpoint)
saver = tf.train.Saver()
saver.restore(sess, checkpoint)
output_node_names = [node.op.name for node in outputs]
graphdef = tf.graph_util.convert_variables_to_constants(
sess, sess.graph_def, output_node_names)
tf_graph = os.path.join(self.logdir, self.model_name + '_frozen.pb')
tf.io.gfile.GFile(tf_graph, 'wb').write(graphdef.SerializeToString())
return graphdef
def benchmark_model(self,
warmup_runs,
bm_runs,
num_threads,
trace_filename=None):
"""Benchmark model."""
if self.tensorrt:
print('Using tensorrt ', self.tensorrt)
graphdef = self.freeze_model()
if num_threads > 0:
print('num_threads for benchmarking: {}'.format(num_threads))
sess_config = tf.ConfigProto(
intra_op_parallelism_threads=num_threads,
inter_op_parallelism_threads=1)
else:
sess_config = tf.ConfigProto()
# rewriter_config_pb2.RewriterConfig.OFF
sess_config.graph_options.rewrite_options.dependency_optimization = 2
if self.use_xla:
sess_config.graph_options.optimizer_options.global_jit_level = (
tf.OptimizerOptions.ON_2)
# enable amp
sess_config.graph_options.rewrite_options.auto_mixed_precision = True
with tf.Graph().as_default(), tf.Session(config=sess_config) as sess:
inputs = tf.placeholder(tf.float32, name='input', shape=self.inputs_shape)
output = self.build_model(inputs)
img = np.random.uniform(size=self.inputs_shape)
sess.run(tf.global_variables_initializer())
if self.tensorrt:
fetches = [inputs.name] + [i.name for i in output]
goutput = self.convert_tr(graphdef, fetches)
inputs, output = goutput[0], goutput[1:]
if not self.use_xla:
# Don't use tf.group because XLA removes the whole graph for tf.group.
output = tf.group(*output)
else:
output = tf.add_n([tf.reduce_sum(x) for x in output])
output_name = [output.name]
input_name = inputs.name
graphdef = tf.graph_util.convert_variables_to_constants(
sess, sess.graph_def, output_name)
with tf.Graph().as_default(), tf.Session(config=sess_config) as sess:
tf.import_graph_def(graphdef, name='')
for i in range(warmup_runs):
start_time = time.time()
sess.run(output_name, feed_dict={input_name: img})
logging.info('Warm up: {} {:.4f}s'.format(i, time.time() - start_time))
print('Start benchmark runs total={}'.format(bm_runs))
start = time.perf_counter()
for i in range(bm_runs):
sess.run(output_name, feed_dict={input_name: img})
end = time.perf_counter()
inference_time = (end - start) / bm_runs
print('Per batch inference time: ', inference_time)
print('FPS: ', self.batch_size / inference_time)
if trace_filename:
run_options = tf.RunOptions()
run_options.trace_level = tf.RunOptions.FULL_TRACE
run_metadata = tf.RunMetadata()
sess.run(
output_name,
feed_dict={input_name: img},
options=run_options,
run_metadata=run_metadata)
logging.info('Dumping trace to %s', trace_filename)
trace_dir = os.path.dirname(trace_filename)
if not tf.io.gfile.exists(trace_dir):
tf.io.gfile.makedirs(trace_dir)
with tf.io.gfile.GFile(trace_filename, 'w') as trace_file:
trace = timeline.Timeline(step_stats=run_metadata.step_stats)
trace_file.write(trace.generate_chrome_trace_format(show_memory=True))
def convert_tr(self, graph_def, fetches):
"""Convert to TensorRT."""
# pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
from tensorflow.python.compiler.tensorrt import trt
converter = trt.TrtGraphConverter(
nodes_blacklist=[t.split(':')[0] for t in fetches],
input_graph_def=graph_def,
precision_mode=self.tensorrt)
infer_graph = converter.convert()
goutput = tf.import_graph_def(infer_graph, return_elements=fetches)
return goutput
def run_model(self, runmode, **kwargs):
"""Run the model on devices."""
if runmode == 'dry':
self.build_and_save_model()
elif runmode == 'freeze':
self.freeze_model()
elif runmode == 'ckpt':
self.eval_ckpt()
elif runmode == 'saved_model_benchmark':
self.saved_model_benchmark(
kwargs['input_image'],
trace_filename=kwargs.get('trace_filename', None))
elif runmode in ('infer', 'saved_model', 'saved_model_infer', 'saved_model_video'):
config_dict = {}
if kwargs.get('line_thickness', None):
config_dict['line_thickness'] = kwargs.get('line_thickness')
if kwargs.get('max_boxes_to_draw', None):
config_dict['max_boxes_to_draw'] = kwargs.get('max_boxes_to_draw')
if kwargs.get('min_score_thresh', None):
config_dict['min_score_thresh'] = kwargs.get('min_score_thresh')
if runmode == 'saved_model':
self.export_saved_model(**config_dict)
elif runmode == 'infer':
self.inference_single_image(
kwargs['input_image'],
kwargs['output_image_dir'], **config_dict)
elif runmode == 'saved_model_infer':
self.saved_model_inference(
kwargs['input_image'],
kwargs['output_image_dir'], **config_dict)
elif runmode == 'saved_model_video':
self.saved_model_video(
kwargs['input_video'],
kwargs['output_video'],
**config_dict)
elif runmode == 'bm':
self.benchmark_model(
warmup_runs=5,
bm_runs=kwargs.get('bm_runs', 10),
num_threads=kwargs.get('threads', 0),
trace_filename=kwargs.get('trace_filename', None))
else:
      raise ValueError('Unknown runmode {}'.format(runmode))
def main(_):
"""Main."""
if tf.io.gfile.exists(FLAGS.logdir) and FLAGS.delete_logdir:
logging.info('Deleting log dir ...')
tf.io.gfile.rmtree(FLAGS.logdir)
inspector = ModelInspector(
model_name=FLAGS.model_name,
logdir=FLAGS.logdir,
tensorrt=FLAGS.tensorrt,
use_xla=FLAGS.xla,
ckpt_path=FLAGS.ckpt_path,
export_ckpt=FLAGS.export_ckpt,
saved_model_dir=FLAGS.saved_model_dir,
tflite_path=FLAGS.tflite_path,
batch_size=FLAGS.batch_size,
hparams=FLAGS.hparams)
inspector.run_model(
FLAGS.runmode,
input_image=FLAGS.input_image,
output_image_dir=FLAGS.output_image_dir,
input_video=FLAGS.input_video,
output_video=FLAGS.output_video,
line_thickness=FLAGS.line_thickness,
max_boxes_to_draw=FLAGS.max_boxes_to_draw,
min_score_thresh=FLAGS.min_score_thresh,
bm_runs=FLAGS.bm_runs,
threads=FLAGS.threads,
trace_filename=FLAGS.trace_filename)
if __name__ == '__main__':
logging.set_verbosity(logging.WARNING)
tf.disable_eager_execution()
tf.app.run()
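# Illustrative invocations (paths and model names are placeholders, not part
# of the original file):
#
#   # Export a saved model from a checkpoint.
#   python model_inspect.py --runmode=saved_model \
#       --model_name=efficientdet-d0 --ckpt_path=/path/to/ckpt \
#       --saved_model_dir=/tmp/saved_model
#
#   # Run inference with the exported saved model.
#   python model_inspect.py --runmode=saved_model_infer \
#       --model_name=efficientdet-d0 --saved_model_dir=/tmp/saved_model \
#       --input_image=/path/to/images/*.jpg --output_image_dir=/tmp/out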
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/utils/model_inspect.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""IoU utils for box regression with iou losses.
Distance-IoU Loss: Faster and Better Learning for Bounding Box Regression.
https://arxiv.org/pdf/1911.08287.pdf
"""
from __future__ import absolute_import
from __future__ import division
# gtype import
from __future__ import print_function
import math
from typing import Text, Union
import numpy as np
import tensorflow.compat.v1 as tf
FloatType = Union[tf.Tensor, float, np.float32, np.float64]
def _get_v(b1_height: FloatType, b1_width: FloatType, b2_height: FloatType,
b2_width: FloatType) -> tf.Tensor:
"""Get the consistency measurement of aspect ratio for ciou."""
@tf.custom_gradient
def _get_grad_v(height, width):
"""backpropogate gradient."""
arctan = tf.atan(tf.math.divide_no_nan(b1_width, b1_height)) - tf.atan(
tf.math.divide_no_nan(width, height))
v = 4 * ((arctan / math.pi)**2)
def _grad_v(dv, variables=None):
gdw = dv * 8 * arctan * height / (math.pi**2)
gdh = -dv * 8 * arctan * width / (math.pi**2)
return [gdh, gdw], tf.gradients(v, variables, grad_ys=dv)
return v, _grad_v
return _get_grad_v(b2_height, b2_width)
def _iou_per_anchor(pred_boxes: FloatType,
target_boxes: FloatType,
iou_type: Text = 'iou') -> tf.Tensor:
"""Computing the IoU for a single anchor.
Args:
pred_boxes: predicted boxes, with coordinate [y_min, x_min, y_max, x_max].
target_boxes: target boxes, with coordinate [y_min, x_min, y_max, x_max].
iou_type: one of ['iou', 'ciou', 'diou', 'giou'].
Returns:
IoU loss float `Tensor`.
"""
# t_ denotes target boxes and p_ denotes predicted boxes.
t_ymin, t_xmin, t_ymax, t_xmax = target_boxes
p_ymin, p_xmin, p_ymax, p_xmax = pred_boxes
zero = tf.convert_to_tensor(0.0, t_ymin.dtype)
p_width = tf.maximum(zero, p_xmax - p_xmin)
p_height = tf.maximum(zero, p_ymax - p_ymin)
t_width = tf.maximum(zero, t_xmax - t_xmin)
t_height = tf.maximum(zero, t_ymax - t_ymin)
p_area = p_width * p_height
t_area = t_width * t_height
intersect_ymin = tf.maximum(p_ymin, t_ymin)
intersect_xmin = tf.maximum(p_xmin, t_xmin)
intersect_ymax = tf.minimum(p_ymax, t_ymax)
intersect_xmax = tf.minimum(p_xmax, t_xmax)
intersect_width = tf.maximum(zero, intersect_xmax - intersect_xmin)
intersect_height = tf.maximum(zero, intersect_ymax - intersect_ymin)
intersect_area = intersect_width * intersect_height
union_area = p_area + t_area - intersect_area
iou_v = tf.math.divide_no_nan(intersect_area, union_area)
if iou_type == 'iou':
return iou_v # iou is the simplest form.
enclose_ymin = tf.minimum(p_ymin, t_ymin)
enclose_xmin = tf.minimum(p_xmin, t_xmin)
enclose_ymax = tf.maximum(p_ymax, t_ymax)
enclose_xmax = tf.maximum(p_xmax, t_xmax)
assert iou_type in ('giou', 'diou', 'ciou')
if iou_type == 'giou': # giou is the generalized iou.
enclose_width = tf.maximum(zero, enclose_xmax - enclose_xmin)
enclose_height = tf.maximum(zero, enclose_ymax - enclose_ymin)
enclose_area = enclose_width * enclose_height
giou_v = iou_v - tf.math.divide_no_nan(
(enclose_area - union_area), enclose_area)
return giou_v
assert iou_type in ('diou', 'ciou')
p_center = tf.stack([(p_ymin + p_ymax) / 2, (p_xmin + p_xmax) / 2])
t_center = tf.stack([(t_ymin + t_ymax) / 2, (t_xmin + t_xmax) / 2])
euclidean = tf.linalg.norm(t_center - p_center)
diag_length = tf.linalg.norm(
[enclose_ymax - enclose_ymin, enclose_xmax - enclose_xmin])
diou_v = iou_v - tf.math.divide_no_nan(euclidean**2, diag_length**2)
if iou_type == 'diou': # diou is the distance iou.
return diou_v
assert iou_type == 'ciou'
v = _get_v(p_height, p_width, t_height, t_width)
alpha = tf.math.divide_no_nan(v, ((1 - iou_v) + v))
return diou_v - alpha * v # the last one is ciou.
def iou_loss(pred_boxes: FloatType,
target_boxes: FloatType,
iou_type: Text = 'iou') -> tf.Tensor:
"""A unified interface for computing various IoU losses.
Let B and B_gt denotes the pred_box and B_gt is the target box (ground truth):
IoU = |B & B_gt| / |B | B_gt|
GIoU = IoU - |C - B U B_gt| / C, where C is the smallest box covering B and
B_gt.
DIoU = IoU - E(B, B_gt)^2 / c^2, E is the Euclidean distance of the center
points of B and B_gt, and c is the diagonal length of the smallest box
covering the two boxes
CIoU = IoU - DIoU - a * v, where a is a positive trade-off parameter, and
v measures the consistency of aspect ratio:
v = (arctan(w_gt / h_gt) - arctan(w / h)) * 4 / pi^2
where (w_gt, h_gt) and (w, h) are the width and height of the target and
predicted box respectively.
The returned loss is computed as 1 - one of {IoU, GIoU, DIoU, CIoU}.
Args:
pred_boxes: predicted boxes, with coordinate [y_min, x_min, y_max, x_max]*.
It can be multiple anchors, with each anchor box has four coordinates.
target_boxes: target boxes, with coordinate [y_min, x_min, y_max, x_max]*.
It can be multiple anchors, with each anchor box has four coordinates.
iou_type: one of ['iou', 'ciou', 'diou', 'giou'].
Returns:
IoU loss float `Tensor`.
"""
if iou_type not in ('iou', 'ciou', 'diou', 'giou'):
raise ValueError(
'Unknown loss_type {}, not iou/ciou/diou/giou'.format(iou_type))
pred_boxes = tf.convert_to_tensor(pred_boxes, tf.float32)
target_boxes = tf.cast(target_boxes, pred_boxes.dtype)
# t_ denotes target boxes and p_ denotes predicted boxes: (y, x, y_max, x_max)
pred_boxes_list = tf.unstack(pred_boxes, None, axis=-1)
target_boxes_list = tf.unstack(target_boxes, None, axis=-1)
assert len(pred_boxes_list) == len(target_boxes_list)
assert len(pred_boxes_list) % 4 == 0
iou_loss_list = []
for i in range(0, len(pred_boxes_list), 4):
pred_boxes = pred_boxes_list[i: i + 4]
target_boxes = target_boxes_list[i: i + 4]
# Compute mask.
t_ymin, t_xmin, t_ymax, t_xmax = target_boxes
mask = tf.not_equal((t_ymax - t_ymin) * (t_xmax - t_xmin), 0)
mask = tf.cast(mask, t_ymin.dtype)
# Loss should be mask * (1 - iou) = mask - masked_iou.
pred_boxes = [b * mask for b in pred_boxes]
iou_loss_list.append(
mask - tf.squeeze(_iou_per_anchor(pred_boxes, target_boxes, iou_type)))
if len(iou_loss_list) == 1:
return iou_loss_list[0]
return tf.reduce_sum(tf.stack(iou_loss_list), 0)
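# Illustrative usage and a small worked case (values assumed): for a
# predicted box [0, 0, 1, 1] and a target box [0, 0.5, 1, 1.5], the
# intersection area is 0.5 and the union is 1.5, so IoU = 1/3 and the 'iou'
# loss is 1 - 1/3 = 2/3. The other variants are selected the same way via
# iou_type='giou', 'diou' or 'ciou'.
#
#   loss = iou_loss(pred_boxes=[0.0, 0.0, 1.0, 1.0],
#                   target_boxes=[0.0, 0.5, 1.0, 1.5],
#                   iou_type='iou')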
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/utils/iou_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IVA EfficientDet utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common utils."""
from __future__ import absolute_import
from __future__ import division
# gtype import
from __future__ import print_function
import contextlib
import logging
import os
import re
from typing import Text, Tuple, Union
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow.compat.v2 as tf2
# pylint: disable=logger-format-interpolation
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level='INFO')
logger = logging.getLogger(__name__)
def swish(x):
"""Swish activation function.
# Arguments
x: Input tensor.
# Returns
The Swish activation: `x * sigmoid(x)`.
# References
[Searching for Activation Functions](https://arxiv.org/abs/1710.05941)
"""
return x * tf.keras.backend.sigmoid(x)
def activation_fn(features: tf.Tensor, act_type: Text):
"""Customized non-linear activation type."""
if act_type == 'swish':
return tf.keras.layers.Activation(swish)(features)
if act_type == 'swish_native':
return tf.keras.layers.Activation(swish)(features)
if act_type == 'relu':
return tf.keras.layers.ReLU()(features)
if act_type == 'relu6':
return tf.keras.layers.ReLU(6.)(features) # tf.nn.relu6(features)
raise ValueError('Unsupported act_type {}'.format(act_type))
def get_ema_vars():
"""Get all exponential moving average (ema) variables."""
ema_vars = tf.trainable_variables() + tf.get_collection('moving_vars')
for v in tf.global_variables():
# We maintain mva for batch norm moving mean and variance as well.
if 'moving_mean' in v.name or 'moving_variance' in v.name:
ema_vars.append(v)
return list(set(ema_vars))
def get_ckpt_var_map(ckpt_path, ckpt_scope, var_scope, var_exclude_expr=None):
"""Get a var map for restoring from pretrained checkpoints.
Args:
ckpt_path: string. A pretrained checkpoint path.
ckpt_scope: string. Scope name for checkpoint variables.
var_scope: string. Scope name for model variables.
var_exclude_expr: string. A regex for excluding variables.
This is useful for finetuning with different classes, where
var_exclude_expr='.*class-predict.*' can be used.
Returns:
var_map: a dictionary from checkpoint name to model variables.
"""
# logger.info('Init model from checkpoint {}'.format(ckpt_path))
if not ckpt_scope.endswith('/') or not var_scope.endswith('/'):
    raise ValueError('Please specify a scope name ending with /')
if ckpt_scope.startswith('/'):
ckpt_scope = ckpt_scope[1:]
if var_scope.startswith('/'):
var_scope = var_scope[1:]
var_map = {}
# Get the list of vars to restore.
model_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=var_scope)
reader = tf.train.load_checkpoint(ckpt_path)
ckpt_var_names = set(reader.get_variable_to_shape_map().keys())
exclude_matcher = re.compile(var_exclude_expr) if var_exclude_expr else None
for v in model_vars:
if exclude_matcher and exclude_matcher.match(v.op.name):
continue
    if not v.op.name.startswith(var_scope):
      logger.info('skip {} -- does not match scope {}'.format(
          v.op.name, var_scope))
      continue
ckpt_var = ckpt_scope + v.op.name[len(var_scope):]
if ckpt_var not in ckpt_var_names:
if v.op.name.endswith('/ExponentialMovingAverage'):
ckpt_var = ckpt_scope + v.op.name[:-len('/ExponentialMovingAverage')]
if ckpt_var not in ckpt_var_names:
if 'Momentum' not in ckpt_var and 'RMSProp' not in ckpt_var:
# Only show vars not from optimizer to avoid false alarm.
logger.info('skip {} ({}) -- not in ckpt'.format(
v.op.name, ckpt_var))
continue
# logger.info('Init {} from ckpt var {}'.format(v.op.name, ckpt_var))
var_map[ckpt_var] = v
return var_map
def get_ckpt_var_map_ema(ckpt_path, ckpt_scope, var_scope, var_exclude_expr):
"""Get a ema var map for restoring from pretrained checkpoints.
Args:
ckpt_path: string. A pretrained checkpoint path.
ckpt_scope: string. Scope name for checkpoint variables.
var_scope: string. Scope name for model variables.
var_exclude_expr: string. A regex for excluding variables.
This is useful for finetuning with different classes, where
var_exclude_expr='.*class-predict.*' can be used.
Returns:
var_map: a dictionary from checkpoint name to model variables.
"""
# logger.info('Init model from checkpoint {}'.format(ckpt_path))
if not ckpt_scope.endswith('/') or not var_scope.endswith('/'):
    raise ValueError('Please specify a scope name ending with /')
if ckpt_scope.startswith('/'):
ckpt_scope = ckpt_scope[1:]
if var_scope.startswith('/'):
var_scope = var_scope[1:]
var_map = {}
# Get the list of vars to restore.
model_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=var_scope)
reader = tf.train.load_checkpoint(ckpt_path)
ckpt_var_names = set(reader.get_variable_to_shape_map().keys())
exclude_matcher = re.compile(var_exclude_expr) if var_exclude_expr else None
for v in model_vars:
if exclude_matcher and exclude_matcher.match(v.op.name):
logger.info(
'skip {} -- excluded by {}'.format(v.op.name, var_exclude_expr))
continue
    if not v.op.name.startswith(var_scope):
      logger.info('skip {} -- does not match scope {}'.format(
          v.op.name, var_scope))
      continue
if v.op.name.endswith('/ExponentialMovingAverage'):
logger.info('skip ema var {}'.format(v.op.name))
continue
ckpt_var = ckpt_scope + v.op.name[len(var_scope):]
ckpt_var_ema = ckpt_var + '/ExponentialMovingAverage'
if ckpt_var_ema in ckpt_var_names:
var_map[ckpt_var_ema] = v
logger.info('Init {} from ckpt var {}'.format(v.op.name, ckpt_var_ema))
elif ckpt_var in ckpt_var_names:
var_map[ckpt_var] = v
logger.info('Init {} from ckpt var {}'.format(v.op.name, ckpt_var))
else:
logger.info('skip {} ({}) -- not in ckpt'.format(v.op.name, ckpt_var))
return var_map
class PatchedBatchNormalization(tf.keras.layers.BatchNormalization):
"""Patched BatchNorm layer."""
def __init__(self, **kwargs):
"""Fixed default name of BatchNormalization to match TpuBatchNormalization."""
if not kwargs.get('name', None):
kwargs['name'] = 'tpu_batch_normalization'
super(PatchedBatchNormalization, self).__init__(**kwargs)
def call(self, *args, **kwargs):
"""Call."""
outputs = super(PatchedBatchNormalization, self).call(*args, **kwargs)
# A temporary hack for tf1 compatibility with keras batch norm.
for u in self.updates:
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, u)
return outputs
def batch_norm_class(is_training):
"""Return BN class based on training status."""
if is_training:
return PatchedBatchNormalization
return PatchedBatchNormalization
def batch_normalization(inputs, training=False, **kwargs):
"""Return BN layer based on training status."""
bn_layer = batch_norm_class(training)(**kwargs)
return bn_layer(inputs, training=training)
def batch_norm_act(inputs,
is_training_bn: bool,
act_type: Union[Text, None],
init_zero: bool = False,
data_format: Text = 'channels_last',
momentum: float = 0.99,
epsilon: float = 1e-3,
name: Text = None):
"""Performs a batch normalization followed by a non-linear activation.
Args:
inputs: `Tensor` of shape `[batch, channels, ...]`.
is_training_bn: `bool` for whether the model is training.
act_type: non-linear relu function type. If None, omits the relu operation.
init_zero: `bool` if True, initializes scale parameter of batch
normalization with 0 instead of 1 (default).
      data_format: `str` either "channels_first" for `[batch, channels, height,
        width]` or "channels_last" for `[batch, height, width, channels]`.
      momentum: `float`, momentum of batch norm.
epsilon: `float`, small value for numerical stability.
name: the name of the batch normalization layer
Returns:
A normalized `Tensor` with the same `data_format`.
"""
if init_zero:
gamma_initializer = tf.zeros_initializer()
else:
gamma_initializer = tf.ones_initializer()
if data_format == 'channels_first':
axis = 1
else:
axis = 3
inputs = batch_normalization(
inputs=inputs,
axis=axis,
momentum=momentum,
epsilon=epsilon,
center=True,
scale=True,
training=is_training_bn,
gamma_initializer=gamma_initializer,
name=name)
if act_type:
inputs = activation_fn(inputs, act_type)
return inputs
def drop_connect(inputs, is_training, survival_prob):
"""Drop the entire conv with given survival probability."""
# "Deep Networks with Stochastic Depth", https://arxiv.org/pdf/1603.09382.pdf
if not is_training:
return inputs
# Compute tensor.
batch_size = tf.shape(inputs)[0]
random_tensor = survival_prob
random_tensor += tf.random.uniform([batch_size, 1, 1, 1], dtype=inputs.dtype)
binary_tensor = tf.floor(random_tensor)
    # Unlike the conventional approach of multiplying by survival_prob at test
    # time, we divide by survival_prob at training time, so that no additional
    # compute is needed at test time.
output = tf.div(inputs, survival_prob) * binary_tensor
return output
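# Illustrative sketch (not part of the original file): with survival_prob = 0.8 each
# example keeps its branch with probability 0.8 and the kept outputs are scaled by
# 1 / 0.8, so the expectation matches inference, where drop_connect() is a no-op.
def _example_drop_connect():
    """Illustrative only: apply drop_connect to a dummy feature map."""
    feats = tf.ones([4, 8, 8, 16])
    train_out = drop_connect(feats, is_training=True, survival_prob=0.8)
    eval_out = drop_connect(feats, is_training=False, survival_prob=0.8)
    return train_out, eval_out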
def num_params_flops(readable_format=True):
"""Return number of parameters and flops."""
nparams = np.sum(
[np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
options = tf.profiler.ProfileOptionBuilder.float_operation()
options['output'] = 'none'
flops = tf.profiler.profile(
tf.get_default_graph(), options=options).total_float_ops
# We use flops to denote multiply-adds, which is counted as 2 ops in tfprof.
flops = flops // 2
if readable_format:
nparams = float(nparams) * 1e-6
flops = float(flops) * 1e-9
return nparams, flops
conv_kernel_initializer = tf.initializers.variance_scaling()
dense_kernel_initializer = tf.initializers.variance_scaling()
class Pair(tuple):
"""Pair data structure."""
def __new__(cls, name, value):
"""New object."""
return super(Pair, cls).__new__(cls, (name, value))
def __init__(self, name, _): # pylint: disable=super-init-not-called
"""Init."""
self.name = name
def scalar(name, tensor):
"""Stores a (name, Tensor) tuple in a custom collection."""
    # logger.info('Adding scalar summary {}'.format(Pair(name, tensor)))
tf.add_to_collection('scalar_summaries', Pair(name, tf.reduce_mean(tensor)))
def image(name, tensor):
"""Stores a (name, Tensor) tuple in a custom collection."""
# logger.info('Adding image summary {}'.format(Pair(name, tensor)))
tf.add_to_collection('image_summaries', Pair(name, tensor))
def get_tpu_host_call(global_step, params):
"""Get TPU host call for summaries."""
scalar_summaries = tf.get_collection('scalar_summaries')
if params['img_summary_steps']:
image_summaries = tf.get_collection('image_summaries')
else:
image_summaries = []
if not scalar_summaries and not image_summaries:
return None # No summaries to write.
model_dir = params['model_dir']
iterations_per_loop = params.get('iterations_per_loop', 100)
img_steps = params['img_summary_steps']
def host_call_fn(global_step, *args):
"""Training host call. Creates summaries for training metrics."""
gs = global_step[0]
with tf2.summary.create_file_writer(
model_dir, max_queue=iterations_per_loop).as_default():
with tf2.summary.record_if(True):
for i, _ in enumerate(scalar_summaries):
name = scalar_summaries[i][0]
tensor = args[i][0]
tf2.summary.scalar(name, tensor, step=gs)
if img_steps:
with tf2.summary.record_if(lambda: tf.math.equal(gs % img_steps, 0)):
                        # Log images every `img_steps` steps.
for i, _ in enumerate(image_summaries):
name = image_summaries[i][0]
tensor = args[i + len(scalar_summaries)]
tf2.summary.image(name, tensor, step=gs)
return tf.summary.all_v2_summary_ops()
reshaped_tensors = [tf.reshape(t, [1]) for _, t in scalar_summaries]
reshaped_tensors += [t for _, t in image_summaries]
global_step_t = tf.reshape(global_step, [1])
return host_call_fn, [global_step_t] + reshaped_tensors
def archive_ckpt(ckpt_eval, ckpt_objective, ckpt_path):
"""Archive a checkpoint if the metric is better."""
ckpt_dir, ckpt_name = os.path.split(ckpt_path)
saved_objective_path = os.path.join(ckpt_dir, 'best_objective.txt')
saved_objective = float('-inf')
if tf.io.gfile.exists(saved_objective_path):
with tf.io.gfile.GFile(saved_objective_path, 'r') as f:
saved_objective = float(f.read())
if saved_objective > ckpt_objective:
logger.info('Ckpt {} is worse than {}'.format(ckpt_objective, saved_objective))
return False
filenames = tf.io.gfile.glob(ckpt_path + '.*')
    if not filenames:
logger.info('No files to copy for checkpoint {}'.format(ckpt_path))
return False
# clear up the backup folder.
backup_dir = os.path.join(ckpt_dir, 'backup')
if tf.io.gfile.exists(backup_dir):
tf.io.gfile.rmtree(backup_dir)
# rename the old checkpoints to backup folder.
dst_dir = os.path.join(ckpt_dir, 'archive')
if tf.io.gfile.exists(dst_dir):
logger.info('mv {} to {}'.format(dst_dir, backup_dir))
tf.io.gfile.rename(dst_dir, backup_dir)
# Write checkpoints.
tf.io.gfile.makedirs(dst_dir)
for f in filenames:
dest = os.path.join(dst_dir, os.path.basename(f))
tf.io.gfile.copy(f, dest, overwrite=True)
ckpt_state = tf.train.generate_checkpoint_state_proto(
dst_dir,
model_checkpoint_path=os.path.join(dst_dir, ckpt_name))
with tf.io.gfile.GFile(os.path.join(dst_dir, 'checkpoint'), 'w') as f:
f.write(str(ckpt_state))
with tf.io.gfile.GFile(os.path.join(dst_dir, 'best_eval.txt'), 'w') as f:
f.write('%s' % ckpt_eval)
# Update the best objective.
with tf.io.gfile.GFile(saved_objective_path, 'w') as f:
f.write('%f' % ckpt_objective)
logger.info('Copying checkpoint {} to {}'.format(ckpt_path, dst_dir))
return True
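# Illustrative sketch (not part of the original file): archive the current checkpoint
# when its objective (e.g. COCO AP) beats the best value seen so far. The checkpoint
# path and AP value below are hypothetical.
def _example_archive_ckpt():
    """Illustrative only: keep the best checkpoint under <ckpt_dir>/archive."""
    return archive_ckpt(ckpt_eval='AP: 0.321', ckpt_objective=0.321,
                        ckpt_path='/results/train/model.ckpt-5000')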
def delete_ckpt(ckpt_path):
"""Delete old checkpoints."""
if ckpt_path is None:
logger.info('No old checkpoints')
return True
# ckpt_dir, ckpt_name = os.path.split(ckpt_path)
# get checkpoint files
filenames = tf.io.gfile.glob(ckpt_path + '.*')
if filenames is None:
logger.info('No files to delete for checkpoint {}'.format(ckpt_path))
return True
# remove checkpoint files
for f in filenames:
tf.io.gfile.remove(f)
logger.info('Deleted checkpoint {}'.format(ckpt_path))
return True
def parse_image_size(image_size: Union[Text, int, Tuple[int, int]]):
"""Parse the image size and return (height, width).
Args:
        image_size: An integer, a tuple (H, W), or a string in 'WxH' or 'H,W' format.
Returns:
A tuple of integer (height, width).
"""
if isinstance(image_size, int):
# image_size is integer, with the same width and height.
return (image_size, image_size)
if isinstance(image_size, str):
if 'x' in image_size:
# image_size is a string with format WxH
width, height = image_size.lower().split('x')
elif ',' in image_size:
# image_size is a string with format (H, W)
height, width = image_size.lower().split(',')
return (int(height), int(width))
if isinstance(image_size, tuple):
return image_size
    raise ValueError('image_size must be an int, WxH string, or (height, width) '
                     'tuple. Was %r' % image_size)
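# Illustrative sketch (not part of the original file): the three accepted input forms
# and the (height, width) tuples they map to.
def _example_parse_image_size():
    """Illustrative only: expected results of parse_image_size()."""
    assert parse_image_size(640) == (640, 640)
    assert parse_image_size('640x320') == (320, 640)   # 'WxH' string
    assert parse_image_size('512,768') == (512, 768)   # 'H,W' string
    assert parse_image_size((512, 768)) == (512, 768)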
def get_feat_sizes(image_size: Union[Text, int, Tuple[int, int]],
max_level: int):
"""Get feat widths and heights for all levels.
Args:
        image_size: An integer, a tuple (H, W), or a string in 'WxH' format.
max_level: maximum feature level.
Returns:
feat_sizes: a list of tuples (height, width) for each level.
"""
image_size = parse_image_size(image_size)
feat_sizes = [{'height': image_size[0], 'width': image_size[1]}]
feat_size = image_size
for _ in range(1, max_level + 1):
feat_size = ((feat_size[0] - 1) // 2 + 1, (feat_size[1] - 1) // 2 + 1)
feat_sizes.append({'height': feat_size[0], 'width': feat_size[1]})
return feat_sizes
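# Illustrative sketch (not part of the original file): for a 640x640 input and
# max_level=7 the per-level sizes are 640, 320, 160, 80, 40, 20, 10 and 5, so the
# FPN levels 3-7 see 80x80 down to 5x5 feature maps.
def _example_get_feat_sizes():
    """Illustrative only: feature sizes for a 640x640 input."""
    sizes = get_feat_sizes(640, max_level=7)
    assert sizes[3] == {'height': 80, 'width': 80}
    assert sizes[7] == {'height': 5, 'width': 5}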
def verify_feats_size(feats,
feat_sizes,
min_level,
max_level,
data_format='channels_last'):
"""Verify the feature map sizes."""
expected_output_size = feat_sizes[min_level:max_level + 1]
for cnt, size in enumerate(expected_output_size):
h_id, w_id = (2, 3) if data_format == 'channels_first' else (1, 2)
if feats[cnt].shape[h_id] != size['height']:
raise ValueError(
'feats[{}] has shape {} but its height should be {}.'
'(input_height: {}, min_level: {}, max_level: {}.)'.format(
cnt, feats[cnt].shape, size['height'], feat_sizes[0]['height'],
min_level, max_level))
if feats[cnt].shape[w_id] != size['width']:
raise ValueError(
'feats[{}] has shape {} but its width should be {}.'
'(input_width: {}, min_level: {}, max_level: {}.)'.format(
cnt, feats[cnt].shape, size['width'], feat_sizes[0]['width'],
min_level, max_level))
@contextlib.contextmanager
def float16_scope():
"""Scope class for float16."""
def _custom_getter(getter, *args, **kwargs):
"""Returns a custom getter that methods must be called under."""
cast_to_float16 = False
requested_dtype = kwargs['dtype']
if requested_dtype == tf.float16:
kwargs['dtype'] = tf.float32
cast_to_float16 = True
var = getter(*args, **kwargs)
if cast_to_float16:
var = tf.cast(var, tf.float16)
return var
with tf.variable_scope('', custom_getter=_custom_getter) as varscope:
yield varscope
def set_precision_policy(policy_name: Text = None, loss_scale: bool = False):
"""Set precision policy according to the name.
Args:
policy_name: precision policy name, one of 'float32', 'mixed_float16',
'mixed_bfloat16', or None.
loss_scale: whether to use loss scale (only for training).
"""
if not policy_name:
return
assert policy_name in ('mixed_float16', 'mixed_bfloat16', 'float32')
logger.info('use mixed precision policy name %s', policy_name)
    # TODO(tanmingxing): use tf.keras.layers.enable_v2_dtype_behavior() when it
    # is available in a stable TF release.
from tensorflow.python.keras.engine import base_layer_utils
base_layer_utils.enable_v2_dtype_behavior()
# mixed_float16 training is not supported for now, so disable loss_scale.
# float32 and mixed_bfloat16 do not need loss scale for training.
if loss_scale:
policy = tf2.keras.mixed_precision.experimental.Policy(policy_name)
else:
policy = tf2.keras.mixed_precision.experimental.Policy(
policy_name, loss_scale=None)
tf2.keras.mixed_precision.experimental.set_policy(policy)
def build_model_with_precision(pp, mm, ii, tt, *args, **kwargs):
"""Build model with its inputs/params for a specified precision context.
This is highly specific to this codebase, and not intended to be general API.
Advanced users only. DO NOT use it if you don't know what it does.
    NOTE: short argument names are intended to avoid conflicts with kwargs.
Args:
pp: A string, precision policy name, such as "mixed_float16".
        mm: A function, the model builder.
ii: A tensor, for model inputs.
tt: A bool, If true, it is for training; otherwise, it is for eval.
*args: A list of model arguments.
**kwargs: A dict, extra model parameters.
Returns:
the output of mm model.
"""
if pp == 'mixed_bfloat16':
set_precision_policy(pp)
inputs = tf.cast(ii, tf.bfloat16)
with tf.tpu.bfloat16_scope():
outputs = mm(inputs, *args, **kwargs)
set_precision_policy('float32')
elif pp == 'mixed_float16':
set_precision_policy(pp, loss_scale=tt)
inputs = tf.cast(ii, tf.float16)
with float16_scope():
outputs = mm(inputs, *args, **kwargs)
set_precision_policy('float32')
elif not pp or pp == 'float32':
outputs = mm(ii, *args, **kwargs)
else:
        raise ValueError('Unknown precision name {}'.format(pp))
# Users are responsible to convert the dtype of all outputs.
return outputs
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/utils/utils.py |
"""Utilities for distributed execution."""
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
__all__ = ["MPI_local_rank", "MPI_rank", "MPI_size", "MPI_rank_and_size", "MPI_is_distributed"]
def MPI_is_distributed():
"""Return a boolean whether a distributed training/inference runtime is being used.
:return: bool
"""
if all([var in os.environ for var in ["OMPI_COMM_WORLD_RANK", "OMPI_COMM_WORLD_SIZE"]]):
return True
if all([var in os.environ for var in ["SLURM_PROCID", "SLURM_NTASKS"]]):
return True
return False
def MPI_local_rank():
"""Local rank."""
if "OMPI_COMM_WORLD_LOCAL_RANK" in os.environ:
return int(os.environ.get("OMPI_COMM_WORLD_LOCAL_RANK"))
if "SLURM_LOCALID" in os.environ:
return int(os.environ.get("SLURM_LOCALID"))
return 0
def MPI_rank():
"""MPI rank."""
return MPI_rank_and_size()[0]
def MPI_size():
"""MPI size."""
return MPI_rank_and_size()[1]
def MPI_rank_and_size():
"""MPI rank and size."""
if "tensorflow" in sys.modules:
return mpi_env_MPI_rank_and_size()
return 0, 1
# Source: https://github.com/horovod/horovod/blob/c3626e/test/common.py#L25
def mpi_env_MPI_rank_and_size():
"""Get MPI rank and size from environment variables and return them as a tuple of integers.
Most MPI implementations have an `mpirun` or `mpiexec` command that will
run an MPI executable and set up all communication necessary between the
different processors. As part of that set up, they will set environment
variables that contain the rank and size of the MPI_COMM_WORLD
communicator. We can read those environment variables from Python in order
to ensure that `hvd.rank()` and `hvd.size()` return the expected values.
Since MPI is just a standard, not an implementation, implementations
typically choose their own environment variable names. This function tries
    to support several different implementations, but really it only needs to
support whatever implementation we want to use for the TensorFlow test
suite.
If this is not running under MPI, then defaults of rank zero and size one
are returned. (This is appropriate because when you call MPI_Init in an
application not started with mpirun, it will create a new independent
communicator with only one process in it.)
Source: https://github.com/horovod/horovod/blob/c3626e/test/common.py#L25
"""
rank_env = 'PMI_RANK SLURM_PROCID OMPI_COMM_WORLD_RANK'.split()
size_env = 'PMI_SIZE SLURM_NTASKS OMPI_COMM_WORLD_SIZE'.split()
for rank_var, size_var in zip(rank_env, size_env):
rank = os.environ.get(rank_var)
size = os.environ.get(size_var)
if rank is not None and size is not None:
return int(rank), int(size)
# Default to rank zero and size one if there are no environment variables
return 0, 1
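# Illustrative sketch (not part of the original file): a typical rank-0 guard when the
# job is launched with `mpirun -np N ...`; the string payloads are placeholders.
def _example_rank_zero_only():
    """Illustrative only: restrict checkpoint/log writing to rank 0."""
    if not MPI_is_distributed() or MPI_rank() == 0:
        return 'rank 0: write checkpoints and logs here'
    return 'other ranks: skip the write'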
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/utils/distributed_utils.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Hparams for model architecture and trainer."""
from __future__ import absolute_import
from __future__ import division
# gtype import
from __future__ import print_function
import ast
import copy
from typing import Any, Dict, Text
import six
import tensorflow.compat.v1 as tf
import yaml
def eval_str_fn(val):
"""Eval string."""
if val in {'true', 'false'}:
return val == 'true'
try:
return ast.literal_eval(val)
except (ValueError, SyntaxError):
return val
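# Illustrative sketch (not part of the original file): eval_str_fn() maps 'true'/'false'
# to booleans, literal-evals numbers and tuples, and falls back to the raw string.
def _example_eval_str_fn():
    """Illustrative only: expected conversions of eval_str_fn()."""
    assert eval_str_fn('true') is True
    assert eval_str_fn('2') == 2
    assert eval_str_fn('(1.0, 1.4)') == (1.0, 1.4)
    assert eval_str_fn('swish') == 'swish'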
# pylint: disable=protected-access
class Config(object):
"""A config utility class."""
def __init__(self, config_dict=None):
"""Init."""
self.update(config_dict)
def __setattr__(self, k, v):
"""Set attribute."""
self.__dict__[k] = Config(v) if isinstance(v, dict) else copy.deepcopy(v)
def __getattr__(self, k):
"""Get attribute."""
return self.__dict__[k]
def __getitem__(self, k):
"""Get item."""
return self.__dict__[k]
def __repr__(self):
"""Repr."""
return repr(self.as_dict())
def __str__(self):
"""To string."""
try:
return yaml.dump(self.as_dict(), indent=4)
except TypeError:
return str(self.as_dict())
def _update(self, config_dict, allow_new_keys=True):
"""Recursively update internal members."""
if not config_dict:
return
for k, v in six.iteritems(config_dict):
if k not in self.__dict__:
if allow_new_keys:
self.__setattr__(k, v)
else:
raise KeyError('Key `{}` does not exist for overriding. '.format(k))
else:
if isinstance(self.__dict__[k], dict):
self.__dict__[k]._update(v, allow_new_keys)
else:
self.__dict__[k] = copy.deepcopy(v)
def get(self, k, default_value=None):
"""Get member's value."""
return self.__dict__.get(k, default_value)
def update(self, config_dict):
"""Update members while allowing new keys."""
self._update(config_dict, allow_new_keys=True)
def keys(self):
"""Return all keys."""
return self.__dict__.keys()
def override(self, config_dict_or_str):
"""Update members while disallowing new keys."""
if isinstance(config_dict_or_str, str):
if not config_dict_or_str:
return
if '=' in config_dict_or_str:
config_dict = self.parse_from_str(config_dict_or_str)
elif config_dict_or_str.endswith('.yaml'):
config_dict = self.parse_from_yaml(config_dict_or_str)
else:
raise ValueError(
                    'Invalid string {}, must end with .yaml or contain "=".'.format(
config_dict_or_str))
elif isinstance(config_dict_or_str, dict):
config_dict = config_dict_or_str
else:
raise ValueError('Unknown value type: {}'.format(config_dict_or_str))
self._update(config_dict, allow_new_keys=False)
def parse_from_module(self, module_name: Text) -> Dict[Any, Any]:
"""Import config from module_name containing key=value pairs."""
config_dict = {}
module = __import__(module_name)
for attr in dir(module):
# skip built-ins and private attributes
if not attr.startswith('_'):
config_dict[attr] = getattr(module, attr)
return config_dict
def parse_from_yaml(self, yaml_file_path: Text) -> Dict[Any, Any]:
"""Parses a yaml file and returns a dictionary."""
with tf.io.gfile.GFile(yaml_file_path, 'r') as f:
config_dict = yaml.load(f, Loader=yaml.FullLoader)
return config_dict
def save_to_yaml(self, yaml_file_path):
"""Write a dictionary into a yaml file."""
with tf.gfile.Open(yaml_file_path, 'w') as f:
yaml.dump(self.as_dict(), f, default_flow_style=False)
def parse_from_str(self, config_str: Text) -> Dict[Any, Any]:
"""parse from a string in format 'x=a,y=2' and return the dict."""
if not config_str:
return {}
config_dict = {}
try:
for kv_pair in config_str.split(','):
if not kv_pair: # skip empty string
continue
k, v = kv_pair.split('=')
config_dict[k.strip()] = eval_str_fn(v.strip())
return config_dict
except ValueError:
raise ValueError('Invalid config_str: {}'.format(config_str))
def as_dict(self):
"""Returns a dict representation."""
config_dict = {}
for k, v in six.iteritems(self.__dict__):
if isinstance(v, Config):
config_dict[k] = v.as_dict()
else:
config_dict[k] = copy.deepcopy(v)
return config_dict
# pylint: enable=protected-access
def default_detection_configs():
"""Returns a default detection configs."""
h = Config()
# model name.
h.name = 'efficientdet-d1'
# activation type: see activation_fn in utils.py.
h.act_type = 'swish'
# input preprocessing parameters
h.image_size = 640 # An integer or a string WxH such as 640x320.
h.target_size = None
h.input_rand_hflip = True
h.train_scale_min = 0.1
h.train_scale_max = 2.0
h.autoaugment_policy = None
h.use_augmix = False
# mixture_width, mixture_depth, alpha
h.augmix_params = (3, -1, 1)
# dataset specific parameters
h.num_classes = 91
h.skip_crowd_during_training = True
h.label_id_mapping = None
h.max_instances_per_image = 100 # Default to 100 for COCO.
h.regenerate_source_id = False
# model architecture
h.min_level = 3
h.max_level = 7
h.num_scales = 3
# aspect ratio with format (w, h). Can be computed with k-mean per dataset.
h.aspect_ratios = [(1.0, 1.0), (1.4, 0.7), (0.7, 1.4)]
h.anchor_scale = 4.0
# is batchnorm training mode
h.is_training_bn = True
# optimization
h.momentum = 0.9
h.optimizer = 'sgd' # can be 'adam' or 'sgd'.
h.learning_rate = 0.08 # 0.008 for adam.
h.lr_warmup_init = 0.008 # 0.0008 for adam.
h.lr_warmup_epoch = 1.0
h.first_lr_drop_epoch = 200.0
h.second_lr_drop_epoch = 250.0
h.poly_lr_power = 0.9
h.clip_gradients_norm = 0.0
h.num_epochs = 300
h.data_format = 'channels_last'
h.softnms = True
# classification loss
h.alpha = 0.25
h.gamma = 1.5
# localization loss
h.delta = 0.1
h.box_loss_weight = 50.0
h.iou_loss_type = None
h.iou_loss_weight = 1.0
# regularization l2 loss.
h.l2_weight_decay = 4e-5
h.l1_weight_decay = 0.0
# use horovod for multi-gpu training. If None, use TF default.
# precision: one of 'float32', 'mixed_float16', 'mixed_bfloat16'.
h.precision = None # If None, use float32.
h.amp = True # enable AMP
# For detection.
h.box_class_repeats = 3
h.fpn_cell_repeats = 3
h.fpn_num_filters = 88
h.separable_conv = True
h.apply_bn_for_resampling = True
h.conv_after_downsample = False
h.conv_bn_act_pattern = False
h.use_native_resize_op = True
h.pooling_type = None
# version.
h.fpn_name = 'bifpn_sum'
h.fpn_weight_method = None
h.fpn_config = None
# No stochastic depth in default.
h.survival_prob = None
h.img_summary_steps = None
# h.lr_decay_method = 'cosine' # hardcode to be the only decay method
h.moving_average_decay = 0.9998
h.ckpt_var_scope = None # ckpt variable scope.
# exclude vars when loading pretrained ckpts.
h.var_exclude_expr = '.*/class-predict/.*' # exclude class weights in default
h.backbone_name = 'efficientnet-b1'
h.backbone_config = None
h.var_freeze_expr = None
h.pruned_model_path = None
h.mode = 'train'
h.freeze_blocks = None
h.freeze_bn = False
h.key = None
return h
efficientdet_model_param_dict = {
'efficientdet-d0':
dict(
name='efficientdet-d0',
backbone_name='efficientnet-b0',
image_size=512,
fpn_num_filters=64,
fpn_cell_repeats=3,
box_class_repeats=3,
),
'efficientdet-d1':
dict(
name='efficientdet-d1',
backbone_name='efficientnet-b1',
image_size=640,
fpn_num_filters=88,
fpn_cell_repeats=4,
box_class_repeats=3,
),
'efficientdet-d2':
dict(
name='efficientdet-d2',
backbone_name='efficientnet-b2',
image_size=768,
fpn_num_filters=112,
fpn_cell_repeats=5,
box_class_repeats=3,
),
'efficientdet-d3':
dict(
name='efficientdet-d3',
backbone_name='efficientnet-b3',
image_size=896,
fpn_num_filters=160,
fpn_cell_repeats=6,
box_class_repeats=4,
),
'efficientdet-d4':
dict(
name='efficientdet-d4',
backbone_name='efficientnet-b4',
image_size=1024,
fpn_num_filters=224,
fpn_cell_repeats=7,
box_class_repeats=4,
),
'efficientdet-d5':
dict(
name='efficientdet-d5',
backbone_name='efficientnet-b5',
image_size=1280,
fpn_num_filters=288,
fpn_cell_repeats=7,
box_class_repeats=4,
),
'efficientdet-d6':
dict(
name='efficientdet-d6',
backbone_name='efficientnet-b6',
image_size=1280,
fpn_num_filters=384,
fpn_cell_repeats=8,
box_class_repeats=5,
fpn_name='bifpn_sum', # Use unweighted sum for training stability.
),
'efficientdet-d7':
dict(
name='efficientdet-d7',
backbone_name='efficientnet-b6',
image_size=1536,
fpn_num_filters=384,
fpn_cell_repeats=8,
box_class_repeats=5,
anchor_scale=5.0,
fpn_name='bifpn_sum', # Use unweighted sum for training stability.
),
}
efficientdet_lite_param_dict = {
# lite models are in progress and subject to changes.
'efficientdet-lite0':
dict(
name='efficientdet-lite0',
backbone_name='efficientnet-lite0',
image_size=512,
fpn_num_filters=64,
fpn_cell_repeats=3,
box_class_repeats=3,
act_type='relu',
use_native_resize_op=True,
),
'efficientdet-lite1':
dict(
name='efficientdet-lite1',
backbone_name='efficientnet-lite1',
image_size=640,
fpn_num_filters=88,
fpn_cell_repeats=4,
box_class_repeats=3,
act_type='relu',
use_native_resize_op=True,
),
'efficientdet-lite2':
dict(
name='efficientdet-lite2',
backbone_name='efficientnet-lite2',
image_size=768,
fpn_num_filters=112,
fpn_cell_repeats=5,
box_class_repeats=3,
act_type='relu',
use_native_resize_op=True,
),
'efficientdet-lite3':
dict(
name='efficientdet-lite3',
backbone_name='efficientnet-lite3',
image_size=896,
fpn_num_filters=160,
fpn_cell_repeats=6,
box_class_repeats=4,
act_type='relu',
use_native_resize_op=True,
),
'efficientdet-lite4':
dict(
name='efficientdet-lite4',
backbone_name='efficientnet-lite4',
image_size=1024,
fpn_num_filters=224,
fpn_cell_repeats=7,
box_class_repeats=4,
act_type='relu',
use_native_resize_op=True,
),
}
def get_efficientdet_config(model_name='efficientdet-d1'):
"""Get the default config for EfficientDet based on model name."""
h = default_detection_configs()
if model_name in efficientdet_model_param_dict:
h.override(efficientdet_model_param_dict[model_name])
elif model_name in efficientdet_lite_param_dict:
h.override(efficientdet_lite_param_dict[model_name])
else:
raise ValueError('Unknown model name: {}'.format(model_name))
return h
def get_detection_config(model_name):
"""Get config based on model name."""
if model_name.startswith('efficientdet'):
return get_efficientdet_config(model_name)
raise ValueError('model name must start with efficientdet.')
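# Illustrative sketch (not part of the original file): fetch a model config and override
# a couple of existing hparams; both keys below are defined in default_detection_configs().
def _example_get_detection_config():
    """Illustrative only: build a d0 config and override two hparams."""
    cfg = get_detection_config('efficientdet-d0')
    cfg.override('num_classes=91,anchor_scale=3.0')
    return cfg.as_dict()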
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/utils/hparams_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions to load EfficientNet/EfficientDet models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from zipfile import is_zipfile, ZipFile
import keras
import tensorflow as tf
from nvidia_tao_tf1.core.templates.utils_tf import swish
from nvidia_tao_tf1.cv.efficientdet.layers.image_resize_layer import ImageResizeLayer
from nvidia_tao_tf1.cv.efficientdet.layers.weighted_fusion_layer import WeightedFusion
from nvidia_tao_tf1.cv.efficientdet.utils import utils
from nvidia_tao_tf1.encoding import encoding
CUSTOM_OBJS = {
'swish': swish,
'PatchedBatchNormalization': utils.PatchedBatchNormalization,
'ImageResizeLayer': ImageResizeLayer,
'WeightedFusion': WeightedFusion}
def load_keras_model(keras_path, is_pruned):
"""Helper function to load keras or tf.keras model."""
    if is_pruned:
        return tf.keras.models.load_model(keras_path, custom_objects=CUSTOM_OBJS)
    else:
        return keras.models.load_model(keras_path, custom_objects=CUSTOM_OBJS)
def load_json_model(json_path, new_objs=None):
"""Helper function to load keras model from json file."""
new_objs = new_objs or {}
with open(json_path, 'r') as jf:
model_json = jf.read()
loaded_model = tf.keras.models.model_from_json(
model_json,
custom_objects={**CUSTOM_OBJS, **new_objs})
return loaded_model
def dump_json(model, out_path):
"""Model to json."""
with open(out_path, "w") as jf:
jf.write(model.to_json())
def get_model_with_input(model_path, input_layer):
"""Implement a trick to replace input tensor."""
def get_input_layer(*arg, **kargs):
return input_layer
return load_json_model(model_path, new_objs={'InputLayer': get_input_layer})
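# Illustrative sketch (not part of the original file): swap the input of a serialized
# model by letting the deserializer pick up a pre-built InputLayer. The JSON path and
# input shape below are hypothetical.
def _example_get_model_with_input():
    """Illustrative only: rebuild a model from JSON with a new static input shape."""
    new_input = tf.keras.layers.InputLayer(input_shape=(512, 512, 3),
                                           name='image_input')
    return get_model_with_input('/workspace/models/efficientdet_d0.json', new_input)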
def decode_tlt_file(filepath, key):
"""Decrypt the tlt checkpoint file."""
if filepath and filepath.endswith('.tlt'):
if not is_zipfile(filepath):
# support legacy .tlt model with encryption
os_handle, temp_filepath = tempfile.mkstemp()
os.close(os_handle)
# Decrypt the checkpoint file.
with open(filepath, 'rb') as encoded_file, open(temp_filepath, 'wb') as tmp_zipf:
encoding.decode(encoded_file, tmp_zipf, key.encode())
else:
# .tlt is a zip file
temp_filepath = filepath
        # If the decrypted .tlt file is a zip archive, it is either an EfficientDet
        # checkpoint or a pruned model; otherwise it is a classification model.
if is_zipfile(temp_filepath):
# create a temp to store extracted ckpt
temp_ckpt_dir = tempfile.mkdtemp()
# Load zip file and extract members to a tmp_directory.
try:
with ZipFile(temp_filepath, 'r') as zip_object:
hdf5_found = None
ckpt_found = None
for member in zip_object.namelist():
if member != 'checkpoint':
zip_object.extract(member, path=temp_ckpt_dir)
if member.endswith('.hdf5'):
# pruned model
hdf5_found = member
if '.ckpt-' in member:
step = int(member.split('.')[1].split('-')[-1])
ckpt_found = "model.ckpt-{}".format(step)
assert hdf5_found or ckpt_found, "The tlt file is in a wrong format."
except Exception:
raise IOError("The checkpoint file is not saved properly. \
Please delete it and rerun the script.")
return os.path.join(temp_ckpt_dir, hdf5_found or ckpt_found)
if os.path.exists(temp_filepath + '.hdf5'):
os.remove(temp_filepath + '.hdf5')
os.rename(temp_filepath, temp_filepath + '.hdf5')
return temp_filepath + '.hdf5'
return filepath
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/utils/model_loader.py |
# Copyright (c) 2006-2011, NIPY Developers
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the NIPY Developers nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Source: https://github.com/nipy/nitime/blob/c8eb314/nitime/lazyimports.py
"""Lazy import utilities.
This module provides lazy import functionality to improve the import
performance of nitime. For example, some parts of nitime leverage and import
matplotlib, which is quite a big package, yet most of the nitime code does not
depend on matplotlib. By lazily-loading a module, we defer the overhead of
importing it until the first time it is actually used, thereby speeding up
nitime imports.
A generic :class:`LazyImport` class is implemented which takes the module name
as a parameter, and acts as a proxy for that module, importing it only when
the module is used, but effectively acting as the module in every other way
(including inside IPython with respect to introspection and tab completion)
with the *exception* of reload() - reloading a :class:`LazyImport` raises an
:class:`ImportError`.
Commonly used nitime lazy imports are also defined in :mod:`nitime.lazy`, so
they can be reused throughout nitime.
"""
import sys
import types
class LazyImport(types.ModuleType):
"""Lazy Import class.
This class takes the module name as a parameter, and acts as a proxy for
that module, importing it only when the module is used, but effectively
acting as the module in every other way (including inside IPython with
respect to introspection and tab completion) with the *exception* of
reload()- reloading a :class:`LazyImport` raises an :class:`ImportError`.
>>> mlab = LazyImport('matplotlib.mlab')
No import happens on the above line, until we do something like call an
``mlab`` method or try to do tab completion or introspection on ``mlab``
in IPython.
>>> mlab
<module 'matplotlib.mlab' will be lazily loaded>
Now the :class:`LazyImport` will do an actual import, and call the dist
function of the imported module.
>>> mlab.dist(1969,2011)
42.0
"""
def __getattribute__(self, x):
"""Get Attributes."""
# This method will be called only once, since we'll change
# self.__class__ to LoadedLazyImport, and __getattribute__ will point
# to module.__getattribute__
name = object.__getattribute__(self, '__name__')
__import__(name)
        # if name above is 'package.foo.bar', package is returned; the docs
        # recommend that in order to get back the full thing, we import and
        # then look up the full name in sys.modules, see:
        # http://docs.python.org/library/functions.html#__import__
module = sys.modules[name]
        # Now that we've done the import, cut out the middleman and make self
        # act as the imported module
class LoadedLazyImport(types.ModuleType):
__getattribute__ = module.__getattribute__
__repr__ = module.__repr__
object.__setattr__(self, '__class__', LoadedLazyImport)
# The next line will make "reload(l)" a silent no-op
return module.__getattribute__(x)
def __repr__(self):
"""__repr__."""
return "<module '%s' will be lazily loaded>" % object.__getattribute__(self, '__name__')
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/utils/lazy_imports.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""COCO-style evaluation metrics.
Implements the interface of COCO API and metric_fn in tf.TPUEstimator.
COCO API: github.com/cocodataset/cocoapi/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import sys
import numpy as np
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
import tensorflow.compat.v1 as tf
class EvaluationMetric(object):
"""COCO evaluation metric class."""
def __init__(self, filename=None, testdev_dir=None):
"""Constructs COCO evaluation class.
The class provides the interface to metrics_fn in TPUEstimator. The
        _update_op() takes detections from each image and pushes them to
self.detections. The _evaluate() loads a JSON file in COCO annotation format
as the groundtruth and runs COCO evaluation.
Args:
filename: Ground truth JSON file name. If filename is None, use
groundtruth data passed from the dataloader for evaluation. filename is
ignored if testdev_dir is not None.
testdev_dir: folder name for testdev data. If None, run eval without
groundtruth, and filename will be ignored.
"""
self._block_print()
if filename:
self.coco_gt = COCO(filename)
self._enable_print()
self.filename = filename
self.testdev_dir = testdev_dir
self.metric_names = ['AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'ARmax1',
'ARmax10', 'ARmax100', 'ARs', 'ARm', 'ARl']
self._reset()
# Disable prints
def _block_print(self):
sys.stdout = open(os.devnull, 'w')
# Restore prints
def _enable_print(self):
sys.stdout = sys.__stdout__
def _reset(self):
"""Reset COCO API object."""
self._block_print()
if self.filename is None:
self.coco_gt = COCO()
self.detections = []
self.dataset = {
'images': [],
'annotations': [],
'categories': []
}
self.image_id = 1
self.annotation_id = 1
self.category_ids = []
self._enable_print()
def estimator_metric_fn(self, detections, groundtruth_data):
"""Constructs the metric function for tf.TPUEstimator.
For each metric, we return the evaluation op and an update op; the update op
is shared across all metrics and simply appends the set of detections to the
`self.detections` list. The metric op is invoked after all examples have
        been seen and computes the aggregate COCO metrics. Please find API details
in: https://www.tensorflow.org/api_docs/python/tf/contrib/learn/MetricSpec
Args:
detections: Detection results in a tensor with each row representing
[image_id, x, y, width, height, score, class]
groundtruth_data: Groundtruth annotations in a tensor with each row
representing [y1, x1, y2, x2, is_crowd, area, class].
Returns:
metrics_dict: A dictionary mapping from evaluation name to a tuple of
operations (`metric_op`, `update_op`). `update_op` appends the
detections for the metric to the `self.detections` list.
"""
def _evaluate():
"""Evaluates with detections from all images with COCO API.
Returns:
coco_metric: float numpy array with shape [12] representing the
coco-style evaluation metrics.
"""
self._block_print()
if self.filename is None:
self.coco_gt.dataset = self.dataset
self.coco_gt.createIndex()
if self.testdev_dir:
# Run on test-dev dataset.
box_result_list = []
for det in self.detections:
box_result_list.append({
'image_id': int(det[0]),
'category_id': int(det[6]),
'bbox': np.around(
det[1:5].astype(np.float64), decimals=2).tolist(),
'score': float(np.around(det[5], decimals=3)),
})
json.encoder.FLOAT_REPR = lambda o: format(o, '.3f')
output_path = os.path.join(self.testdev_dir,
'detections_test-dev2017_test_results.json')
with tf.io.gfile.GFile(output_path, 'w') as fid:
json.dump(box_result_list, fid)
self._reset()
return np.array([0.], dtype=np.float32)
# Run on validation dataset.
detections = np.array(self.detections)
image_ids = list(set(detections[:, 0]))
coco_dt = self.coco_gt.loadRes(detections)
coco_eval = COCOeval(self.coco_gt, coco_dt, iouType='bbox')
coco_eval.params.imgIds = image_ids
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
coco_metrics = coco_eval.stats
# clean self.detections after evaluation is done.
# this makes sure the next evaluation will start with an empty list of
# self.detections.
self._reset()
self._enable_print()
return np.array(coco_metrics, dtype=np.float32)
def _update_op(detections, groundtruth_data):
"""Update detection results and groundtruth data.
Append detection results to self.detections to aggregate results from
all validation set. The groundtruth_data is parsed and added into a
dictionary with the same format as COCO dataset, which can be used for
evaluation.
Args:
detections: Detection results in a tensor with each row representing
[image_id, x, y, width, height, score, class].
groundtruth_data: Groundtruth annotations in a tensor with each row
representing [y1, x1, y2, x2, is_crowd, area, class].
"""
for i in range(len(detections)):
# Filter out detections with predicted class label = -1.
indices = np.where(detections[i, :, -1] > -1)[0]
detections[i] = detections[i, indices]
if detections[i].shape[0] == 0:
continue
# Append groundtruth annotations to create COCO dataset object.
# Add images.
image_id = detections[i][0, 0]
if image_id == -1:
image_id = self.image_id
detections[i][:, 0] = image_id
self.detections.extend(detections[i])
if self.testdev_dir:
# Skip annotation for test-dev case.
self.image_id += 1
continue
self.dataset['images'].append({
'id': int(image_id),
})
# Add annotations.
indices = np.where(groundtruth_data[i, :, -1] > -1)[0]
for data in groundtruth_data[i, indices]:
box = data[0:4]
is_crowd = data[4]
area = data[5]
category_id = data[6]
if category_id < 0:
break
if area == -1:
area = (box[3] - box[1]) * (box[2] - box[0])
self.dataset['annotations'].append({
'id': int(self.annotation_id),
'image_id': int(image_id),
'category_id': int(category_id),
'bbox': [box[1], box[0], box[3] - box[1], box[2] - box[0]],
'area': area,
'iscrowd': int(is_crowd)
})
self.annotation_id += 1
self.category_ids.append(category_id)
self.image_id += 1
self.category_ids = list(set(self.category_ids))
self.dataset['categories'] = [
{'id': int(category_id)} for category_id in self.category_ids]
with tf.name_scope('coco_metric'):
if self.testdev_dir:
update_op = tf.py_func(_update_op, [detections, groundtruth_data], [])
metrics = tf.py_func(_evaluate, [], tf.float32)
metrics_dict = {'AP': (metrics, update_op)}
return metrics_dict
update_op = tf.py_func(_update_op, [detections, groundtruth_data], [])
metrics = tf.py_func(_evaluate, [], tf.float32)
metrics_dict = {}
for i, name in enumerate(self.metric_names):
metrics_dict[name] = (metrics[i], update_op)
return metrics_dict
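# Illustrative sketch (not part of the original file): how the metric is wired into an
# estimator's eval_metric_ops. The annotation path is hypothetical; tensor shapes follow
# the docstrings above (detections: [batch, K, 7], groundtruth: [batch, G, 7]).
def _example_eval_metric_ops(detections, groundtruth_data):
    """Illustrative only: build COCO eval_metric_ops from batched tensors."""
    evaluator = EvaluationMetric(filename='/data/annotations/instances_val2017.json')
    return evaluator.estimator_metric_fn(detections, groundtruth_data)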
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/utils/coco_metric.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Load an experiment spec file to run EfficientDet training, evaluation, pruning."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
from google.protobuf.text_format import Merge as merge_text_proto
import six
import nvidia_tao_tf1.cv.efficientdet.proto.experiment_pb2 as experiment_pb2
from nvidia_tao_tf1.cv.efficientdet.utils import utils
logger = logging.getLogger(__name__)
def eval_str(s):
"""If s is a string, return the eval results. Else return itself."""
if isinstance(s, six.string_types):
if len(s) > 0:
return eval(s)
return None
return s
def load_proto(spec_path, proto_buffer, default_spec_path=None, merge_from_default=True):
"""Load spec from file and merge with given proto_buffer instance.
Args:
spec_path (str): location of a file containing the custom spec proto.
        proto_buffer(pb2): protocol buffer instance to be loaded.
default_spec_path(str): location of default spec to use if merge_from_default is True.
merge_from_default (bool): disable default spec, if False, spec_path must be set.
Returns:
proto_buffer(pb2): protocol buffer instance updated with spec.
"""
def _load_from_file(filename, pb2):
with open(filename, "r") as f:
merge_text_proto(f.read(), pb2)
# Setting this flag false prevents concatenating repeated-fields
if merge_from_default:
assert default_spec_path, \
"default spec path has to be defined if merge_from_default is enabled"
# Load the default spec
_load_from_file(default_spec_path, proto_buffer)
else:
assert spec_path, "spec_path has to be defined, if merge_from_default is disabled"
# Merge a custom proto on top of the default spec, if given
if spec_path:
logger.info("Merging specification from %s", spec_path)
_load_from_file(spec_path, proto_buffer)
return proto_buffer
def load_experiment_spec(spec_path=None, merge_from_default=False):
"""Load experiment spec from a .txt file and return an experiment_pb2.Experiment object.
Args:
spec_path (str): location of a file containing the custom experiment spec proto.
merge_from_default (bool): disable default spec, if False, spec_path must be set.
Returns:
experiment_spec: protocol buffer instance of type experiment_pb2.Experiment.
"""
experiment_spec = experiment_pb2.Experiment()
file_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
default_spec_path = os.path.join(file_path, 'experiment_specs/default.txt')
experiment_spec = load_proto(spec_path, experiment_spec, default_spec_path,
merge_from_default)
spec_checker(experiment_spec)
return experiment_spec
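# Illustrative sketch (not part of the original file): typical call pattern; the spec path
# below is hypothetical.
def _example_load_experiment_spec():
    """Illustrative only: load a custom spec merged on top of the default spec."""
    spec = load_experiment_spec('/workspace/specs/efficientdet_d0.txt',
                                merge_from_default=True)
    return spec.training_config.train_batch_size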
def generate_params_from_spec(config, spec, mode):
"""Generate parameters from experient spec."""
if spec.model_config.aspect_ratios:
aspect_ratios = eval_str(spec.model_config.aspect_ratios)
if not isinstance(aspect_ratios, list):
raise SyntaxError("aspect_ratios should be a list of tuples.")
else:
aspect_ratios = [(1.0, 1.0), (1.4, 0.7), (0.7, 1.4)]
if spec.model_config.max_level != 7 or spec.model_config.min_level != 3:
print("WARNING: min_level and max_level are forced to 3 and 7 respectively "
"in the current version.")
return dict(
config.as_dict(),
# model_config
name=spec.model_config.model_name,
aspect_ratios=aspect_ratios,
anchor_scale=spec.model_config.anchor_scale or 4,
min_level=3,
max_level=7,
num_scales=spec.model_config.num_scales or 3,
freeze_bn=spec.model_config.freeze_bn,
freeze_blocks=eval_str(spec.model_config.freeze_blocks)
if spec.model_config.freeze_blocks else None,
# data config
val_json_file=spec.dataset_config.validation_json_file,
testdev_dir=spec.dataset_config.testdev_dir,
num_classes=spec.dataset_config.num_classes,
max_instances_per_image=spec.dataset_config.max_instances_per_image or 100,
skip_crowd_during_training=spec.dataset_config.skip_crowd_during_training,
# Parse image size in case it is in string format. (H, W)
image_size=utils.parse_image_size(spec.dataset_config.image_size),
# augmentation config
input_rand_hflip=spec.augmentation_config.rand_hflip,
train_scale_min=spec.augmentation_config.random_crop_min_scale or 0.1,
train_scale_max=spec.augmentation_config.random_crop_max_scale or 2.0,
# train eval config
momentum=spec.training_config.momentum or 0.9,
iterations_per_loop=spec.training_config.iterations_per_loop,
num_examples_per_epoch=spec.training_config.num_examples_per_epoch,
checkpoint=spec.training_config.checkpoint,
ckpt=None,
mode=mode,
checkpoint_period=spec.training_config.checkpoint_period,
train_batch_size=spec.training_config.train_batch_size,
eval_batch_size=spec.eval_config.eval_batch_size,
eval_samples=spec.eval_config.eval_samples,
stop_at_epoch=spec.training_config.stop_at_epoch,
profile_skip_steps=spec.training_config.profile_skip_steps,
learning_rate=spec.training_config.learning_rate,
tf_random_seed=spec.training_config.tf_random_seed or 42,
pruned_model_path=spec.training_config.pruned_model_path,
moving_average_decay=spec.training_config.moving_average_decay,
lr_warmup_epoch=spec.training_config.lr_warmup_epoch or 5,
lr_warmup_init=spec.training_config.lr_warmup_init or 0.00001,
amp=spec.training_config.amp,
data_format='channels_last',
l2_weight_decay=spec.training_config.l2_weight_decay,
l1_weight_decay=spec.training_config.l1_weight_decay,
clip_gradients_norm=spec.training_config.clip_gradients_norm or 5.0,
skip_checkpoint_variables=spec.training_config.skip_checkpoint_variables,
num_epochs=spec.training_config.num_epochs,
eval_epoch_cycle=spec.eval_config.eval_epoch_cycle,
logging_frequency=spec.training_config.logging_frequency or 10
)
def spec_checker(experiment_spec):
"""Check if parameters in the spec file are valid.
Args:
experiment_spec (proto): experiment spec proto.
"""
# training config
assert experiment_spec.training_config.train_batch_size > 0, \
"batch size must be positive."
assert experiment_spec.training_config.checkpoint_period > 0, \
"checkpoint interval must be positive."
assert experiment_spec.training_config.num_examples_per_epoch > 0, \
"Number of samples must be positive."
assert experiment_spec.training_config.num_epochs >= \
experiment_spec.eval_config.eval_epoch_cycle, \
"num_epochs must be positive and no less than eval_epoch_cycle."
assert 0 <= experiment_spec.training_config.moving_average_decay < 1, \
"Moving average decay must be within [0, 1)."
assert 0 < experiment_spec.training_config.lr_warmup_init < 1, \
"The initial learning rate during warmup must be within (0, 1)."
assert experiment_spec.training_config.learning_rate > 0, \
"learning_rate must be positive."
# model config
assert experiment_spec.model_config.model_name, \
"model_name must be specified. Choose from ['efficientdet-d0', ..., 'efficientdet-d5']."
# eval config
assert experiment_spec.eval_config.eval_batch_size > 0, "batch size must be positive"
assert experiment_spec.eval_config.eval_epoch_cycle > 0, \
"Evaluation cycle (every N epochs) must be positive."
assert 0 < experiment_spec.eval_config.eval_samples, \
"Number of evaluation samples must be positive."
# dataset config
assert experiment_spec.dataset_config.training_file_pattern, \
"training_file_pattern must be specified."
assert experiment_spec.dataset_config.validation_file_pattern, \
"validation_file_pattern must be specified."
assert experiment_spec.dataset_config.validation_json_file, \
"validation_json_file must be specified."
assert 1 < experiment_spec.dataset_config.num_classes, \
"num_classes is number of categories + 1 (background). It must be greater than 1."
assert experiment_spec.dataset_config.image_size, \
"image size must be specified in 'hh,ww' format."
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/utils/spec_loader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientDet hparam config tests."""
from nvidia_tao_tf1.cv.efficientdet.utils import hparams_config
def test_hparams_config():
c = hparams_config.Config({'a': 1, 'b': 2})
assert c.as_dict() == {'a': 1, 'b': 2}
c.update({'a': 10})
assert c.as_dict() == {'a': 10, 'b': 2}
c.b = 20
assert c.as_dict() == {'a': 10, 'b': 20}
c.override('a=true,b=ss')
assert c.as_dict() == {'a': True, 'b': 'ss'}
c.override('a=100,,,b=2.3,') # extra ',' is fine.
assert c.as_dict() == {'a': 100, 'b': 2.3}
c.override('a=2x3,b=50') # a is a special format for image size.
assert c.as_dict() == {'a': '2x3', 'b': 50}
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/utils/tests/test_hparams_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientDet IOU utils tests."""
import numpy as np
import tensorflow as tf
from nvidia_tao_tf1.cv.efficientdet.utils import iou_utils
def test_iou_utils():
tf.enable_eager_execution()
pb = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]],
dtype=tf.float32)
tb = tf.constant(
[[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0]], dtype=tf.float32)
zeros = tf.constant([[0, 0, 0, 0], [0, 0, 0, 0]], dtype=tf.float32)
assert np.allclose(iou_utils.iou_loss(pb, tb, 'iou'), [0.875, 1.])
assert np.allclose(iou_utils.iou_loss(pb, tb, 'ciou'), [1.408893, 1.548753])
assert np.allclose(iou_utils.iou_loss(pb, tb, 'diou'), [1.406532, 1.531532])
assert np.allclose(iou_utils.iou_loss(pb, tb, 'giou'), [1.075000, 1.933333])
assert np.allclose(iou_utils.iou_loss(pb, zeros, 'giou'), [0, 0])
assert np.allclose(iou_utils.iou_loss(pb, zeros, 'diou'), [0, 0])
assert np.allclose(iou_utils.iou_loss(pb, zeros, 'ciou'), [0, 0])
assert np.allclose(iou_utils.iou_loss(pb, zeros, 'iou'), [0, 0])
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/utils/tests/test_iou_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model function definition, including both architecture and loss."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import logging
import re
import numpy as np
import tensorflow as tf
import nvidia_tao_tf1.cv.common.no_warning # noqa pylint: disable=W0611
from nvidia_tao_tf1.cv.efficientdet.models import anchors
from nvidia_tao_tf1.cv.efficientdet.models import efficientdet_arch
from nvidia_tao_tf1.cv.efficientdet.utils import coco_metric
from nvidia_tao_tf1.cv.efficientdet.utils import hparams_config
from nvidia_tao_tf1.cv.efficientdet.utils import iou_utils
from nvidia_tao_tf1.cv.efficientdet.utils import utils
from nvidia_tao_tf1.cv.efficientdet.utils.lazy_imports import LazyImport
hvd = LazyImport("horovod.tensorflow")
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level='INFO')
logger = logging.getLogger(__name__)
def update_learning_rate_schedule_parameters(params):
"""Updates params that are related to the learning rate schedule."""
batch_size = (params['batch_size'])
# Learning rate is proportional to the batch size
# params['adjusted_learning_rate'] = (
# params['learning_rate'] * batch_size / _DEFAULT_BATCH_SIZE)
# learning rate has already been scaled
params['adjusted_learning_rate'] = params['learning_rate']
steps_per_epoch = params['num_examples_per_epoch'] / batch_size
params['lr_warmup_step'] = int(params['lr_warmup_epoch'] * steps_per_epoch)
params['first_lr_drop_step'] = int(params['first_lr_drop_epoch'] * steps_per_epoch)
params['second_lr_drop_step'] = int(params['second_lr_drop_epoch'] * steps_per_epoch)
params['total_steps'] = int(params['num_epochs'] * steps_per_epoch)
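# Illustrative walk-through (hypothetical numbers, not from any spec file): with
# num_examples_per_epoch=1000 and batch_size=8, steps_per_epoch = 125; then
# lr_warmup_epoch=1 gives lr_warmup_step=125 and num_epochs=300 gives
# total_steps=37500. All schedule parameters below are therefore expressed in
# optimizer steps rather than epochs.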
def stepwise_lr_schedule(adjusted_learning_rate, lr_warmup_init, lr_warmup_step,
first_lr_drop_step, second_lr_drop_step, global_step):
"""Handles linear scaling rule, gradual warmup, and LR decay."""
# lr_warmup_init is the starting learning rate; the learning rate is linearly
# scaled up to the full learning rate after `lr_warmup_step` before decaying.
logger.info('LR schedule method: stepwise')
linear_warmup = (
lr_warmup_init + (
tf.cast(global_step, dtype=tf.float32) / lr_warmup_step *
(adjusted_learning_rate - lr_warmup_init)))
learning_rate = tf.where(
global_step < lr_warmup_step, linear_warmup,
adjusted_learning_rate)
lr_schedule = [[1.0, lr_warmup_step], [0.1, first_lr_drop_step],
[0.01, second_lr_drop_step]]
for mult, start_global_step in lr_schedule:
learning_rate = tf.where(
global_step < start_global_step, learning_rate,
adjusted_learning_rate * mult)
return learning_rate
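# Worked example of the stepwise schedule above (assumed, illustrative values):
# with adjusted_learning_rate=0.08, lr_warmup_init=0.008, lr_warmup_step=1000,
# first_lr_drop_step=20000 and second_lr_drop_step=25000, the returned rate is
# 0.008 + (500/1000) * (0.08 - 0.008) = 0.044 at step 500 (warmup), 0.08 at
# step 5000, 0.008 (0.1x) at step 21000 and 0.0008 (0.01x) at step 26000.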
def cosine_lr_schedule_tf2(adjusted_lr, lr_warmup_init, lr_warmup_step,
total_steps, step):
"""TF2 friendly cosine learning rate schedule."""
logger.info('LR schedule method: cosine')
def warmup_lr(step):
return lr_warmup_init + (adjusted_lr - lr_warmup_init) * (
tf.cast(step, tf.float32) / tf.cast(lr_warmup_step, tf.float32))
def cosine_lr(step):
decay_steps = tf.cast(total_steps - lr_warmup_step, tf.float32)
step = tf.cast(step - lr_warmup_step, tf.float32)
cosine_decay = 0.5 * (1 + tf.cos(np.pi * step / decay_steps))
alpha = 0.0
decayed = (1 - alpha) * cosine_decay + alpha
return adjusted_lr * tf.cast(decayed, tf.float32)
return tf.cond(step <= lr_warmup_step, lambda: warmup_lr(step),
lambda: cosine_lr(step))
def cosine_lr_schedule(adjusted_lr, lr_warmup_init, lr_warmup_step, total_steps,
step):
"""Cosine learning rate schedule."""
logger.info('LR schedule method: cosine')
linear_warmup = (
lr_warmup_init + (tf.cast(step, dtype=tf.float32) / lr_warmup_step *
(adjusted_lr - lr_warmup_init)))
decay_steps = tf.cast(total_steps - lr_warmup_step, tf.float32)
cosine_lr = 0.5 * adjusted_lr * (
1 + tf.cos(np.pi * tf.cast(step - lr_warmup_step, tf.float32) / decay_steps))
return tf.where(step < lr_warmup_step, linear_warmup, cosine_lr)
def polynomial_lr_schedule(adjusted_lr, lr_warmup_init, lr_warmup_step, power,
total_steps, step):
"""Polynomial learning rate schedule."""
logger.info('LR schedule method: polynomial')
linear_warmup = (
lr_warmup_init + (
tf.cast(step, dtype=tf.float32) / lr_warmup_step *
(adjusted_lr - lr_warmup_init)))
polynomial_lr = adjusted_lr * tf.pow(
1 - (tf.cast(step, tf.float32) / total_steps), power)
return tf.where(step < lr_warmup_step, linear_warmup, polynomial_lr)
def learning_rate_schedule(params, global_step):
"""Learning rate schedule based on global step."""
return cosine_lr_schedule(params['adjusted_learning_rate'],
params['lr_warmup_init'],
params['lr_warmup_step'], params['total_steps'],
global_step)
def focal_loss(logits, targets, alpha, gamma, normalizer):
"""Compute the focal loss between `logits` and the golden `target` values.
Focal loss = -(1-pt)^gamma * log(pt)
where pt is the probability of being classified to the true class.
Args:
logits: A float32 tensor of size [batch, height_in, width_in,
num_predictions].
targets: A float32 tensor of size [batch, height_in, width_in,
num_predictions].
alpha: A float32 scalar multiplying alpha to the loss from positive examples
and (1-alpha) to the loss from negative examples.
gamma: A float32 scalar modulating loss from hard and easy examples.
normalizer: A float32 scalar normalizes the total loss from all examples.
Returns:
loss: A float32 scalar representing normalized total loss.
"""
with tf.name_scope('focal_loss'):
positive_label_mask = tf.equal(targets, 1.0)
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets, logits=logits)
assert cross_entropy.dtype == tf.float32
# cross_entropy = tf.identity(cross_entropy, name="debug_cross_entropy")
# targets = tf.identity(targets, name="debug_targets")
# logits = tf.identity(logits, name="debug_logits")
# Below are comments/derivations for computing modulator.
# For brevity, let x = logits, z = targets, r = gamma, and p_t = sigmoid(x)
# for positive samples and 1 - sigmoid(x) for negative examples.
#
# The modulator, defined as (1 - p_t)^r, is a critical part of the focal loss
# computation. For r > 0, it puts more weight on hard examples and less
# weight on easier ones. However, if it is directly computed as (1 - p_t)^r,
# its back-propagation is not stable when r < 1. The implementation here
# resolves the issue.
#
# For positive samples (labels being 1),
# (1 - p_t)^r
# = (1 - sigmoid(x))^r
# = (1 - (1 / (1 + exp(-x))))^r
# = (exp(-x) / (1 + exp(-x)))^r
# = exp(log((exp(-x) / (1 + exp(-x)))^r))
# = exp(r * log(exp(-x)) - r * log(1 + exp(-x)))
# = exp(- r * x - r * log(1 + exp(-x)))
#
# For negative samples (labels being 0),
# (1 - p_t)^r
# = (sigmoid(x))^r
# = (1 / (1 + exp(-x)))^r
# = exp(log((1 / (1 + exp(-x)))^r))
# = exp(-r * log(1 + exp(-x)))
#
# Therefore one unified form for positive (z = 1) and negative (z = 0)
# samples is:
# (1 - p_t)^r = exp(-r * z * x - r * log(1 + exp(-x))).
neg_logits = -1.0 * logits
modulator = tf.exp(gamma * targets * neg_logits -
gamma * tf.log1p(tf.exp(neg_logits)))
loss = modulator * cross_entropy
weighted_loss = tf.where(positive_label_mask, alpha * loss,
(1.0 - alpha) * loss)
# weighted_loss /= normalizer
weighted_loss = tf.math.divide_no_nan(weighted_loss, normalizer, name='weighted_loss')
return weighted_loss
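# Numeric sanity check of the derivation above (illustrative, not part of the
# original file): for a positive anchor with logit x=2.0, gamma=2.0, alpha=0.25,
# p_t = sigmoid(2.0) ~= 0.881, so the modulator (1 - p_t)^2 ~= 0.0142 and the
# sigmoid cross entropy -log(p_t) ~= 0.127; the weighted loss is then
# 0.25 * 0.0142 * 0.127 ~= 4.5e-4 before division by the normalizer, i.e. this
# easy example contributes roughly 70x less than plain weighted cross entropy.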
def _classification_loss(cls_outputs,
cls_targets,
num_positives,
alpha=0.25,
gamma=2.0):
"""Computes classification loss."""
normalizer = num_positives
classification_loss = focal_loss(cls_outputs, cls_targets, alpha, gamma,
normalizer)
return classification_loss
def _box_loss(box_outputs, box_targets, num_positives, delta=0.1):
"""Computes box regression loss."""
# delta is typically around the mean value of the regression target.
# For instance, the regression targets of a 512x512 input with 6 anchors on
# the P3-P7 pyramid are about [0.1, 0.1, 0.2, 0.2].
normalizer = num_positives * 4.0
mask = tf.not_equal(box_targets, 0.0)
box_loss = tf.losses.huber_loss(
box_targets,
box_outputs,
weights=mask,
delta=delta,
reduction=tf.losses.Reduction.SUM)
# box_loss /= normalizer
box_loss = tf.math.divide_no_nan(box_loss, normalizer, name="huber_loss")
return box_loss
def _box_iou_loss(box_outputs, box_targets, num_positives, iou_loss_type):
"""Computes box iou loss."""
normalizer = num_positives * 4.0
box_iou_loss = iou_utils.iou_loss(box_outputs, box_targets, iou_loss_type)
box_iou_loss = tf.reduce_sum(box_iou_loss) / normalizer
return box_iou_loss
def detection_loss(cls_outputs, box_outputs, labels, params):
"""Computes total detection loss.
Computes total detection loss including box and class loss from all levels.
Args:
cls_outputs: an OrderedDict with keys representing levels and values
representing logits in [batch_size, height, width, num_anchors].
box_outputs: an OrderedDict with keys representing levels and values
representing box regression targets in [batch_size, height, width,
num_anchors * 4].
labels: the dictionary that returned from dataloader that includes
groundtruth targets.
params: the dictionary including training parameters specified in
default_hparams function in this file.
Returns:
total_loss: a float tensor representing the total loss reduced from the
class and box losses of all levels.
cls_loss: a float tensor representing the total class loss.
box_loss: a float tensor representing the total box regression loss.
box_iou_loss: a float tensor representing the total box iou loss.
"""
# Sum all positives in a batch for normalization and avoid zero
# num_positives_sum, which would lead to inf loss during training
num_positives_sum = tf.reduce_sum(labels['mean_num_positives']) + 1.0
levels = cls_outputs.keys()
cls_losses = []
box_losses = []
box_iou_losses = []
for level in levels:
if params['data_format'] == 'channels_first':
labels['cls_targets_%d' % level] = tf.transpose(
labels['cls_targets_%d' % level], [0, 3, 1, 2])
labels['box_targets_%d' % level] = tf.transpose(
labels['box_targets_%d' % level], [0, 3, 1, 2])
# Onehot encoding for classification labels.
cls_targets_at_level = tf.one_hot(labels['cls_targets_%d' % level],
params['num_classes'])
if params['data_format'] == 'channels_first':
bs, _, width, height, _ = cls_targets_at_level.get_shape().as_list()
cls_targets_at_level = tf.reshape(cls_targets_at_level,
[bs, -1, width, height])
else:
bs, width, height, _, _ = cls_targets_at_level.get_shape().as_list()
cls_targets_at_level = tf.reshape(cls_targets_at_level,
[bs, width, height, -1])
box_targets_at_level = labels['box_targets_%d' % level]
cls_loss = _classification_loss(
cls_outputs[level],
cls_targets_at_level,
num_positives_sum,
alpha=params['alpha'],
gamma=params['gamma'])
if params['data_format'] == 'channels_first':
cls_loss = tf.reshape(cls_loss,
[bs, -1, width, height, params['num_classes']])
else:
cls_loss = tf.reshape(cls_loss,
[bs, width, height, -1, params['num_classes']])
cls_loss *= tf.cast(
tf.expand_dims(tf.not_equal(labels['cls_targets_%d' % level], -2), -1),
tf.float32)
cls_losses.append(tf.reduce_sum(cls_loss))
box_losses.append(
_box_loss(
box_outputs[level],
box_targets_at_level,
num_positives_sum,
delta=params['delta']))
if params['iou_loss_type']:
box_iou_losses.append(
_box_iou_loss(box_outputs[level], box_targets_at_level,
num_positives_sum, params['iou_loss_type']))
# Sum per level losses to total loss.
cls_loss = tf.add_n(cls_losses)
box_loss = tf.add_n(box_losses)
box_iou_loss = tf.add_n(box_iou_losses) if box_iou_losses else 0
total_loss = (
cls_loss + params['box_loss_weight'] * box_loss +
params['iou_loss_weight'] * box_iou_loss)
return total_loss, cls_loss, box_loss, box_iou_loss
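# For intuition (assumed values): with cls_loss=0.8, box_loss=0.02,
# box_loss_weight=50 and iou_loss_type unset, the total detection loss is
# 0.8 + 50 * 0.02 = 1.8, so the box term is deliberately scaled to the same
# order of magnitude as the classification term.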
def add_metric_fn_inputs(params,
cls_outputs,
box_outputs,
metric_fn_inputs,
max_detection_points=anchors.MAX_DETECTION_POINTS):
"""Selects top-k predictions and adds the selected to metric_fn_inputs.
Args:
params: a parameter dictionary that includes `min_level`, `max_level`,
`batch_size`, and `num_classes`.
cls_outputs: an OrderedDict with keys representing levels and values
representing logits in [batch_size, height, width, num_anchors].
box_outputs: an OrderedDict with keys representing levels and values
representing box regression targets in [batch_size, height, width,
num_anchors * 4].
metric_fn_inputs: a dictionary that will hold the top-k selections.
max_detection_points: an integer specifying the maximum detection points to
keep before NMS. Keep all anchors if max_detection_points <= 0.
"""
batch_size = params['batch_size']
num_classes = params['num_classes']
cls_outputs_all = []
box_outputs_all = []
# Concatenates class and box of all levels into one tensor.
for level in range(params['min_level'], params['max_level'] + 1):
if params['data_format'] == 'channels_first':
cls_outputs[level] = tf.transpose(cls_outputs[level], [0, 2, 3, 1])
box_outputs[level] = tf.transpose(box_outputs[level], [0, 2, 3, 1])
cls_outputs_all.append(
tf.reshape(cls_outputs[level], [batch_size, -1, num_classes]))
box_outputs_all.append(tf.reshape(box_outputs[level], [batch_size, -1, 4]))
cls_outputs_all = tf.concat(cls_outputs_all, 1)
box_outputs_all = tf.concat(box_outputs_all, 1)
if max_detection_points > 0:
# Prune anchors and detections to only keep max_detection_points.
# Due to some issues, top_k is currently slow in graph mode.
cls_outputs_all_reshape = tf.reshape(cls_outputs_all, [batch_size, -1])
_, cls_topk_indices = tf.math.top_k(
cls_outputs_all_reshape, k=max_detection_points, sorted=False)
indices = cls_topk_indices // num_classes
classes = cls_topk_indices % num_classes
cls_indices = tf.stack([indices, classes], axis=2)
cls_outputs_all_after_topk = tf.gather_nd(
cls_outputs_all, cls_indices, batch_dims=1)
box_outputs_all_after_topk = tf.gather_nd(
box_outputs_all, tf.expand_dims(indices, 2), batch_dims=1)
else:
# Keep all anchors, but for each anchor, keep only the class with the
# maximum probability.
cls_outputs_idx = tf.math.argmax(
cls_outputs_all, axis=-1, output_type=tf.int32)
num_anchors = cls_outputs_all.shape[1]
classes = cls_outputs_idx
indices = tf.tile(
tf.expand_dims(tf.range(num_anchors), axis=0), [batch_size, 1])
cls_outputs_all_after_topk = tf.reduce_max(cls_outputs_all, -1)
box_outputs_all_after_topk = box_outputs_all
metric_fn_inputs['cls_outputs_all'] = cls_outputs_all_after_topk
metric_fn_inputs['box_outputs_all'] = box_outputs_all_after_topk
metric_fn_inputs['indices_all'] = indices
metric_fn_inputs['classes_all'] = classes
def coco_metric_fn(batch_size,
anchor_labeler,
filename=None,
testdev_dir=None,
softnms=False,
**kwargs):
"""Evaluation metric fn. Performed on CPU, do not reference TPU ops."""
# add metrics to output
detections_bs = []
for index in range(batch_size):
cls_outputs_per_sample = kwargs['cls_outputs_all'][index]
box_outputs_per_sample = kwargs['box_outputs_all'][index]
indices_per_sample = kwargs['indices_all'][index]
classes_per_sample = kwargs['classes_all'][index]
detections = anchor_labeler.generate_detections(
cls_outputs_per_sample,
box_outputs_per_sample,
indices_per_sample,
classes_per_sample,
tf.slice(kwargs['source_ids'], [index], [1]),
tf.slice(kwargs['image_scales'], [index], [1]),
softnms=softnms,
disable_pyfun=kwargs.get('disable_pyfun', None),
)
detections_bs.append(detections)
if testdev_dir:
eval_metric = coco_metric.EvaluationMetric(testdev_dir=testdev_dir)
coco_metrics = eval_metric.estimator_metric_fn(detections_bs, tf.zeros([1]))
else:
eval_metric = coco_metric.EvaluationMetric(filename=filename)
coco_metrics = eval_metric.estimator_metric_fn(detections_bs,
kwargs['groundtruth_data'])
return coco_metrics
def reg_l2_loss(weight_decay, regex=r'.*(kernel|weight):0$'):
"""Return regularization l2 loss loss."""
var_match = re.compile(regex)
return weight_decay * tf.add_n([
tf.nn.l2_loss(v)
for v in tf.trainable_variables()
if var_match.match(v.name)
])
def reg_l1_loss(weight_decay):
"""Return regularization l1 loss loss."""
return tf.contrib.layers.apply_regularization(
tf.keras.regularizers.l1(weight_decay / 2.0),
[v for v in tf.trainable_variables()
if not any([pattern in v.name for pattern in ["batch_normalization", "bias", "beta"]])])
def _model_fn(features, labels, mode, params, model, variable_filter_fn=None):
"""Model definition entry.
Args:
features: the input image tensor with shape [batch_size, height, width, 3].
The height and width are fixed and equal.
labels: the input labels in a dictionary. The labels include class targets
and box targets which are dense label maps. The labels are generated from
get_input_fn function in data/dataloader.py
mode: the mode of TPUEstimator including TRAIN, EVAL, and PREDICT.
params: the dictionary defines hyperparameters of model. The default
settings are in default_hparams function in this file.
model: the model outputs class logits and box regression outputs.
variable_filter_fn: the filter function that takes trainable_variables and
returns the variable list after applying the filter rule.
Returns:
tpu_spec: the TPUEstimatorSpec to run training, evaluation, or prediction.
Raises:
RuntimeError: if both ckpt and backbone_ckpt are set.
"""
scaffold_fn = None
if params['data_format'] == 'channels_first':
features = tf.transpose(features, [0, 3, 1, 2])
cls_outputs, box_outputs = efficientdet_arch.efficientdet(
features, config=hparams_config.Config(params))
levels = cls_outputs.keys()
for level in levels:
cls_outputs[level] = tf.cast(cls_outputs[level], tf.float32)
box_outputs[level] = tf.cast(box_outputs[level], tf.float32)
# First check if it is in PREDICT mode.
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
'image': features,
}
for level in levels:
predictions['cls_outputs_%d' % level] = cls_outputs[level]
predictions['box_outputs_%d' % level] = box_outputs[level]
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
# Set up training loss and learning rate.
update_learning_rate_schedule_parameters(params)
global_step = tf.compat.v1.train.get_or_create_global_step()
learning_rate = learning_rate_schedule(params, global_step)
learning_rate = tf.identity(learning_rate, name="learning_rate")
# cls_loss and box_loss are for logging. only total_loss is optimized.
det_loss, cls_loss, box_loss, box_iou_loss = detection_loss(
cls_outputs, box_outputs, labels, params)
reg_l2loss = reg_l2_loss(params['l2_weight_decay'])
reg_l1loss = reg_l1_loss(params['l1_weight_decay'])
total_loss = det_loss + reg_l2loss + reg_l1loss
total_loss = tf.identity(total_loss, name="total_loss")
cls_loss = tf.identity(cls_loss, name="debug_cls_loss")
box_loss = tf.identity(box_loss, name="debug_box_loss")
if mode == tf.estimator.ModeKeys.TRAIN:
utils.scalar('lrn_rate', learning_rate)
utils.scalar('trainloss/cls_loss', cls_loss)
utils.scalar('trainloss/box_loss', box_loss)
utils.scalar('trainloss/det_loss', det_loss)
utils.scalar('trainloss/reg_l2_loss', reg_l2loss)
utils.scalar('trainloss/loss', total_loss)
if box_iou_loss:
utils.scalar('trainloss/box_iou_loss', box_iou_loss)
moving_average_decay = params['moving_average_decay']
if moving_average_decay:
ema = tf.train.ExponentialMovingAverage(
decay=moving_average_decay, num_updates=global_step)
ema_vars = utils.get_ema_vars()
if mode == tf.estimator.ModeKeys.TRAIN:
if params['optimizer'].lower() == 'sgd':
optimizer = tf.train.MomentumOptimizer(
learning_rate, momentum=params['momentum'])
elif params['optimizer'].lower() == 'adam':
optimizer = tf.train.AdamOptimizer(learning_rate)
else:
raise ValueError('optimizers should be adam or sgd')
optimizer = hvd.DistributedOptimizer(optimizer, compression=hvd.Compression.fp16) # none
# enable amp
if params['amp']:
loss_scale = tf.train.experimental.DynamicLossScale()
optimizer = tf.train.experimental.MixedPrecisionLossScaleOptimizer(
optimizer, loss_scale)
# Batch norm requires update_ops to be added as a train_op dependency.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
var_list = tf.trainable_variables()
if variable_filter_fn:
var_list = variable_filter_fn(var_list)
if params.get('clip_gradients_norm', 0) > 0:
logger.info('clip gradients norm by %f', params['clip_gradients_norm'])
grads_and_vars = optimizer.compute_gradients(total_loss, var_list)
with tf.name_scope('clip'):
grads = [gv[0] for gv in grads_and_vars]
tvars = [gv[1] for gv in grads_and_vars]
clipped_grads, gnorm = tf.clip_by_global_norm(
grads, params['clip_gradients_norm'])
utils.scalar('gnorm', gnorm)
grads_and_vars = list(zip(clipped_grads, tvars))
with tf.control_dependencies(update_ops):
train_op = optimizer.apply_gradients(grads_and_vars, global_step)
else:
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(
total_loss, global_step, var_list=var_list)
if moving_average_decay:
with tf.control_dependencies([train_op]):
train_op = ema.apply(ema_vars)
else:
train_op = None
eval_metrics = None
if mode == tf.estimator.ModeKeys.EVAL:
def metric_fn_method(**kwargs):
"""Returns a dictionary that has the evaluation metrics."""
batch_size = params['batch_size']
eval_anchors = anchors.Anchors(params['min_level'], params['max_level'],
params['num_scales'],
params['aspect_ratios'],
params['anchor_scale'],
params['image_size'])
anchor_labeler = anchors.AnchorLabeler(eval_anchors,
params['num_classes'])
cls_loss = tf.metrics.mean(kwargs['cls_loss_repeat'])
box_loss = tf.metrics.mean(kwargs['box_loss_repeat'])
if params.get('testdev_dir', None):
logger.info('Eval testdev_dir %s', params['testdev_dir'])
coco_metrics = coco_metric_fn(
batch_size,
anchor_labeler,
params['val_json_file'],
testdev_dir=params['testdev_dir'],
disable_pyfun=params.get('disable_pyfun', None),
**kwargs)
else:
logger.info('Eval with groundtruths %s.', params['val_json_file'])
coco_metrics = coco_metric_fn(batch_size, anchor_labeler,
params['val_json_file'],
softnms=params['softnms'],
**kwargs)
# Add metrics to output.
output_metrics = {
'cls_loss': cls_loss,
'box_loss': box_loss,
}
output_metrics.update(coco_metrics)
return output_metrics
cls_loss_repeat = tf.reshape(
tf.tile(tf.expand_dims(cls_loss, 0), [
params['batch_size'],
]), [params['batch_size'], 1])
box_loss_repeat = tf.reshape(
tf.tile(tf.expand_dims(box_loss, 0), [
params['batch_size'],
]), [params['batch_size'], 1])
metric_fn_inputs = {
'cls_loss_repeat': cls_loss_repeat,
'box_loss_repeat': box_loss_repeat,
'source_ids': labels['source_ids'],
'groundtruth_data': labels['groundtruth_data'],
'image_scales': labels['image_scales'],
}
add_metric_fn_inputs(params, cls_outputs, box_outputs, metric_fn_inputs)
eval_metrics = metric_fn_method(**metric_fn_inputs)
if moving_average_decay:
def scaffold_fn_method():
"""Load moving average variables for eval."""
logging.info('Load EMA vars with ema_decay=%f', moving_average_decay)
restore_vars_dict = ema.variables_to_restore(ema_vars)
saver = tf.train.Saver(restore_vars_dict)
return tf.train.Scaffold(saver=saver)
scaffold_fn = scaffold_fn_method()
return tf.estimator.EstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold=scaffold_fn,
eval_metric_ops=eval_metrics,
training_hooks=None)
def efficientdet_model_fn(features, labels, mode, params):
"""EfficientDet model."""
variable_filter_fn = functools.partial(
efficientdet_arch.freeze_vars, pattern=params['var_freeze_expr'])
return _model_fn(
features,
labels,
mode,
params,
model=efficientdet_arch.efficientdet,
variable_filter_fn=variable_filter_fn)
def get_model_arch(model_name='efficientdet-d0'):
"""Get model architecture for a given model name."""
if 'efficientdet' in model_name:
return efficientdet_arch.efficientdet
raise ValueError('Invalid model name {}'.format(model_name))
def get_model_fn(model_name='efficientdet-d0'):
"""Get model fn for a given model name."""
if 'efficientdet' in model_name:
return efficientdet_model_fn
raise ValueError('Invalid model name {}'.format(model_name))
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/models/det_model_fn.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IVA EfficientDet model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/models/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientDet model definition.
[1] Mingxing Tan, Ruoming Pang, Quoc Le.
EfficientDet: Scalable and Efficient Object Detection.
CVPR 2020, https://arxiv.org/abs/1911.09070
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import os
import re
import tensorflow.compat.v1 as tf
from nvidia_tao_tf1.cv.efficientdet.backbone import backbone_factory
from nvidia_tao_tf1.cv.efficientdet.layers.image_resize_layer import ImageResizeLayer
from nvidia_tao_tf1.cv.efficientdet.layers.weighted_fusion_layer import WeightedFusion
from nvidia_tao_tf1.cv.efficientdet.models.utils_keras import BoxNet, ClassNet
from nvidia_tao_tf1.cv.efficientdet.utils import hparams_config
from nvidia_tao_tf1.cv.efficientdet.utils import utils
from nvidia_tao_tf1.cv.efficientdet.utils.distributed_utils import MPI_is_distributed
from nvidia_tao_tf1.cv.efficientdet.utils.distributed_utils import MPI_rank
from nvidia_tao_tf1.cv.efficientdet.utils.model_loader import dump_json, get_model_with_input
################################################################################
def freeze_vars(variables, pattern):
"""Removes backbone+fpn variables from the input.
Args:
variables: all the variables in training
pattern: a regular expression such as ".*(efficientnet|fpn_cells).*".
Returns:
var_list: a list containing variables for training
"""
if pattern:
variables = [v for v in variables if not re.match(pattern, v.name)]
return variables
def resample_feature_map(feat,
name,
target_height,
target_width,
target_num_channels,
apply_bn=False,
is_training=None,
conv_after_downsample=False,
use_native_resize_op=False,
pooling_type=None,
data_format='channels_last'):
"""Resample input feature map to have target number of channels and size."""
if data_format == 'channels_first':
_, num_channels, height, width = feat.get_shape().as_list()
else:
_, height, width, num_channels = feat.get_shape().as_list()
if height is None or width is None or num_channels is None:
raise ValueError(
'shape[1] or shape[2] or shape[3] of feat is None (shape:{}).'.format(
feat.shape))
if apply_bn and is_training is None:
raise ValueError('If BN is applied, need to provide is_training')
def _maybe_apply_1x1(feat, name='conv_1x1'):
"""Apply 1x1 conv to change layer width if necessary."""
if num_channels != target_num_channels:
feat = tf.keras.layers.Conv2D(
filters=target_num_channels,
kernel_size=(1, 1),
padding='same',
name=name,
data_format=data_format)(feat)
if apply_bn:
feat = utils.batch_norm_act(
feat,
is_training_bn=is_training,
act_type=None,
data_format=data_format,
name=name+'_bn')
return feat
# If conv_after_downsample is True, when downsampling, apply 1x1 after
# downsampling for efficiency.
if height > target_height and width > target_width:
if not conv_after_downsample:
feat = _maybe_apply_1x1(feat, name=name)
height_stride_size = int((height - 1) // target_height + 1)
width_stride_size = int((width - 1) // target_width + 1)
if pooling_type == 'max' or pooling_type is None:
# Use max pooling in default.
feat = tf.keras.layers.MaxPool2D(
pool_size=[height_stride_size + 1, width_stride_size + 1],
strides=[height_stride_size, width_stride_size],
padding='SAME',
data_format=data_format)(feat)
elif pooling_type == 'avg':
feat = tf.keras.layers.AveragePooling2D(
pool_size=[height_stride_size + 1, width_stride_size + 1],
strides=[height_stride_size, width_stride_size],
padding='SAME',
data_format=data_format)(feat)
else:
raise ValueError('Unknown pooling type: {}'.format(pooling_type))
if conv_after_downsample:
feat = _maybe_apply_1x1(feat, name=name+'_conv_after_downsample')
elif height <= target_height and width <= target_width:
feat = _maybe_apply_1x1(feat, name=name)
if height < target_height or width < target_width:
height_scale = target_height // height
width_scale = target_width // width
if (use_native_resize_op or target_height % height != 0 or
target_width % width != 0):
if data_format == 'channels_first':
# feat = tf.transpose(feat, [0, 2, 3, 1])
feat = tf.keras.layers.Permute((2, 3, 1))(feat)
tf_resize_layer = ImageResizeLayer(target_height, target_width)
feat = tf_resize_layer(feat)
if data_format == 'channels_first':
# feat = tf.transpose(feat, [0, 3, 1, 2])
feat = tf.keras.layers.Permute((3, 1, 2))(feat)
else:
feat = tf.keras.layers.UpSampling2D(
size=(height_scale, width_scale),
data_format=data_format)(feat)
else:
raise ValueError(
'Incompatible target feature map size: target_height: {}, '
'target_width: {}'.format(target_height, target_width))
return feat
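# Example of the resampling rules above (illustrative shapes): downsampling a
# 64x64 map to 32x32 gives height_stride_size = (64 - 1) // 32 + 1 = 2, so a
# 3x3 max pool with stride 2 and SAME padding is used; upsampling 16x16 to
# 32x32 uses UpSampling2D(size=(2, 2)) since both scales are exact integers.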
def build_class_and_box_outputs(feats, config):
"""Builds box net and class net.
Args:
feats: input tensor.
config: a dict-like config, including all parameters.
Returns:
A tuple (class_outputs, box_outputs) for class/box predictions.
"""
class_outputs = {}
box_outputs = {}
num_anchors = len(config.aspect_ratios) * config.num_scales
num_filters = config.fpn_num_filters
class_outputs = ClassNet(num_classes=config.num_classes,
num_anchors=num_anchors,
num_filters=num_filters,
min_level=config.min_level,
max_level=config.max_level,
is_training=config.is_training_bn,
act_type=config.act_type,
repeats=config.box_class_repeats,
separable_conv=config.separable_conv,
survival_prob=config.survival_prob,
data_format=config.data_format)(feats)
box_outputs = BoxNet(num_anchors=num_anchors,
num_filters=num_filters,
min_level=config.min_level,
max_level=config.max_level,
is_training=config.is_training_bn,
act_type=config.act_type,
repeats=config.box_class_repeats,
separable_conv=config.separable_conv,
survival_prob=config.survival_prob,
data_format=config.data_format)(feats)
return class_outputs, box_outputs
def build_backbone(features, config):
"""Builds backbone model.
Args:
features: input tensor.
config: config for backbone, such as is_training_bn and backbone name.
Returns:
A dict from levels to the feature maps from the output of the backbone model
with strides of 8, 16 and 32.
Raises:
ValueError: if backbone_name is not supported.
"""
backbone_name = config.name
model_builder = backbone_factory.get_model_builder(backbone_name)
# build tf efficientnet backbone from IVA templates
u1, u2, u3, u4, u5 = model_builder.build_model_base(
features, backbone_name,
freeze_blocks=config.freeze_blocks,
freeze_bn=config.freeze_bn)
return {0: features, 1: u1, 2: u2, 3: u3, 4: u4, 5: u5}
def build_feature_network(features, config):
"""Build FPN input features.
Args:
features: input tensor.
config: a dict-like config, including all parameters.
Returns:
A dict from levels to the feature maps processed after feature network.
"""
feat_sizes = utils.get_feat_sizes(config.image_size, config.max_level)
feats = []
if config.min_level not in features.keys():
raise ValueError('features.keys ({}) should include min_level ({})'.format(
features.keys(), config.min_level))
# Build additional input features that are not from backbone.
for level in range(config.min_level, config.max_level + 1):
if level in features.keys():
feats.append(features[level])
else:
h_id, w_id = (2, 3) if config.data_format == 'channels_first' else (1, 2)
# Adds a coarser level by downsampling the last feature map.
feats.append(
resample_feature_map(
feats[-1],
name='p%d' % level,
target_height=(feats[-1].shape[h_id] - 1) // 2 + 1,
target_width=(feats[-1].shape[w_id] - 1) // 2 + 1,
target_num_channels=config.fpn_num_filters,
apply_bn=config.apply_bn_for_resampling,
is_training=config.is_training_bn,
conv_after_downsample=config.conv_after_downsample,
use_native_resize_op=config.use_native_resize_op,
pooling_type=config.pooling_type,
data_format=config.data_format
))
utils.verify_feats_size(
feats,
feat_sizes=feat_sizes,
min_level=config.min_level,
max_level=config.max_level,
data_format=config.data_format)
for rep in range(config.fpn_cell_repeats):
new_feats = build_bifpn_layer(feats, feat_sizes, config, rep=str(rep))
feats = [
new_feats[level]
for level in range(
config.min_level, config.max_level + 1)
]
utils.verify_feats_size(
feats,
feat_sizes=feat_sizes,
min_level=config.min_level,
max_level=config.max_level,
data_format=config.data_format)
return new_feats
def bifpn_sum_config():
"""BiFPN config with sum."""
p = hparams_config.Config()
p.nodes = [
{'feat_level': 6, 'inputs_offsets': [3, 4]},
{'feat_level': 5, 'inputs_offsets': [2, 5]},
{'feat_level': 4, 'inputs_offsets': [1, 6]},
{'feat_level': 3, 'inputs_offsets': [0, 7]},
{'feat_level': 4, 'inputs_offsets': [1, 7, 8]},
{'feat_level': 5, 'inputs_offsets': [2, 6, 9]},
{'feat_level': 6, 'inputs_offsets': [3, 5, 10]},
{'feat_level': 7, 'inputs_offsets': [4, 11]},
]
p.weight_method = 'sum'
return p
def bifpn_fa_config():
"""BiFPN config with fast weighted sum."""
p = bifpn_sum_config()
p.weight_method = 'fastattn'
return p
def bifpn_dynamic_config(min_level, max_level, weight_method):
"""A dynamic bifpn config that can adapt to different min/max levels."""
p = hparams_config.Config()
p.weight_method = weight_method or 'fastattn'
# Node id starts from the input features and monotonically increase whenever
# a new node is added. Here is an example for level P3 - P7:
# P7 (4) P7" (12)
# P6 (3) P6' (5) P6" (11)
# P5 (2) P5' (6) P5" (10)
# P4 (1) P4' (7) P4" (9)
# P3 (0) P3" (8)
# So output would be like:
# [
# {'feat_level': 6, 'inputs_offsets': [3, 4]}, # for P6'
# {'feat_level': 5, 'inputs_offsets': [2, 5]}, # for P5'
# {'feat_level': 4, 'inputs_offsets': [1, 6]}, # for P4'
# {'feat_level': 3, 'inputs_offsets': [0, 7]}, # for P3"
# {'feat_level': 4, 'inputs_offsets': [1, 7, 8]}, # for P4"
# {'feat_level': 5, 'inputs_offsets': [2, 6, 9]}, # for P5"
# {'feat_level': 6, 'inputs_offsets': [3, 5, 10]}, # for P6"
# {'feat_level': 7, 'inputs_offsets': [4, 11]}, # for P7"
# ]
num_levels = max_level - min_level + 1
node_ids = {min_level + i: [i] for i in range(num_levels)}
# level_last_id = lambda level: node_ids[level][-1]
# level_all_ids = lambda level: node_ids[level]
def level_last_id(level):
return node_ids[level][-1]
def level_all_ids(level):
return node_ids[level]
id_cnt = itertools.count(num_levels)
p.nodes = []
for i in range(max_level - 1, min_level - 1, -1):
# top-down path.
p.nodes.append({
'feat_level': i,
'inputs_offsets': [level_last_id(i), level_last_id(i + 1)]
})
node_ids[i].append(next(id_cnt))
for i in range(min_level + 1, max_level + 1):
# bottom-up path.
p.nodes.append({
'feat_level': i,
'inputs_offsets': level_all_ids(i) + [level_last_id(i - 1)]
})
node_ids[i].append(next(id_cnt))
return p
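# Small example of the dynamic config (assuming min_level=3, max_level=5 and
# the default weight method): the initial node ids are {3: [0], 4: [1], 5: [2]}
# and the generated nodes are
# [{'feat_level': 4, 'inputs_offsets': [1, 2]},     # top-down P4'
#  {'feat_level': 3, 'inputs_offsets': [0, 3]},     # top-down P3"
#  {'feat_level': 4, 'inputs_offsets': [1, 3, 4]},  # bottom-up P4"
#  {'feat_level': 5, 'inputs_offsets': [2, 5]}]     # bottom-up P5"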
def get_fpn_config(fpn_name, min_level, max_level, weight_method):
"""Get fpn related configuration."""
if not fpn_name:
fpn_name = 'bifpn_fa'
name_to_config = {
'bifpn_sum': bifpn_sum_config(),
'bifpn_fa': bifpn_fa_config(),
'bifpn_dyn': bifpn_dynamic_config(min_level, max_level, weight_method)
}
return name_to_config[fpn_name]
def build_bifpn_layer(feats, feat_sizes, config, rep='0'):
"""Builds a feature pyramid given previous feature pyramid and config."""
p = config # use p to denote the network config.
if p.fpn_config:
fpn_config = p.fpn_config
else:
fpn_config = get_fpn_config(
p.fpn_name, p.min_level, p.max_level, p.fpn_weight_method)
num_output_connections = [0 for _ in feats]
for i, fnode in enumerate(fpn_config.nodes):
# with tf.variable_scope('fnode{}'.format(i)):
# logging.info('fnode %d : %s', i, fnode)
new_node_height = feat_sizes[fnode['feat_level']]['height']
new_node_width = feat_sizes[fnode['feat_level']]['width']
nodes = []
for idx, input_offset in enumerate(fnode['inputs_offsets']):
input_node = feats[input_offset]
num_output_connections[input_offset] += 1
input_node = resample_feature_map(
input_node, 'bifpn{}_{}_{}_{}'.format(idx, input_offset, len(feats), rep),
new_node_height, new_node_width, p.fpn_num_filters,
p.apply_bn_for_resampling, p.is_training_bn,
p.conv_after_downsample,
p.use_native_resize_op,
p.pooling_type,
data_format=config.data_format)
nodes.append(input_node)
# new_node = fuse_features(nodes, fpn_config.weight_method)
new_node = WeightedFusion(name='weighted_fusion_{}_{}'.format(i, rep))(nodes)
# with tf.variable_scope('op_after_combine{}'.format(len(feats))):
if not p.conv_bn_act_pattern:
new_node = utils.activation_fn(new_node, p.act_type)
if p.separable_conv:
conv_layer = tf.keras.layers.SeparableConv2D(
depth_multiplier=1,
filters=p.fpn_num_filters,
kernel_size=(3, 3),
padding='same',
data_format=config.data_format,
name='after_combine_dw_conv_{}_{}'.format(i, rep))
else:
conv_layer = tf.keras.layers.Conv2D(
filters=p.fpn_num_filters,
kernel_size=(3, 3),
padding='same',
data_format=config.data_format,
name='after_combine_conv_{}_{}'.format(i, rep))
new_node = conv_layer(new_node)
new_node = utils.batch_norm_act(
new_node,
is_training_bn=p.is_training_bn,
act_type=None if not p.conv_bn_act_pattern else p.act_type,
data_format=config.data_format,
name='bifpn_bn_{}_{}'.format(i, rep))
feats.append(new_node)
num_output_connections.append(0)
output_feats = {}
for l in range(p.min_level, p.max_level + 1):
for i, fnode in enumerate(reversed(fpn_config.nodes)):
if fnode['feat_level'] == l:
output_feats[l] = feats[-1 - i]
break
return output_feats
def efficientdet(inputs, model_name=None, config=None, **kwargs):
"""Build EfficientDet model."""
if not config and not model_name:
raise ValueError('please specify either model name or config')
if not config:
config = hparams_config.get_efficientdet_config(model_name)
elif isinstance(config, dict):
config = hparams_config.Config(config) # wrap dict in Config object
if kwargs:
config.override(kwargs)
if config.pruned_model_path:
input_layer = tf.keras.layers.InputLayer(input_tensor=inputs, name="Input")
if config.mode == 'export':
pruned_model = get_model_with_input(
os.path.join(os.path.dirname(config.pruned_model_path), 'pruned_eval.json'),
input_layer)
else:
pruned_model = get_model_with_input(
os.path.join(os.path.dirname(config.pruned_model_path), 'pruned_train.json'),
input_layer)
if (not MPI_is_distributed() or MPI_rank() == 0) and config.mode == 'train':
pruned_model.summary()
print("Pruned graph is loaded succesfully.")
model_outputs = pruned_model.outputs
lvl_list = list(range(config.min_level, config.max_level+1))
class_outputs = dict(zip(lvl_list, model_outputs[0:len(lvl_list)]))
box_outputs = dict(zip(lvl_list, model_outputs[len(lvl_list):]))
else:
# build backbone features.
features = build_backbone(inputs, config)
# build feature network.
fpn_feats = build_feature_network(features, config)
# build class and box predictions.
class_outputs, box_outputs = build_class_and_box_outputs(fpn_feats, config)
m = tf.keras.models.Model(
inputs=inputs,
outputs=list(class_outputs.values())+list(box_outputs.values()))
if (not MPI_is_distributed() or MPI_rank() == 0) and config.mode == 'train':
dump_json(m, os.path.join(config.model_dir, "graph.json"))
return class_outputs, box_outputs
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/models/efficientdet_arch.py |
# Lint as: python3
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Anchor definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import logging
import numpy as np
import tensorflow.compat.v1 as tf
from nvidia_tao_tf1.cv.efficientdet.object_detection import argmax_matcher
from nvidia_tao_tf1.cv.efficientdet.object_detection import box_list
from nvidia_tao_tf1.cv.efficientdet.object_detection import faster_rcnn_box_coder
from nvidia_tao_tf1.cv.efficientdet.object_detection import region_similarity_calculator
from nvidia_tao_tf1.cv.efficientdet.object_detection import target_assigner
from nvidia_tao_tf1.cv.efficientdet.utils import utils
# The minimum score to consider a logit for identifying detections.
MIN_CLASS_SCORE = -5.0
# The score for a dummy detection
_DUMMY_DETECTION_SCORE = -1e5
# The maximum number of (anchor,class) pairs to keep for non-max suppression.
MAX_DETECTION_POINTS = 5000
# The maximum number of detections per image.
MAX_DETECTIONS_PER_IMAGE = 100
# The minimal score threshold.
MIN_SCORE_THRESH = 0.4
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level='INFO')
logger = logging.getLogger(__name__)
def sigmoid(x):
"""Sigmoid function for use with Numpy for CPU evaluation."""
return 1 / (1 + np.exp(-x))
def decode_box_outputs(rel_codes, anchors):
"""Transforms relative regression coordinates to absolute positions.
Network predictions are normalized and relative to a given anchor; this
reverses the transformation and outputs absolute coordinates for the input
image.
Args:
rel_codes: box regression targets.
anchors: anchors on all feature levels.
Returns:
outputs: bounding boxes.
"""
ycenter_a = (anchors[0] + anchors[2]) / 2
xcenter_a = (anchors[1] + anchors[3]) / 2
ha = anchors[2] - anchors[0]
wa = anchors[3] - anchors[1]
ty, tx, th, tw = rel_codes
w = np.exp(tw) * wa
h = np.exp(th) * ha
ycenter = ty * ha + ycenter_a
xcenter = tx * wa + xcenter_a
ymin = ycenter - h / 2.
xmin = xcenter - w / 2.
ymax = ycenter + h / 2.
xmax = xcenter + w / 2.
return np.column_stack([ymin, xmin, ymax, xmax])
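# Worked example (illustrative numbers): for an anchor [ymin, xmin, ymax, xmax]
# = [0, 0, 10, 10] (center (5, 5), size 10x10) and rel_codes
# (ty, tx, th, tw) = (0.1, 0.1, log(2), log(2)), the decoded box has center
# (6, 6) and size 20x20, i.e. [-4, -4, 16, 16] in [ymin, xmin, ymax, xmax].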
def decode_box_outputs_tf(rel_codes, anchors):
"""Transforms relative regression coordinates to absolute positions.
Network predictions are normalized and relative to a given anchor; this
reverses the transformation and outputs absolute coordinates for the input
image.
Args:
rel_codes: box regression targets.
anchors: anchors on all feature levels.
Returns:
outputs: bounding boxes.
"""
ycenter_a = (anchors[..., 0] + anchors[..., 2]) / 2
xcenter_a = (anchors[..., 1] + anchors[..., 3]) / 2
ha = anchors[..., 2] - anchors[..., 0]
wa = anchors[..., 3] - anchors[..., 1]
ty, tx, th, tw = tf.unstack(rel_codes, num=4, axis=-1)
w = tf.math.exp(tw) * wa
h = tf.math.exp(th) * ha
ycenter = ty * ha + ycenter_a
xcenter = tx * wa + xcenter_a
ymin = ycenter - h / 2.
xmin = xcenter - w / 2.
ymax = ycenter + h / 2.
xmax = xcenter + w / 2.
return tf.stack([ymin, xmin, ymax, xmax], axis=-1)
# @tf.autograph.to_graph
def nms_tf(dets, thresh):
"""Non-maximum suppression with tf graph mode."""
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = tf.argsort(scores, direction='DESCENDING')
keep = tf.TensorArray(tf.int32, size=0, dynamic_size=True)
index = 0
while tf.shape(order)[0] > 0:
i = order[0]
keep = keep.write(index, i)
xx1 = tf.maximum(x1[i], tf.gather(x1, order[1:]))
yy1 = tf.maximum(y1[i], tf.gather(y1, order[1:]))
xx2 = tf.minimum(x2[i], tf.gather(x2, order[1:]))
yy2 = tf.minimum(y2[i], tf.gather(y2, order[1:]))
w = tf.maximum(0.0, xx2 - xx1 + 1)
h = tf.maximum(0.0, yy2 - yy1 + 1)
intersection = w * h
overlap = intersection / (
areas[i] + tf.gather(areas, order[1:]) - intersection)
inds = tf.where_v2(overlap <= thresh)
order = tf.concat(tf.gather(order, inds + 1), axis=1)
order = tf.squeeze(order, axis=-1)
index += 1
return keep.stack()
def nms(dets, thresh):
"""Non-maximum suppression."""
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
intersection = w * h
overlap = intersection / (areas[i] + areas[order[1:]] - intersection)
inds = np.where(overlap <= thresh)[0]
order = order[inds + 1]
return keep
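# Quick example of the hard NMS above (assumed inputs): for two boxes
# [0, 0, 10, 10, 0.9] and [1, 1, 11, 11, 0.8] with thresh=0.5, each area is
# 11 * 11 = 121, the intersection is 10 * 10 = 100 and the IOU is
# 100 / (121 + 121 - 100) ~= 0.70 > 0.5, so only the higher-scoring box
# (index 0) is kept.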
def soft_nms(dets):
"""Soft non-maximum suppression.
[1] Soft-NMS -- Improving Object Detection With One Line of Code.
https://arxiv.org/abs/1704.04503
Args:
dets: detection with shape (num, 5) and format [x1, y1, x2, y2, score].
The NMS settings are fixed inside this function rather than passed in:
* method: 'gaussian' (one of 'linear', 'gaussian', 'hard').
* iou_thresh (float): IOU threshold, only used by 'linear' and 'hard'.
* sigma: Gaussian parameter, only used by 'gaussian'.
* score_thresh (float): box score threshold for the final boxes.
Returns:
numpy.array: Retained boxes.
"""
method = 'gaussian'
# Default sigma and iou_thresh are from the original soft-nms paper.
sigma = 0.5
iou_thresh = 0.3
score_thresh = 0.001
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
# expand dets with areas, and the second dimension is
# x1, y1, x2, y2, score, area
dets = np.concatenate((dets, areas[:, None]), axis=1)
retained_box = []
while dets.size > 0:
max_idx = np.argmax(dets[:, 4], axis=0)
dets[[0, max_idx], :] = dets[[max_idx, 0], :]
retained_box.append(dets[0, :-1])
xx1 = np.maximum(dets[0, 0], dets[1:, 0])
yy1 = np.maximum(dets[0, 1], dets[1:, 1])
xx2 = np.minimum(dets[0, 2], dets[1:, 2])
yy2 = np.minimum(dets[0, 3], dets[1:, 3])
w = np.maximum(xx2 - xx1 + 1, 0.0)
h = np.maximum(yy2 - yy1 + 1, 0.0)
inter = w * h
iou = inter / (dets[0, 5] + dets[1:, 5] - inter)
if method == 'linear':
weight = np.ones(iou.shape, iou.dtype)
weight[iou > iou_thresh] -= iou[iou > iou_thresh]
elif method == 'gaussian':
weight = np.exp(-(iou * iou) / sigma)
else: # traditional nms
weight = np.ones(iou.shape, iou.dtype)
weight[iou > iou_thresh] = 0
dets[1:, 4] *= weight
retained_idx = np.where(dets[1:, 4] >= score_thresh)[0]
dets = dets[retained_idx + 1, :]
return np.vstack(retained_box)
def _generate_anchor_configs(feat_sizes, min_level, max_level, num_scales,
aspect_ratios):
"""Generates mapping from output level to a list of anchor configurations.
A configuration is a tuple of (num_anchors, scale, aspect_ratio).
Args:
feat_sizes: list of dict of integer numbers of feature map sizes.
min_level: integer number of minimum level of the output feature pyramid.
max_level: integer number of maximum level of the output feature pyramid.
num_scales: integer number representing intermediate scales added
on each level. For instance, num_scales=2 adds two additional
anchor scales [2^0, 2^0.5] on each level.
aspect_ratios: list of tuples representing the aspect ratio anchors added
on each level. For instance, aspect_ratios =
[(1, 1), (1.4, 0.7), (0.7, 1.4)] adds three anchors on each level.
Returns:
anchor_configs: a dictionary with keys as the levels of anchors and
values as a list of anchor configuration.
"""
anchor_configs = {}
for level in range(min_level, max_level + 1):
anchor_configs[level] = []
for scale_octave in range(num_scales):
for aspect in aspect_ratios:
anchor_configs[level].append(
((feat_sizes[0]['height'] / float(feat_sizes[level]['height']),
feat_sizes[0]['width'] / float(feat_sizes[level]['width'])),
scale_octave / float(num_scales), aspect))
return anchor_configs
def _generate_anchor_boxes(image_size, anchor_scale, anchor_configs):
"""Generates multiscale anchor boxes.
Args:
image_size: tuple of integer numbers of input image size.
anchor_scale: float number representing the scale of size of the base
anchor to the feature stride 2^level.
anchor_configs: a dictionary with keys as the levels of anchors and
values as a list of anchor configuration.
Returns:
anchor_boxes: a numpy array with shape [N, 4], which stacks anchors on all
feature levels.
Raises:
ValueError: input size must be the multiple of largest feature stride.
"""
boxes_all = []
for _, configs in anchor_configs.items():
boxes_level = []
for config in configs:
stride, octave_scale, aspect = config
base_anchor_size_x = anchor_scale * stride[1] * 2**octave_scale
base_anchor_size_y = anchor_scale * stride[0] * 2**octave_scale
anchor_size_x_2 = base_anchor_size_x * aspect[0] / 2.0
anchor_size_y_2 = base_anchor_size_y * aspect[1] / 2.0
x = np.arange(stride[1] / 2, image_size[1], stride[1])
y = np.arange(stride[0] / 2, image_size[0], stride[0])
xv, yv = np.meshgrid(x, y)
xv = xv.reshape(-1)
yv = yv.reshape(-1)
boxes = np.vstack((yv - anchor_size_y_2, xv - anchor_size_x_2,
yv + anchor_size_y_2, xv + anchor_size_x_2))
boxes = np.swapaxes(boxes, 0, 1)
boxes_level.append(np.expand_dims(boxes, axis=1))
# concat anchors on the same level and reshape to NxAx4
boxes_level = np.concatenate(boxes_level, axis=1)
boxes_all.append(boxes_level.reshape([-1, 4]))
anchor_boxes = np.vstack(boxes_all)
return anchor_boxes
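# Anchor count example (assuming a 512x512 input, levels 3-7, num_scales=3 and
# 3 aspect ratios, i.e. 9 anchors per location): the per-level grids are
# 64x64, 32x32, 16x16, 8x8 and 4x4, giving 9 * (4096 + 1024 + 256 + 64 + 16)
# = 49104 anchor boxes stacked into anchor_boxes.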
def _generate_detections_tf(cls_outputs,
box_outputs,
anchor_boxes,
indices,
classes,
image_id,
image_scale,
image_size,
min_score_thresh=MIN_SCORE_THRESH,
max_boxes_to_draw=MAX_DETECTIONS_PER_IMAGE,
soft_nms_sigma=0.0,
iou_threshold=0.5,
use_native_nms=True):
"""Generates detections with model outputs and anchors.
Args:
cls_outputs: a numpy array with shape [N, 1], which has the highest class
scores on all feature levels. The N is the number of selected
top-K total anchors on all levels. (k being MAX_DETECTION_POINTS)
box_outputs: a numpy array with shape [N, 4], which stacks box regression
outputs on all feature levels. The N is the number of selected top-k
total anchors on all levels. (k being MAX_DETECTION_POINTS)
anchor_boxes: a numpy array with shape [N, 4], which stacks anchors on all
feature levels. The N is the number of selected top-k total anchors on
all levels.
indices: a numpy array with shape [N], which is the indices from top-k
selection.
classes: a numpy array with shape [N], which represents the class
prediction on all selected anchors from top-k selection.
image_id: an integer number to specify the image id.
image_scale: a float tensor representing the scale between original image
and input image for the detector. It is used to rescale detections for
evaluating with the original groundtruth annotations.
image_size: a tuple (height, width) or an integer for image size.
min_score_thresh: A float representing the threshold for deciding when to
remove boxes based on score.
max_boxes_to_draw: Max number of boxes to draw.
soft_nms_sigma: A scalar float representing the Soft NMS sigma parameter;
See Bodla et al, https://arxiv.org/abs/1704.04503). When
`soft_nms_sigma=0.0` (which is default), we fall back to standard (hard)
NMS.
iou_threshold: A float representing the threshold for deciding whether boxes
overlap too much with respect to IOU.
use_native_nms: a bool that indicates whether to use native nms.
Returns:
detections: detection results in a tensor with each row representing
[image_id, ymin, xmin, ymax, xmax, score, class]
"""
if not image_size:
raise ValueError('tf version generate_detection needs non-empty image_size')
logger.info('Using tf version of post-processing.')
anchor_boxes = tf.gather(anchor_boxes, indices)
scores = tf.math.sigmoid(cls_outputs)
# apply bounding box regression to anchors
boxes = decode_box_outputs_tf(box_outputs, anchor_boxes)
if use_native_nms:
logger.info('Using native nms.')
top_detection_idx, scores = tf.image.non_max_suppression_with_scores(
boxes,
scores,
max_boxes_to_draw,
iou_threshold=iou_threshold,
score_threshold=min_score_thresh,
soft_nms_sigma=soft_nms_sigma)
boxes = tf.gather(boxes, top_detection_idx)
else:
logger.info('Using customized nms.')
scores = tf.expand_dims(scores, axis=1)
all_detections = tf.concat([boxes, scores], axis=1)
top_detection_idx = nms_tf(all_detections, iou_threshold)
detections = tf.gather(all_detections, top_detection_idx)
scores = detections[:, 4]
boxes = detections[:, :4]
image_size = utils.parse_image_size(image_size)
detections = tf.stack([
tf.cast(tf.tile(image_id, tf.shape(top_detection_idx)), tf.float32),
tf.clip_by_value(boxes[:, 0], 0, image_size[0]) * image_scale,
tf.clip_by_value(boxes[:, 1], 0, image_size[1]) * image_scale,
tf.clip_by_value(boxes[:, 2], 0, image_size[0]) * image_scale,
tf.clip_by_value(boxes[:, 3], 0, image_size[1]) * image_scale,
scores,
tf.cast(tf.gather(classes, top_detection_idx) + 1, tf.float32)
], axis=1)
return detections
def _generate_detections(cls_outputs, box_outputs, anchor_boxes, indices,
classes, image_id, image_scale, num_classes,
max_boxes_to_draw, softnms):
"""Generates detections with model outputs and anchors.
Args:
cls_outputs: a numpy array with shape [N, 1], which has the highest class
scores on all feature levels. The N is the number of selected
top-K total anchors on all levels. (k being MAX_DETECTION_POINTS)
box_outputs: a numpy array with shape [N, 4], which stacks box regression
outputs on all feature levels. The N is the number of selected top-k
total anchors on all levels. (k being MAX_DETECTION_POINTS)
anchor_boxes: a numpy array with shape [N, 4], which stacks anchors on all
feature levels. The N is the number of selected top-k total anchors on
all levels.
indices: a numpy array with shape [N], which is the indices from top-k
selection.
classes: a numpy array with shape [N], which represents the class
prediction on all selected anchors from top-k selection.
image_id: an integer number to specify the image id.
image_scale: a float tensor representing the scale between original image
and input image for the detector. It is used to rescale detections for
evaluating with the original groundtruth annotations.
num_classes: an integer that indicates the number of classes.
max_boxes_to_draw: max number of boxes to draw per image.
softnms: whether to use the Soft-NMS algorithm.
Returns:
detections: detection results in a tensor with each row representing
[image_id, x, y, width, height, score, class]
"""
anchor_boxes = anchor_boxes[indices, :]
scores = sigmoid(cls_outputs)
# apply bounding box regression to anchors
boxes = decode_box_outputs(
box_outputs.swapaxes(0, 1), anchor_boxes.swapaxes(0, 1))
boxes = boxes[:, [1, 0, 3, 2]]
# run class-wise nms
detections = []
for c in range(num_classes):
indices = np.where(classes == c)[0]
if indices.shape[0] == 0:
continue
boxes_cls = boxes[indices, :]
scores_cls = scores[indices]
# Select top-scoring boxes in each class and apply non-maximum suppression
# (nms) for boxes in the same class. The selected boxes from each class are
# then concatenated for the final detection outputs.
all_detections_cls = np.column_stack((boxes_cls, scores_cls))
if softnms:
top_detections_cls = soft_nms(all_detections_cls)
top_len = len(top_detections_cls)
else:
top_detection_idx = nms(all_detections_cls, 0.5)
top_detections_cls = all_detections_cls[top_detection_idx]
top_len = len(top_detection_idx)
top_detections_cls[:, 2] -= top_detections_cls[:, 0]
top_detections_cls[:, 3] -= top_detections_cls[:, 1]
top_detections_cls = np.column_stack(
(np.repeat(image_id, top_len),
top_detections_cls,
np.repeat(c + 1, top_len))
)
detections.append(top_detections_cls)
def _generate_dummy_detections(number):
detections_dummy = np.zeros((number, 7), dtype=np.float32)
detections_dummy[:, 0] = image_id[0]
detections_dummy[:, 5] = _DUMMY_DETECTION_SCORE
return detections_dummy
if detections:
detections = np.vstack(detections)
# take final 100 detections
indices = np.argsort(-detections[:, -2])
detections = np.array(
detections[indices[0:max_boxes_to_draw]], dtype=np.float32)
# Add dummy detections to fill up to 100 detections
n = max(max_boxes_to_draw - len(detections), 0)
detections_dummy = _generate_dummy_detections(n)
detections = np.vstack([detections, detections_dummy])
else:
detections = _generate_dummy_detections(max_boxes_to_draw)
detections[:, 1:5] *= image_scale
return detections
class Anchors(object):
"""RetinaNet Anchors class."""
def __init__(self, min_level, max_level, num_scales, aspect_ratios,
anchor_scale, image_size):
"""Constructs multiscale RetinaNet anchors.
Args:
min_level: integer number of minimum level of the output feature pyramid.
max_level: integer number of maximum level of the output feature pyramid.
num_scales: integer number representing intermediate scales added
on each level. For instance, num_scales=2 adds two additional
anchor scales [2^0, 2^0.5] on each level.
aspect_ratios: list of tuples representing the aspect ratio anchors added
on each level. For instance, aspect_ratios =
[(1, 1), (1.4, 0.7), (0.7, 1.4)] adds three anchors on each level.
anchor_scale: float number representing the scale of size of the base
anchor to the feature stride 2^level.
image_size: integer number or tuple of integer number of input image size.
"""
self.min_level = min_level
self.max_level = max_level
self.num_scales = num_scales
self.aspect_ratios = aspect_ratios
self.anchor_scale = anchor_scale
self.image_size = utils.parse_image_size(image_size)
self.feat_sizes = utils.get_feat_sizes(image_size, max_level)
self.config = self._generate_configs()
self.boxes = self._generate_boxes()
def _generate_configs(self):
"""Generate configurations of anchor boxes."""
return _generate_anchor_configs(self.feat_sizes, self.min_level,
self.max_level, self.num_scales,
self.aspect_ratios)
def _generate_boxes(self):
"""Generates multiscale anchor boxes."""
boxes = _generate_anchor_boxes(self.image_size, self.anchor_scale,
self.config)
boxes = tf.convert_to_tensor(boxes, dtype=tf.float32)
return boxes
def get_anchors_per_location(self):
"""Number of anchor per location."""
return self.num_scales * len(self.aspect_ratios)
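# Editor's note -- illustrative usage sketch, not part of the original module.
# Constructing multiscale anchors; the aspect ratios come from the docstring
# above, while num_scales, anchor_scale and image_size are example values only:
#   anchors = Anchors(min_level=3, max_level=7, num_scales=3,
#                     aspect_ratios=[(1.0, 1.0), (1.4, 0.7), (0.7, 1.4)],
#                     anchor_scale=4.0, image_size=512)
#   anchors.get_anchors_per_location()  # -> 3 * 3 = 9
#   anchors.boxes  # float32 tensor of anchor corners, one row per anchor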
class AnchorLabeler(object):
"""Labeler for multiscale anchor boxes."""
def __init__(self, anchors, num_classes, match_threshold=0.5):
"""Constructs anchor labeler to assign labels to anchors.
Args:
anchors: an instance of class Anchors.
num_classes: integer number representing number of classes in the dataset.
match_threshold: float number between 0 and 1 representing the threshold
to assign positive labels for anchors.
"""
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(
match_threshold,
unmatched_threshold=match_threshold,
negatives_lower_than_unmatched=True,
force_match_for_each_row=True)
box_coder = faster_rcnn_box_coder.FasterRcnnBoxCoder()
self._target_assigner = target_assigner.TargetAssigner(
similarity_calc, matcher, box_coder)
self._anchors = anchors
self._match_threshold = match_threshold
self._num_classes = num_classes
def _unpack_labels(self, labels):
"""Unpacks an array of labels into multiscales labels."""
labels_unpacked = collections.OrderedDict()
anchors = self._anchors
count = 0
for level in range(anchors.min_level, anchors.max_level + 1):
feat_size = anchors.feat_sizes[level]
steps = feat_size['height'] * \
feat_size['width'] * anchors.get_anchors_per_location()
indices = tf.range(count, count + steps)
count += steps
labels_unpacked[level] = tf.reshape(
tf.gather(labels, indices),
[feat_size['height'], feat_size['width'], -1])
return labels_unpacked
def label_anchors(self, gt_boxes, gt_labels):
"""Labels anchors with ground truth inputs.
Args:
gt_boxes: A float tensor with shape [N, 4] representing groundtruth boxes.
For each row, it stores [y0, x0, y1, x1] for four corners of a box.
gt_labels: An integer tensor with shape [N, 1] representing groundtruth
classes.
Returns:
cls_targets_dict: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, num_anchors]. The height_l and width_l
represent the dimension of class logits at l-th level.
box_targets_dict: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, num_anchors * 4]. The height_l and
width_l represent the dimension of bounding box regression output at
l-th level.
num_positives: scalar tensor storing number of positives in an image.
"""
gt_box_list = box_list.BoxList(gt_boxes)
anchor_box_list = box_list.BoxList(self._anchors.boxes)
# cls_weights, box_weights are not used
cls_targets, _, box_targets, _, matches = self._target_assigner.assign(
anchor_box_list, gt_box_list, gt_labels)
# class labels start from 1 and the background class = -1
cls_targets -= 1
cls_targets = tf.cast(cls_targets, tf.int32)
# Unpack labels.
cls_targets_dict = self._unpack_labels(cls_targets)
box_targets_dict = self._unpack_labels(box_targets)
num_positives = tf.reduce_sum(
tf.cast(tf.not_equal(matches.match_results, -1), tf.float32))
return cls_targets_dict, box_targets_dict, num_positives
def generate_detections(self,
cls_outputs,
box_outputs,
indices,
classes,
image_id,
image_scale,
image_size=None,
min_score_thresh=MIN_SCORE_THRESH,
max_boxes_to_draw=MAX_DETECTIONS_PER_IMAGE,
softnms=False,
disable_pyfun=None):
"""Generate detections based on class and box predictions."""
if disable_pyfun:
return _generate_detections_tf(
cls_outputs,
box_outputs,
self._anchors.boxes,
indices,
classes,
image_id,
image_scale,
image_size,
min_score_thresh=min_score_thresh,
max_boxes_to_draw=max_boxes_to_draw)
return tf.py_func(_generate_detections, [
cls_outputs, box_outputs, self._anchors.boxes, indices, classes,
image_id, image_scale, self._num_classes, max_boxes_to_draw, softnms
], tf.float32)
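# Editor's note -- illustrative usage sketch, not part of the original module.
# Typical target assignment for one image; `gt_boxes` ([N, 4], [y0, x0, y1, x1])
# and `gt_labels` ([N, 1]) are hypothetical inputs matching label_anchors above,
# and num_classes=90 is an example value:
#   labeler = AnchorLabeler(anchors, num_classes=90, match_threshold=0.5)
#   cls_targets, box_targets, num_positives = labeler.label_anchors(
#       gt_boxes, gt_labels)
#   # cls_targets / box_targets are OrderedDicts keyed by pyramid level.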
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/models/anchors.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common keras utils."""
import functools
from typing import Text
import numpy as np
import tensorflow as tf
from nvidia_tao_tf1.cv.efficientdet.utils import utils
def build_batch_norm(is_training_bn: bool,
init_zero: bool = False,
data_format: Text = 'channels_last',
momentum: float = 0.99,
epsilon: float = 1e-3,
name: Text = 'tpu_batch_normalization'):
"""Build a batch normalization layer.
Args:
is_training_bn: `bool` for whether the model is training.
init_zero: `bool` if True, initializes scale parameter of batch
normalization with 0 instead of 1 (default).
data_format: `str` either "channels_first" for `[batch, channels, height,
width]` or "channels_last" for `[batch, height, width, channels]`.
momentum: `float`, momentum of the batch norm.
epsilon: `float`, small value for numerical stability.
name: the name of the batch normalization layer.
Returns:
A normalized `Tensor` with the same `data_format`.
"""
if init_zero:
gamma_initializer = tf.zeros_initializer()
else:
gamma_initializer = tf.ones_initializer()
axis = 1 if data_format == 'channels_first' else -1
batch_norm_class = utils.batch_norm_class(is_training_bn)
bn_layer = batch_norm_class(axis=axis,
momentum=momentum,
epsilon=epsilon,
center=True,
scale=True,
gamma_initializer=gamma_initializer,
name=name)
return bn_layer
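# Editor's note -- illustrative usage sketch, not part of the original module.
# Building and applying a batch-norm layer on a channels-last feature map `x`
# (a hypothetical tensor of shape [batch, height, width, channels]):
#   bn = build_batch_norm(is_training_bn=True, data_format='channels_last',
#                         name='box-0-bn-3')
#   y = bn(x, training=True)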
class BoxNet:
"""Box regression network."""
def __init__(self,
num_anchors=9,
num_filters=32,
min_level=3,
max_level=7,
is_training=False,
act_type='swish',
repeats=4,
separable_conv=True,
survival_prob=None,
data_format='channels_last',
name='box_net',
**kwargs):
"""Initialize BoxNet.
Args:
num_anchors: number of anchors used.
num_filters: number of filters for "intermediate" layers.
min_level: minimum level for features.
max_level: maximum level for features.
is_training: True if we train the BatchNorm.
act_type: String of the activation used.
repeats: number of "intermediate" layers.
separable_conv: True to use separable_conv instead of conv2D.
survival_prob: if a value is set then drop connect will be used.
data_format: string of 'channels_first' or 'channels_last'.
name: Name of the layer.
**kwargs: other parameters.
"""
self.num_anchors = num_anchors
self.num_filters = num_filters
self.min_level = min_level
self.max_level = max_level
self.repeats = repeats
self.separable_conv = separable_conv
self.is_training = is_training
self.survival_prob = survival_prob
self.act_type = act_type
self.data_format = data_format
self.conv_ops = []
self.bns = []
for i in range(self.repeats):
# If using SeparableConv2D
if self.separable_conv:
self.conv_ops.append(
tf.keras.layers.SeparableConv2D(
filters=self.num_filters,
depth_multiplier=1,
pointwise_initializer=tf.keras.initializers.VarianceScaling(),
depthwise_initializer=tf.keras.initializers.VarianceScaling(),
data_format=self.data_format,
kernel_size=3,
activation=None,
bias_initializer=tf.zeros_initializer(),
padding='same',
name='box-%d' % i))
# If using Conv2d
else:
self.conv_ops.append(
tf.keras.layers.Conv2D(
filters=self.num_filters,
kernel_initializer=tf.random_normal_initializer(stddev=0.01),
data_format=self.data_format,
kernel_size=3,
activation=None,
bias_initializer=tf.zeros_initializer(),
padding='same',
name='box-%d' % i))
bn_per_level = {}
for level in range(self.min_level, self.max_level + 1):
bn_per_level[level] = build_batch_norm(
is_training_bn=self.is_training,
init_zero=False,
data_format=self.data_format,
name='box-%d-bn-%d' % (i, level))
self.bns.append(bn_per_level)
if self.separable_conv:
self.boxes = tf.keras.layers.SeparableConv2D(
filters=4 * self.num_anchors,
depth_multiplier=1,
pointwise_initializer=tf.keras.initializers.VarianceScaling(),
depthwise_initializer=tf.keras.initializers.VarianceScaling(),
data_format=self.data_format,
kernel_size=3,
activation=None,
bias_initializer=tf.zeros_initializer(),
padding='same',
name='box-predict')
else:
self.boxes = tf.keras.layers.Conv2D(
filters=4 * self.num_anchors,
kernel_initializer=tf.random_normal_initializer(stddev=0.01),
data_format=self.data_format,
kernel_size=3,
activation=None,
bias_initializer=tf.zeros_initializer(),
padding='same',
name='box-predict')
def __call__(self, inputs):
"""Call boxnet."""
box_outputs = {}
for level in range(self.min_level, self.max_level + 1):
image = inputs[level]
for i in range(self.repeats):
original_image = image
image = self.conv_ops[i](image)
image = self.bns[i][level](image, training=self.is_training)
if self.act_type:
image = utils.activation_fn(image, self.act_type)
if i > 0 and self.survival_prob:
image = utils.drop_connect(image, self.is_training,
self.survival_prob)
image = image + original_image
box_outputs[level] = self.boxes(image)
return box_outputs
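# Editor's note -- illustrative usage sketch, not part of the original module.
# Running BoxNet on a feature pyramid; `feats` is a hypothetical dict mapping
# level -> [batch, H_l, W_l, C] tensors for levels min_level..max_level, and
# num_filters=64 is an example value:
#   box_net = BoxNet(num_anchors=9, num_filters=64, min_level=3, max_level=7)
#   box_outputs = box_net(feats)
#   # box_outputs[l] has shape [batch, H_l, W_l, 4 * num_anchors]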
class ClassNet:
"""Object class prediction network."""
def __init__(self,
num_classes=91,
num_anchors=9,
num_filters=32,
min_level=3,
max_level=7,
is_training=False,
act_type='swish',
repeats=4,
separable_conv=True,
survival_prob=None,
data_format='channels_last',
name='class_net',
**kwargs):
"""Initialize the ClassNet.
Args:
num_classes: number of classes.
num_anchors: number of anchors.
num_filters: number of filters for "intermediate" layers.
min_level: minimum level for features.
max_level: maximum level for features.
is_training: True if we train the BatchNorm.
act_type: String of the activation used.
repeats: number of intermediate layers.
separable_conv: True to use separable_conv instead of conv2D.
survival_prob: if a value is set then drop connect will be used.
data_format: string of 'channels_first' or 'channels_last'.
name: the name of this layer.
**kwargs: other parameters.
"""
self.num_classes = num_classes
self.num_anchors = num_anchors
self.num_filters = num_filters
self.min_level = min_level
self.max_level = max_level
self.repeats = repeats
self.separable_conv = separable_conv
self.is_training = is_training
self.survival_prob = survival_prob
self.act_type = act_type
self.data_format = data_format
self.conv_ops = []
self.bns = []
if separable_conv:
conv2d_layer = functools.partial(
tf.keras.layers.SeparableConv2D,
depth_multiplier=1,
data_format=data_format,
pointwise_initializer=tf.keras.initializers.VarianceScaling(),
depthwise_initializer=tf.keras.initializers.VarianceScaling())
else:
conv2d_layer = functools.partial(
tf.keras.layers.Conv2D,
data_format=data_format,
kernel_initializer=tf.random_normal_initializer(stddev=0.01))
for i in range(self.repeats):
# Intermediate conv layers; conv2d_layer is separable or regular per the flag above.
self.conv_ops.append(
conv2d_layer(self.num_filters,
kernel_size=3,
bias_initializer=tf.zeros_initializer(),
activation=None,
padding='same',
name='class-%d' % i))
bn_per_level = {}
for level in range(self.min_level, self.max_level + 1):
bn_per_level[level] = build_batch_norm(
is_training_bn=self.is_training,
init_zero=False,
data_format=self.data_format,
name='class-%d-bn-%d' % (i, level),
)
self.bns.append(bn_per_level)
self.classes = conv2d_layer(
num_classes * num_anchors,
kernel_size=3,
bias_initializer=tf.constant_initializer(-np.log((1 - 0.01) / 0.01)),
padding='same',
name='class-predict')
def __call__(self, inputs):
"""Call ClassNet."""
class_outputs = {}
for level in range(self.min_level, self.max_level + 1):
image = inputs[level]
for i in range(self.repeats):
original_image = image
image = self.conv_ops[i](image)
image = self.bns[i][level](image, training=self.is_training)
if self.act_type:
image = utils.activation_fn(image, self.act_type)
if i > 0 and self.survival_prob:
image = utils.drop_connect(image, self.is_training,
self.survival_prob)
image = image + original_image
class_outputs[level] = self.classes(image)
return class_outputs
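# Editor's note -- illustrative usage sketch, not part of the original module.
# ClassNet mirrors BoxNet but predicts num_classes * num_anchors logits per
# location; `feats` is the same hypothetical level -> tensor dict as above:
#   class_net = ClassNet(num_classes=90, num_anchors=9, num_filters=64,
#                        min_level=3, max_level=7)
#   class_outputs = class_net(feats)
#   # class_outputs[l] has shape [batch, H_l, W_l, num_classes * num_anchors]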
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/models/utils_keras.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientDet arch tests."""
import os
import pytest
import tensorflow as tf
from nvidia_tao_tf1.cv.efficientdet.models import efficientdet_arch
from nvidia_tao_tf1.cv.efficientdet.utils import hparams_config, spec_loader
@pytest.mark.parametrize("file_name",
[('d1.txt'),
('d2.txt'),
('d3.txt'),
('d4.txt'),
('d5.txt')])
def test_arch(tmpdir, file_name):
file_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
default_spec_path = os.path.join(file_path, '../experiment_specs', file_name)
spec = spec_loader.load_experiment_spec(default_spec_path, merge_from_default=False)
MODE = 'train'
# Parse and override hparams
config = hparams_config.get_detection_config(spec.model_config.model_name)
params = spec_loader.generate_params_from_spec(config, spec, MODE)
config.update(params)
config.model_dir = tmpdir
inputs = tf.keras.layers.Input(shape=(512, 512, 3), batch_size=4)
class_output, box_outputs = efficientdet_arch.efficientdet(inputs, None, config=config)
assert len(class_output) == config.max_level - config.min_level + 1
assert len(box_outputs) == config.max_level - config.min_level + 1
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/models/tests/test_models.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils used to manipulate tensor shapes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import zip
import tensorflow.compat.v1 as tf
from nvidia_tao_tf1.cv.efficientdet.visualize import static_shape
get_dim_as_int = static_shape.get_dim_as_int
def _is_tensor(t):
"""Returns a boolean indicating whether the input is a tensor.
Args:
t: the input to be tested.
Returns:
a boolean that indicates whether t is a tensor.
"""
return isinstance(t, (tf.Tensor, tf.SparseTensor, tf.Variable))
def _set_dim_0(t, d0):
"""Sets the 0-th dimension of the input tensor.
Args:
t: the input tensor, assuming the rank is at least 1.
d0: an integer indicating the 0-th dimension of the input tensor.
Returns:
the tensor t with the 0-th dimension set.
"""
t_shape = t.get_shape().as_list()
t_shape[0] = d0
t.set_shape(t_shape)
return t
def pad_tensor(t, length):
"""Pads the input tensor with 0s along the first dimension up to the length.
Args:
t: the input tensor, assuming the rank is at least 1.
length: a tensor of shape [1] or an integer, indicating the first dimension
of the input tensor t after padding, assuming length <= t.shape[0].
Returns:
padded_t: the padded tensor, whose first dimension is length. If the length
is an integer, the first dimension of padded_t is set to length
statically.
"""
t_rank = tf.rank(t)
t_shape = tf.shape(t)
t_d0 = t_shape[0]
pad_d0 = tf.expand_dims(length - t_d0, 0)
pad_shape = tf.cond(
tf.greater(t_rank, 1), lambda: tf.concat([pad_d0, t_shape[1:]], 0),
lambda: tf.expand_dims(length - t_d0, 0))
padded_t = tf.concat([t, tf.zeros(pad_shape, dtype=t.dtype)], 0)
if not _is_tensor(length):
padded_t = _set_dim_0(padded_t, length)
return padded_t
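# Editor's note -- illustrative example, not part of the original module:
#   t = tf.constant([[1., 2.], [3., 4.], [5., 6.]])
#   pad_tensor(t, 5)  # shape [5, 2]; the last two rows are zeros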
def clip_tensor(t, length):
"""Clips the input tensor along the first dimension up to the length.
Args:
t: the input tensor, assuming the rank is at least 1.
length: a tensor of shape [1] or an integer, indicating the first dimension
of the input tensor t after clipping, assuming length <= t.shape[0].
Returns:
clipped_t: the clipped tensor, whose first dimension is length. If the
length is an integer, the first dimension of clipped_t is set to length
statically.
"""
clipped_t = tf.gather(t, tf.range(length))
if not _is_tensor(length):
clipped_t = _set_dim_0(clipped_t, length)
return clipped_t
def pad_or_clip_tensor(t, length):
"""Pad or clip the input tensor along the first dimension.
Args:
t: the input tensor, assuming the rank is at least 1.
length: a tensor of shape [1] or an integer, indicating the first dimension
of the input tensor t after processing.
Returns:
processed_t: the processed tensor, whose first dimension is length. If the
length is an integer, the first dimension of the processed tensor is set
to length statically.
"""
return pad_or_clip_nd(t, [length] + t.shape.as_list()[1:])
def pad_or_clip_nd(tensor, output_shape):
"""Pad or Clip given tensor to the output shape.
Args:
tensor: Input tensor to pad or clip.
output_shape: A list of integers / scalar tensors (or None for dynamic dim)
representing the size to pad or clip each dimension of the input tensor.
Returns:
Input tensor padded and clipped to the output shape.
"""
tensor_shape = tf.shape(tensor)
clip_size = [
tf.where(tensor_shape[i] - shape > 0, shape, -1)
if shape is not None else -1 for i, shape in enumerate(output_shape)
]
clipped_tensor = tf.slice(
tensor,
begin=tf.zeros(len(clip_size), dtype=tf.int32),
size=clip_size)
# Pad tensor if the shape of clipped tensor is smaller than the expected
# shape.
clipped_tensor_shape = tf.shape(clipped_tensor)
trailing_paddings = [
shape - clipped_tensor_shape[i] if shape is not None else 0
for i, shape in enumerate(output_shape)
]
paddings = tf.stack(
[
tf.zeros(len(trailing_paddings), dtype=tf.int32),
trailing_paddings
],
axis=1)
padded_tensor = tf.pad(clipped_tensor, paddings=paddings)
output_static_shape = [
dim if not isinstance(dim, tf.Tensor) else None for dim in output_shape
]
padded_tensor.set_shape(output_static_shape)
return padded_tensor
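# Editor's note -- illustrative example, not part of the original module:
#   t = tf.ones([6, 2])
#   pad_or_clip_nd(t, [4, 3])  # clips dim 0 to 4 rows, zero-pads dim 1 to 3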
def combined_static_and_dynamic_shape(tensor):
"""Returns a list containing static and dynamic values for the dimensions.
Returns a list of static and dynamic values for shape dimensions. This is
useful to preserve static shapes when available in reshape operation.
Args:
tensor: A tensor of any type.
Returns:
A list of size tensor.shape.ndims containing integers or a scalar tensor.
"""
static_tensor_shape = tensor.shape.as_list()
dynamic_tensor_shape = tf.shape(tensor)
combined_shape = []
for index, dim in enumerate(static_tensor_shape):
if dim is not None:
combined_shape.append(dim)
else:
combined_shape.append(dynamic_tensor_shape[index])
return combined_shape
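# Editor's note -- illustrative example, not part of the original module.
# With an unknown batch dimension, the first entry comes back as a scalar
# tensor while the statically known dimensions stay plain Python ints:
#   x = tf.placeholder(tf.float32, shape=[None, 20, 3])
#   combined_static_and_dynamic_shape(x)  # [<tf.Tensor>, 20, 3]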
def static_or_dynamic_map_fn(fn, elems, dtype=None, parallel_iterations=32, back_prop=True):
"""Runs map_fn as a (static) for loop when possible.
This function rewrites the map_fn as an explicit unstack input -> for loop
over function calls -> stack result combination. This allows our graphs to
be acyclic when the batch size is static.
For comparison, see https://www.tensorflow.org/api_docs/python/tf/map_fn.
Note that `static_or_dynamic_map_fn` currently is not *fully* interchangeable
with the default tf.map_fn function as it does not accept nested inputs (only
Tensors or lists of Tensors). Likewise, the output of `fn` can only be a
Tensor or list of Tensors.
TODO(jonathanhuang): make this function fully interchangeable with tf.map_fn.
Args:
fn: The callable to be performed. It accepts one argument, which will have
the same structure as elems. Its output must have the
same structure as elems.
elems: A tensor or list of tensors, each of which will
be unpacked along their first dimension. The sequence of the
resulting slices will be applied to fn.
dtype: (optional) The output type(s) of fn. If fn returns a structure of
Tensors differing from the structure of elems, then dtype is not optional
and must have the same structure as the output of fn.
parallel_iterations: (optional) number of batch items to process in
parallel. This flag is only used if the native tf.map_fn is used
and defaults to 32 instead of 10 (unlike the standard tf.map_fn default).
back_prop: (optional) True enables support for back propagation.
This flag is only used if the native tf.map_fn is used.
Returns:
A tensor or sequence of tensors. Each tensor packs the
results of applying fn to tensors unpacked from elems along the first
dimension, from first to last.
Raises:
ValueError: if `elems` is not a Tensor or a list of Tensors.
ValueError: if `fn` does not return a Tensor or list of Tensors
"""
if isinstance(elems, list):
for elem in elems:
if not isinstance(elem, tf.Tensor):
raise ValueError('`elems` must be a Tensor or list of Tensors.')
elem_shapes = [elem.shape.as_list() for elem in elems]
# Fall back on tf.map_fn if shapes of each entry of `elems` are None or fail
# to all be the same size along the batch dimension.
for elem_shape in elem_shapes:
if (not elem_shape or not elem_shape[0]
or elem_shape[0] != elem_shapes[0][0]):
return tf.map_fn(fn, elems, dtype, parallel_iterations, back_prop)
arg_tuples = zip(*[tf.unstack(elem) for elem in elems])
outputs = [fn(arg_tuple) for arg_tuple in arg_tuples]
else:
if not isinstance(elems, tf.Tensor):
raise ValueError('`elems` must be a Tensor or list of Tensors.')
elems_shape = elems.shape.as_list()
if not elems_shape or not elems_shape[0]:
return tf.map_fn(fn, elems, dtype, parallel_iterations, back_prop)
outputs = [fn(arg) for arg in tf.unstack(elems)]
# Stack `outputs`, which is a list of Tensors or list of lists of Tensors
if all([isinstance(output, tf.Tensor) for output in outputs]):
return tf.stack(outputs)
if all([isinstance(output, list) for output in outputs]):
if all([all([isinstance(entry, tf.Tensor) for entry in output_list])
for output_list in outputs]):
return [tf.stack(output_tuple) for output_tuple in zip(*outputs)]
raise ValueError('`fn` should return a Tensor or a list of Tensors.')
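# Editor's note -- illustrative example, not part of the original module.
# With a fully static batch dimension the map is unrolled into a Python loop
# instead of falling back to tf.map_fn:
#   boxes = tf.ones([8, 100, 4])
#   areas = static_or_dynamic_map_fn(
#       lambda b: (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1]), boxes)
#   # areas has shape [8, 100]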
def check_min_image_dim(min_dim, image_tensor):
"""Checks that the image width/height are greater than some number.
This function is used to check that the width and height of an image are above
a certain value. If the image shape is static, this function will perform the
check at graph construction time. Otherwise, if the image shape varies, an
Assertion control dependency will be added to the graph.
Args:
min_dim: The minimum number of pixels along the width and height of the
image.
image_tensor: The image tensor to check size for.
Returns:
If `image_tensor` has dynamic size, return `image_tensor` with a Assert
control dependency. Otherwise returns image_tensor.
Raises:
ValueError: if `image_tensor`'s' width or height is smaller than `min_dim`.
"""
image_shape = image_tensor.get_shape()
image_height = static_shape.get_height(image_shape)
image_width = static_shape.get_width(image_shape)
if image_height is None or image_width is None:
shape_assert = tf.Assert(
tf.logical_and(tf.greater_equal(tf.shape(image_tensor)[1], min_dim),
tf.greater_equal(tf.shape(image_tensor)[2], min_dim)),
['image size must be >= {} in both height and width.'.format(min_dim)])
with tf.control_dependencies([shape_assert]):
return tf.identity(image_tensor)
if image_height < min_dim or image_width < min_dim:
raise ValueError(
'image size must be >= %d in both height and width; image dim = %d,%d' %
(min_dim, image_height, image_width))
return image_tensor
def assert_shape_equal(shape_a, shape_b):
"""Asserts that shape_a and shape_b are equal.
If the shapes are static, raises a ValueError when the shapes
mismatch.
If the shapes are dynamic, raises a tf InvalidArgumentError when the shapes
mismatch.
Args:
shape_a: a list containing shape of the first tensor.
shape_b: a list containing shape of the second tensor.
Returns:
Either a tf.no_op() when shapes are all static and a tf.assert_equal() op
when the shapes are dynamic.
Raises:
ValueError: When shapes are both static and unequal.
"""
if (all(isinstance(dim, int) for dim in shape_a) and
all(isinstance(dim, int) for dim in shape_b)):
if shape_a != shape_b:
raise ValueError('Unequal shapes {}, {}'.format(shape_a, shape_b))
else:
return tf.no_op()
else:
return tf.assert_equal(shape_a, shape_b)
def assert_shape_equal_along_first_dimension(shape_a, shape_b):
"""Asserts that shape_a and shape_b are the same along the 0th-dimension.
If the shapes are static, raises a ValueError when the shapes
mismatch.
If the shapes are dynamic, raises a tf InvalidArgumentError when the shapes
mismatch.
Args:
shape_a: a list containing shape of the first tensor.
shape_b: a list containing shape of the second tensor.
Returns:
Either a tf.no_op() when shapes are all static and a tf.assert_equal() op
when the shapes are dynamic.
Raises:
ValueError: When shapes are both static and unequal.
"""
if isinstance(shape_a[0], int) and isinstance(shape_b[0], int):
if shape_a[0] != shape_b[0]:
raise ValueError('Unequal first dimension {}, {}'.format(
shape_a[0], shape_b[0]))
else:
return tf.no_op()
else:
return tf.assert_equal(shape_a[0], shape_b[0])
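# Editor's note -- illustrative example, not part of the original module.
# Shape lists are typically produced by combined_static_and_dynamic_shape; when
# both first dimensions are static ints the check happens at graph build time:
#   a, b = tf.ones([2, 3]), tf.ones([2, 5])
#   assert_shape_equal_along_first_dimension(
#       combined_static_and_dynamic_shape(a),
#       combined_static_and_dynamic_shape(b))  # returns tf.no_op()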
def assert_box_normalized(boxes, maximum_normalized_coordinate=1.1):
"""Asserts the input box tensor is normalized.
Args:
boxes: a tensor of shape [N, 4] where N is the number of boxes.
maximum_normalized_coordinate: Maximum coordinate value to be considered
as normalized, default to 1.1.
Returns:
a tf.Assert op which fails when the input box tensor is not normalized.
Raises:
ValueError: When the input box tensor is not normalized.
"""
box_minimum = tf.reduce_min(boxes)
box_maximum = tf.reduce_max(boxes)
return tf.Assert(
tf.logical_and(
tf.less_equal(box_maximum, maximum_normalized_coordinate),
tf.greater_equal(box_minimum, 0)),
[boxes])
def flatten_dimensions(inputs, first, last):
"""Flattens `K-d` tensor along [first, last) dimensions.
Converts `inputs` with shape [D0, D1, ..., D(K-1)] into a tensor of shape
[D0, D1, ..., D(first) * D(first+1) * ... * D(last-1), D(last), ..., D(K-1)].
Example:
`inputs` is a tensor with initial shape [10, 5, 20, 20, 3].
new_tensor = flatten_dimensions(inputs, first=1, last=3)
new_tensor.shape -> [10, 100, 20, 3].
Args:
inputs: a tensor with shape [D0, D1, ..., D(K-1)].
first: first value for the range of dimensions to flatten.
last: last value for the range of dimensions to flatten. Note that the last
dimension itself is excluded.
Returns:
a tensor with shape
[D0, D1, ..., D(first) * D(first + 1) * ... * D(last - 1), D(last), ...,
D(K-1)].
Raises:
ValueError: if first and last arguments are incorrect.
"""
if first >= inputs.shape.ndims or last > inputs.shape.ndims:
raise ValueError('`first` and `last` must be less than inputs.shape.ndims. '
'found {} and {} respectively while ndims is {}'.format(
first, last, inputs.shape.ndims))
shape = combined_static_and_dynamic_shape(inputs)
flattened_dim_prod = tf.reduce_prod(shape[first:last], keepdims=True)
new_shape = tf.concat([shape[:first], flattened_dim_prod, shape[last:]], axis=0)
return tf.reshape(inputs, new_shape)
def flatten_first_n_dimensions(inputs, n):
"""Flattens `K-d` tensor along first n dimension to be a `(K-n+1)-d` tensor.
Converts `inputs` with shape [D0, D1, ..., D(K-1)] into a tensor of shape
[D0 * D1 * ... * D(n-1), D(n), ... D(K-1)].
Example:
`inputs` is a tensor with initial shape [10, 5, 20, 20, 3].
new_tensor = flatten_first_n_dimensions(inputs, 2)
new_tensor.shape -> [50, 20, 20, 3].
Args:
inputs: a tensor with shape [D0, D1, ..., D(K-1)].
n: The number of dimensions to flatten.
Returns:
a tensor with shape [D0 * D1 * ... * D(n-1), D(n), ... D(K-1)].
"""
return flatten_dimensions(inputs, first=0, last=n)
def expand_first_dimension(inputs, dims):
"""Expands `K-d` tensor along first dimension to be a `(K+n-1)-d` tensor.
Converts `inputs` with shape [D0, D1, ..., D(K-1)] into a tensor of shape
[dims[0], dims[1], ..., dims[-1], D1, ..., D(k-1)].
Example:
`inputs` is a tensor with shape [50, 20, 20, 3].
new_tensor = expand_first_dimension(inputs, [10, 5]).
new_tensor.shape -> [10, 5, 20, 20, 3].
Args:
inputs: a tensor with shape [D0, D1, ..., D(K-1)].
dims: List with new dimensions to expand first axis into. The length of
`dims` is typically 2 or larger.
Returns:
a tensor with shape [dims[0], dims[1], ..., dims[-1], D1, ..., D(k-1)].
"""
inputs_shape = combined_static_and_dynamic_shape(inputs)
expanded_shape = tf.stack(dims + inputs_shape[1:])
# Verify that it is possible to expand the first axis of inputs.
assert_op = tf.assert_equal(
inputs_shape[0], tf.reduce_prod(tf.stack(dims)),
message=('First dimension of `inputs` cannot be expanded into provided '
'`dims`'))
with tf.control_dependencies([assert_op]):
inputs_reshaped = tf.reshape(inputs, expanded_shape)
return inputs_reshaped
def resize_images_and_return_shapes(inputs, image_resizer_fn):
"""Resizes images using the given function and returns their true shapes.
Args:
inputs: a float32 Tensor representing a batch of inputs of shape
[batch_size, height, width, channels].
image_resizer_fn: a function which takes in a single image and outputs
a resized image and its original shape.
Returns:
resized_inputs: The inputs resized according to image_resizer_fn.
true_image_shapes: An integer tensor of shape [batch_size, 3]
representing the height, width and number of channels in inputs.
"""
if inputs.dtype is not tf.float32:
raise ValueError('`resize_images_and_return_shapes` expects a'
' tf.float32 tensor')
# TODO(jonathanhuang): revisit whether to always use batch size as
# the number of parallel iterations vs allow for dynamic batching.
outputs = static_or_dynamic_map_fn(
image_resizer_fn,
elems=inputs,
dtype=[tf.float32, tf.int32])
resized_inputs = outputs[0]
true_image_shapes = outputs[1]
return resized_inputs, true_image_shapes
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/visualize/shape_utils.py |
tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/visualize/__init__.py |
|
"""Contains classes specifying naming conventions used for object detection.
Specifies:
InputDataFields: standard fields used by reader/preprocessor/batcher.
DetectionResultFields: standard fields returned by object detector.
BoxListFields: standard field used by BoxList
TfExampleFields: standard fields for tf-example data format (go/tf-example).
"""
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
class InputDataFields(object):
"""Names for the input tensors.
Holds the standard data field names to use for identifying input tensors. This
should be used by the decoder to identify keys for the returned tensor_dict
containing input tensors. And it should be used by the model to identify the
tensors it needs.
Attributes:
image: image.
image_additional_channels: additional channels.
original_image: image in the original input size.
original_image_spatial_shape: image in the original input size.
key: unique key corresponding to image.
source_id: source of the original image.
filename: original filename of the dataset (without common path).
groundtruth_image_classes: image-level class labels.
groundtruth_image_confidences: image-level class confidences.
groundtruth_boxes: coordinates of the ground truth boxes in the image.
groundtruth_classes: box-level class labels.
groundtruth_confidences: box-level class confidences. The shape should be
the same as the shape of groundtruth_classes.
groundtruth_label_types: box-level label types (e.g. explicit negative).
groundtruth_is_crowd: [DEPRECATED, use groundtruth_group_of instead]
is the groundtruth a single object or a crowd.
groundtruth_area: area of a groundtruth segment.
groundtruth_difficult: is a `difficult` object
groundtruth_group_of: is a `group_of` objects, e.g. multiple objects of the
same class, forming a connected group, where instances are heavily
occluding each other.
proposal_boxes: coordinates of object proposal boxes.
proposal_objectness: objectness score of each proposal.
groundtruth_instance_masks: ground truth instance masks.
groundtruth_instance_boundaries: ground truth instance boundaries.
groundtruth_instance_classes: instance mask-level class labels.
groundtruth_keypoints: ground truth keypoints.
groundtruth_keypoint_visibilities: ground truth keypoint visibilities.
groundtruth_keypoint_weights: groundtruth weight factor for keypoints.
groundtruth_label_weights: groundtruth label weights.
groundtruth_weights: groundtruth weight factor for bounding boxes.
num_groundtruth_boxes: number of groundtruth boxes.
is_annotated: whether an image has been labeled or not.
true_image_shapes: true shapes of images in the resized images, as resized
images can be padded with zeros.
multiclass_scores: the label score per class for each box.
context_features: a flattened list of contextual features.
context_feature_length: the fixed length of each feature in
context_features, used for reshaping.
valid_context_size: the valid context size, used in filtering the padded
context features.
"""
image = 'image'
image_additional_channels = 'image_additional_channels'
original_image = 'original_image'
original_image_spatial_shape = 'original_image_spatial_shape'
key = 'key'
source_id = 'source_id'
filename = 'filename'
groundtruth_image_classes = 'groundtruth_image_classes'
groundtruth_image_confidences = 'groundtruth_image_confidences'
groundtruth_boxes = 'groundtruth_boxes'
groundtruth_classes = 'groundtruth_classes'
groundtruth_confidences = 'groundtruth_confidences'
groundtruth_label_types = 'groundtruth_label_types'
groundtruth_is_crowd = 'groundtruth_is_crowd'
groundtruth_area = 'groundtruth_area'
groundtruth_difficult = 'groundtruth_difficult'
groundtruth_group_of = 'groundtruth_group_of'
proposal_boxes = 'proposal_boxes'
proposal_objectness = 'proposal_objectness'
groundtruth_instance_masks = 'groundtruth_instance_masks'
groundtruth_instance_boundaries = 'groundtruth_instance_boundaries'
groundtruth_instance_classes = 'groundtruth_instance_classes'
groundtruth_keypoints = 'groundtruth_keypoints'
groundtruth_keypoint_visibilities = 'groundtruth_keypoint_visibilities'
groundtruth_keypoint_weights = 'groundtruth_keypoint_weights'
groundtruth_label_weights = 'groundtruth_label_weights'
groundtruth_weights = 'groundtruth_weights'
num_groundtruth_boxes = 'num_groundtruth_boxes'
is_annotated = 'is_annotated'
true_image_shape = 'true_image_shape'
multiclass_scores = 'multiclass_scores'
context_features = 'context_features'
context_feature_length = 'context_feature_length'
valid_context_size = 'valid_context_size'
class DetectionResultFields(object):
"""Naming conventions for storing the output of the detector.
Attributes:
source_id: source of the original image.
key: unique key corresponding to image.
detection_boxes: coordinates of the detection boxes in the image.
detection_scores: detection scores for the detection boxes in the image.
detection_multiclass_scores: class score distribution (including background)
for detection boxes in the image.
detection_classes: detection-level class labels.
detection_masks: contains a segmentation mask for each detection box.
detection_boundaries: contains an object boundary for each detection box.
detection_keypoints: contains detection keypoints for each detection box.
detection_keypoint_scores: contains detection keypoint scores.
num_detections: number of detections in the batch.
raw_detection_boxes: contains decoded detection boxes without Non-Max
suppression.
raw_detection_scores: contains class score logits for raw detection boxes.
detection_anchor_indices: The anchor indices of the detections after NMS.
detection_features: contains extracted features for each detected box
after NMS.
"""
source_id = 'source_id'
key = 'key'
detection_boxes = 'detection_boxes'
detection_scores = 'detection_scores'
detection_multiclass_scores = 'detection_multiclass_scores'
detection_features = 'detection_features'
detection_classes = 'detection_classes'
detection_masks = 'detection_masks'
detection_boundaries = 'detection_boundaries'
detection_keypoints = 'detection_keypoints'
detection_keypoint_scores = 'detection_keypoint_scores'
num_detections = 'num_detections'
raw_detection_boxes = 'raw_detection_boxes'
raw_detection_scores = 'raw_detection_scores'
detection_anchor_indices = 'detection_anchor_indices'
class BoxListFields(object):
"""Naming conventions for BoxLists.
Attributes:
boxes: bounding box coordinates.
classes: classes per bounding box.
scores: scores per bounding box.
weights: sample weights per bounding box.
objectness: objectness score per bounding box.
masks: masks per bounding box.
boundaries: boundaries per bounding box.
keypoints: keypoints per bounding box.
keypoint_heatmaps: keypoint heatmaps per bounding box.
is_crowd: is_crowd annotation per bounding box.
"""
boxes = 'boxes'
classes = 'classes'
scores = 'scores'
weights = 'weights'
confidences = 'confidences'
objectness = 'objectness'
masks = 'masks'
boundaries = 'boundaries'
keypoints = 'keypoints'
keypoint_heatmaps = 'keypoint_heatmaps'
is_crowd = 'is_crowd'
class PredictionFields(object):
"""Naming conventions for standardized prediction outputs.
Attributes:
feature_maps: List of feature maps for prediction.
anchors: Generated anchors.
raw_detection_boxes: Decoded detection boxes without NMS.
raw_detection_feature_map_indices: Feature map indices from which each raw
detection box was produced.
"""
feature_maps = 'feature_maps'
anchors = 'anchors'
raw_detection_boxes = 'raw_detection_boxes'
raw_detection_feature_map_indices = 'raw_detection_feature_map_indices'
class TfExampleFields(object):
"""TF-example proto feature names for object detection.
Holds the standard feature names to load from an Example proto for object
detection.
Attributes:
image_encoded: JPEG encoded string
image_format: image format, e.g. "JPEG"
filename: filename
channels: number of channels of image
colorspace: colorspace, e.g. "RGB"
height: height of image in pixels, e.g. 462
width: width of image in pixels, e.g. 581
source_id: original source of the image
image_class_text: image-level label in text format
image_class_label: image-level label in numerical format
object_class_text: labels in text format, e.g. ["person", "cat"]
object_class_label: labels in numbers, e.g. [16, 8]
object_bbox_xmin: xmin coordinates of groundtruth box, e.g. 10, 30
object_bbox_xmax: xmax coordinates of groundtruth box, e.g. 50, 40
object_bbox_ymin: ymin coordinates of groundtruth box, e.g. 40, 50
object_bbox_ymax: ymax coordinates of groundtruth box, e.g. 80, 70
object_view: viewpoint of object, e.g. ["frontal", "left"]
object_truncated: is object truncated, e.g. [true, false]
object_occluded: is object occluded, e.g. [true, false]
object_difficult: is object difficult, e.g. [true, false]
object_group_of: is object a single object or a group of objects
object_depiction: is object a depiction
object_is_crowd: [DEPRECATED, use object_group_of instead]
is the object a single object or a crowd
object_segment_area: the area of the segment.
object_weight: a weight factor for the object's bounding box.
instance_masks: instance segmentation masks.
instance_boundaries: instance boundaries.
instance_classes: Classes for each instance segmentation mask.
detection_class_label: class label in numbers.
detection_bbox_ymin: ymin coordinates of a detection box.
detection_bbox_xmin: xmin coordinates of a detection box.
detection_bbox_ymax: ymax coordinates of a detection box.
detection_bbox_xmax: xmax coordinates of a detection box.
detection_score: detection score for the class label and box.
"""
image_encoded = 'image/encoded'
image_format = 'image/format' # format is reserved keyword
filename = 'image/filename'
channels = 'image/channels'
colorspace = 'image/colorspace'
height = 'image/height'
width = 'image/width'
source_id = 'image/source_id'
image_class_text = 'image/class/text'
image_class_label = 'image/class/label'
object_class_text = 'image/object/class/text'
object_class_label = 'image/object/class/label'
object_bbox_ymin = 'image/object/bbox/ymin'
object_bbox_xmin = 'image/object/bbox/xmin'
object_bbox_ymax = 'image/object/bbox/ymax'
object_bbox_xmax = 'image/object/bbox/xmax'
object_view = 'image/object/view'
object_truncated = 'image/object/truncated'
object_occluded = 'image/object/occluded'
object_difficult = 'image/object/difficult'
object_group_of = 'image/object/group_of'
object_depiction = 'image/object/depiction'
object_is_crowd = 'image/object/is_crowd'
object_segment_area = 'image/object/segment/area'
object_weight = 'image/object/weight'
instance_masks = 'image/segmentation/object'
instance_boundaries = 'image/boundaries/object'
instance_classes = 'image/segmentation/object/class'
detection_class_label = 'image/detection/label'
detection_bbox_ymin = 'image/detection/bbox/ymin'
detection_bbox_xmin = 'image/detection/bbox/xmin'
detection_bbox_ymax = 'image/detection/bbox/ymax'
detection_bbox_xmax = 'image/detection/bbox/xmax'
detection_score = 'image/detection/score'
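# Editor's note -- illustrative usage sketch, not part of the original module.
# These classes act as namespaces of string constants; `tensor_dict` below is
# a hypothetical decoder output keyed by InputDataFields names:
#   image = tensor_dict[InputDataFields.image]
#   boxes = tensor_dict[InputDataFields.groundtruth_boxes]
#   feature_key = TfExampleFields.object_bbox_xmin  # 'image/object/bbox/xmin'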
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/visualize/standard_fields.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A set of functions that are used for visualization.
These functions often receive an image, perform some visualization on the image.
The functions do not return a value, instead they modify the image itself.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import matplotlib
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
import numpy as np
import PIL.Image as Image
import PIL.ImageColor as ImageColor
import PIL.ImageDraw as ImageDraw
import PIL.ImageFont as ImageFont
import six
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
from nvidia_tao_tf1.cv.efficientdet.visualize import shape_utils
from nvidia_tao_tf1.cv.efficientdet.visualize import standard_fields as fields
matplotlib.use('Agg') # Set headless-friendly backend.
_TITLE_LEFT_MARGIN = 10
_TITLE_TOP_MARGIN = 10
STANDARD_COLORS = [
'AliceBlue', 'Chartreuse', 'Aqua', 'Aquamarine', 'Azure', 'Beige', 'Bisque',
'BlanchedAlmond', 'BlueViolet', 'BurlyWood', 'CadetBlue', 'AntiqueWhite',
'Chocolate', 'Coral', 'CornflowerBlue', 'Cornsilk', 'Crimson', 'Cyan',
'DarkCyan', 'DarkGoldenRod', 'DarkGrey', 'DarkKhaki', 'DarkOrange',
'DarkOrchid', 'DarkSalmon', 'DarkSeaGreen', 'DarkTurquoise', 'DarkViolet',
'DeepPink', 'DeepSkyBlue', 'DodgerBlue', 'FireBrick', 'FloralWhite',
'ForestGreen', 'Fuchsia', 'Gainsboro', 'GhostWhite', 'Gold', 'GoldenRod',
'Salmon', 'Tan', 'HoneyDew', 'HotPink', 'IndianRed', 'Ivory', 'Khaki',
'Lavender', 'LavenderBlush', 'LawnGreen', 'LemonChiffon', 'LightBlue',
'LightCoral', 'LightCyan', 'LightGoldenRodYellow', 'LightGray', 'LightGrey',
'LightGreen', 'LightPink', 'LightSalmon', 'LightSeaGreen', 'LightSkyBlue',
'LightSlateGray', 'LightSlateGrey', 'LightSteelBlue', 'LightYellow', 'Lime',
'LimeGreen', 'Linen', 'Magenta', 'MediumAquaMarine', 'MediumOrchid',
'MediumPurple', 'MediumSeaGreen', 'MediumSlateBlue', 'MediumSpringGreen',
'MediumTurquoise', 'MediumVioletRed', 'MintCream', 'MistyRose', 'Moccasin',
'NavajoWhite', 'OldLace', 'Olive', 'OliveDrab', 'Orange', 'OrangeRed',
'Orchid', 'PaleGoldenRod', 'PaleGreen', 'PaleTurquoise', 'PaleVioletRed',
'PapayaWhip', 'PeachPuff', 'Peru', 'Pink', 'Plum', 'PowderBlue', 'Purple',
'Red', 'RosyBrown', 'RoyalBlue', 'SaddleBrown', 'Green', 'SandyBrown',
'SeaGreen', 'SeaShell', 'Sienna', 'Silver', 'SkyBlue', 'SlateBlue',
'SlateGray', 'SlateGrey', 'Snow', 'SpringGreen', 'SteelBlue', 'GreenYellow',
'Teal', 'Thistle', 'Tomato', 'Turquoise', 'Violet', 'Wheat', 'White',
'WhiteSmoke', 'Yellow', 'YellowGreen'
]
def _get_multiplier_for_color_randomness():
"""Returns a multiplier to get semi-random colors from successive indices.
This function computes a prime number, p, in the range [2, 17] that:
- is closest to len(STANDARD_COLORS) / 10
- does not divide len(STANDARD_COLORS)
If no prime numbers in that range satisfy the constraints, p is returned as 1.
Once p is established, it can be used as a multiplier to select
non-consecutive colors from STANDARD_COLORS:
colors = [(p * i) % len(STANDARD_COLORS) for i in range(20)]
"""
num_colors = len(STANDARD_COLORS)
prime_candidates = [5, 7, 11, 13, 17]
# Remove all prime candidates that divide the number of colors.
prime_candidates = [p for p in prime_candidates if num_colors % p]
if not prime_candidates:
return 1
# Return the closest prime number to num_colors / 10.
abs_distance = [np.abs(num_colors / 10. - p) for p in prime_candidates]
num_candidates = len(abs_distance)
inds = [i for _, i in sorted(zip(abs_distance, range(num_candidates)))]
return prime_candidates[inds[0]]
def save_image_array_as_png(image, output_path):
"""Saves an image (represented as a numpy array) to PNG.
Args:
image: a numpy array with shape [height, width, 3].
output_path: path to which image should be written.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
with tf.gfile.Open(output_path, 'w') as fid:
image_pil.save(fid, 'PNG')
def encode_image_array_as_png_str(image):
"""Encodes a numpy array into a PNG string.
Args:
image: a numpy array with shape [height, width, 3].
Returns:
PNG encoded image string.
"""
image_pil = Image.fromarray(np.uint8(image))
output = six.BytesIO()
image_pil.save(output, format='PNG')
png_string = output.getvalue()
output.close()
return png_string
def draw_bounding_box_on_image_array(image,
ymin,
xmin,
ymax,
xmax,
color='red',
thickness=4,
display_str_list=(),
use_normalized_coordinates=True):
"""Adds a bounding box to an image (numpy array).
Bounding box coordinates can be specified in either absolute (pixel) or
normalized coordinates by setting the use_normalized_coordinates argument.
Args:
image: a numpy array with shape [height, width, 3].
ymin: ymin of bounding box.
xmin: xmin of bounding box.
ymax: ymax of bounding box.
xmax: xmax of bounding box.
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list: list of strings to display in box (each to be shown on its
own line).
use_normalized_coordinates: If True (default), treat coordinates ymin, xmin,
ymax, xmax as relative to the image. Otherwise treat coordinates as
absolute.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
draw_bounding_box_on_image(image_pil, ymin, xmin, ymax, xmax, color,
thickness, display_str_list,
use_normalized_coordinates)
np.copyto(image, np.array(image_pil))
def draw_bounding_box_on_image(image,
ymin,
xmin,
ymax,
xmax,
color='red',
thickness=4,
display_str_list=(),
use_normalized_coordinates=True):
"""Adds a bounding box to an image.
Bounding box coordinates can be specified in either absolute (pixel) or
normalized coordinates by setting the use_normalized_coordinates argument.
Each string in display_str_list is displayed on a separate line above the
bounding box in black text on a rectangle filled with the input 'color'.
If the top of the bounding box extends to the edge of the image, the strings
are displayed below the bounding box.
Args:
image: a PIL.Image object.
ymin: ymin of bounding box.
xmin: xmin of bounding box.
ymax: ymax of bounding box.
xmax: xmax of bounding box.
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list: list of strings to display in box (each to be shown on its
own line).
use_normalized_coordinates: If True (default), treat coordinates ymin, xmin,
ymax, xmax as relative to the image. Otherwise treat coordinates as
absolute.
"""
draw = ImageDraw.Draw(image)
im_width, im_height = image.size
if use_normalized_coordinates:
(left, right, top, bottom) = (
xmin * im_width, xmax * im_width, ymin * im_height, ymax * im_height)
else:
(left, right, top, bottom) = (xmin, xmax, ymin, ymax)
if thickness > 0:
draw.line([(left, top), (left, bottom), (right, bottom), (right, top),
(left, top)],
width=thickness,
fill=color)
try:
font = ImageFont.truetype('arial.ttf', 24)
except IOError:
font = ImageFont.load_default()
# If the total height of the display strings added to the top of the bounding
# box exceeds the top of the image, stack the strings below the bounding box
# instead of above.
display_str_heights = [font.getsize(ds)[1] for ds in display_str_list]
# Each display_str has a top and bottom margin of 0.05x.
total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights)
if top > total_display_str_height:
text_bottom = top
else:
text_bottom = bottom + total_display_str_height
# Reverse list and print from bottom to top.
for display_str in display_str_list[::-1]:
text_width, text_height = font.getsize(display_str)
margin = np.ceil(0.05 * text_height)
draw.rectangle([(left, text_bottom - text_height - 2 * margin),
(left + text_width, text_bottom)],
fill=color)
draw.text((
left + margin, text_bottom - text_height - margin),
display_str,
fill='black',
font=font)
text_bottom -= text_height - 2 * margin
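# Editor's note -- illustrative usage sketch, not part of the original module.
# Drawing a single normalized box with a label on a PIL image; the file path,
# coordinates and label string below are hypothetical:
#   img = Image.open('example.jpg')
#   draw_bounding_box_on_image(img, 0.1, 0.2, 0.6, 0.8, color='red',
#                              display_str_list=['person: 87%'])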
def draw_bounding_boxes_on_image_array(image,
boxes,
color='red',
thickness=4,
display_str_list_list=()):
"""Draws bounding boxes on image (numpy array).
Args:
image: a numpy array object.
boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax). The
coordinates are in normalized format between [0, 1].
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list_list: list of list of strings. a list of strings for each
bounding box. The reason to pass a list of strings for a bounding box is
that it might contain multiple labels.
Raises:
ValueError: if boxes is not a [N, 4] array
"""
image_pil = Image.fromarray(image)
draw_bounding_boxes_on_image(image_pil, boxes, color, thickness,
display_str_list_list)
np.copyto(image, np.array(image_pil))
def draw_bounding_boxes_on_image(image,
boxes,
color='red',
thickness=4,
display_str_list_list=()):
"""Draws bounding boxes on image.
Args:
image: a PIL.Image object.
boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax). The
coordinates are in normalized format between [0, 1].
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list_list: list of list of strings. a list of strings for each
bounding box. The reason to pass a list of strings for a bounding box is
that it might contain multiple labels.
Raises:
ValueError: if boxes is not a [N, 4] array
"""
boxes_shape = boxes.shape
if not boxes_shape:
return
if len(boxes_shape) != 2 or boxes_shape[1] != 4:
raise ValueError('Input must be of size [N, 4]')
for i in range(boxes_shape[0]):
display_str_list = ()
if display_str_list_list:
display_str_list = display_str_list_list[i]
draw_bounding_box_on_image(image, boxes[i, 0], boxes[i, 1], boxes[i, 2],
boxes[i, 3], color, thickness, display_str_list)
def create_visualization_fn(category_index,
include_masks=False,
include_keypoints=False,
include_track_ids=False,
**kwargs):
"""Constructs a visualization function that can be wrapped in a py_func.
py_funcs only accept positional arguments. This function returns a suitable
function with the correct positional argument mapping. The positional
arguments in order are:
0: image
1: boxes
2: classes
3: scores
[4-6]: masks (optional)
[4-6]: keypoints (optional)
[4-6]: track_ids (optional)
-- Example 1 --
vis_only_masks_fn = create_visualization_fn(category_index,
include_masks=True, include_keypoints=False, include_track_ids=False,
**kwargs)
image = tf.py_func(vis_only_masks_fn,
inp=[image, boxes, classes, scores, masks],
Tout=tf.uint8)
-- Example 2 --
vis_masks_and_track_ids_fn = create_visualization_fn(category_index,
include_masks=True, include_keypoints=False, include_track_ids=True,
**kwargs)
image = tf.py_func(vis_masks_and_track_ids_fn,
inp=[image, boxes, classes, scores, masks, track_ids],
Tout=tf.uint8)
Args:
category_index: a dict that maps integer ids to category dicts. e.g.
{1: {1: 'dog'}, 2: {2: 'cat'}, ...}
include_masks: Whether masks should be expected as a positional argument in
the returned function.
include_keypoints: Whether keypoints should be expected as a positional
argument in the returned function.
include_track_ids: Whether track ids should be expected as a positional
argument in the returned function.
**kwargs: Additional kwargs that will be passed to
visualize_boxes_and_labels_on_image_array.
Returns:
Returns a function that only takes tensors as positional arguments.
"""
def visualization_py_func_fn(*args):
"""Visualization function that can be wrapped in a tf.py_func.
Args:
*args: First 4 positional arguments must be: image - uint8 numpy array
with shape (img_height, img_width, 3). boxes - a numpy array of shape
[N, 4]. classes - a numpy array of shape [N]. scores - a numpy array of
shape [N] or None. -- Optional positional arguments -- instance_masks -
a numpy array of shape [N, image_height, image_width]. keypoints - a
numpy array of shape [N, num_keypoints, 2]. track_ids - a numpy array of
shape [N] with unique track ids.
Returns:
uint8 numpy array with shape (img_height, img_width, 3) with overlaid
boxes.
"""
image = args[0]
boxes = args[1]
classes = args[2]
scores = args[3]
masks = keypoints = track_ids = None
pos_arg_ptr = 4 # Positional argument for first optional tensor (masks).
if include_masks:
masks = args[pos_arg_ptr]
pos_arg_ptr += 1
if include_keypoints:
keypoints = args[pos_arg_ptr]
pos_arg_ptr += 1
if include_track_ids:
track_ids = args[pos_arg_ptr]
return visualize_boxes_and_labels_on_image_array(
image,
boxes,
classes,
scores,
category_index=category_index,
instance_masks=masks,
keypoints=keypoints,
track_ids=track_ids,
**kwargs)
return visualization_py_func_fn
def _resize_original_image(image, image_shape):
image = tf.expand_dims(image, 0)
image = tf.image.resize_images(
image,
image_shape,
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
align_corners=True)
return tf.cast(tf.squeeze(image, 0), tf.uint8)
def draw_bounding_boxes_on_image_tensors(images,
boxes,
classes,
scores,
category_index,
original_image_spatial_shape=None,
true_image_shape=None,
instance_masks=None,
keypoints=None,
keypoint_edges=None,
track_ids=None,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=True):
"""Draws bounding boxes, masks, and keypoints on batch of image tensors.
Args:
images: A 4D uint8 image tensor of shape [N, H, W, C]. If C > 3, additional
channels will be ignored. If C = 1, then we convert the images to RGB
images.
boxes: [N, max_detections, 4] float32 tensor of detection boxes.
classes: [N, max_detections] int tensor of detection classes. Note that
classes are 1-indexed.
scores: [N, max_detections] float32 tensor of detection scores.
category_index: a dict that maps integer ids to category dicts. e.g.
{1: {1: 'dog'}, 2: {2: 'cat'}, ...}
original_image_spatial_shape: [N, 2] tensor containing the spatial size of
the original image.
true_image_shape: [N, 3] tensor containing the spatial size of unpadded
original_image.
instance_masks: A 4D uint8 tensor of shape [N, max_detection, H, W] with
instance masks.
keypoints: A 4D float32 tensor of shape [N, max_detection, num_keypoints, 2]
with keypoints.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
track_ids: [N, max_detections] int32 tensor of unique track ids (i.e.
instance ids for each object). If provided, the color-coding of boxes is
dictated by these ids, and not classes.
max_boxes_to_draw: Maximum number of boxes to draw on an image. Default 20.
min_score_thresh: Minimum score threshold for visualization. Default 0.2.
use_normalized_coordinates: Whether to assume boxes and keypoints are in
normalized coordinates (as opposed to absolute coordinates). Default is
True.
Returns:
4D image tensor of type uint8, with boxes drawn on top.
"""
# Additional channels are being ignored.
if images.shape[3] > 3:
images = images[:, :, :, 0:3]
elif images.shape[3] == 1:
images = tf.image.grayscale_to_rgb(images)
visualization_keyword_args = {
'use_normalized_coordinates': use_normalized_coordinates,
'max_boxes_to_draw': max_boxes_to_draw,
'min_score_thresh': min_score_thresh,
'agnostic_mode': False,
'line_thickness': 4,
'keypoint_edges': keypoint_edges
}
if true_image_shape is None:
true_shapes = tf.constant(-1, shape=[images.shape.as_list()[0], 3])
else:
true_shapes = true_image_shape
if original_image_spatial_shape is None:
original_shapes = tf.constant(-1, shape=[images.shape.as_list()[0], 2])
else:
original_shapes = original_image_spatial_shape
visualize_boxes_fn = create_visualization_fn(
category_index,
include_masks=instance_masks is not None,
include_keypoints=keypoints is not None,
include_track_ids=track_ids is not None,
**visualization_keyword_args)
elems = [true_shapes, original_shapes, images, boxes, classes, scores]
if instance_masks is not None:
elems.append(instance_masks)
if keypoints is not None:
elems.append(keypoints)
if track_ids is not None:
elems.append(track_ids)
def draw_boxes(image_and_detections):
"""Draws boxes on image."""
true_shape = image_and_detections[0]
original_shape = image_and_detections[1]
if true_image_shape is not None:
image = shape_utils.pad_or_clip_nd(image_and_detections[2],
[true_shape[0], true_shape[1], 3])
if original_image_spatial_shape is not None:
image_and_detections[2] = _resize_original_image(image, original_shape)
image_with_boxes = tf.py_func(visualize_boxes_fn, image_and_detections[2:],
tf.uint8)
return image_with_boxes
images = tf.map_fn(draw_boxes, elems, dtype=tf.uint8, back_prop=False)
return images
def draw_side_by_side_evaluation_image(eval_dict,
category_index,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=True,
keypoint_edges=None):
"""Creates a side-by-side image with detections and groundtruth.
Bounding boxes (and instance masks, if available) are visualized on both
subimages.
Args:
eval_dict: The evaluation dictionary returned by
eval_util.result_dict_for_batched_example() or
eval_util.result_dict_for_single_example().
category_index: A category index (dictionary) produced from a labelmap.
max_boxes_to_draw: The maximum number of boxes to draw for detections.
min_score_thresh: The minimum score threshold for showing detections.
use_normalized_coordinates: Whether to assume boxes and keypoints are in
normalized coordinates (as opposed to absolute coordinates). Default is
True.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
Returns:
A list of [1, H, 2 * W, C] uint8 tensors. The subimage on the left
corresponds to detections, while the subimage on the right corresponds to
groundtruth.
"""
detection_fields = fields.DetectionResultFields()
input_data_fields = fields.InputDataFields()
images_with_detections_list = []
# Add the batch dimension if the eval_dict is for single example.
if len(eval_dict[detection_fields.detection_classes].shape) == 1:
for key in eval_dict:
if key not in (input_data_fields.original_image,
input_data_fields.image_additional_channels):
eval_dict[key] = tf.expand_dims(eval_dict[key], 0)
for indx in range(eval_dict[input_data_fields.original_image].shape[0]):
instance_masks = None
if detection_fields.detection_masks in eval_dict:
instance_masks = tf.cast(
tf.expand_dims(
eval_dict[detection_fields.detection_masks][indx], axis=0),
tf.uint8)
keypoints = None
if detection_fields.detection_keypoints in eval_dict:
keypoints = tf.expand_dims(
eval_dict[detection_fields.detection_keypoints][indx], axis=0)
groundtruth_instance_masks = None
if input_data_fields.groundtruth_instance_masks in eval_dict:
groundtruth_instance_masks = tf.cast(
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_instance_masks][indx],
axis=0), tf.uint8)
images_with_detections = draw_bounding_boxes_on_image_tensors(
tf.expand_dims(
eval_dict[input_data_fields.original_image][indx], axis=0),
tf.expand_dims(
eval_dict[detection_fields.detection_boxes][indx], axis=0),
tf.expand_dims(
eval_dict[detection_fields.detection_classes][indx], axis=0),
tf.expand_dims(
eval_dict[detection_fields.detection_scores][indx], axis=0),
category_index,
original_image_spatial_shape=tf.expand_dims(
eval_dict[input_data_fields.original_image_spatial_shape][indx],
axis=0),
true_image_shape=tf.expand_dims(
eval_dict[input_data_fields.true_image_shape][indx], axis=0),
instance_masks=instance_masks,
keypoints=keypoints,
keypoint_edges=keypoint_edges,
max_boxes_to_draw=max_boxes_to_draw,
min_score_thresh=min_score_thresh,
use_normalized_coordinates=use_normalized_coordinates)
images_with_groundtruth = draw_bounding_boxes_on_image_tensors(
tf.expand_dims(
eval_dict[input_data_fields.original_image][indx], axis=0),
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_boxes][indx], axis=0),
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_classes][indx], axis=0),
tf.expand_dims(
tf.ones_like(
eval_dict[input_data_fields.groundtruth_classes][indx],
dtype=tf.float32),
axis=0),
category_index,
original_image_spatial_shape=tf.expand_dims(
eval_dict[input_data_fields.original_image_spatial_shape][indx],
axis=0),
true_image_shape=tf.expand_dims(
eval_dict[input_data_fields.true_image_shape][indx], axis=0),
instance_masks=groundtruth_instance_masks,
keypoints=None,
keypoint_edges=None,
max_boxes_to_draw=None,
min_score_thresh=0.0,
use_normalized_coordinates=use_normalized_coordinates)
images_to_visualize = tf.concat(
[images_with_detections, images_with_groundtruth], axis=2)
if input_data_fields.image_additional_channels in eval_dict:
images_with_additional_channels_groundtruth = (
draw_bounding_boxes_on_image_tensors(
tf.expand_dims(
eval_dict[input_data_fields.image_additional_channels][indx],
axis=0),
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_boxes][indx], axis=0),
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_classes][indx],
axis=0),
tf.expand_dims(
tf.ones_like(
eval_dict[input_data_fields.groundtruth_classes][indx],
dtype=tf.float32),
axis=0),
category_index,
original_image_spatial_shape=tf.expand_dims(
eval_dict[input_data_fields.original_image_spatial_shape]
[indx],
axis=0),
true_image_shape=tf.expand_dims(
eval_dict[input_data_fields.true_image_shape][indx], axis=0),
instance_masks=groundtruth_instance_masks,
keypoints=None,
keypoint_edges=None,
max_boxes_to_draw=None,
min_score_thresh=0.0,
use_normalized_coordinates=use_normalized_coordinates))
images_to_visualize = tf.concat(
[images_to_visualize, images_with_additional_channels_groundtruth],
axis=2)
images_with_detections_list.append(images_to_visualize)
return images_with_detections_list
def draw_keypoints_on_image_array(image,
keypoints,
color='red',
radius=2,
use_normalized_coordinates=True,
keypoint_edges=None,
keypoint_edge_color='green',
keypoint_edge_width=2):
"""Draws keypoints on an image (numpy array).
Args:
image: a numpy array with shape [height, width, 3].
keypoints: a numpy array with shape [num_keypoints, 2].
color: color to draw the keypoints with. Default is red.
radius: keypoint radius. Default value is 2.
use_normalized_coordinates: if True (default), treat keypoint values as
relative to the image. Otherwise treat them as absolute.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
keypoint_edge_color: color to draw the keypoint edges with. Default is green.
keypoint_edge_width: width of the edges drawn between keypoints. Default
value is 2.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
draw_keypoints_on_image(image_pil, keypoints, color, radius,
use_normalized_coordinates, keypoint_edges,
keypoint_edge_color, keypoint_edge_width)
np.copyto(image, np.array(image_pil))
def draw_keypoints_on_image(image,
keypoints,
color='red',
radius=2,
use_normalized_coordinates=True,
keypoint_edges=None,
keypoint_edge_color='green',
keypoint_edge_width=2):
"""Draws keypoints on an image.
Args:
image: a PIL.Image object.
keypoints: a numpy array with shape [num_keypoints, 2].
color: color to draw the keypoints with. Default is red.
radius: keypoint radius. Default value is 2.
use_normalized_coordinates: if True (default), treat keypoint values as
relative to the image. Otherwise treat them as absolute.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
keypoint_edge_color: color to draw the keypoint edges with. Default is green.
keypoint_edge_width: width of the edges drawn between keypoints. Default
value is 2.
"""
draw = ImageDraw.Draw(image)
im_width, im_height = image.size
keypoints_x = [k[1] for k in keypoints]
keypoints_y = [k[0] for k in keypoints]
if use_normalized_coordinates:
keypoints_x = tuple([im_width * x for x in keypoints_x])
keypoints_y = tuple([im_height * y for y in keypoints_y])
for keypoint_x, keypoint_y in zip(keypoints_x, keypoints_y):
draw.ellipse([(keypoint_x - radius, keypoint_y - radius),
(keypoint_x + radius, keypoint_y + radius)],
outline=color,
fill=color)
if keypoint_edges is not None:
for keypoint_start, keypoint_end in keypoint_edges:
if (keypoint_start < 0 or keypoint_start >= len(keypoints) or
keypoint_end < 0 or keypoint_end >= len(keypoints)):
continue
edge_coordinates = [
keypoints_x[keypoint_start], keypoints_y[keypoint_start],
keypoints_x[keypoint_end], keypoints_y[keypoint_end]
]
draw.line(
edge_coordinates, fill=keypoint_edge_color, width=keypoint_edge_width)
def draw_mask_on_image_array(image, mask, color='red', alpha=0.4):
"""Draws mask on an image.
Args:
image: uint8 numpy array with shape (img_height, img_width, 3)
mask: a uint8 numpy array of shape (img_height, img_width) with values
of either 0 or 1.
color: color to draw the mask with. Default is red.
alpha: transparency value between 0 and 1. (default: 0.4)
Raises:
ValueError: On incorrect data type for image or masks.
"""
if image.dtype != np.uint8:
raise ValueError('`image` not of type np.uint8')
if mask.dtype != np.uint8:
raise ValueError('`mask` not of type np.uint8')
if np.any(np.logical_and(mask != 1, mask != 0)):
raise ValueError('`mask` elements should be in [0, 1]')
if image.shape[:2] != mask.shape:
raise ValueError('The image has spatial dimensions %s but the mask has '
'dimensions %s' % (image.shape[:2], mask.shape))
rgb = ImageColor.getrgb(color)
pil_image = Image.fromarray(image)
solid_color = np.expand_dims(
np.ones_like(mask), axis=2) * np.reshape(list(rgb), [1, 1, 3])
pil_solid_color = Image.fromarray(np.uint8(solid_color)).convert('RGBA')
pil_mask = Image.fromarray(np.uint8(255.0 * alpha * mask)).convert('L')
pil_image = Image.composite(pil_solid_color, pil_image, pil_mask)
np.copyto(image, np.array(pil_image.convert('RGB')))
def visualize_boxes_and_labels_on_image_array(image,
boxes,
classes,
scores,
category_index,
instance_masks=None,
instance_boundaries=None,
keypoints=None,
keypoint_edges=None,
track_ids=None,
use_normalized_coordinates=False,
max_boxes_to_draw=20,
min_score_thresh=.5,
agnostic_mode=False,
line_thickness=4,
groundtruth_box_visualization_color='black',
skip_boxes=False,
skip_scores=False,
skip_labels=False,
skip_track_ids=False):
"""Overlay labeled boxes on an image with formatted scores and label names.
This function groups boxes that correspond to the same location
and creates a display string for each detection and overlays these
on the image. Note that this function modifies the image in place, and returns
that same image.
Args:
image: uint8 numpy array with shape (img_height, img_width, 3)
boxes: a numpy array of shape [N, 4]
classes: a numpy array of shape [N]. Note that class indices are 1-based,
and match the keys in the label map.
scores: a numpy array of shape [N] or None. If scores=None, then this
function assumes that the boxes to be plotted are groundtruth boxes and
plot all boxes as black with no classes or scores.
category_index: a dict containing category dictionaries (each holding
category index `id` and category name `name`) keyed by category indices.
instance_masks: a numpy array of shape [N, image_height, image_width] with
values ranging between 0 and 1, can be None.
instance_boundaries: a numpy array of shape [N, image_height, image_width]
with values ranging between 0 and 1, can be None.
keypoints: a numpy array of shape [N, num_keypoints, 2], can be None
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
track_ids: a numpy array of shape [N] with unique track ids. If provided,
color-coding of boxes will be determined by these ids, and not the class
indices.
use_normalized_coordinates: whether boxes is to be interpreted as normalized
coordinates or not.
max_boxes_to_draw: maximum number of boxes to visualize. If None, draw all
boxes.
min_score_thresh: minimum score threshold for a box to be visualized
agnostic_mode: boolean (default: False) controlling whether to evaluate in
class-agnostic mode or not. This mode will display scores but ignore
classes.
line_thickness: integer (default: 4) controlling line width of the boxes.
groundtruth_box_visualization_color: box color for visualizing groundtruth
boxes
skip_boxes: whether to skip the drawing of bounding boxes.
skip_scores: whether to skip score when drawing a single detection
skip_labels: whether to skip label when drawing a single detection
skip_track_ids: whether to skip track id when drawing a single detection
Returns:
uint8 numpy array with shape (img_height, img_width, 3) with overlaid boxes.
"""
# Create a display string (and color) for every box location, group any boxes
# that correspond to the same location.
box_to_display_str_map = collections.defaultdict(list)
box_to_color_map = collections.defaultdict(str)
box_to_instance_masks_map = {}
box_to_instance_boundaries_map = {}
box_to_keypoints_map = collections.defaultdict(list)
box_to_track_ids_map = {}
if not max_boxes_to_draw:
max_boxes_to_draw = boxes.shape[0]
for i in range(boxes.shape[0]):
if max_boxes_to_draw == len(box_to_color_map):
break
if scores is None or scores[i] > min_score_thresh:
box = tuple(boxes[i].tolist())
if instance_masks is not None:
box_to_instance_masks_map[box] = instance_masks[i]
if instance_boundaries is not None:
box_to_instance_boundaries_map[box] = instance_boundaries[i]
if keypoints is not None:
box_to_keypoints_map[box].extend(keypoints[i])
if track_ids is not None:
box_to_track_ids_map[box] = track_ids[i]
if scores is None:
box_to_color_map[box] = groundtruth_box_visualization_color
else:
display_str = ''
if not skip_labels:
if not agnostic_mode:
if classes[i] in six.viewkeys(category_index):
class_name = category_index[classes[i]]['name']
else:
class_name = 'N/A'
display_str = str(class_name)
if not skip_scores:
if not display_str:
display_str = '{}%'.format(int(100 * scores[i]))
else:
display_str = '{}: {}%'.format(display_str, int(100 * scores[i]))
if not skip_track_ids and track_ids is not None:
if not display_str:
display_str = 'ID {}'.format(track_ids[i])
else:
display_str = '{}: ID {}'.format(display_str, track_ids[i])
box_to_display_str_map[box].append(display_str)
if agnostic_mode:
box_to_color_map[box] = 'DarkOrange'
elif track_ids is not None:
prime_multiplier = _get_multiplier_for_color_randomness()
box_to_color_map[box] = STANDARD_COLORS[
(prime_multiplier * track_ids[i]) % len(STANDARD_COLORS)]
else:
box_to_color_map[box] = STANDARD_COLORS[classes[i] % len(STANDARD_COLORS)]
# Draw all boxes onto image.
for box, color in box_to_color_map.items():
ymin, xmin, ymax, xmax = box
if instance_masks is not None:
draw_mask_on_image_array(
image, box_to_instance_masks_map[box], color=color)
if instance_boundaries is not None:
draw_mask_on_image_array(
image, box_to_instance_boundaries_map[box], color='red', alpha=1.0)
draw_bounding_box_on_image_array(
image,
ymin,
xmin,
ymax,
xmax,
color=color,
thickness=0 if skip_boxes else line_thickness,
display_str_list=box_to_display_str_map[box],
use_normalized_coordinates=use_normalized_coordinates)
if keypoints is not None:
draw_keypoints_on_image_array(
image,
box_to_keypoints_map[box],
color=color,
radius=line_thickness / 2,
use_normalized_coordinates=use_normalized_coordinates,
keypoint_edges=keypoint_edges,
keypoint_edge_color=color,
keypoint_edge_width=line_thickness // 2)
return image
def add_cdf_image_summary(values, name):
"""Adds a tf.summary.image for a CDF plot of the values.
Normalizes `values` such that they sum to 1, plots the cumulative distribution
function and creates a tf image summary.
Args:
values: a 1-D float32 tensor containing the values.
name: name for the image summary.
"""
def cdf_plot(values):
"""Numpy function to plot CDF."""
normalized_values = values / np.sum(values)
sorted_values = np.sort(normalized_values)
cumulative_values = np.cumsum(sorted_values)
fraction_of_examples = (
np.arange(cumulative_values.size, dtype=np.float32) /
cumulative_values.size)
fig = plt.figure(frameon=False)
ax = fig.add_subplot(1, 1, 1)
ax.plot(fraction_of_examples, cumulative_values)
ax.set_ylabel('cumulative normalized values')
ax.set_xlabel('fraction of examples')
fig.canvas.draw()
width, height = fig.get_size_inches() * fig.get_dpi()
image = np.frombuffer(
fig.canvas.tostring_rgb(),
dtype='uint8').reshape(1, int(height), int(width), 3)
return image
cdf_plot = tf.py_func(cdf_plot, [values], tf.uint8)
tf.summary.image(name, cdf_plot)
def add_hist_image_summary(values, bins, name):
"""Adds a tf.summary.image for a histogram plot of the values.
Plots the histogram of values and creates a tf image summary.
Args:
values: a 1-D float32 tensor containing the values.
bins: bin edges which will be directly passed to np.histogram.
name: name for the image summary.
"""
def hist_plot(values, bins):
"""Numpy function to plot hist."""
fig = plt.figure(frameon=False)
ax = fig.add_subplot(1, 1, 1)
y, x = np.histogram(values, bins=bins)
ax.plot(x[:-1], y)
ax.set_ylabel('count')
ax.set_xlabel('value')
fig.canvas.draw()
width, height = fig.get_size_inches() * fig.get_dpi()
image = np.frombuffer(
fig.canvas.tostring_rgb(),
dtype='uint8').reshape(1, int(height), int(width), 3)
return image
hist_plot = tf.py_func(hist_plot, [values, bins], tf.uint8)
tf.summary.image(name, hist_plot)
class EvalMetricOpsVisualization(six.with_metaclass(abc.ABCMeta, object)):
"""Abstract base class responsible for visualizations during evaluation.
Currently, summary images are not run during evaluation. One way to produce
evaluation images in Tensorboard is to provide tf.summary.image strings as
`value_ops` in tf.estimator.EstimatorSpec's `eval_metric_ops`. This class is
responsible for accruing images (with overlaid detections and groundtruth)
and returning a dictionary that can be passed to `eval_metric_ops`.
"""
def __init__(self,
category_index,
max_examples_to_draw=5,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=True,
summary_name_prefix='evaluation_image',
keypoint_edges=None):
"""Creates an EvalMetricOpsVisualization.
Args:
category_index: A category index (dictionary) produced from a labelmap.
max_examples_to_draw: The maximum number of example summaries to produce.
max_boxes_to_draw: The maximum number of boxes to draw for detections.
min_score_thresh: The minimum score threshold for showing detections.
use_normalized_coordinates: Whether to assume boxes and keypoints are in
normalized coordinates (as opposed to absolute coordinates). Default is
True.
summary_name_prefix: A string prefix for each image summary.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
"""
self._category_index = category_index
self._max_examples_to_draw = max_examples_to_draw
self._max_boxes_to_draw = max_boxes_to_draw
self._min_score_thresh = min_score_thresh
self._use_normalized_coordinates = use_normalized_coordinates
self._summary_name_prefix = summary_name_prefix
self._keypoint_edges = keypoint_edges
self._images = []
def clear(self):
"""Reset images."""
self._images = []
def add_images(self, images):
"""Store a list of images, each with shape [1, H, W, C]."""
if len(self._images) >= self._max_examples_to_draw:
return
# Store images and clip list if necessary.
self._images.extend(images)
if len(self._images) > self._max_examples_to_draw:
self._images[self._max_examples_to_draw:] = []
def get_estimator_eval_metric_ops(self, eval_dict):
"""Returns metric ops for use in tf.estimator.EstimatorSpec.
Args:
eval_dict: A dictionary that holds an image, groundtruth, and detections
for a batched example. Note that we use only the first example for
visualization. See eval_util.result_dict_for_batched_example() for a
convenient method for constructing such a dictionary. The dictionary
contains
fields.InputDataFields.original_image: [batch_size, H, W, 3] image.
fields.InputDataFields.original_image_spatial_shape: [batch_size, 2]
tensor containing the size of the original image.
fields.InputDataFields.true_image_shape: [batch_size, 3]
tensor containing the spatial size of the unpadded original image.
fields.InputDataFields.groundtruth_boxes - [batch_size, num_boxes, 4]
float32 tensor with groundtruth boxes in range [0.0, 1.0].
fields.InputDataFields.groundtruth_classes - [batch_size, num_boxes]
int64 tensor with 1-indexed groundtruth classes.
fields.InputDataFields.groundtruth_instance_masks - (optional)
[batch_size, num_boxes, H, W] int64 tensor with instance masks.
fields.DetectionResultFields.detection_boxes - [batch_size,
max_num_boxes, 4] float32 tensor with detection boxes in range [0.0,
1.0].
fields.DetectionResultFields.detection_classes - [batch_size,
max_num_boxes] int64 tensor with 1-indexed detection classes.
fields.DetectionResultFields.detection_scores - [batch_size,
max_num_boxes] float32 tensor with detection scores.
fields.DetectionResultFields.detection_masks - (optional) [batch_size,
max_num_boxes, H, W] float32 tensor of binarized masks.
fields.DetectionResultFields.detection_keypoints - (optional)
[batch_size, max_num_boxes, num_keypoints, 2] float32 tensor with
keypoints.
Returns:
A dictionary of image summary names to tuple of (value_op, update_op). The
`update_op` is the same for all items in the dictionary, and is
responsible for saving a single side-by-side image with detections and
groundtruth. Each `value_op` holds the tf.summary.image string for a given
image.
"""
if self._max_examples_to_draw == 0:
return {}
images = self.images_from_evaluation_dict(eval_dict)
def get_images():
"""Returns a list of images, padded to self._max_images_to_draw."""
images = self._images
while len(images) < self._max_examples_to_draw:
images.append(np.array(0, dtype=np.uint8))
self.clear()
return images
def image_summary_or_default_string(summary_name, image):
"""Returns image summaries for non-padded elements."""
return tf.cond(
tf.equal(tf.size(tf.shape(image)), 4), # pyformat: disable
lambda: tf.summary.image(summary_name, image),
lambda: tf.constant(''))
if tf.executing_eagerly():
self.add_images([[images[0]]])
image_tensors = get_images()
else:
tf.py_func(self.add_images, [[images[0]]], [])
image_tensors = tf.py_func(get_images, [], [tf.uint8] * self._max_examples_to_draw)
eval_metric_ops = {}
for i, image in enumerate(image_tensors):
summary_name = self._summary_name_prefix + '/' + str(i)
value_op = image_summary_or_default_string(summary_name, image)
eval_metric_ops[summary_name] = (value_op, None)
return eval_metric_ops
@abc.abstractmethod
def images_from_evaluation_dict(self, eval_dict):
"""Converts evaluation dictionary into a list of image tensors.
To be overridden by implementations.
Args:
eval_dict: A dictionary with all the necessary information for producing
visualizations.
Returns:
A list of [1, H, W, C] uint8 tensors.
"""
raise NotImplementedError
class VisualizeSingleFrameDetections(EvalMetricOpsVisualization):
"""Class responsible for single-frame object detection visualizations."""
def __init__(self,
category_index,
max_examples_to_draw=5,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=True,
summary_name_prefix='Detections_Left_Groundtruth_Right',
keypoint_edges=None):
"""Init."""
super(VisualizeSingleFrameDetections, self).__init__(
category_index=category_index,
max_examples_to_draw=max_examples_to_draw,
max_boxes_to_draw=max_boxes_to_draw,
min_score_thresh=min_score_thresh,
use_normalized_coordinates=use_normalized_coordinates,
summary_name_prefix=summary_name_prefix,
keypoint_edges=keypoint_edges)
def images_from_evaluation_dict(self, eval_dict):
"""Draw evalution image side by side."""
return draw_side_by_side_evaluation_image(
eval_dict, self._category_index,
self._max_boxes_to_draw,
self._min_score_thresh,
self._use_normalized_coordinates,
self._keypoint_edges)
def visualize_detections(image_path, output_path, detections, labels):
"""Visualize detections."""
image = Image.open(image_path).convert(mode='RGB')
draw = ImageDraw.Draw(image)
line_width = 2
font = ImageFont.load_default()
for d in detections:
color = STANDARD_COLORS[d['class'] % len(STANDARD_COLORS)]
draw.line([(d['xmin'], d['ymin']), (d['xmin'], d['ymax']),
(d['xmax'], d['ymax']), (d['xmax'], d['ymin']),
(d['xmin'], d['ymin'])], width=line_width, fill=color)
label = "Class {}".format(d['class'])
if d['class'] < len(labels):
label = "{}".format(labels[d['class']])
score = d['score']
text = "{}: {}%".format(label, int(100 * score))
if score < 0:
text = label
text_width, text_height = font.getsize(text)
text_bottom = max(text_height, d['ymin'])
text_left = d['xmin']
margin = np.ceil(0.05 * text_height)
draw.rectangle([(text_left, text_bottom - text_height - 2 * margin),
(text_left + text_width, text_bottom)],
fill=color)
draw.text(
(text_left + margin, text_bottom - text_height - margin),
text, fill='black', font=font)
image.save(output_path)
def concat_visualizations(images, names, colors, output_path):
"""Concatenate visualization."""
def draw_text(draw, font, text, width, bar_height, offset, color):
text_width, text_height = font.getsize(text)
draw.rectangle([(offset, 0), (offset + width, bar_height)], fill=color)
draw.text(
(offset + (width - text_width) / 2, text_height - text_height / 2),
text, fill='black', font=font)
bar_height = 18
width = 0
height = 0
for im in images:
width += im.width
height = max(height, im.height)
concat = Image.new('RGB', (width, height + bar_height))
draw = ImageDraw.Draw(concat)
font = ImageFont.load_default()
offset = 0
for i, im in enumerate(images):
concat.paste(im, (offset, bar_height))
draw_text(draw, font, names[i], im.width, bar_height, offset, colors[i])
offset += im.width
concat.save(output_path)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/visualize/vis_utils.py |
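# --- Usage sketch for the vis_utils module above (not part of the original files) ---
# A minimal, hedged example of overlaying a single detection on a numpy image with
# visualize_boxes_and_labels_on_image_array. The category map, the dummy image, and the
# box/class/score values are illustrative assumptions, not taken from the repository;
# the import assumes the module above is installed under its shown path.
import numpy as np

from nvidia_tao_tf1.cv.efficientdet.visualize.vis_utils import (
    visualize_boxes_and_labels_on_image_array,
)

category_index = {1: {'id': 1, 'name': 'person'}, 2: {'id': 2, 'name': 'car'}}
image = np.zeros((480, 640, 3), dtype=np.uint8)              # placeholder frame
boxes = np.array([[0.1, 0.1, 0.6, 0.5]], dtype=np.float32)   # [ymin, xmin, ymax, xmax], normalized
classes = np.array([1], dtype=np.int32)
scores = np.array([0.92], dtype=np.float32)

# Draws in place and returns the same uint8 array with the labeled box overlaid.
annotated = visualize_boxes_and_labels_on_image_array(
    image, boxes, classes, scores, category_index,
    use_normalized_coordinates=True, min_score_thresh=0.5)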
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions to access TensorShape values.
The rank 4 tensor_shape must be of the form [batch_size, height, width, depth].
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def get_dim_as_int(dim):
"""Utility to get v1 or v2 TensorShape dim as an int.
Args:
dim: The TensorShape dimension to get as an int
Returns:
None or an int.
"""
try:
return dim.value
except AttributeError:
return dim
def get_batch_size(tensor_shape):
"""Returns batch size from the tensor shape.
Args:
tensor_shape: A rank 4 TensorShape.
Returns:
An integer representing the batch size of the tensor.
"""
tensor_shape.assert_has_rank(rank=4)
return get_dim_as_int(tensor_shape[0])
def get_height(tensor_shape):
"""Returns height from the tensor shape.
Args:
tensor_shape: A rank 4 TensorShape.
Returns:
An integer representing the height of the tensor.
"""
tensor_shape.assert_has_rank(rank=4)
return get_dim_as_int(tensor_shape[1])
def get_width(tensor_shape):
"""Returns width from the tensor shape.
Args:
tensor_shape: A rank 4 TensorShape.
Returns:
An integer representing the width of the tensor.
"""
tensor_shape.assert_has_rank(rank=4)
return get_dim_as_int(tensor_shape[2])
def get_depth(tensor_shape):
"""Returns depth from the tensor shape.
Args:
tensor_shape: A rank 4 TensorShape.
Returns:
An integer representing the depth of the tensor.
"""
tensor_shape.assert_has_rank(rank=4)
return get_dim_as_int(tensor_shape[3])
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/visualize/static_shape.py |
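# --- Usage sketch for the static_shape helpers above (not part of the original files) ---
# The helpers read static dimensions from a rank-4 [batch, height, width, depth]
# TensorShape; the concrete shape below is an illustrative assumption.
import tensorflow.compat.v1 as tf

from nvidia_tao_tf1.cv.efficientdet.visualize.static_shape import (
    get_batch_size, get_depth, get_height, get_width)

shape = tf.TensorShape([8, 512, 512, 3])

assert get_batch_size(shape) == 8
assert get_height(shape) == 512
assert get_width(shape) == 512
assert get_depth(shape) == 3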
"""Checkpointing hook with encryption."""
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import shutil
import tempfile
from zipfile import ZipFile
import tensorflow as tf
from nvidia_tao_tf1.encoding import encoding
__all__ = ["EncryptCheckpointSaverHook"]
class EncryptCheckpointSaverHook(tf.estimator.SessionRunHook):
"""Saves encrypted checkpoints every N steps or seconds."""
def __init__(self, checkpoint_dir, temp_dir, key, checkpoint_basename="model.ckpt",
steps_per_epoch=100):
"""Initializes a `EncryptCheckpointSaverHook`.
Args:
checkpoint_dir: `str`, base directory for the checkpoint files.
checkpoint_basename: `str`, base name for the checkpoint files.
Raises:
ValueError: One of `save_steps` or `save_secs` should be set.
ValueError: At most one of `saver` or `scaffold` should be set.
"""
logging.info("Create EncryptCheckpointSaverHook.")
self._saver = None
self._checkpoint_dir = checkpoint_dir
self._save_path = temp_dir
self._key = key
self._steps_per_run = 1
self._steps_per_epoch = steps_per_epoch
self._is_initialized = False
self._global_step_tensor = None
self._summary_writer = None
def _set_steps_per_run(self, steps_per_run):
"""Set steps."""
self._steps_per_run = steps_per_run
def begin(self):
"""Begin."""
self._global_step_tensor = tf.compat.v1.train.get_or_create_global_step()
self._saver = tf.compat.v1.train.Saver()
from tensorflow.python.training import summary_io
self._summary_writer = summary_io.SummaryWriterCache.get(self._checkpoint_dir)
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use EncryptCheckpointSaverHook."
)
def after_create_session(self, session, coord):
"""After session created."""
if not self._is_initialized:
global_step = session.run(self._global_step_tensor)
from tensorflow.python.keras.backend import get_graph
default_graph = get_graph()
# We do write graph and saver_def at the first call of before_run.
# We cannot do this in begin, since we let other hooks to change graph and
# add variables in begin. Graph is finalized after all begin calls.
tf.io.write_graph(
default_graph.as_graph_def(add_shapes=True),
self._checkpoint_dir,
"graph.pbtxt"
)
saver_def = self._saver.saver_def
from tensorflow.python.framework import meta_graph
meta_graph_def = meta_graph.create_meta_graph_def(
graph_def=default_graph.as_graph_def(add_shapes=True),
saver_def=saver_def
)
self._summary_writer.add_graph(default_graph)
self._summary_writer.add_meta_graph(meta_graph_def)
# The checkpoint saved here is the state at step "global_step".
self._save(session, global_step)
self._is_initialized = True
def end(self, session):
"""Run session."""
last_step = session.run(self._global_step_tensor)
self._save(session, last_step)
def _save(self, session, step):
"""Saves the latest checkpoint, returns should_stop."""
# reset model saving path
# to prevent unwanted files after evaluation
if os.path.exists(self._save_path):
shutil.rmtree(self._save_path)
os.mkdir(self._save_path)
self._saver.save(session, os.path.join(self._save_path, "model.ckpt"), global_step=step)
# Template for zip file.
# Step based checkpoint naming <TAO5.0
# tlt_file = os.path.join(self._checkpoint_dir, 'model.step-{}.tlt'.format(step))
# New checkpoint naming for >TAO5.0
epoch = int(step / self._steps_per_epoch)
tlt_file = os.path.join(self._checkpoint_dir, 'model.epoch-{}.tlt'.format(epoch))
logging.info("Saving checkpoints for epoch %d into %s.", epoch, tlt_file)
prev_dir = os.getcwd()
os.chdir(self._save_path)
# Zip the checkpoint files to one file.
with ZipFile(tlt_file, 'w') as zip_object:
for ckpt_file in os.listdir(self._save_path):
zip_object.write(ckpt_file)
# Restore previous execution directory and remove tmp files/directories.
os.chdir(prev_dir)
shutil.rmtree(self._save_path)
os.mkdir(self._save_path)
self._summary_writer.add_session_log(
tf.compat.v1.SessionLog(status=tf.compat.v1.SessionLog.CHECKPOINT,
checkpoint_path=self._save_path), step)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/hooks/enc_ckpt_hook.py |
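# --- Usage sketch for EncryptCheckpointSaverHook above (not part of the original files) ---
# How the hook is typically passed to a tf.estimator training loop. The result
# directory, key value, and steps_per_epoch are placeholders; the estimator itself is
# assumed to exist elsewhere and is therefore only shown in comments.
import tempfile

from nvidia_tao_tf1.cv.efficientdet.hooks.enc_ckpt_hook import EncryptCheckpointSaverHook

ckpt_hook = EncryptCheckpointSaverHook(
    checkpoint_dir='/results/efficientdet',   # where model.epoch-N.tlt archives are written
    temp_dir=tempfile.mkdtemp(),              # scratch space for the raw TF checkpoint files
    key='hypothetical_key',                   # placeholder for the user-supplied key
    steps_per_epoch=1000)

# estimator.train(input_fn=train_input_fn, hooks=[ckpt_hook], max_steps=...)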
"""Pretrained weight loading hooks."""
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
import sys
import tensorflow as tf
from nvidia_tao_tf1.cv.efficientdet.utils.distributed_utils import MPI_rank
__all__ = ["PretrainedWeightsLoadingHook"]
# pylint: disable=protected-access
# Currently variable_scope doesn't provide very good APIs to access
# all variables under scope and retrieve and check existing scopes.
def get_variable_full_name(var):
"""Returns the full name of a variable.
For normal Variables, this is the same as the var.op.name. For
sliced or PartitionedVariables, this name is the same for all the
slices/partitions. In both cases, this is normally the name used in
a checkpoint file.
Args:
var: A `Variable` object.
Returns:
A string that is the full name.
"""
if var._save_slice_info:
return var._save_slice_info.full_name
return var.op.name
def assign_from_checkpoint(model_path, var_list, ignore_missing_vars=False):
"""Creates an operation to assign specific variables from a checkpoint.
Args:
model_path: The full path to the model checkpoint. To get latest checkpoint
use `model_path = tf.train.latest_checkpoint(checkpoint_dir)`
var_list: A list of (possibly partitioned) `Variable` objects or a
dictionary mapping names in the checkpoint to the corresponding variables
or list of variables to initialize from that checkpoint value. For
partitioned Variables, the name in the checkpoint must be the full
variable, not the name of the partitioned variable, eg. "my_var" rather
than "my_var/part_4". If empty, returns no_op(), {}.
ignore_missing_vars: Boolean, if True ignore variables missing in the
checkpoint with a warning instead of failing.
Returns:
the restore_op and the feed_dict that need to be run to restore var_list.
Raises:
ValueError: If `ignore_missing_vars` is False and the checkpoint specified
at `model_path` is missing one of the variables in `var_list`.
"""
# Normalize var_list into a dictionary mapping names in the
# checkpoint to the list of variables to initialize from that
# checkpoint variable. Sliced (including partitioned) variables will
# end up under the same key.
grouped_vars = {}
if isinstance(var_list, (tuple, list)):
for var in var_list:
ckpt_name = get_variable_full_name(var)
if ckpt_name not in grouped_vars:
grouped_vars[ckpt_name] = []
grouped_vars[ckpt_name].append(var)
else:
for ckpt_name, value in var_list.items():
if isinstance(value, (tuple, list)):
grouped_vars[ckpt_name] = value
else:
grouped_vars[ckpt_name] = [value]
# Read each checkpoint entry. Create a placeholder variable and
# add the (possibly sliced) data from the checkpoint to the feed_dict.
reader = tf.compat.v1.train.NewCheckpointReader(model_path)
feed_dict = {}
assign_ops = []
for ckpt_name in grouped_vars:
if not reader.has_tensor(ckpt_name):
log_str = 'Checkpoint is missing variable [%s]' % ckpt_name
if ignore_missing_vars:
logging.warning(log_str)
continue
raise ValueError(log_str)
ckpt_value = reader.get_tensor(ckpt_name)
for var in grouped_vars[ckpt_name]:
placeholder_tensor = tf.compat.v1.placeholder(
dtype=var.dtype.base_dtype,
shape=var.get_shape(),
name='placeholder/' + var.op.name
)
assign_ops.append(var.assign(placeholder_tensor))
if not var._save_slice_info:
if var.get_shape() != ckpt_value.shape:
raise ValueError(
'Total size of new array must be unchanged for %s '
'lh_shape: [%s], rh_shape: [%s]' %
(ckpt_name, str(ckpt_value.shape), str(var.get_shape())))
feed_dict[placeholder_tensor] = ckpt_value.reshape(ckpt_value.shape)
else:
slice_dims = zip(var._save_slice_info.var_offset,
var._save_slice_info.var_shape)
slice_dims = [(start, start + size) for (start, size) in slice_dims]
slice_dims = [slice(*x) for x in slice_dims]
slice_value = ckpt_value[slice_dims]
slice_value = slice_value.reshape(var._save_slice_info.var_shape)
feed_dict[placeholder_tensor] = slice_value
print_op = tf.print(
"[GPU %02d] Restoring pretrained weights (%d Tensors)" % (
MPI_rank(),
len(assign_ops)
),
output_stream=sys.stdout
)
with tf.control_dependencies([print_op]):
assign_op = tf.group(*assign_ops)
return assign_op, feed_dict
def build_assigment_map(prefix=None, skip_variables_regex=None):
"""Generate assigment map for loading checkpoints."""
all_vars = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, scope=prefix)
if not prefix:
prefix = ''
assignment_map = {}
for var in all_vars:
var_name = var.name
var_name_filter = (
var_name[-11:] in "/Momentum:0" or
var_name[-11:] in "/Adadelta:0" or
var_name[-13:] in "/Adadelta_1:0" or
var_name[-7:] in "/Adam:0" or
var_name[-9:] in "/Adam_1:0" or
var_name[-10:] in "/Adagrad:0" or
var_name[-10:] in "/RMSProp:0" or
var_name[-12:] in "/RMSProp_1:0" or
var_name[-16:] in "/LARSOptimizer:0"
)
if var_name_filter:
continue
# Trim the index of the variable.
if ':' in var_name:
var_name = var_name[:var_name.rindex(':')]
if skip_variables_regex and bool(re.search(skip_variables_regex, var_name[len(prefix):])):
continue
assignment_map[var_name[len(prefix):]] = var
# assignment_map[var_name] = var
return assignment_map
class PretrainedWeightsLoadingHook(tf.estimator.SessionRunHook):
"""Hook for loading pretrained weights."""
def __init__(self, prefix, checkpoint_path, skip_variables_regex=None):
"""Initialize."""
self._prefix = prefix
self._checkpoint_path = checkpoint_path
self._skip_variables_regex = skip_variables_regex
self._is_initialized = False
self._init_op = None
self._init_feed_dict = None
def begin(self):
"""Begin."""
vars_to_load = build_assigment_map(
prefix=self._prefix,
skip_variables_regex=self._skip_variables_regex
)
vars_to_load.pop('global_step')
self._init_op, self._init_feed_dict = assign_from_checkpoint(
model_path=self._checkpoint_path,
var_list=vars_to_load,
ignore_missing_vars=True
)
def after_create_session(self, session, coord=None):
"""Run seesion."""
if not self._is_initialized:
session.run(self._init_op, feed_dict=self._init_feed_dict)
logging.info("Pretrained weights loaded with success...\n")
self._is_initialized = True
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/hooks/pretrained_restore_hook.py |
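# --- Usage sketch for PretrainedWeightsLoadingHook above (not part of the original files) ---
# Restores matching variables from an existing checkpoint before training starts.
# The checkpoint path and the skip regex are illustrative assumptions; the regex is
# meant to exclude prediction-head variables whose shapes depend on the dataset.
from nvidia_tao_tf1.cv.efficientdet.hooks.pretrained_restore_hook import (
    PretrainedWeightsLoadingHook,
)

restore_hook = PretrainedWeightsLoadingHook(
    prefix='',                                          # match checkpoint names verbatim
    checkpoint_path='/weights/pretrained/model.ckpt',   # hypothetical checkpoint prefix
    skip_variables_regex=r'(class-predict|box-predict)')

# begin() builds the assignment map from the current graph; after_create_session()
# then runs the generated assign ops exactly once.
# estimator.train(input_fn=train_input_fn, hooks=[restore_hook], max_steps=...)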
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hook for job progress monitoring on clusters."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import timedelta
import logging
import time
import tensorflow.compat.v1 as tf
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
logger = logging.getLogger(__name__)
def write_status_json(loss_value, current_epoch, max_epoch, time_per_epoch, ETA, learning_rate):
"""Write out the data to the status.json file initiated by the experiment for monitoring.
Args:
loss_value (float): Current value of loss to be recorder in the monitor.
current_epoch (int): Current epoch.
max_epoch (int): Total number of epochs.
time_per_epoch (float): Time per epoch in seconds.
ETA (float): Time per epoch in seconds.
learning_rate (float): Learning rate tensor.
Returns:
monitor_data (dict): The monitor data as a dict.
"""
s_logger = status_logging.get_status_logger()
monitor_data = {
"epoch": current_epoch,
"max_epoch": max_epoch,
"time_per_epoch": str(timedelta(seconds=time_per_epoch)),
"eta": str(timedelta(seconds=ETA)),
}
# Save the json file.
try:
s_logger.graphical = {
"loss": loss_value,
"learning_rate": learning_rate
}
s_logger.write(
data=monitor_data,
status_level=status_logging.Status.RUNNING)
except IOError:
# We let this pass because we do not want the json file writing to crash the whole job.
pass
# Adding the data back after the graphical data was set to the status logger.
monitor_data["loss"] = loss_value
monitor_data["learning_rate"] = learning_rate
return monitor_data
class TaskProgressMonitorHook(tf.estimator.SessionRunHook):
"""Log loss and epochs for monitoring progress of cluster jobs.
Writes the current training progress (current loss, current epoch and
maximum epoch) to a json file.
"""
def __init__(self, batch_size, epochs, steps_per_epoch, logging_frequency=10):
"""Initialization.
Args:
batch_size (str): batch_size for training.
epochs (int): Number of training epochs.
steps_per_epoch (int): Number of steps per epoch.
logging_frequency (int): Print training summary every N steps.
"""
# Define the tensors to be fetched at every step.
self.local_batch_size = batch_size
self.epochs = epochs
self.steps_per_epoch = steps_per_epoch
assert 0 < logging_frequency <= 1000, "Logging frequency must be positive and no greater than 1000."
self.logging_frequency = logging_frequency
# Initialize variables for epoch time calculation.
self.time_per_epoch = 0
self._step_start_time = None
# Closest estimate of the start time, in case starting from mid-epoch.
self._epoch_start_time = time.time()
def begin(self):
"""Begin."""
self._global_step_tensor = tf.train.get_global_step()
self._fetches = {
'ops': ['learning_rate:0', 'total_loss:0', 'global_step:0'],
'epoch': self._global_step_tensor // self.steps_per_epoch}
def before_run(self, run_context):
"""Request loss and global step from the session.
Args:
run_context: A `SessionRunContext` object.
Returns:
A `SessionRunArgs` object.
"""
# Record start time for each step. Use the value later, if this step started an epoch.
self._step_start_time = time.time()
# Assign the tensors to be fetched.
return tf.train.SessionRunArgs(self._fetches)
def after_run(self, run_context, run_values):
"""Write the progress to json-file after each epoch.
Args:
run_context: A `SessionRunContext` object.
run_values: A `SessionRunValues` object. Contains the loss value
requested by before_run().
"""
# Get the global step value.
learning_rate, loss_value, step = run_values.results['ops']
current_epoch = (step + 1) // self.steps_per_epoch
if (step + 1) % self.logging_frequency == 0:
logger.info(
"Global step %d (epoch %d/%d): loss: %0.5f learning rate: %0.5f"
% (
int(step + 1),
current_epoch + 1,
self.epochs,
float(loss_value),
float(learning_rate)
)
)
if (step + 1) % self.steps_per_epoch == 0:
# Last step of an epoch is completed.
epoch_end_time = time.time()
self.time_per_epoch = epoch_end_time - self._epoch_start_time
if (step + 1) % self.steps_per_epoch == 0:
# Reset the epoch timer; this step's start time is the closest estimate of the next epoch's start.
self._epoch_start_time = self._step_start_time
monitor_data = write_status_json(
loss_value=float(loss_value),
current_epoch=int(current_epoch),
max_epoch=int(self.epochs),
time_per_epoch=self.time_per_epoch,
ETA=(self.epochs - current_epoch) * self.time_per_epoch,
learning_rate=float(learning_rate)
)
logger.info(
"Epoch %d/%d: loss: %0.5f learning rate: %0.5f Time taken: %s ETA: %s"
% (
monitor_data["epoch"],
monitor_data["max_epoch"],
monitor_data["loss"],
monitor_data["learning_rate"],
monitor_data["time_per_epoch"],
monitor_data["eta"],
)
)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/hooks/logging_hook.py |
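# --- Usage sketch for TaskProgressMonitorHook above (not part of the original files) ---
# Attaches per-epoch progress reporting to an estimator run. The batch size, epoch
# count, and steps_per_epoch are placeholder values; the hook additionally expects
# tensors named 'learning_rate:0' and 'total_loss:0' to exist in the training graph.
from nvidia_tao_tf1.cv.efficientdet.hooks.logging_hook import TaskProgressMonitorHook

progress_hook = TaskProgressMonitorHook(
    batch_size=16,
    epochs=300,
    steps_per_epoch=1000,
    logging_frequency=10)

# estimator.train(input_fn=train_input_fn, hooks=[progress_hook], max_steps=300 * 1000)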
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IVA EfficientDet hooks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/hooks/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""dllogger setup script."""
import time
import dllogger
import tensorflow.compat.v1 as tf
def setup_dllogger(rank, enabled=True, filename='log.json'):
"""Set up Dllogger."""
if enabled and rank == 0:
backends = [
dllogger.StdOutBackend(dllogger.Verbosity.DEFAULT),
dllogger.JSONStreamBackend(
dllogger.Verbosity.VERBOSE,
filename,
),
]
dllogger.init(backends)
# else:
# dllogger.init([])
class AverageMeter:
"""Computes and stores the average and current value."""
def __init__(self, warmup=0, keep=False):
"""Init."""
self.reset()
self.warmup = warmup
self.keep = keep
def reset(self):
"""Reset values."""
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
self.iters = 0
self.vals = []
def update(self, val, n=1):
"""Update."""
self.iters += 1
self.val = val
if self.iters > self.warmup:
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
if self.keep:
self.vals.append(val)
class DLLoggerHook(tf.estimator.SessionRunHook):
"""Dllogger hook."""
def __init__(self, batch_size, num_examples_per_epoch, logging_frequency,
checkpoint_period, rank=-1, size=1):
"""Init."""
self.local_batch_size = batch_size
self.global_batch_size = self.local_batch_size * size
self.num_examples_per_epoch = num_examples_per_epoch
self.logging_frequency = logging_frequency
self.checkpoint_period = checkpoint_period
self.rank = rank
def after_create_session(self, session, coord):
"""After session is created."""
self.meters = {}
warmup = 100
self.meters['train_throughput'] = AverageMeter(warmup=warmup)
def before_run(self, run_context):
"""Before session run."""
self.t0 = time.time()
return tf.estimator.SessionRunArgs(
fetches=['learning_rate:0', 'total_loss:0', 'global_step:0'])
def after_run(self, run_context, run_values):
"""After session run."""
throughput = self.global_batch_size/(time.time() - self.t0)
learning_rate, loss, current_step = run_values.results
if current_step % self.logging_frequency == 0:
summary = {
'global step': str(current_step + 1),
'epoch': str(
(((current_step + 1) * self.local_batch_size) //
self.num_examples_per_epoch) + 1),
'learning_rate': str(learning_rate),
'total_loss': str(loss),
}
dllogger.log(step=int(current_step), data=summary)
# if current_step % self.checkpoint_period == 0:
# summary = {
# 'INFO': 'Saved checkpoint at global step: {}'.format(current_step),
# }
# dllogger.log(step=int(current_step), data=summary)
self.meters['train_throughput'].update(throughput)
def end(self, session):
"""Dump log."""
summary = {
'train_throughput': self.meters['train_throughput'].avg,
}
dllogger.log(step=tuple(), data=summary)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/hooks/dllogging_hook.py |
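# --- Usage sketch for the dllogger helpers above (not part of the original files) ---
# Sets up dllogger on rank 0 and shows AverageMeter skipping warm-up samples when
# averaging throughput. The file name and throughput numbers are illustrative only.
from nvidia_tao_tf1.cv.efficientdet.hooks.dllogging_hook import AverageMeter, setup_dllogger

setup_dllogger(rank=0, enabled=True, filename='log.json')

meter = AverageMeter(warmup=2)           # the first two updates are treated as warm-up
for throughput in [10.0, 12.0, 100.0, 110.0, 120.0]:
    meter.update(throughput)
print(meter.avg)                          # 110.0 -- only post-warm-up samples are averaged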