# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IVA EfficientDet scripts."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/scripts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Export EfficientDet model to etlt and TRT engine."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import os
import shutil
import struct
import tempfile
from zipfile import BadZipFile, ZipFile
import tensorflow as tf
from tensorflow.python.framework.ops import disable_eager_execution
from tensorflow.python.util import deprecation
try:
import tensorrt as trt # noqa pylint: disable=W0611 pylint: disable=W0611
from nvidia_tao_tf1.cv.efficientdet.exporter.trt_builder import EngineBuilder
except Exception as e:
logger = logging.getLogger(__name__)
logger.warning(
"Failed to import TensorRT package, exporting TLT to a TensorRT engine "
"will not be available."
)
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
import nvidia_tao_tf1.cv.common.no_warning # noqa pylint: disable=W0611
from nvidia_tao_tf1.cv.efficientdet.exporter.onnx_exporter import EfficientDetGraphSurgeon
from nvidia_tao_tf1.cv.efficientdet.inferencer import inference
from nvidia_tao_tf1.cv.efficientdet.utils import hparams_config
from nvidia_tao_tf1.cv.efficientdet.utils.model_loader import decode_tlt_file
from nvidia_tao_tf1.cv.efficientdet.utils.spec_loader import (
generate_params_from_spec,
load_experiment_spec
)
from nvidia_tao_tf1.encoding import encoding
deprecation._PRINT_DEPRECATION_WARNINGS = False
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # or any {'0', '1', '2'}
os.environ["TF_CPP_VMODULE"] = 'non_max_suppression_op=0,generate_box_proposals_op=0,executor=0'
supported_img_format = ['.jpg', '.jpeg', '.JPG', '.JPEG', '.png', '.PNG']
def extract_zipfile_ckpt(zip_path):
"""Extract the contents of an efficientdet ckpt zip file.
Args:
zip_path (str): Path to a zipfile.
Returns:
checkpoint_path (str): Path to the checkpoint extracted.
"""
temp_ckpt_dir = tempfile.mkdtemp()
with ZipFile(zip_path, 'r') as zip_object:
for member in zip_object.namelist():
zip_object.extract(member, path=temp_ckpt_dir)
if member.startswith('model.ckpt-'):
step = int(member.split('model.ckpt-')[-1].split('.')[0])
return os.path.join(temp_ckpt_dir, "model.ckpt-{}".format(step))
def extract_ckpt(encoded_checkpoint, key):
"""Get unencrypted checkpoint from tlt file."""
logging.info("Loading weights from {}".format(encoded_checkpoint))
try:
# First try to load as an unencrypted checkpoint (TAO 5.0 format).
checkpoint_path = extract_zipfile_ckpt(encoded_checkpoint)
except BadZipFile:
# Decrypt and load the checkpoint.
os_handle, temp_zip_path = tempfile.mkstemp()
os.close(os_handle)
# Decrypt the checkpoint file.
with open(encoded_checkpoint, 'rb') as encoded_file, open(temp_zip_path, 'wb') as tmp_zipf:
encoding.decode(encoded_file, tmp_zipf, key.encode())
encoded_file.closed
tmp_zipf.closed
checkpoint_path = extract_zipfile_ckpt(temp_zip_path)
os.remove(temp_zip_path)
return checkpoint_path
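# Illustrative usage of the two helpers above (paths and key are made up, not part of
# the original script): given a .tlt archive that wraps a zipped TF1 checkpoint,
# extract_ckpt() decrypts it if needed and returns the checkpoint prefix inside a
# temporary directory, e.g.
#   >>> ckpt_prefix = extract_ckpt('/workspace/model.step-25000.tlt', key='nvidia_tlt')
#   >>> ckpt_prefix
#   '/tmp/tmpab12cd34/model.ckpt-25000'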
def main(args=None):
"""Launch EfficientDet training."""
disable_eager_execution()
tf.autograph.set_verbosity(0)
# parse CLI and config file
args = parse_command_line_arguments(args)
output_path = args.output_path
if not ("onnx" in output_path):
output_path = f"{output_path}.onnx"
assert not os.path.exists(output_path), (
f"Exported model already exists at \'{output_path}\'. "
"Please change the output path or remove the current file."
)
assert args.max_batch_size > 0, "Max batch size for the engine must be positive."
print("Loading experiment spec at %s.", args.experiment_spec_file)
spec = load_experiment_spec(args.experiment_spec_file, merge_from_default=False)
results_dir = os.path.dirname(args.output_path)
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=True,
verbosity=1,
append=True
)
)
s_logger = status_logging.get_status_logger()
s_logger.write(
status_level=status_logging.Status.STARTED,
message="Starting EfficientDet export."
)
# set up config
MODE = 'export'
# Parse and override hparams
config = hparams_config.get_detection_config(spec.model_config.model_name)
params = generate_params_from_spec(config, spec, MODE)
config.update(params)
if config.pruned_model_path:
config.pruned_model_path = decode_tlt_file(config.pruned_model_path, args.key)
# get output dir from etlt path
output_dir = os.path.dirname(args.output_path)
if not os.path.exists(output_dir):
os.mkdir(output_dir)
pb_tmp_dir = tempfile.mkdtemp()
# extract unencrypted checkpoint
if args.model_path.endswith('.tlt'):
ckpt_path = extract_ckpt(args.model_path, args.key)
elif 'ckpt' in args.model_path:
ckpt_path = args.model_path
else:
raise NotImplementedError(f"Invalid model file at {args.model_path}")
# serve pb
tf.enable_resource_variables()
driver = inference.ServingDriver(
config.name,
ckpt_path,
batch_size=args.max_batch_size,
min_score_thresh=spec.eval_config.min_score_thresh or 0.4,
max_boxes_to_draw=spec.eval_config.max_detections_per_image or 100,
model_params=config.as_dict())
driver.build()
driver.export(pb_tmp_dir, tflite_path=None, tensorrt=None)
# free gpu memory
tf.reset_default_graph()
# convert to onnx
effdet_gs = EfficientDetGraphSurgeon(pb_tmp_dir, legacy_plugins=False)
effdet_gs.update_preprocessor(
[args.max_batch_size] + list(config.image_size) + [3])
effdet_gs.update_network()
effdet_gs.update_nms()
# convert to etlt
output_onnx_file = effdet_gs.save(output_path)
if args.engine_file is not None or args.data_type == 'int8':
if args.engine_file is None:
engine_handle, temp_engine_path = tempfile.mkstemp()
os.close(engine_handle)
output_engine_path = temp_engine_path
else:
output_engine_path = args.engine_file
builder = EngineBuilder(args.verbose, workspace=args.max_workspace_size)
builder.create_network(output_onnx_file)
builder.create_engine(
output_engine_path,
args.data_type,
args.cal_image_dir,
args.cal_cache_file,
args.batch_size * args.batches,
args.batch_size)
# clean up tmp dir
shutil.rmtree(pb_tmp_dir)
print("Exported model is successfully exported at: {}".format(output_path))
s_logger.write(
status_level=status_logging.Status.SUCCESS,
message="Export finished successfully."
)
def build_command_line_parser(parser=None):
"""Build the command line parser using argparse.
Args:
parser (subparser): Provided from the wrapper script to build a chained
parser mechanism.
Returns:
parser
"""
if parser is None:
parser = argparse.ArgumentParser(prog='export', description='Export an EfficientDet model.')
parser.add_argument(
'-e',
'--experiment_spec_file',
type=str,
required=True,
help='Path to spec file. Absolute path or relative to working directory. \
If not specified, default spec from spec_loader.py is used.')
parser.add_argument(
'-m',
'--model_path',
type=str,
required=True,
help='Path to a trained EfficientDet model.'
)
parser.add_argument(
'-o',
'--output_path',
type=str,
required=True,
help='Path to the exported EfficientDet model.'
)
parser.add_argument(
'-k',
'--key',
type=str,
default="",
required=False,
help='Key to save or load a .tlt model.'
)
parser.add_argument(
"--data_type",
type=str,
default="fp32",
help="Data type for the TensorRT export.",
choices=["fp32", "fp16", "int8"])
parser.add_argument(
"--cal_image_dir",
default="",
type=str,
help="Directory of images to run int8 calibration.")
parser.add_argument(
'--cal_cache_file',
default=None,
type=str,
help='Calibration cache file to write to.')
parser.add_argument(
"--engine_file",
type=str,
default=None,
help="Path to the exported TRT engine.")
parser.add_argument(
"--max_batch_size",
type=int,
default=1,
help="Max batch size for TensorRT engine builder.")
parser.add_argument(
"--batch_size",
type=int,
default=16,
help="Number of images per batch.")
parser.add_argument(
"--batches",
type=int,
default=10,
help="Number of batches to calibrate over.")
parser.add_argument(
"--max_workspace_size",
type=int,
default=2,
help="Max memory workspace size to allow in Gb for TensorRT engine builder (default: 2).")
parser.add_argument(
"-v",
"--verbose",
action="store_true",
default=False,
help="Verbosity of the logger.")
return parser
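# Example invocation (hypothetical paths; the flags correspond to the parser above):
#   efficientdet export -m /workspace/model.tlt -k nvidia_tlt \
#       -e /workspace/spec.txt -o /workspace/export/model.onnx \
#       --data_type fp16 --engine_file /workspace/export/model.engine --max_batch_size 1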
def parse_command_line_arguments(args=None):
"""Simple function to parse command line arguments."""
parser = build_command_line_parser(args)
return parser.parse_args(args)
if __name__ == '__main__':
try:
main()
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Export was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/scripts/export.py |
r"""Convert raw COCO dataset to TFRecord for object_detection."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
import hashlib
import io
import json
import multiprocessing
import os
import numpy as np
import PIL.Image
from pycocotools import mask
from skimage import measure
import tensorflow as tf
from nvidia_tao_tf1.cv.common.dataset import dataset_util
from nvidia_tao_tf1.cv.common.dataset import label_map_util
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
def create_tf_example(image,
bbox_annotations,
image_dir,
category_index,
include_masks=False,
inspect_mask=True):
"""Converts image and annotations to a tf.Example proto.
Args:
image: dict with keys:
[u'license', u'file_name', u'coco_url', u'height', u'width',
u'date_captured', u'flickr_url', u'id']
bbox_annotations:
list of dicts with keys:
[u'segmentation', u'area', u'iscrowd', u'image_id',
u'bbox', u'category_id', u'id']
Notice that bounding box coordinates in the official COCO dataset are
given as [x, y, width, height] tuples using absolute coordinates where
x, y represent the top-left (0-indexed) corner. This function converts
to the format expected by the TensorFlow Object Detection API (which is
[ymin, xmin, ymax, xmax] with coordinates normalized relative
to image size).
image_dir: directory containing the image files.
category_index: a dict containing COCO category information keyed
by the 'id' field of each category. See the
label_map_util.create_category_index function.
include_masks: Whether to include instance segmentations masks
(PNG encoded) in the result. default: False.
Returns:
example: The converted tf.Example
num_annotations_skipped: Number of (invalid) annotations that were ignored.
Raises:
ValueError: if the image pointed to by data['filename'] is not a valid JPEG
"""
image_height = image['height']
image_width = image['width']
filename = image['file_name']
image_id = image['id']
full_path = os.path.join(image_dir, filename)
with tf.io.gfile.GFile(full_path, 'rb') as fid:
encoded_jpg = fid.read()
encoded_jpg_io = io.BytesIO(encoded_jpg)
image = PIL.Image.open(encoded_jpg_io)
key = hashlib.sha256(encoded_jpg).hexdigest()
xmin = []
xmax = []
ymin = []
ymax = []
is_crowd = []
category_names = []
category_ids = []
area = []
encoded_mask_png = []
num_annotations_skipped = 0
log_warnings = {}
box_oob = []
mask_oob = []
for object_annotations in bbox_annotations:
object_annotations_id = object_annotations['id']
(x, y, width, height) = tuple(object_annotations['bbox'])
if width <= 0 or height <= 0 or x + width > image_width or y + height > image_height:
num_annotations_skipped += 1
box_oob.append(object_annotations_id)
continue
xmin.append(float(x) / image_width)
xmax.append(float(x + width) / image_width)
ymin.append(float(y) / image_height)
ymax.append(float(y + height) / image_height)
is_crowd.append(object_annotations['iscrowd'])
category_id = int(object_annotations['category_id'])
category_ids.append(category_id)
category_names.append(category_index[category_id]['name'].encode('utf8'))
area.append(object_annotations['area'])
if include_masks:
if 'segmentation' not in object_annotations:
raise ValueError(
f"segmentation groundtruth is missing in object: {object_annotations_id}.")
# polygon (e.g. [[289.74,443.39,302.29,445.32, ...], [1,2,3,4]])
if isinstance(object_annotations['segmentation'], list):
rles = mask.frPyObjects(object_annotations['segmentation'],
image_height, image_width)
rle = mask.merge(rles)
elif 'counts' in object_annotations['segmentation']:
# e.g. {'counts': [6, 1, 40, 4, 5, 4, 5, 4, 21], 'size': [9, 10]}
if isinstance(object_annotations['segmentation']['counts'], list):
rle = mask.frPyObjects(object_annotations['segmentation'],
image_height, image_width)
else:
rle = object_annotations['segmentation']
else:
raise ValueError('Please check the segmentation format.')
binary_mask = mask.decode(rle)
contours = measure.find_contours(binary_mask, 0.5)
if inspect_mask:
# check if mask is out of bound compared to bbox
min_x, max_x = image_width + 1, -1
min_y, max_y = image_height + 1, -1
for cont in contours:
c = np.array(cont)
min_x = min(min_x, np.amin(c, axis=0)[1])
max_x = max(max_x, np.amax(c, axis=0)[1])
min_y = min(min_y, np.amin(c, axis=0)[0])
max_y = max(max_y, np.amax(c, axis=0)[0])
xxmin, xxmax, yymin, yymax = \
float(x) - 1, float(x + width) + 1, float(y) - 1, float(y + height) + 1
if xxmin > min_x or yymin > min_y or xxmax < max_x or yymax < max_y:
mask_oob.append(object_annotations_id)
# if not object_annotations['iscrowd']:
# binary_mask = np.amax(binary_mask, axis=2)
pil_image = PIL.Image.fromarray(binary_mask)
output_io = io.BytesIO()
pil_image.save(output_io, format='PNG')
encoded_mask_png.append(output_io.getvalue())
feature_dict = {
'image/height':
dataset_util.int64_feature(image_height),
'image/width':
dataset_util.int64_feature(image_width),
'image/filename':
dataset_util.bytes_feature(filename.encode('utf8')),
'image/source_id':
dataset_util.bytes_feature(str(image_id).encode('utf8')),
'image/key/sha256':
dataset_util.bytes_feature(key.encode('utf8')),
'image/encoded':
dataset_util.bytes_feature(encoded_jpg),
'image/format':
dataset_util.bytes_feature('jpeg'.encode('utf8')),
'image/object/bbox/xmin':
dataset_util.float_list_feature(xmin),
'image/object/bbox/xmax':
dataset_util.float_list_feature(xmax),
'image/object/bbox/ymin':
dataset_util.float_list_feature(ymin),
'image/object/bbox/ymax':
dataset_util.float_list_feature(ymax),
'image/object/class/text':
dataset_util.bytes_list_feature(category_names),
'image/object/class/label':
dataset_util.int64_list_feature(category_ids),
'image/object/is_crowd':
dataset_util.int64_list_feature(is_crowd),
'image/object/area':
dataset_util.float_list_feature(area),
}
if include_masks:
feature_dict['image/object/mask'] = (
dataset_util.bytes_list_feature(encoded_mask_png))
example = tf.train.Example(features=tf.train.Features(feature=feature_dict))
if mask_oob or box_oob:
log_warnings[image_id] = {}
log_warnings[image_id]['box'] = box_oob
log_warnings[image_id]['mask'] = mask_oob
return key, example, num_annotations_skipped, log_warnings
def _pool_create_tf_example(args):
return create_tf_example(*args)
def _load_object_annotations(object_annotations_file):
with tf.io.gfile.GFile(object_annotations_file, 'r') as fid:
obj_annotations = json.load(fid)
images = obj_annotations['images']
category_index = label_map_util.create_category_index(
obj_annotations['categories'])
img_to_obj_annotation = collections.defaultdict(list)
tf.compat.v1.logging.info('Building bounding box index.')
for annotation in obj_annotations['annotations']:
image_id = annotation['image_id']
img_to_obj_annotation[image_id].append(annotation)
missing_annotation_count = 0
for image in images:
image_id = image['id']
if image_id not in img_to_obj_annotation:
missing_annotation_count += 1
tf.compat.v1.logging.info('%d images are missing bboxes.', missing_annotation_count)
return images, img_to_obj_annotation, category_index
def _merge_log(log_a, log_b):
log_ab = log_a.copy()
for k, v in log_b.items():
if k in log_ab:
log_ab[k] += v
else:
log_ab[k] = v
return log_ab
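# Illustrative behaviour of _merge_log with list values, as the `+=` suggests
# (keys and values are made up): entries under the same key are concatenated,
# all others are carried over unchanged, e.g.
#   >>> _merge_log({1: ['a']}, {1: ['b'], 2: ['c']})
#   {1: ['a', 'b'], 2: ['c']}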
def _create_tf_record_from_coco_annotations(object_annotations_file,
image_dir, output_path, include_masks, num_shards):
"""Loads COCO annotation json files and converts to tf.Record format.
Args:
object_annotations_file: JSON file containing bounding box annotations.
image_dir: Directory containing the image files.
output_path: Path to output tf.Record file.
include_masks: Whether to include instance segmentations masks
(PNG encoded) in the result. default: False.
num_shards: Number of output files to create.
"""
tf.compat.v1.logging.info('writing to output path: %s', output_path)
writers = [
tf.io.TFRecordWriter(
output_path + '-%05d-of-%05d.tfrecord' %
(i, num_shards)) for i in range(num_shards)
]
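# Example of the shard naming above (hypothetical values): with
# output_path='/data/tfrecords/train' and num_shards=256, the writers produce
#   /data/tfrecords/train-00000-of-00256.tfrecord
#   ...
#   /data/tfrecords/train-00255-of-00256.tfrecord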
images, img_to_obj_annotation, category_index = (
_load_object_annotations(object_annotations_file))
pool = multiprocessing.Pool()
total_num_annotations_skipped = 0
log_total = {}
for idx, (_, tf_example, num_annotations_skipped, log_warnings) in enumerate(
pool.imap(_pool_create_tf_example, [(
image,
img_to_obj_annotation[image['id']],
image_dir,
category_index,
include_masks) for image in images])):
if idx % 100 == 0:
tf.compat.v1.logging.info('On image %d of %d', idx, len(images))
total_num_annotations_skipped += num_annotations_skipped
log_total = _merge_log(log_total, log_warnings)
writers[idx % num_shards].write(tf_example.SerializeToString())
pool.close()
pool.join()
for writer in writers:
writer.close()
tf.compat.v1.logging.info(
'Finished writing, skipped %d annotations.', total_num_annotations_skipped)
return log_total
def main(args=None):
"""Convert COCO format json and images into TFRecords."""
args = parse_command_line_arguments(args)
if not os.path.exists(args.output_dir):
os.mkdir(args.output_dir)
if not os.path.exists(args.results_dir):
os.mkdir(args.results_dir)
status_file = os.path.join(args.results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=True,
verbosity=1,
append=True
)
)
s_logger = status_logging.get_status_logger()
s_logger.write(
status_level=status_logging.Status.STARTED,
message="Starting tfrecords conversion."
)
tag = args.tag or os.path.splitext(os.path.basename(args.annotations_file))[0]
output_path = os.path.join(args.output_dir, tag)
log_total = _create_tf_record_from_coco_annotations(
args.annotations_file,
args.image_dir,
output_path,
args.include_masks,
num_shards=args.num_shards)
if log_total:
with open(os.path.join(args.output_dir, f'{tag}_warnings.json'), "w") as f:
json.dump(log_total, f)
s_logger.write(
status_level=status_logging.Status.SUCCESS,
message="Conversion finished successfully."
)
def build_command_line_parser(parser=None):
"""Build the command line parser using argparse.
Args:
parser (subparser): Provided from the wrapper script to build a chained
parser mechanism.
Returns:
parser
"""
if parser is None:
parser = argparse.ArgumentParser(
prog='dataset_convert', description='Convert COCO format dataset to TFRecords.')
parser.add_argument(
'-i',
'--image_dir',
type=str,
required=True,
help='Path to the image directory.')
parser.add_argument(
'-a',
'--annotations_file',
type=str,
required=True,
help='Path to the annotation JSON file.')
parser.add_argument(
'-o',
'--output_dir',
type=str,
required=True,
help='Output directory where TFRecords are saved.'
)
parser.add_argument(
'-r',
'--results_dir',
type=str,
default='/tmp',
required=False,
help='Output directory where the status log is saved.'
)
parser.add_argument(
'-t',
'--tag',
type=str,
required=False,
default=None,
help='Tag for the converted TFRecords (e.g. train, val, test). \
Default to the name of annotation file.'
)
parser.add_argument(
'-s',
'--num_shards',
type=int,
required=False,
default=256,
help='Number of shards.'
)
parser.add_argument(
"--include_masks",
action="store_true",
default=False,
help="Whether to include instance segmentation masks.")
return parser
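# Example invocation (hypothetical paths; the flags correspond to the parser above):
#   efficientdet dataset_convert -i /data/coco/train2017 \
#       -a /data/coco/annotations/instances_train2017.json \
#       -o /data/tfrecords -t train -s 256 --include_masks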
def parse_command_line_arguments(args=None):
"""Simple function to parse command line arguments."""
parser = build_command_line_parser(args)
return parser.parse_args(args)
if __name__ == '__main__':
try:
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
main()
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Dataset convert was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/scripts/dataset_convert.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perform EfficientDet training on a tfrecords dataset."""
import argparse
import logging
import os
from pathlib import Path
import tensorflow as tf
from tensorflow.python.framework.ops import disable_eager_execution
from tensorflow.python.util import deprecation
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
import nvidia_tao_tf1.cv.common.no_warning # noqa pylint: disable=W0611
from nvidia_tao_tf1.cv.efficientdet.dataloader import dataloader
from nvidia_tao_tf1.cv.efficientdet.executer import distributed_executer
from nvidia_tao_tf1.cv.efficientdet.models import det_model_fn
from nvidia_tao_tf1.cv.efficientdet.utils import hparams_config
from nvidia_tao_tf1.cv.efficientdet.utils.model_loader import decode_tlt_file
from nvidia_tao_tf1.cv.efficientdet.utils.spec_loader import (
generate_params_from_spec,
load_experiment_spec
)
from nvidia_tao_tf1.cv.efficientdet.utils.distributed_utils import MPI_is_distributed
from nvidia_tao_tf1.cv.efficientdet.utils.distributed_utils import MPI_rank
deprecation._PRINT_DEPRECATION_WARNINGS = False
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # or any {'0', '1', '2'}
os.environ["TF_CPP_VMODULE"] = 'non_max_suppression_op=0,generate_box_proposals_op=0,executor=0'
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level='INFO')
logger = logging.getLogger(__name__)
def main(args=None):
"""Launch EfficientDet training."""
disable_eager_execution()
tf.autograph.set_verbosity(0)
# parse CLI and config file
args = parse_command_line_arguments(args)
print("Loading experiment spec at %s.", args.experiment_spec_file)
spec = load_experiment_spec(args.experiment_spec_file, merge_from_default=False)
# set up config
MODE = 'train'
# Parse and override hparams
config = hparams_config.get_detection_config(spec.model_config.model_name)
params = generate_params_from_spec(config, spec, MODE)
config.update(params)
# Update config with parameters in args
config.key = args.key
config.model_dir = args.model_dir
if not MPI_is_distributed() or MPI_rank() == 0:
if not os.path.exists(args.model_dir):
os.mkdir(args.model_dir)
if config.checkpoint:
config.checkpoint = decode_tlt_file(config.checkpoint, args.key)
if config.pruned_model_path:
config.pruned_model_path = decode_tlt_file(config.pruned_model_path, args.key)
# Set up dataloader
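# The training/validation file patterns below are glob patterns over the TFRecord
# shards produced by dataset_convert, e.g. '/data/tfrecords/train*' (illustrative
# path only; the actual values come from the experiment spec).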
train_dataloader = dataloader.InputReader(
str(Path(spec.dataset_config.training_file_pattern)),
is_training=True,
use_fake_data=spec.dataset_config.use_fake_data,
max_instances_per_image=config.max_instances_per_image)
eval_dataloader = dataloader.InputReader(
str(Path(spec.dataset_config.validation_file_pattern)),
is_training=False,
max_instances_per_image=config.max_instances_per_image)
try:
run_executer(config, train_dataloader, eval_dataloader)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Training was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
logger.info("Training was interrupted.")
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
def run_executer(runtime_config, train_input_fn=None, eval_input_fn=None):
"""Runs EfficientDet on distribution strategy defined by the user."""
executer = distributed_executer.EstimatorExecuter(runtime_config,
det_model_fn.efficientdet_model_fn)
executer.train_and_eval(train_input_fn=train_input_fn, eval_input_fn=eval_input_fn)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Training finished successfully."
)
def build_command_line_parser(parser=None):
"""Build the command line parser using argparse.
Args:
parser (subparser): Provided from the wrapper script to build a chained
parser mechanism.
Returns:
parser
"""
if parser is None:
parser = argparse.ArgumentParser(prog='train', description='Train an EfficientDet model.')
parser.add_argument(
'-e',
'--experiment_spec_file',
type=str,
required=True,
help='Path to spec file. Absolute path or relative to working directory. \
If not specified, default spec from spec_loader.py is used.')
parser.add_argument(
'-d',
'--model_dir',
type=str,
required=True,
help='Path to a folder where experiment outputs should be written.'
)
parser.add_argument(
'-k',
'--key',
default="",
type=str,
required=False,
help='Key to save or load a .tlt model.'
)
return parser
def parse_command_line_arguments(args=None):
"""Simple function to parse command line arguments."""
parser = build_command_line_parser(args)
return parser.parse_args(args)
if __name__ == '__main__':
main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/scripts/train.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientDet pruning script."""
import argparse
import json
import logging
import os
import shutil
import tempfile
import time
from zipfile import BadZipFile, ZipFile
import tensorflow as tf
from tensorflow import keras # noqa pylint: disable=F401, W0611
from nvidia_tao_tf1.core.pruning.pruning import prune
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
import nvidia_tao_tf1.cv.common.no_warning # noqa pylint: disable=W0611
from nvidia_tao_tf1.cv.common.utils import get_model_file_size
from nvidia_tao_tf1.cv.efficientdet.utils.model_loader import dump_json, load_json_model
from nvidia_tao_tf1.encoding import encoding
logger = logging.getLogger(__name__)
def build_command_line_parser(parser=None):
"""Build a command line parser for pruning."""
if parser is None:
parser = argparse.ArgumentParser(description="TLT pruning script")
parser.add_argument("-m",
"--model",
type=str,
help="Path to the target model for pruning",
required=True,
default=None)
parser.add_argument("-o",
"--output_dir",
type=str,
help="Output directory for pruned model",
required=True,
default=None)
parser.add_argument('-k',
'--key',
required=False,
type=str,
default="",
help='Key to load a .tlt model')
parser.add_argument('-n',
'--normalizer',
type=str,
default='max',
help="`max` to normalize by dividing each norm by the \
maximum norm within a layer; `L2` to normalize by \
dividing by the L2 norm of the vector comprising all \
kernel norms. (default: `max`)")
parser.add_argument('-eq',
'--equalization_criterion',
type=str,
default='union',
help="Criteria to equalize the stats of inputs to an \
element wise op layer. Options are \
[arithmetic_mean, geometric_mean, union, \
intersection]. (default: `union`)")
parser.add_argument("-pg",
"--pruning_granularity",
type=int,
help="Pruning granularity: number of filters to remove \
at a time. (default:8)",
default=8)
parser.add_argument("-pth",
"--pruning_threshold",
type=float,
help="Threshold to compare normalized norm against \
(default:0.1)", default=0.1)
parser.add_argument("-nf",
"--min_num_filters",
type=int,
help="Minimum number of filters to keep per layer. \
(default:16)", default=16)
parser.add_argument("-el",
"--excluded_layers", action='store',
type=str, nargs='*',
help="List of excluded_layers. Examples: -i item1 \
item2", default=[])
parser.add_argument("-v",
"--verbose",
action='store_true',
help="Include this flag in command line invocation for \
verbose logs.")
return parser
def extract_zipfile_ckpt(zip_path):
"""Extract the contents of an efficientdet ckpt zip file.
Args:
zip_path (str): Path to a zipfile.
Returns:
checkpoint_path (str): Path to the checkpoint extracted.
"""
temp_ckpt_dir = tempfile.mkdtemp()
with ZipFile(zip_path, 'r') as zip_object:
for member in zip_object.namelist():
zip_object.extract(member, path=temp_ckpt_dir)
if member.startswith('model.ckpt-'):
step = int(member.split('model.ckpt-')[-1].split('.')[0])
return os.path.join(temp_ckpt_dir, "model.ckpt-{}".format(step))
def extract_ckpt(encoded_checkpoint, key):
"""Get unencrypted checkpoint from tlt file."""
logging.info("Loading weights from {}".format(encoded_checkpoint))
try:
# First try to load as an unencrypted checkpoint (TAO 5.0 format).
checkpoint_path = extract_zipfile_ckpt(encoded_checkpoint)
except BadZipFile:
# Decrypt and load the checkpoint.
os_handle, temp_zip_path = tempfile.mkstemp()
os.close(os_handle)
# Decrypt the checkpoint file.
with open(encoded_checkpoint, 'rb') as encoded_file, open(temp_zip_path, 'wb') as tmp_zipf:
encoding.decode(encoded_file, tmp_zipf, key.encode())
encoded_file.closed
tmp_zipf.closed
checkpoint_path = extract_zipfile_ckpt(temp_zip_path)
os.remove(temp_zip_path)
return checkpoint_path
def parse_command_line_arguments(args=None):
"""Parse command line arguments for pruning."""
parser = build_command_line_parser()
return parser.parse_args(args)
def run_pruning(args=None):
"""Prune an encrypted MRCNN model."""
# Set up logger verbosity.
verbosity = 'INFO'
if args.verbose:
verbosity = 'DEBUG'
DEBUG_MODE = False
# Configure the logger.
logging.basicConfig(
format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level=verbosity)
assert args.equalization_criterion in \
['arithmetic_mean', 'geometric_mean', 'union', 'intersection'], \
"Equalization criterion are [arithmetic_mean, geometric_mean, union, \
intersection]."
assert args.normalizer in ['L2', 'max'], \
"normalizer options are [L2, max]."
if not os.path.exists(args.output_dir):
os.mkdir(args.output_dir)
elif len(os.listdir(args.output_dir)) > 0:
raise ValueError("Output directory is not empty. \
Please specify a new directory or clean up the current one.")
output_file = os.path.join(args.output_dir, 'model.tlt')
"""Prune MRCNN model graphs with checkpoint."""
# Load the unpruned model
status_file = os.path.join(args.output_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=True,
verbosity=1,
append=True
)
)
s_logger = status_logging.get_status_logger()
s_logger.write(
status_level=status_logging.Status.STARTED,
message="Starting EfficientDet pruning."
)
model_dir = os.path.dirname(args.model)
final_model = load_json_model(
os.path.join(model_dir, 'graph.json'))
# Decrypt and restore checkpoint
ckpt_path = extract_ckpt(args.model, args.key)
if DEBUG_MODE:
# selectively restore checkpoint
reader = tf.compat.v1.train.NewCheckpointReader(ckpt_path)
restore_dict = {}
for v in tf.trainable_variables():
tensor_name = v.name.split(':')[0]
if reader.has_tensor(tensor_name):
restore_dict[tensor_name] = v
else:
print(tensor_name)
saver = tf.compat.v1.train.Saver(restore_dict)
sess = keras.backend.get_session()
tf.global_variables_initializer()
saver.restore(sess, ckpt_path)
# restore checkpoint
sess = keras.backend.get_session()
tf.global_variables_initializer()
tf.compat.v1.train.Saver().restore(sess, ckpt_path)
if verbosity == 'DEBUG':
# Printing out the loaded model summary
logger.debug("Model summary of the unpruned model:")
logger.debug(final_model.summary())
# Excluded layers for Effdet
force_excluded_layers = []
force_excluded_layers += final_model.output_names
t0 = time.time()
logger.info("Pruning process will take some time. Please wait...")
# Pruning trained model
pruned_model = prune(
model=final_model,
method='min_weight',
normalizer=args.normalizer,
criterion='L2',
granularity=args.pruning_granularity,
min_num_filters=args.min_num_filters,
threshold=args.pruning_threshold,
equalization_criterion=args.equalization_criterion,
excluded_layers=args.excluded_layers + force_excluded_layers)
if verbosity == 'DEBUG':
# Printing out pruned model summary
logger.debug("Model summary of the pruned model:")
logger.debug(pruned_model.summary())
pruning_ratio = pruned_model.count_params() / final_model.count_params()
logger.info("Pruning ratio (pruned model / original model): {}".format(
pruning_ratio))
logger.debug("Elapsed time: {}".format(time.time() - t0))
# zip pruned hdf5 and save to tlt file
temp_dir = tempfile.mkdtemp()
pruned_model.save(os.path.join(temp_dir, "pruned_model.hdf5"))
# save train graph in json
dump_json(pruned_model, os.path.join(temp_dir, "pruned_train.json"))
# generate eval graph for exporting. (time saving hack)
with open(os.path.join(temp_dir, "pruned_train.json"), 'r') as f:
pruned_json = json.load(f)
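# In the Keras functional-API JSON, each layer stores its inbound nodes as nested
# lists of [layer_name, node_index, tensor_index, kwargs]; when the trailing kwargs
# dict is present on a PatchedBatchNormalization layer (e.g. {'training': True}),
# force it to inference mode so the exported eval graph uses moving statistics.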
for layer in pruned_json['config']['layers']:
if layer['class_name'] == 'PatchedBatchNormalization':
if layer['inbound_nodes'][0][0][-1]:
layer['inbound_nodes'][0][0][-1]['training'] = False
with open(os.path.join(temp_dir, "pruned_eval.json"), 'w') as jf:
json.dump(pruned_json, jf)
# save to tlt
prev_dir = os.getcwd()
os.chdir(temp_dir)
with ZipFile(output_file, 'w') as zip_object:
written_flag = False
for model_file in os.listdir(temp_dir):
if 'pruned_' in model_file:
zip_object.write(model_file)
written_flag = True
assert written_flag, "The pruned model is not saved properly. \
Please rerun the pruning script."
# Restore previous execution directory and remove tmp files/directories.
os.chdir(prev_dir)
s_logger.kpi.update(
{
'pruning_ratio': float(pruning_ratio),
'param_count': pruned_model.count_params(),
'size': get_model_file_size(output_file)
}
)
s_logger.write(
status_level=status_logging.Status.SUCCESS,
message="Pruning finished successfully."
)
def main(args=None):
"""Wrapper function for pruning."""
try:
# parse command line
args = parse_command_line_arguments(args)
run_pruning(args)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Pruning was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/scripts/prune.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Standalone inference with EfficientDet checkpoint."""
import argparse
import os
import tensorflow as tf
from tensorflow.python.framework.ops import disable_eager_execution
from tensorflow.python.util import deprecation
try:
import tensorrt as trt # noqa pylint: disable=W0611 pylint: disable=W0611
from nvidia_tao_tf1.cv.efficientdet.inferencer import inference_trt
except Exception as e:
import logging
logger = logging.getLogger(__name__)
logger.warning(
"Failed to import TensorRT package, exporting TLT to a TensorRT engine "
"will not be available."
)
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
import nvidia_tao_tf1.cv.common.no_warning # noqa pylint: disable=W0611
from nvidia_tao_tf1.cv.efficientdet.inferencer import inference
from nvidia_tao_tf1.cv.efficientdet.utils import hparams_config
from nvidia_tao_tf1.cv.efficientdet.utils.model_loader import decode_tlt_file
from nvidia_tao_tf1.cv.efficientdet.utils.spec_loader import (
generate_params_from_spec,
load_experiment_spec
)
deprecation._PRINT_DEPRECATION_WARNINGS = False
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # or any {'0', '1', '2'}
os.environ["TF_CPP_VMODULE"] = 'non_max_suppression_op=0,generate_box_proposals_op=0,executor=0'
supported_img_format = ['.jpg', '.jpeg', '.JPG', '.JPEG', '.png', '.PNG']
def get_label_dict(label_txt):
"""Create label dict from txt file."""
with open(label_txt, 'r') as f:
labels = f.readlines()
return {i + 1: label.strip() for i, label in enumerate(labels)}
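# Illustrative label file for get_label_dict (contents are made up): a text file
# with one class name per line, e.g.
#   person
#   car
# maps to {1: 'person', 2: 'car'}, matching the 1-based ids built above.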
def batch_generator(iterable, batch_size=1):
"""Load a list of image paths in batches.
Args:
iterable: a list of image paths
batch_size: number of image paths per batch (default: 1).
"""
total_len = len(iterable)
for ndx in range(0, total_len, batch_size):
yield iterable[ndx:min(ndx + batch_size, total_len)]
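# Illustrative behaviour (file names are made up): batch_generator yields consecutive
# slices, with a final partial batch when the list length is not a multiple of
# batch_size, e.g.
#   >>> list(batch_generator(['a.jpg', 'b.jpg', 'c.jpg'], batch_size=2))
#   [['a.jpg', 'b.jpg'], ['c.jpg']]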
def main(args=None):
"""Launch EfficientDet training."""
disable_eager_execution()
tf.autograph.set_verbosity(0)
# parse CLI and config file
args = parse_command_line_arguments(args)
assert args.experiment_spec, "Experiment spec file must be specified."
if not os.path.exists(args.results_dir):
os.mkdir(args.results_dir)
status_file = os.path.join(args.results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=True,
verbosity=1,
append=True
)
)
s_logger = status_logging.get_status_logger()
s_logger.write(
status_level=status_logging.Status.STARTED,
message="Starting EfficientDet inference."
)
print("Loading experiment spec at %s.", args.experiment_spec)
spec = load_experiment_spec(args.experiment_spec, merge_from_default=False)
label_id_mapping = {}
if args.class_map:
label_id_mapping = get_label_dict(args.class_map)
if args.model_path.endswith('.tlt'):
infer_tlt(args, spec, label_id_mapping)
elif args.model_path.endswith('.engine'):
inference_trt.inference(args,
label_id_mapping,
spec.eval_config.min_score_thresh or args.threshold)
else:
raise ValueError("Model file should be in either .tlt or .engine format.")
s_logger.write(
status_level=status_logging.Status.SUCCESS,
message="Inference finished successfully."
)
def infer_tlt(args, spec, label_id_mapping):
"""Inference with tlt model."""
# set up config
MODE = 'infer'
# Parse and override hparams
config = hparams_config.get_detection_config(spec.model_config.model_name)
params = generate_params_from_spec(config, spec, MODE)
config.update(params)
config.label_id_mapping = label_id_mapping
if config.pruned_model_path:
config.pruned_model_path = decode_tlt_file(config.pruned_model_path, args.key)
driver = inference.InferenceDriver(config.name, decode_tlt_file(args.model_path, args.key),
config.as_dict())
config_dict = {}
config_dict['line_thickness'] = 5
config_dict['max_boxes_to_draw'] = spec.eval_config.max_detections_per_image or 100
config_dict['min_score_thresh'] = spec.eval_config.min_score_thresh or args.threshold
out_image_path = os.path.join(args.results_dir, "images_annotated")
out_label_path = os.path.join(args.results_dir, "labels")
os.makedirs(out_image_path, exist_ok=True)
os.makedirs(out_label_path, exist_ok=True)
if os.path.exists(args.image_dir):
if os.path.isfile(args.image_dir):
driver.inference(args.image_dir, out_image_path,
out_label_path, **config_dict)
else:
imgpath_list = [os.path.join(args.image_dir, imgname)
for imgname in sorted(os.listdir(args.image_dir))
if os.path.splitext(imgname)[1].lower()
in supported_img_format]
for file_patterns in batch_generator(imgpath_list, config.eval_batch_size):
driver.inference(file_patterns, out_image_path,
out_label_path, **config_dict)
else:
raise ValueError("{} does not exist. Please verify the input image or directory.".format(
args.image_dir
))
def build_command_line_parser(parser=None):
"""Build the command line parser using argparse.
Args:
parser (subparser): Provided from the wrapper script to build a chained
parser mechanism.
Returns:
parser
"""
if parser is None:
parser = argparse.ArgumentParser(
prog='infer', description='Run inference with an EfficientDet model.')
parser.add_argument(
'-e',
'--experiment_spec',
type=str,
required=False,
help='Path to spec file. Absolute path or relative to working directory. \
If not specified, default spec from spec_loader.py is used.')
parser.add_argument(
'-m',
'--model_path',
type=str,
required=True,
help='Path to a trained EfficientDet model.'
)
parser.add_argument(
'-i',
'--image_dir',
type=str,
required=True,
help='Path to an input image or a directory of images.'
)
parser.add_argument(
'-k',
'--key',
type=str,
default="",
required=False,
help='Key to save or load a .tlt model.'
)
parser.add_argument(
'--class_map',
type=str,
required=False,
help='Path to a text file where label mapping is stored. \
Each row corresponds to a class label sorted by class id.'
)
parser.add_argument(
'-r',
'--results_dir',
type=str,
default='/tmp',
required=False,
help='Output directory where the status log is saved.'
)
parser.add_argument(
'-t',
'--threshold',
type=float,
default=0.4,
help='Confidence threshold for inference.'
)
return parser
def parse_command_line_arguments(args=None):
"""Simple function to parse command line arguments."""
parser = build_command_line_parser(args)
return parser.parse_args(args)
if __name__ == '__main__':
try:
main()
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Inference was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/scripts/inference.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientDet evaluation script."""
import argparse
import os
from pathlib import Path
import tensorflow as tf
from tensorflow.python.framework.ops import disable_eager_execution
from tensorflow.python.util import deprecation
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
import nvidia_tao_tf1.cv.common.no_warning # noqa pylint: disable=W0611
from nvidia_tao_tf1.cv.efficientdet.dataloader import dataloader
from nvidia_tao_tf1.cv.efficientdet.executer import distributed_executer
from nvidia_tao_tf1.cv.efficientdet.models import det_model_fn
from nvidia_tao_tf1.cv.efficientdet.utils import hparams_config
from nvidia_tao_tf1.cv.efficientdet.utils.model_loader import decode_tlt_file
from nvidia_tao_tf1.cv.efficientdet.utils.spec_loader import (
generate_params_from_spec,
load_experiment_spec
)
deprecation._PRINT_DEPRECATION_WARNINGS = False
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # or any {'0', '1', '2'}
os.environ["TF_CPP_VMODULE"] = 'non_max_suppression_op=0,generate_box_proposals_op=0,executor=0'
def main(args=None):
"""Launch EfficientDet training."""
disable_eager_execution()
tf.autograph.set_verbosity(0)
# parse CLI and config file
args = parse_command_line_arguments(args)
if not os.path.exists(args.results_dir):
os.mkdir(args.results_dir)
status_file = os.path.join(args.results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=True,
verbosity=1,
append=True
)
)
s_logger = status_logging.get_status_logger()
s_logger.write(
status_level=status_logging.Status.STARTED,
message="Starting EfficientDet evaluation."
)
print("Loading experiment spec at %s.", args.experiment_spec)
spec = load_experiment_spec(args.experiment_spec, merge_from_default=False)
# set up config
MODE = 'eval'
# Parse and override hparams
config = hparams_config.get_detection_config(spec.model_config.model_name)
params = generate_params_from_spec(config, spec, MODE)
config.update(params)
config.key = args.key
config.model_path = args.model_path
if config.pruned_model_path:
config.pruned_model_path = decode_tlt_file(config.pruned_model_path, args.key)
# Set up dataloader
eval_dataloader = dataloader.InputReader(
str(Path(spec.dataset_config.validation_file_pattern)),
is_training=False,
max_instances_per_image=config.max_instances_per_image)
eval_results = run_executer(config, eval_dataloader)
for k, v in eval_results.items():
s_logger.kpi[k] = float(v)
s_logger.write(
status_level=status_logging.Status.SUCCESS,
message="Evaluation finished successfully."
)
def run_executer(runtime_config, eval_input_fn=None):
"""Runs EfficientDet on distribution strategy defined by the user."""
executer = distributed_executer.EstimatorExecuter(runtime_config,
det_model_fn.efficientdet_model_fn)
eval_results = executer.eval(eval_input_fn=eval_input_fn)
return eval_results
def build_command_line_parser(parser=None):
"""Build the command line parser using argparse.
Args:
parser (subparser): Provided from the wrapper script to build a chained
parser mechanism.
Returns:
parser
"""
if parser is None:
parser = argparse.ArgumentParser(prog='eval', description='Evaluate an EfficientDet model.')
parser.add_argument(
'-e',
'--experiment_spec',
type=str,
required=True,
help='Path to spec file. Absolute path or relative to working directory. \
If not specified, default spec from spec_loader.py is used.')
parser.add_argument(
'-m',
'--model_path',
type=str,
required=True,
help='Path to a trained EfficientDet model.'
)
parser.add_argument(
'-k',
'--key',
type=str,
default="",
required=False,
help='Key to save or load a .tlt model.'
)
parser.add_argument(
'-i',
'--image_dir',
type=str,
required=False,
default=None,
help=argparse.SUPPRESS
)
parser.add_argument(
'-r',
'--results_dir',
type=str,
default='/tmp',
required=False,
help='Output directory where the status log is saved.'
)
return parser
def parse_command_line_arguments(args=None):
"""Simple function to parse command line arguments."""
parser = build_command_line_parser(args)
return parser.parse_args(args)
if __name__ == '__main__':
try:
main()
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Evaluation was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/scripts/evaluate.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test train script."""
import os
import pytest
import tensorflow as tf
@pytest.fixture
def _spec_file():
"""Get EfficientDet default file."""
file_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
default_spec_path = os.path.join(file_path, '../experiment_specs/default.txt')
return default_spec_path
@pytest.mark.script_launch_mode('subprocess')
def test_train_script(tmpdir, script_runner, _spec_file):
"""Test train script."""
script = 'nvidia_tao_tf1/cv/efficientdet/scripts/train.py'
env = os.environ.copy()
args = ['-k', 'nvidia_tlt',
'--experiment_spec', _spec_file,
'-d', tmpdir]
tf.keras.backend.clear_session()
ret = script_runner.run(script, env=env, *args)
try:
assert ret.success
except AssertionError:
print("Local path is not ready.")
print(f"{ret.stdout + ret.stderr}")
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/scripts/tests/test_train.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TLT command line wrapper to invoke CLI scripts."""
import os
import sys
from nvidia_tao_tf1.cv.common.entrypoint.entrypoint import launch_job
import nvidia_tao_tf1.cv.efficientdet.scripts
def main():
"""Function to launch the job."""
os.environ['TF_KERAS'] = '1'
launch_job(nvidia_tao_tf1.cv.efficientdet.scripts, "efficientdet", sys.argv[1:])
if __name__ == "__main__":
main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/entrypoint/efficientdet.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TLT EfficientDet entrypoint."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/entrypoint/__init__.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base target assigner module.
The job of a TargetAssigner is, for a given set of anchors (bounding boxes) and
groundtruth detections (bounding boxes), to assign classification and regression
targets to each anchor as well as weights to each anchor (specifying, e.g.,
which anchors should not contribute to training loss).
It assigns classification/regression targets by performing the following steps:
1) Computing pairwise similarity between anchors and groundtruth boxes using a
provided RegionSimilarity Calculator
2) Computing a matching based on the similarity matrix using a provided Matcher
3) Assigning regression targets based on the matching and a provided BoxCoder
4) Assigning classification targets based on the matching and groundtruth labels
Note that TargetAssigners only operate on detections from a single
image at a time, so any logic for applying a TargetAssigner to multiple
images must be handled externally.
"""
import tensorflow.compat.v1 as tf
from nvidia_tao_tf1.cv.efficientdet.object_detection import box_list
from nvidia_tao_tf1.cv.efficientdet.object_detection import shape_utils
KEYPOINTS_FIELD_NAME = 'keypoints'
class TargetAssigner(object):
"""Target assigner to compute classification and regression targets."""
def __init__(self, similarity_calc, matcher, box_coder,
negative_class_weight=1.0, unmatched_cls_target=None):
"""Construct Object Detection Target Assigner.
Args:
similarity_calc: a RegionSimilarityCalculator
matcher: Matcher used to match groundtruth to anchors.
box_coder: BoxCoder used to encode matching groundtruth boxes with
respect to anchors.
negative_class_weight: classification weight to be associated to negative
anchors (default: 1.0). The weight must be in [0., 1.].
unmatched_cls_target: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each
anchor (and can be empty for scalar targets). This shape must thus be
compatible with the groundtruth labels that are passed to the "assign"
function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
If set to None, unmatched_cls_target is set to be [0] for each anchor.
Raises:
ValueError: if similarity_calc is not a RegionSimilarityCalculator or
if matcher is not a Matcher or if box_coder is not a BoxCoder
"""
self._similarity_calc = similarity_calc
self._matcher = matcher
self._box_coder = box_coder
self._negative_class_weight = negative_class_weight
if unmatched_cls_target is None:
self._unmatched_cls_target = tf.constant([0], tf.float32)
else:
self._unmatched_cls_target = unmatched_cls_target
@property
def box_coder(self):
"""Return box coder."""
return self._box_coder
def assign(self, anchors, groundtruth_boxes, groundtruth_labels=None,
groundtruth_weights=None, **params):
"""Assign classification and regression targets to each anchor.
For a given set of anchors and groundtruth detections, match anchors
to groundtruth_boxes and assign classification and regression targets to
each anchor as well as weights based on the resulting match (specifying,
e.g., which anchors should not contribute to training loss).
Anchors that are not matched to anything are given a classification target
of self._unmatched_cls_target which can be specified via the constructor.
Args:
anchors: a BoxList representing N anchors
groundtruth_boxes: a BoxList representing M groundtruth boxes
groundtruth_labels: a tensor of shape [M, d_1, ... d_k]
with labels for each of the ground_truth boxes. The subshape
[d_1, ... d_k] can be empty (corresponding to scalar inputs). When set
to None, groundtruth_labels assumes a binary problem where all
ground_truth boxes get a positive label (of 1).
groundtruth_weights: a float tensor of shape [M] indicating the weight to
assign to all anchors matched to a particular groundtruth box. The weights
must be in [0., 1.]. If None, all weights are set to 1.
**params: Additional keyword arguments for specific implementations of
the Matcher.
Returns:
cls_targets: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k],
where the subshape [d_1, ..., d_k] is compatible with groundtruth_labels
which has shape [num_gt_boxes, d_1, d_2, ... d_k].
cls_weights: a float32 tensor with shape [num_anchors]
reg_targets: a float32 tensor with shape [num_anchors, box_code_dimension]
reg_weights: a float32 tensor with shape [num_anchors]
match: a matcher.Match object encoding the match between anchors and
groundtruth boxes, with rows corresponding to groundtruth boxes
and columns corresponding to anchors.
Raises:
ValueError: if anchors or groundtruth_boxes are not of type
box_list.BoxList
"""
if not isinstance(anchors, box_list.BoxList):
raise ValueError('anchors must be a BoxList')
if not isinstance(groundtruth_boxes, box_list.BoxList):
raise ValueError('groundtruth_boxes must be a BoxList')
if groundtruth_labels is None:
groundtruth_labels = tf.ones(tf.expand_dims(groundtruth_boxes.num_boxes(), 0))
groundtruth_labels = tf.expand_dims(groundtruth_labels, -1)
unmatched_shape_assert = shape_utils.assert_shape_equal(
shape_utils.combined_static_and_dynamic_shape(groundtruth_labels)[1:],
shape_utils.combined_static_and_dynamic_shape(
self._unmatched_cls_target))
labels_and_box_shapes_assert = shape_utils.assert_shape_equal(
shape_utils.combined_static_and_dynamic_shape(
groundtruth_labels)[:1],
shape_utils.combined_static_and_dynamic_shape(
groundtruth_boxes.get())[:1])
if groundtruth_weights is None:
num_gt_boxes = groundtruth_boxes.num_boxes_static()
if not num_gt_boxes:
num_gt_boxes = groundtruth_boxes.num_boxes()
groundtruth_weights = tf.ones([num_gt_boxes], dtype=tf.float32)
with tf.control_dependencies(
[unmatched_shape_assert, labels_and_box_shapes_assert]):
match_quality_matrix = self._similarity_calc.compare(groundtruth_boxes, anchors)
match = self._matcher.match(match_quality_matrix, **params)
reg_targets = self._create_regression_targets(anchors,
groundtruth_boxes,
match)
cls_targets = self._create_classification_targets(groundtruth_labels, match)
reg_weights = self._create_regression_weights(match, groundtruth_weights)
cls_weights = self._create_classification_weights(match, groundtruth_weights)
num_anchors = anchors.num_boxes_static()
if num_anchors is not None:
reg_targets = self._reset_target_shape(reg_targets, num_anchors)
cls_targets = self._reset_target_shape(cls_targets, num_anchors)
reg_weights = self._reset_target_shape(reg_weights, num_anchors)
cls_weights = self._reset_target_shape(cls_weights, num_anchors)
return cls_targets, cls_weights, reg_targets, reg_weights, match
def _reset_target_shape(self, target, num_anchors):
"""Sets the static shape of the target.
Args:
target: the target tensor. Its first dimension will be overwritten.
num_anchors: the number of anchors, which is used to override the target's
first dimension.
Returns:
A tensor with the shape info filled in.
"""
target_shape = target.get_shape().as_list()
target_shape[0] = num_anchors
target.set_shape(target_shape)
return target
def _create_regression_targets(self, anchors, groundtruth_boxes, match):
"""Returns a regression target for each anchor.
Args:
anchors: a BoxList representing N anchors
groundtruth_boxes: a BoxList representing M groundtruth_boxes
match: a matcher.Match object
Returns:
reg_targets: a float32 tensor with shape [N, box_code_dimension]
"""
matched_gt_boxes = match.gather_based_on_match(
groundtruth_boxes.get(),
unmatched_value=tf.zeros(4),
ignored_value=tf.zeros(4))
matched_gt_boxlist = box_list.BoxList(matched_gt_boxes)
if groundtruth_boxes.has_field(KEYPOINTS_FIELD_NAME):
groundtruth_keypoints = groundtruth_boxes.get_field(KEYPOINTS_FIELD_NAME)
matched_keypoints = match.gather_based_on_match(
groundtruth_keypoints,
unmatched_value=tf.zeros(groundtruth_keypoints.get_shape()[1:]),
ignored_value=tf.zeros(groundtruth_keypoints.get_shape()[1:]))
matched_gt_boxlist.add_field(KEYPOINTS_FIELD_NAME, matched_keypoints)
matched_reg_targets = self._box_coder.encode(matched_gt_boxlist, anchors)
match_results_shape = shape_utils.combined_static_and_dynamic_shape(
match.match_results)
# Zero out the unmatched and ignored regression targets.
unmatched_ignored_reg_targets = tf.tile(
self._default_regression_target(), [match_results_shape[0], 1])
matched_anchors_mask = match.matched_column_indicator()
reg_targets = tf.where(matched_anchors_mask,
matched_reg_targets,
unmatched_ignored_reg_targets)
return reg_targets
def _default_regression_target(self):
"""Returns the default target for anchors to regress to.
Default regression targets are set to zero (though in
this implementation what these targets are set to should
not matter as the regression weight of any box set to
regress to the default target is zero).
Returns:
default_target: a float32 tensor with shape [1, box_code_dimension]
"""
return tf.constant([self._box_coder.code_size*[0]], tf.float32)
def _create_classification_targets(self, groundtruth_labels, match):
"""Create classification targets for each anchor.
Assigns a classification target for each anchor to the matching
groundtruth label that is provided by match. Anchors that are not matched
to anything are given the target self._unmatched_cls_target.
Args:
groundtruth_labels: a tensor of shape [num_gt_boxes, d_1, ... d_k]
with labels for each of the ground_truth boxes. The subshape
[d_1, ... d_k] can be empty (corresponding to scalar labels).
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
Returns:
a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k], where the
subshape [d_1, ..., d_k] is compatible with groundtruth_labels which has
shape [num_gt_boxes, d_1, d_2, ... d_k].
"""
return match.gather_based_on_match(
groundtruth_labels,
unmatched_value=self._unmatched_cls_target,
ignored_value=self._unmatched_cls_target)
def _create_regression_weights(self, match, groundtruth_weights):
"""Set regression weight for each anchor.
Only positive anchors are set to contribute to the regression loss, so this
method returns a weight of 1 for every positive anchor and 0 for every
negative anchor.
Args:
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
groundtruth_weights: a float tensor of shape [M] indicating the weight to
assign to all anchors matched to a particular groundtruth box.
Returns:
a float32 tensor with shape [num_anchors] representing regression weights.
"""
return match.gather_based_on_match(
groundtruth_weights, ignored_value=0., unmatched_value=0.)
def _create_classification_weights(self,
match,
groundtruth_weights):
"""Create classification weights for each anchor.
Positive (matched) anchors are associated with the weight of the
groundtruth box they are matched to and negative (unmatched) anchors are
associated with negative_class_weight. When anchors are ignored, weights
are set to zero. By default the negative weight is 1.0, but it can be
adjusted to handle class imbalance (which is almost always the case in
object detection).
Args:
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
groundtruth_weights: a float tensor of shape [M] indicating the weight to
assign to all anchors matched to a particular groundtruth box.
Returns:
a float32 tensor with shape [num_anchors] representing classification
weights.
"""
return match.gather_based_on_match(
groundtruth_weights,
ignored_value=0.,
unmatched_value=self._negative_class_weight)
def get_box_coder(self):
"""Get BoxCoder of this TargetAssigner.
Returns:
BoxCoder object.
"""
return self._box_coder
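# Usage sketch (added for illustration; not part of the original module).
# It wires a TargetAssigner together with the IoU similarity calculator and
# the Faster R-CNN box coder defined elsewhere in this package. The inline
# _ToyArgMaxMatcher is an assumption made only for this example; real
# pipelines plug in a proper thresholded Matcher implementation instead.
if __name__ == '__main__':
    from nvidia_tao_tf1.cv.efficientdet.object_detection import faster_rcnn_box_coder
    from nvidia_tao_tf1.cv.efficientdet.object_detection import matcher
    from nvidia_tao_tf1.cv.efficientdet.object_detection import region_similarity_calculator

    class _ToyArgMaxMatcher(matcher.Matcher):
        """Toy matcher: every anchor is matched to its highest-IoU groundtruth box."""

        def _match(self, similarity_matrix, **params):
            return tf.cast(tf.argmax(similarity_matrix, axis=0), tf.int32)

    tf.disable_eager_execution()
    assigner = TargetAssigner(
        similarity_calc=region_similarity_calculator.IouSimilarity(),
        matcher=_ToyArgMaxMatcher(),
        box_coder=faster_rcnn_box_coder.FasterRcnnBoxCoder())
    anchors = box_list.BoxList(
        tf.constant([[0., 0., .5, .5], [.5, .5, 1., 1.]], tf.float32))
    gt_boxes = box_list.BoxList(tf.constant([[0., 0., .4, .4]], tf.float32))
    cls_targets, cls_weights, reg_targets, reg_weights, _ = assigner.assign(
        anchors, gt_boxes)
    with tf.Session() as sess:
        print(sess.run([cls_targets, reg_targets]))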
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/object_detection/target_assigner.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Matcher interface and Match class.
This module defines the Matcher interface and the Match object. The job of the
matcher is to match row and column indices based on the similarity matrix and
other optional parameters. Each column is matched to at most one row. There
are three possibilities for the matching:
1) match: A column matches a row.
2) no_match: A column does not match any row.
3) ignore: A column that is neither 'match' nor no_match.
The ignore case is regularly encountered in object detection: when an anchor has
a relatively small overlap with a groundtruth box, one wants to treat that
anchor neither as a positive example (match) nor as a negative example (no match).
The Match class is used to store the match results and it provides simple apis
to query the results.
"""
import abc
import tensorflow.compat.v1 as tf
class Match(object):
"""Class to store results from the matcher.
This class is used to store the results from the matcher. It provides
convenient methods to query the matching results.
"""
def __init__(self, match_results):
"""Constructs a Match object.
Args:
match_results: Integer tensor of shape [N] with (1) match_results[i]>=0,
meaning that column i is matched with row match_results[i].
(2) match_results[i]=-1, meaning that column i is not matched.
(3) match_results[i]=-2, meaning that column i is ignored.
Raises:
ValueError: if match_results does not have rank 1 or is not an
int32 tensor.
"""
if match_results.shape.ndims != 1:
raise ValueError('match_results should have rank 1')
if match_results.dtype != tf.int32:
raise ValueError('match_results should be an int32 tensor')
self._match_results = match_results
@property
def match_results(self):
"""The accessor for match results.
Returns:
the tensor which encodes the match results.
"""
return self._match_results
def matched_column_indices(self):
"""Returns column indices that match to some row.
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return self._reshape_and_cast(tf.where(tf.greater(self._match_results, -1)))
def matched_column_indicator(self):
"""Returns column indices that are matched.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return tf.greater_equal(self._match_results, 0)
def num_matched_columns(self):
"""Returns number (int32 scalar tensor) of matched columns."""
return tf.shape(self.matched_column_indices())[0]
def unmatched_column_indices(self):
"""Returns column indices that do not match any row.
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return self._reshape_and_cast(tf.where(tf.equal(self._match_results, -1)))
def unmatched_column_indicator(self):
"""Returns column indices that are unmatched.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return tf.equal(self._match_results, -1)
def num_unmatched_columns(self):
"""Returns number (int32 scalar tensor) of unmatched columns."""
return tf.shape(self.unmatched_column_indices())[0]
def ignored_column_indices(self):
"""Returns column indices that are ignored (neither Matched nor Unmatched).
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return self._reshape_and_cast(tf.where(self.ignored_column_indicator()))
def ignored_column_indicator(self):
"""Returns boolean column indicator where True means the column is ignored.
Returns:
column_indicator: boolean vector which is True for all ignored column
indices.
"""
return tf.equal(self._match_results, -2)
def num_ignored_columns(self):
"""Returns number (int32 scalar tensor) of matched columns."""
return tf.shape(self.ignored_column_indices())[0]
def unmatched_or_ignored_column_indices(self):
"""Returns column indices that are unmatched or ignored.
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return self._reshape_and_cast(tf.where(tf.greater(0, self._match_results)))
def matched_row_indices(self):
"""Returns row indices that match some column.
The indices returned by this op are ordered so as to be in correspondence
with the output of matched_column_indices(). For example if
self.matched_column_indices() is [0, 2], and self.matched_row_indices() is
[7, 3], then we know that column 0 was matched to row 7 and column 2 was
matched to row 3.
Returns:
row_indices: int32 tensor of shape [K] with row indices.
"""
return self._reshape_and_cast(
tf.gather(self._match_results, self.matched_column_indices()))
def _reshape_and_cast(self, t):
return tf.cast(tf.reshape(t, [-1]), tf.int32)
def gather_based_on_match(self, input_tensor, unmatched_value,
ignored_value):
"""Gathers elements from `input_tensor` based on match results.
For columns that are matched to a row, gathered_tensor[col] is set to
input_tensor[match_results[col]]. For columns that are unmatched,
gathered_tensor[col] is set to unmatched_value. Finally, for columns that
are ignored gathered_tensor[col] is set to ignored_value.
Note that the input_tensor.shape[1:] must match with unmatched_value.shape
and ignored_value.shape
Args:
input_tensor: Tensor to gather values from.
unmatched_value: Constant tensor value for unmatched columns.
ignored_value: Constant tensor value for ignored columns.
Returns:
gathered_tensor: A tensor containing values gathered from input_tensor.
The shape of the gathered tensor is [match_results.shape[0]] +
input_tensor.shape[1:].
"""
input_tensor = tf.concat([tf.stack([ignored_value, unmatched_value]),
input_tensor], axis=0)
gather_indices = tf.maximum(self.match_results + 2, 0)
gathered_tensor = tf.gather(input_tensor, gather_indices)
return gathered_tensor
class Matcher(object):
"""Abstract base class for matcher."""
__metaclass__ = abc.ABCMeta
def match(self, similarity_matrix, scope=None, **params):
"""Computes matches among row and column indices and returns the result.
Computes matches among the row and column indices based on the similarity
matrix and optional arguments.
Args:
similarity_matrix: Float tensor of shape [N, M] with pairwise similarity
where higher value means more similar.
scope: Op scope name. Defaults to 'Match' if None.
**params: Additional keyword arguments for specific implementations of
the Matcher.
Returns:
A Match object with the results of matching.
"""
with tf.name_scope(scope, 'Match', [similarity_matrix, params]):
return Match(self._match(similarity_matrix, **params))
@abc.abstractmethod
def _match(self, similarity_matrix, **params):
"""Method to be overridden by implementations.
Args:
similarity_matrix: Float tensor of shape [N, M] with pairwise similarity
where higher value means more similar.
**params: Additional keyword arguments for specific implementations of
the Matcher.
Returns:
match_results: Integer tensor of shape [M]: match_results[i]>=0 means
that column i is matched to row match_results[i], match_results[i]=-1
means that the column is not matched. match_results[i]=-2 means that
the column is ignored (usually this happens when there is a very weak
match which one neither wants as positive nor negative example).
"""
pass
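# Usage sketch (added for illustration; not part of the original module):
# builds a Match object from a hand-written result vector and shows how
# gather_based_on_match fills matched, unmatched and ignored columns.
if __name__ == '__main__':
    tf.disable_eager_execution()
    # Column 0 matches row 1, column 1 is unmatched, column 2 is ignored.
    match = Match(tf.constant([1, -1, -2], dtype=tf.int32))
    row_values = tf.constant([[10.], [20.]])  # one value per groundtruth row
    gathered = match.gather_based_on_match(
        row_values,
        unmatched_value=tf.constant([0.]),
        ignored_value=tf.constant([-1.]))
    with tf.Session() as sess:
        print(sess.run(gathered))  # expected: [[20.], [0.], [-1.]]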
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/object_detection/matcher.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Faster RCNN box coder.
Faster RCNN box coder follows the coding schema described below:
ty = (y - ya) / ha
tx = (x - xa) / wa
th = log(h / ha)
tw = log(w / wa)
where x, y, w, h denote the box's center coordinates, width and height
respectively. Similarly, xa, ya, wa, ha denote the anchor's center
coordinates, width and height. tx, ty, tw and th denote the anchor-encoded
center, width and height respectively.
See http://arxiv.org/abs/1506.01497 for details.
"""
import tensorflow.compat.v1 as tf
from nvidia_tao_tf1.cv.efficientdet.object_detection import box_coder
from nvidia_tao_tf1.cv.efficientdet.object_detection import box_list
EPSILON = 1e-8
class FasterRcnnBoxCoder(box_coder.BoxCoder):
"""Faster RCNN box coder."""
def __init__(self, scale_factors=None):
"""Constructor for FasterRcnnBoxCoder.
Args:
scale_factors: List of 4 positive scalars to scale ty, tx, th and tw.
If set to None, does not perform scaling. For Faster RCNN,
the open-source implementation recommends using [10.0, 10.0, 5.0, 5.0].
"""
if scale_factors:
assert len(scale_factors) == 4
for scalar in scale_factors:
assert scalar > 0
self._scale_factors = scale_factors
@property
def code_size(self):
"""Return code size."""
return 4
def _encode(self, boxes, anchors):
"""Encode a box collection with respect to anchor collection.
Args:
boxes: BoxList holding N boxes to be encoded.
anchors: BoxList of anchors.
Returns:
a tensor representing N anchor-encoded boxes of the format
[ty, tx, th, tw].
"""
# Convert anchors to the center coordinate representation.
ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()
ycenter, xcenter, h, w = boxes.get_center_coordinates_and_sizes()
# Avoid NaN in division and log below.
# ha = tf.maximum(EPSILON, ha)
# wa = tf.maximum(EPSILON, wa)
# h = tf.maximum(EPSILON, h)
# w = tf.maximum(EPSILON, w)
ha += EPSILON
wa += EPSILON
h += EPSILON
w += EPSILON
tx = (xcenter - xcenter_a) / wa
ty = (ycenter - ycenter_a) / ha
tw = tf.math.log(w / wa)
th = tf.math.log(h / ha)
# Scales location targets as used in paper for joint training.
if self._scale_factors:
ty *= self._scale_factors[0]
tx *= self._scale_factors[1]
th *= self._scale_factors[2]
tw *= self._scale_factors[3]
return tf.transpose(tf.stack([ty, tx, th, tw]))
def _decode(self, rel_codes, anchors):
"""Decode relative codes to boxes.
Args:
rel_codes: a tensor representing N anchor-encoded boxes.
anchors: BoxList of anchors.
Returns:
boxes: BoxList holding N bounding boxes.
"""
ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()
ty, tx, th, tw = tf.unstack(tf.transpose(rel_codes))
if self._scale_factors:
ty /= self._scale_factors[0]
tx /= self._scale_factors[1]
th /= self._scale_factors[2]
tw /= self._scale_factors[3]
w = tf.exp(tw) * wa
h = tf.exp(th) * ha
ycenter = ty * ha + ycenter_a
xcenter = tx * wa + xcenter_a
ymin = ycenter - h / 2.
xmin = xcenter - w / 2.
ymax = ycenter + h / 2.
xmax = xcenter + w / 2.
return box_list.BoxList(tf.transpose(tf.stack([ymin, xmin, ymax, xmax])))
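# Usage sketch (added for illustration; not part of the original module):
# encodes one groundtruth box against one anchor with the schema documented
# above, then decodes it back. The scale factors are the commonly used
# [10., 10., 5., 5.]; the round trip is only approximate because of EPSILON.
if __name__ == '__main__':
    tf.disable_eager_execution()
    coder = FasterRcnnBoxCoder(scale_factors=[10.0, 10.0, 5.0, 5.0])
    anchors = box_list.BoxList(tf.constant([[0.1, 0.1, 0.5, 0.5]], tf.float32))
    boxes = box_list.BoxList(tf.constant([[0.15, 0.2, 0.4, 0.5]], tf.float32))
    rel_codes = coder.encode(boxes, anchors)
    decoded = coder.decode(rel_codes, anchors)
    with tf.Session() as sess:
        codes, roundtrip = sess.run([rel_codes, decoded.get()])
        print('encoded [ty, tx, th, tw]:', codes)
        print('decoded [ymin, xmin, ymax, xmax]:', roundtrip)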
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/object_detection/faster_rcnn_box_coder.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils used to manipulate tensor shapes."""
import tensorflow.compat.v1 as tf
def assert_shape_equal(shape_a, shape_b):
"""Asserts that shape_a and shape_b are equal.
If the shapes are static, raises a ValueError when the shapes
mismatch.
If the shapes are dynamic, raises a tf InvalidArgumentError when the shapes
mismatch.
Args:
shape_a: a list containing shape of the first tensor.
shape_b: a list containing shape of the second tensor.
Returns:
Either a tf.no_op() when shapes are all static and a tf.assert_equal() op
when the shapes are dynamic.
Raises:
ValueError: When shapes are both static and unequal.
"""
if (all(isinstance(dim, int) for dim in shape_a) and
all(isinstance(dim, int) for dim in shape_b)):
if shape_a != shape_b:
raise ValueError('Unequal shapes {}, {}'.format(shape_a, shape_b))
return tf.no_op()
return tf.assert_equal(shape_a, shape_b)
def combined_static_and_dynamic_shape(tensor):
"""Returns a list containing static and dynamic values for the dimensions.
Returns a list of static and dynamic values for shape dimensions. This is
useful to preserve static shapes when available in reshape operation.
Args:
tensor: A tensor of any type.
Returns:
A list of size tensor.shape.ndims containing integers or a scalar tensor.
"""
static_tensor_shape = tensor.shape.as_list()
dynamic_tensor_shape = tf.shape(tensor)
combined_shape = []
for index, dim in enumerate(static_tensor_shape):
if dim is not None:
combined_shape.append(dim)
else:
combined_shape.append(dynamic_tensor_shape[index])
return combined_shape
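# Usage sketch (added for illustration; not part of the original module):
# for a placeholder whose batch dimension is unknown,
# combined_static_and_dynamic_shape keeps the statically known dimensions as
# Python ints and falls back to a dynamic tensor only for the unknown one.
if __name__ == '__main__':
    tf.disable_eager_execution()
    images = tf.placeholder(tf.float32, shape=[None, 512, 512, 3])
    combined = combined_static_and_dynamic_shape(images)
    # combined[0] is a scalar int tensor; combined[1:] are the ints 512, 512, 3.
    print(combined)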
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/object_detection/shape_utils.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base box coder.
Box coders convert between coordinate frames, namely image-centric
(with (0,0) on the top left of image) and anchor-centric (with (0,0) being
defined by a specific anchor).
Users of a BoxCoder can call two methods:
encode: which encodes a box with respect to a given anchor
(or rather, a tensor of boxes wrt a corresponding tensor of anchors) and
decode: which inverts this encoding with a decode operation.
In both cases, the arguments are assumed to be in 1-1 correspondence already;
it is not the job of a BoxCoder to perform matching.
"""
from abc import ABCMeta
from abc import abstractmethod
from abc import abstractproperty
import tensorflow.compat.v1 as tf
# Box coder types.
FASTER_RCNN = 'faster_rcnn'
KEYPOINT = 'keypoint'
MEAN_STDDEV = 'mean_stddev'
SQUARE = 'square'
class BoxCoder(object):
"""Abstract base class for box coder."""
__metaclass__ = ABCMeta
@abstractproperty # noqa pylint: disable=W4905
def code_size(self):
"""Return the size of each code.
This number is a constant and should agree with the output of the `encode`
op (e.g. if rel_codes is the output of self.encode(...), then it should have
shape [N, code_size()]). This abstractproperty should be overridden by
implementations.
Returns:
an integer constant
"""
pass
def encode(self, boxes, anchors):
"""Encode a box list relative to an anchor collection.
Args:
boxes: BoxList holding N boxes to be encoded
anchors: BoxList of N anchors
Returns:
a tensor representing N relative-encoded boxes
"""
with tf.name_scope('Encode'):
return self._encode(boxes, anchors)
def decode(self, rel_codes, anchors):
"""Decode boxes that are encoded relative to an anchor collection.
Args:
rel_codes: a tensor representing N relative-encoded boxes
anchors: BoxList of anchors
Returns:
boxlist: BoxList holding N boxes encoded in the ordinary way (i.e.,
with corners y_min, x_min, y_max, x_max)
"""
with tf.name_scope('Decode'):
return self._decode(rel_codes, anchors)
@abstractmethod
def _encode(self, boxes, anchors):
"""Method to be overridden by implementations.
Args:
boxes: BoxList holding N boxes to be encoded
anchors: BoxList of N anchors
Returns:
a tensor representing N relative-encoded boxes
"""
pass
@abstractmethod
def _decode(self, rel_codes, anchors):
"""Method to be overridden by implementations.
Args:
rel_codes: a tensor representing N relative-encoded boxes
anchors: BoxList of anchors
Returns:
boxlist: BoxList holding N boxes encoded in the ordinary way (i.e.,
with corners y_min, x_min, y_max, x_max)
"""
pass
def batch_decode(encoded_boxes, box_coder, anchors):
"""Decode a batch of encoded boxes.
This op takes a batch of encoded bounding boxes and transforms
them to a batch of bounding boxes specified by their corners in
the order of [y_min, x_min, y_max, x_max].
Args:
encoded_boxes: a float32 tensor of shape [batch_size, num_anchors,
code_size] representing the location of the objects.
box_coder: a BoxCoder object.
anchors: a BoxList of anchors used to encode `encoded_boxes`.
Returns:
decoded_boxes: a float32 tensor of shape [batch_size, num_anchors,
code_size] representing the corners of the objects in the order
of [y_min, x_min, y_max, x_max].
Raises:
ValueError: if batch sizes of the inputs are inconsistent, or if
the number of anchors inferred from encoded_boxes and anchors are
inconsistent.
"""
encoded_boxes.get_shape().assert_has_rank(3)
if encoded_boxes.get_shape()[1].value != anchors.num_boxes_static():
raise ValueError('The number of anchors inferred from encoded_boxes'
' and anchors are inconsistent: shape[1] of encoded_boxes'
' %s should be equal to the number of anchors: %s.' %
(encoded_boxes.get_shape()[1].value,
anchors.num_boxes_static()))
decoded_boxes = tf.stack([
box_coder.decode(boxes, anchors).get()
for boxes in tf.unstack(encoded_boxes)
])
return decoded_boxes
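# Usage sketch (added for illustration; not part of the original module):
# runs batch_decode with the Faster R-CNN coder defined elsewhere in this
# package. All-zero codes decode back to the anchors themselves, which makes
# the behaviour easy to eyeball.
if __name__ == '__main__':
    from nvidia_tao_tf1.cv.efficientdet.object_detection import box_list
    from nvidia_tao_tf1.cv.efficientdet.object_detection import faster_rcnn_box_coder
    tf.disable_eager_execution()
    coder = faster_rcnn_box_coder.FasterRcnnBoxCoder()
    anchors = box_list.BoxList(
        tf.constant([[0., 0., .5, .5], [.5, .5, 1., 1.]], tf.float32))
    encoded = tf.zeros([2, 2, 4], tf.float32)  # [batch, num_anchors, code_size]
    decoded = batch_decode(encoded, coder, anchors)
    with tf.Session() as sess:
        print(sess.run(decoded))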
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/object_detection/box_coder.py |
"""Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/object_detection/__init__.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Preprocess images and bounding boxes for detection.
We perform two sets of operations in preprocessing stage:
(a) operations that are applied to both training and testing data,
(b) operations that are applied only to training data for the purpose of
data augmentation.
A preprocessing function receives a set of inputs,
e.g. an image and bounding boxes,
performs an operation on them, and returns them.
Some examples are: randomly cropping the image, randomly mirroring the image,
randomly changing the brightness, contrast or hue, and
randomly jittering the bounding boxes.
The image is a rank 4 tensor: [1, height, width, channels] with
dtype=tf.float32. The groundtruth_boxes is a rank 2 tensor: [N, 4] where
in each row there is a box with [ymin xmin ymax xmax].
Boxes are in normalized coordinates meaning
their coordinate values range in [0, 1].
Important Note: In tensor_dict, images is a rank 4 tensor, but preprocessing
functions receive a rank 3 tensor for processing the image. Thus, inside the
preprocess function we squeeze the image to become a rank 3 tensor and then
we pass it to the functions. At the end of the preprocess we expand the image
back to rank 4.
"""
import tensorflow.compat.v1 as tf
from nvidia_tao_tf1.cv.efficientdet.object_detection import box_list
def _flip_boxes_left_right(boxes):
"""Left-right flip the boxes.
Args:
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
Returns:
Flipped boxes.
"""
ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1)
flipped_xmin = tf.subtract(1.0, xmax)
flipped_xmax = tf.subtract(1.0, xmin)
flipped_boxes = tf.concat([ymin, flipped_xmin, ymax, flipped_xmax], 1)
return flipped_boxes
def _flip_masks_left_right(masks):
"""Left-right flip masks.
Args:
masks: rank 3 float32 tensor with shape
[num_instances, height, width] representing instance masks.
Returns:
flipped masks: rank 3 float32 tensor with shape
[num_instances, height, width] representing instance masks.
"""
return masks[:, :, ::-1]
def keypoint_flip_horizontal(keypoints, flip_point, flip_permutation,
scope=None):
"""Flips the keypoints horizontally around the flip_point.
This operation flips the x coordinate for each keypoint around the flip_point
and also permutes the keypoints in a manner specified by flip_permutation.
Args:
keypoints: a tensor of shape [num_instances, num_keypoints, 2]
flip_point: (float) scalar tensor representing the x coordinate to flip the
keypoints around.
flip_permutation: rank 1 int32 tensor containing the keypoint flip
permutation. This specifies the mapping from original keypoint indices
to the flipped keypoint indices. This is used primarily for keypoints
that are not reflection invariant. E.g. Suppose there are 3 keypoints
representing ['head', 'right_eye', 'left_eye'], then a logical choice for
flip_permutation might be [0, 2, 1] since we want to swap the 'left_eye'
and 'right_eye' after a horizontal flip.
scope: name scope.
Returns:
new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
"""
with tf.name_scope(scope, 'FlipHorizontal'):
keypoints = tf.transpose(keypoints, [1, 0, 2])
keypoints = tf.gather(keypoints, flip_permutation)
v, u = tf.split(value=keypoints, num_or_size_splits=2, axis=2)
u = flip_point * 2.0 - u
new_keypoints = tf.concat([v, u], 2)
new_keypoints = tf.transpose(new_keypoints, [1, 0, 2])
return new_keypoints
def random_horizontal_flip(image,
boxes=None,
masks=None,
keypoints=None,
keypoint_flip_permutation=None,
seed=None):
"""Randomly flips the image and detections horizontally.
The probability of flipping the image is 50%.
Args:
image: rank 3 float32 tensor with shape [height, width, channels].
boxes: (optional) rank 2 float32 tensor with shape [N, 4]
containing the bounding boxes.
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks. The masks
are of the same height, width as the input `image`.
keypoints: (optional) rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]. The keypoints are in y-x
normalized coordinates.
keypoint_flip_permutation: rank 1 int32 tensor containing the keypoint flip
permutation.
seed: random seed
Returns:
image: image which is the same shape as input image.
If boxes, masks, keypoints, and keypoint_flip_permutation are not None,
the function also returns the following tensors.
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
masks: rank 3 float32 tensor with shape [num_instances, height, width]
containing instance masks.
keypoints: rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]
Raises:
ValueError: if keypoints are provided but keypoint_flip_permutation is not.
"""
def _flip_image(image):
# flip image
image_flipped = tf.image.flip_left_right(image)
return image_flipped
if keypoints is not None and keypoint_flip_permutation is None:
raise ValueError(
'keypoints are provided but keypoint_flip_permutation is not provided')
with tf.name_scope('RandomHorizontalFlip', values=[image, boxes]):
result = []
# random variable defining whether to do flip or not
do_a_flip_random = tf.greater(tf.random_uniform([], seed=seed), 0.5)
# flip image
image = tf.cond(do_a_flip_random, lambda: _flip_image(image), lambda: image)
result.append(image)
# flip boxes
if boxes is not None:
boxes = tf.cond(do_a_flip_random, lambda: _flip_boxes_left_right(boxes),
lambda: boxes)
result.append(boxes)
# flip masks
if masks is not None:
masks = tf.cond(do_a_flip_random, lambda: _flip_masks_left_right(masks),
lambda: masks)
result.append(masks)
# flip keypoints
if keypoints is not None and keypoint_flip_permutation is not None:
permutation = keypoint_flip_permutation
keypoints = tf.cond(
do_a_flip_random,
lambda: keypoint_flip_horizontal(keypoints, 0.5, permutation),
lambda: keypoints)
result.append(keypoints)
return tuple(result)
def _compute_new_static_size(image, min_dimension, max_dimension):
"""Compute new static shape for resize_to_range method."""
image_shape = image.get_shape().as_list()
orig_height = image_shape[0]
orig_width = image_shape[1]
num_channels = image_shape[2]
orig_min_dim = min(orig_height, orig_width)
# Calculates the larger of the possible sizes
large_scale_factor = min_dimension / float(orig_min_dim)
# Scaling orig_(height|width) by large_scale_factor will make the smaller
# dimension equal to min_dimension, save for floating point rounding errors.
# For reasonably-sized images, taking the nearest integer will reliably
# eliminate this error.
large_height = int(round(orig_height * large_scale_factor))
large_width = int(round(orig_width * large_scale_factor))
large_size = [large_height, large_width]
if max_dimension:
# Calculates the smaller of the possible sizes, use that if the larger
# is too big.
orig_max_dim = max(orig_height, orig_width)
small_scale_factor = max_dimension / float(orig_max_dim)
# Scaling orig_(height|width) by small_scale_factor will make the larger
# dimension equal to max_dimension, save for floating point rounding
# errors. For reasonably-sized images, taking the nearest integer will
# reliably eliminate this error.
small_height = int(round(orig_height * small_scale_factor))
small_width = int(round(orig_width * small_scale_factor))
small_size = [small_height, small_width]
new_size = large_size
if max(large_size) > max_dimension:
new_size = small_size
else:
new_size = large_size
return tf.constant(new_size + [num_channels])
def _compute_new_dynamic_size(image, min_dimension, max_dimension):
"""Compute new dynamic shape for resize_to_range method."""
image_shape = tf.shape(image)
orig_height = tf.to_float(image_shape[0])
orig_width = tf.to_float(image_shape[1])
num_channels = image_shape[2]
orig_min_dim = tf.minimum(orig_height, orig_width)
# Calculates the larger of the possible sizes
min_dimension = tf.constant(min_dimension, dtype=tf.float32)
large_scale_factor = min_dimension / orig_min_dim
# Scaling orig_(height|width) by large_scale_factor will make the smaller
# dimension equal to min_dimension, save for floating point rounding errors.
# For reasonably-sized images, taking the nearest integer will reliably
# eliminate this error.
large_height = tf.to_int32(tf.round(orig_height * large_scale_factor))
large_width = tf.to_int32(tf.round(orig_width * large_scale_factor))
large_size = tf.stack([large_height, large_width])
if max_dimension:
# Calculates the smaller of the possible sizes, use that if the larger
# is too big.
orig_max_dim = tf.maximum(orig_height, orig_width)
max_dimension = tf.constant(max_dimension, dtype=tf.float32)
small_scale_factor = max_dimension / orig_max_dim
# Scaling orig_(height|width) by small_scale_factor will make the larger
# dimension equal to max_dimension, save for floating point rounding
# errors. For reasonably-sized images, taking the nearest integer will
# reliably eliminate this error.
small_height = tf.to_int32(tf.round(orig_height * small_scale_factor))
small_width = tf.to_int32(tf.round(orig_width * small_scale_factor))
small_size = tf.stack([small_height, small_width])
new_size = tf.cond(
tf.to_float(tf.reduce_max(large_size)) > max_dimension,
lambda: small_size, lambda: large_size)
else:
new_size = large_size
return tf.stack(tf.unstack(new_size) + [num_channels])
def resize_to_range(image,
masks=None,
min_dimension=None,
max_dimension=None,
method=tf.image.ResizeMethod.BILINEAR,
align_corners=False,
pad_to_max_dimension=False):
"""Resizes an image so its dimensions are within the provided value.
The output size can be described by two cases:
1. If the image can be rescaled so its minimum dimension is equal to the
provided value without the other dimension exceeding max_dimension,
then do so.
2. Otherwise, resize so the largest dimension is equal to max_dimension.
Args:
image: A 3D tensor of shape [height, width, channels]
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks.
min_dimension: (optional) (scalar) desired size of the smaller image
dimension.
max_dimension: (optional) (scalar) maximum allowed size
of the larger image dimension.
method: (optional) interpolation method used in resizing. Defaults to
BILINEAR.
align_corners: bool. If true, exactly align all 4 corners of the input
and output. Defaults to False.
pad_to_max_dimension: Whether to resize the image and pad it with zeros
so the resulting image is of the spatial size
[max_dimension, max_dimension]. If masks are included they are padded
similarly.
Returns:
Note that the position of the resized_image_shape changes based on whether
masks are present.
resized_image: A 3D tensor of shape [new_height, new_width, channels],
where the image has been resized (with bilinear interpolation) so that
min(new_height, new_width) == min_dimension or
max(new_height, new_width) == max_dimension.
resized_masks: If masks is not None, also outputs masks. A 3D tensor of
shape [num_instances, new_height, new_width].
resized_image_shape: A 1D tensor of shape [3] containing shape of the
resized image.
Raises:
ValueError: if the image is not a 3D tensor.
"""
if len(image.get_shape()) != 3:
raise ValueError('Image should be 3D tensor')
with tf.name_scope('ResizeToRange', values=[image, min_dimension]):
if image.get_shape().is_fully_defined():
new_size = _compute_new_static_size(image, min_dimension, max_dimension)
else:
new_size = _compute_new_dynamic_size(image, min_dimension, max_dimension)
new_image = tf.image.resize_images(
image, new_size[:-1], method=method, align_corners=align_corners)
if pad_to_max_dimension:
new_image = tf.image.pad_to_bounding_box(
new_image, 0, 0, max_dimension, max_dimension)
result = [new_image]
if masks is not None:
new_masks = tf.expand_dims(masks, 3)
new_masks = tf.image.resize_images(
new_masks,
new_size[:-1],
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
align_corners=align_corners)
new_masks = tf.squeeze(new_masks, 3)
if pad_to_max_dimension:
new_masks = tf.image.pad_to_bounding_box(
new_masks, 0, 0, max_dimension, max_dimension)
result.append(new_masks)
result.append(new_size)
return result
def _copy_extra_fields(boxlist_to_copy_to, boxlist_to_copy_from):
"""Copies the extra fields of boxlist_to_copy_from to boxlist_to_copy_to.
Args:
boxlist_to_copy_to: BoxList to which extra fields are copied.
boxlist_to_copy_from: BoxList from which fields are copied.
Returns:
boxlist_to_copy_to with extra fields.
"""
for field in boxlist_to_copy_from.get_extra_fields():
boxlist_to_copy_to.add_field(field, boxlist_to_copy_from.get_field(field))
return boxlist_to_copy_to
def box_list_scale(boxlist, y_scale, x_scale, scope=None):
"""scale box coordinates in x and y dimensions.
Args:
boxlist: BoxList holding N boxes
y_scale: (float) scalar tensor
x_scale: (float) scalar tensor
scope: name scope.
Returns:
boxlist: BoxList holding N boxes
"""
with tf.name_scope(scope, 'Scale'):
y_scale = tf.cast(y_scale, tf.float32)
x_scale = tf.cast(x_scale, tf.float32)
y_min, x_min, y_max, x_max = tf.split(
value=boxlist.get(), num_or_size_splits=4, axis=1)
y_min = y_scale * y_min
y_max = y_scale * y_max
x_min = x_scale * x_min
x_max = x_scale * x_max
scaled_boxlist = box_list.BoxList(
tf.concat([y_min, x_min, y_max, x_max], 1))
return _copy_extra_fields(scaled_boxlist, boxlist)
def keypoint_scale(keypoints, y_scale, x_scale, scope=None):
"""Scales keypoint coordinates in x and y dimensions.
Args:
keypoints: a tensor of shape [num_instances, num_keypoints, 2]
y_scale: (float) scalar tensor
x_scale: (float) scalar tensor
scope: name scope.
Returns:
new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
"""
with tf.name_scope(scope, 'Scale'):
y_scale = tf.cast(y_scale, tf.float32)
x_scale = tf.cast(x_scale, tf.float32)
new_keypoints = keypoints * [[[y_scale, x_scale]]]
return new_keypoints
def scale_boxes_to_pixel_coordinates(image, boxes, keypoints=None):
"""Scales boxes from normalized to pixel coordinates.
Args:
image: A 3D float32 tensor of shape [height, width, channels].
boxes: A 2D float32 tensor of shape [num_boxes, 4] containing the bounding
boxes in normalized coordinates. Each row is of the form
[ymin, xmin, ymax, xmax].
keypoints: (optional) rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]. The keypoints are in y-x normalized
coordinates.
Returns:
image: unchanged input image.
scaled_boxes: a 2D float32 tensor of shape [num_boxes, 4] containing the
bounding boxes in pixel coordinates.
scaled_keypoints: a 3D float32 tensor with shape
[num_instances, num_keypoints, 2] containing the keypoints in pixel
coordinates.
"""
boxlist = box_list.BoxList(boxes)
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
scaled_boxes = box_list_scale(boxlist, image_height, image_width).get()
result = [image, scaled_boxes]
if keypoints is not None:
scaled_keypoints = keypoint_scale(keypoints, image_height, image_width)
result.append(scaled_keypoints)
return tuple(result)
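# Usage sketch (added for illustration; not part of the original module):
# applies random_horizontal_flip and resize_to_range to a dummy image with a
# single normalized box. The 512x512 target size is an arbitrary example
# value, not an EfficientDet default.
if __name__ == '__main__':
    tf.disable_eager_execution()
    image = tf.zeros([300, 400, 3], tf.float32)
    boxes = tf.constant([[0.1, 0.2, 0.6, 0.8]], tf.float32)
    image, boxes = random_horizontal_flip(image, boxes=boxes)
    resized_image, resized_shape = resize_to_range(
        image, min_dimension=512, max_dimension=512, pad_to_max_dimension=True)
    with tf.Session() as sess:
        img_out, size_out, boxes_out = sess.run(
            [resized_image, resized_shape, boxes])
        print('padded image shape:', img_out.shape, 'pre-pad size:', size_out)
        print('possibly flipped boxes:', boxes_out)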
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/object_detection/preprocessor.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bounding Box List definition.
BoxList represents a list of bounding boxes as tensorflow
tensors, where each bounding box is represented as a row of 4 numbers,
[y_min, x_min, y_max, x_max]. It is assumed that all bounding boxes
within a given list correspond to a single image. See also
box_list_ops.py for common box related operations (such as area, iou, etc).
Optionally, users can add additional related fields (such as weights).
We assume the following things to be true about fields:
* they correspond to boxes in the box_list along the 0th dimension
* they have inferable rank at graph construction time
* all dimensions except for possibly the 0th can be inferred
(i.e., not None) at graph construction time.
Some other notes:
* Following tensorflow conventions, we use height, width ordering,
and correspondingly, y,x (or ymin, xmin, ymax, xmax) ordering
* Tensors are always provided as (flat) [N, 4] tensors.
"""
import tensorflow.compat.v1 as tf
class BoxList(object):
"""Box collection."""
def __init__(self, boxes):
"""Constructs box collection.
Args:
boxes: a tensor of shape [N, 4] representing box corners
Raises:
ValueError: if invalid dimensions for bbox data or if bbox data is not in
float32 format.
"""
if len(boxes.get_shape()) != 2 or boxes.get_shape()[-1] != 4:
raise ValueError('Invalid dimensions for box data.')
if boxes.dtype != tf.float32:
raise ValueError('Invalid tensor type: should be tf.float32')
self.data = {'boxes': boxes}
def num_boxes(self):
"""Returns number of boxes held in collection.
Returns:
a tensor representing the number of boxes held in the collection.
"""
return tf.shape(self.data['boxes'])[0]
def num_boxes_static(self):
"""Returns number of boxes held in collection.
This number is inferred at graph construction time rather than run-time.
Returns:
Number of boxes held in collection (integer) or None if this is not
inferable at graph construction time.
"""
return self.data['boxes'].get_shape().as_list()[0]
def get_all_fields(self):
"""Returns all fields."""
return self.data.keys()
def get_extra_fields(self):
"""Returns all non-box fields (i.e., everything not named 'boxes')."""
return [k for k in self.data.keys() if k != 'boxes']
def add_field(self, field, field_data):
"""Add field to box list.
This method can be used to add related box data such as
weights/labels, etc.
Args:
field: a string key to access the data via `get`
field_data: a tensor containing the data to store in the BoxList
"""
self.data[field] = field_data
def has_field(self, field):
"""Check if key in data."""
return field in self.data
def get(self):
"""Convenience function for accessing box coordinates.
Returns:
a tensor with shape [N, 4] representing box coordinates.
"""
return self.get_field('boxes')
def set(self, boxes):
"""Convenience function for setting box coordinates.
Args:
boxes: a tensor of shape [N, 4] representing box corners
Raises:
ValueError: if invalid dimensions for bbox data
"""
if len(boxes.get_shape()) != 2 or boxes.get_shape()[-1] != 4:
raise ValueError('Invalid dimensions for box data.')
self.data['boxes'] = boxes
def get_field(self, field):
"""Accesses a box collection and associated fields.
This function returns the tensor stored under the specified field of the
box collection.
Args:
field: a string specifying the field to be accessed, e.g. 'boxes' or any
field previously added via add_field.
Returns:
a tensor representing the box collection or an associated field.
Raises:
ValueError: if invalid field
"""
if not self.has_field(field):
raise ValueError('field ' + str(field) + ' does not exist')
return self.data[field]
def set_field(self, field, value):
"""Sets the value of a field.
Updates the field of a box_list with a given value.
Args:
field: (string) name of the field to set value.
value: the value to assign to the field.
Raises:
ValueError: if the box_list does not have specified field.
"""
if not self.has_field(field):
raise ValueError('field %s does not exist' % field)
self.data[field] = value
def get_center_coordinates_and_sizes(self, scope=None):
"""Computes the center coordinates, height and width of the boxes.
Args:
scope: name scope of the function.
Returns:
a list of 4 1-D tensors [ycenter, xcenter, height, width].
"""
with tf.name_scope(scope, 'get_center_coordinates_and_sizes'):
box_corners = self.get()
ymin, xmin, ymax, xmax = tf.unstack(tf.transpose(box_corners))
width = xmax - xmin
height = ymax - ymin
ycenter = ymin + height / 2.
xcenter = xmin + width / 2.
return [ycenter, xcenter, height, width]
def transpose_coordinates(self, scope=None):
"""Transpose the coordinate representation in a boxlist.
Args:
scope: name scope of the function.
"""
with tf.name_scope(scope, 'transpose_coordinates'):
y_min, x_min, y_max, x_max = tf.split(
value=self.get(), num_or_size_splits=4, axis=1)
self.set(tf.concat([x_min, y_min, x_max, y_max], 1))
def as_tensor_dict(self, fields=None):
"""Retrieves specified fields as a dictionary of tensors.
Args:
fields: (optional) list of fields to return in the dictionary.
If None (default), all fields are returned.
Returns:
tensor_dict: A dictionary of tensors specified by fields.
Raises:
ValueError: if specified field is not contained in boxlist.
"""
tensor_dict = {}
if fields is None:
fields = self.get_all_fields()
for field in fields:
if not self.has_field(field):
raise ValueError('boxlist must contain all specified fields')
tensor_dict[field] = self.get_field(field)
return tensor_dict
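# Usage sketch (added for illustration; not part of the original module):
# builds a BoxList, attaches a score field and reads back the center/size
# representation of the boxes.
if __name__ == '__main__':
    tf.disable_eager_execution()
    boxes = BoxList(
        tf.constant([[0., 0., .5, .5], [.25, .25, 1., 1.]], tf.float32))
    boxes.add_field('scores', tf.constant([0.9, 0.3]))
    ycenter, xcenter, height, width = boxes.get_center_coordinates_and_sizes()
    with tf.Session() as sess:
        print('num boxes (static):', boxes.num_boxes_static())
        print('centers and sizes:', sess.run([ycenter, xcenter, height, width]))
        print('scores:', sess.run(boxes.get_field('scores')))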
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/object_detection/box_list.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Region Similarity Calculators for BoxLists.
Region Similarity Calculators compare a pairwise measure of similarity
between the boxes in two BoxLists.
"""
from abc import ABCMeta
from abc import abstractmethod
import tensorflow.compat.v1 as tf
def area(boxlist, scope=None):
"""Computes area of boxes.
Args:
boxlist: BoxList holding N boxes
scope: name scope.
Returns:
a tensor with shape [N] representing box areas.
"""
with tf.name_scope(scope, 'Area'):
y_min, x_min, y_max, x_max = tf.split(
value=boxlist.get(), num_or_size_splits=4, axis=1)
return tf.squeeze((y_max - y_min) * (x_max - x_min), [1])
def intersection(boxlist1, boxlist2, scope=None):
"""Compute pairwise intersection areas between boxes.
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding M boxes
scope: name scope.
Returns:
a tensor with shape [N, M] representing pairwise intersections
"""
with tf.name_scope(scope, 'Intersection'):
y_min1, x_min1, y_max1, x_max1 = tf.split(
value=boxlist1.get(), num_or_size_splits=4, axis=1)
y_min2, x_min2, y_max2, x_max2 = tf.split(
value=boxlist2.get(), num_or_size_splits=4, axis=1)
all_pairs_min_ymax = tf.minimum(y_max1, tf.transpose(y_max2))
all_pairs_max_ymin = tf.maximum(y_min1, tf.transpose(y_min2))
intersect_heights = tf.maximum(0.0, all_pairs_min_ymax - all_pairs_max_ymin)
all_pairs_min_xmax = tf.minimum(x_max1, tf.transpose(x_max2))
all_pairs_max_xmin = tf.maximum(x_min1, tf.transpose(x_min2))
intersect_widths = tf.maximum(0.0, all_pairs_min_xmax - all_pairs_max_xmin)
return intersect_heights * intersect_widths
def iou(boxlist1, boxlist2, scope=None):
"""Computes pairwise intersection-over-union between box collections.
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding M boxes
scope: name scope.
Returns:
a tensor with shape [N, M] representing pairwise iou scores.
"""
with tf.name_scope(scope, 'IOU'):
intersections = intersection(boxlist1, boxlist2)
areas1 = area(boxlist1)
areas2 = area(boxlist2)
unions = (
tf.expand_dims(areas1, 1) + tf.expand_dims(areas2, 0) - intersections)
return tf.where(
tf.equal(intersections, 0.0),
tf.zeros_like(intersections), tf.truediv(intersections, unions))
class RegionSimilarityCalculator(object):
"""Abstract base class for region similarity calculator."""
__metaclass__ = ABCMeta
def compare(self, boxlist1, boxlist2, scope=None):
"""Computes matrix of pairwise similarity between BoxLists.
This op (to be overridden) computes a measure of pairwise similarity between
the boxes in the given BoxLists. Higher values indicate more similarity.
Note that this method simply measures similarity and does not explicitly
perform a matching.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
scope: Op scope name. Defaults to 'Compare' if None.
Returns:
a (float32) tensor of shape [N, M] with pairwise similarity score.
"""
with tf.name_scope(scope, 'Compare', [boxlist1, boxlist2]):
return self._compare(boxlist1, boxlist2)
@abstractmethod
def _compare(self, boxlist1, boxlist2):
pass
class IouSimilarity(RegionSimilarityCalculator):
"""Class to compute similarity based on Intersection over Union (IOU) metric.
This class computes pairwise similarity between two BoxLists based on IOU.
"""
def _compare(self, boxlist1, boxlist2):
"""Compute pairwise IOU similarity between the two BoxLists.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
Returns:
A tensor with shape [N, M] representing pairwise iou scores.
"""
return iou(boxlist1, boxlist2)
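# Usage sketch (added for illustration; not part of the original module):
# pairwise IoU between two small box collections. The [0, 0, 1, 1] box has
# IoU 0.5 with [0, 0, .5, 1] and IoU 0.25 with [.5, .5, 1, 1].
if __name__ == '__main__':
    from nvidia_tao_tf1.cv.efficientdet.object_detection import box_list
    tf.disable_eager_execution()
    boxes_a = box_list.BoxList(tf.constant([[0., 0., 1., 1.]], tf.float32))
    boxes_b = box_list.BoxList(
        tf.constant([[0., 0., .5, 1.], [.5, .5, 1., 1.]], tf.float32))
    similarity = IouSimilarity().compare(boxes_a, boxes_b)
    with tf.Session() as sess:
        print(sess.run(similarity))  # expected: [[0.5, 0.25]]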
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/object_detection/region_similarity_calculator.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensorflow Example proto decoder for object detection.
A decoder to decode string tensors containing serialized tensorflow.Example
protos for object detection.
"""
import tensorflow.compat.v1 as tf
def _get_source_id_from_encoded_image(parsed_tensors):
return tf.strings.as_string(
tf.strings.to_hash_bucket_fast(
parsed_tensors['image/encoded'],
2**63 - 1))
class TfExampleDecoder(object):
"""Tensorflow Example proto decoder."""
def __init__(self, include_mask=False, regenerate_source_id=False):
"""Init."""
self._include_mask = include_mask
self._regenerate_source_id = regenerate_source_id
self._keys_to_features = {
'image/encoded': tf.FixedLenFeature((), tf.string),
'image/source_id': tf.FixedLenFeature((), tf.string, ''),
'image/height': tf.FixedLenFeature((), tf.int64, -1),
'image/width': tf.FixedLenFeature((), tf.int64, -1),
'image/object/bbox/xmin': tf.VarLenFeature(tf.float32),
'image/object/bbox/xmax': tf.VarLenFeature(tf.float32),
'image/object/bbox/ymin': tf.VarLenFeature(tf.float32),
'image/object/bbox/ymax': tf.VarLenFeature(tf.float32),
'image/object/class/label': tf.VarLenFeature(tf.int64),
'image/object/area': tf.VarLenFeature(tf.float32),
'image/object/is_crowd': tf.VarLenFeature(tf.int64),
}
if include_mask:
self._keys_to_features.update({
'image/object/mask': tf.VarLenFeature(tf.string),
})
def _decode_image(self, parsed_tensors):
"""Decodes the image and set its static shape."""
image = tf.io.decode_image(parsed_tensors['image/encoded'], channels=3)
image.set_shape([None, None, 3])
return image
def _decode_boxes(self, parsed_tensors):
"""Concat box coordinates in the format of [ymin, xmin, ymax, xmax]."""
xmin = parsed_tensors['image/object/bbox/xmin']
xmax = parsed_tensors['image/object/bbox/xmax']
ymin = parsed_tensors['image/object/bbox/ymin']
ymax = parsed_tensors['image/object/bbox/ymax']
return tf.stack([ymin, xmin, ymax, xmax], axis=-1)
def _decode_masks(self, parsed_tensors):
"""Decode a set of PNG masks to the tf.float32 tensors."""
def _decode_png_mask(png_bytes):
mask = tf.squeeze(
tf.io.decode_png(png_bytes, channels=1, dtype=tf.uint8), axis=-1)
mask = tf.cast(mask, dtype=tf.float32)
mask.set_shape([None, None])
return mask
height = parsed_tensors['image/height']
width = parsed_tensors['image/width']
masks = parsed_tensors['image/object/mask']
return tf.cond(
tf.greater(tf.shape(masks)[0], 0),
lambda: tf.map_fn(_decode_png_mask, masks, dtype=tf.float32),
lambda: tf.zeros([0, height, width], dtype=tf.float32))
def _decode_areas(self, parsed_tensors):
xmin = parsed_tensors['image/object/bbox/xmin']
xmax = parsed_tensors['image/object/bbox/xmax']
ymin = parsed_tensors['image/object/bbox/ymin']
ymax = parsed_tensors['image/object/bbox/ymax']
return tf.cond(
tf.greater(tf.shape(parsed_tensors['image/object/area'])[0], 0),
lambda: parsed_tensors['image/object/area'],
lambda: (xmax - xmin) * (ymax - ymin))
def decode(self, serialized_example):
"""Decode the serialized example.
Args:
serialized_example: a single serialized tf.Example string.
Returns:
decoded_tensors: a dictionary of tensors with the following fields:
- image: a uint8 tensor of shape [None, None, 3].
- source_id: a string scalar tensor.
- height: an integer scalar tensor.
- width: an integer scalar tensor.
- groundtruth_classes: a int64 tensor of shape [None].
- groundtruth_is_crowd: a bool tensor of shape [None].
- groundtruth_area: a float32 tensor of shape [None].
- groundtruth_boxes: a float32 tensor of shape [None, 4].
- groundtruth_instance_masks: a float32 tensor of shape
[None, None, None].
- groundtruth_instance_masks_png: a string tensor of shape [None].
"""
parsed_tensors = tf.io.parse_single_example(
serialized_example, self._keys_to_features)
for k in parsed_tensors:
if isinstance(parsed_tensors[k], tf.SparseTensor):
if parsed_tensors[k].dtype == tf.string:
parsed_tensors[k] = tf.sparse_tensor_to_dense(
parsed_tensors[k], default_value='')
else:
parsed_tensors[k] = tf.sparse_tensor_to_dense(
parsed_tensors[k], default_value=0)
image = self._decode_image(parsed_tensors)
boxes = self._decode_boxes(parsed_tensors)
areas = self._decode_areas(parsed_tensors)
decode_image_shape = tf.logical_or(
tf.equal(parsed_tensors['image/height'], -1),
tf.equal(parsed_tensors['image/width'], -1))
image_shape = tf.cast(tf.shape(image), dtype=tf.int64)
parsed_tensors['image/height'] = tf.where(
decode_image_shape,
image_shape[0],
parsed_tensors['image/height'])
parsed_tensors['image/width'] = tf.where(
decode_image_shape, image_shape[1],
parsed_tensors['image/width'])
is_crowds = tf.cond(
tf.greater(tf.shape(parsed_tensors['image/object/is_crowd'])[0], 0),
lambda: tf.cast(parsed_tensors['image/object/is_crowd'], dtype=tf.bool),
lambda: tf.zeros_like(parsed_tensors['image/object/class/label'], dtype=tf.bool))
if self._regenerate_source_id:
source_id = _get_source_id_from_encoded_image(parsed_tensors)
else:
source_id = tf.cond(
tf.greater(tf.strings.length(parsed_tensors['image/source_id']),
0), lambda: parsed_tensors['image/source_id'],
lambda: _get_source_id_from_encoded_image(parsed_tensors))
if self._include_mask:
masks = self._decode_masks(parsed_tensors)
decoded_tensors = {
'image': image,
'source_id': source_id,
'height': parsed_tensors['image/height'],
'width': parsed_tensors['image/width'],
'groundtruth_classes': parsed_tensors['image/object/class/label'],
'groundtruth_is_crowd': is_crowds,
'groundtruth_area': areas,
'groundtruth_boxes': boxes,
}
if self._include_mask:
decoded_tensors.update({
'groundtruth_instance_masks': masks,
'groundtruth_instance_masks_png': parsed_tensors['image/object/mask'],
})
return decoded_tensors
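# Illustrative use in a tf.data input pipeline (the tfrecord path is a placeholder):
#   decoder = TfExampleDecoder(include_mask=False)
#   dataset = tf.data.TFRecordDataset(["/path/to/data.tfrecord"])
#   dataset = dataset.map(decoder.decode, num_parallel_calls=tf.data.experimental.AUTOTUNE)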
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/object_detection/tf_example_decoder.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Argmax matcher implementation.
This class takes a similarity matrix and matches columns to rows based on the
maximum value per column. One can specify a matched_threshold to prevent
low-similarity columns from matching to rows (generally resulting in a negative
training example) and an unmatched_threshold; matches with similarity between
the two thresholds are ignored (generally resulting in neither a positive nor a
negative training example).
This matcher is used in Fast(er)-RCNN.
Note: matchers are used in TargetAssigners. There is a create_target_assigner
factory function for popular implementations.
"""
import tensorflow.compat.v1 as tf
from nvidia_tao_tf1.cv.efficientdet.object_detection import matcher
from nvidia_tao_tf1.cv.efficientdet.object_detection import shape_utils
class ArgMaxMatcher(matcher.Matcher):
"""Matcher based on highest value.
This class computes matches from a similarity matrix. Each column is matched
to a single row.
To support object detection target assignment this class enables setting both
    matched_threshold (upper threshold) and unmatched_threshold (lower threshold)
defining three categories of similarity which define whether examples are
positive, negative, or ignored:
(1) similarity >= matched_threshold: Highest similarity. Matched/Positive!
(2) matched_threshold > similarity >= unmatched_threshold: Medium similarity.
Depending on negatives_lower_than_unmatched, this is either
Unmatched/Negative OR Ignore.
(3) unmatched_threshold > similarity: Lowest similarity. Depending on flag
negatives_lower_than_unmatched, either Unmatched/Negative OR Ignore.
For ignored matches this class sets the values in the Match object to -2.
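    Example (illustrative values; assumes the public match() wrapper provided by
    the base matcher.Matcher class, as in the TF Object Detection API):
      argmax_matcher = ArgMaxMatcher(matched_threshold=0.5, unmatched_threshold=0.4)
      match = argmax_matcher.match(similarity_matrix)  # similarity_matrix: [N, M]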
"""
def __init__(self,
matched_threshold,
unmatched_threshold=None,
negatives_lower_than_unmatched=True,
force_match_for_each_row=False):
"""Construct ArgMaxMatcher.
Args:
matched_threshold: Threshold for positive matches. Positive if
sim >= matched_threshold, where sim is the maximum value of the
similarity matrix for a given column. Set to None for no threshold.
unmatched_threshold: Threshold for negative matches. Negative if
sim < unmatched_threshold. Defaults to matched_threshold
when set to None.
negatives_lower_than_unmatched: Boolean which defaults to True. If True
then negative matches are the ones below the unmatched_threshold,
whereas ignored matches are in between the matched and unmatched
threshold. If False, then negative matches are in between the matched
and unmatched threshold, and everything lower than unmatched is ignored.
force_match_for_each_row: If True, ensures that each row is matched to
at least one column (which is not guaranteed otherwise if the
matched_threshold is high). Defaults to False. See
argmax_matcher_test.testMatcherForceMatch() for an example.
Raises:
ValueError: if unmatched_threshold is set but matched_threshold is not set
or if unmatched_threshold > matched_threshold.
"""
if (matched_threshold is None) and (unmatched_threshold is not None):
            raise ValueError('Need to also define matched_threshold when '
                             'unmatched_threshold is defined')
self._matched_threshold = matched_threshold
if unmatched_threshold is None:
self._unmatched_threshold = matched_threshold
else:
if unmatched_threshold > matched_threshold:
                raise ValueError('unmatched_threshold needs to be smaller or equal '
                                 'to matched_threshold')
self._unmatched_threshold = unmatched_threshold
if not negatives_lower_than_unmatched:
if self._unmatched_threshold == self._matched_threshold:
raise ValueError('When negatives are in between matched and '
'unmatched thresholds, these cannot be of equal '
'value. matched: {}, unmatched: {}'.format(
self._matched_threshold,
self._unmatched_threshold))
self._force_match_for_each_row = force_match_for_each_row
self._negatives_lower_than_unmatched = negatives_lower_than_unmatched
def _match(self, similarity_matrix):
"""Tries to match each column of the similarity matrix to a row.
Args:
similarity_matrix: tensor of shape [N, M] representing any similarity
metric.
Returns:
Match object with corresponding matches for each of M columns.
"""
def _match_when_rows_are_empty():
"""Performs matching when the rows of similarity matrix are empty.
When the rows are empty, all detections are false positives. So we return
a tensor of -1's to indicate that the columns do not match to any rows.
Returns:
matches: int32 tensor indicating the row each column matches to.
"""
similarity_matrix_shape = shape_utils.combined_static_and_dynamic_shape(
similarity_matrix)
return -1 * tf.ones([similarity_matrix_shape[1]], dtype=tf.int32)
def _match_when_rows_are_non_empty():
"""Performs matching when the rows of similarity matrix are non empty.
Returns:
matches: int32 tensor indicating the row each column matches to.
"""
# Matches for each column
matches = tf.argmax(similarity_matrix, 0, output_type=tf.int32)
# Deal with matched and unmatched threshold
if self._matched_threshold is not None:
# Get logical indices of ignored and unmatched columns as tf.int64
matched_vals = tf.reduce_max(similarity_matrix, 0)
below_unmatched_threshold = tf.greater(self._unmatched_threshold, matched_vals)
between_thresholds = tf.logical_and(
tf.greater_equal(matched_vals, self._unmatched_threshold),
tf.greater(self._matched_threshold, matched_vals))
if self._negatives_lower_than_unmatched:
matches = self._set_values_using_indicator(matches,
below_unmatched_threshold,
-1)
matches = self._set_values_using_indicator(matches,
between_thresholds,
-2)
else:
matches = self._set_values_using_indicator(matches,
below_unmatched_threshold,
-2)
matches = self._set_values_using_indicator(matches,
between_thresholds,
-1)
if self._force_match_for_each_row:
similarity_matrix_shape = shape_utils.combined_static_and_dynamic_shape(
similarity_matrix)
force_match_column_ids = tf.argmax(similarity_matrix, 1,
output_type=tf.int32)
force_match_column_indicators = tf.one_hot(
force_match_column_ids, depth=similarity_matrix_shape[1])
force_match_row_ids = tf.argmax(force_match_column_indicators, 0,
output_type=tf.int32)
force_match_column_mask = tf.cast(
tf.reduce_max(force_match_column_indicators, 0), tf.bool)
final_matches = tf.where(force_match_column_mask, force_match_row_ids, matches)
return final_matches
return matches
if similarity_matrix.shape.is_fully_defined():
if similarity_matrix.shape[0].value == 0:
return _match_when_rows_are_empty()
return _match_when_rows_are_non_empty()
return tf.cond(
tf.greater(tf.shape(similarity_matrix)[0], 0),
_match_when_rows_are_non_empty, _match_when_rows_are_empty)
def _set_values_using_indicator(self, x, indicator, val):
"""Set the indicated fields of x to val.
Args:
x: tensor.
indicator: boolean with same shape as x.
val: scalar with value to set.
Returns:
modified tensor.
"""
indicator = tf.cast(indicator, x.dtype)
return x * (1 - indicator) + val * indicator
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/object_detection/argmax_matcher.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Util class for creating image batches."""
import os
import sys
import numpy as np
from PIL import Image
class ImageBatcher:
"""Creates batches of pre-processed images."""
def __init__(self, input, shape, dtype, # noqa pylint: disable=W0622
max_num_images=None, exact_batches=False, preprocessor="EfficientDet"):
"""Initialize.
:param input: The input directory to read images from.
:param shape: The tensor shape of the batch to prepare, either in NCHW or NHWC format.
:param dtype: The (numpy) datatype to cast the batched data to.
:param max_num_images: The maximum number of images to read from the directory.
:param exact_batches: This defines how to handle a number of images that is not an exact
multiple of the batch size. If false, it will pad the final batch with zeros to reach
the batch size. If true, it will *remove* the last few images in excess of a batch size
multiple, to guarantee batches are exact (useful for calibration).
:param preprocessor: Set the preprocessor to use, depending on which network is being used.
"""
# Find images in the given input path
input = os.path.realpath(input)
self.images = []
extensions = [".jpg", ".jpeg", ".png", ".bmp"]
def is_image(path):
return os.path.isfile(path) and os.path.splitext(path)[1].lower() in extensions
if os.path.isdir(input):
self.images = [os.path.join(input, f) for f in os.listdir(input)
if is_image(os.path.join(input, f))]
self.images.sort()
elif os.path.isfile(input):
if is_image(input):
self.images.append(input)
self.num_images = len(self.images)
if self.num_images < 1:
print("No valid {} images found in {}".format("/".join(extensions), input))
sys.exit(1)
# Handle Tensor Shape
self.dtype = dtype
self.shape = shape
assert len(self.shape) == 4
self.batch_size = shape[0]
assert self.batch_size > 0
self.format = None
self.width = -1
self.height = -1
if self.shape[1] == 3:
self.format = "NCHW"
self.height = self.shape[2]
self.width = self.shape[3]
elif self.shape[3] == 3:
self.format = "NHWC"
self.height = self.shape[1]
self.width = self.shape[2]
assert all([self.format, self.width > 0, self.height > 0])
# Adapt the number of images as needed
if max_num_images and 0 < max_num_images < len(self.images):
self.num_images = max_num_images
if exact_batches:
self.num_images = self.batch_size * (self.num_images // self.batch_size)
if self.num_images < 1:
print("Not enough images to create batches")
sys.exit(1)
self.images = self.images[0:self.num_images]
# Subdivide the list of images into batches
self.num_batches = 1 + int((self.num_images - 1) / self.batch_size)
self.batches = []
for i in range(self.num_batches):
start = i * self.batch_size
end = min(start + self.batch_size, self.num_images)
self.batches.append(self.images[start:end])
# Indices
self.image_index = 0
self.batch_index = 0
self.preprocessor = preprocessor
def preprocess_image(self, image_path):
"""
The image preprocessor loads an image from disk and prepares it as needed for batching.
This includes padding, resizing, normalization, data type casting, and transposing.
This Image Batcher implements one algorithm for now:
* EfficientDet: Resizes and pads the image to fit the input size.
:param image_path: The path to the image on disk to load.
        :return: Two values: A numpy array holding the image sample, ready to be concatenated
into the rest of the batch, and the resize scale used, if any.
"""
def resize_pad(image, pad_color=(0, 0, 0)):
"""
Resize and Pad.
A subroutine to implement padding and resizing. This will resize the image to fit
fully within the input size, and pads the remaining bottom-right portions with
the value provided.
:param image: The PIL image object
            :param pad_color: The RGB values to use for the padded area. Default: Black/Zeros.
            :return: Two values: The PIL image object already resized and padded,
                and the resize scale used.
"""
width, height = image.size
width_scale = width / self.width
height_scale = height / self.height
scale = 1.0 / max(width_scale, height_scale)
image = image.resize(
(round(width * scale), round(height * scale)),
resample=Image.BILINEAR)
pad = Image.new("RGB", (self.width, self.height))
pad.paste(pad_color, [0, 0, self.width, self.height])
pad.paste(image)
return pad, scale
scale = None
image = Image.open(image_path)
image = image.convert(mode='RGB')
if self.preprocessor == "EfficientDet":
            # For EfficientDet: resize & pad using the ImageNet mean as the pad color
            # and keep the pixel values in the [0, 255] range
image, scale = resize_pad(image, (124, 116, 104))
image = np.asarray(image, dtype=self.dtype)
# [0-1] Normalization, Mean subtraction and Std Dev scaling are
# part of the EfficientDet graph, so no need to do it during preprocessing here
else:
print("Preprocessing method {} not supported".format(self.preprocessor))
sys.exit(1)
if self.format == "NCHW":
image = np.transpose(image, (2, 0, 1))
return image, scale
def get_batch(self):
"""
Retrieve the batches.
This is a generator object, so you can use it within a loop as:
for batch, images in batcher.get_batch():
...
Or outside of a batch with the next() function.
:return: A generator yielding three items per iteration: a numpy array holding
a batch of images, the list of paths to the images loaded within this batch,
and the list of resize scales for each image in the batch.
"""
        for batch_images in self.batches:
batch_data = np.zeros(self.shape, dtype=self.dtype)
batch_scales = [None] * len(batch_images)
for i, image in enumerate(batch_images):
self.image_index += 1
batch_data[i], batch_scales[i] = self.preprocess_image(image)
self.batch_index += 1
yield batch_data, batch_images, batch_scales
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/exporter/image_batcher.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IVA EfficientDet exporter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/exporter/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientDet ONNX exporter."""
import logging
import os
import tempfile
import numpy as np
import onnx
from onnx import numpy_helper
from onnx import shape_inference
import onnx_graphsurgeon as gs
import tensorflow as tf
from tensorflow.core.framework import tensor_pb2
from tensorflow.python.framework import tensor_util
from tf2onnx import optimizer, tf_loader, tf_utils, tfonnx
from tf2onnx.utils import make_sure
from nvidia_tao_tf1.cv.efficientdet.exporter import onnx_utils # noqa pylint: disable=W0611
logging.basicConfig(level=logging.INFO)
logging.getLogger("EfficientDetGraphSurgeon").setLevel(logging.INFO)
log = logging.getLogger("EfficientDetGraphSurgeon")
def get_tf_tensor_data(tensor):
"""Get data from tensor."""
make_sure(isinstance(tensor, tensor_pb2.TensorProto), "Require TensorProto")
np_data = tensor_util.MakeNdarray(tensor)
make_sure(isinstance(np_data, np.ndarray), "%r isn't ndarray", np_data)
return np_data
def tf_to_onnx_tensor(tensor, name=""):
"""Convert tensorflow tensor to onnx tensor."""
np_data = get_tf_tensor_data(tensor)
if np_data.dtype == np.object:
# assume np_data is string, numpy_helper.from_array accepts ndarray,
# in which each item is of str while the whole dtype is of object.
try:
# Faster but fails on Unicode
# np_data = np_data.astype(np.str).astype(np.object)
if len(np_data.shape) > 0:
np_data = np_data.astype(np.str).astype(np.object)
else:
np_data = np.array(str(np_data)).astype(np.object)
except UnicodeDecodeError:
decode = np.vectorize(lambda x: x.decode('UTF-8'))
np_data = decode(np_data).astype(np.object)
except: # noqa pylint: disable=W0611
raise RuntimeError("Not support type: {}".format(type(np_data.flat[0])))
return numpy_helper.from_array(np_data, name=name)
tf_utils.tf_to_onnx_tensor = tf_to_onnx_tensor
class EfficientDetGraphSurgeon:
"""EfficientDet GraphSurgeon Class."""
def __init__(self, saved_model_path, legacy_plugins=False):
"""
Constructor of the EfficientDet Graph Surgeon object.
:param saved_model_path: The path pointing to the TensorFlow saved model to load.
:param legacy_plugins: If using TensorRT version < 8.0.1,
set this to True to use older (but slower) plugins.
"""
saved_model_path = os.path.realpath(saved_model_path)
assert os.path.exists(saved_model_path)
# Use tf2onnx to convert saved model to an initial ONNX graph.
graph_def, inputs, outputs = tf_loader.from_saved_model(
saved_model_path, None, None, "serve", ["serving_default"])
log.info("Loaded saved model from {}".format(saved_model_path))
with tf.Graph().as_default() as tf_graph:
tf.import_graph_def(graph_def, name="")
with tf_loader.tf_session(graph=tf_graph):
onnx_graph = tfonnx.process_tf_graph(
tf_graph, input_names=inputs, output_names=outputs, opset=13)
onnx_model = optimizer.optimize_graph(onnx_graph).make_model(
"Converted from {}".format(saved_model_path))
self.graph = gs.import_onnx(onnx_model)
assert self.graph
log.info("TF2ONNX graph created successfully")
# Fold constants via ONNX-GS that TF2ONNX may have missed
self.graph.fold_constants()
self.api = "AutoML"
self.batch_size = None
self.legacy_plugins = legacy_plugins
os_handle, self.tmp_onnx_path = tempfile.mkstemp(suffix='.onnx', dir=saved_model_path)
os.close(os_handle)
def infer(self):
"""
Sanitize the graph by cleaning any unconnected nodes.
do a topological resort and fold constant inputs values.
When possible, run shape inference on the ONNX graph to determine tensor shapes.
"""
for _ in range(3):
count_before = len(self.graph.nodes)
self.graph.cleanup().toposort()
try:
for node in self.graph.nodes:
for o in node.outputs:
o.shape = None
model = gs.export_onnx(self.graph)
model = shape_inference.infer_shapes(model)
self.graph = gs.import_onnx(model)
except Exception as e:
log.info("Shape inference could not be performed at this time:\n{}".format(e))
try:
self.graph.fold_constants(fold_shapes=True)
except TypeError as e:
log.error("This version of ONNX GraphSurgeon does not support folding shapes, "
"please upgrade your onnx_graphsurgeon module. Error:\n{}".format(e))
raise
count_after = len(self.graph.nodes)
if count_before == count_after:
# No new folding occurred in this iteration, so we can stop for now.
break
def save(self, output_path=None):
"""
Save the ONNX model to the given location.
:param output_path: Path pointing to the location where to
write out the updated ONNX model.
"""
self.graph.cleanup().toposort()
model = gs.export_onnx(self.graph)
output_path = output_path or self.tmp_onnx_path
onnx.save(model, output_path)
return output_path
def update_preprocessor(self, input_shape):
"""
Update preprocessor.
Remove all the pre-processing nodes in the ONNX graph and leave only
the image normalization essentials.
:param input_shape: The input tensor shape to use for the ONNX graph.
"""
# Update the input and output tensors shape
assert len(input_shape) == 4
for i in range(len(input_shape)):
input_shape[i] = int(input_shape[i])
assert input_shape[i] >= 1
input_format = None
if input_shape[1] == 3:
input_format = "NCHW"
if input_shape[3] == 3:
input_format = "NHWC"
assert input_format in ["NCHW", "NHWC"]
self.batch_size = input_shape[0]
self.graph.inputs[0].shape = input_shape
self.graph.inputs[0].dtype = np.float32
self.graph.inputs[0].name = "input"
self.infer()
log.info("ONNX graph input shape: {} [{} format detected]".format(
self.graph.inputs[0].shape, input_format))
# Find the initial nodes of the graph, whatever the input
# is first connected to, and disconnect them
for node in [node for node in self.graph.nodes if self.graph.inputs[0] in node.inputs]:
node.inputs.clear()
# Convert to NCHW format if needed
input_tensor = self.graph.inputs[0]
if input_format == "NHWC":
input_tensor = self.graph.transpose(
"preprocessor/transpose", input_tensor, [0, 3, 1, 2])
# RGB Normalizers. The per-channel values are given with shape [1, 3, 1, 1]
# for proper NCHW shape broadcasting
scale_val = 1 / np.asarray([255], dtype=np.float32)
mean_val = -1 * np.expand_dims(
np.asarray([0.485, 0.456, 0.406], dtype=np.float32), axis=(0, 2, 3))
stddev_val = 1 / np.expand_dims(
np.asarray([0.224, 0.224, 0.224], dtype=np.float32), axis=(0, 2, 3))
# y = (x * scale + mean) * stddev --> y = x * scale * stddev + mean * stddev
scale_out = self.graph.elt_const(
"Mul", "preprocessor/scale", input_tensor, scale_val * stddev_val)
mean_out = self.graph.elt_const(
"Add", "preprocessor/mean", scale_out, mean_val * stddev_val)
# Find the first stem conv node of the graph, and connect the normalizer directly to it
stem_name = None
if self.api == "AutoML":
stem_name = "stem_conv"
stem = [node for node in self.graph.nodes
if node.op == "Conv" and stem_name in node.name][0]
log.info("Found {} node '{}' as stem entry".format(stem.op, stem.name))
stem.inputs[0] = mean_out[0]
# Reshape nodes tend to update the batch dimension to a fixed value of 1,
# they should use the batch size instead
for node in [node for node in self.graph.nodes if node.op == "Reshape"]:
if type(node.inputs[1]) == gs.Constant and node.inputs[1].values[0] == 1:
node.inputs[1].values[0] = self.batch_size
self.infer()
def update_network(self):
"""
Updates the graph.
To replace certain nodes in the main EfficientDet network:
- the global average pooling nodes are optimized when running for TFOD models.
- the nearest neighbor resize ops in the FPN are replaced by a TRT plugin nodes
when running in legacy mode.
"""
if self.legacy_plugins:
self.infer()
count = 1
for node in [node for node in self.graph.nodes
if node.op == "Resize" and node.attrs['mode'] == "nearest"]:
# Older versions of TensorRT do not understand nearest neighbor resize ops,
# so a plugin is used to perform this operation.
self.graph.plugin(
op="ResizeNearest_TRT",
name="resize_nearest_{}".format(count),
inputs=[node.inputs[0]],
outputs=node.outputs,
attrs={
'plugin_version': "1",
# All resize ops in the EfficientDet FPN should have
# an upscale factor of 2.0
'scale': 2.0,
})
node.outputs.clear()
log.info(
"Replaced '{}' ({}) with a ResizeNearest_TRT plugin node".format(
node.name, count))
count += 1
def update_nms(self, threshold=None, detections=None):
"""
Updates the graph to replace the NMS op by BatchedNMS_TRT TensorRT plugin node.
:param threshold: Override the score threshold attribute. If set to None,
use the value in the graph.
:param detections: Override the max detections attribute. If set to None,
use the value in the graph.
"""
def find_head_concat(name_scope):
# This will find the concatenation node at the end of either Class Net or Box Net.
# These concatenation nodes bring together prediction data for each of 5 scales.
# The concatenated Class Net node will have shape
# [batch_size, num_anchors, num_classes],
# and the concatenated Box Net node has the shape [batch_size, num_anchors, 4].
            # These concatenation nodes can be found by searching for all Concat's
# and checking if the node two steps above in the graph has a name that begins with
# either "box_net/..." or "class_net/...".
for node in [node for node in self.graph.nodes
if node.op == "Transpose" and name_scope in node.name]:
concat = self.graph.find_descendant_by_op(node, "Concat")
assert concat and len(concat.inputs) == 5
log.info("Found {} node '{}' as the tip of {}".format(
concat.op, concat.name, name_scope))
return concat
def extract_anchors_tensor(split):
# This will find the anchors that have been hardcoded somewhere within the ONNX graph.
# The function will return a gs.Constant that can be directly used as
# an input to the NMS plugin.
# The anchor tensor shape will be [1, num_anchors, 4].
# Note that '1' is kept as first dim, regardless of batch size,
# as it's not necessary to replicate the anchors for all images in the batch.
# The anchors are available (one per coordinate) hardcoded as constants
# within certain box decoder nodes.
# Each of these four constants have shape [1, num_anchors], so some numpy operations
# are used to expand the dims and concatenate them as needed.
# These constants can be found by starting from the Box Net's split operation,
# and for each coordinate, walking down in the graph until either an Add or
# Mul node is found. The second input on this nodes will be the anchor data required.
def get_anchor_np(output_idx, op):
node = self.graph.find_descendant_by_op(split.o(0, output_idx), op)
assert node
val = np.squeeze(node.inputs[1].values)
return np.expand_dims(val.flatten(), axis=(0, 2))
anchors_y = get_anchor_np(0, "Add")
anchors_x = get_anchor_np(1, "Add")
anchors_h = get_anchor_np(2, "Mul")
anchors_w = get_anchor_np(3, "Mul")
anchors = np.concatenate([anchors_y, anchors_x, anchors_h, anchors_w], axis=2)
return gs.Constant(name="nms/anchors:0", values=anchors)
self.infer()
head_names = []
if self.api == "AutoML":
head_names = ["class-predict", "box-predict"]
# There are five nodes at the bottom of the graph that provide important connection points:
# 1. Find the concat node at the end of the class net (multi-scale class predictor)
class_net = find_head_concat(head_names[0])
class_net_tensor = class_net.outputs[0]
# 2. Find the concat node at the end of the box net (multi-scale localization predictor)
box_net = find_head_concat(head_names[1])
box_net_tensor = box_net.outputs[0]
# 3. Find the split node that separates the box net coordinates
# and feeds them into the box decoder.
box_net_split = self.graph.find_descendant_by_op(box_net, "Split")
assert box_net_split and len(box_net_split.outputs) == 4
# 4. Find the concat node at the end of the box decoder.
box_decoder = self.graph.find_descendant_by_op(box_net_split, "Concat")
assert box_decoder and len(box_decoder.inputs) == 4
box_decoder_tensor = box_decoder.outputs[0]
# 5. Find the NMS node.
nms_node = self.graph.find_node_by_op("NonMaxSuppression")
# Extract NMS Configuration
num_detections = int(nms_node.inputs[2].values) if detections is None else detections
iou_threshold = float(nms_node.inputs[3].values)
score_threshold = float(nms_node.inputs[4].values) if threshold is None else threshold
num_classes = class_net.i().inputs[1].values[-1]
normalized = False
# NMS Inputs and Attributes
# NMS expects these shapes for its input tensors:
# box_net: [batch_size, number_boxes, 4]
# class_net: [batch_size, number_boxes, number_classes]
# anchors: [1, number_boxes, 4] (if used)
nms_op = None
nms_attrs = None
nms_inputs = None
if not self.legacy_plugins:
# EfficientNMS TensorRT Plugin
# Fusing the decoder will always be faster, so this is
# the default NMS method supported. In this case,
# three inputs are given to the NMS TensorRT node:
# - The box predictions (from the Box Net node found above)
# - The class predictions (from the Class Net node found above)
# - The default anchor coordinates (from the extracted anchor constants)
# As the original tensors from EfficientDet will be used,
# the NMS code type is set to 1 (Center+Size),
# because this is the internal box coding format used by the network.
anchors_tensor = extract_anchors_tensor(box_net_split)
nms_inputs = [box_net_tensor, class_net_tensor, anchors_tensor]
nms_op = "EfficientNMS_TRT"
nms_attrs = {
'plugin_version': "1",
'background_class': -1,
'max_output_boxes': num_detections,
# Keep threshold to at least 0.01 for better efficiency
'score_threshold': max(0.01, score_threshold),
'iou_threshold': iou_threshold,
'score_activation': True,
'box_coding': 1,
}
nms_output_classes_dtype = np.int32
else:
# BatchedNMS TensorRT Plugin
# Alternatively, the ONNX box decoder can be used. This will be slower,
# as more element-wise and non-fused
# operations will need to be performed by TensorRT. However,
# it's easier to implement, so it is shown here
# for reference. In this case, only two inputs are given to the NMS TensorRT node:
# - The box predictions (already decoded through the ONNX Box Decoder node)
# - The class predictions (from the Class Net node found above,
# but also needs to pass through a sigmoid)
# This time, the box predictions will have the coordinate coding from
# the ONNX box decoder, which matches what the BatchedNMS plugin uses.
if self.api == "AutoML":
# The default boxes tensor has shape [batch_size, number_boxes, 4].
# This will insert a "1" dimension
# in the second axis, to become [batch_size, number_boxes, 1, 4],
# the shape that BatchedNMS expects.
box_decoder_tensor = self.graph.unsqueeze(
"nms/box_net_reshape", box_decoder_tensor, axes=[2])[0]
# BatchedNMS also expects the classes tensor to be already activated,
# in the case of EfficientDet, this is through a Sigmoid op.
class_net_tensor = self.graph.sigmoid("nms/class_net_sigmoid", class_net_tensor)[0]
nms_inputs = [box_decoder_tensor, class_net_tensor]
nms_op = "BatchedNMS_TRT"
nms_attrs = {
'plugin_version': "1",
'shareLocation': True,
'backgroundLabelId': -1,
'numClasses': num_classes,
'topK': 1024,
'keepTopK': num_detections,
'scoreThreshold': score_threshold,
'iouThreshold': iou_threshold,
'isNormalized': normalized,
'clipBoxes': False,
# 'scoreBits': 10,
}
nms_output_classes_dtype = np.float32
# NMS Outputs
nms_output_num_detections = gs.Variable(
name="num_detections", dtype=np.int32, shape=[self.batch_size, 1])
nms_output_boxes = gs.Variable(name="detection_boxes", dtype=np.float32,
shape=[self.batch_size, num_detections, 4])
nms_output_scores = gs.Variable(name="detection_scores", dtype=np.float32,
shape=[self.batch_size, num_detections])
nms_output_classes = gs.Variable(name="detection_classes", dtype=nms_output_classes_dtype,
shape=[self.batch_size, num_detections])
nms_outputs = [
nms_output_num_detections,
nms_output_boxes,
nms_output_scores,
nms_output_classes]
# Create the NMS Plugin node with the selected inputs.
# The outputs of the node will also become the final outputs of the graph.
self.graph.plugin(
op=nms_op,
name="nms/non_maximum_suppression",
inputs=nms_inputs,
outputs=nms_outputs,
attrs=nms_attrs)
log.info("Created NMS plugin '{}' with attributes: {}".format(nms_op, nms_attrs))
self.graph.outputs = nms_outputs
self.infer()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/exporter/onnx_exporter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientDet TensorRT engine builder."""
import logging
import os
import sys
import numpy as np
import pycuda.autoinit # noqa pylint: disable=unused-import
import pycuda.driver as cuda
import tensorrt as trt
from nvidia_tao_tf1.cv.efficientdet.exporter.image_batcher import ImageBatcher
logging.basicConfig(level=logging.INFO)
logging.getLogger("EngineBuilder").setLevel(logging.INFO)
log = logging.getLogger("EngineBuilder")
class EngineCalibrator(trt.IInt8EntropyCalibrator2):
"""Implements the INT8 Entropy Calibrator2."""
def __init__(self, cache_file):
"""Init.
:param cache_file: The location of the cache file.
"""
super().__init__()
self.cache_file = cache_file
self.image_batcher = None
self.batch_allocation = None
self.batch_generator = None
def set_image_batcher(self, image_batcher: ImageBatcher):
"""
Define the image batcher to use, if any.
If using only the cache file,
an image batcher doesn't need to be defined.
:param image_batcher: The ImageBatcher object
"""
self.image_batcher = image_batcher
size = int(np.dtype(self.image_batcher.dtype).itemsize * np.prod(self.image_batcher.shape))
self.batch_allocation = cuda.mem_alloc(size)
self.batch_generator = self.image_batcher.get_batch()
def get_batch_size(self):
"""
Overrides from trt.IInt8EntropyCalibrator2.
Get the batch size to use for calibration.
:return: Batch size.
"""
if self.image_batcher:
return self.image_batcher.batch_size
return 1
def get_batch(self, names):
"""
Overrides from trt.IInt8EntropyCalibrator2.
Get the next batch to use for calibration, as a list of device memory pointers.
:param names: The names of the inputs, if useful to define the order of inputs.
:return: A list of int-casted memory pointers.
"""
if not self.image_batcher:
return None
try:
batch, _, _ = next(self.batch_generator)
log.info("Calibrating image {} / {}".format(
self.image_batcher.image_index, self.image_batcher.num_images))
cuda.memcpy_htod(self.batch_allocation, np.ascontiguousarray(batch))
return [int(self.batch_allocation)]
except StopIteration:
log.info("Finished calibration batches")
return None
def read_calibration_cache(self):
"""
Overrides from trt.IInt8EntropyCalibrator2.
Read the calibration cache file stored on disk, if it exists.
:return: The contents of the cache file, if any.
"""
if os.path.exists(self.cache_file):
with open(self.cache_file, "rb") as f:
log.info("Using calibration cache file: {}".format(self.cache_file))
return f.read()
return None
def write_calibration_cache(self, cache):
"""
Overrides from trt.IInt8EntropyCalibrator2.
Store the calibration cache to a file on disk.
:param cache: The contents of the calibration cache to store.
"""
with open(self.cache_file, "wb") as f:
log.info("Writing calibration cache data to: {}".format(self.cache_file))
f.write(cache)
class EngineBuilder:
"""Parses an ONNX graph and builds a TensorRT engine from it."""
def __init__(self, verbose=False, workspace=8):
"""Init.
:param verbose: If enabled, a higher verbosity level will be set on the TensorRT logger.
:param workspace: Max memory workspace to allow, in Gb.
"""
self.trt_logger = trt.Logger(trt.Logger.INFO)
if verbose:
self.trt_logger.min_severity = trt.Logger.Severity.VERBOSE
trt.init_libnvinfer_plugins(self.trt_logger, namespace="")
self.builder = trt.Builder(self.trt_logger)
self.config = self.builder.create_builder_config()
self.config.max_workspace_size = workspace * (2 ** 30)
self.batch_size = None
self.network = None
self.parser = None
def create_network(self, onnx_path):
"""
Parse the ONNX graph and create the corresponding TensorRT network definition.
:param onnx_path: The path to the ONNX graph to load.
"""
network_flags = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
self.network = self.builder.create_network(network_flags)
self.parser = trt.OnnxParser(self.network, self.trt_logger)
onnx_path = os.path.realpath(onnx_path)
with open(onnx_path, "rb") as f:
if not self.parser.parse(f.read()):
log.error("Failed to load ONNX file: {}".format(onnx_path))
for error in range(self.parser.num_errors):
log.error(self.parser.get_error(error))
sys.exit(1)
inputs = [self.network.get_input(i) for i in range(self.network.num_inputs)]
outputs = [self.network.get_output(i) for i in range(self.network.num_outputs)]
log.info("Network Description")
for input in inputs: # noqa pylint: disable=W0622
self.batch_size = input.shape[0]
log.info("Input '{}' with shape {} and dtype {}".format(
input.name, input.shape, input.dtype))
for output in outputs:
log.info("Output '{}' with shape {} and dtype {}".format(
output.name, output.shape, output.dtype))
assert self.batch_size > 0
self.builder.max_batch_size = self.batch_size
def create_engine(self, engine_path, precision,
calib_input=None, calib_cache=None, calib_num_images=5000,
calib_batch_size=8):
"""
Build the TensorRT engine and serialize it to disk.
:param engine_path: The path where to serialize the engine to.
:param precision: The datatype to use for the engine, either 'fp32', 'fp16' or 'int8'.
:param calib_input: The path to a directory holding the calibration images.
:param calib_cache: The path where to write the calibration cache to,
or if it already exists, load it from.
:param calib_num_images: The maximum number of images to use for calibration.
:param calib_batch_size: The batch size to use for the calibration process.
"""
engine_path = os.path.realpath(engine_path)
engine_dir = os.path.dirname(engine_path)
os.makedirs(engine_dir, exist_ok=True)
log.debug("Building {} Engine in {}".format(precision, engine_path))
inputs = [self.network.get_input(i) for i in range(self.network.num_inputs)]
if precision == "fp16":
if not self.builder.platform_has_fast_fp16:
log.warning("FP16 is not supported natively on this platform/device")
else:
self.config.set_flag(trt.BuilderFlag.FP16)
elif precision == "int8":
if not self.builder.platform_has_fast_int8:
log.warning("INT8 is not supported natively on this platform/device")
else:
if self.builder.platform_has_fast_fp16:
# Also enable fp16, as some layers may be even more efficient in fp16 than int8
self.config.set_flag(trt.BuilderFlag.FP16)
self.config.set_flag(trt.BuilderFlag.INT8)
self.config.int8_calibrator = EngineCalibrator(calib_cache)
if not os.path.exists(calib_cache):
calib_shape = [calib_batch_size] + list(inputs[0].shape[1:])
calib_dtype = trt.nptype(inputs[0].dtype)
self.config.int8_calibrator.set_image_batcher(
ImageBatcher(calib_input, calib_shape, calib_dtype,
max_num_images=calib_num_images,
exact_batches=True))
with self.builder.build_engine(self.network, self.config) as engine, \
open(engine_path, "wb") as f:
log.debug("Serializing engine to file: {:}".format(engine_path))
f.write(engine.serialize())
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/exporter/trt_builder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ONNX utils."""
import logging
import onnx_graphsurgeon as gs
logging.basicConfig(level=logging.INFO)
logging.getLogger("EfficientDetHelper").setLevel(logging.INFO)
log = logging.getLogger("EfficientDetHelper")
@gs.Graph.register()
def elt_const(self, op, name, input, value): # noqa pylint: disable=W0622
"""
Element-wise operation.
Add an element-wise operation to the graph which will operate
on the input tensor with the value(s) given.
:param op: The ONNX operation to perform, i.e. "Add" or "Mul".
:param input: The tensor to operate on.
:param value: The value array to operate with.
:param name: The name to use for the node.
"""
input_tensor = input if type(input) is gs.Variable else input[0]
log.debug("Created {} node '{}': {}".format(op, name, value.squeeze()))
const = gs.Constant(name="{}_value:0".format(name), values=value)
return self.layer(
name=name, op=op,
inputs=[input_tensor, const], outputs=[name + ":0"])
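# Illustrative use of the registered helper on an ONNX-GraphSurgeon graph
# (model path and values are placeholders; assumes `import numpy as np` and `import onnx`):
#   graph = gs.import_onnx(onnx.load("/path/to/model.onnx"))
#   scaled = graph.elt_const("Mul", "preprocessor/scale", graph.inputs[0],
#                            np.asarray([1.0 / 255.0], dtype=np.float32))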
@gs.Graph.register()
def unsqueeze(self, name, input, axes=None): # noqa pylint: disable=W0622
"""
Adds to the graph an Unsqueeze node for the given axes and to the given input.
:param self: The gs.Graph object being extended.
:param name: The name to use for the node.
:param input: The tensor to be "unsqueezed".
:param axes: A list of axes on which to add the new dimension(s).
:return: The first output tensor, to allow chained graph construction.
"""
input_tensor = input if type(input) is gs.Variable else input[0]
log.debug("Created Unsqueeze node '{}': {}".format(name, axes))
return self.layer(
name=name, op="Unsqueeze",
inputs=[input_tensor], outputs=[name + ":0"], attrs={'axes': axes})
@gs.Graph.register()
def transpose(self, name, input, perm): # noqa pylint: disable=W0622
"""
Adds to the graph a Transpose node for the given axes permutation and to the given input.
:param self: The gs.Graph object being extended.
:param name: The name to use for the node.
:param input: The tensor to be transposed.
:param perm: A list of axes defining their order after transposing occurs.
:return: The first output tensor, to allow chained graph construction.
"""
input_tensor = input if type(input) is gs.Variable else input[0]
log.debug("Created Transpose node '{}': {}".format(name, perm))
return self.layer(
name=name, op="Transpose",
inputs=[input_tensor], outputs=[name + ":0"], attrs={'perm': perm})
@gs.Graph.register()
def sigmoid(self, name, input): # noqa pylint: disable=W0622
"""
Adds to the graph a Sigmoid node for the given input.
:param self: The gs.Graph object being extended.
:param name: The name to use for the node.
:param input: The tensor to be applied to.
:return: The first output tensor, to allow chained graph construction.
"""
input_tensor = input if type(input) is gs.Variable else input[0]
log.debug("Created Sigmoid node '{}'".format(name))
return self.layer(
name=name, op="Sigmoid",
inputs=[input_tensor], outputs=[name + ":0"])
@gs.Graph.register()
def plugin(self, op, name, inputs, outputs, attrs): # noqa pylint: disable=W0622
"""
Adds to the graph a TensorRT plugin node with the given name, inputs and outputs.
The attrs dictionary holds attributes to be added to the plugin node.
:param self: The gs.Graph object being extended.
:param op: The registered name for the TensorRT plugin.
:param name: The name to use for the node.
    :param inputs: The list of tensors to use as inputs.
:param outputs: The list of tensors to use as outputs.
:param attrs: The dictionary to use as attributes.
:return: The first output tensor, to allow chained graph construction.
"""
input_tensors = inputs if type(inputs) is list else [inputs]
log.debug("Created TRT Plugin node '{}': {}".format(name, attrs))
return self.layer(
op=op, name=name,
inputs=input_tensors, outputs=outputs, attrs=attrs)
@gs.Graph.register()
def find_node_by_op(self, op):
"""
Finds the first node in the graph with the given operation name.
:param self: The gs.Graph object being extended.
:param op: The operation name to search for.
    :return: The first node found that performs the given op.
"""
for node in self.nodes:
if node.op == op:
return node
return None
@gs.Graph.register()
def find_descendant_by_op(self, node, op, depth=10):
"""
Find lower node by matching op name.
Starting from the given node, finds a node lower in the graph
matching the given operation name. This is not an
exhaustive graph search, it will take only the first output of
each node traversed while searching depth-first.
:param self: The gs.Graph object being extended.
:param node: The node to start searching from.
:param op: The operation name to search for.
:param depth: Stop searching after traversing these many nodes.
    :return: The first descendant node found that performs the given op.
"""
for _ in range(depth):
node = node.o()
if node.op == op:
return node
return None
@gs.Graph.register()
def find_ancestor_by_op(self, node, op, depth=10):
"""
Find higher node by matching op name.
Starting from the given node, finds a node higher in the graph
matching the given operation name. This is not an
exhaustive graph search, it will take only the first input of
each node traversed while searching depth-first.
:param self: The gs.Graph object being extended.
:param node: The node to start searching from.
:param op: The operation name to search for.
:param depth: Stop searching after traversing these many nodes.
:return: The first ancestor node matching that performs that op.
"""
for _ in range(depth):
node = node.i()
if node.op == op:
return node
return None
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/exporter/onnx_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IVA EfficientDet inferencer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/inferencer/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Standalone TensorRT inference."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import numpy as np
import pycuda.autoinit # noqa pylint: disable=unused-import
import pycuda.driver as cuda
import tensorrt as trt
from nvidia_tao_tf1.cv.efficientdet.exporter.image_batcher import ImageBatcher
from nvidia_tao_tf1.cv.efficientdet.visualize.vis_utils import visualize_detections
class TensorRTInfer:
"""Implements inference for the EfficientDet TensorRT engine."""
def __init__(self, engine_path):
"""Init.
:param engine_path: The path to the serialized engine to load from disk.
"""
# Load TRT engine
self.logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(self.logger, namespace="")
with open(engine_path, "rb") as f, trt.Runtime(self.logger) as runtime:
self.engine = runtime.deserialize_cuda_engine(f.read())
self.context = self.engine.create_execution_context()
assert self.engine
assert self.context
# Setup I/O bindings
self.inputs = []
self.outputs = []
self.allocations = []
for i in range(self.engine.num_bindings):
is_input = False
if self.engine.binding_is_input(i):
is_input = True
name = self.engine.get_binding_name(i)
dtype = self.engine.get_binding_dtype(i)
shape = self.engine.get_binding_shape(i)
if is_input:
self.batch_size = shape[0]
size = np.dtype(trt.nptype(dtype)).itemsize
for s in shape:
size *= s
allocation = cuda.mem_alloc(size)
binding = {
'index': i,
'name': name,
'dtype': np.dtype(trt.nptype(dtype)),
'shape': list(shape),
'allocation': allocation,
}
self.allocations.append(allocation)
if self.engine.binding_is_input(i):
self.inputs.append(binding)
else:
self.outputs.append(binding)
assert self.batch_size > 0
assert len(self.inputs) > 0
assert len(self.outputs) > 0
assert len(self.allocations) > 0
def input_spec(self):
"""
Get the specs for the input tensor of the network. Useful to prepare memory allocations.
:return: Two items, the shape of the input tensor and its (numpy) datatype.
"""
return self.inputs[0]['shape'], self.inputs[0]['dtype']
def output_spec(self):
"""
Get the specs for the output tensors of the network. Useful to prepare memory allocations.
:return: A list with two items per element,
the shape and (numpy) datatype of each output tensor.
"""
specs = []
for o in self.outputs:
specs.append((o['shape'], o['dtype']))
return specs
def infer(self, batch, scales=None, nms_threshold=None):
"""
Execute inference on a batch of images.
The images should already be batched and preprocessed, as prepared by
the ImageBatcher class. Memory copying to and from the GPU device will be performed here.
:param batch: A numpy array holding the image batch.
:param scales: The image resize scales for each image in this batch.
Default: No scale postprocessing applied.
        :param nms_threshold: Optional score threshold below which detections are discarded.
        :return: A nested list for each image in the batch and each detection in the list.
"""
# Prepare the output data
outputs = []
for shape, dtype in self.output_spec():
outputs.append(np.zeros(shape, dtype))
# Process I/O and execute the network
cuda.memcpy_htod(self.inputs[0]['allocation'], np.ascontiguousarray(batch))
self.context.execute_v2(self.allocations)
for o in range(len(outputs)):
cuda.memcpy_dtoh(outputs[o], self.outputs[o]['allocation'])
# Process the results
nums = outputs[0]
boxes = outputs[1]
scores = outputs[2]
classes = outputs[3]
detections = []
normalized = (np.max(boxes) < 2.0)
for i in range(self.batch_size):
detections.append([])
for n in range(int(nums[i])):
scale = self.inputs[0]['shape'][2] if normalized else 1.0
if scales and i < len(scales):
scale /= scales[i]
if nms_threshold and scores[i][n] < nms_threshold:
continue
detections[i].append({
'ymin': boxes[i][n][0] * scale,
'xmin': boxes[i][n][1] * scale,
'ymax': boxes[i][n][2] * scale,
'xmax': boxes[i][n][3] * scale,
'score': scores[i][n],
'class': int(classes[i][n]),
})
return detections
def __del__(self):
"""Simple function to destroy tensorrt handlers."""
if self.context:
del self.context
if self.engine:
del self.engine
if self.allocations:
self.allocations.clear()
def main(args=None):
"""EfficientDet TRT inference."""
args = parse_command_line_arguments(args)
    label_id_mapping = {}
    if args.label_map:
        with open(args.label_map) as f:
            for class_id, label in enumerate(f):
                # inference() expects a 1-based {class_id: class_name} mapping.
                label_id_mapping[class_id + 1] = label.strip()
    inference(args, label_id_mapping, args.threshold)
def inference(args, label_id_mapping, score_threshold):
"""Run trt inference."""
output_dir = os.path.join(args.results_dir, "images_annotated")
out_label_path = os.path.join(args.results_dir, "labels")
os.makedirs(output_dir, exist_ok=True)
os.makedirs(out_label_path, exist_ok=True)
labels = [i[1] for i in sorted(label_id_mapping.items(), key=lambda x: x[0])]
trt_infer = TensorRTInfer(args.model_path)
batcher = ImageBatcher(args.image_dir, *trt_infer.input_spec())
for batch, images, scales in batcher.get_batch():
print("Processing Image {} / {}".format(batcher.image_index, batcher.num_images), end="\r")
detections = trt_infer.infer(batch, scales, score_threshold)
for i in range(len(images)):
basename = os.path.splitext(os.path.basename(images[i]))[0]
# Image Visualizations
output_path = os.path.join(output_dir, "{}.png".format(basename))
visualize_detections(images[i], output_path, detections[i], labels)
if out_label_path:
os.makedirs(out_label_path, exist_ok=True)
                assert label_id_mapping, "Label mapping must be valid to generate KITTI labels."
# Generate KITTI labels
kitti_txt = ""
for d in detections[i]:
kitti_txt += label_id_mapping[int(d['class'])+1] + ' 0 0 0 ' + ' '.join(
[str(d['xmin']), str(d['ymin']), str(d['xmax']), str(d['ymax'])]) + \
' 0 0 0 0 0 0 0 ' + str(d['score']) + '\n'
with open(os.path.join(out_label_path, "{}.txt".format(basename)), "w") as f:
f.write(kitti_txt)
print("Finished Processing")
def build_command_line_parser(parser=None):
"""Build the command line parser using argparse.
Args:
parser (subparser): Provided from the wrapper script to build a chained
parser mechanism.
Returns:
parser
"""
if parser is None:
        parser = argparse.ArgumentParser(
            prog='inference', description='Run TensorRT inference with an EfficientDet model.')
parser.add_argument(
'-i',
'--in_image_path',
type=str,
required=True,
help='Input image or input directory of images')
parser.add_argument(
'-m',
'--model_path',
type=str,
required=True,
help='Path to the EfficientDet TensorRT engine.'
)
parser.add_argument(
'-o',
'--out_image_path',
type=str,
required=True,
help='Directory where the annotated images are saved.'
)
parser.add_argument(
'-l',
'--out_label_path',
type=str,
required=False,
help='Directory where the predicted labels are saved.'
)
parser.add_argument(
'--label_map',
type=str,
required=True,
help='Label file for all the classes.'
)
parser.add_argument(
"-t", "--threshold", type=float,
help="Override the score threshold for the NMS operation, "
"if higher than the threshold in the engine.")
return parser
def parse_command_line_arguments(args=None):
"""Simple function to parse command line arguments."""
parser = build_command_line_parser(args)
return parser.parse_args(args)
if __name__ == '__main__':
main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/inferencer/inference_trt.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Inference related utilities."""
from __future__ import absolute_import
from __future__ import division
# gtype import
from __future__ import print_function
import copy
import functools
import os
import time
from typing import Any, Dict, List, Text, Tuple, Union
from absl import logging
import numpy as np
from PIL import Image
import tensorflow as tf
from tensorflow.python.client import timeline # pylint: disable=g-direct-tensorflow-import
import yaml
from nvidia_tao_tf1.cv.efficientdet.dataloader import dataloader
from nvidia_tao_tf1.cv.efficientdet.models import anchors
from nvidia_tao_tf1.cv.efficientdet.models import det_model_fn
from nvidia_tao_tf1.cv.efficientdet.utils import hparams_config
from nvidia_tao_tf1.cv.efficientdet.utils import utils
from nvidia_tao_tf1.cv.efficientdet.visualize import vis_utils
coco_id_mapping = {
1: 'person', 2: 'bicycle', 3: 'car', 4: 'motorcycle', 5: 'airplane',
6: 'bus', 7: 'train', 8: 'truck', 9: 'boat', 10: 'traffic light',
11: 'fire hydrant', 13: 'stop sign', 14: 'parking meter', 15: 'bench',
16: 'bird', 17: 'cat', 18: 'dog', 19: 'horse', 20: 'sheep', 21: 'cow',
22: 'elephant', 23: 'bear', 24: 'zebra', 25: 'giraffe', 27: 'backpack',
28: 'umbrella', 31: 'handbag', 32: 'tie', 33: 'suitcase', 34: 'frisbee',
35: 'skis', 36: 'snowboard', 37: 'sports ball', 38: 'kite',
39: 'baseball bat', 40: 'baseball glove', 41: 'skateboard', 42: 'surfboard',
43: 'tennis racket', 44: 'bottle', 46: 'wine glass', 47: 'cup', 48: 'fork',
49: 'knife', 50: 'spoon', 51: 'bowl', 52: 'banana', 53: 'apple',
54: 'sandwich', 55: 'orange', 56: 'broccoli', 57: 'carrot', 58: 'hot dog',
59: 'pizza', 60: 'donut', 61: 'cake', 62: 'chair', 63: 'couch',
64: 'potted plant', 65: 'bed', 67: 'dining table', 70: 'toilet', 72: 'tv',
73: 'laptop', 74: 'mouse', 75: 'remote', 76: 'keyboard', 77: 'cell phone',
78: 'microwave', 79: 'oven', 80: 'toaster', 81: 'sink', 82: 'refrigerator',
84: 'book', 85: 'clock', 86: 'vase', 87: 'scissors', 88: 'teddy bear',
89: 'hair drier', 90: 'toothbrush',
} # pyformat: disable
def image_preprocess(image, image_size: Union[int, Tuple[int, int]]):
"""Preprocess image for inference.
Args:
image: input image, can be a tensor or a numpy array.
image_size: single integer of image size for square image or tuple of two
integers, in the format of (image_height, image_width).
Returns:
(image, scale): a tuple of processed image and its scale.
"""
input_processor = dataloader.DetectionInputProcessor(image, image_size)
input_processor.normalize_image()
input_processor.set_scale_factors_to_output_size()
image = input_processor.resize_and_crop_image()
image_scale = input_processor.image_scale_to_original
return image, image_scale
def batch_image_files_decode(image_files):
"""Decode batch of images."""
def decode(image_file):
image = tf.io.decode_image(image_file)
image.set_shape([None, None, None])
return image
raw_images = tf.map_fn(decode, image_files, dtype=tf.uint8)
return tf.stack(raw_images)
def batch_image_preprocess(raw_images,
image_size: Union[int, Tuple[int, int]],
batch_size: int = None):
"""Preprocess batched images for inference.
Args:
raw_images: a list of images, each image can be a tensor or a numpy array.
image_size: single integer of image size for square image or tuple of two
integers, in the format of (image_height, image_width).
batch_size: if None, use map_fn to deal with dynamic batch size.
Returns:
(image, scale): a tuple of processed images and scales.
"""
if not batch_size:
# map_fn is a little bit slower due to some extra overhead.
map_fn = functools.partial(image_preprocess, image_size=image_size)
images, scales = tf.map_fn(
map_fn, raw_images, dtype=(tf.float32, tf.float32), back_prop=False)
return (images, scales)
# If batch size is known, use a simple loop.
scales, images = [], []
for i in range(batch_size):
image, scale = image_preprocess(raw_images[i], image_size)
scales.append(scale)
images.append(image)
images = tf.stack(images)
scales = tf.stack(scales)
return (images, scales)
def build_inputs(image_path_pattern: Text, image_size: Union[int, Tuple[int, int]]):
"""Read and preprocess input images.
Args:
image_path_pattern: a path to indicate a single or multiple files.
image_size: single integer of image size for square image or tuple of two
integers, in the format of (image_height, image_width).
Returns:
(raw_images, images, scales): raw images, processed images, and scales.
Raises:
ValueError if image_path_pattern doesn't match any file.
"""
raw_images, images, scales, fnames = [], [], [], []
for fname in tf.io.gfile.glob(image_path_pattern):
image = Image.open(fname).convert('RGB')
raw_images.append(image)
image, scale = image_preprocess(image, image_size)
images.append(image)
scales.append(scale)
fnames.append(fname)
if not images:
raise ValueError(
'Cannot find any images for pattern {}'.format(image_path_pattern))
return raw_images, tf.stack(images), tf.stack(scales), fnames
def build_model(model_name: Text, inputs: tf.Tensor, **kwargs):
"""Build model for a given model name.
Args:
model_name: the name of the model.
inputs: an image tensor or a numpy array.
**kwargs: extra parameters for model builder.
Returns:
(cls_outputs, box_outputs): the outputs for class and box predictions.
Each is a dictionary with key as feature level and value as predictions.
"""
model_arch = det_model_fn.get_model_arch(model_name)
cls_outputs, box_outputs = model_arch(inputs, model_name=model_name, config=kwargs)
if kwargs.get('precision', None):
# Post-processing has multiple places with hard-coded float32.
# TODO(tanmingxing): Remove them once post-process can adapt to dtypes.
cls_outputs = {k: tf.cast(v, tf.float32) for k, v in cls_outputs.items()}
box_outputs = {k: tf.cast(v, tf.float32) for k, v in box_outputs.items()}
return cls_outputs, box_outputs
def restore_ckpt(sess, ckpt_path, ema_decay=0.9998, export_ckpt=None):
"""Restore variables from a given checkpoint.
Args:
sess: a tf session for restoring or exporting models.
ckpt_path: the path of the checkpoint. Can be a file path or a folder path.
ema_decay: ema decay rate. If None or zero or negative value, disable ema.
export_ckpt: whether to export the restored model.
"""
sess.run(tf.global_variables_initializer())
if tf.io.gfile.isdir(ckpt_path):
ckpt_path = tf.train.latest_checkpoint(ckpt_path)
if ema_decay > 0:
ema = tf.train.ExponentialMovingAverage(decay=0.0)
ema_vars = utils.get_ema_vars()
var_dict = ema.variables_to_restore(ema_vars)
ema_assign_op = ema.apply(ema_vars)
else:
var_dict = utils.get_ema_vars()
ema_assign_op = None
tf.train.get_or_create_global_step()
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(var_dict, max_to_keep=1)
if ckpt_path == '_':
logging.info('Running test: do not load any ckpt.')
return
# Restore all variables from ckpt.
saver.restore(sess, ckpt_path)
if export_ckpt:
print('export model to {}'.format(export_ckpt))
if ema_assign_op is not None:
sess.run(ema_assign_op)
saver = tf.train.Saver(max_to_keep=1, save_relative_paths=True)
saver.save(sess, export_ckpt)
def det_post_process_combined(params, cls_outputs, box_outputs, scales,
min_score_thresh, max_boxes_to_draw):
"""A combined version of det_post_process with dynamic batch size support."""
batch_size = tf.shape(list(cls_outputs.values())[0])[0]
cls_outputs_all = []
box_outputs_all = []
# Concatenates class and box of all levels into one tensor.
for level in range(params['min_level'], params['max_level'] + 1):
if params['data_format'] == 'channels_first':
cls_outputs[level] = tf.transpose(cls_outputs[level], [0, 2, 3, 1])
box_outputs[level] = tf.transpose(box_outputs[level], [0, 2, 3, 1])
cls_outputs_all.append(
tf.reshape(cls_outputs[level], [batch_size, -1, params['num_classes']]))
box_outputs_all.append(tf.reshape(box_outputs[level], [batch_size, -1, 4]))
cls_outputs_all = tf.concat(cls_outputs_all, 1)
box_outputs_all = tf.concat(box_outputs_all, 1)
# Create anchor_label for picking top-k predictions.
eval_anchors = anchors.Anchors(params['min_level'], params['max_level'],
params['num_scales'], params['aspect_ratios'],
params['anchor_scale'], params['image_size'])
anchor_boxes = eval_anchors.boxes
scores = tf.math.sigmoid(cls_outputs_all)
# apply bounding box regression to anchors
boxes = anchors.decode_box_outputs_tf(box_outputs_all, anchor_boxes)
boxes = tf.expand_dims(boxes, axis=2)
scales = tf.expand_dims(scales, axis=-1)
nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections = (
tf.image.combined_non_max_suppression(
boxes,
scores,
max_boxes_to_draw,
max_boxes_to_draw,
score_threshold=min_score_thresh,
clip_boxes=False))
del valid_detections # to be used in future.
image_ids = tf.cast(
tf.tile(
tf.expand_dims(tf.range(batch_size), axis=1), [1, max_boxes_to_draw]),
dtype=tf.float32)
image_size = utils.parse_image_size(params['image_size'])
ymin = tf.clip_by_value(nmsed_boxes[..., 0], 0, image_size[0]) * scales
xmin = tf.clip_by_value(nmsed_boxes[..., 1], 0, image_size[1]) * scales
ymax = tf.clip_by_value(nmsed_boxes[..., 2], 0, image_size[0]) * scales
xmax = tf.clip_by_value(nmsed_boxes[..., 3], 0, image_size[1]) * scales
classes = tf.cast(nmsed_classes + 1, tf.float32)
detection_list = [image_ids, ymin, xmin, ymax, xmax, nmsed_scores, classes]
detections = tf.stack(detection_list, axis=2, name='detections')
return detections
def det_post_process(params: Dict[Any, Any], cls_outputs: Dict[int, tf.Tensor],
box_outputs: Dict[int, tf.Tensor], scales: List[float],
min_score_thresh, max_boxes_to_draw):
"""Post preprocessing the box/class predictions.
Args:
params: a parameter dictionary that includes `min_level`, `max_level`,
`batch_size`, and `num_classes`.
cls_outputs: an OrderDict with keys representing levels and values
representing logits in [batch_size, height, width, num_anchors].
box_outputs: an OrderDict with keys representing levels and values
representing box regression targets in [batch_size, height, width,
num_anchors * 4].
scales: a list of float values indicating image scale.
min_score_thresh: A float representing the threshold for deciding when to
remove boxes based on score.
max_boxes_to_draw: Max number of boxes to draw.
Returns:
detections_batch: a batch of detection results. Each detection is a tensor
with each row as [image_id, ymin, xmin, ymax, xmax, score, class].
"""
if not params['batch_size']:
# Use combined version for dynamic batch size.
return det_post_process_combined(params, cls_outputs, box_outputs, scales,
min_score_thresh, max_boxes_to_draw)
# TODO(tanmingxing): refactor the code to make it more explicit.
outputs = {
'cls_outputs_all': [None],
'box_outputs_all': [None],
'indices_all': [None],
'classes_all': [None]
}
det_model_fn.add_metric_fn_inputs(params, cls_outputs, box_outputs, outputs, -1)
# Create anchor_label for picking top-k predictions.
eval_anchors = anchors.Anchors(params['min_level'], params['max_level'],
params['num_scales'], params['aspect_ratios'],
params['anchor_scale'], params['image_size'])
anchor_labeler = anchors.AnchorLabeler(eval_anchors, params['num_classes'])
# Add all detections for each input image.
detections_batch = []
for index in range(params['batch_size']):
cls_outputs_per_sample = outputs['cls_outputs_all'][index]
box_outputs_per_sample = outputs['box_outputs_all'][index]
indices_per_sample = outputs['indices_all'][index]
classes_per_sample = outputs['classes_all'][index]
detections = anchor_labeler.generate_detections(
cls_outputs_per_sample,
box_outputs_per_sample,
indices_per_sample,
classes_per_sample,
image_id=[index],
image_scale=[scales[index]],
image_size=params['image_size'],
min_score_thresh=min_score_thresh,
max_boxes_to_draw=max_boxes_to_draw,
disable_pyfun=params.get('disable_pyfun'))
if params['batch_size'] > 1:
# pad to fixed length if batch size > 1.
padding_size = max_boxes_to_draw - tf.shape(detections)[0]
detections = tf.pad(detections, [[0, padding_size], [0, 0]])
detections_batch.append(detections)
return tf.stack(detections_batch, name='detections')
def visualize_image(image,
boxes,
classes,
scores,
id_mapping,
min_score_thresh=anchors.MIN_SCORE_THRESH,
max_boxes_to_draw=anchors.MAX_DETECTIONS_PER_IMAGE,
line_thickness=2,
**kwargs):
"""Visualizes a given image.
Args:
image: an image with shape [H, W, C].
boxes: a box prediction with shape [N, 4] ordered [ymin, xmin, ymax, xmax].
classes: a class prediction with shape [N].
scores: A list of float value with shape [N].
id_mapping: a dictionary from class id to name.
min_score_thresh: minimal score for showing. If class probability is below
this threshold, then the object will not show up.
max_boxes_to_draw: maximum bounding box to draw.
line_thickness: how thick is the bounding box line.
**kwargs: extra parameters.
Returns:
output_image: an output image with annotated boxes and classes.
"""
category_index = {k: {'id': k, 'name': id_mapping[k]} for k in id_mapping}
img = np.array(image)
vis_utils.visualize_boxes_and_labels_on_image_array(
img,
boxes,
classes,
scores,
category_index,
min_score_thresh=min_score_thresh,
max_boxes_to_draw=max_boxes_to_draw,
line_thickness=line_thickness,
**kwargs)
return img
def parse_label_id_mapping(
label_id_mapping: Union[Text, Dict[int, Text]] = None) -> Dict[int, Text]:
"""Parse label id mapping from a string or a yaml file.
The label_id_mapping is a dict that maps class id to its name, such as:
{
1: "person",
2: "dog"
}
Args:
label_id_mapping: a dict mapping class ids to names, or a path to a yaml
file containing such a mapping. If None, the default COCO mapping is used.
Returns:
A dictionary with key as integer id and value as a string of name.
"""
if label_id_mapping is None:
return coco_id_mapping
if isinstance(label_id_mapping, dict):
label_id_dict = label_id_mapping
elif isinstance(label_id_mapping, str):
with tf.io.gfile.GFile(label_id_mapping) as f:
label_id_dict = yaml.load(f, Loader=yaml.FullLoader)
else:
raise TypeError('label_id_mapping must be a dict or a yaml filename, '
'containing a mapping from class ids to class names.')
return label_id_dict
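# Illustrative sketch (not executed): a label_id_mapping yaml file is a flat
# mapping from integer class ids to names. The file path and class names below
# are hypothetical examples.
#
#   # /tmp/label_map.yaml
#   # 1: person
#   # 2: dog
#
#   label_dict = parse_label_id_mapping('/tmp/label_map.yaml')
#   # -> {1: 'person', 2: 'dog'}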
def visualize_image_prediction(image,
prediction,
disable_pyfun=True,
label_id_mapping=None,
**kwargs):
"""Viusalize detections on a given image.
Args:
image: Image content in shape of [height, width, 3].
prediction: a list of vector, with each vector has the format of [image_id,
ymin, xmin, ymax, xmax, score, class].
disable_pyfun: disable pyfunc for faster post processing.
label_id_mapping: a map from label id to name.
**kwargs: extra parameters for visualization, such as min_score_thresh,
max_boxes_to_draw, and line_thickness.
Returns:
a list of annotated images.
"""
boxes = prediction[:, 1:5]
classes = prediction[:, 6].astype(int)
scores = prediction[:, 5]
if not disable_pyfun:
# convert [x, y, width, height] to [y, x, height, width]
boxes[:, [0, 1, 2, 3]] = boxes[:, [1, 0, 3, 2]]
label_id_mapping = label_id_mapping or {} # coco_id_mapping
return visualize_image(image, boxes, classes, scores, label_id_mapping,
**kwargs)
class ServingDriver(object):
"""A driver for serving single or batch images.
This driver supports serving with image files or arrays, with configurable
batch size.
Example 1. Serving streaming image contents:
driver = inference.ServingDriver(
'efficientdet-d0', '/tmp/efficientdet-d0', batch_size=1)
driver.build()
for m in image_iterator():
predictions = driver.serve_files([m])
m = driver.visualize(m, predictions[0])
# m is now a new image array with annotated boxes.
Example 2. Serving batch image contents:
imgs = []
for f in ['/tmp/1.jpg', '/tmp/2.jpg']:
imgs.append(np.array(Image.open(f)))
driver = inference.ServingDriver(
'efficientdet-d0', '/tmp/efficientdet-d0', batch_size=len(imgs))
driver.build()
predictions = driver.serve_images(imgs)
for i in range(len(imgs)):
driver.visualize(imgs[i], predictions[i])
Example 3: another way is to use SavedModel:
# step1: export a model.
driver = inference.ServingDriver('efficientdet-d0', '/tmp/efficientdet-d0')
driver.build()
driver.export('/tmp/saved_model_path')
# step2: Serve a model.
with tf.Session() as sess:
tf.saved_model.load(sess, ['serve'], self.saved_model_dir)
raw_images = []
for f in tf.io.gfile.glob('/tmp/images/*.jpg'):
raw_images.append(np.array(PIL.Image.open(f)))
detections = sess.run('detections:0', {'image_arrays:0': raw_images})
driver = inference.ServingDriver(
'efficientdet-d0', '/tmp/efficientdet-d0')
driver.visualize(raw_images[0], detections[0])
PIL.Image.fromarray(raw_images[0]).save(output_image_path)
"""
def __init__(self,
model_name: Text,
ckpt_path: Text,
batch_size: int = 1,
use_xla: bool = False,
min_score_thresh: float = None,
max_boxes_to_draw: int = None,
line_thickness: int = None,
model_params: Dict[Text, Any] = None):
"""Initialize the inference driver.
Args:
model_name: target model name, such as efficientdet-d0.
ckpt_path: checkpoint path, such as /tmp/efficientdet-d0/.
batch_size: batch size for inference.
use_xla: Whether run with xla optimization.
min_score_thresh: minimal score threshold for filtering predictions.
max_boxes_to_draw: the maximum number of boxes per image.
line_thickness: the line thickness for drawing boxes.
model_params: model parameters for overriding the config.
"""
self.model_name = model_name
self.ckpt_path = ckpt_path
self.batch_size = batch_size
self.params = hparams_config.get_detection_config(model_name).as_dict()
if model_params:
self.params.update(model_params)
self.params.update(dict(is_training_bn=False))
self.label_id_mapping = parse_label_id_mapping(
self.params.get('label_id_mapping', None))
self.signitures = None
self.sess = None
self.disable_pyfun = True
self.use_xla = use_xla
self.min_score_thresh = min_score_thresh or anchors.MIN_SCORE_THRESH
self.max_boxes_to_draw = (
max_boxes_to_draw or anchors.MAX_DETECTIONS_PER_IMAGE)
self.line_thickness = line_thickness
def __del__(self):
"""Clear session."""
if self.sess:
self.sess.close()
def _build_session(self):
"""Build session."""
sess_config = tf.ConfigProto()
if self.use_xla:
sess_config.graph_options.optimizer_options.global_jit_level = (
tf.OptimizerOptions.ON_2)
return tf.Session(config=sess_config)
def build(self, params_override=None):
"""Build model and restore checkpoints."""
params = copy.deepcopy(self.params)
if params_override:
params.update(params_override)
if not self.sess:
self.sess = self._build_session()
with self.sess.graph.as_default():
image_files = tf.placeholder(tf.string, name='image_files', shape=[None])
raw_images = batch_image_files_decode(image_files)
raw_images = tf.identity(raw_images, name='image_arrays')
images, scales = batch_image_preprocess(
raw_images, params['image_size'], self.batch_size)
if params['data_format'] == 'channels_first':
images = tf.transpose(images, [0, 3, 1, 2])
class_outputs, box_outputs = build_model(self.model_name, images, **params)
params.update(dict(batch_size=self.batch_size, disable_pyfun=self.disable_pyfun))
detections = det_post_process(params, class_outputs, box_outputs, scales,
self.min_score_thresh,
self.max_boxes_to_draw)
restore_ckpt(
self.sess,
self.ckpt_path,
ema_decay=self.params['moving_average_decay'],
export_ckpt=None)
self.signitures = {
'image_files': image_files,
'image_arrays': raw_images,
'prediction': detections,
}
return self.signitures
def visualize(self, image, prediction, **kwargs):
"""Visualize prediction on image."""
return visualize_image_prediction(
image,
prediction,
disable_pyfun=self.disable_pyfun,
label_id_mapping=self.label_id_mapping,
**kwargs)
def serve_files(self, image_files: List[Text]):
"""Serve a list of input image files.
Args:
image_files: a list of image files with shape [1] and type string.
Returns:
A list of detections.
"""
if not self.sess:
self.build()
predictions = self.sess.run(
self.signitures['prediction'],
feed_dict={self.signitures['image_files']: image_files})
return predictions
def benchmark(self, image_arrays, trace_filename=None):
"""Benchmark inference latency/throughput.
Args:
image_arrays: a list of images in numpy array format.
trace_filename: If not None, the filename to which a Chrome trace of one inference run is saved.
"""
if not self.sess:
self.build()
# init session
self.sess.run(
self.signitures['prediction'],
feed_dict={self.signitures['image_arrays']: image_arrays})
start = time.perf_counter()
for _ in range(10):
self.sess.run(
self.signitures['prediction'],
feed_dict={self.signitures['image_arrays']: image_arrays})
end = time.perf_counter()
inference_time = (end - start) / 10
print('Per batch inference time: ', inference_time)
print('FPS: ', self.batch_size / inference_time)
if trace_filename:
run_options = tf.RunOptions()
run_options.trace_level = tf.RunOptions.FULL_TRACE
run_metadata = tf.RunMetadata()
self.sess.run(
self.signitures['prediction'],
feed_dict={self.signitures['image_arrays']: image_arrays},
options=run_options,
run_metadata=run_metadata)
with tf.io.gfile.GFile(trace_filename, 'w') as trace_file:
trace = timeline.Timeline(step_stats=run_metadata.step_stats)
trace_file.write(trace.generate_chrome_trace_format(show_memory=True))
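# Illustrative usage sketch (not executed; the model name, checkpoint path and
# image file are hypothetical). The trace written above can be inspected by
# loading the JSON file in chrome://tracing.
#
#   driver = ServingDriver('efficientdet-d0', '/tmp/efficientdet-d0', batch_size=1)
#   driver.build()
#   img = np.array(Image.open('/tmp/sample.jpg'))
#   driver.benchmark([img], trace_filename='/tmp/efficientdet_trace.json')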
def serve_images(self, image_arrays):
"""Serve a list of image arrays.
Args:
image_arrays: A list of image content with each image has shape [height,
width, 3] and uint8 type.
Returns:
A list of detections.
"""
if not self.sess:
self.build()
predictions = self.sess.run(
self.signitures['prediction'],
feed_dict={self.signitures['image_arrays']: image_arrays})
return predictions
def load(self, saved_model_dir_or_frozen_graph: Text):
"""Load the model using saved model or a frozen graph."""
if not self.sess:
self.sess = self._build_session()
self.signitures = {
'image_files': 'image_files:0',
'image_arrays': 'image_arrays:0',
'prediction': 'detections:0',
}
# Load saved model if it is a folder.
if tf.io.gfile.isdir(saved_model_dir_or_frozen_graph):
return tf.saved_model.load(self.sess, ['serve'], saved_model_dir_or_frozen_graph)
# Load a frozen graph.
graph_def = tf.GraphDef()
with tf.gfile.GFile(saved_model_dir_or_frozen_graph, 'rb') as f:
graph_def.ParseFromString(f.read())
return tf.import_graph_def(graph_def, name='')
def freeze(self):
"""Freeze the graph."""
output_names = [self.signitures['prediction'].op.name]
graphdef = tf.graph_util.convert_variables_to_constants(
self.sess, self.sess.graph_def, output_names)
return graphdef
def export(self,
output_dir: Text,
tflite_path: Text = None,
tensorrt: Text = None):
"""Export a saved model, frozen graph, and potential tflite/tensorrt model.
Args:
output_dir: the output folder for saved model.
tflite_path: the path for saved tflite file.
tensorrt: If not None, must be {'FP32', 'FP16', 'INT8'}.
"""
signitures = self.signitures
signature_def_map = {
'serving_default':
tf.saved_model.predict_signature_def(
{signitures['image_arrays'].name: signitures['image_arrays']},
{signitures['prediction'].name: signitures['prediction']}),
}
b = tf.saved_model.Builder(output_dir)
b.add_meta_graph_and_variables(
self.sess,
tags=['serve'],
signature_def_map=signature_def_map,
assets_collection=tf.get_collection(tf.GraphKeys.ASSET_FILEPATHS),
clear_devices=True)
b.save()
logging.info('Model saved at %s', output_dir)
# also save freeze pb file.
graphdef = self.freeze()
pb_path = os.path.join(output_dir, self.model_name + '_frozen.pb')
tf.io.gfile.GFile(pb_path, 'wb').write(graphdef.SerializeToString())
logging.info('Frozen graph saved at %s', pb_path)
if tflite_path:
height, width = utils.parse_image_size(self.params['image_size'])
input_name = signitures['image_arrays'].op.name
input_shapes = {input_name: [None, height, width, 3]}
converter = tf.lite.TFLiteConverter.from_saved_model(
output_dir,
input_arrays=[input_name],
input_shapes=input_shapes,
output_arrays=[signitures['prediction'].op.name])
converter.experimental_new_converter = True
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS]
tflite_model = converter.convert()
tf.io.gfile.GFile(tflite_path, 'wb').write(tflite_model)
logging.info('TFLite is saved at %s', tflite_path)
if tensorrt:
# pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
from tensorflow.python.compiler.tensorrt import trt
sess_config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
trt_path = os.path.join(output_dir, 'tensorrt_' + tensorrt.lower())
trt.create_inference_graph(
None,
None,
precision_mode=tensorrt,
input_saved_model_dir=output_dir,
output_saved_model_dir=trt_path,
session_config=sess_config)
logging.info('TensorRT model is saved at %s', trt_path)
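# Illustrative usage sketch (not executed; all paths are hypothetical). One way
# to produce a SavedModel plus a frozen graph, an optional TFLite file and an
# optional TF-TRT saved model from a built driver:
#
#   driver = ServingDriver('efficientdet-d0', '/tmp/efficientdet-d0')
#   driver.build()
#   driver.export('/tmp/saved_model', tflite_path='/tmp/d0.tflite', tensorrt='FP16')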
class InferenceDriver(object):
"""A driver for doing batch inference.
Example usage:
driver = inference.InferenceDriver('efficientdet-d0', '/tmp/efficientdet-d0')
driver.inference('/tmp/*.jpg', '/tmp/outputdir')
"""
def __init__(self,
model_name: Text,
ckpt_path: Text,
model_params: Dict[Text, Any] = None):
"""Initialize the inference driver.
Args:
model_name: target model name, such as efficientdet-d0.
ckpt_path: checkpoint path, such as /tmp/efficientdet-d0/.
model_params: model parameters for overriding the config.
"""
self.model_name = model_name
self.ckpt_path = ckpt_path
self.params = hparams_config.get_detection_config(model_name).as_dict()
if model_params:
self.params.update(model_params)
self.params.update(dict(is_training_bn=False))
self.label_id_mapping = parse_label_id_mapping(
self.params.get('label_id_mapping', None))
self.disable_pyfun = True
def inference(self, image_path_pattern: Text, output_dir: Text,
out_label_path: Text, **kwargs):
"""Read and preprocess input images.
Args:
image_path_pattern: Image file pattern such as /tmp/img*.jpg
output_dir: the directory for output images. Output images will be named
as 0.jpg, 1.jpg, ....
**kwargs: extra parameters for for vistualization, such as
min_score_thresh, max_boxes_to_draw, and line_thickness.
Returns:
Annotated image.
"""
params = copy.deepcopy(self.params)
with tf.Session() as sess:
# Build inputs and preprocessing.
raw_images, images, scales, fnames = \
build_inputs(image_path_pattern, params['image_size'])
if params['data_format'] == 'channels_first':
images = tf.transpose(images, [0, 3, 1, 2])
# Build model.
class_outputs, box_outputs = build_model(self.model_name, images, **self.params)
restore_ckpt(
sess,
self.ckpt_path,
ema_decay=self.params['moving_average_decay'],
export_ckpt=None)
# for postprocessing.
params.update(
dict(batch_size=len(raw_images), disable_pyfun=self.disable_pyfun))
# Build postprocessing.
detections_batch = det_post_process(
params,
class_outputs,
box_outputs,
scales,
min_score_thresh=kwargs.get('min_score_thresh', anchors.MIN_SCORE_THRESH),
max_boxes_to_draw=kwargs.get('max_boxes_to_draw', anchors.MAX_DETECTIONS_PER_IMAGE))
predictions = sess.run(detections_batch)
tf.reset_default_graph()
tf.keras.backend.clear_session()
# Visualize results.
for i, prediction in enumerate(predictions):
img = visualize_image_prediction(
raw_images[i],
prediction,
disable_pyfun=self.disable_pyfun,
label_id_mapping=self.label_id_mapping,
**kwargs)
output_image_path = os.path.join(output_dir, os.path.basename(fnames[i]))
Image.fromarray(img).save(output_image_path)
logging.info('writing output image to %s', output_image_path)
if out_label_path:
assert self.label_id_mapping, \
"Label mapping must be valid to generate KIITI labels."
os.makedirs(out_label_path, exist_ok=True)
# Generate KITTI labels
kitti_txt = ""
for d in prediction:
if d[5] >= kwargs.get('min_score_thresh', 0):
kitti_txt += self.label_id_mapping[int(d[6])] + ' 0 0 0 ' + ' '.join(
[str(i) for i in [d[2], d[1], d[4], d[3]]]) + ' 0 0 0 0 0 0 0 ' + \
str(d[5]) + '\n'
basename = os.path.splitext(os.path.basename(fnames[i]))[0]
with open(os.path.join(out_label_path, "{}.txt".format(basename)), "w") as f:
f.write(kitti_txt)
# free memory
del images
del raw_images
del img
return predictions
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/inferencer/inference.py |
"""Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/executer/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface to run EfficientDet distributed strategies."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import logging
import math
import multiprocessing
import operator
import os
import tempfile
from zipfile import BadZipFile, ZipFile
import horovod.tensorflow as hvd
import keras
import six
import tensorflow as tf
from tensorflow.core.protobuf import rewriter_config_pb2
from nvidia_tao_tf1.core.utils.path_utils import expand_path
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.efficientdet.hooks.enc_ckpt_hook import EncryptCheckpointSaverHook
from nvidia_tao_tf1.cv.efficientdet.hooks.logging_hook import TaskProgressMonitorHook
from nvidia_tao_tf1.cv.efficientdet.hooks.pretrained_restore_hook import \
PretrainedWeightsLoadingHook
from nvidia_tao_tf1.cv.efficientdet.utils.distributed_utils import MPI_is_distributed
from nvidia_tao_tf1.cv.efficientdet.utils.distributed_utils import MPI_local_rank, MPI_rank
from nvidia_tao_tf1.cv.efficientdet.utils.model_loader import load_keras_model
from nvidia_tao_tf1.encoding import encoding
hvd.init()
# os.environ['CUDA_VISIBLE_DEVICES'] = '0' if not os.environ.get('CUDA_VISIBLE_DEVICES') \
# else os.environ['CUDA_VISIBLE_DEVICES'].split(',')[hvd.local_rank()]
@six.add_metaclass(abc.ABCMeta)
class BaseExecuter(object):
"""Interface to run EfficientDet model in GPUs.
Arguments:
model_config: Model configuration needed to run distribution strategy.
model_fn: Model function to be passed to Estimator.
"""
def __init__(self, runtime_config, model_fn):
"""Initialize."""
self._runtime_config = runtime_config
self._model_fn = model_fn
self._temp_dir = tempfile.mkdtemp()
self.curr_step = 0
# To resume from checkpoint
# old tmp dir need to be retrieved
if runtime_config.mode == 'train':
tmp_path = self.get_latest_checkpoint(runtime_config.model_dir, runtime_config.key)
if tmp_path:
with open(os.path.join(self._temp_dir, "checkpoint"), "r") as f:
old_path = f.readline()
old_path = eval(old_path.split(":")[-1])
self._temp_dir = os.path.dirname(old_path)
# shutil.rmtree(os.path.dirname(tmp_path))
ckpt_path = self.get_latest_checkpoint(runtime_config.model_dir,
runtime_config.key)
self._runtime_config.checkpoint = tmp_path
self.curr_step = int(ckpt_path.split('.')[1].split('-')[1])
# Set status logger
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=os.path.join(runtime_config.model_dir, "status.json"),
is_master=hvd.rank() == 0,
verbosity=status_logging.Verbosity.INFO,
append=True
)
)
s_logger = status_logging.get_status_logger()
s_logger.write(
status_level=status_logging.Status.STARTED,
message="Starting EfficientDet training."
)
os.environ['CUDA_CACHE_DISABLE'] = '0'
os.environ['TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT'] = '1'
os.environ['TF_ADJUST_HUE_FUSED'] = '1'
os.environ['TF_ADJUST_SATURATION_FUSED'] = '1'
os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
os.environ['TF_AUTOTUNE_THRESHOLD'] = '2'
os.environ["TF_FORCE_GPU_ALLOW_GROWTH"] = "true"
@staticmethod
def _get_session_config(mode, use_xla, use_amp, use_tf_distributed=False,
allow_xla_at_inference=False):
assert mode in ('train', 'eval')
rewrite_options = rewriter_config_pb2.RewriterConfig(
meta_optimizer_iterations=rewriter_config_pb2.RewriterConfig.TWO)
if use_amp:
logging.info("[%s] AMP is activated - Experiment Feature" % mode)
rewrite_options.auto_mixed_precision = True
config = tf.compat.v1.ConfigProto(
allow_soft_placement=True,
log_device_placement=False,
graph_options=tf.compat.v1.GraphOptions(
rewrite_options=rewrite_options,
# infer_shapes=True # Heavily drops throughput by 30%
),
gpu_options=tf.compat.v1.GPUOptions(
allow_growth=True,
)
)
if use_tf_distributed:
config.gpu_options.force_gpu_compatible = False
else:
config.gpu_options.force_gpu_compatible = True # Force pinned memory
config.gpu_options.allow_growth = True
if MPI_is_distributed():
config.gpu_options.visible_device_list = str(MPI_local_rank())
if use_xla and (mode == "train" or allow_xla_at_inference):
logging.info("[%s] XLA is activated - Experiment Feature" % mode)
config.graph_options.optimizer_options.global_jit_level = \
tf.compat.v1.OptimizerOptions.ON_1
if mode == 'train':
config.intra_op_parallelism_threads = 1 # Avoid pool of Eigen threads
if MPI_is_distributed():
config.inter_op_parallelism_threads = \
max(2, multiprocessing.cpu_count() // hvd.local_size())
elif not use_tf_distributed:
config.inter_op_parallelism_threads = 4
return config
@abc.abstractmethod
def build_strategy_configuration(self, mode):
"""Builds run configuration for distributed train/eval.
Returns:
RunConfig with distribution strategy configurations
to pass to the constructor of TPUEstimator/Estimator.
"""
raise NotImplementedError('Must be implemented in subclass')
def build_model_parameters(self, mode):
"""Builds model parameter."""
assert mode in ('train', 'eval')
batch_size = self._runtime_config.train_batch_size \
if mode == 'train' else self._runtime_config.eval_batch_size
params = dict(
self._runtime_config.as_dict(),
mode=mode,
batch_size=batch_size,
# model_dir=self._runtime_config.model_dir,
)
if mode == 'eval':
params = dict(
params,
input_rand_hflip=False,
is_training_bn=False,
precision=None,
)
return params
def build_efficientdet_estimator(self, params, run_config, mode):
"""Creates Estimator instance.
Arguments:
params: A dictionary to pass to Estimator `model_fn`.
run_config: RunConfig instance specifying distribution strategy
configurations.
mode: Mode -- one of 'train` or `eval`.
Returns:
TFEstimator instance.
"""
assert mode in ('train', 'eval')
return tf.estimator.Estimator(
model_fn=self._model_fn,
model_dir=self._temp_dir,
config=run_config,
params=params
)
def get_training_hooks(self, mode, params):
"""Set up training hooks."""
assert mode in ('train', 'eval')
training_hooks = []
steps_per_epoch = (params['num_examples_per_epoch'] + params['batch_size'] - 1) \
// params['batch_size']
if not MPI_is_distributed() or MPI_rank() == 0:
training_hooks.append(
TaskProgressMonitorHook(
params['batch_size'],
epochs=params['num_epochs'],
steps_per_epoch=steps_per_epoch,
logging_frequency=params['logging_frequency']))
training_hooks.append(EncryptCheckpointSaverHook(
checkpoint_dir=params['model_dir'],
temp_dir=self._temp_dir,
key=params['key'],
checkpoint_basename="model.ckpt",
steps_per_epoch=steps_per_epoch
))
if params.get('checkpoint', None):
checkpoint_path = self.load_pretrained_model(
params['checkpoint'], params.get('pruned_model_path', ''))
training_hooks.append(PretrainedWeightsLoadingHook(
prefix="",
checkpoint_path=checkpoint_path,
skip_variables_regex=params.get('skip_checkpoint_variables', None)
))
if MPI_is_distributed() and mode == "train":
training_hooks.append(hvd.BroadcastGlobalVariablesHook(root_rank=0))
# stop training after x epochs
if params['stop_at_epoch']:
stop_hook = tf.estimator.StopAtStepHook(
last_step=params['stop_at_epoch'] * steps_per_epoch)
training_hooks.append(stop_hook)
return training_hooks
def load_pretrained_model(self, checkpoint_path, pruned_model_path=''):
"""Load pretrained model."""
is_pruned = bool(pruned_model_path)
_, ext = os.path.splitext(checkpoint_path)
if ext == '.hdf5':
logging.info("Loading pretrained model...")
load_keras_model(checkpoint_path, is_pruned)
km_weights = tf.get_collection(
tf.compat.v1.GraphKeys.GLOBAL_VARIABLES,
scope=None)
with tempfile.NamedTemporaryFile() as f:
if is_pruned:
checkpoint_path = tf.train.Saver(km_weights).save(
tf.keras.backend.get_session(), f.name)
else:
checkpoint_path = tf.train.Saver(km_weights).save(
keras.backend.get_session(), f.name)
return checkpoint_path
if ext == '.tlt':
"""Get unencrypted checkpoint from tlt file."""
raise ValueError("You shouldn't be here.")
if '.ckpt' in ext:
return checkpoint_path
raise ValueError("Pretrained weights in only .hdf5 or .tlt format are supported.")
def get_latest_checkpoint(self, results_dir, key):
"""Get the latest checkpoint path from a given results directory.
Parses through the directory to look for the latest checkpoint file
and returns the path to this file.
Args:
results_dir (str): Path to the results directory.
Returns:
ckpt_path (str): Path to the latest checkpoint.
"""
if not os.path.exists(results_dir):
return None
trainable_ckpts = [int(item.split('.')[1].split('-')[1])
for item in os.listdir(results_dir) if item.endswith(".tlt")]
num_ckpts = len(trainable_ckpts)
if num_ckpts == 0:
return None
latest_step = sorted(trainable_ckpts, reverse=True)[0]
latest_checkpoint = os.path.join(results_dir, "model.epoch-{}.tlt".format(latest_step))
return self.extract_ckpt(latest_checkpoint, key)
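# Worked example of the parsing above (the file name is hypothetical): for a
# checkpoint named "model.epoch-7.tlt", split('.')[1] yields "epoch-7" and
# split('-')[1] yields "7", so latest_step resolves to 7 and
# "<results_dir>/model.epoch-7.tlt" is the file passed to extract_ckpt().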
def extract_zip_file(self, zip_file):
"""Get the checkpoint file.
Args:
zip_file (str): Path to the zip file.
"""
with ZipFile(zip_file, "r") as zip_object:
for member in zip_object.namelist():
zip_object.extract(member, path=self._temp_dir)
if member.startswith('model.ckpt-'):
step = int(member.split('model.ckpt-')[-1].split('.')[0])
return expand_path(f"{self._temp_dir}/model.ckpt-{step}")
def extract_ckpt(self, encoded_checkpoint, key):
"""Get unencrypted checkpoint from tlt file."""
logging.info("Loading weights from {}".format(encoded_checkpoint))
try:
extracted_ckpt_path = self.extract_zip_file(encoded_checkpoint)
except BadZipFile:
os_handle, temp_zip_path = tempfile.mkstemp()
os.close(os_handle)
# Decrypt the checkpoint file.
with open(encoded_checkpoint, 'rb') as encoded_file, open(temp_zip_path, 'wb') as tmp_zipf:
encoding.decode(encoded_file, tmp_zipf, key.encode())
encoded_file.closed
tmp_zipf.closed
extracted_ckpt_path = self.extract_zip_file(temp_zip_path)
except Exception:
raise IOError("The last checkpoint file is not saved properly. \
Please delete it and rerun the script.")
return extracted_ckpt_path
def train_and_eval(self, train_input_fn, eval_input_fn):
"""Run distributed train and eval on EfficientDet model."""
# check whether to resume
ckpt_cycle = 0
if self.curr_step:
ckpt_epoch = math.ceil(
self.curr_step * self._runtime_config.train_batch_size /
self._runtime_config.num_examples_per_epoch)
logging.info('Resume training from the latest checkpoint step: {}.'.format(
self.curr_step))
ckpt_cycle = ckpt_epoch // self._runtime_config.eval_epoch_cycle
train_run_config = self.build_strategy_configuration('train')
train_params = self.build_model_parameters('train')
train_estimator = self.build_efficientdet_estimator(train_params, train_run_config, 'train')
eval_estimator = None
eval_results = None
training_hooks = self.get_training_hooks(
mode="train",
params=train_params,
)
max_steps_per_cycle = self._runtime_config.eval_epoch_cycle * \
self._runtime_config.num_examples_per_epoch // self._runtime_config.train_batch_size
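# Worked example of the cycle arithmetic above (numbers are hypothetical): with
# eval_epoch_cycle=5, num_examples_per_epoch=1000 and train_batch_size=8,
# max_steps_per_cycle = 5 * 1000 // 8 = 625, so cycle N trains up to global
# step 625 * N before evaluation runs.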
# Starting training cycle
for cycle in range(ckpt_cycle + 1,
self._runtime_config.num_epochs //
self._runtime_config.eval_epoch_cycle + 1):
epoch = (cycle - 1) * self._runtime_config.eval_epoch_cycle
logging.info('Starting training cycle: %d, epoch: %d.', cycle, epoch)
train_estimator.train(
input_fn=train_input_fn,
max_steps=int(max_steps_per_cycle * cycle),
hooks=training_hooks
)
if (not MPI_is_distributed() or MPI_rank() == 0):
print() # Visual Spacing
logging.info("=================================")
logging.info(' Start evaluation cycle %02d' % cycle)
logging.info("=================================\n")
if eval_estimator is None:
eval_run_config = self.build_strategy_configuration('eval')
eval_params = self.build_model_parameters('eval')
eval_estimator = self.build_efficientdet_estimator(eval_params,
eval_run_config, 'eval')
ckpt_path = self.get_latest_checkpoint(self._runtime_config.model_dir,
self._runtime_config.key)
eval_results = eval_estimator.evaluate(
input_fn=eval_input_fn,
checkpoint_path=ckpt_path,
steps=self._runtime_config.eval_samples // self._runtime_config.eval_batch_size,
name='Eval')
for key, value in sorted(eval_results.items(), key=operator.itemgetter(0)):
logging.info("%s: %.9f" % (key, value))
print("Evaluation is complete.")
kpi_data = {
k: float(round(v * 100, 4))
for k, v in sorted(eval_results.items(), key=operator.itemgetter(0))
}
s_logger = status_logging.get_status_logger()
if isinstance(s_logger, status_logging.StatusLogger):
s_logger.kpi = kpi_data
s_logger.write(
status_level=status_logging.Status.RUNNING,
message="Evaluation metrics generated."
)
if MPI_is_distributed():
logging.info("Training Cycle: {} complete".format(cycle))
from mpi4py import MPI
MPI.COMM_WORLD.Barrier() # Waiting for all MPI processes to sync
return eval_results
def eval(self, eval_input_fn):
"""Run eval with EfficientDet model."""
# model_path = os.path.join(self._runtime_config.model_dir)
print(self._runtime_config.model_path)
ckpt_path = self.extract_ckpt(self._runtime_config.model_path, self._runtime_config.key)
if (not MPI_is_distributed() or MPI_rank() == 0):
print() # Visual Spacing
logging.info("=================================")
logging.info(' Start evaluation')
logging.info("=================================\n")
eval_run_config = self.build_strategy_configuration('eval')
eval_params = self.build_model_parameters('eval')
eval_estimator = self.build_efficientdet_estimator(eval_params,
eval_run_config, 'eval')
eval_results = eval_estimator.evaluate(
input_fn=eval_input_fn,
steps=self._runtime_config.eval_samples // self._runtime_config.eval_batch_size,
checkpoint_path=ckpt_path,
name='Eval')
for key, value in sorted(eval_results.items(), key=operator.itemgetter(0)):
logging.info("%s: %.9f" % (key, value))
print("Evaluation is complete.")
return eval_results
class EstimatorExecuter(BaseExecuter):
"""Interface that runs EfficientDet model using Estimator."""
def __init__(self, runtime_config, model_fn):
"""Initialize."""
super(EstimatorExecuter, self).__init__(runtime_config, model_fn)
if MPI_is_distributed():
os.environ['HOROVOD_GPU_ALLREDUCE'] = 'NCCL'
os.environ['HOROVOD_NUM_NCCL_STREAMS'] = '1'
# os.environ['HOROVOD_AUTOTUNE'] = '2'
# hvd.init()
logging.info("Horovod successfully initialized ...")
os.environ['TF_GPU_THREAD_MODE'] = 'gpu_private'
os.environ['TF_GPU_THREAD_COUNT'] = '1' if not MPI_is_distributed() else str(hvd.size())
os.environ['TF_SYNC_ON_FINISH'] = '0'
def build_strategy_configuration(self, mode):
"""Retrieves model configuration for running TF Estimator."""
run_config = tf.estimator.RunConfig(
tf_random_seed=(
self._runtime_config.tf_random_seed
if not MPI_is_distributed() or self._runtime_config.tf_random_seed is None else
self._runtime_config.tf_random_seed + MPI_rank()
),
# model_dir=self._runtime_config.model_dir,
save_summary_steps=None, # disabled
save_checkpoints_steps=None, # disabled
save_checkpoints_secs=None, # disabled
keep_checkpoint_max=20, # disabled
keep_checkpoint_every_n_hours=None, # disabled
log_step_count_steps=None, # disabled
session_config=self._get_session_config(
mode=mode,
use_xla=False, # self._runtime_config.use_xla
use_amp=self._runtime_config.amp,
use_tf_distributed=False,
# TODO: Remove when XLA at inference fixed
allow_xla_at_inference=False, # self._runtime_config.allow_xla_at_inference
),
protocol=None,
device_fn=None,
train_distribute=None,
eval_distribute=None,
experimental_distribute=None
)
return run_config
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/executer/distributed_executer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientNet (tf.keras) builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import numpy as np
import tensorflow.compat.v1 as tf
from nvidia_tao_tf1.core.templates.efficientnet_tf import EfficientNetB0, EfficientNetB1
from nvidia_tao_tf1.core.templates.efficientnet_tf import EfficientNetB2, EfficientNetB3
from nvidia_tao_tf1.core.templates.efficientnet_tf import EfficientNetB4, EfficientNetB5
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level='INFO')
logger = logging.getLogger(__name__)
mappings = {
'efficientdet-d0': [
'block1a_project_bn', 'block2b_add', 'block3b_add', 'block5c_add', 'block7a_project_bn'],
'efficientdet-d1': [
'block1b_project_bn', 'block2c_add', 'block3c_add', 'block5d_add', 'block7b_project_bn'],
'efficientdet-d2': [
'block1b_project_bn', 'block2c_add', 'block3c_add', 'block5d_add', 'block7b_project_bn'],
'efficientdet-d3': [
'block1b_project_bn', 'block2c_add', 'block3c_add', 'block5e_add', 'block7b_project_bn'],
'efficientdet-d4': [
'block1b_project_bn', 'block2d_add', 'block3d_add', 'block5f_add', 'block7b_project_bn'],
'efficientdet-d5': [
'block1b_project_bn', 'block2e_add', 'block3e_add', 'block5g_add', 'block7c_project_bn'],
}
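# Each list above names the backbone layers whose outputs feed the detector as
# its five feature maps; they are intended to correspond to progressively
# downsampled endpoints (roughly strides 2, 4, 8, 16 and 32 of the input).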
def swish(features, use_native=True, use_hard=False):
"""Computes the Swish activation function.
We provide three alternatives:
- Native tf.nn.swish, use less memory during training than composable swish.
- Quantization friendly hard swish.
- A composable swish, equivalent to tf.nn.swish, but more general for
finetuning and TF-Hub.
Args:
features: A `Tensor` representing preactivation values.
use_native: Whether to use the native swish from tf.nn that uses a custom
gradient to reduce memory usage, or to use customized swish that uses
default TensorFlow gradient computation.
use_hard: Whether to use quantization-friendly hard swish.
Returns:
The activation value.
"""
if use_native and use_hard:
raise ValueError('Cannot specify both use_native and use_hard.')
if use_native:
return tf.nn.swish(features)
if use_hard:
return features * tf.nn.relu6(features + np.float32(3)) * (1. / 6.)
features = tf.convert_to_tensor(features, name='features')
return features * tf.nn.sigmoid(features)
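# Worked example (values are illustrative): for a scalar input x = 1.0,
# swish(x) = x * sigmoid(x) ~= 1.0 * 0.731 = 0.731, while the hard variant
# computes x * relu6(x + 3) / 6 = 1.0 * 4.0 / 6 ~= 0.667.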
def build_model_base(images, model_name='efficientdet-d0',
num_classes=2, freeze_blocks=None, freeze_bn=False):
"""Create a base feature network and return the features before pooling.
Args:
images: input images tensor.
model_name: string, the predefined model name.
num_classes: number of classes, passed to the backbone builder.
freeze_blocks: optional list of backbone block IDs to freeze during training.
freeze_bn: whether to freeze all batch normalization layers.
Returns:
A list of five feature map tensors taken from the backbone endpoints
listed in `mappings[model_name]`.
Raises:
ValueError: if model_name specifies an unsupported model.
"""
assert isinstance(images, tf.Tensor)
supported_backbones = {
'efficientdet-d0': EfficientNetB0,
'efficientdet-d1': EfficientNetB1,
'efficientdet-d2': EfficientNetB2,
'efficientdet-d3': EfficientNetB3,
'efficientdet-d4': EfficientNetB4,
'efficientdet-d5': EfficientNetB5,
}
if model_name not in supported_backbones.keys():
raise ValueError("{} is not a supported arch. \
Please choose from `efficientdet-d0` to `efficientdet-d5`.")
model = supported_backbones[model_name](
add_head=False,
input_tensor=images,
classes=num_classes,
data_format="channels_last",
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
use_bias=False,
kernel_regularizer=None,
bias_regularizer=None,
stride16=False,
activation_type=None)
return [model.get_layer(fmap).output for fmap in mappings[model_name]]
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/backbone/efficientnet_builder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IVA EfficientDet backbone."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/backbone/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Backbone network factory."""
from nvidia_tao_tf1.cv.efficientdet.backbone import efficientnet_builder
def get_model_builder(model_name):
"""Get the model_builder module for a given model name."""
# can be extended to efficientnet_lite builder
if model_name.startswith('efficientdet-d'):
return efficientnet_builder
raise ValueError('Unknown model name {}'.format(model_name))
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/backbone/backbone_factory.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data loader and processing test cases."""
import pytest
import tensorflow as tf
from nvidia_tao_tf1.cv.efficientdet.backbone import backbone_factory
@pytest.mark.parametrize("model_name",
[('efficientdet-d0'),
('efficientdet-d2'),
('efficientdet-d3'),
('efficientdet-d4'),
('efficientdet-d5')])
def test_backbone(model_name):
builder = backbone_factory.get_model_builder(model_name)
inputs = tf.keras.layers.Input(shape=(512, 512, 3), batch_size=1)
fmaps = builder.build_model_base(inputs, model_name)
assert len(fmaps) == 5
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/backbone/tests/test_backbone.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IVA EfficientDet dataloader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/dataloader/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Data loader and processing."""
import tensorflow.compat.v1 as tf
from nvidia_tao_tf1.cv.efficientdet.models import anchors
from nvidia_tao_tf1.cv.efficientdet.object_detection import preprocessor
from nvidia_tao_tf1.cv.efficientdet.object_detection import tf_example_decoder
from nvidia_tao_tf1.cv.efficientdet.utils import utils
from nvidia_tao_tf1.cv.efficientdet.utils.distributed_utils import MPI_is_distributed
from nvidia_tao_tf1.cv.efficientdet.utils.distributed_utils import MPI_rank, MPI_rank_and_size
class InputProcessor(object):
"""Base class of Input processor."""
def __init__(self, image, output_size):
"""Initializes a new `InputProcessor`.
Args:
image: The input image before processing.
output_size: The output image size after calling resize_and_crop_image
function.
"""
self._image = image
if isinstance(output_size, int):
self._output_size = (output_size, output_size)
else:
self._output_size = output_size
# Parameters to control rescaling and shifting during preprocessing.
# Image scale defines scale from original image to scaled image.
self._image_scale = tf.constant(1.0)
# The integer height and width of scaled image.
self._scaled_height = tf.shape(image)[0]
self._scaled_width = tf.shape(image)[1]
# The x and y translation offset to crop scaled image to the output size.
self._crop_offset_y = tf.constant(0)
self._crop_offset_x = tf.constant(0)
def normalize_image(self):
"""Normalize the image to zero mean and unit variance."""
# The image normalization is identical to Cloud TPU ResNet.
self._image = tf.image.convert_image_dtype(self._image, dtype=tf.float32)
offset = tf.constant([0.485, 0.456, 0.406])
offset = tf.expand_dims(offset, axis=0)
offset = tf.expand_dims(offset, axis=0)
self._image -= offset
scale = tf.constant([0.224, 0.224, 0.224])
scale = tf.expand_dims(scale, axis=0)
scale = tf.expand_dims(scale, axis=0)
self._image /= scale
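# Worked example of the normalization above (the pixel value is illustrative):
# a uint8 value of 128 becomes 128 / 255 ~= 0.502 after convert_image_dtype,
# and the red channel then maps to (0.502 - 0.485) / 0.224 ~= 0.076.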
def set_training_random_scale_factors(self,
scale_min,
scale_max,
target_size=None):
"""Set the parameters for multiscale training.
Notably, if train and eval use different sizes, then target_size should be
set as eval size to avoid the discrepancy between train and eval.
Args:
scale_min: minimal scale factor.
scale_max: maximum scale factor.
target_size: targeted size, usually same as eval. If None, use train size.
"""
if not target_size:
target_size = self._output_size
target_size = utils.parse_image_size(target_size)
# Select a random scale factor.
random_scale_factor = tf.random_uniform([], scale_min, scale_max)
scaled_y = tf.cast(random_scale_factor * target_size[0], tf.int32)
scaled_x = tf.cast(random_scale_factor * target_size[1], tf.int32)
# Recompute the accurate scale_factor using rounded scaled image size.
height = tf.cast(tf.shape(self._image)[0], tf.float32)
width = tf.cast(tf.shape(self._image)[1], tf.float32)
image_scale_y = tf.cast(scaled_y, tf.float32) / height
image_scale_x = tf.cast(scaled_x, tf.float32) / width
image_scale = tf.minimum(image_scale_x, image_scale_y)
# Select non-zero random offset (x, y) if scaled image is larger than
# self._output_size.
scaled_height = tf.cast(height * image_scale, tf.int32)
scaled_width = tf.cast(width * image_scale, tf.int32)
offset_y = tf.cast(scaled_height - self._output_size[0], tf.float32)
offset_x = tf.cast(scaled_width - self._output_size[1], tf.float32)
offset_y = tf.maximum(0.0, offset_y) * tf.random_uniform([], 0, 1)
offset_x = tf.maximum(0.0, offset_x) * tf.random_uniform([], 0, 1)
offset_y = tf.cast(offset_y, tf.int32)
offset_x = tf.cast(offset_x, tf.int32)
self._image_scale = image_scale
self._scaled_height = scaled_height
self._scaled_width = scaled_width
self._crop_offset_x = offset_x
self._crop_offset_y = offset_y
def set_scale_factors_to_output_size(self):
"""Set the parameters to resize input image to self._output_size."""
# Compute the scale_factor using rounded scaled image size.
height = tf.cast(tf.shape(self._image)[0], tf.float32)
width = tf.cast(tf.shape(self._image)[1], tf.float32)
image_scale_y = tf.cast(self._output_size[0], tf.float32) / height
image_scale_x = tf.cast(self._output_size[1], tf.float32) / width
image_scale = tf.minimum(image_scale_x, image_scale_y)
scaled_height = tf.cast(height * image_scale, tf.int32)
scaled_width = tf.cast(width * image_scale, tf.int32)
self._image_scale = image_scale
self._scaled_height = scaled_height
self._scaled_width = scaled_width
def resize_and_crop_image(self, method=tf.image.ResizeMethod.BILINEAR):
"""Resize input image and crop it to the self._output dimension."""
scaled_image = tf.image.resize_images(
self._image, [self._scaled_height, self._scaled_width], method=method)
scaled_image = \
scaled_image[
self._crop_offset_y:self._crop_offset_y +
self._output_size[0],
self._crop_offset_x:self._crop_offset_x +
self._output_size[1], :]
output_image = tf.image.pad_to_bounding_box(scaled_image, 0, 0,
self._output_size[0],
self._output_size[1])
return output_image
class DetectionInputProcessor(InputProcessor):
"""Input processor for object detection."""
def __init__(self, image, output_size, boxes=None, classes=None):
"""Init."""
InputProcessor.__init__(self, image, output_size)
self._boxes = boxes
self._classes = classes
def random_horizontal_flip(self):
"""Randomly flip input image and bounding boxes."""
self._image, self._boxes = preprocessor.random_horizontal_flip(
self._image, boxes=self._boxes)
def clip_boxes(self, boxes):
"""Clip boxes to fit in an image."""
ymin, xmin, ymax, xmax = tf.unstack(boxes, axis=1)
ymin = tf.clip_by_value(ymin, 0, self._output_size[0] - 1)
xmin = tf.clip_by_value(xmin, 0, self._output_size[1] - 1)
ymax = tf.clip_by_value(ymax, 0, self._output_size[0] - 1)
xmax = tf.clip_by_value(xmax, 0, self._output_size[1] - 1)
boxes = tf.stack([ymin, xmin, ymax, xmax], axis=1)
return boxes
def resize_and_crop_boxes(self):
"""Resize boxes and crop it to the self._output dimension."""
boxlist = preprocessor.box_list.BoxList(self._boxes)
# boxlist is in range of [0, 1], so here we pass the scale_height/width
# instead of just scale.
boxes = preprocessor.box_list_scale(boxlist, self._scaled_height,
self._scaled_width).get()
# Adjust box coordinates based on the offset.
box_offset = tf.stack([
self._crop_offset_y,
self._crop_offset_x,
self._crop_offset_y,
self._crop_offset_x,
])
boxes -= tf.cast(tf.reshape(box_offset, [1, 4]), tf.float32)
# Clip the boxes.
boxes = self.clip_boxes(boxes)
# Filter out ground truth boxes that are illegal.
indices = tf.where(
tf.not_equal((boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]),
0))
boxes = tf.gather_nd(boxes, indices)
classes = tf.gather_nd(self._classes, indices)
return boxes, classes
@property
def image_scale(self):
"""Return image scale from original image to scaled image."""
return self._image_scale
@property
def image_scale_to_original(self):
"""Return image scale from scaled image to original image."""
return 1.0 / self._image_scale
@property
def offset_x(self):
"""Return offset x."""
return self._crop_offset_x
@property
def offset_y(self):
"""Return offset y."""
return self._crop_offset_y
def pad_to_fixed_size(data, pad_value, output_shape):
"""Pad data to a fixed length at the first dimension.
Args:
data: Tensor to be padded to output_shape.
pad_value: A constant value assigned to the paddings.
output_shape: The output shape of a 2D tensor.
Returns:
The Padded tensor with output_shape [max_instances_per_image, dimension].
"""
max_instances_per_image = output_shape[0]
dimension = output_shape[1]
data = tf.reshape(data, [-1, dimension])
num_instances = tf.shape(data)[0]
msg = 'ERROR: please increase dataset_config.max_instances_per_image'
with tf.control_dependencies(
[tf.assert_less(num_instances, max_instances_per_image, message=msg)]):
pad_length = max_instances_per_image - num_instances
paddings = pad_value * tf.ones([pad_length, dimension])
padded_data = tf.concat([data, paddings], axis=0)
padded_data = tf.reshape(padded_data, output_shape)
return padded_data
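# Usage sketch (illustrative only, assuming the default limit of 100 instances):
#   boxes = tf.constant([[0., 0., 10., 10.], [5., 5., 20., 20.], [1., 2., 3., 4.]])
#   padded = pad_to_fixed_size(boxes, -1, [100, 4])  # shape (100, 4); rows 3..99 filled with -1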
class InputReader(object):
"""Input reader for dataset."""
def __init__(self,
file_pattern,
is_training,
use_fake_data=False,
max_instances_per_image=None):
"""Init."""
self._file_pattern = file_pattern
self._is_training = is_training
self._use_fake_data = use_fake_data
# COCO has a 100-instance limit, but users may set different values for a custom dataset.
self._max_instances_per_image = max_instances_per_image or 100
@tf.autograph.experimental.do_not_convert
def _dataset_parser(self, value, example_decoder, anchor_labeler, params):
"""Parse data to a fixed dimension input image and learning targets.
Args:
value: A dictionary that contains an image and groundtruth annotations.
Returns:
image: Image tensor that is preprocessed to have normalized value and
fixed dimension [image_height, image_width, 3]
cls_targets_dict: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, num_anchors]. The height_l and width_l
represent the dimension of class logits at l-th level.
box_targets_dict: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, num_anchors * 4]. The height_l and
width_l represent the dimension of bounding box regression output at
l-th level.
num_positives: Number of positive anchors in the image.
source_id: Source image id. Default value -1 if the source id is empty
in the groundtruth annotation.
image_scale: Scale of the processed image to the original image.
boxes: Groundtruth bounding box annotations. The box is represented in
[y1, x1, y2, x2] format. The tensor is padded with -1 to the fixed
dimension [self._max_instances_per_image, 4].
is_crowds: Groundtruth annotations to indicate if an annotation
represents a group of instances by value {0, 1}. The tensor is
padded with 0 to the fixed dimension [self._max_instances_per_image].
areas: Groundtruth areas annotations. The tensor is padded with -1
to the fixed dimension [self._max_instances_per_image].
classes: Groundtruth classes annotations. The tensor is padded with -1
to the fixed dimension [self._max_instances_per_image].
"""
with tf.name_scope('parser'):
data = example_decoder.decode(value)
source_id = data['source_id']
image = data['image']
boxes = data['groundtruth_boxes']
classes = data['groundtruth_classes']
classes = tf.reshape(tf.cast(classes, dtype=tf.float32), [-1, 1])
areas = data['groundtruth_area']
is_crowds = data['groundtruth_is_crowd']
classes = tf.reshape(tf.cast(classes, dtype=tf.float32), [-1, 1])
if params['skip_crowd_during_training'] and self._is_training:
indices = tf.where(tf.logical_not(data['groundtruth_is_crowd']))
classes = tf.gather_nd(classes, indices)
boxes = tf.gather_nd(boxes, indices)
# NOTE: The autoaugment method works best when used alongside the
# standard horizontal flipping of images along with size jittering
# and normalization.
if params.get('autoaugment_policy', None) and self._is_training:
from nvidia_tao_tf1.cv.efficientdet.aug import autoaugment
image, boxes = autoaugment.distort_image_with_autoaugment(
image, boxes, params['autoaugment_policy'], params['use_augmix'],
*params['augmix_params'])
input_processor = DetectionInputProcessor(image, params['image_size'],
boxes, classes)
input_processor.normalize_image()
if self._is_training and params['input_rand_hflip']:
input_processor.random_horizontal_flip()
if self._is_training:
input_processor.set_training_random_scale_factors(
params['train_scale_min'], params['train_scale_max'],
params.get('target_size', None))
else:
input_processor.set_scale_factors_to_output_size()
image = input_processor.resize_and_crop_image()
boxes, classes = input_processor.resize_and_crop_boxes()
# Assign anchors.
(cls_targets, box_targets,
num_positives) = anchor_labeler.label_anchors(boxes, classes)
source_id = tf.where(
tf.equal(source_id, tf.constant('')), '-1', source_id)
source_id = tf.string_to_number(source_id)
# Pad groundtruth data for evaluation.
image_scale = input_processor.image_scale_to_original
boxes *= image_scale
is_crowds = tf.cast(is_crowds, dtype=tf.float32)
boxes = pad_to_fixed_size(boxes, -1, [self._max_instances_per_image, 4])
is_crowds = pad_to_fixed_size(is_crowds, 0,
[self._max_instances_per_image, 1])
areas = pad_to_fixed_size(areas, -1, [self._max_instances_per_image, 1])
classes = pad_to_fixed_size(classes, -1,
[self._max_instances_per_image, 1])
return (image, cls_targets, box_targets, num_positives, source_id,
image_scale, boxes, is_crowds, areas, classes)
def __call__(self, params):
"""Call."""
input_anchors = anchors.Anchors(
params['min_level'], params['max_level'],
params['num_scales'],
params['aspect_ratios'],
params['anchor_scale'],
params['image_size'])
anchor_labeler = anchors.AnchorLabeler(input_anchors, params['num_classes'])
example_decoder = tf_example_decoder.TfExampleDecoder(
regenerate_source_id=params['regenerate_source_id'])
batch_size = params['batch_size']
try:
seed = params['tf_random_seed'] \
if not MPI_is_distributed() else params['seed'] * MPI_rank()
except (KeyError, TypeError):
seed = None
dataset = tf.data.Dataset.list_files(
self._file_pattern, self._is_training)
if self._is_training:
_shard_idx, _num_shards = MPI_rank_and_size()
# dataset = dataset.shard(hvd.size(), hvd.rank())
dataset = dataset.shard(
num_shards=_num_shards,
index=_shard_idx)
dataset = dataset.shuffle(256 // _num_shards)
# Prefetch data from files.
def _prefetch_dataset(filename):
dataset = tf.data.TFRecordDataset(filename).prefetch(1)
return dataset
dataset = dataset.apply(
tf.data.experimental.parallel_interleave(
_prefetch_dataset, cycle_length=32, sloppy=self._is_training))
if self._is_training:
dataset = dataset.shuffle(
buffer_size=64,
reshuffle_each_iteration=True,
seed=seed)
dataset = dataset.repeat()
# map_fn = lambda value: self._dataset_parser(value, example_decoder,
# anchor_labeler, params)
def map_fn(value):
return self._dataset_parser(value, example_decoder,
anchor_labeler, params)
# Parse the fetched records to input tensors for model function.
dataset = dataset.apply(tf.data.experimental.map_and_batch(
map_func=map_fn,
batch_size=batch_size,
drop_remainder=True,
num_parallel_calls=64))
def _process_example(images, cls_targets, box_targets, num_positives,
source_ids, image_scales, boxes, is_crowds, areas,
classes):
"""Processes one batch of data."""
labels = {}
# Count num_positives in a batch.
num_positives_batch = tf.reduce_mean(num_positives)
labels['mean_num_positives'] = tf.reshape(
tf.tile(tf.expand_dims(num_positives_batch, 0), [
batch_size, ]), [batch_size, 1])
for level in range(params['min_level'], params['max_level'] + 1):
labels['cls_targets_%d' % level] = cls_targets[level]
labels['box_targets_%d' % level] = box_targets[level]
# Concatenate groundtruth annotations to a tensor.
groundtruth_data = tf.concat([boxes, is_crowds, areas, classes], axis=2)
labels['source_ids'] = source_ids
labels['groundtruth_data'] = groundtruth_data
labels['image_scales'] = image_scales
return images, labels
dataset = dataset.map(_process_example)
dataset = dataset.prefetch(batch_size)
if self._use_fake_data:
# Turn this dataset into a semi-fake dataset which always loops over the
# first batch. This reduces variance in performance and is useful in
# testing.
dataset = dataset.take(1).cache().repeat()
return dataset
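# Usage sketch (illustrative only; the file pattern is a placeholder and `params`
# is the hyperparameter dict consumed by __call__, e.g. built via hparams_config):
#   reader = InputReader('/data/train*.tfrecord', is_training=True,
#                        max_instances_per_image=200)
#   dataset = reader(params)  # params must hold min_level, max_level, batch_size, ...
#   images, labels = dataset.make_one_shot_iterator().get_next()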
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/dataloader/dataloader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data loader and processing test cases."""
import glob
import hashlib
import os
import numpy as np
from PIL import Image
import tensorflow as tf
tf.enable_eager_execution()
from nvidia_tao_tf1.cv.efficientdet.dataloader import dataloader
from nvidia_tao_tf1.cv.efficientdet.models import anchors
from nvidia_tao_tf1.cv.efficientdet.object_detection import tf_example_decoder
from nvidia_tao_tf1.cv.efficientdet.utils import hparams_config
def int64_feature(value):
"""int64_feature."""
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def int64_list_feature(value):
"""int64_list_feature."""
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def bytes_feature(value):
"""bytes_feature."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def bytes_list_feature(value):
"""bytes_list_feature."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
def float_feature(value):
"""float_feature."""
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def float_list_feature(value):
"""float_list_feature."""
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def test_dataloader(tmpdir):
tf.random.set_random_seed(42)
# generate dummy tfrecord
image_height = 512
image_width = 512
filename = "dummy_example.jpg"
image_id = 1
full_path = os.path.join(tmpdir, filename)
# save dummy image to file
dummy_array = np.zeros((image_height, image_width, 3), dtype=np.uint8)
Image.fromarray(dummy_array, 'RGB').save(full_path)
with open(full_path, 'rb') as fid:
encoded_jpg = fid.read()
# encoded_jpg_b = bytearray(encoded_jpg)
key = hashlib.sha256(encoded_jpg).hexdigest()
xmin = [0.25]
xmax = [0.5]
ymin = [0.25]
ymax = [0.5]
is_crowd = [False]
category_names = [b'void']
category_ids = [0]
area = [16384]
feature_dict = {
'image/height':
int64_feature(image_height),
'image/width':
int64_feature(image_width),
'image/filename':
bytes_feature(filename.encode('utf8')),
'image/source_id':
bytes_feature(str(image_id).encode('utf8')),
'image/key/sha256':
bytes_feature(key.encode('utf8')),
'image/encoded':
bytes_feature(encoded_jpg),
'image/caption':
bytes_list_feature([]),
'image/format':
bytes_feature('jpeg'.encode('utf8')),
'image/object/bbox/xmin':
float_list_feature(xmin),
'image/object/bbox/xmax':
float_list_feature(xmax),
'image/object/bbox/ymin':
float_list_feature(ymin),
'image/object/bbox/ymax':
float_list_feature(ymax),
'image/object/class/text':
bytes_list_feature(category_names),
'image/object/class/label':
int64_list_feature(category_ids),
'image/object/is_crowd':
int64_list_feature(is_crowd),
'image/object/area':
float_list_feature(area),
}
example = tf.train.Example(features=tf.train.Features(feature=feature_dict))
# dump tfrecords
tfrecords_dir = tmpdir.mkdir("tfrecords")
dummy_tfrecords = str(tfrecords_dir.join('/dummy-001'))
writer = tf.python_io.TFRecordWriter(str(dummy_tfrecords))
writer.write(example.SerializeToString())
writer.close()
params = hparams_config.get_detection_config('efficientdet-d0').as_dict()
input_anchors = anchors.Anchors(params['min_level'], params['max_level'],
params['num_scales'],
params['aspect_ratios'],
params['anchor_scale'],
params['image_size'])
anchor_labeler = anchors.AnchorLabeler(input_anchors, params['num_classes'])
example_decoder = tf_example_decoder.TfExampleDecoder(
regenerate_source_id=params['regenerate_source_id'])
tfrecord_path = os.path.join(tfrecords_dir, "dummy*")
dataset = tf.data.TFRecordDataset(glob.glob(tfrecord_path))
value = next(iter(dataset))
reader = dataloader.InputReader(
tfrecord_path, is_training=True,
use_fake_data=False,
max_instances_per_image=100)
result = reader._dataset_parser(value, example_decoder, anchor_labeler,
params)
print(result[1])
print(result[2])
assert np.allclose(result[0][0, 0, :], [-2.1651785, -2.0357141, -1.8124998])
assert len(result) == 10
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/efficientdet/dataloader/tests/test_dataloader.py |
tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/__init__.py |
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""YOLOv4 Loss for training."""
import numpy as np
import tensorflow as tf
from nvidia_tao_tf1.cv.common.losses.base_loss import BaseLoss
from nvidia_tao_tf1.cv.ssd.utils.box_utils import iou
class YOLOv4Loss(BaseLoss):
'''
YOLOv4 Loss class.
See details here: https://arxiv.org/pdf/1506.02640.pdf
'''
def __init__(self,
lambda_coord=5.0,
lambda_no_obj=50.0,
lambda_cls=1.0,
smoothing=0.0,
ignore_threshold=0.7,
focal_loss_alpha=0.25,
focal_loss_gamma=2.0):
'''
Loss init function.
NOTE: obj classification loss weight for positive sample is fixed at 1
Args:
lambda_coord: coord (bbox regression) loss weight
lambda_no_obj: obj classification loss weight for negative sample
lambda_cls: classification loss weight for positive sample
smoothing: classification label smoothing
ignore_threshold: iou threshold to ignore boxes when calculating negative obj loss
focal_loss_alpha: alpha of the focal loss; focal loss is used only when both
    alpha and gamma are greater than 0
focal_loss_gamma: gamma (focusing parameter) of the focal loss
'''
self.lambda_coord = lambda_coord
self.lambda_no_obj = lambda_no_obj
self.lambda_cls = lambda_cls
self.smoothing = smoothing
self.ignore_threshold = ignore_threshold
self.focal_loss_alpha = focal_loss_alpha
self.focal_loss_gamma = focal_loss_gamma
def l_ciou(self, yx_gt, hw_gt, yx_pred, hw_pred):
"""CIoU loss of two boxes.
See https://arxiv.org/pdf/1911.08287.pdf
l_ciou = l_diou + alpha * v
l_diou = 1.0 - iou + p^2 / c^2
v = 4/pi^2 * (arctan(w_gt / h_gt) - arctan(w / h))^2
alpha = v / ((1-iou) + v)
Args:
yx_gt (tensor): Tensor of shape [B,N,2], holding GT y, x
hw_gt (tensor): Tensor of shape [B,N,2], holding GT h, w
yx_pred (tensor): Tensor of shape [B,N,2], holding pred y, x
hw_pred (tensor): Tensor of shape [B,N,2], holding pred h, w
Returns:
a tensor with shape [B,N] representing element-wise l_ciou
"""
# Basic calculations
y_gt = yx_gt[..., 0]
x_gt = yx_gt[..., 1]
h_gt = hw_gt[..., 0]
w_gt = hw_gt[..., 1]
ymin_gt = y_gt - 0.5 * h_gt
ymax_gt = y_gt + 0.5 * h_gt
xmin_gt = x_gt - 0.5 * w_gt
xmax_gt = x_gt + 0.5 * w_gt
y_pred = yx_pred[..., 0]
x_pred = yx_pred[..., 1]
h_pred = hw_pred[..., 0]
w_pred = hw_pred[..., 1]
ymin_pred = y_pred - 0.5 * h_pred
ymax_pred = y_pred + 0.5 * h_pred
xmin_pred = x_pred - 0.5 * w_pred
xmax_pred = x_pred + 0.5 * w_pred
min_ymax = tf.minimum(ymax_gt, ymax_pred)
max_ymin = tf.maximum(ymin_gt, ymin_pred)
min_xmax = tf.minimum(xmax_gt, xmax_pred)
max_xmin = tf.maximum(xmin_gt, xmin_pred)
intersect_h = tf.maximum(0.0, min_ymax - max_ymin)
intersect_w = tf.maximum(0.0, min_xmax - max_xmin)
intersect_area = intersect_h * intersect_w
union_area = h_gt * w_gt + h_pred * w_pred - intersect_area
# avoid div by zero, add 1e-18
iou = tf.truediv(intersect_area, union_area + 1e-18)
max_ymax = tf.maximum(ymax_gt, ymax_pred)
min_ymin = tf.minimum(ymin_gt, ymin_pred)
max_xmax = tf.maximum(xmax_gt, xmax_pred)
min_xmin = tf.minimum(xmin_gt, xmin_pred)
enclose_h = max_ymax - min_ymin
enclose_w = max_xmax - min_xmin
# c2 is square of diagonal length of enclosing box
c2 = enclose_h * enclose_h + enclose_w * enclose_w
diff_cy = y_gt - y_pred
diff_cx = x_gt - x_pred
p2 = diff_cy * diff_cy + diff_cx * diff_cx
l_diou = 1.0 - iou + tf.truediv(p2, c2 + 1e-18)
v_atan_diff = tf.math.atan2(w_gt, h_gt + 1e-18) - tf.math.atan2(w_pred, h_pred + 1e-18)
v = (4.0 / (np.pi**2)) * v_atan_diff * v_atan_diff
alpha = tf.truediv(v, 1.0 - iou + v + 1e-18)
l_ciou = l_diou + alpha * v
return l_ciou
def decode_bbox(self, cy, cx, h, w):
"""Decode bbox from (cy, cx, h, w) format to (xmin, ymin, xmax, ymax) format."""
x_min = cx - 0.5 * w
x_max = cx + 0.5 * w
y_min = cy - 0.5 * h
y_max = cy + 0.5 * h
return tf.stack([x_min, y_min, x_max, y_max], -1)
def compute_loss_no_reduce(self, y_true, y_pred):
'''
Compute the loss of the YOLO v4 model prediction against the ground truth.
Arguments:
y_true (tensor): array of `(batch_size, #boxes,
[cy, cx, h, w, objectness, objectness_neg, cls])`
!!!objectness_neg is deprecated. DO NOT USE, it's not reliable!!!
y_pred (tensor): array of `(batch_size, #boxes,
[cy, cx, ph, pw, step_y, step_x, pred_y, pred_x, pred_h, pred_w, object, cls...])`
Returns:
    loc_loss, cls_loss: the per-anchor localization and classification loss terms,
    without the final weighted-sum reduction applied (see the shape comment at the
    end of this function).
'''
gt_yx = y_true[:, :, 0:2]
pred_yx = y_pred[:, :, 0:2] + y_pred[:, :, 6:8] * y_pred[:, :, 4:6]
gt_hw = y_true[:, :, 2:4]
pred_hw = tf.minimum(y_pred[:, :, 2:4] * y_pred[:, :, 8:10], 1e18)
cls_wts = y_true[:, :, -1]
# assign more loc loss weights to smaller boxes
loss_scale = 2.0 - y_true[:, :, 2] * y_true[:, :, 3]
loc_loss = self.l_ciou(gt_yx, gt_hw, pred_yx, pred_hw)
loc_loss = loc_loss * y_true[:, :, 4] * loss_scale * cls_wts
# if self.focal_loss_alpha > 0 and self.focal_loss_gamma > 0:
# # use focal loss
# obj_loss = self.bce_focal_loss(
# y_true[:, :, 4:5],
# y_pred[:, :, 10:11],
# self.focal_loss_alpha,
# self.focal_loss_gamma
# )
# else:
# # default case(no focal loss)
# obj_loss = self.bce_loss(y_true[:, :, 4:5], y_pred[:, :, 10:11])
def max_iou_fn(x):
# x[0] is y_true (#bbox, ...), x[1] is pred_yx (#bbox, ...), x[2] is pred_hw
# we need to calculate neutral bboxes.
valid_bbox = tf.boolean_mask(x[0], x[0][:, 4])
# shape (batch_size, #boxes, 4)
valid_bbox = self.decode_bbox(valid_bbox[:, 0], valid_bbox[:, 1],
valid_bbox[:, 2], valid_bbox[:, 3])
pred_bbox = self.decode_bbox(x[1][:, 0], x[1][:, 1],
x[2][:, 0], x[2][:, 1])
return tf.reduce_max(iou(pred_bbox, valid_bbox), -1)
max_iou = tf.map_fn(max_iou_fn, (y_true, pred_yx, pred_hw), dtype=tf.float32)
is_neg = tf.cast(tf.less(max_iou, self.ignore_threshold), tf.float32)
# Do not count a positive box as negative even if its iou is small!
is_neg = (1. - y_true[:, :, 4]) * is_neg
# loc_loss = tf.reduce_mean(tf.reduce_sum(loc_loss, axis=1), axis=0)
# obj_pos_loss = tf.reduce_mean(
# tf.reduce_sum(obj_loss * y_true[:, :, 4], axis=1),
# axis=0
# )
# obj_neg_loss = tf.reduce_mean(
# tf.reduce_sum(obj_loss * tf.stop_gradient(is_neg), axis=1),
# axis=0
# )
if self.focal_loss_alpha > 0 and self.focal_loss_gamma > 0:
cls_loss = tf.reduce_sum(
self.bce_focal_loss(
y_true[:, :, 6:-1],
y_pred[:, :, 11:],
self.focal_loss_alpha,
self.focal_loss_gamma,
self.smoothing
) * y_true[:, :, 4] * cls_wts,
axis=1
)
cls_loss = tf.reduce_mean(cls_loss, axis=0)
else:
# cls_loss = tf.reduce_sum(
# self.bce_loss(
# y_true[:, :, 6:-1],
# y_pred[:, :, 11:],
# self.smoothing
# ) * y_true[:, :, 4] * cls_wts,
# axis=1
# )
# cls_loss = tf.reduce_mean(cls_loss, axis=0)
cls_loss = self.bce_loss(
y_true[:, :, 6:-1],
y_pred[:, :, 11:],
self.smoothing
) * y_true[:, :, 4] * cls_wts
# total_loss = self.lambda_coord * loc_loss + obj_pos_loss + self.lambda_cls * cls_loss + \
# self.lambda_no_obj * obj_neg_loss
# return total_loss
# loc_loss: [#batch, #anchor]; cls_loss: [#batch, #anchor]
return loc_loss, cls_loss
def compute_loss(self, y_true, y_pred):
'''
Compute the loss of the YOLO v4 model prediction against the ground truth.
Arguments:
y_true (tensor): array of `(batch_size, #boxes,
[cy, cx, h, w, objectness, objectness_neg, cls])`
!!!objectness_neg is deprecated. DO NOT USE, it's not reliable!!!
y_pred (tensor): array of `(batch_size, #boxes,
[cy, cx, ph, pw, step_y, step_x, pred_y, pred_x, pred_h, pred_w, object, cls...])`
Returns:
A scalar, the total multitask loss for classification and localization.
'''
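# Layout reminder (added for clarity, matching the docstring above): y_pred[..., 0:2]
# holds anchor centers (cy, cx), [..., 2:4] anchor sizes (ph, pw), [..., 4:6] grid steps,
# [..., 6:10] the network's raw (y, x, h, w) predictions, [..., 10] the objectness logit
# and [..., 11:] class logits; the two lines below decode absolute centers and sizes.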
gt_yx = y_true[:, :, 0:2]
pred_yx = y_pred[:, :, 0:2] + y_pred[:, :, 6:8] * y_pred[:, :, 4:6]
gt_hw = y_true[:, :, 2:4]
pred_hw = tf.minimum(y_pred[:, :, 2:4] * y_pred[:, :, 8:10], 1e18)
cls_wts = y_true[:, :, -1]
# assign more loc loss weights to smaller boxes
loss_scale = 2.0 - y_true[:, :, 2] * y_true[:, :, 3]
loc_loss = self.l_ciou(gt_yx, gt_hw, pred_yx, pred_hw)
loc_loss = loc_loss * y_true[:, :, 4] * loss_scale * cls_wts
if self.focal_loss_alpha > 0 and self.focal_loss_gamma > 0:
# use focal loss
obj_loss = self.bce_focal_loss(
y_true[:, :, 4:5],
y_pred[:, :, 10:11],
self.focal_loss_alpha,
self.focal_loss_gamma,
smoothing=self.smoothing
)
else:
# default case(no focal loss)
obj_loss = self.bce_loss(
y_true[:, :, 4:5],
y_pred[:, :, 10:11],
smoothing=self.smoothing
)
def max_iou_fn(x):
# x[0] is y_true (#bbox, ...), x[1] is pred_yx (#bbox, ...), x[2] is pred_hw
# we need to calculate neutral bboxes.
valid_bbox = tf.boolean_mask(x[0], x[0][:, 4])
# shape (batch_size, #boxes, 4)
valid_bbox = self.decode_bbox(valid_bbox[:, 0], valid_bbox[:, 1],
valid_bbox[:, 2], valid_bbox[:, 3])
pred_bbox = self.decode_bbox(x[1][:, 0], x[1][:, 1],
x[2][:, 0], x[2][:, 1])
return tf.reduce_max(iou(pred_bbox, valid_bbox), -1)
max_iou = tf.map_fn(max_iou_fn, (y_true, pred_yx, pred_hw), dtype=tf.float32)
is_neg = tf.cast(tf.less(max_iou, self.ignore_threshold), tf.float32)
# Do not count a positive box as negative even if its iou is small!
is_neg = (1. - y_true[:, :, 4]) * is_neg
loc_loss = tf.reduce_mean(tf.reduce_sum(loc_loss, axis=1), axis=0)
obj_pos_loss = tf.reduce_mean(
tf.reduce_sum(obj_loss * y_true[:, :, 4], axis=1),
axis=0
)
obj_neg_loss = tf.reduce_mean(
tf.reduce_sum(obj_loss * tf.stop_gradient(is_neg), axis=1),
axis=0
)
if self.focal_loss_alpha > 0 and self.focal_loss_gamma > 0:
cls_loss = tf.reduce_sum(
self.bce_focal_loss(
y_true[:, :, 6:-1],
y_pred[:, :, 11:],
self.focal_loss_alpha,
self.focal_loss_gamma,
self.smoothing
) * y_true[:, :, 4] * cls_wts,
axis=1
)
cls_loss = tf.reduce_mean(cls_loss, axis=0)
else:
cls_loss = tf.reduce_sum(
self.bce_loss(
y_true[:, :, 6:-1],
y_pred[:, :, 11:],
self.smoothing
) * y_true[:, :, 4] * cls_wts,
axis=1
)
cls_loss = tf.reduce_mean(cls_loss, axis=0)
total_loss = self.lambda_coord * loc_loss + obj_pos_loss + self.lambda_cls * cls_loss + \
self.lambda_no_obj * obj_neg_loss
return total_loss
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/losses/yolo_loss.py |
"""Module containing implementation of loss functions for YOLO.""" | tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/losses/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test yolo loss."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
# Replicating this from Yolov3 findings.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import tensorflow as tf
from nvidia_tao_tf1.cv.yolo_v4.losses.yolo_loss import YOLOv4Loss
def convert_pred_to_true_match(y_pred):
# "construct" GT that matches pred
y_true_02 = y_pred[:, :, 6:8] * y_pred[:, :, 4:6] + y_pred[:, :, 0:2]
y_true_24 = y_pred[:, :, 8:10] * y_pred[:, :, 2:4]
y_true_45 = tf.sigmoid(y_pred[:, :, 10:11])
y_true_56 = 1.0 - tf.sigmoid(y_pred[:, :, 10:11])
y_true_6_ = tf.sigmoid(y_pred[:, :, 11:])
y_true_last = tf.ones_like(y_pred[:, :, 0:1])
# return constructed GT
return tf.concat([y_true_02, y_true_24, y_true_45, y_true_56, y_true_6_, y_true_last], -1)
def test_loss_zero():
# let's give a large coef on loss
yolo_loss = YOLOv4Loss(10, 1000, 10)
y_pred = tf.constant([[[0.1, 0.3, 0.05, 0.07, 0.5, 0.7, 1.0,
0.01, 1.35, 4.5, 1e99, 1e99, -1e99, -1e99]]])
y_true = convert_pred_to_true_match(y_pred)
with tf.Session() as sess:
assert abs(sess.run(yolo_loss.compute_loss(y_true, y_pred))) < 1e-5
def test_loss_nonzero_x():
yolo_loss = YOLOv4Loss(10, 1000, 10)
y_pred = tf.constant([[[0.1, 0.3, 0.05, 0.07, 0.5, 0.7, 1.0,
0.01, 1.35, 4.5, 1e99, 1e99, -1e99, -1e99]]])
y_true = convert_pred_to_true_match(y_pred)
# perturb pred_x
y_pred = tf.constant([[[0.1, 0.3, 0.05, 0.07, 0.5, 0.7, 1.0,
0.001, 1.35, 4.5, 1e99, 1e99, -1e99, -1e99]]])
# eliminate coord loss
yolo_loss1 = YOLOv4Loss(0, 1000, 10)
with tf.Session() as sess:
assert abs(sess.run(yolo_loss.compute_loss(y_true, y_pred)) - 0.7832673) < 1e-5
assert abs(sess.run(yolo_loss1.compute_loss(y_true, y_pred))) < 1e-5
def test_loss_nonzero_y():
yolo_loss = YOLOv4Loss(10, 1000, 10)
y_pred = tf.constant([[[0.1, 0.3, 0.05, 0.07, 0.5, 0.7, 1.0,
0.01, 1.35, 4.5, 1e99, 1e99, -1e99, -1e99]]])
y_true = convert_pred_to_true_match(y_pred)
# perturb pred_y
y_pred = tf.constant([[[0.1, 0.3, 0.05, 0.07, 0.5, 0.7, 0.5,
0.01, 1.35, 4.5, 1e99, 1e99, -1e99, -1e99]]])
# eliminate coord loss
yolo_loss1 = YOLOv4Loss(0, 1000, 10)
with tf.Session() as sess:
assert abs(sess.run(yolo_loss.compute_loss(y_true, y_pred)) - 25.969965) < 1e-5
assert abs(sess.run(yolo_loss1.compute_loss(y_true, y_pred))) < 1e-5
def test_loss_nonzero_wh():
yolo_loss = YOLOv4Loss(10, 1000, 10)
y_pred = tf.constant([[[0.1, 0.3, 0.05, 0.07, 0.5, 0.7, 1.0,
0.01, 1.35, 4.5, 1e99, 1e99, -1e99, -1e99]]])
y_true = convert_pred_to_true_match(y_pred)
# perturb pred_wh
y_pred = tf.constant([[[0.1, 0.3, 0.05, 0.07, 0.5, 0.7, 1.0,
0.01, 2, 5, 1e99, 1e99, -1e99, -1e99]]])
# eliminate coord loss
yolo_loss1 = YOLOv4Loss(0, 1000, 10)
with tf.Session() as sess:
assert abs(sess.run(yolo_loss.compute_loss(y_true, y_pred)) - 7.7667146) < 1e-5
assert abs(sess.run(yolo_loss1.compute_loss(y_true, y_pred))) < 1e-5
def test_loss_nonzero_obj():
yolo_loss = YOLOv4Loss(10, 1000, 10)
y_pred = tf.constant([[[0.1, 0.3, 0.05, 0.07, 0.5, 0.7, 1.0,
0.01, 1.35, 4.5, 1e99, 1e99, -1e99, -1e99]]])
y_true = convert_pred_to_true_match(y_pred)
# perturb pred_obj
y_pred = tf.constant([[[0.1, 0.3, 0.05, 0.07, 0.5, 0.7, 1.0,
0.01, 1.35, 4.5, -1e99, 1e99, -1e99, -1e99]]])
with tf.Session() as sess:
assert abs(sess.run(yolo_loss.compute_loss(y_true, y_pred)) - 10.361637115478516) < 1e-5
def test_loss_nonzero_cls():
yolo_loss = YOLOv4Loss(10, 1000, 10)
y_pred = tf.constant([[[0.1, 0.3, 0.05, 0.07, 0.5, 0.7, 1.0,
0.01, 1.35, 4.5, 1e99, 1e99, -1e99, -1e99]]])
y_true = convert_pred_to_true_match(y_pred)
# perturb pred_cls
y_pred = tf.constant([[[0.1, 0.3, 0.05, 0.07, 0.5, 0.7, 1.0,
0.01, 1.35, 4.5, 1e99, -1e99, 1e99, -1e99]]])
# eliminate cls loss
yolo_loss1 = YOLOv4Loss(10, 1000, 0)
with tf.Session() as sess:
assert abs(sess.run(yolo_loss.compute_loss(y_true, y_pred)) - 414.46533203125) < 1e-5
assert abs(sess.run(yolo_loss1.compute_loss(y_true, y_pred))) < 1e-5
def test_loss_nonzero_noobj():
yolo_loss = YOLOv4Loss(10, 1, 10)
y_pred = tf.constant([[[0.1, 0.3, 0.05, 0.07, 0.5, 0.7, 1.0,
0.01, 1.35, 4.5, -1e99, 1e99, -1e99, -1e99]]])
y_true = convert_pred_to_true_match(y_pred)
# perturb pred_obj (flip the objectness logit from negative to positive)
y_pred = tf.constant([[[0.1, 0.3, 0.05, 0.07, 0.5, 0.7, 1.0,
0.01, 1.35, 4.5, 1e99, 1e99, -1e99, -1e99]]])
# eliminate noobj loss
yolo_loss1 = YOLOv4Loss(10, 0, 10)
# perturb everything other than obj
y_pred1 = tf.constant([[[0.1, 0.3, 0.05, 0.07, 0.5, 0.7, 0.1,
0.7, 1.5, 0.1, -1e99, 0, 0, 0]]])
with tf.Session() as sess:
assert abs(sess.run(yolo_loss.compute_loss(y_true, y_pred)) - 31.08489990234375) < 1e-5
assert abs(sess.run(yolo_loss1.compute_loss(y_true, y_pred))) < 1e-5
assert abs(sess.run(yolo_loss1.compute_loss(y_true, y_pred1))) < 1e-5
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/losses/tests/test_loss.py |
"""YOLO entry point."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from maglev_sdk.docker_container.entrypoint import main
if __name__ == '__main__':
main('yolo_v4', 'nvidia_tao_tf1/cv/yolo_v4/scripts')
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/docker/yolo_v4.py |
# Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
"""IVA YOLO v4 data loader builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import tensorflow as tf
import nvidia_tao_tf1.core as tao_core
from nvidia_tao_tf1.cv.ssd.utils.tensor_utils import get_non_empty_rows_2d_sparse
from nvidia_tao_tf1.cv.yolo_v3.data_loader.augmentation import (
outer_augmentations
)
from nvidia_tao_tf1.cv.yolo_v3.data_loader.yolo_v3_data_loader import build_dataloader
def unstack_images(images, shapes, bs):
"""unstack images into a list of images."""
images_list = []
for b_idx in range(bs):
image = images[b_idx, ...]
shape = shapes[b_idx, ...]
image = image[0:shape[0], 0:shape[1], ...]
images_list.append(image)
return images_list
def tf_regular_augmentation(image, label, training, w_tensor, h_tensor, augmentation_config):
"""Function to do non-mosaic augmentation."""
ratio = tf.cast(w_tensor, tf.float32) / tf.cast(h_tensor, tf.float32)
if training:
aug_img, aug_label = outer_augmentations(
image,
label[:, 2:6],
ratio,
augmentation_config
)
aug_img = tf.image.resize_images(aug_img, [h_tensor, w_tensor])
else:
aug_img, aug_label = image, label[:, 2:6]
aug_img.set_shape([None, None, 3])
aug_img = tf.transpose(aug_img, (2, 0, 1))
aug_label = tf.concat([label[:, 0:2], aug_label], axis=-1)
# filter out bad boxes after augmentation
aug_x1 = aug_label[:, 2]
aug_x2 = aug_label[:, 4]
aug_y1 = aug_label[:, 3]
aug_y2 = aug_label[:, 5]
# only select valid labels
select = tf.logical_and(
aug_x2 - aug_x1 > 1e-3,
aug_y2 - aug_y1 > 1e-3
)
aug_label = tf.boolean_mask(aug_label, select)
return aug_img, aug_label
def _resize_box(box, t_shift, l_shift, h_ratio, w_ratio):
"""Helper function to resize labels in mosaic."""
xmin = l_shift + w_ratio * box[:, -4]
xmax = l_shift + w_ratio * box[:, -2]
ymin = t_shift + h_ratio * box[:, -3]
ymax = t_shift + h_ratio * box[:, -1]
return tf.stack([xmin, ymin, xmax, ymax], axis=-1)
def tf_do_mosaic(images, labels, min_ratio, w_tensor, h_tensor, augmentation_config):
"""Function to do mosaic augmentation."""
assert 0 < min_ratio < 0.5, "mosaic_min_ratio must in range (0, 0.5)"
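# Layout sketch (added for clarity): the (h_tensor, w_tensor) canvas is split at a
# random point into four quadrants, each filled with one resized input image:
#          lw         rw
#      +---------+---------+
#   th |  img 0  |  img 1  |
#      +---------+---------+
#   bh |  img 2  |  img 3  |
#      +---------+---------+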
# Step 1. determine border
thlw_ratio = tf.random.uniform([2], minval=min_ratio, maxval=1.0-min_ratio)
th = tf.cast(tf.math.round(thlw_ratio[0] * tf.cast(h_tensor, tf.float32)), tf.int32)
bh = h_tensor - th
lw = tf.cast(tf.math.round(thlw_ratio[1] * tf.cast(w_tensor, tf.float32)), tf.int32)
rw = w_tensor - lw
# Step 2. Generate 4 images with respect shape
# top-left
aug_img0, aug_label0 = outer_augmentations(
images[0],
labels[0][:, 2:6],
tf.cast(lw, tf.float32) / tf.cast(th, tf.float32),
augmentation_config
)
# top-right
aug_img1, aug_label1 = outer_augmentations(
images[1],
labels[1][:, 2:6],
tf.cast(rw, tf.float32) / tf.cast(th, tf.float32),
augmentation_config
)
# bottom-left
aug_img2, aug_label2 = outer_augmentations(
images[2],
labels[2][:, 2:6],
tf.cast(lw, tf.float32) / tf.cast(bh, tf.float32),
augmentation_config
)
# bottom-right
aug_img3, aug_label3 = outer_augmentations(
images[3],
labels[3][:, 2:6],
tf.cast(rw, tf.float32) / tf.cast(bh, tf.float32),
augmentation_config
)
# Step 3. make new images
shape = tf.stack([h_tensor, w_tensor, 3], axis=0)
# top-left
h_idx = tf.range(0, th, delta=1)
w_idx = tf.range(0, lw, delta=1)
h_idx, w_idx = tf.meshgrid(h_idx, w_idx)
h_idx = tf.reshape(tf.transpose(h_idx), (-1,))
w_idx = tf.reshape(tf.transpose(w_idx), (-1,))
# (k, 2)
indices = tf.stack([h_idx, w_idx], axis=1)
aug_img0 = tf.image.resize_images(aug_img0, [th, lw])
updates = tf.reshape(
aug_img0,
(-1, 3)
)
aug_img0 = tf.scatter_nd(
indices,
updates,
shape
)
# top-right
h_idx = tf.range(0, th, delta=1)
w_idx = tf.range(lw, w_tensor, delta=1)
h_idx, w_idx = tf.meshgrid(h_idx, w_idx)
h_idx = tf.reshape(tf.transpose(h_idx), (-1,))
w_idx = tf.reshape(tf.transpose(w_idx), (-1,))
# (k, 2)
indices = tf.stack([h_idx, w_idx], axis=1)
aug_img1 = tf.image.resize_images(aug_img1, [th, rw])
updates = tf.reshape(
aug_img1,
(-1, 3)
)
aug_img1 = tf.scatter_nd(
indices,
updates,
shape
)
# bottom-left
h_idx = tf.range(th, h_tensor, delta=1)
w_idx = tf.range(0, lw, delta=1)
h_idx, w_idx = tf.meshgrid(h_idx, w_idx)
h_idx = tf.reshape(tf.transpose(h_idx), (-1,))
w_idx = tf.reshape(tf.transpose(w_idx), (-1,))
# (k, 2)
indices = tf.stack([h_idx, w_idx], axis=1)
aug_img2 = tf.image.resize_images(aug_img2, [bh, lw])
updates = tf.reshape(
aug_img2,
(-1, 3)
)
aug_img2 = tf.scatter_nd(
indices,
updates,
shape
)
# bottom-right
h_idx = tf.range(th, h_tensor, delta=1)
w_idx = tf.range(lw, w_tensor, delta=1)
h_idx, w_idx = tf.meshgrid(h_idx, w_idx)
h_idx = tf.reshape(tf.transpose(h_idx), (-1,))
w_idx = tf.reshape(tf.transpose(w_idx), (-1,))
# (k, 2)
indices = tf.stack([h_idx, w_idx], axis=1)
aug_img3 = tf.image.resize_images(aug_img3, [bh, rw])
updates = tf.reshape(
aug_img3,
(-1, 3)
)
aug_img3 = tf.scatter_nd(
indices,
updates,
shape
)
aug_img = aug_img0 + aug_img1 + aug_img2 + aug_img3
aug_img.set_shape([None, None, 3])
# Step 4. make new labels
th_ratio = tf.cast(th, tf.float32) / tf.cast(h_tensor, tf.float32)
bh_ratio = tf.cast(bh, tf.float32) / tf.cast(h_tensor, tf.float32)
lw_ratio = tf.cast(lw, tf.float32) / tf.cast(w_tensor, tf.float32)
rw_ratio = tf.cast(rw, tf.float32) / tf.cast(w_tensor, tf.float32)
aug_label0 = _resize_box(aug_label0, 0,
0,
th_ratio,
lw_ratio)
aug_label1 = _resize_box(aug_label1, 0,
lw_ratio,
th_ratio,
rw_ratio)
aug_label2 = _resize_box(aug_label2, th_ratio,
0,
bh_ratio,
lw_ratio)
aug_label3 = _resize_box(aug_label3, th_ratio,
lw_ratio,
bh_ratio,
rw_ratio)
aug_label = tf.concat([aug_label0, aug_label1, aug_label2, aug_label3], axis=0)
aug_label_first_2dim = tf.concat([x[:, 0:2] for x in labels], axis=0)
aug_img = tf.transpose(aug_img, (2, 0, 1))
aug_label = tf.concat([aug_label_first_2dim, aug_label], axis=-1)
# filter out bad boxes after augmentation
aug_x1 = aug_label[:, 2]
aug_x2 = aug_label[:, 4]
aug_y1 = aug_label[:, 3]
aug_y2 = aug_label[:, 5]
# only select valid labels
select = tf.logical_and(
aug_x2 - aug_x1 > 1e-3,
aug_y2 - aug_y1 > 1e-3
)
aug_label = tf.boolean_mask(aug_label, select)
return aug_img, aug_label
class YOLOv4TFDataPipe:
"""
Data loader class.
DataLoader can be used in two ways:
1. build groundtruth image and label TF tensors. Those two tensors can be
directly used for training.
2. build a generator that yields image and label numpy arrays. In this case,
a TF session needs to be passed into the class initializer.
"""
def __init__(self,
experiment_spec,
label_encoder=None,
training=True,
sess=None,
h_tensor=None,
w_tensor=None,
visualizer=None,
rank=0):
"""
Data loader init function.
Arguments:
experiment_spec: The loaded config pb2.
label_encoder (function, optional): If passed in, groundtruth label will be encoded.
training (bool): Return training set or validation set.
sess (TF Session): Required if generator() function needs to be called. Otherwise, just
pass None.
visualizer(object): The Visualizer object.
rank(int): Horovod rank.
"""
dataset_proto = experiment_spec.dataset_config
self._exclude_difficult = not dataset_proto.include_difficult_in_training
dataloader = build_dataloader(
dataset_proto=dataset_proto,
augmentation_proto=experiment_spec.augmentation_config,
h_tensor=h_tensor,
w_tensor=w_tensor,
training=training
)
self.dataloader = dataloader
if training:
batch_size = experiment_spec.training_config.batch_size_per_gpu
else:
batch_size = experiment_spec.eval_config.batch_size
mosaic_prob = experiment_spec.augmentation_config.mosaic_prob
if training and mosaic_prob > 1e-3:
mosaic_max_bs = int(math.ceil(mosaic_prob * batch_size))
mosaic_min_bs = int(math.floor(mosaic_prob * batch_size))
else:
mosaic_max_bs = 0
mosaic_min_bs = 0
raw_bs = batch_size + 3 * mosaic_max_bs
self.batch_size = batch_size
self.images, self.ground_truth_labels, self.shapes, self.num_samples = \
dataloader.get_dataset_tensors(
raw_bs
)
# original images for debugging
self._images = self.images
if self.num_samples == 0:
return
self.n_batches = (self.num_samples + self.batch_size - 1) // self.batch_size
cls_mapping_dict = experiment_spec.dataset_config.target_class_mapping
self.classes = sorted({str(x) for x in cls_mapping_dict.values()})
cls_map = tao_core.processors.LookupTable(
keys=self.classes,
values=list(range(len(self.classes))),
default_value=-1)
cls_map.build()
self.encode_fn = label_encoder
gt_labels = []
source_classes = self.ground_truth_labels.object_class
mapped_classes = tf.SparseTensor(
values=cls_map(source_classes.values),
indices=source_classes.indices,
dense_shape=source_classes.dense_shape
)
mapped_labels = self.ground_truth_labels._replace(object_class=mapped_classes)
valid_indices = tf.not_equal(mapped_classes.values, -1)
filtered_labels = mapped_labels.filter(valid_indices)
filtered_obj_ids = tf.sparse.reshape(filtered_labels.object_class,
[raw_bs, -1, 1])
filtered_coords = tf.sparse.reshape(filtered_labels.vertices.coordinates,
[raw_bs, -1, 4])
filtered_occlusion = tf.sparse.reshape(
filtered_labels.occlusion,
[raw_bs, -1, 1]
)
filtered_obj_ids = tf.sparse.SparseTensor(
values=tf.cast(tf.round(filtered_obj_ids.values), tf.float32),
indices=filtered_obj_ids.indices,
dense_shape=filtered_obj_ids.dense_shape
)
filtered_coords = tf.sparse.SparseTensor(
values=tf.cast(filtered_coords.values, tf.float32),
indices=filtered_coords.indices,
dense_shape=filtered_coords.dense_shape
)
filtered_occlusion = tf.sparse.SparseTensor(
values=tf.cast(filtered_occlusion.values, tf.float32),
indices=filtered_occlusion.indices,
dense_shape=filtered_occlusion.dense_shape,
)
labels_all = tf.sparse.concat(
axis=-1,
sp_inputs=[filtered_obj_ids, filtered_occlusion, filtered_coords]
)
labels_split = tf.sparse.split(sp_input=labels_all, num_split=raw_bs,
axis=0)
labels_split = [tf.sparse.reshape(x, [-1, 6]) for x in labels_split]
labels = [tf.sparse.to_dense(get_non_empty_rows_2d_sparse(x)) for x in labels_split]
print_op = tf.print(
"Got label marked as difficult(occlusion > 0), "
"please set occlusion field in KITTI label to 0 "
"and re-generate TFRecord dataset "
"or set `dataset_config.include_difficult_in_training` to True "
"in spec file, if you want to include it in training."
)
def _print_fn():
with tf.control_dependencies([print_op]):
return tf.constant([])
def _no_print_fn():
return tf.constant([])
for l_idx, l in enumerate(labels):
obj_id = tf.cast(l[:, 0], tf.float32)
is_difficult = tf.cast(l[:, 1], tf.float32)
x1 = l[:, 2] / tf.cast(self.shapes[l_idx, 1], tf.float32)
x2 = l[:, 4] / tf.cast(self.shapes[l_idx, 1], tf.float32)
y1 = l[:, 3] / tf.cast(self.shapes[l_idx, 0], tf.float32)
y2 = l[:, 5] / tf.cast(self.shapes[l_idx, 0], tf.float32)
# only select valid labels
select = tf.logical_and(
tf.not_equal(obj_id, -1),
tf.logical_and(tf.less(x1, x2), tf.less(y1, y2))
)
label = tf.stack([obj_id, is_difficult, x1, y1, x2, y2], axis=1)
label = tf.boolean_mask(label, select)
# exclude difficult boxes if forced to do so
if self._exclude_difficult:
print_cond = tf.cond(
tf.reduce_any(tf.cast(label[:, 1], tf.bool)),
true_fn=_print_fn,
false_fn=_no_print_fn,
)
with tf.control_dependencies([print_cond]):
label = tf.boolean_mask(
label,
tf.math.logical_not(tf.cast(label[:, 1], tf.bool))
)
gt_labels.append(label)
self.gt_labels = gt_labels
self.frame_ids = self.ground_truth_labels.frame_id
# images
images_list = unstack_images(
self.images,
self.shapes,
raw_bs
)
augmented_images = []
augmented_labels = []
# @zeyuz: define outside loop to avoid static test failures
images = []
labels = []
# step 1. add mosaic batch
mosaic_min_ratio = experiment_spec.augmentation_config.mosaic_min_ratio
for b_idx in range(mosaic_min_bs):
for i_idx in range(4):
idx = b_idx * 4 + i_idx
images.append(images_list[idx])
labels.append(self.gt_labels[idx])
aug_img, aug_label = tf_do_mosaic(images, labels, mosaic_min_ratio,
w_tensor, h_tensor,
experiment_spec.augmentation_config)
# reset imgs, labels
images = []
labels = []
augmented_images.append(aug_img)
augmented_labels.append(aug_label)
# step 2. add maybe mosaic image
if mosaic_max_bs > mosaic_min_bs:
# equation to satisfy: (mosaic_min_bs + mosaic_img_prob) / bs = mosaic_prob
mosaic_img_prob = mosaic_prob * batch_size - mosaic_min_bs
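# e.g. (illustrative) batch_size=8, mosaic_prob=0.3 -> mosaic_min_bs=2, mosaic_max_bs=3
# and mosaic_img_prob = 0.3 * 8 - 2 = 0.4, so the extra image is a mosaic with
# probability 0.4, giving (2 + 0.4) / 8 = 0.3 mosaic images per batch on average.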
for i_idx in range(4):
idx = mosaic_min_bs * 4 + i_idx
images.append(images_list[idx])
labels.append(self.gt_labels[idx])
aug_img, aug_label = tf.cond(
tf.random.uniform([]) < mosaic_img_prob,
true_fn=lambda: tf_do_mosaic(images, labels, mosaic_min_ratio,
w_tensor, h_tensor,
experiment_spec.augmentation_config),
false_fn=lambda: tf_regular_augmentation(images[0], labels[0], training,
w_tensor, h_tensor,
experiment_spec.augmentation_config)
)
augmented_images.append(aug_img)
augmented_labels.append(aug_label)
# step 3. add non-mosaic image
for b_idx in range(batch_size - mosaic_max_bs):
aug_img, aug_label = tf_regular_augmentation(images_list[mosaic_max_bs * 4 + b_idx],
self.gt_labels[mosaic_max_bs * 4 + b_idx],
training,
w_tensor, h_tensor,
experiment_spec.augmentation_config)
augmented_images.append(aug_img)
augmented_labels.append(aug_label)
self.images = tf.stack(augmented_images, axis=0)
num_channels = experiment_spec.augmentation_config.output_channel
image_depth = int(experiment_spec.augmentation_config.output_depth) or 8
# See conversion: https://pillow.readthedocs.io/en/3.2.x/reference/Image.html
bgr_ = tf.reshape(
tf.constant([0.1140, 0.5870, 0.2990], dtype=tf.float32),
(1, 3, 1, 1)
)
# Vis the augmented images in TensorBoard, only rank 0
if rank == 0 and visualizer is not None:
# TensorBoard image summary only supports 8-bit images
if visualizer.enabled and image_depth == 8:
if num_channels == 3:
aug_images = self.images
else:
# Project RGB to grayscale
aug_images = tf.reduce_sum(
self.images * bgr_,
axis=1,
keepdims=True
)
aug_images = tf.transpose(aug_images, (0, 2, 3, 1))
_max_box_num = tf.shape(augmented_labels[0])[0]
for _aug_label in augmented_labels[1:]:
_max_box_num = tf.reduce_max(
tf.stack([_max_box_num, tf.shape(_aug_label)[0]], axis=0)
)
_aug_label_list = []
for _aug_label in augmented_labels:
_num_pad = _max_box_num - tf.shape(_aug_label)[0]
_aug_label_list.append(tf.pad(_aug_label, [(0, _num_pad), (0, 0)]))
_aug_label_concat = tf.stack(_aug_label_list, axis=0)[:, :, 2:]
# (xmin, ymin, xmax, ymax) to (ymin, xmin, ymax, xmax)
_aug_label_concat = tf.gather(_aug_label_concat, [1, 0, 3, 2], axis=2)
aug_images = tf.image.draw_bounding_boxes(
aug_images,
_aug_label_concat
)
aug_images = tf.cast(aug_images, tf.uint8)
visualizer.image(
"augmented_images",
aug_images,
data_format="channels_last"
)
self._images = self.images
self.gt_labels = augmented_labels
img_mean = experiment_spec.augmentation_config.image_mean
if experiment_spec.augmentation_config.output_channel == 3:
assert image_depth == 8, (
f"RGB images only support 8-bit depth, got {image_depth}, "
"please check `augmentation_config.output_depth` in spec file"
)
if img_mean:
bb, gg, rr = img_mean['b'], img_mean['g'], img_mean['r']
else:
bb, gg, rr = 103.939, 116.779, 123.68
else:
if img_mean:
bb, gg, rr = img_mean['l'], img_mean['l'], img_mean['l']
elif image_depth == 8:
bb, gg, rr = 117.3786, 117.3786, 117.3786
elif image_depth == 16:
# 117.3786 * 256
bb, gg, rr = 30048.9216, 30048.9216, 30048.9216
else:
raise ValueError(
f"Unsupported image depth: {image_depth}, should be 8 or 16, "
"please check `augmentation_config.output_depth` in spec file"
)
perm = tf.constant([2, 1, 0])
self.images = tf.gather(self.images, perm, axis=1)
self.images -= tf.constant([[[[bb]], [[gg]], [[rr]]]])
if num_channels == 1:
self.images = tf.reduce_sum(self.images * bgr_, axis=1, keepdims=True)
self.encoded_labels = self.gt_labels
if self.encode_fn is not None:
self.encoded_labels = self.encode_fn(self.gt_labels)
self.sess = sess
def set_encoder(self, label_encoder):
"""Set a new label encoder for output labels."""
self.encode_fn = label_encoder
self.encoded_labels = self.encode_fn(self.gt_labels)
def generator(self):
"""Yields img and label numpy arrays."""
if self.sess is None:
raise ValueError('TF session can not be found. Pass a session to the initializer!')
while True:
img, enc_label, label = self.sess.run(
[self.images, self.encoded_labels, self.gt_labels]
)
yield img, enc_label, label
def get_array(self):
'''get the array for a batch.'''
return self.sess.run([self.images, self.gt_labels])
def get_array_and_frame_ids(self):
'''get the array and frame IDs for a batch.'''
return self.sess.run([self.frame_ids, self.images, self.gt_labels])
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/dataio/tf_data_pipe.py |
"""Module containing implementation of dataio routines for YOLOv4.""" | tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/dataio/__init__.py |
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
'''Numpy implementation of YOLOv4 label encoder.'''
from keras import backend as K
import numpy as np
import tensorflow as tf
from nvidia_tao_tf1.cv.ssd.utils.box_utils import iou as iou_tf
from nvidia_tao_tf1.cv.ssd.utils.tensor_utils import tensor_slice_replace
def iou(boxes1, boxes2):
'''
numpy version of vectorized iou.
Let `boxes1` and `boxes2` contain `m` and `n` boxes, respectively.
returns an `(m, n)` matrix with the IoUs for all pairwise boxes
Arguments:
boxes1 (array of shape (m, 4)): x_min, y_min, x_max, y_max
boxes2 (array of shape (n, 4)): x_min, y_min, x_max, y_max
Returns:
IOU (array of shape (m, n)): IOU score
'''
# Compute the IoU.
xmin1, ymin1, xmax1, ymax1 = np.split(boxes1, 4, axis=1)
xmin2, ymin2, xmax2, ymax2 = np.split(boxes2, 4, axis=1)
xmin = np.maximum(xmin1, xmin2.T)
ymin = np.maximum(ymin1, ymin2.T)
xmax = np.minimum(xmax1, xmax2.T)
ymax = np.minimum(ymax1, ymax2.T)
intersection = np.maximum(xmax - xmin, 0) * np.maximum(ymax - ymin, 0)
boxes1_areas = (xmax1 - xmin1) * (ymax1 - ymin1)
boxes2_areas = (xmax2 - xmin2) * (ymax2 - ymin2)
union = boxes1_areas + boxes2_areas.T - intersection
return intersection / union
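# Quick sanity example (illustrative): two overlapping 2x2 boxes offset by one unit.
#   b1 = np.array([[0., 0., 2., 2.]])
#   b2 = np.array([[1., 1., 3., 3.]])
#   iou(b1, b2)  # ~0.1429: intersection 1, union 4 + 4 - 1 = 7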
class YOLOv4InputEncoder:
'''
Encoder class.
Transforms ground truth labels for object detection in images
(2D bounding box coordinates and class labels) to the format required for
training a YOLO v4 model.
In the process of encoding the ground truth labels, a template of anchor boxes
is being built, which are subsequently matched to the ground truth boxes
via an intersection-over-union threshold criterion.
args:
Note: the image size is supplied per call via `output_img_size` rather than at init.
n_classes: Number of all possible classes.
matching_box_iou_thres: Boxes not best-matched but with GT iou higher than this threshold
will be treated as match.
feature_map_stride: List of length n and format [(h_stride, w_stride), ...], n is number of
feature maps. Stride is `input_size / fmap_size` and must be integer. The original paper
input image is (416, 416) and feature map size is [(13, 13), (26, 26), (52, 52)]. The
corresponding feature_map_stride should be [(32, 32), (16, 16), (8, 8)]
anchors: List of 3 elements indicating the anchor boxes shape on feature maps. first element
is for smallest feature map (i.e. to detect large objects). Last element is for largest
feature map (i.e. to detect small objects). Each element is a list of tuples of size 2,
in the format of (w, h). The length of the list can be any integer larger than 0. All
w and h needs to be in range (0, 1) and this is (anchor_w / img_w, anchor_h / img_h)
class_weights: List of weights for class ids
'''
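# Sizing note (illustrative, based on the defaults documented above): a 416x416 input
# with stride (32, 32) yields a 13x13 feature map; with 3 anchors per cell that is
# 13 * 13 * 3 = 507 anchor slots on the coarsest level alone.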
def __init__(self, # pylint: disable=W0102
n_classes,
matching_box_iou_thres=0.25,
feature_map_stride=[(32, 32), (16, 16), (8, 8)],
anchors=[[(0.279, 0.216), (0.375, 0.476), (0.897, 0.784)],
[(0.072, 0.147), (0.149, 0.108), (0.142, 0.286)],
[(0.024, 0.031), (0.038, 0.072), (0.079, 0.055)]],
class_weights=None):
'''See class documentation for details.'''
assert len(feature_map_stride) == len(anchors), "anchors and feature maps mismatch!"
self.n_classes = n_classes
if class_weights is None:
self.class_weights = np.ones(self.n_classes, dtype=np.float32)
else:
self.class_weights = class_weights
self.fmap_num_anchors = [len(x) for x in anchors]
self.feature_map_stride = feature_map_stride
# maps box to fmap and corresponding box ind. {box_ind => (fmap, box_ind inside fmap)}
self.fmap_box_dict = {}
box_cnt = 0
for idx, i in enumerate(anchors):
for j, _ in enumerate(i):
self.fmap_box_dict[box_cnt] = (idx, j)
box_cnt += 1
# w, h shape (9, 1)
w, h = np.split(np.array(anchors).reshape(-1, 2), 2, axis=-1)
# yxyx
self.anchor_boxes = np.concatenate([-h / 2.0, -w / 2.0, h / 2.0, w / 2.0], axis=-1)
self.matching_box_iou_thres = matching_box_iou_thres
self.one_hot_cls = np.eye(n_classes)
def update_class_weights(self, class_weights):
"""Update the class weights in the callback."""
self.class_weights = class_weights
def _loc_map_fn(self, output_img_size, cx, cy, anchor_idx):
"""Helper function to get location of anchor match.
Returns:
fmap_id, fmap_y, fmap_x, anchor_id_in_fmap
"""
fmap_idx, anchor_idx_fmap = self.fmap_box_dict[anchor_idx]
w_step = self.feature_map_stride[fmap_idx][1] / float(output_img_size[0])
h_step = self.feature_map_stride[fmap_idx][0] / float(output_img_size[1])
fmap_x = int(np.floor(np.clip(cx / w_step, 0, 1.0 / w_step - 1e-3)))
fmap_y = int(np.floor(np.clip(cy / h_step, 0, 1.0 / h_step - 1e-3)))
return fmap_idx, fmap_y, fmap_x, anchor_idx_fmap
def __call__(self, output_img_size, gt_label):
'''
Processing one image groundtruthing.
Args:
output_img_size: (w, h) 2 integers representing image size
gt_label: (#boxes, [class_idx, is_difficult, x_min, y_min, x_max, y_max])
Returns:
encoded_target: `(#anchor_boxes, [cy, cx, h, w, objectness, objectness_negative, #cls,
cls_weight])`
'''
encoding_template = []
for fmap_id, num_anchor in enumerate(self.fmap_num_anchors):
# template[..., -1] replacement_iou
template = np.zeros((output_img_size[1] // self.feature_map_stride[fmap_id][0],
output_img_size[0] // self.feature_map_stride[fmap_id][1],
num_anchor,
7 + self.n_classes + 1), dtype=np.float)
template[..., 5] = 1.0
encoding_template.append(template)
# all shape (#boxes, 1)
cls_id, _, xmin, ymin, xmax, ymax = np.split(gt_label, 6, axis=-1)
cy = (ymin + ymax) / 2.0
cx = (xmin + xmax) / 2.0
h = ymax - ymin
w = xmax - xmin
gt_shape = np.concatenate([-h / 2.0, -w / 2.0, h / 2.0, w / 2.0], axis=-1)
ious = iou(gt_shape, self.anchor_boxes)
for gt_idx, gt_iou in enumerate(ious):
gt_cx = cx[gt_idx, 0]
gt_cy = cy[gt_idx, 0]
gt_h = h[gt_idx, 0]
gt_w = w[gt_idx, 0]
gt_array = np.concatenate(([gt_cy, gt_cx, gt_h, gt_w, 1.0, 0.0],
self.one_hot_cls[int(round(cls_id[gt_idx, 0]))],
[self.class_weights[int(round(cls_id[gt_idx, 0]))]]))
# process all matching_box_iou_thres boxes
for i in np.where(gt_iou > self.matching_box_iou_thres)[0]:
# loc => fmap_id, fmap_y, fmap_x, anchor_id_in_fmap
loc = self._loc_map_fn(output_img_size, gt_cx, gt_cy, i)
if gt_iou[i] > encoding_template[loc[0]][loc[1], loc[2], loc[3], -1]:
# multi-match
encoding_template[loc[0]][loc[1], loc[2], loc[3], -1] = gt_iou[i]
encoding_template[loc[0]][loc[1], loc[2], loc[3], :-1] = gt_array
# process best match
loc = self._loc_map_fn(output_img_size, gt_cx, gt_cy, np.argmax(gt_iou))
encoding_template[loc[0]][loc[1], loc[2], loc[3], :-1] = gt_array
# Do not allow change
encoding_template[loc[0]][loc[1], loc[2], loc[3], -1] = 1.0
encoding_template = [x.reshape(-1, 7 + self.n_classes + 1) for x in encoding_template]
        # drop the temporary replacement-IoU column before returning
return np.concatenate(encoding_template, axis=0)[..., :-1]
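# Illustrative usage sketch for YOLOv4InputEncoder; the label values and image size below are
# made-up examples, not defaults of the encoder:
#
#     encoder = YOLOv4InputEncoder(3)
#     labels = np.array([[0, 0, 1, 45, 293, 272],
#                        [1, 0, 54, 24, 200, 226]], dtype=np.float32)
#     encoded = encoder((320, 320), labels)
#     # encoded has shape (#anchor_boxes, 7 + n_classes), i.e. last dim 10 for 3 classes.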
class YOLOv4InputEncoderTensor:
    '''
    Encoder class.
    Transforms ground truth labels for object detection in images
    (2D bounding box coordinates and class labels) to the format required for
    training a YOLO v4 model.
    In the process of encoding the ground truth labels, a template of anchor boxes
    is being built, which are subsequently matched to the ground truth boxes
    via an intersection-over-union threshold criterion.
    Args:
        img_height: image height (how many pixels in height)
        img_width: image width (how many pixels in width)
        n_classes: Number of all possible classes.
        matching_box_iou_thres: In addition to the best-matching anchor for each ground truth
            box, any anchor whose IoU with that box exceeds this threshold is also matched to it.
        feature_map_size: List of length n and format [(h, w), ...], n is number of feature maps
            and (h, w) is the last two dims of NCHW feature maps' shape.
        anchors: List of 3 elements indicating the anchor boxes shape on feature maps. First
            element is for smallest feature map (i.e. to detect large objects). Last element is
            for largest feature map (i.e. to detect small objects). Each element is a list of
            tuples of size 2, in the format of (w, h). The length of the list can be any integer
            larger than 0.
        class_weights: Optional list of per-class loss weights; defaults to all ones.
    '''
def __init__(self, # pylint: disable=W0102
img_height,
img_width,
n_classes,
matching_box_iou_thres=0.25,
feature_map_size=None,
anchors=None,
class_weights=None):
'''See class documentation for details.'''
self.n_classes = n_classes
self.fmap_size = tf.convert_to_tensor(feature_map_size)
self.image_size = tf.convert_to_tensor([[img_height, img_width]], dtype=tf.float32)
self.fmap_size_ratio = tf.truediv(
self.image_size,
tf.cast(self.fmap_size, dtype=tf.float32)
)
self.box_inds_in_fmap = tf.constant(sum([[j for j in range(len(anchors[i]))]
for i in range(len(anchors))], []), dtype=tf.int32)
self.fmap_anchor_count = tf.constant([len(i) for i in anchors], dtype=tf.int32)
# compute cumsum of box_idx_offset
self.box_idx_offset = tf.constant([0], dtype=tf.int32)
box_total = tf.constant(0, dtype=tf.int32)
for i in range(len(feature_map_size)):
box_per_fmap = feature_map_size[i][0] * feature_map_size[i][1] * len(anchors[i])
_sum = self.box_idx_offset[-1:] + box_per_fmap
box_total = box_total + box_per_fmap
self.box_idx_offset = tf.concat([self.box_idx_offset, _sum], axis=-1)
self.box_idx_offset = self.box_idx_offset[:-1]
self.encoding_template = tf.concat([tf.zeros([box_total, 5 + n_classes + 1], tf.float32)],
axis=-1)
self.obj_negative_template = tf.ones([box_total, 1], tf.float32)
self.obj_negative_pos_template = tf.zeros([box_total, 1], tf.float32)
anchors = np.array(anchors)
self.anchor_fmap_mapping = tf.constant(sum([[i] * len(anchors[i])
for i in range(len(anchors))], []),
dtype=tf.int32)
anchors = anchors.reshape(-1, 2)
w = anchors[:, 0]
h = anchors[:, 1]
self.matching_box_iou_thres = matching_box_iou_thres
self.anchor_boxes = tf.constant(np.stack([-h / 2.0, -w / 2.0, h / 2.0, w / 2.0], axis=1),
dtype=tf.float32)
if class_weights is None:
self.class_weights = np.ones(self.n_classes, dtype=np.float32)
else:
self.class_weights = np.array(class_weights, dtype=np.float32)
# self.class_weights = tf.constant(self.class_weights, dtype=tf.float32)
self.class_placeholder = tf.placeholder(tf.float32, shape=[self.n_classes])
self.class_weights_tensor = tf.Variable(self.class_weights,
dtype=tf.float32,
aggregation=tf.VariableAggregation.MEAN)
self.update_op = tf.assign(self.class_weights_tensor, self.class_placeholder)
def update_class_weights(self, class_weights):
"""Update the class weights in the callback."""
self.class_weights = class_weights
K.get_session().run(self.update_op, feed_dict={self.class_placeholder: self.class_weights})
def __call__(self, ground_truth_labels):
'''
Converts ground truth bounding box data into a suitable format to train a YOLO v4 model.
        Arguments:
            ground_truth_labels (list): A python list of length `batch_size` that contains one 2D
                tensor for each batch image. Each such tensor has `k` rows for the `k` ground
                truth bounding boxes belonging to the respective image, and the data for each
                ground truth bounding box has the format
                `(class_id, is_difficult, xmin, ymin, xmax, ymax)` (i.e. the 'corners' coordinate
                format), where `class_id` is the zero-based integer class index.
        Returns:
            `y_encoded`, a 3D tensor of shape
            `(batch_size, #boxes, [cy, cx, h, w, objectness, objectness_negative, cls,
            cls_weight])` that serves as the ground truth label tensor for training, where
            `#boxes` is the total number of boxes predicted by the model per image. cx, cy, h, w
            are centroid coord. cls is one-hot class encoding. objectness = 1 if matched to GT,
            else 0. objectness_negative = 1 if not matched to GT and not neutral, else 0.
'''
encoded = []
for gt_label in ground_truth_labels: # For each batch item...
match_y = tf.cond(tf.equal(tf.shape(gt_label)[0], 0),
lambda: tf.concat([self.encoding_template[..., :5],
self.obj_negative_template,
self.encoding_template[..., 5:]], axis=-1),
lambda label=gt_label: self.__process_one_img(label), strict=True)
encoded.append(match_y)
return tf.stack(encoded, axis=0)
def __process_one_img(self, gt_label):
'''
TF graph for processing one image groundtruthing.
Args:
            gt_label: 2D tensor for this image with `k` rows for the `k` ground
                truth bounding boxes belonging to the image, and the data for each ground
                truth bounding box has the format
                `(class_id, is_difficult, xmin, ymin, xmax, ymax)` (i.e. the 'corners' coordinate
                format), where `class_id` is the zero-based integer class index.
Returns:
encoded_target: `(#anchor_boxes, [cy, cx, h, w, objectness, objectness_negative, cls,
cls_weights])`
'''
        # Nightmare-level TF graph build.
        # The commented-out code is for the single-box match, which is easier to understand.
        # The real code matches all candidate anchor boxes with the given GT together.
gt_label = tf.cast(gt_label, tf.float32)
classes_one_hot = tf.one_hot(tf.reshape(tf.cast(gt_label[:, 0], tf.int32), [-1]),
self.n_classes)
classes_weights = tf.gather(self.class_weights_tensor, tf.cast(gt_label[:, 0:1], tf.int32))
cy = tf.truediv(gt_label[:, 3:4] + gt_label[:, 5:6], 2.0)
cx = tf.truediv(gt_label[:, 2:3] + gt_label[:, 4:5], 2.0)
h = gt_label[:, 5:6] - gt_label[:, 3:4]
w = gt_label[:, 4:5] - gt_label[:, 2:3]
objectness = tf.ones_like(w)
# gt encoded as [:, 4+n_cls]
one_hot_gt = tf.concat([cy, cx, h, w, objectness, classes_one_hot, classes_weights],
axis=-1)
# force center to (0, 0)
gt_centroid_0 = tf.concat([tf.truediv(-h, 2.0), tf.truediv(-w, 2.0), tf.truediv(h, 2.0),
tf.truediv(w, 2.0)], axis=-1)
num_gt = tf.shape(gt_centroid_0)[0]
iou_result = iou_tf(gt_centroid_0, self.anchor_boxes)
# iou_match = tf.reshape(tf.argmax(iou_result, axis=-1, output_type=tf.int32), [-1])
# size (#gt_box, #anchors)
fmap_match_all = tf.tile(tf.reshape(self.anchor_fmap_mapping, [1, -1]), [num_gt, 1])
# fmap_match = tf.gather(self.anchor_fmap_mapping, iou_match)
# fmap_size_ratio = tf.gather(self.fmap_size_ratio, fmap_match, axis=0)
# size (#gt_box, #anchors, 2)
fmap_ratio_all = tf.gather(self.fmap_size_ratio, fmap_match_all, axis=0)
# fmap_size = tf.gather(self.fmap_size, fmap_match, axis=0)
# size (#gt_box, #anchors, 2)
fmap_size_all = tf.gather(self.fmap_size, fmap_match_all, axis=0)
# fmap_shift = tf.gather(self.box_idx_offset, fmap_match)
# size (#gt_box, #anchors)
fmap_shift_all = tf.gather(self.box_idx_offset, fmap_match_all)
# anchor_count = tf.gather(self.fmap_anchor_count, fmap_match)
anchor_count_all = tf.gather(self.fmap_anchor_count, fmap_match_all)
cycx = one_hot_gt[..., :2]
# adjusted_box_center = tf.truediv(cycx, fmap_size_ratio)
# size (#gt, #anchor, 2)
adjusted_box_center_all = tf.truediv(
tf.reshape(cycx * self.image_size, [-1, 1, 2]),
fmap_ratio_all
)
# box_center_cell = tf.cast(tf.maximum(tf.floor(adjusted_box_center - 1e-5), 0.0), tf.int32)
box_center_limit = tf.truediv(
tf.reshape(tf.ones_like(cycx) * self.image_size, [-1, 1, 2]),
fmap_ratio_all
)
box_center_cell_all = tf.cast(
tf.floor(tf.maximum(tf.minimum(adjusted_box_center_all, box_center_limit - 1e-3), 0.)),
tf.int32
)
# cell_shift = (box_center_cell[..., 0] * fmap_size[..., 1] + box_center_cell[..., 1])
# * anchor_count
cell_shift_all = (box_center_cell_all[..., 0] * fmap_size_all[..., 1] +
box_center_cell_all[..., 1]) * anchor_count_all
# anchor_shift = tf.gather(self.box_inds_in_fmap, iou_match)
anchor_shift_all = tf.tile(tf.reshape(self.box_inds_in_fmap, [1, -1]), [num_gt, 1])
# box_ind = fmap_shift + cell_shift + anchor_shift, (#gt, #anchors)
box_ind_all = fmap_shift_all + cell_shift_all + anchor_shift_all
# YOLOv4 special: let's do multi-match
# iou_large: (k, 2)
iou_large = tf.where(tf.greater(iou_result, self.matching_box_iou_thres))
multi_match_box = tf.gather_nd(box_ind_all, iou_large)
neg_val = tf.cond(tf.greater(tf.shape(multi_match_box)[0], 0),
lambda: tensor_slice_replace(self.obj_negative_template,
self.obj_negative_pos_template,
multi_match_box,
multi_match_box),
lambda: self.obj_negative_template, strict=True)
encoded = tf.cond(tf.greater(tf.shape(multi_match_box)[0], 0),
lambda: tensor_slice_replace(self.encoding_template,
one_hot_gt,
multi_match_box,
iou_large[:, 0]),
lambda: self.encoding_template, strict=True)
# end multi-match #
# now do best-match
iou_match = tf.reshape(tf.argmax(iou_result, axis=-1, output_type=tf.int32), [-1, 1])
best_match_box = tf.gather_nd(
box_ind_all,
tf.concat([tf.reshape(tf.range(num_gt), [-1, 1]), iou_match], axis=-1)
)
neg_val = tensor_slice_replace(
neg_val,
self.obj_negative_pos_template,
best_match_box,
best_match_box
)
encoded = tensor_slice_replace(
encoded,
one_hot_gt,
best_match_box,
tf.range(num_gt)
)
return tf.concat([encoded[..., :5], neg_val, encoded[..., 5:]], axis=-1)
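# Illustrative usage sketch for YOLOv4InputEncoderTensor; the feature map sizes and normalized
# (w, h) anchor shapes below are made-up examples:
#
#     encoder = YOLOv4InputEncoderTensor(
#         320, 320, 3,
#         feature_map_size=np.array([[10, 10], [20, 20], [40, 40]], dtype=np.int32),
#         anchors=np.array([[(0.279, 0.216), (0.375, 0.476), (0.897, 0.784)],
#                           [(0.072, 0.147), (0.149, 0.108), (0.142, 0.286)],
#                           [(0.024, 0.031), (0.038, 0.072), (0.079, 0.055)]],
#                          dtype=np.float32))
#     # labels: (#boxes, 6) array of [class_id, is_difficult, xmin, ymin, xmax, ymax]
#     encoded = encoder([tf.constant(labels, dtype=tf.float32)])
#     # encoded has shape (batch, #anchor_boxes, 7 + n_classes).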
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/dataio/input_encoder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TLT YOLOv4 data sequence."""
import cv2
import numpy as np
from nvidia_tao_tf1.cv.common.dataio.augmentation_lib import (
aug_flip,
aug_hsv,
aug_jitter,
aug_letterbox_resize,
aug_random_crop
)
from nvidia_tao_tf1.cv.yolo_v3.dataio.data_sequence import YOLOv3DataSequence
class YOLOv4DataSequence(YOLOv3DataSequence):
"""YOLOv4 data sequence."""
def _build_single_training_img(self, image, label, resize_ar, use_jitter, jitter_or_min_ratio):
'''Helper function for mosaic technique.
Args:
image: np array
label: np array
resize_ar: image resize aspect ratio,
use_jitter: false - use random crop. true - use jitter
jitter_or_min_ratio: if use_jitter, this value should be jitter
otherwise, this value should be min_ratio for crop
'''
bboxes = label[:, -4:]
image = aug_hsv(image,
self.augmentation_config.hue,
self.augmentation_config.saturation,
self.augmentation_config.exposure,
depth=self.image_depth)
if np.random.rand() < self.augmentation_config.vertical_flip:
image, bboxes = aug_flip(image, bboxes, ftype=0)
if np.random.rand() < self.augmentation_config.horizontal_flip:
image, bboxes = aug_flip(image, bboxes, ftype=1)
if use_jitter:
image, bboxes = aug_jitter(image, bboxes, jitter_or_min_ratio, resize_ar)
else:
image, bboxes = aug_random_crop(image, bboxes, resize_ar, jitter_or_min_ratio)
label[:, -4:] = bboxes
label = self._filter_invalid_labels(label)
return image, label
def _resize_label(self, label, t_shift, l_shift, h_ratio, w_ratio):
"""Helper function to resize labels in mosaic."""
# xmin
label[:, -4] = l_shift + w_ratio * label[:, -4]
# xmax
label[:, -2] = l_shift + w_ratio * label[:, -2]
# ymin
label[:, -3] = t_shift + h_ratio * label[:, -3]
# ymax
label[:, -1] = t_shift + h_ratio * label[:, -1]
def _preprocessing(self, image, label, output_img_size):
if self.is_training:
if np.random.rand() < self.augmentation_config.mosaic_prob:
# do mosaic augmentation
mosaic_min_ratio = self.augmentation_config.mosaic_min_ratio
                assert 0 < mosaic_min_ratio < 0.5, "mosaic_min_ratio must be in range (0, 0.5)"
# step 1. determine border
y_border = np.random.randint(int(output_img_size[1] * mosaic_min_ratio),
1 + int(output_img_size[1] * (1.0 - mosaic_min_ratio)))
x_border = np.random.randint(int(output_img_size[0] * mosaic_min_ratio),
1 + int(output_img_size[0] * (1.0 - mosaic_min_ratio)))
mosaic_image = np.zeros((output_img_size[1], output_img_size[0], 3))
mosaic_label = []
# Load 3 additional images from training data
additional_ids = np.random.randint(self.n_samples, size=3)
# step 2. process images
# left-top
image, label = self._build_single_training_img(
image, label,
x_border / float(y_border),
False, 1.0 - self.augmentation_config.jitter)
mosaic_image[:y_border, :x_border] = cv2.resize(image,
(x_border, y_border),
cv2.INTER_LINEAR)
self._resize_label(label, 0, 0, y_border / float(output_img_size[1]),
x_border / float(output_img_size[0]))
mosaic_label.append(label)
# right-top
image, label = self._get_single_item_raw(additional_ids[0])
image, label = self._build_single_training_img(
image, label,
(output_img_size[0] - x_border) / float(y_border),
False, 1.0 - self.augmentation_config.jitter)
mosaic_image[:y_border, x_border:] = cv2.resize(image,
(output_img_size[0] - x_border,
y_border),
cv2.INTER_LINEAR)
self._resize_label(label, 0, x_border / float(output_img_size[0]),
y_border / float(output_img_size[1]),
1.0 - (x_border / float(output_img_size[0])))
mosaic_label.append(label)
# left-bottom
image, label = self._get_single_item_raw(additional_ids[1])
image, label = self._build_single_training_img(
image, label,
x_border / float(output_img_size[1] - y_border),
False, 1.0 - self.augmentation_config.jitter)
mosaic_image[y_border:, :x_border] = cv2.resize(image,
(x_border,
output_img_size[1] - y_border),
cv2.INTER_LINEAR)
self._resize_label(label, y_border / float(output_img_size[1]), 0,
1.0 - (y_border / float(output_img_size[1])),
x_border / float(output_img_size[0]))
mosaic_label.append(label)
# right-bottom
image, label = self._get_single_item_raw(additional_ids[2])
image, label = self._build_single_training_img(
image, label,
(output_img_size[0] - x_border) / float(output_img_size[1] - y_border),
False, 1.0 - self.augmentation_config.jitter)
mosaic_image[y_border:, x_border:] = cv2.resize(image,
(output_img_size[0] - x_border,
output_img_size[1] - y_border),
cv2.INTER_LINEAR)
self._resize_label(label, y_border / float(output_img_size[1]),
x_border / float(output_img_size[0]),
1.0 - (y_border / float(output_img_size[1])),
1.0 - (x_border / float(output_img_size[0])))
mosaic_label.append(label)
label = np.concatenate(mosaic_label, axis=0)
image = mosaic_image
else:
# YOLOv3 style loading
image, label = self._build_single_training_img(
image, label,
output_img_size[0] / float(output_img_size[1]),
True, self.augmentation_config.jitter)
image = cv2.resize(image, output_img_size, cv2.INTER_LINEAR)
else:
bboxes = label[:, -4:]
image, bboxes = aug_letterbox_resize(image, bboxes, resize_shape=output_img_size)
label[:, -4:] = bboxes
label = self._filter_invalid_labels(label)
raw_label = label[:, 2:6]
if self.encode_fn is not None:
label = self.encode_fn(output_img_size, label)
if self.output_raw_label:
return image, label, raw_label
return image, label
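# Illustrative summary of the mosaic layout built above: the random point
# (x_border, y_border) splits the output canvas into four quadrants
#
#     left-top     | right-top
#     -------------+-------------   <- row y_border
#     left-bottom  | right-bottom
#                  ^ column x_border
#
# The left-top quadrant holds the current sample and the other three hold randomly drawn
# extra samples. Each quadrant is augmented independently by _build_single_training_img, its
# labels are rescaled and shifted into place by _resize_label, and all labels are concatenated.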
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/dataio/data_sequence.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test YOLOv4 data augmentation with tf.data dataloader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from keras import backend as K
import numpy as np
from PIL import Image
import pytest
import tensorflow as tf
from nvidia_tao_tf1.cv.detectnet_v2.common.dataio.converter_lib import _bytes_feature
from nvidia_tao_tf1.cv.detectnet_v2.common.dataio.converter_lib import _float_feature
from nvidia_tao_tf1.cv.detectnet_v2.common.dataio.converter_lib import _int64_feature
from nvidia_tao_tf1.cv.yolo_v3.proto.dataset_config_pb2 import (
YOLOv3DatasetConfig,
YOLOv3DataSource,
)
from nvidia_tao_tf1.cv.yolo_v3.utils.tensor_utils import get_init_ops
from nvidia_tao_tf1.cv.yolo_v4.dataio.tf_data_pipe import YOLOv4TFDataPipe
from nvidia_tao_tf1.cv.yolo_v4.proto.augmentation_config_pb2 import AugmentationConfig
from nvidia_tao_tf1.cv.yolo_v4.proto.experiment_pb2 import Experiment
def generate_dummy_labels(path, num_samples, height=0, width=0, labels=None):
"""Generate num_samples dummy labels and store them to a tfrecords file in the given path.
Args:
path: Path to the generated tfrecords file.
num_samples: Labels will be generated for this many images.
height, width: Optional image shape.
labels: Optional, list of custom labels to write into the tfrecords file. The user is
            expected to provide a label for each sample. Each label is a dictionary with the label
name as the key and value as the corresponding tf.train.Feature.
"""
if labels is None:
labels = [{'target/object_class': _bytes_feature('bicycle'),
'target/coordinates_x1': _float_feature(1.0),
'target/coordinates_y1': _float_feature(45.0),
'target/coordinates_x2': _float_feature(493.0),
'target/coordinates_y2': _float_feature(372.0),
'target/truncation': _float_feature(0.0),
'target/occlusion': _int64_feature(0),
'target/front': _float_feature(0.0),
'target/back': _float_feature(0.5),
'frame/id': _bytes_feature(str(i)),
'frame/height': _int64_feature(height),
'frame/width': _int64_feature(width)} for i in range(num_samples)]
else:
num_custom_labels = len(labels)
assert num_custom_labels == num_samples, \
"Expected %d custom labels, got %d." % (num_samples, num_custom_labels)
writer = tf.python_io.TFRecordWriter(str(path))
for label in labels:
features = tf.train.Features(feature=label)
example = tf.train.Example(features=features)
writer.write(example.SerializeToString())
writer.close()
@pytest.fixture
def dataset_config():
"""dataset config."""
source = YOLOv3DataSource()
curr_dir = os.path.dirname(__file__)
source.tfrecords_path = os.path.join(curr_dir, "tmp_tfrecord")
source.image_directory_path = curr_dir
dataset = YOLOv3DatasetConfig()
dataset.data_sources.extend([source])
dataset.target_class_mapping.update({"bicycle": "bicycle"})
dataset.validation_data_sources.extend([source])
dataset.image_extension = "jpg"
return dataset
@pytest.fixture
def augmentation_config():
"""augmentation config."""
aug_config = AugmentationConfig()
aug_config.output_width = 320
aug_config.output_height = 320
aug_config.output_channel = 3
aug_config.hue = 1
aug_config.saturation = 1.5
aug_config.exposure = 1.5
aug_config.vertical_flip = 0
aug_config.horizontal_flip = 0.5
aug_config.jitter = 0.3
aug_config.randomize_input_shape_period = 10
aug_config.mosaic_prob = 0.5
aug_config.mosaic_min_ratio = 0.2
return aug_config
@pytest.fixture
def _test_experiment_spec(dataset_config, augmentation_config):
curr_dir = os.path.dirname(__file__)
record_path = os.path.join(curr_dir, "tmp_tfrecord")
img_path = os.path.join(curr_dir, "0.jpg")
# write dummy tfrecord
generate_dummy_labels(record_path, 1, height=375, width=500)
# write dummy image
img = np.random.randint(low=0, high=255, size=(375, 500, 3), dtype=np.uint8)
tmp_im = Image.fromarray(img)
tmp_im.save(img_path)
# instantiate a dummy spec
spec = Experiment()
spec.dataset_config.data_sources.extend(dataset_config.data_sources)
spec.dataset_config.target_class_mapping.update(dataset_config.target_class_mapping)
spec.dataset_config.validation_data_sources.extend(dataset_config.validation_data_sources)
spec.dataset_config.image_extension = dataset_config.image_extension
spec.augmentation_config.output_width = augmentation_config.output_width
spec.augmentation_config.output_height = augmentation_config.output_height
spec.augmentation_config.output_channel = augmentation_config.output_channel
spec.augmentation_config.hue = augmentation_config.hue
spec.augmentation_config.saturation = augmentation_config.saturation
spec.augmentation_config.exposure = augmentation_config.exposure
spec.augmentation_config.vertical_flip = augmentation_config.vertical_flip
spec.augmentation_config.horizontal_flip = augmentation_config.horizontal_flip
spec.augmentation_config.jitter = augmentation_config.jitter
spec.augmentation_config.randomize_input_shape_period = \
augmentation_config.randomize_input_shape_period
spec.augmentation_config.mosaic_prob = augmentation_config.mosaic_prob
spec.augmentation_config.mosaic_min_ratio = augmentation_config.mosaic_min_ratio
spec.training_config.batch_size_per_gpu = 1
spec.eval_config.batch_size = 1
yield spec
os.remove(record_path)
os.remove(img_path)
def test_aug_tf_data(_test_experiment_spec):
h_tensor = tf.constant(
_test_experiment_spec.augmentation_config.output_height,
dtype=tf.int32
)
w_tensor = tf.constant(
_test_experiment_spec.augmentation_config.output_width,
dtype=tf.int32
)
tf_dataset = YOLOv4TFDataPipe(
experiment_spec=_test_experiment_spec,
training=True,
h_tensor=h_tensor,
w_tensor=w_tensor
)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = str(0)
sess = tf.Session(config=config)
K.set_session(sess)
sess.run(get_init_ops())
imgs, tf_labels = K.get_session().run(
[tf_dataset.images, tf_dataset.gt_labels]
)
    # since we forced the shape to a constant, it will be fixed
assert imgs.shape == (1, 3, 320, 320)
    # Depending on whether mosaic is enabled or not, the number of boxes can be
    # either 1 (no mosaic) or 4 (mosaic)
assert tf_labels[0].shape[0] in [1, 4]
assert tf_labels[0].shape[1] == 6
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/dataio/tests/test_aug_tf_data.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test YOLO v4 keras data augmentation with sequence dataloader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import numpy as np
from PIL import Image
import pytest
from nvidia_tao_tf1.cv.yolo_v3.proto.dataset_config_pb2 import (
YOLOv3DatasetConfig,
YOLOv3DataSource,
)
from nvidia_tao_tf1.cv.yolo_v4.dataio.data_sequence import YOLOv4DataSequence
from nvidia_tao_tf1.cv.yolo_v4.proto.augmentation_config_pb2 import AugmentationConfig
@pytest.fixture
def _test_experiment_spec():
img = np.random.randint(low=0, high=255, size=(375, 500, 3), dtype=np.uint8)
gt = ["bicycle 0 0 0 1 45 493 372 0 0 0 0 0 0 0",
"bicycle 0 0 0 54 24 500 326 0 0 0 0 0 0 0",
"bicycle 0 0 0 54 326 500 326 0 0 0 0 0 0 0"]
if not os.path.exists("tmp_labels/"):
os.mkdir("tmp_labels/")
with open("tmp_labels/0.txt", "w") as f:
for line in gt:
f.write(line + "\n")
if not os.path.exists("tmp_imgs/"):
os.mkdir("tmp_imgs/")
tmp_im = Image.fromarray(img)
tmp_im.save("tmp_imgs/0.jpg")
yield
shutil.rmtree("tmp_labels")
shutil.rmtree("tmp_imgs")
@pytest.fixture
def dataset_config():
"""dataset config."""
source = YOLOv3DataSource()
source.label_directory_path = "tmp_labels/"
source.image_directory_path = "tmp_imgs/"
dataset = YOLOv3DatasetConfig()
dataset.data_sources.extend([source])
dataset.target_class_mapping.update({"bicycle": "bicycle"})
dataset.validation_data_sources.extend([source])
dataset.image_extension = "jpg"
return dataset
@pytest.fixture
def augmentation_config():
"""augmentation config."""
aug_config = AugmentationConfig()
aug_config.output_width = 320
aug_config.output_height = 320
aug_config.output_channel = 3
aug_config.hue = 1
aug_config.saturation = 1.5
aug_config.exposure = 1.5
aug_config.vertical_flip = 0
aug_config.horizontal_flip = 0.5
aug_config.jitter = 0.3
aug_config.randomize_input_shape_period = 10
aug_config.mosaic_prob = 0.5
aug_config.mosaic_min_ratio = 0.2
return aug_config
def test_aug_sequence(dataset_config, augmentation_config, _test_experiment_spec):
# init dataloader:
train_dataset = YOLOv4DataSequence(
dataset_config=dataset_config,
augmentation_config=augmentation_config,
batch_size=1,
is_training=True,
encode_fn=None
)
val_dataset = YOLOv4DataSequence(
dataset_config=dataset_config,
augmentation_config=augmentation_config,
batch_size=1,
is_training=False,
encode_fn=None
)
# test load gt label for train
train_imgs, train_labels = train_dataset[0]
val_imgs, val_labels = val_dataset[0]
assert train_labels[0].shape[-1] == 6
assert val_labels[0].shape[-1] == 6
# test filter wrong gt label
assert val_labels[0].shape[0] == 2
# test preprocess
# augmentations applied for training data, dynamic scale ranges
# from [0.6, 1.5] of target size
assert train_imgs[0].shape[0] == 3
assert 320 * 0.6 <= train_imgs[0].shape[1] <= 320 * 1.5
assert 320 * 0.6 <= train_imgs[0].shape[2] <= 320 * 1.5
assert val_imgs[0].shape == (3, 320, 320)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/dataio/tests/test_aug_sequence.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test YOLO v4 input encoders."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from nvidia_tao_tf1.cv.yolo_v4.dataio.input_encoder import (
YOLOv4InputEncoder,
YOLOv4InputEncoderTensor
)
def test_input_encoder():
"""test input encoder."""
encoder = YOLOv4InputEncoder(3)
labels = np.array([
[0, 0, 1, 45, 293, 272],
[1, 0, 54, 24, 200, 226],
[2, 0, 54, 126, 200, 226]],
dtype=np.float32
)
encoded = encoder((320, 320), labels)
print(encoded.shape)
assert encoded.shape[-1] == 10
def test_input_encoder_tensor():
"""test input encoder with tf.data dataset."""
encoder = YOLOv4InputEncoderTensor(
320, 320, 3,
feature_map_size=np.array([[10, 10], [20, 20], [40, 40]], dtype=np.int32),
anchors=np.array([[(0.279, 0.216), (0.375, 0.476), (0.897, 0.784)],
[(0.072, 0.147), (0.149, 0.108), (0.142, 0.286)],
[(0.024, 0.031), (0.038, 0.072), (0.079, 0.055)]],
dtype=np.float32)
)
labels = np.array([
[1, 0, 1, 45, 293, 272],
[0, 0, 54, 24, 200, 226],
[2, 0, 54, 126, 200, 226]],
dtype=np.float32
)
labels = [tf.constant(labels, dtype=tf.float32)]
encoded = encoder(labels)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = str(0)
sess = tf.Session(config=config)
sess.run(tf.global_variables_initializer())
encoded_np = sess.run(encoded)
assert encoded_np.shape[-1] == 10
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/dataio/tests/test_input_encoder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test YOLOv4 tf.data dataloader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from keras import backend as K
import numpy as np
from PIL import Image
import pytest
import tensorflow as tf
from nvidia_tao_tf1.cv.detectnet_v2.common.dataio.converter_lib import _bytes_feature
from nvidia_tao_tf1.cv.detectnet_v2.common.dataio.converter_lib import _float_feature
from nvidia_tao_tf1.cv.detectnet_v2.common.dataio.converter_lib import _int64_feature
from nvidia_tao_tf1.cv.yolo_v3.proto.dataset_config_pb2 import (
YOLOv3DatasetConfig,
YOLOv3DataSource,
)
from nvidia_tao_tf1.cv.yolo_v3.utils.tensor_utils import get_init_ops
from nvidia_tao_tf1.cv.yolo_v4.dataio.tf_data_pipe import YOLOv4TFDataPipe
from nvidia_tao_tf1.cv.yolo_v4.proto.augmentation_config_pb2 import AugmentationConfig
from nvidia_tao_tf1.cv.yolo_v4.proto.experiment_pb2 import Experiment
def generate_dummy_labels(path, num_samples, height=0, width=0, labels=None):
"""Generate num_samples dummy labels and store them to a tfrecords file in the given path.
Args:
path: Path to the generated tfrecords file.
num_samples: Labels will be generated for this many images.
height, width: Optional image shape.
labels: Optional, list of custom labels to write into the tfrecords file. The user is
            expected to provide a label for each sample. Each label is a dictionary with the label
name as the key and value as the corresponding tf.train.Feature.
"""
if labels is None:
labels = [{'target/object_class': _bytes_feature('bicycle'),
'target/coordinates_x1': _float_feature(1.0),
'target/coordinates_y1': _float_feature(45.0),
'target/coordinates_x2': _float_feature(493.0),
'target/coordinates_y2': _float_feature(372.0),
'target/truncation': _float_feature(0.0),
'target/occlusion': _int64_feature(0),
'target/front': _float_feature(0.0),
'target/back': _float_feature(0.5),
'frame/id': _bytes_feature(str(i)),
'frame/height': _int64_feature(height),
'frame/width': _int64_feature(width)} for i in range(num_samples)]
else:
num_custom_labels = len(labels)
assert num_custom_labels == num_samples, \
"Expected %d custom labels, got %d." % (num_samples, num_custom_labels)
writer = tf.python_io.TFRecordWriter(str(path))
for label in labels:
features = tf.train.Features(feature=label)
example = tf.train.Example(features=features)
writer.write(example.SerializeToString())
writer.close()
@pytest.fixture
def dataset_config():
"""dataset config."""
source = YOLOv3DataSource()
curr_dir = os.path.dirname(__file__)
source.tfrecords_path = os.path.join(curr_dir, "tmp_tfrecord")
source.image_directory_path = curr_dir
dataset = YOLOv3DatasetConfig()
dataset.data_sources.extend([source])
dataset.target_class_mapping.update({"bicycle": "bicycle"})
dataset.validation_data_sources.extend([source])
dataset.image_extension = "jpg"
return dataset
@pytest.fixture
def augmentation_config():
"""augmentation config."""
aug_config = AugmentationConfig()
aug_config.output_width = 320
aug_config.output_height = 320
aug_config.output_channel = 3
return aug_config
@pytest.fixture
def _test_experiment_spec(dataset_config, augmentation_config):
curr_dir = os.path.dirname(__file__)
record_path = os.path.join(curr_dir, "tmp_tfrecord")
img_path = os.path.join(curr_dir, "0.jpg")
# write dummy tfrecord
generate_dummy_labels(record_path, 1, height=375, width=500)
# write dummy image
img = np.random.randint(low=0, high=255, size=(375, 500, 3), dtype=np.uint8)
tmp_im = Image.fromarray(img)
tmp_im.save(img_path)
# instantiate a dummy spec
spec = Experiment()
spec.dataset_config.data_sources.extend(dataset_config.data_sources)
spec.dataset_config.target_class_mapping.update(dataset_config.target_class_mapping)
spec.dataset_config.validation_data_sources.extend(dataset_config.validation_data_sources)
spec.dataset_config.image_extension = dataset_config.image_extension
spec.augmentation_config.output_width = augmentation_config.output_width
spec.augmentation_config.output_height = augmentation_config.output_height
spec.augmentation_config.output_channel = augmentation_config.output_channel
spec.training_config.batch_size_per_gpu = 1
spec.eval_config.batch_size = 1
yield spec
os.remove(record_path)
os.remove(img_path)
def test_tf_data_pipe(_test_experiment_spec):
h_tensor = tf.constant(
_test_experiment_spec.augmentation_config.output_height,
dtype=tf.int32
)
w_tensor = tf.constant(
_test_experiment_spec.augmentation_config.output_width,
dtype=tf.int32
)
tf_dataset = YOLOv4TFDataPipe(
experiment_spec=_test_experiment_spec,
training=False,
h_tensor=h_tensor,
w_tensor=w_tensor
)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = str(0)
sess = tf.Session(config=config)
K.set_session(sess)
sess.run(get_init_ops())
imgs, tf_labels = K.get_session().run(
[tf_dataset.images, tf_dataset.gt_labels]
)
assert imgs.shape == (1, 3, 320, 320)
assert tf_labels[0].shape == (1, 6)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/dataio/tests/test_tf_data_pipe.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test YOLO v4 keras sequence dataloader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import numpy as np
from PIL import Image
import pytest
from nvidia_tao_tf1.cv.yolo_v3.proto.dataset_config_pb2 import (
YOLOv3DatasetConfig,
YOLOv3DataSource,
)
from nvidia_tao_tf1.cv.yolo_v4.dataio.data_sequence import YOLOv4DataSequence
from nvidia_tao_tf1.cv.yolo_v4.proto.augmentation_config_pb2 import AugmentationConfig
@pytest.fixture
def _test_experiment_spec():
img = np.random.randint(low=0, high=255, size=(375, 500, 3), dtype=np.uint8)
gt = ["bicycle 0 0 0 1 45 493 372 0 0 0 0 0 0 0",
"bicycle 0 0 0 54 24 500 326 0 0 0 0 0 0 0",
"bicycle 0 0 0 54 326 500 326 0 0 0 0 0 0 0"]
if not os.path.exists("tmp_labels/"):
os.mkdir("tmp_labels/")
with open("tmp_labels/0.txt", "w") as f:
for line in gt:
f.write(line + "\n")
if not os.path.exists("tmp_imgs/"):
os.mkdir("tmp_imgs/")
tmp_im = Image.fromarray(img)
tmp_im.save("tmp_imgs/0.jpg")
yield
shutil.rmtree("tmp_labels")
shutil.rmtree("tmp_imgs")
@pytest.fixture
def dataset_config():
"""dataset config."""
source = YOLOv3DataSource()
source.label_directory_path = "tmp_labels/"
source.image_directory_path = "tmp_imgs/"
dataset = YOLOv3DatasetConfig()
dataset.data_sources.extend([source])
dataset.target_class_mapping.update({"bicycle": "bicycle"})
dataset.validation_data_sources.extend([source])
dataset.image_extension = "jpg"
return dataset
@pytest.fixture
def augmentation_config():
"""augmentation config."""
aug_config = AugmentationConfig()
aug_config.output_width = 320
aug_config.output_height = 320
aug_config.output_channel = 3
return aug_config
def test_data_sequence(dataset_config, augmentation_config, _test_experiment_spec):
# init dataloader:
train_dataset = YOLOv4DataSequence(
dataset_config=dataset_config,
augmentation_config=augmentation_config,
batch_size=1,
is_training=True,
encode_fn=None
)
val_dataset = YOLOv4DataSequence(
dataset_config=dataset_config,
augmentation_config=augmentation_config,
batch_size=1,
is_training=False,
encode_fn=None
)
# test load gt label for train
train_imgs, train_labels = train_dataset[0]
val_imgs, val_labels = val_dataset[0]
assert train_labels[0].shape[-1] == 6
assert val_labels[0].shape[-1] == 6
# test filter wrong gt label
assert val_labels[0].shape[0] == 2
# test preprocess
assert train_imgs[0].shape == (3, 320, 320)
assert val_imgs[0].shape == (3, 320, 320)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/dataio/tests/test_data_sequence.py |
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
"""IVA YOLOv4 BBoxPostProcessingLayer Layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.engine.topology import Layer
import tensorflow as tf
class BBoxPostProcessingLayer(Layer):
    '''
    BBoxPostProcessing layer to map prediction to GT target format.
    xy = sigmoid(xy) * grid_scale_xy - (grid_scale_xy - 1.0) / 2.0
    wh = exp(wh)
    Args:
        grid_scale_xy: scale factor applied to the sigmoid xy offsets; 1.0 leaves them unchanged.
    '''
def __init__(self,
grid_scale_xy=1.0,
**kwargs):
'''Init function.'''
self.grid_scale_xy = grid_scale_xy
super(BBoxPostProcessingLayer, self).__init__(**kwargs)
def call(self, x):
"""
Post-process detection bbox prediction.
Input:
grid_scale_xy: a float indicating how much the grid scale should be
Output:
a function takes in detection prediction and returns processed detection prediction.
"""
# Workaround for UFF export. Need to change if using ONNX
x_shape = tf.shape(x)
x = tf.reshape(x, [x_shape[0], x_shape[1], x_shape[2], 1])
# input last dim: [pred_y, pred_x, pred_h, pred_w, object, cls...]
yx = x[:, :, :2, :]
hw = x[:, :, 2:4, :]
yx = tf.sigmoid(yx) * self.grid_scale_xy - (self.grid_scale_xy - 1.0) / 2.0
# Limit HW max to np.exp(8) to avoid numerical instability.
        # Do not change the following seemingly stupid way of constructing tf.constant(8)
# otherwise TensorRT will complain. EXP(8.0) = 2981, more than enough for hw multiplier
hw = tf.exp(tf.minimum(hw, hw + 8.0 - hw))
result = tf.concat([yx, hw, x[:, :, 4:, :]], 2)
result = tf.reshape(result, [x_shape[0], x_shape[1], x_shape[2]])
return result
def compute_output_shape(self, input_shape):
'''Layer output shape function.'''
return input_shape
def get_config(self):
'''Layer get_config function.'''
config = {
'grid_scale_xy': self.grid_scale_xy,
}
base_config = super(BBoxPostProcessingLayer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
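# Illustrative usage sketch; the grid_scale_xy value and tensor shape are made-up examples:
#
#     from keras.layers import Input
#     x = Input(shape=(1000, 15))  # e.g. 1000 boxes; last dim = 4 box terms + objectness + 10 classes
#     y = BBoxPostProcessingLayer(grid_scale_xy=1.05)(x)
#     # y keeps the input shape; the xy offsets are sigmoid-scaled and hw is exponentiated.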
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/layers/bbox_postprocessing_layer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IVA YOLO Decode Layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import keras.backend as K
from keras.engine.topology import Layer
import tensorflow as tf
class YOLOv4DecodeLayer(Layer):
'''Decodes model output to corner-formatted boxes.'''
def call(self, x):
'''
Decode output.
Args:
x: 3-D tensor. Last dimension is
(cy, cx, ph, pw, step_y, step_x, pred_y, pred_x, pred_h, pred_w, object, cls...)
Returns:
boxes: 3-D tensor. Last dimension is (x_min, y_min, x_max, y_max, cls_score)
'''
# shape [..., num_cls]
# !!! DO NOT replace `:, :, :,` with `...,` as this fails TensorRT export
cls_score = tf.sigmoid(x[:, :, 11:]) * tf.sigmoid(x[:, :, 10:11])
by = x[:, :, 0:1] + x[:, :, 6:7] * x[:, :, 4:5] # shape [..., 1]
bx = x[:, :, 1:2] + x[:, :, 7:8] * x[:, :, 5:6] # shape [..., 1]
bh = x[:, :, 2:3] * x[:, :, 8:9] # shape [..., 1]
bw = x[:, :, 3:4] * x[:, :, 9:10] # shape [..., 1]
x_min = bx - 0.5 * bw
x_max = bx + 0.5 * bw
y_min = by - 0.5 * bh
y_max = by + 0.5 * bh
# tf.concat(axis=-1) can't be processed correctly by uff converter.
return K.concatenate([x_min, y_min, x_max, y_max, cls_score], -1)
def compute_output_shape(self, input_shape):
'''Layer output shape function.'''
return (input_shape[0], input_shape[1], input_shape[2] - 7)
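# Illustrative note: the input's last dimension packs
#     [cy, cx, ph, pw, step_y, step_x, pred_y, pred_x, pred_h, pred_w, object, cls...]
# (11 + num_classes channels), and the output packs
#     [x_min, y_min, x_max, y_max, cls_score...]
# (4 + num_classes channels), which is why compute_output_shape subtracts 7. A minimal sketch,
# assuming `encoded_pred` is such a 3-D tensor:
#
#     decoded = YOLOv4DecodeLayer()(encoded_pred)   # (batch, #boxes, 4 + num_classes)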
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/layers/decode_layer.py |
"""Module containing implementation custom layers for YOLOv4.""" | tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/layers/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Split layer in YOLO v4 tiny."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Layer
class Split(Layer):
'''Keras Split layer for doing tf.split.'''
def __init__(self, groups, group_id, **kwargs):
"""Initialize the Split layer.
Args:
groups(int): Number of groups of channels.
group_id(int): The ID of the output group of channels.
"""
self.groups = groups
self.group_id = group_id
super(Split, self).__init__(**kwargs)
def build(self, input_shape):
"""Setup some internal parameters."""
self.nb_channels = input_shape[1]
assert self.nb_channels % self.groups == 0, (
"Number of channels is not a multiple of number of groups!"
)
def compute_output_shape(self, input_shape):
"""compute_output_shape.
Args:
input_shape(tuple): the shape of the input tensor.
Returns:
The output tensor shape: (N, C // g, h, w).
"""
batch_size = input_shape[0]
h = input_shape[2]
w = input_shape[3]
return (batch_size, self.nb_channels // self.groups, h, w)
def call(self, x, mask=None):
"""Call this layer with inputs."""
group_size = self.nb_channels // self.groups
return x[:, group_size * self.group_id : group_size * (self.group_id + 1), :, :]
def get_config(self):
"""Get config for this layer."""
config = {'groups': self.groups, "group_id": self.group_id}
base_config = super(Split, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
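# Illustrative usage sketch; the tensor shape is a made-up example:
#
#     from keras.layers import Input
#     x = Input(shape=(64, 52, 52))           # channels-first (C, H, W)
#     right = Split(groups=2, group_id=1)(x)  # -> (None, 32, 52, 52), second half of channels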
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/layers/split.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""helper layers for model export."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import keras.backend as K
from keras.layers import Layer
class BoxLayer(Layer):
'''
Helper layer to export model - Get box.
Input:
Encoded detection (last layer output of training model).
Output:
Boxes in corner format (x_min, y_min, x_max, y_max)
'''
def compute_output_shape(self, input_shape):
'''Define output shape.'''
return (input_shape[0], input_shape[1], 1, 4)
def call(self, x):
'''See class doc.'''
x_shape = K.shape(x)
x = K.reshape(x, [x_shape[0], x_shape[1], 1, x_shape[2]])
by = x[:, :, :, 0:1] + x[:, :, :, 6:7] * x[:, :, :, 4:5] # shape [..., 1]
bx = x[:, :, :, 1:2] + x[:, :, :, 7:8] * x[:, :, :, 5:6] # shape [..., 1]
bh = x[:, :, :, 2:3] * x[:, :, :, 8:9] # shape [..., 1]
bw = x[:, :, :, 3:4] * x[:, :, :, 9:10] # shape [..., 1]
x_min = bx - 0.5 * bw
x_max = bx + 0.5 * bw
y_min = by - 0.5 * bh
y_max = by + 0.5 * bh
loc = K.concatenate([x_min, y_min, x_max, y_max], -1)
return K.identity(loc, name="out_box")
class ClsLayer(Layer):
'''
Helper layer to export model - Get class score.
Input:
Encoded detection (last layer output of training model).
Output:
(Sigmoid) confidence scores for each class.
'''
def compute_output_shape(self, input_shape):
'''Define output shape.'''
return (input_shape[0], input_shape[1], input_shape[2]-11, 1)
def call(self, x):
'''See class doc.'''
# shape [..., num_cls]
x_shape = K.shape(x)
x = K.reshape(x, [x_shape[0], x_shape[1], x_shape[2], 1])
cls_score = K.sigmoid(x[:, :, 11:, :]) * K.sigmoid(x[:, :, 10:11, :])
return K.identity(cls_score, name="out_cls")
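# Illustrative usage sketch, assuming `encoded_pred` is the encoded detection tensor produced
# by the training model:
#
#     boxes = BoxLayer()(encoded_pred)    # (batch, #boxes, 1, 4) corner-format boxes
#     scores = ClsLayer()(encoded_pred)   # (batch, #boxes, num_classes, 1) sigmoid class scores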
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/layers/export_layers.py |
"""Implementation of model protobuf definitions and pb2 files.""" | tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/proto/__init__.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/yolo_v4/proto/augmentation_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/yolo_v4/proto/augmentation_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n9nvidia_tao_tf1/cv/yolo_v4/proto/augmentation_config.proto\"\xa1\x03\n\x12\x41ugmentationConfig\x12\x0b\n\x03hue\x18\x01 \x01(\x02\x12\x12\n\nsaturation\x18\x02 \x01(\x02\x12\x10\n\x08\x65xposure\x18\x03 \x01(\x02\x12\x15\n\rvertical_flip\x18\x04 \x01(\x02\x12\x17\n\x0fhorizontal_flip\x18\x05 \x01(\x02\x12\x0e\n\x06jitter\x18\x06 \x01(\x02\x12\x14\n\x0coutput_width\x18\x07 \x01(\x05\x12\x15\n\routput_height\x18\x08 \x01(\x05\x12\x16\n\x0eoutput_channel\x18\t \x01(\x05\x12\x14\n\x0coutput_depth\x18\x0e \x01(\r\x12$\n\x1crandomize_input_shape_period\x18\n \x01(\x05\x12\x13\n\x0bmosaic_prob\x18\x0b \x01(\x02\x12\x18\n\x10mosaic_min_ratio\x18\x0c \x01(\x02\x12\x36\n\nimage_mean\x18\r \x03(\x0b\x32\".AugmentationConfig.ImageMeanEntry\x1a\x30\n\x0eImageMeanEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x02:\x02\x38\x01\x62\x06proto3')
)
_AUGMENTATIONCONFIG_IMAGEMEANENTRY = _descriptor.Descriptor(
name='ImageMeanEntry',
full_name='AugmentationConfig.ImageMeanEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='AugmentationConfig.ImageMeanEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='AugmentationConfig.ImageMeanEntry.value', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=431,
serialized_end=479,
)
_AUGMENTATIONCONFIG = _descriptor.Descriptor(
name='AugmentationConfig',
full_name='AugmentationConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='hue', full_name='AugmentationConfig.hue', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='saturation', full_name='AugmentationConfig.saturation', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='exposure', full_name='AugmentationConfig.exposure', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='vertical_flip', full_name='AugmentationConfig.vertical_flip', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='horizontal_flip', full_name='AugmentationConfig.horizontal_flip', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='jitter', full_name='AugmentationConfig.jitter', index=5,
number=6, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='output_width', full_name='AugmentationConfig.output_width', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='output_height', full_name='AugmentationConfig.output_height', index=7,
number=8, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='output_channel', full_name='AugmentationConfig.output_channel', index=8,
number=9, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='output_depth', full_name='AugmentationConfig.output_depth', index=9,
number=14, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='randomize_input_shape_period', full_name='AugmentationConfig.randomize_input_shape_period', index=10,
number=10, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mosaic_prob', full_name='AugmentationConfig.mosaic_prob', index=11,
number=11, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mosaic_min_ratio', full_name='AugmentationConfig.mosaic_min_ratio', index=12,
number=12, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='image_mean', full_name='AugmentationConfig.image_mean', index=13,
number=13, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_AUGMENTATIONCONFIG_IMAGEMEANENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=62,
serialized_end=479,
)
_AUGMENTATIONCONFIG_IMAGEMEANENTRY.containing_type = _AUGMENTATIONCONFIG
_AUGMENTATIONCONFIG.fields_by_name['image_mean'].message_type = _AUGMENTATIONCONFIG_IMAGEMEANENTRY
DESCRIPTOR.message_types_by_name['AugmentationConfig'] = _AUGMENTATIONCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AugmentationConfig = _reflection.GeneratedProtocolMessageType('AugmentationConfig', (_message.Message,), dict(
ImageMeanEntry = _reflection.GeneratedProtocolMessageType('ImageMeanEntry', (_message.Message,), dict(
DESCRIPTOR = _AUGMENTATIONCONFIG_IMAGEMEANENTRY,
__module__ = 'nvidia_tao_tf1.cv.yolo_v4.proto.augmentation_config_pb2'
# @@protoc_insertion_point(class_scope:AugmentationConfig.ImageMeanEntry)
))
,
DESCRIPTOR = _AUGMENTATIONCONFIG,
__module__ = 'nvidia_tao_tf1.cv.yolo_v4.proto.augmentation_config_pb2'
# @@protoc_insertion_point(class_scope:AugmentationConfig)
))
_sym_db.RegisterMessage(AugmentationConfig)
_sym_db.RegisterMessage(AugmentationConfig.ImageMeanEntry)
_AUGMENTATIONCONFIG_IMAGEMEANENTRY._options = None
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/proto/augmentation_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/yolo_v4/proto/experiment.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from nvidia_tao_tf1.cv.yolo_v4.proto import augmentation_config_pb2 as nvidia__tao__tf1_dot_cv_dot_yolo__v4_dot_proto_dot_augmentation__config__pb2
from nvidia_tao_tf1.cv.yolo_v3.proto import dataset_config_pb2 as nvidia__tao__tf1_dot_cv_dot_yolo__v3_dot_proto_dot_dataset__config__pb2
from nvidia_tao_tf1.cv.common.proto import training_config_pb2 as nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_training__config__pb2
from nvidia_tao_tf1.cv.common.proto import eval_config_pb2 as nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_eval__config__pb2
from nvidia_tao_tf1.cv.common.proto import nms_config_pb2 as nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_nms__config__pb2
from nvidia_tao_tf1.cv.common.proto import class_weighting_config_pb2 as nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_class__weighting__config__pb2
from nvidia_tao_tf1.cv.yolo_v4.proto import yolov4_config_pb2 as nvidia__tao__tf1_dot_cv_dot_yolo__v4_dot_proto_dot_yolov4__config__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/yolo_v4/proto/experiment.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n0nvidia_tao_tf1/cv/yolo_v4/proto/experiment.proto\x1a\x39nvidia_tao_tf1/cv/yolo_v4/proto/augmentation_config.proto\x1a\x34nvidia_tao_tf1/cv/yolo_v3/proto/dataset_config.proto\x1a\x34nvidia_tao_tf1/cv/common/proto/training_config.proto\x1a\x30nvidia_tao_tf1/cv/common/proto/eval_config.proto\x1a/nvidia_tao_tf1/cv/common/proto/nms_config.proto\x1a;nvidia_tao_tf1/cv/common/proto/class_weighting_config.proto\x1a\x33nvidia_tao_tf1/cv/yolo_v4/proto/yolov4_config.proto\"\xca\x02\n\nExperiment\x12,\n\x0e\x64\x61taset_config\x18\x01 \x01(\x0b\x32\x14.YOLOv3DatasetConfig\x12\x30\n\x13\x61ugmentation_config\x18\x02 \x01(\x0b\x32\x13.AugmentationConfig\x12(\n\x0ftraining_config\x18\x03 \x01(\x0b\x32\x0f.TrainingConfig\x12 \n\x0b\x65val_config\x18\x04 \x01(\x0b\x32\x0b.EvalConfig\x12\x1e\n\nnms_config\x18\x05 \x01(\x0b\x32\n.NMSConfig\x12$\n\ryolov4_config\x18\x06 \x01(\x0b\x32\r.YOLOv4Config\x12\x35\n\x16\x63lass_weighting_config\x18\x08 \x01(\x0b\x32\x15.ClassWeightingConfig\x12\x13\n\x0brandom_seed\x18\x07 \x01(\rb\x06proto3')
,
dependencies=[nvidia__tao__tf1_dot_cv_dot_yolo__v4_dot_proto_dot_augmentation__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_yolo__v3_dot_proto_dot_dataset__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_training__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_eval__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_nms__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_class__weighting__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_yolo__v4_dot_proto_dot_yolov4__config__pb2.DESCRIPTOR,])
_EXPERIMENT = _descriptor.Descriptor(
name='Experiment',
full_name='Experiment',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dataset_config', full_name='Experiment.dataset_config', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='augmentation_config', full_name='Experiment.augmentation_config', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='training_config', full_name='Experiment.training_config', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='eval_config', full_name='Experiment.eval_config', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='nms_config', full_name='Experiment.nms_config', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='yolov4_config', full_name='Experiment.yolov4_config', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='class_weighting_config', full_name='Experiment.class_weighting_config', index=6,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='random_seed', full_name='Experiment.random_seed', index=7,
number=7, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=433,
serialized_end=763,
)
_EXPERIMENT.fields_by_name['dataset_config'].message_type = nvidia__tao__tf1_dot_cv_dot_yolo__v3_dot_proto_dot_dataset__config__pb2._YOLOV3DATASETCONFIG
_EXPERIMENT.fields_by_name['augmentation_config'].message_type = nvidia__tao__tf1_dot_cv_dot_yolo__v4_dot_proto_dot_augmentation__config__pb2._AUGMENTATIONCONFIG
_EXPERIMENT.fields_by_name['training_config'].message_type = nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_training__config__pb2._TRAININGCONFIG
_EXPERIMENT.fields_by_name['eval_config'].message_type = nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_eval__config__pb2._EVALCONFIG
_EXPERIMENT.fields_by_name['nms_config'].message_type = nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_nms__config__pb2._NMSCONFIG
_EXPERIMENT.fields_by_name['yolov4_config'].message_type = nvidia__tao__tf1_dot_cv_dot_yolo__v4_dot_proto_dot_yolov4__config__pb2._YOLOV4CONFIG
_EXPERIMENT.fields_by_name['class_weighting_config'].message_type = nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_class__weighting__config__pb2._CLASSWEIGHTINGCONFIG
DESCRIPTOR.message_types_by_name['Experiment'] = _EXPERIMENT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Experiment = _reflection.GeneratedProtocolMessageType('Experiment', (_message.Message,), dict(
DESCRIPTOR = _EXPERIMENT,
__module__ = 'nvidia_tao_tf1.cv.yolo_v4.proto.experiment_pb2'
# @@protoc_insertion_point(class_scope:Experiment)
))
_sym_db.RegisterMessage(Experiment)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/proto/experiment_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/yolo_v4/proto/yolov4_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/yolo_v4/proto/yolov4_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n3nvidia_tao_tf1/cv/yolo_v4/proto/yolov4_config.proto\"\x9b\x04\n\x0cYOLOv4Config\x12\x18\n\x10\x62ig_anchor_shape\x18\x01 \x01(\t\x12\x18\n\x10mid_anchor_shape\x18\x02 \x01(\t\x12\x1a\n\x12small_anchor_shape\x18\x03 \x01(\t\x12 \n\x18matching_neutral_box_iou\x18\x04 \x01(\x02\x12\x18\n\x10\x62ox_matching_iou\x18\x05 \x01(\x02\x12\x0c\n\x04\x61rch\x18\x06 \x01(\t\x12\x0f\n\x07nlayers\x18\x07 \x01(\r\x12\x18\n\x10\x61rch_conv_blocks\x18\x08 \x01(\r\x12\x17\n\x0floss_loc_weight\x18\t \x01(\x02\x12\x1c\n\x14loss_neg_obj_weights\x18\n \x01(\x02\x12\x1a\n\x12loss_class_weights\x18\x0b \x01(\x02\x12\x15\n\rfreeze_blocks\x18\x0c \x03(\x02\x12\x11\n\tfreeze_bn\x18\r \x01(\x08\x12\x12\n\nforce_relu\x18\x0e \x01(\x08\x12\x12\n\nactivation\x18\x15 \x01(\t\x12\x18\n\x10\x66ocal_loss_alpha\x18\x0f \x01(\x02\x12\x18\n\x10\x66ocal_loss_gamma\x18\x10 \x01(\x02\x12\x17\n\x0flabel_smoothing\x18\x11 \x01(\x02\x12\x1a\n\x12\x62ig_grid_xy_extend\x18\x12 \x01(\x02\x12\x1a\n\x12mid_grid_xy_extend\x18\x13 \x01(\x02\x12\x1c\n\x14small_grid_xy_extend\x18\x14 \x01(\x02\x62\x06proto3')
)
_YOLOV4CONFIG = _descriptor.Descriptor(
name='YOLOv4Config',
full_name='YOLOv4Config',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='big_anchor_shape', full_name='YOLOv4Config.big_anchor_shape', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mid_anchor_shape', full_name='YOLOv4Config.mid_anchor_shape', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='small_anchor_shape', full_name='YOLOv4Config.small_anchor_shape', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='matching_neutral_box_iou', full_name='YOLOv4Config.matching_neutral_box_iou', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='box_matching_iou', full_name='YOLOv4Config.box_matching_iou', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='arch', full_name='YOLOv4Config.arch', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='nlayers', full_name='YOLOv4Config.nlayers', index=6,
number=7, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='arch_conv_blocks', full_name='YOLOv4Config.arch_conv_blocks', index=7,
number=8, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='loss_loc_weight', full_name='YOLOv4Config.loss_loc_weight', index=8,
number=9, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='loss_neg_obj_weights', full_name='YOLOv4Config.loss_neg_obj_weights', index=9,
number=10, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='loss_class_weights', full_name='YOLOv4Config.loss_class_weights', index=10,
number=11, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='freeze_blocks', full_name='YOLOv4Config.freeze_blocks', index=11,
number=12, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='freeze_bn', full_name='YOLOv4Config.freeze_bn', index=12,
number=13, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='force_relu', full_name='YOLOv4Config.force_relu', index=13,
number=14, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='activation', full_name='YOLOv4Config.activation', index=14,
number=21, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='focal_loss_alpha', full_name='YOLOv4Config.focal_loss_alpha', index=15,
number=15, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='focal_loss_gamma', full_name='YOLOv4Config.focal_loss_gamma', index=16,
number=16, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='label_smoothing', full_name='YOLOv4Config.label_smoothing', index=17,
number=17, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='big_grid_xy_extend', full_name='YOLOv4Config.big_grid_xy_extend', index=18,
number=18, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mid_grid_xy_extend', full_name='YOLOv4Config.mid_grid_xy_extend', index=19,
number=19, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='small_grid_xy_extend', full_name='YOLOv4Config.small_grid_xy_extend', index=20,
number=20, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=56,
serialized_end=595,
)
DESCRIPTOR.message_types_by_name['YOLOv4Config'] = _YOLOV4CONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
YOLOv4Config = _reflection.GeneratedProtocolMessageType('YOLOv4Config', (_message.Message,), dict(
DESCRIPTOR = _YOLOV4CONFIG,
__module__ = 'nvidia_tao_tf1.cv.yolo_v4.proto.yolov4_config_pb2'
# @@protoc_insertion_point(class_scope:YOLOv4Config)
))
_sym_db.RegisterMessage(YOLOv4Config)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/proto/yolov4_config_pb2.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''build model for training.'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def _load_pretrain_weights(pretrain_model, train_model):
    """Load weights from the pretrained model into the training model, matching layers by name."""
strict_mode = True
for layer in train_model.layers[1:]:
# The layer must match up to yolo layers.
if layer.name.find('yolo_') != -1:
strict_mode = False
try:
l_return = pretrain_model.get_layer(layer.name)
except ValueError:
if strict_mode and layer.name[-3:] != 'qdq' and len(layer.get_weights()) != 0:
raise ValueError(layer.name + ' not found in pretrained model.')
# Ignore QDQ
continue
try:
layer.set_weights(l_return.get_weights())
except ValueError:
if strict_mode:
raise ValueError(layer.name + ' has incorrect shape in pretrained model.')
continue
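# Hedged usage sketch (added commentary, not part of the original file): weights are copied
# layer-by-layer by name, so the pretrained model only needs layer names matching the training
# model up to the first 'yolo_*' layer. The path, spec and key below are placeholders.
#
#   from nvidia_tao_tf1.cv.yolo_v4.utils import model_io
#   pretrained = model_io.load_model("/workspace/pretrained_backbone.hdf5", spec, key=key)
#   _load_pretrain_weights(pretrained, train_model)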
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/builders/model_builder.py |
"""Module containing builders of the core yolo component blocks.""" | tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/builders/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''build model for evaluation.'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.models import Model
from nvidia_tao_tf1.cv.yolo_v3.layers.nms_layer import NMSLayer
from nvidia_tao_tf1.cv.yolo_v4.layers.decode_layer import YOLOv4DecodeLayer
def build(training_model,
confidence_thresh=0.05,
iou_threshold=0.5,
top_k=200,
include_encoded_head=False,
nms_on_cpu=False):
'''
build model for evaluation.
Args:
training_model: Keras model built from model_builder. Last layer is encoded prediction.
        confidence_thresh: ignore all boxes with confidence less than this threshold
iou_threshold: iou threshold for NMS
top_k: pick top k boxes with highest confidence scores after NMS
include_encoded_head: whether to include original model output into final model output.
nms_on_cpu(bool): Flag to force NMS to run on CPU as GPU NMS is flaky with tfrecord dataset.
Returns:
eval_model: keras model that outputs at most top_k detection boxes.
'''
decoded_predictions = YOLOv4DecodeLayer(name='decoded_predictions')
x = decoded_predictions(training_model.layers[-1].output)
nms = NMSLayer(output_size=top_k,
iou_threshold=iou_threshold,
score_threshold=confidence_thresh,
force_on_cpu=nms_on_cpu,
name="NMS")
x = nms(x)
x = [training_model.layers[-1].output, x] if include_encoded_head else x
eval_model = Model(inputs=training_model.layers[0].input,
outputs=x)
return eval_model
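# Hedged usage sketch (added commentary, not part of the original module). Assuming
# `training_model` is a YOLOv4 Keras model whose last layer is the encoded prediction
# (as produced by the model builders in this package), an evaluation model with decode
# and NMS heads can be derived as below; the thresholds and `image_batch` (assumed to be
# a preprocessed channels-first batch) are placeholders, normally driven by nms_config.
#
#   eval_model = build(training_model,
#                      confidence_thresh=0.001,
#                      iou_threshold=0.5,
#                      top_k=200,
#                      nms_on_cpu=True)
#   detections = eval_model.predict(image_batch)  # at most top_k boxes per image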
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/builders/eval_builder.py |
"""Utils module for YOLOv4.""" | tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper function to load model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import keras
from nvidia_tao_tf1.cv.common.utils import CUSTOM_OBJS
from nvidia_tao_tf1.cv.common.utils import load_keras_model
from nvidia_tao_tf1.cv.yolo_v4.layers.split import Split
from nvidia_tao_tf1.cv.yolo_v4.losses.yolo_loss import YOLOv4Loss
from nvidia_tao_tf1.encoding import encoding
def get_model_with_input(model_path, input_layer):
    """Load a model from disk while substituting the given layer for its original input layer."""
def get_input_layer(*arg, **kargs):
return input_layer
# Following syntax only works in python3.
return keras.models.load_model(
model_path,
custom_objects={
**CUSTOM_OBJS,
'InputLayer': get_input_layer,
"Split": Split
}
)
def load_model(model_path, experiment_spec=None, input_shape=None, key=None):
"""Load a model either in .tlt format or .hdf5 format."""
_, ext = os.path.splitext(model_path)
if ext == '.hdf5':
yololoss = YOLOv4Loss(experiment_spec.yolov4_config.loss_loc_weight,
experiment_spec.yolov4_config.loss_neg_obj_weights,
experiment_spec.yolov4_config.loss_class_weights,
experiment_spec.yolov4_config.label_smoothing,
experiment_spec.yolov4_config.matching_neutral_box_iou,
experiment_spec.yolov4_config.focal_loss_alpha,
experiment_spec.yolov4_config.focal_loss_gamma)
CUSTOM_OBJS['compute_loss'] = yololoss.compute_loss
# directly load model, add dummy loss since loss is never required.
if input_shape is None:
# load the model to get img width/height
model = load_keras_model(model_path,
custom_objects=CUSTOM_OBJS)
else:
input_layer = keras.layers.InputLayer(input_shape=input_shape, name="Input")
model = get_model_with_input(model_path, input_layer)
elif ext == '.tlt':
os_handle, temp_file_name = tempfile.mkstemp(suffix='.hdf5')
os.close(os_handle)
with open(temp_file_name, 'wb') as temp_file, open(model_path, 'rb') as encoded_file:
encoding.decode(encoded_file, temp_file, key)
# recursive call
model = load_model(temp_file_name, experiment_spec, input_shape, None)
os.remove(temp_file_name)
else:
raise NotImplementedError("{0} file is not supported!".format(ext))
return model
def save_model(keras_model, model_path, key, save_format=None):
    """Save a model to .hdf5 format; other extensions raise NotImplementedError."""
_, ext = os.path.splitext(model_path)
if (save_format is not None) and (save_format != ext):
# recursive call to save a correct model
return save_model(keras_model, model_path + save_format, key, None)
if ext == '.hdf5':
keras_model.save(model_path, overwrite=True, include_optimizer=True)
else:
raise NotImplementedError("{0} file is not supported!".format(ext))
return model_path
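# Hedged round-trip sketch (added commentary, not part of the original module): `load_model`
# accepts either an encrypted .tlt file (decoded with `key`) or a plain .hdf5 file, while
# `save_model` only writes .hdf5. The paths, spec and key below are hypothetical placeholders.
#
#   model = load_model("/workspace/yolov4_epoch_080.tlt", experiment_spec, key="nvidia_tlt")
#   save_model(model, "/workspace/yolov4_epoch_080.hdf5", "nvidia_tlt")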
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/utils/model_io.py |
"""Part of the training engine related to Python generators of array data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
from keras import backend as K
from keras import callbacks as cbks
from keras.engine.training_utils import iter_sequence_infinite
from keras.utils.data_utils import GeneratorEnqueuer
from keras.utils.data_utils import OrderedEnqueuer
from keras.utils.data_utils import Sequence
from keras.utils.generic_utils import to_list
from nvidia_tao_tf1.cv.common.utils import tensorboard_images
def fit_generator(model,
tb_writer,
img_means,
max_image_num,
generator,
steps_per_epoch=None,
epochs=1,
verbose=1,
callbacks=None,
validation_data=None,
validation_steps=None,
class_weight=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
shuffle=True,
initial_epoch=0):
"""See docstring for `Model.fit_generator`."""
wait_time = 0.01 # in seconds
epoch = initial_epoch
do_validation = bool(validation_data)
model._make_train_function()
if do_validation:
model._make_test_function()
is_sequence = isinstance(generator, Sequence)
if not is_sequence and use_multiprocessing and workers > 1:
warnings.warn(
UserWarning('Using a generator with `use_multiprocessing=True`'
' and multiple workers may duplicate your data.'
                        ' Please consider using the `keras.utils.Sequence`'
' class.'))
if steps_per_epoch is None:
if is_sequence:
steps_per_epoch = len(generator)
else:
raise ValueError('`steps_per_epoch=None` is only valid for a'
' generator based on the '
'`keras.utils.Sequence`'
' class. Please specify `steps_per_epoch` '
'or use the `keras.utils.Sequence` class.')
# python 2 has 'next', 3 has '__next__'
# avoid any explicit version checks
val_gen = (hasattr(validation_data, 'next') or
hasattr(validation_data, '__next__') or
isinstance(validation_data, Sequence))
if (val_gen and not isinstance(validation_data, Sequence) and
not validation_steps):
raise ValueError('`validation_steps=None` is only valid for a'
' generator based on the `keras.utils.Sequence`'
' class. Please specify `validation_steps` or use'
' the `keras.utils.Sequence` class.')
# Prepare display labels.
out_labels = model.metrics_names
callback_metrics = out_labels + ['val_' + n for n in out_labels]
# prepare callbacks
model.history = cbks.History()
_callbacks = [cbks.BaseLogger(
stateful_metrics=model.stateful_metric_names)]
if verbose:
_callbacks.append(
cbks.ProgbarLogger(
count_mode='steps',
stateful_metrics=model.stateful_metric_names))
_callbacks += (callbacks or []) + [model.history]
callbacks = cbks.CallbackList(_callbacks)
# it's possible to callback a different model than self:
if hasattr(model, 'callback_model') and model.callback_model:
callback_model = model.callback_model
else:
callback_model = model
callbacks.set_model(callback_model)
callbacks.set_params({
'epochs': epochs,
'steps': steps_per_epoch,
'verbose': verbose,
'do_validation': do_validation,
'metrics': callback_metrics,
})
callbacks.on_train_begin()
enqueuer = None
val_enqueuer = None
try:
if do_validation:
if val_gen and workers > 0:
# Create an Enqueuer that can be reused
val_data = validation_data
if isinstance(val_data, Sequence):
val_enqueuer = OrderedEnqueuer(
val_data,
use_multiprocessing=use_multiprocessing)
validation_steps = validation_steps or len(val_data)
else:
val_enqueuer = GeneratorEnqueuer(
val_data,
use_multiprocessing=use_multiprocessing)
val_enqueuer.start(workers=workers,
max_queue_size=max_queue_size)
val_enqueuer_gen = val_enqueuer.get()
elif val_gen:
val_data = validation_data
if isinstance(val_data, Sequence):
val_enqueuer_gen = iter_sequence_infinite(val_data)
validation_steps = validation_steps or len(val_data)
else:
val_enqueuer_gen = val_data
else:
# Prepare data for validation
if len(validation_data) == 2:
val_x, val_y = validation_data
val_sample_weight = None
elif len(validation_data) == 3:
val_x, val_y, val_sample_weight = validation_data
else:
raise ValueError('`validation_data` should be a tuple '
'`(val_x, val_y, val_sample_weight)` '
'or `(val_x, val_y)`. Found: ' +
str(validation_data))
val_x, val_y, val_sample_weights = model._standardize_user_data(
val_x, val_y, val_sample_weight)
val_data = val_x + val_y + val_sample_weights
if model.uses_learning_phase and not isinstance(K.learning_phase(),
int):
val_data += [0.]
for cbk in callbacks:
cbk.validation_data = val_data
if workers > 0:
if is_sequence:
enqueuer = OrderedEnqueuer(
generator,
use_multiprocessing=use_multiprocessing,
shuffle=shuffle)
else:
enqueuer = GeneratorEnqueuer(
generator,
use_multiprocessing=use_multiprocessing,
wait_time=wait_time)
enqueuer.start(workers=workers, max_queue_size=max_queue_size)
output_generator = enqueuer.get()
else:
if is_sequence:
output_generator = iter_sequence_infinite(generator)
else:
output_generator = generator
callback_model.stop_training = False
# Construct epoch logs.
epoch_logs = {}
while epoch < epochs:
for m in model.stateful_metric_functions:
m.reset_states()
callbacks.on_epoch_begin(epoch)
steps_done = 0
batch_index = 0
while steps_done < steps_per_epoch:
generator_output = next(output_generator)
if not hasattr(generator_output, '__len__'):
raise ValueError('Output of generator should be '
'a tuple `(x, y, sample_weight)` '
'or `(x, y)`. Found: ' +
str(generator_output))
raw_labels = None
if len(generator_output) == 2:
x, y = generator_output
sample_weight = None
if isinstance(y, tuple):
y, raw_labels = y
elif len(generator_output) == 3:
x, y, sample_weight = generator_output
if isinstance(y, tuple):
y, raw_labels = y
else:
raise ValueError('Output of generator should be '
'a tuple `(x, y, sample_weight)` '
'or `(x, y)`. Found: ' +
str(generator_output))
# build batch logs
batch_logs = {}
if x is None or len(x) == 0:
# Handle data tensors support when no input given
# step-size = 1 for data tensors
batch_size = 1
elif isinstance(x, list):
batch_size = x[0].shape[0]
elif isinstance(x, dict):
batch_size = list(x.values())[0].shape[0]
else:
batch_size = x.shape[0]
batch_logs['batch'] = batch_index
batch_logs['size'] = batch_size
callbacks.on_batch_begin(batch_index, batch_logs)
outs = model.train_on_batch(x, y,
sample_weight=sample_weight,
class_weight=class_weight)
# Visualize images on every epoch start
if (tb_writer is not None) and batch_index == 0:
tensorboard_images(
"augmented_images",
x,
raw_labels,
tb_writer,
batch_index,
img_means=img_means,
max_num=max_image_num
)
outs = to_list(outs)
for l, o in zip(out_labels, outs):
batch_logs[l] = o
callbacks.on_batch_end(batch_index, batch_logs)
batch_index += 1
steps_done += 1
# Epoch finished.
if steps_done >= steps_per_epoch and do_validation:
if val_gen:
val_outs = model.evaluate_generator(
val_enqueuer_gen,
validation_steps,
workers=0)
else:
# No need for try/except because
# data has already been validated.
val_outs = model.evaluate(
val_x, val_y,
batch_size=batch_size,
sample_weight=val_sample_weights,
verbose=0)
val_outs = to_list(val_outs)
# Same labels assumed.
for l, o in zip(out_labels, val_outs):
epoch_logs['val_' + l] = o
if callback_model.stop_training:
break
callbacks.on_epoch_end(epoch, epoch_logs)
epoch += 1
if callback_model.stop_training:
break
finally:
try:
if enqueuer is not None:
enqueuer.stop()
finally:
if val_enqueuer is not None:
val_enqueuer.stop()
callbacks.on_train_end()
return model.history
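# Note (added commentary, not part of the derived Keras code above): this patched
# `fit_generator` differs from `keras.Model.fit_generator` in that it takes a TensorBoard
# `FileWriter` (`tb_writer`) plus `img_means`/`max_image_num` so the first augmented batch
# of every epoch can be written as an image summary, and it lets the generator yield
# `(x, (y_encoded, raw_labels))` so raw labels are available for that visualization.
# A hedged call sketch, with every object assumed to exist already:
#
#   fit_generator(model, tb_writer, img_means=(103.939, 116.779, 123.68), max_image_num=3,
#                 generator=train_sequence, steps_per_epoch=len(train_sequence), epochs=80,
#                 callbacks=callbacks, workers=4, shuffle=False)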
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/utils/fit_generator.py |
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
"""Load an experiment spec file to run YOLOv4 training, evaluation, pruning."""
from google.protobuf.text_format import Merge as merge_text_proto
import nvidia_tao_tf1.cv.yolo_v4.proto.experiment_pb2 as experiment_pb2
def load_experiment_spec(spec_path=None):
"""Load experiment spec from a .txt file and return an experiment_pb2.Experiment object.
Args:
spec_path (str): location of a file containing the custom experiment spec proto.
Returns:
experiment_spec: protocol buffer instance of type experiment_pb2.Experiment.
"""
experiment_spec = experiment_pb2.Experiment()
    with open(spec_path, "r") as spec_file:
        merge_text_proto(spec_file.read(), experiment_spec)
# dataset_config
assert len(experiment_spec.dataset_config.target_class_mapping.values()) > 0, \
"Please specify target_class_mapping"
data_sources = experiment_spec.dataset_config.data_sources
assert len(data_sources) > 0, "Please specify training data sources"
train_label_types = [
s.WhichOneof("labels_format") for s in data_sources
]
assert len(list(set(train_label_types))) == 1, (
"Label format should be identical for all training data sources. Got {}".format(
train_label_types
)
)
if train_label_types[0] == "tfrecords_path":
        assert len(experiment_spec.dataset_config.image_extension) > 0, (
            "`image_extension` should be specified in `dataset_config` if the training "
            "label format is TFRecord."
)
if len(experiment_spec.dataset_config.validation_data_sources) > 0:
val_data_source = experiment_spec.dataset_config.validation_data_sources
val_label_types = [
s.WhichOneof("labels_format") for s in val_data_source
]
assert len(list(set(val_label_types))) == 1, (
"Label format should be identical for all validation data sources. Got {}".format(
val_label_types
)
)
if val_label_types[0] == "tfrecords_path":
            assert len(experiment_spec.dataset_config.image_extension) > 0, (
                "`image_extension` should be specified in `dataset_config` if the validation "
                "label format is TFRecord."
)
else:
assert data_sources[0].WhichOneof("labels_format") == "tfrecords_path", (
"Validation dataset specified by `validation_fold` requires the training label format "
"to be TFRecords."
)
# augmentation config
assert experiment_spec.augmentation_config.output_channel in [1, 3], \
"output_channel must be either 1 or 3."
img_mean = experiment_spec.augmentation_config.image_mean
img_depth = int(experiment_spec.augmentation_config.output_depth) or 8
img_ext = str(experiment_spec.dataset_config.image_extension)
if experiment_spec.augmentation_config.output_channel == 3:
if img_mean:
            assert all(c in img_mean for c in ['r', 'g', 'b']), (
"'r', 'g', 'b' should all be present in image_mean "
"for images with 3 channels."
)
assert img_depth == 8, (
f"RGB images can only support 8-bit depth, got {img_depth}"
)
else:
if img_mean:
assert 'l' in img_mean, (
"'l' should be present in image_mean for images "
"with 1 channel."
)
assert img_depth in [8, 16], (
f"Grayscale images can only support 8-bit or 16-bit depth, got {img_depth}"
)
if img_depth == 16:
assert img_ext in ["png", "PNG"], (
f"16-bit images can only be PNG(png) format, got {img_ext}"
)
assert 0.0 <= experiment_spec.augmentation_config.hue <= 1.0, "hue must be within [0, 1]"
assert experiment_spec.augmentation_config.saturation >= 1.0, "saturation must be at least 1.0"
assert experiment_spec.augmentation_config.exposure >= 1.0, "exposure must be at least 1.0"
assert 0.0 <= experiment_spec.augmentation_config.vertical_flip <= 1.0, \
"vertical_flip must be within [0, 1]"
assert 0.0 <= experiment_spec.augmentation_config.horizontal_flip <= 1.0, \
"horizontal_flip must be within [0, 1]"
assert 0.0 <= experiment_spec.augmentation_config.jitter <= 1.0, "jitter must be within [0, 1]"
assert experiment_spec.augmentation_config.output_width >= 32, "width must be at least 32"
assert experiment_spec.augmentation_config.output_width % 32 == 0, \
"width must be multiple of 32"
assert experiment_spec.augmentation_config.output_height >= 32, "height must be at least 32"
assert experiment_spec.augmentation_config.output_height % 32 == 0, \
"height must be multiple of 32"
assert experiment_spec.augmentation_config.randomize_input_shape_period >= 0, \
"randomize_input_shape_period should be non-negative"
assert 0.0 <= experiment_spec.augmentation_config.mosaic_prob <= 1.0, \
"mosaic_prob must be within [0, 1]"
assert 0.0 < experiment_spec.augmentation_config.mosaic_min_ratio < 0.5, \
"mosaic_min_ratio must be within (0, 0.5)"
# training config
assert experiment_spec.training_config.batch_size_per_gpu > 0, "batch size must be positive"
    assert experiment_spec.training_config.num_epochs > 0, \
        "number of training epochs must be positive"
assert experiment_spec.training_config.checkpoint_interval > 0, \
"checkpoint interval must be positive"
# eval config
assert experiment_spec.eval_config.batch_size > 0, "batch size must be positive"
assert 0.0 < experiment_spec.eval_config.matching_iou_threshold <= 1.0, \
"matching_iou_threshold must be within (0, 1]"
# nms config
assert 0.0 < experiment_spec.nms_config.clustering_iou_threshold <= 1.0, \
"clustering_iou_threshold must be within (0, 1]"
# yolo_v4 config
assert 0 < experiment_spec.yolov4_config.box_matching_iou <= 1.0, \
"box_matching_iou must be within (0, 1]"
assert 0.25 < experiment_spec.yolov4_config.matching_neutral_box_iou <= 1.0, \
"matching_neutral_box_iou must be within (0.25, 1]"
assert experiment_spec.yolov4_config.loss_loc_weight >= 0.0, \
"all loss weights must be non-negative"
assert experiment_spec.yolov4_config.loss_neg_obj_weights >= 0.0, \
"all loss weights must be non-negative"
assert experiment_spec.yolov4_config.loss_class_weights >= 0.0, \
"all loss weights must be non-negative"
assert 0.0 <= experiment_spec.yolov4_config.label_smoothing <= 0.3, \
"label_smoothing must be within [0, 0.3]"
    assert 0.0 <= experiment_spec.yolov4_config.focal_loss_alpha < 1.0, \
        "focal_loss_alpha must be within [0.0, 1.0); set it to 0.0 to disable focal loss."
assert experiment_spec.yolov4_config.focal_loss_gamma >= 0.0, \
"focal_loss_gamma must be at least 0.0"
assert len(experiment_spec.yolov4_config.big_anchor_shape) > 0, (
"`big_anchor_shape` is not provided in spec file."
)
assert len(experiment_spec.yolov4_config.mid_anchor_shape) > 0, (
"`mid_anchor_shape` is not provided in spec file."
)
# yolov4-tiny does not have small anchors
if experiment_spec.yolov4_config.arch != "cspdarknet_tiny":
assert len(experiment_spec.yolov4_config.small_anchor_shape) > 0, (
"`small_anchor_shape` is not provided in spec file."
)
else:
assert len(experiment_spec.yolov4_config.small_anchor_shape) == 0, (
"`small_anchor_shape` is not supported by `cspdarknet_tiny` backbone."
)
assert 0.0 <= experiment_spec.yolov4_config.big_grid_xy_extend <= 0.3, \
"big_grid_xy_extend must be within [0, 0.3]"
assert 0.0 <= experiment_spec.yolov4_config.mid_grid_xy_extend <= 0.3, \
"mid_grid_xy_extend must be within [0, 0.3]"
if experiment_spec.yolov4_config.arch != "cspdarknet_tiny":
assert 0.0 <= experiment_spec.yolov4_config.small_grid_xy_extend <= 0.3, \
"`small_grid_xy_extend` must be within [0, 0.3]"
else:
        assert 0.0 == experiment_spec.yolov4_config.small_grid_xy_extend, \
            "`small_grid_xy_extend` is not supported by the `cspdarknet_tiny` backbone."
# Validate early_stopping config
if experiment_spec.training_config.HasField("early_stopping"):
es = experiment_spec.training_config.early_stopping
if es.monitor not in ["loss"]:
            raise ValueError(
                "Only `loss` is a supported monitor"
f", got {es.monitor}"
)
if es.min_delta < 0.:
raise ValueError(
f"`min_delta` should be non-negative, got {es.min_delta}"
)
if es.patience == 0:
raise ValueError(
f"`patience` should be positive, got {es.patience}"
)
return experiment_spec
def validation_labels_format(spec):
"""The format of the labels of validation set."""
if len(spec.dataset_config.validation_data_sources) > 0:
if (
spec.dataset_config.validation_data_sources[0].WhichOneof("labels_format") ==
"label_directory_path"
):
return "keras_sequence"
return "tfrecords"
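# Hedged usage sketch (added commentary, not part of the original module). The spec path is
# a hypothetical placeholder; `load_experiment_spec` both parses the protobuf text spec and
# validates it, and `validation_labels_format` reports whether validation labels come from
# TFRecords or from a label directory consumed through a Keras sequence.
#
#   spec = load_experiment_spec("/workspace/specs/yolo_v4_train_kitti.txt")
#   val_format = validation_labels_format(spec)  # "tfrecords" or "keras_sequence"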
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/utils/spec_loader.py |
"""YOLO v4 class to build the model and pipelines.
@zeyuz: code largely from yolov3_model.py by @zhimengf
"""
from contextlib import contextmanager
from math import ceil
from multiprocessing import cpu_count
import os
import shutil
import tempfile
import keras
from keras.backend import set_learning_phase
from keras.callbacks import EarlyStopping, TerminateOnNaN
from keras.layers import Input
from keras.models import Model
import numpy as np
import six
import tensorflow as tf
from nvidia_tao_tf1.cv.common.callbacks.auto_class_weighting_callback import AutoClassWeighting
from nvidia_tao_tf1.cv.common.callbacks.detection_metric_callback import DetectionMetricCallback
from nvidia_tao_tf1.cv.common.callbacks.enc_model_saver_callback import KerasModelSaver
from nvidia_tao_tf1.cv.common.callbacks.loggers import TAOStatusLogger
from nvidia_tao_tf1.cv.common.callbacks.model_ema_callback import ModelEMACallback
from nvidia_tao_tf1.cv.common.evaluator.ap_evaluator import APEvaluator
from nvidia_tao_tf1.cv.common.utils import (
build_lrs_from_config,
build_optimizer_from_config,
build_regularizer_from_config,
CUSTOM_OBJS,
TensorBoard
)
from nvidia_tao_tf1.cv.common.utils import OneIndexedCSVLogger as CSVLogger
from nvidia_tao_tf1.cv.yolo_v3.data_loader.generate_shape_tensors import gen_random_shape_tensors
from nvidia_tao_tf1.cv.yolo_v3.metric.yolov3_metric_callback import YOLOv3MetricCallback
from nvidia_tao_tf1.cv.yolo_v4.architecture.yolo_arch import YOLO
from nvidia_tao_tf1.cv.yolo_v4.builders import eval_builder
from nvidia_tao_tf1.cv.yolo_v4.builders.model_builder import _load_pretrain_weights
from nvidia_tao_tf1.cv.yolo_v4.dataio.tf_data_pipe import YOLOv4TFDataPipe
from nvidia_tao_tf1.cv.yolo_v4.losses.yolo_loss import YOLOv4Loss
from nvidia_tao_tf1.cv.yolo_v4.utils import model_io
from nvidia_tao_tf1.cv.yolo_v4.utils.fit_generator import fit_generator
from nvidia_tao_tf1.cv.yolo_v4.utils.model_io import get_model_with_input
from nvidia_tao_tf1.cv.yolo_v4.utils.spec_loader import validation_labels_format
@contextmanager
def patch_freeze_bn(freeze_bn):
"""context for patching BN to freeze it during model creation."""
def compose_call(prev_call_method):
def call(self, inputs, training=False):
return prev_call_method(self, inputs, training)
return call
prev_batchnorm_call = keras.layers.normalization.BatchNormalization.call
if freeze_bn:
keras.layers.normalization.BatchNormalization.call = compose_call(
prev_batchnorm_call
)
yield
if freeze_bn:
keras.layers.normalization.BatchNormalization.call = prev_batchnorm_call
class YOLOv4Model(object):
"""YOLO v4 model."""
def __init__(self, spec, key):
"""Initialize."""
self.spec = spec
self.yolov4_config = spec.yolov4_config
self.key = key
# dataset classes
self.class_mapping = spec.dataset_config.target_class_mapping
self.classes = sorted({str(x).lower() for x in self.class_mapping.values()})
self.n_classes = len(self.classes)
# model architecture
self.arch = spec.yolov4_config.arch
self.activation = spec.yolov4_config.activation
self.arch_name = self.arch
if self.arch_name in ['resnet', 'darknet', 'cspdarknet', 'vgg']:
# append nlayers into meta_arch_name
self.arch_name += str(spec.yolov4_config.nlayers)
self.nlayers = spec.yolov4_config.nlayers
self.freeze_blocks = spec.yolov4_config.freeze_blocks
self.freeze_bn = spec.yolov4_config.freeze_bn
self.force_relu = spec.yolov4_config.force_relu
self.qat = spec.training_config.enable_qat
if self.arch != "cspdarknet_tiny":
self.predictor_names = [
'conv_big_object',
'conv_mid_object',
'conv_sm_object'
]
else:
self.predictor_names = [
'conv_big_object',
'conv_mid_object'
]
# NMS config
self.nms_confidence_thresh = spec.nms_config.confidence_threshold
self.nms_iou_threshold = spec.nms_config.clustering_iou_threshold
self.nms_top_k = spec.nms_config.top_k
self.nms_on_cpu = False
if self.train_labels_format == "tfrecords" or self.val_labels_format == "tfrecords":
self.nms_on_cpu = True
# evaluation params
self.ap_mode = spec.eval_config.average_precision_mode
matching_iou = spec.eval_config.matching_iou_threshold
self.matching_iou = matching_iou if matching_iou > 0 else 0.5
self.ap_mode_dict = {0: "sample", 1: "integrate"}
self.average_precision_mode = self.ap_mode_dict[self.ap_mode]
# training
self.training_config = spec.training_config
self.use_mp = spec.training_config.use_multiprocessing
self.n_workers = spec.training_config.n_workers or (cpu_count()-1)
self.max_queue_size = spec.training_config.max_queue_size or 20
self.num_epochs = spec.training_config.num_epochs
self.bs = spec.training_config.batch_size_per_gpu
self.lrconfig = spec.training_config.learning_rate
self.ckpt_interval = spec.training_config.checkpoint_interval
self.augmentation_config = spec.augmentation_config
self.image_channels = int(self.augmentation_config.output_channel)
self.image_width = int(self.augmentation_config.output_width)
self.image_height = int(self.augmentation_config.output_height)
self.image_depth = int(self.augmentation_config.output_depth) or 8
self.shape_period = int(self.augmentation_config.randomize_input_shape_period)
self.load_type = spec.training_config.WhichOneof('load_model')
self.hmin_ratio = 0.6
self.hmax_ratio = 1.5
self.wmin_ratio = 0.6
self.wmax_ratio = 1.5
self.build_regularizer()
self.generate_random_shape()
self.h_tensor_val = tf.constant(
self.image_height,
dtype=tf.int32
)
self.w_tensor_val = tf.constant(
self.image_width,
dtype=tf.int32
)
self.get_val_fmap_stride()
self.parse_init_epoch()
self.callbacks = []
self.losses = None
self.metrics = None
self.optimizer = None
self.target_tensors = None
self.tb_callback = None
def generate_random_shape(self):
"""generate random shape for multi-scale training."""
if self.shape_period > 0:
self.h_tensor, self.w_tensor = gen_random_shape_tensors(
self.shape_period,
int(self.image_height * self.hmin_ratio),
int(self.image_height * self.hmax_ratio),
int(self.image_width * self.wmin_ratio),
int(self.image_width * self.wmax_ratio)
)
else:
self.h_tensor = tf.constant(
self.image_height,
dtype=tf.int32
)
self.w_tensor = tf.constant(
self.image_width,
dtype=tf.int32
)
def parse_init_epoch(self):
"""Parse initial epoch."""
if self.load_type == 'resume_model_path':
try:
epoch = int(self.training_config.resume_model_path.split('.')[-2].split('_')[-1])
except Exception:
raise ValueError("Cannot parse the checkpoint path. Did you rename it?")
else:
epoch = 0
self.init_epoch = epoch
@property
def train_labels_format(self):
"""The format of the labels of training set."""
if self.spec.dataset_config.data_sources[0].WhichOneof("labels_format") == \
"tfrecords_path":
return "tfrecords"
return "keras_sequence"
@property
def val_labels_format(self):
"""The format of the labels of validation set."""
return validation_labels_format(self.spec)
def build_regularizer(self):
"""build regularizer."""
self.regularizer = build_regularizer_from_config(
self.training_config.regularizer
)
def build_optimizer(self, hvd):
"""build optimizer."""
optim = build_optimizer_from_config(
self.training_config.optimizer
)
self.set_optimizer(optim, hvd)
def eval_str(self, s):
"""If s is a string, return the eval results. Else return itself."""
if isinstance(s, six.string_types):
if len(s) > 0:
return eval(s)
return None
return s
@property
def big_anchor_shape(self):
"""big anchor shape."""
big_anchor = self.eval_str(self.yolov4_config.big_anchor_shape)
assert len(big_anchor) > 0, "big_anchor_shape in spec cannot be empty"
return big_anchor
@property
def mid_anchor_shape(self):
"""middle anchor shape."""
mid_anchor = self.eval_str(self.yolov4_config.mid_anchor_shape)
assert len(mid_anchor) > 0, "mid_anchor_shape in spec cannot be empty"
return mid_anchor
@property
def small_anchor_shape(self):
"""small anchor shape."""
small_anchor = self.eval_str(self.yolov4_config.small_anchor_shape)
assert len(small_anchor) > 0, "small_anchor_shape in spec cannot be empty"
return small_anchor
@property
def grid_scale_xy(self):
"""grid scale."""
# cspdarknet tiny only has 2 heads
if self.arch == "cspdarknet_tiny":
grid_scale_xy = [1.0 + self.yolov4_config.big_grid_xy_extend,
1.0 + self.yolov4_config.mid_grid_xy_extend]
else:
grid_scale_xy = [1.0 + self.yolov4_config.big_grid_xy_extend,
1.0 + self.yolov4_config.mid_grid_xy_extend,
1.0 + self.yolov4_config.small_grid_xy_extend]
return grid_scale_xy
def anchor_to_relative(self, x):
"""convert absolute anchors to relative anchors."""
        return (np.array(x, dtype=float).reshape(-1, 2) / np.array(
            [self.image_width, self.image_height]).reshape(1, 2)).tolist()
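    # Worked example (illustrative numbers only, not taken from any spec): with
    # output_width=640 and output_height=384, the absolute anchor list [(64, 96)]
    # maps to [[0.1, 0.25]], i.e. width / image_width and height / image_height.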
@property
def all_anchors(self):
"""all absolute anchors."""
if self.arch == "cspdarknet_tiny":
return [self.big_anchor_shape, self.mid_anchor_shape]
return [self.big_anchor_shape, self.mid_anchor_shape, self.small_anchor_shape]
@property
def all_relative_anchors(self):
"""all relative anchors."""
return [self.anchor_to_relative(x) for x in self.all_anchors]
def build_keras_model(self, input_image=None, input_shape=None, val=False):
"""build a keras model from scratch."""
model_input = Input(
shape=input_shape or (self.image_channels, None, None),
tensor=input_image,
name="Input"
)
yolo_model = YOLO(
model_input,
self.arch,
self.nlayers,
num_classes=self.n_classes,
kernel_regularizer=self.regularizer,
anchors=self.all_relative_anchors,
grid_scale_xy=self.grid_scale_xy,
freeze_blocks=self.freeze_blocks,
freeze_bn=self.freeze_bn,
qat=self.qat,
force_relu=self.force_relu,
activation=self.activation
)
if val:
# if it is a validation model, return it directly
return yolo_model
# rename it
self.keras_model = Model(
inputs=model_input,
outputs=yolo_model.outputs,
name='yolo_' + self.arch
)
self.inputs = self.keras_model.inputs
self.outputs = self.keras_model.outputs
return None
def load_pretrained_model(self, model_path):
"""load pretrained model's weights."""
pretrained_model = model_io.load_model(
model_path,
self.spec,
key=self.key
)
_load_pretrain_weights(pretrained_model, self.keras_model)
def override_regularizer(self, train_model):
"""override regularizer."""
model_config = train_model.get_config()
for layer, layer_config in zip(train_model.layers, model_config['layers']):
if hasattr(layer, 'kernel_regularizer'):
layer_config['config']['kernel_regularizer'] = self.regularizer
reg_model = Model.from_config(
model_config,
custom_objects=CUSTOM_OBJS
)
reg_model.set_weights(train_model.get_weights())
return reg_model
def apply_model_to_new_inputs(self, model, tensor, input_shape):
"""Apply model to new inputs."""
input_layer = keras.layers.InputLayer(
input_shape=input_shape,
input_tensor=tensor,
name="Input",
)
_, temp_model_path = tempfile.mkstemp()
os.remove(temp_model_path)
model.save(temp_model_path)
with patch_freeze_bn(self.freeze_bn):
new_model = get_model_with_input(temp_model_path, input_layer)
os.remove(temp_model_path)
return new_model
def load_pruned_model(self, pruned_model_path, input_tensor, input_shape):
"""load pruned model."""
pruned_model = model_io.load_model(
pruned_model_path,
self.spec,
key=self.key,
input_shape=input_shape
)
pruned_model = self.override_regularizer(
pruned_model
)
if input_tensor is not None:
self.keras_model = self.apply_model_to_new_inputs(
pruned_model,
input_tensor,
input_shape
)
else:
self.keras_model = pruned_model
self.inputs = self.keras_model.inputs
self.outputs = self.keras_model.outputs
def set_optimizer(self, opt, hvd):
'''setup optimizer.'''
if self.optimizer is not None:
return
self.optimizer = hvd.DistributedOptimizer(opt)
def resume_model(self, checkpoint_path, input_tensor, input_shape, hvd):
'''resume model from checkpoints and continue to train.'''
resumed_model = model_io.load_model(
checkpoint_path,
self.spec,
key=self.key,
input_shape=input_shape
)
optimizer = resumed_model.optimizer
if input_tensor is not None:
resumed_model = self.apply_model_to_new_inputs(
resumed_model,
input_tensor,
input_shape
)
self.keras_model = resumed_model
self.inputs = self.keras_model.inputs
self.outputs = self.keras_model.outputs
self.set_optimizer(optimizer, hvd)
def set_target_tensors(self, encoded_labels):
"""set target tensors."""
if self.target_tensors is not None:
return
self.target_tensors = [encoded_labels]
def build_losses(self):
"""build loss."""
if self.losses is not None:
return
yololoss = YOLOv4Loss(
self.spec.yolov4_config.loss_loc_weight,
self.spec.yolov4_config.loss_neg_obj_weights,
self.spec.yolov4_config.loss_class_weights,
self.spec.yolov4_config.label_smoothing,
self.spec.yolov4_config.matching_neutral_box_iou,
self.spec.yolov4_config.focal_loss_alpha,
self.spec.yolov4_config.focal_loss_gamma
)
self.losses = [yololoss.compute_loss]
self.losses_no_reduce = [yololoss.compute_loss_no_reduce]
def build_hvd_callbacks(self, hvd):
'''setup horovod callbacks.'''
self.callbacks.append(hvd.callbacks.BroadcastGlobalVariablesCallback(0))
self.callbacks.append(hvd.callbacks.MetricAverageCallback())
self.callbacks.append(TerminateOnNaN())
def build_lr_scheduler(self, train_dataset, hvd):
"""build LR scheduler."""
init_epoch = self.init_epoch
if type(train_dataset) == YOLOv4TFDataPipe:
total_num = train_dataset.num_samples
else:
total_num = train_dataset.n_samples
iters_per_epoch = int(ceil(total_num / self.bs / hvd.size()))
max_iterations = self.num_epochs * iters_per_epoch
lr_scheduler = build_lrs_from_config(self.lrconfig, max_iterations, hvd.size())
init_step = init_epoch * iters_per_epoch
lr_scheduler.reset(init_step)
self.callbacks.append(lr_scheduler)
self.iters_per_epoch = iters_per_epoch
def build_checkpointer(self, ckpt_path, verbose):
"""build checkpointer."""
model_checkpointer = KerasModelSaver(
ckpt_path,
self.key,
self.ckpt_interval,
last_epoch=self.num_epochs,
verbose=verbose
)
self.callbacks.append(model_checkpointer)
def build_csvlogger(self, csv_path):
"""build CSV logger."""
csv_logger = CSVLogger(
filename=csv_path,
separator=',',
append=False
)
self.callbacks.append(csv_logger)
def build_training_model(self, hvd):
"""build the training model in various cases."""
if type(self.train_dataset) == YOLOv4TFDataPipe:
input_image = self.train_dataset.images
else:
input_image = None
if self.load_type == "resume_model_path":
self.resume_model(
self.training_config.resume_model_path,
input_image,
(self.image_channels, None, None),
hvd
)
elif self.load_type == "pruned_model_path":
self.load_pruned_model(
self.training_config.pruned_model_path,
input_image,
(self.image_channels, None, None)
)
else:
self.build_keras_model(
input_image
)
if self.training_config.pretrain_model_path:
self.load_pretrained_model(
self.training_config.pretrain_model_path
)
# get predictor sizes for later use
predictor_layers = [
self.keras_model.get_layer(n) for n in self.predictor_names
]
self.predictor_sizes = [tf.shape(l.output)[2:4] for l in predictor_layers]
def build_validation_model(self):
"""build validation model."""
# set eval phase at first
assert self.keras_model is not None, (
"""Training model has to be built before validation model."""
)
set_learning_phase(0)
input_shape = (self.image_channels, self.image_height, self.image_width)
input_layer = keras.layers.InputLayer(
input_shape=input_shape,
input_tensor=None,
name="Input",
)
_, temp_model_path = tempfile.mkstemp()
os.remove(temp_model_path)
self.keras_model.save(temp_model_path)
with patch_freeze_bn(self.freeze_bn):
val_model = get_model_with_input(temp_model_path, input_layer)
os.remove(temp_model_path)
self._val_model = val_model
# setup validation model predictor sizes for later use
predictor_layers = [
self._val_model.get_layer(n) for n in self.predictor_names
]
self.val_predictor_sizes = [l.output_shape[2:] for l in predictor_layers]
self.val_fmap_stride = [
(self.image_height // x[0], self.image_width // x[1]) for x in self.val_predictor_sizes
]
# restore learning phase to 1
set_learning_phase(1)
self.val_model = eval_builder.build(
val_model,
confidence_thresh=self.nms_confidence_thresh,
iou_threshold=self.nms_iou_threshold,
top_k=self.nms_top_k,
include_encoded_head=True,
nms_on_cpu=self.nms_on_cpu
)
def build_early_stopping_callback(self):
"""Setup early stopping callback."""
# If early stopping is enabled...
if self.spec.training_config.HasField("early_stopping"):
es = self.spec.training_config.early_stopping
callback = EarlyStopping(
monitor=es.monitor,
min_delta=es.min_delta,
patience=es.patience,
verbose=True
)
self.callbacks.append(callback)
def build_auto_class_weighting_callback(self, train_dataset):
"""build ACW callback."""
cb = AutoClassWeighting(
train_dataset=train_dataset,
loss_ops=self.acw_loss_ops,
)
fetches = [tf.assign(cb.pred, self.keras_model.outputs[0], validate_shape=False),
tf.assign(cb.label, self.keras_model.targets[0], validate_shape=False)]
self.keras_model._function_kwargs = {'fetches': fetches}
self.callbacks.append(cb)
def get_val_fmap_stride(self):
"""build a dummy validation model to get val_fmap_stride."""
# set eval phase at first
set_learning_phase(0)
# it doesn't matter whether the train model is pruned or not,
# since we just care about the height/width of the predictor
# feature maps. Channel number is irrelevant.
val_model = self.build_keras_model(
input_shape=(self.image_channels, self.image_height, self.image_width),
val=True
)
# restore learning phase to 1
set_learning_phase(1)
# setup validation model predictor sizes for later use
predictor_layers = [
val_model.get_layer(n) for n in self.predictor_names
]
val_predictor_sizes = [l.output_shape[2:4] for l in predictor_layers]
fmap_stride = [
(self.image_height // x[0], self.image_width // x[1]) for x in val_predictor_sizes
]
self.val_fmap_stride = fmap_stride
def build_ap_evaluator(self):
"""build_ap_evaluator."""
self.ap_evaluator = APEvaluator(
self.n_classes,
conf_thres=self.nms_confidence_thresh,
matching_iou_threshold=self.matching_iou,
average_precision_mode=self.average_precision_mode
)
def build_loss_ops(self):
"""build loss ops."""
n_box, n_attr = self._val_model.layers[-1].output_shape[1:]
op_pred = tf.placeholder(tf.float32, shape=(None, n_box, n_attr))
# op_true = tf.placeholder(tf.float32, shape=(None, n_box, n_attr - 5))
op_true = tf.placeholder(tf.float32, shape=(None, n_box, n_attr - 4))
self.loss_ops = [op_true, op_pred, self.losses[0](op_true, op_pred)]
self.acw_loss_ops = [op_true, op_pred, self.losses_no_reduce[0](op_true, op_pred)]
def build_validation_callback(
self,
val_dataset,
verbose=False
    ):
        """Build the validation metric callback and append it to the callbacks list."""
# build validation model
self.build_loss_ops()
self.build_ap_evaluator()
# build validation callback
if type(val_dataset) == YOLOv4TFDataPipe:
eval_callback = YOLOv3MetricCallback(
ap_evaluator=self.ap_evaluator,
built_eval_model=self.val_model,
generator=val_dataset.generator(),
classes=self.classes,
n_batches=val_dataset.n_batches,
loss_ops=self.loss_ops,
eval_model=self._val_model,
metric_interval=self.ckpt_interval,
last_epoch=self.num_epochs,
verbose=verbose
)
else:
eval_callback = DetectionMetricCallback(
ap_evaluator=self.ap_evaluator,
built_eval_model=self.val_model,
eval_sequence=val_dataset,
loss_ops=self.loss_ops,
eval_model=self._val_model,
metric_interval=1,
last_epoch=self.num_epochs,
verbose=verbose
)
        self.callbacks.append(eval_callback)
def build_savers(self, results_dir, verbose):
"""build several savers."""
if not os.path.exists(os.path.join(results_dir, 'weights')):
os.mkdir(os.path.join(results_dir, 'weights'))
ckpt_path = str(os.path.join(
results_dir,
'weights',
'yolov4_' + self.arch_name + '_epoch_{epoch:03d}.hdf5'
)
)
# checkpointer
self.build_checkpointer(ckpt_path, verbose)
# output label file
with open(os.path.join(results_dir, 'model_output_labels.txt'), 'w') as f:
f.write('\n'.join(self.classes))
csv_path = os.path.join(results_dir, 'yolov4_training_log_' + self.arch_name + '.csv')
# CSV logger
self.build_csvlogger(csv_path)
def build_tensorboard_callback(self, output_dir):
"""Build TensorBoard callback for visualization."""
tb_path = os.path.join(
output_dir,
"logs"
)
if os.path.exists(tb_path) and os.path.isdir(tb_path):
shutil.rmtree(tb_path)
if not os.path.exists(tb_path):
os.makedirs(tb_path)
tb_callback = TensorBoard(
log_dir=tb_path,
write_graph=False,
weight_hist=False
)
self.tb_callback = tb_callback
self.callbacks.append(tb_callback)
def build_status_logging_callback(self, results_dir, num_epochs, is_master):
"""Build status logging for TAO API."""
status_logger = TAOStatusLogger(
results_dir,
append=True,
num_epochs=num_epochs,
is_master=is_master,
)
self.callbacks.append(status_logger)
def build_model_ema_callback(self):
"""Build modelEMA callback."""
cur_iter = self.init_epoch * self.iters_per_epoch
model_ema = ModelEMACallback(model=self.keras_model,
init_step=cur_iter)
self.callbacks.append(model_ema)
        # attach the EMA weights to the validation and model-saving callbacks
for cb in self.callbacks:
if type(cb) in [DetectionMetricCallback, KerasModelSaver]:
cb.ema = model_ema.ema
def compile(self):
'''compile the keras model.'''
self.keras_model.compile(
optimizer=self.optimizer,
loss=self.losses,
target_tensors=self.target_tensors
)
def summary(self):
"""print keras model summary."""
self.keras_model.summary()
def train(self, verbose=1):
"""training."""
if type(self.train_dataset) == YOLOv4TFDataPipe:
self.keras_model.fit(
epochs=self.num_epochs,
steps_per_epoch=self.iters_per_epoch,
callbacks=self.callbacks,
initial_epoch=self.init_epoch,
verbose=verbose
)
else:
# Use the patched fit_generator
# TensorBoard image summary only supports 8-bit images
if (self.tb_callback is not None) and (self.image_depth == 8):
writer = self.tb_callback.writer
else:
writer = None
default_img_mean = (103.939, 116.779, 123.68)
fit_generator(
self.keras_model,
writer,
img_means=self.augmentation_config.image_mean or default_img_mean,
max_image_num=self.spec.training_config.visualizer.num_images,
steps_per_epoch=self.iters_per_epoch,
generator=self.train_dataset,
epochs=self.num_epochs,
callbacks=self.callbacks,
initial_epoch=self.init_epoch,
workers=self.n_workers,
max_queue_size=self.max_queue_size,
verbose=verbose,
use_multiprocessing=self.use_mp,
shuffle=False
)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/models/yolov4_model.py |
"""Models module for YOLOv4.""" | tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/models/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IVA YOLOv4 model construction wrapper functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras import layers
from nvidia_tao_tf1.core.templates.utils import _leaky_conv
from nvidia_tao_tf1.cv.common.models.backbones import get_backbone
def tiny_neck_featuremaps(
model,
):
"""helper function to build the neck featuremap of yolov4-tiny."""
feat_names = ["conv_5_mish", "conv_4_conv_3_mish"]
feats = [model.get_layer(n).output for n in feat_names]
feat_sizes = [128]
return feats, feat_sizes
def tiny3l_neck_featuremaps(
model,
):
"""helper function to build the neck featuremap of yolov4-tiny-3l."""
feat_names = ["conv_5_mish", "conv_4_conv_3_mish", "conv_3_conv_3_mish"]
feats = [model.get_layer(n).output for n in feat_names]
feat_sizes = [128, 64]
return feats, feat_sizes
def get_base_model(input_tensor,
arch,
nlayers,
kernel_regularizer=None,
bias_regularizer=None,
freeze_blocks=None,
freeze_bn=None,
force_relu=False,
activation="leaky_relu"):
    '''Return feature maps for YOLOv4.
    Args:
        input_tensor: image tensor
        arch: feature extractor architecture
        nlayers: number of layers in the feature extractor
        kernel_regularizer: kernel_regularizer
        bias_regularizer: bias_regularizer
        freeze_blocks: list of blocks to freeze in the feature extractor
        freeze_bn: whether to freeze batch norm layers in the feature extractor
        force_relu: Replace LeakyReLU with ReLU.
        activation(str): Activation type.
    Returns:
        A tuple of two elements. The first is a tuple of three tensors for the three feature
        layers. The second is a tuple of two integers, corresponding to the number of filters
        for upsample0 and upsample1. The tiny architectures return fewer feature maps and sizes.
    '''
base_model = get_backbone(input_tensor,
arch,
data_format='channels_first',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
freeze_blocks=freeze_blocks,
freeze_bn=freeze_bn,
nlayers=nlayers,
use_batch_norm=True,
use_pooling=False,
use_bias=False,
all_projections=True,
dropout=1e-3,
force_relu=force_relu,
activation=activation)
if arch == "cspdarknet_tiny":
return tiny_neck_featuremaps(
base_model,
)
if arch == "cspdarknet_tiny_3l":
return tiny3l_neck_featuremaps(
base_model,
)
base_featuremap = base_model.layers[-1].output
neck = base_featuremap
neck = _leaky_conv(
neck, 512, kernel=1, strides=1,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
use_batch_norm=True,
force_relu=force_relu,
name="yolo_neck_1"
)
neck = _leaky_conv(
neck, 1024, kernel=3, strides=1,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
use_batch_norm=True,
force_relu=force_relu,
name="yolo_neck_2"
)
neck = _leaky_conv(
neck, 512, kernel=1, strides=1,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
use_batch_norm=True,
force_relu=force_relu,
name="yolo_neck_3"
)
# SPP module
x1 = layers.MaxPooling2D(pool_size=5, strides=1, data_format='channels_first',
padding='same', name='yolo_spp_pool_1')(neck)
x2 = layers.MaxPooling2D(pool_size=9, strides=1, data_format='channels_first',
padding='same', name='yolo_spp_pool_2')(neck)
x3 = layers.MaxPooling2D(pool_size=13, strides=1, data_format='channels_first',
padding='same', name='yolo_spp_pool_3')(neck)
x = layers.Concatenate(axis=1,
name='yolo_spp_concat')([x1, x2, x3, neck])
# Detector will use leaky-relu activation
# See darknet yolov4.cfg
x_spp = _leaky_conv(
x, 512, kernel=1, strides=1,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
use_batch_norm=True,
force_relu=force_relu,
name='yolo_spp_conv'
)
def additional_conv(nchannels):
return _leaky_conv(
x_spp, nchannels, kernel=3, strides=2,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
use_batch_norm=True,
force_relu=force_relu,
name='yolo_expand_conv1'
)
if arch == 'resnet':
if nlayers == 10:
fmaps = (additional_conv(512),
base_model.layers[-10].output, base_model.layers[-19].output)
map_size = (128, 64)
elif nlayers == 18:
fmaps = (additional_conv(512),
base_model.layers[-19].output, base_model.layers[-37].output)
map_size = (128, 64)
elif nlayers == 34:
fmaps = (additional_conv(512),
base_model.layers[-28].output, base_model.layers[-82].output)
map_size = (128, 64)
elif nlayers == 50:
# Extract layers[-43] as the feature layer for large feature map (to detect sm object)
lg_map = base_model.layers[-43].output
lg_map = layers.UpSampling2D(2, data_format='channels_first',
name='expand_upsample')(lg_map)
fmaps = (additional_conv(1024), base_model.layers[-7].output, lg_map)
map_size = (256, 128)
elif nlayers == 101:
            # Extract layers[-43] as the feature layer for the large feature map (small objects).
            # There are too many stride-16 layers, so we take one and upsample it to stride 8.
lg_map = base_model.layers[-43].output
lg_map = layers.UpSampling2D(2, data_format='channels_first',
name='expand_upsample')(lg_map)
fmaps = (additional_conv(1024), base_model.layers[-7].output, lg_map)
map_size = (256, 128)
else:
raise ValueError("ResNet-{} architecture is currently not implemented\n"
"Please choose out of the following:\n{}.".
format(nlayers, '10, 18, 34, 50, 101'))
elif arch == 'vgg':
if nlayers == 16:
fmaps = (x_spp, base_model.layers[-10].output,
base_model.layers[-19].output)
map_size = (256, 128)
elif nlayers == 19:
fmaps = (x_spp, base_model.layers[-13].output,
base_model.layers[-25].output)
map_size = (256, 128)
else:
raise ValueError("VGG-{} architecture is currently not implemented\n"
"Please choose out of the following:\n{}.".
format(nlayers, '16, 19'))
elif arch == 'darknet':
if nlayers == 19:
fmaps = (x_spp, base_model.get_layer('b4_conv5').output,
base_model.get_layer('b3_conv3').output)
map_size = (256, 128)
elif nlayers == 53:
fmaps = (x_spp, base_model.get_layer('b4_add7').output,
base_model.get_layer('b3_add7').output)
map_size = (256, 128)
else:
raise ValueError("DarkNet-{} architecture is currently not implemented\n"
"Please choose out of the following:\n{}.".
format(nlayers, '19, 53'))
elif arch == 'cspdarknet':
fmaps = (x_spp, base_model.get_layer('b4_final_trans').output,
base_model.get_layer('b3_final_trans').output)
map_size = (256, 128)
elif arch == 'efficientnet_b0':
lg_map = base_model.get_layer('block5a_expand_activation').output
lg_map = layers.UpSampling2D(2, data_format='channels_first',
name='expand_upsample')(lg_map)
z = _leaky_conv(
lg_map, 256, kernel=3,
strides=1, kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer, use_batch_norm=True,
force_relu=force_relu, name='expand_conv3'
)
fmaps = (base_featuremap,
base_model.get_layer('block6a_expand_activation').output,
z)
map_size = (256, 128)
elif arch == 'mobilenet_v1':
fmaps = (additional_conv(512), base_model.layers[-39].output, base_model.layers[-53].output)
map_size = (128, 64)
elif arch == 'mobilenet_v2':
fmaps = (additional_conv(96), base_model.layers[-32].output, base_model.layers[-74].output)
map_size = (32, 16)
elif arch == 'squeezenet':
        # Quite a bit of work here...
x = additional_conv(128)
y = _leaky_conv(
base_featuremap, 128, kernel=1,
strides=1, kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer, use_batch_norm=True,
force_relu=force_relu, name='expand_conv2'
)
z = _leaky_conv(
base_model.layers[-9].output, 128, kernel=1,
strides=1, kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer, use_batch_norm=True,
force_relu=force_relu, name='expand_conv3'
)
fmaps = (x, y, z)
map_size = (64, 64)
elif arch == 'googlenet':
        # Quite a bit of work here...
x = additional_conv(1024)
y = _leaky_conv(
base_model.layers[-21].output, 512, kernel=3,
strides=1, kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer, use_batch_norm=True,
force_relu=force_relu, name='expand_conv2'
)
lg_map = base_model.layers[-41].output
lg_map = layers.UpSampling2D(2, data_format='channels_first',
name='expand_upsample')(lg_map)
z = _leaky_conv(
lg_map, 256, kernel=3,
strides=1, kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer, use_batch_norm=True,
force_relu=force_relu, name='expand_conv3'
)
fmaps = (x, y, z)
map_size = (256, 128)
else:
raise ValueError("{} architecture is currently not implemented\n".
format(arch))
return fmaps, map_size
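# Illustrative sketch (not called anywhere in the library): how the two return
# values of get_base_model() are typically consumed. The 3x416x416 resolution and
# the ResNet-18 backbone below are assumptions chosen only for demonstration.
def _example_get_base_model_usage():
    """Build ResNet-18 neck feature maps for a hypothetical 3x416x416 input."""
    image = layers.Input(shape=(3, 416, 416), name="example_input")
    fmaps, map_size = get_base_model(image, arch="resnet", nlayers=18)
    # fmaps holds three tensors ordered from the coarsest feature map (large
    # objects) to the finest one (small objects); map_size holds the number of
    # filters for the two upsampled branches, (128, 64) for ResNet-18.
    return fmaps, map_size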
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/models/base_model.py |
"""Utils to build the model, data loader and entire pipeline."""
from nvidia_tao_tf1.cv.common.mlops.clearml import get_clearml_task
from nvidia_tao_tf1.cv.common.mlops.wandb import check_wandb_logged_in, initialize_wandb
from nvidia_tao_tf1.cv.common.utils import build_class_weights
from nvidia_tao_tf1.cv.common.visualizer.tensorboard_visualizer import TensorBoardVisualizer
from nvidia_tao_tf1.cv.yolo_v4.dataio.data_sequence import YOLOv4DataSequence
from nvidia_tao_tf1.cv.yolo_v4.dataio.input_encoder import (
YOLOv4InputEncoder,
YOLOv4InputEncoderTensor
)
from nvidia_tao_tf1.cv.yolo_v4.dataio.tf_data_pipe import YOLOv4TFDataPipe
from nvidia_tao_tf1.cv.yolo_v4.models.yolov4_model import YOLOv4Model
def build_training_pipeline(spec, results_dir, key, hvd, sess, verbose):
"""Build the training pipeline."""
# Define visualizer
visualizer = TensorBoardVisualizer()
visualizer.build_from_config(
spec.training_config.visualizer
)
visualizer_config = spec.training_config.visualizer
is_master = hvd.rank() == 0
if is_master and visualizer_config.HasField("clearml_config"):
clearml_config = visualizer_config.clearml_config
get_clearml_task(clearml_config, "yolo_v4")
if is_master and visualizer_config.HasField("wandb_config"):
wandb_config = visualizer_config.wandb_config
wandb_logged_in = check_wandb_logged_in()
wandb_name = f"{wandb_config.name}" if wandb_config.name else \
"yolov4_training"
initialize_wandb(
project=wandb_config.project if wandb_config.project else None,
entity=wandb_config.entity if wandb_config.entity else None,
notes=wandb_config.notes if wandb_config.notes else None,
tags=wandb_config.tags if wandb_config.tags else None,
sync_tensorboard=True,
save_code=False,
results_dir=results_dir,
wandb_logged_in=wandb_logged_in,
name=wandb_name
)
# instantiate the model
yolov4 = YOLOv4Model(
spec,
key
)
cls_weights = build_class_weights(spec)
train_encoder = YOLOv4InputEncoder(
yolov4.n_classes,
spec.yolov4_config.box_matching_iou,
yolov4.val_fmap_stride,
yolov4.all_relative_anchors,
class_weights=cls_weights
)
def eval_encode_fn(output_img_size, gt_label):
return (train_encoder(output_img_size, gt_label), gt_label)
if yolov4.train_labels_format == "tfrecords":
# tfrecord data loader
train_dataset = YOLOv4TFDataPipe(
spec,
label_encoder=None,
training=True,
h_tensor=yolov4.h_tensor,
w_tensor=yolov4.w_tensor,
visualizer=visualizer,
rank=hvd.rank()
)
yolov4.train_dataset = train_dataset
# build the training model
yolov4.build_training_model(hvd)
# setup target tensors
yolo_input_encoder = \
YOLOv4InputEncoderTensor(
img_height=yolov4.h_tensor,
img_width=yolov4.w_tensor,
n_classes=yolov4.n_classes,
matching_box_iou_thres=spec.yolov4_config.box_matching_iou,
feature_map_size=yolov4.predictor_sizes,
anchors=yolov4.all_relative_anchors,
class_weights=cls_weights
)
train_dataset.set_encoder(yolo_input_encoder)
yolov4.set_target_tensors(train_dataset.encoded_labels)
else:
# keras sequence data loader
train_sequence = YOLOv4DataSequence(
spec.dataset_config,
spec.augmentation_config,
spec.training_config.batch_size_per_gpu,
is_training=True,
encode_fn=train_encoder,
output_raw_label=spec.training_config.visualizer.enabled
)
yolov4.train_dataset = train_sequence
# build the training model
yolov4.build_training_model(hvd)
# Visualize model weights histogram
if hvd.rank() == 0 and spec.training_config.visualizer.enabled:
visualizer.keras_model_weight_histogram(yolov4.keras_model)
# setup optimizer, if any
yolov4.build_optimizer(hvd)
    # build loss functions
yolov4.build_losses()
# build callbacks
yolov4.build_hvd_callbacks(hvd)
# build learning rate scheduler
yolov4.build_lr_scheduler(yolov4.train_dataset, hvd)
# build validation callback
if yolov4.val_labels_format == "tfrecords":
val_dataset = YOLOv4TFDataPipe(
spec,
training=False,
sess=sess,
h_tensor=yolov4.h_tensor_val,
w_tensor=yolov4.w_tensor_val
)
yolov4.val_dataset = val_dataset
yolov4.build_validation_model()
val_input_encoder = \
YOLOv4InputEncoderTensor(
img_height=yolov4.h_tensor_val,
img_width=yolov4.w_tensor_val,
n_classes=yolov4.n_classes,
matching_box_iou_thres=spec.yolov4_config.box_matching_iou,
feature_map_size=yolov4.val_predictor_sizes,
anchors=yolov4.all_relative_anchors
)
val_dataset.set_encoder(val_input_encoder)
yolov4.build_validation_callback(
val_dataset,
verbose=verbose
)
else:
yolov4.build_validation_model()
eval_sequence = YOLOv4DataSequence(
spec.dataset_config,
spec.augmentation_config,
spec.eval_config.batch_size,
is_training=False,
encode_fn=eval_encode_fn
)
yolov4.val_dataset = eval_sequence
yolov4.build_validation_callback(
eval_sequence,
verbose=verbose
)
    # compile the model
yolov4.compile()
if spec.class_weighting_config.enable_auto:
yolov4.build_auto_class_weighting_callback(yolov4.train_dataset)
if hvd.rank() == 0:
yolov4.build_savers(results_dir, verbose)
if spec.training_config.visualizer.enabled:
yolov4.build_tensorboard_callback(results_dir)
yolov4.build_status_logging_callback(results_dir, yolov4.num_epochs, True)
if spec.training_config.model_ema:
yolov4.build_model_ema_callback()
yolov4.build_early_stopping_callback()
return yolov4
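# Illustrative sketch of how build_training_pipeline() is driven end to end,
# mirroring scripts/train.py. The spec path, results directory and key passed in
# are hypothetical placeholders; this helper is not called anywhere in the library.
def _example_build_and_train(spec_path, results_dir, key=""):
    """Build the full YOLOv4 training pipeline and run training (sketch)."""
    from keras import backend as K
    import tensorflow as tf
    from nvidia_tao_tf1.cv.common.utils import hvd_keras, initialize
    from nvidia_tao_tf1.cv.yolo_v3.utils.tensor_utils import get_init_ops
    from nvidia_tao_tf1.cv.yolo_v4.utils.spec_loader import load_experiment_spec
    # Horovod and TF session setup (single- or multi-GPU).
    hvd = hvd_keras()
    hvd.init()
    sess = tf.Session()
    K.set_session(sess)
    K.set_image_data_format('channels_first')
    K.set_learning_phase(1)
    # Load the experiment spec and seed the run.
    spec = load_experiment_spec(spec_path)
    initialize(spec.random_seed, hvd)
    # Wire up the model, data loaders, losses and callbacks, then train.
    yolov4 = build_training_pipeline(spec, results_dir, key, hvd, sess, verbose=1)
    sess.run(get_init_ops())
    yolov4.train(verbose=1)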
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/models/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test yolo models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Input
from keras.models import Model
import numpy as np
import pytest
from nvidia_tao_tf1.cv.yolo_v4.models.base_model import get_base_model
def do_model_pred(input_shape, arch, nlayers):
x = Input(shape=input_shape)
fmaps, map_size = get_base_model(x, arch, nlayers)
model = Model(inputs=x, outputs=fmaps)
x_in = np.random.normal(size=(2, ) + input_shape)
pred = model.predict(x_in)
stride = 32
if arch == "cspdarknet_tiny":
assert len(map_size) == 1
assert len(pred) == 2
else:
assert len(map_size) == 2
assert len(pred) == 3
if arch == 'vgg':
stride = 16
# assert pred 0
assert pred[0].shape[0] == 2
assert pred[0].shape[2] == input_shape[-1] / stride
assert pred[0].shape[3] == input_shape[-1] / stride
# assert pred 1
assert pred[1].shape[0] == 2
assert pred[1].shape[2] == input_shape[-1] / (stride / 2)
assert pred[1].shape[3] == input_shape[-1] / (stride / 2)
# assert pred 2
# YOLO v4 Tiny only has two heads(stride 32 and 16, no 8)
if arch != "cspdarknet_tiny":
assert pred[2].shape[0] == 2
assert pred[2].shape[2] == input_shape[-1] / (stride / 4)
assert pred[2].shape[3] == input_shape[-1] / (stride / 4)
def test_all_base_models():
    # iterate over supported and unsupported arch/nlayers combinations
for arch in ['resnet', 'vgg', 'squeezenet', 'darknet', 'mobilenet_v1', 'mobilenet_v2',
'googlenet', 'cspdarknet', 'efficientnet_b0', 'cspdarknet_tiny',
'cspdarknet_tiny_3l', 'wrong_net']:
for nlayers in [10, 16, 18, 19, 34, 50, 53, 101]:
if arch in ['squeezenet', 'googlenet', 'mobilenet_v1',
'efficientnet_b0', 'cspdarknet_tiny',
'cspdarknet_tiny_3l'] and nlayers > 10:
                # These archs ignore nlayers, so test them only once; mobilenet_v2 covers the nlayers-invariant case.
continue
resnet_flag = (arch == 'resnet' and nlayers not in [10, 18, 34, 50, 101])
vgg_flag = (arch == 'vgg' and nlayers not in [16, 19])
darknet_flag = (arch in ['darknet', 'cspdarknet'] and nlayers not in [19, 53])
if resnet_flag or vgg_flag or darknet_flag or arch == 'wrong_net':
with pytest.raises((ValueError, NotImplementedError)):
do_model_pred((3, 64, 64), arch, nlayers)
else:
do_model_pred((3, 64, 64), arch, nlayers)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/models/tests/test_model.py |
"""Implementation of YOLOv4 model architecture.""" | tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/architecture/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IVA YOLOv4 base architecture."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Concatenate, Conv2D, Permute, Reshape, UpSampling2D
from keras.models import Model
from nvidia_tao_tf1.core.models.quantize_keras_model import create_quantized_keras_model
from nvidia_tao_tf1.core.templates.utils import _leaky_conv, arg_scope
from nvidia_tao_tf1.cv.yolo_v3.layers.yolo_anchor_box_layer import YOLOAnchorBox
from nvidia_tao_tf1.cv.yolo_v4.layers.bbox_postprocessing_layer import BBoxPostProcessingLayer
from nvidia_tao_tf1.cv.yolo_v4.models.base_model import get_base_model
def YOLO_FCN(feature_layers, # pylint: disable=W0102
data_format='channels_first',
use_batch_norm=True,
kernel_regularizer=None,
bias_regularizer=None,
use_bias=False,
num_anchors=[3, 3, 3],
num_classes=80,
force_relu=False,
activation="leaky_relu"):
'''
Build FCN (fully convolutional net) part of YOLOv4.
Args:
        feature_layers: a list of two elements. The first element is a tuple of size 3, containing three
keras tensors as three feature maps. Second element is a tuple of size 2, containing
number of channels upsampled layers need to have (this should be half of the number of
channels of the 2x larger feature map).
data_format: currently only 'channels_first' is tested and supported
use_batch_norm: whether to use batch norm in FCN build. Note this should be consistent with
feature extractor.
kernel_regularizer, bias_regularizer: keras regularizer object or None
use_bias: whether to use bias for conv layers. If use_batch_norm is true, this should be
false.
        num_anchors: Number of anchors of different sizes in each feature map. The first element is
            for the smallest feature map (i.e. to detect large objects). The last element is for the
            largest feature map (i.e. to detect small objects).
num_classes: Number of all possible classes. E.g. if you have `person, bag, face`, the value
should be 3.
force_relu: whether to use ReLU instead of LeakyReLU
activation(str): Activation type.
Returns:
[det_bgobj, det_mdobj, det_smobj]: Three keras tensors for big/mid/small objects detection.
Those tensors can be processed to get detection boxes.
'''
concat_axis = 1 if data_format == 'channels_first' else -1
concat_num_filters = feature_layers[1]
x = feature_layers[0][0]
last_conv_filters = [i * (num_classes + 5) for i in num_anchors]
with arg_scope([_leaky_conv],
use_batch_norm=use_batch_norm,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
padding='same',
freeze_bn=False,
use_bias=use_bias,
force_relu=force_relu):
x = _leaky_conv(x, filters=concat_num_filters[0] * 4,
kernel=3, strides=1, name='yolo_conv1_2')
x = _leaky_conv(x, filters=concat_num_filters[0] * 2,
kernel=1, strides=1, name='yolo_conv1_3')
x = _leaky_conv(x, filters=concat_num_filters[0], kernel=1, strides=1, name='yolo_conv2')
x_branch_0 = x
x = UpSampling2D(2, data_format=data_format, name='upsample0')(x)
x_branch = _leaky_conv(
feature_layers[0][1], filters=concat_num_filters[0],
kernel=1, strides=1,
name="yolo_x_branch")
x = Concatenate(axis=concat_axis)([x, x_branch])
x = _leaky_conv(x, filters=concat_num_filters[0],
kernel=1, strides=1, name='yolo_conv3_1')
x = _leaky_conv(x, filters=concat_num_filters[0] * 2,
kernel=3, strides=1, name='yolo_conv3_2')
x = _leaky_conv(x, filters=concat_num_filters[0],
kernel=1, strides=1, name='yolo_conv3_3')
x = _leaky_conv(x, filters=concat_num_filters[0] * 2,
kernel=3, strides=1, name='yolo_conv3_4')
x = _leaky_conv(x, filters=concat_num_filters[0],
kernel=1, strides=1, name='yolo_conv3_5')
x_next_next = x
x = _leaky_conv(x_next_next, filters=concat_num_filters[1], kernel=1,
strides=1, name='yolo_conv4')
x = UpSampling2D(2, data_format=data_format, name='upsample1')(x)
x_branch_2 = _leaky_conv(
feature_layers[0][2], filters=concat_num_filters[1],
kernel=1, strides=1,
name="yolo_x_branch_2")
x = Concatenate(axis=concat_axis)([x, x_branch_2])
x = _leaky_conv(x, filters=concat_num_filters[1],
kernel=1, strides=1, name='yolo_conv5_1')
x = _leaky_conv(x, filters=concat_num_filters[1] * 2,
kernel=3, strides=1, name='yolo_conv5_2')
x = _leaky_conv(x, filters=concat_num_filters[1],
kernel=1, strides=1, name='yolo_conv5_3')
x = _leaky_conv(x, filters=concat_num_filters[1] * 2,
kernel=3, strides=1, name='yolo_conv5_4')
x = _leaky_conv(x, filters=concat_num_filters[1],
kernel=1, strides=1, name='yolo_conv5_5')
sm_branch = x
sm_leaky = _leaky_conv(x, filters=concat_num_filters[1] * 2,
kernel=3, strides=1, name='yolo_conv5_6')
det_smobj = Conv2D(filters=last_conv_filters[2],
kernel_size=1,
strides=1,
padding='same',
data_format=data_format,
activation=None,
bias_regularizer=bias_regularizer,
kernel_regularizer=kernel_regularizer,
use_bias=True,
name='conv_sm_object')(sm_leaky)
with arg_scope([_leaky_conv],
use_batch_norm=use_batch_norm,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
padding='same',
freeze_bn=False,
use_bias=use_bias,
force_relu=force_relu):
sm_branch_conv = _leaky_conv(
sm_branch, filters=concat_num_filters[0],
kernel=3, strides=2,
name="yolo_sm_branch_conv")
x = Concatenate(axis=concat_axis)([x_next_next, sm_branch_conv])
x = _leaky_conv(x, filters=concat_num_filters[0],
kernel=1, strides=1, name='yolo_conv3_5_1')
x = _leaky_conv(x, filters=concat_num_filters[0] * 2,
kernel=3, strides=1, name='yolo_conv3_4_2')
x = _leaky_conv(x, filters=concat_num_filters[0],
kernel=1, strides=1, name='yolo_conv3_5_1_1')
md_leaky = _leaky_conv(x, filters=concat_num_filters[0] * 2,
kernel=3, strides=1, name='yolo_conv3_6')
md_leaky = _leaky_conv(md_leaky, 256, alpha=0.1, kernel=1, strides=1,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
use_batch_norm=True, force_relu=force_relu,
name='md_leaky_conv512')
md_leaky_down = _leaky_conv(md_leaky, 512, alpha=0.1, kernel=3, strides=2,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
use_batch_norm=True, force_relu=force_relu,
name='md_leaky_conv512_down')
md_leaky = _leaky_conv(md_leaky, 512, alpha=0.1, kernel=3, strides=1,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
use_batch_norm=True, force_relu=force_relu,
name='md_leaky_conv1024')
det_mdobj = Conv2D(filters=last_conv_filters[1],
kernel_size=1,
strides=1,
padding='same',
data_format=data_format,
activation=None,
bias_regularizer=bias_regularizer,
kernel_regularizer=kernel_regularizer,
use_bias=True,
name='conv_mid_object')(md_leaky)
with arg_scope([_leaky_conv],
use_batch_norm=use_batch_norm,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
padding='same',
freeze_bn=False,
use_bias=use_bias,
force_relu=force_relu):
x = Concatenate(axis=concat_axis)([x_branch_0, md_leaky_down])
x = _leaky_conv(x, filters=concat_num_filters[0] * 2,
kernel=1, strides=1, name='yolo_conv1_3_1')
x = _leaky_conv(x, filters=concat_num_filters[0] * 4,
kernel=3, strides=1, name='yolo_conv1_4')
x = _leaky_conv(x, filters=concat_num_filters[0] * 2,
kernel=1, strides=1, name='yolo_conv1_5')
bg_leaky = _leaky_conv(x, filters=concat_num_filters[0] * 4,
kernel=3, strides=1, name='yolo_conv1_6')
bg_leaky = _leaky_conv(bg_leaky, 512, alpha=0.1, kernel=1, strides=1,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
use_batch_norm=True, force_relu=force_relu,
name='bg_leaky_conv512')
bg_leaky = _leaky_conv(bg_leaky, 1024, alpha=0.1, kernel=3, strides=1,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
use_batch_norm=True, force_relu=force_relu,
name='bg_leaky_conv1024')
det_bgobj = Conv2D(filters=last_conv_filters[0],
kernel_size=1,
strides=1,
padding='same',
data_format=data_format,
activation=None,
bias_regularizer=bias_regularizer,
kernel_regularizer=kernel_regularizer,
use_bias=True,
name='conv_big_object')(bg_leaky)
return [det_bgobj, det_mdobj, det_smobj]
def YOLO_FCN_Tiny( # pylint: disable=W0102
feature_layers,
data_format='channels_first',
use_batch_norm=True,
kernel_regularizer=None,
bias_regularizer=None,
use_bias=False,
num_anchors=[3, 3, 3],
num_classes=80,
force_relu=False,
activation="leaky_relu"
):
'''
Build FCN (fully convolutional net) part of YOLOv4.
Args:
        feature_layers: a list of two elements. The first element is a tuple of size 3, containing three
keras tensors as three feature maps. Second element is a tuple of size 2, containing
number of channels upsampled layers need to have (this should be half of the number of
channels of the 2x larger feature map).
data_format: currently only 'channels_first' is tested and supported
use_batch_norm: whether to use batch norm in FCN build. Note this should be consistent with
feature extractor.
kernel_regularizer, bias_regularizer: keras regularizer object or None
use_bias: whether to use bias for conv layers. If use_batch_norm is true, this should be
false.
        num_anchors: Number of anchors of different sizes in each feature map. The first element is
            for the smallest feature map (i.e. to detect large objects). The last element is for the
            largest feature map (i.e. to detect small objects).
num_classes: Number of all possible classes. E.g. if you have `person, bag, face`, the value
should be 3.
force_relu: whether to use ReLU instead of LeakyReLU
activation(str): Activation type.
Returns:
        [det_bgobj, det_mdobj, det_smobj]: Two or three keras tensors for big/mid/small objects
            detection (two when only two anchor sets are given). Those tensors can be processed to
            get detection boxes.
'''
concat_axis = 1 if data_format == 'channels_first' else -1
concat_num_filters = feature_layers[1]
last_layer = feature_layers[0][0]
last_conv_filters = [i * (num_classes + 5) for i in num_anchors]
with arg_scope([_leaky_conv],
use_batch_norm=use_batch_norm,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
padding='same',
freeze_bn=False,
use_bias=use_bias,
force_relu=force_relu):
x = _leaky_conv(last_layer, filters=concat_num_filters[0] * 2,
kernel=1, strides=1, name='yolo_conv1_1')
bg_mish = _leaky_conv(x, filters=concat_num_filters[0] * 4,
kernel=3, strides=1, name='yolo_conv1_6')
x = _leaky_conv(x, filters=concat_num_filters[0], kernel=1, strides=1, name='yolo_conv2')
x = UpSampling2D(2, data_format=data_format, name='upsample0')(x)
x = Concatenate(axis=concat_axis)([x, feature_layers[0][1]])
md_mish = _leaky_conv(x, filters=concat_num_filters[0] * 2,
kernel=3, strides=1, name='yolo_conv3_6')
if len(num_anchors) > 2:
# tiny-3l
x = _leaky_conv(md_mish, filters=concat_num_filters[1], kernel=1,
strides=1, name='yolo_conv4')
x = UpSampling2D(2, data_format=data_format, name='upsample1')(x)
x = Concatenate(axis=concat_axis)([x, feature_layers[0][2]])
sm_mish = _leaky_conv(x, filters=concat_num_filters[1] * 2,
kernel=3, strides=1, name='yolo_conv5_6')
if len(num_anchors) > 2:
det_smobj = Conv2D(filters=last_conv_filters[2],
kernel_size=1,
strides=1,
padding='same',
data_format=data_format,
activation=None,
bias_regularizer=bias_regularizer,
kernel_regularizer=kernel_regularizer,
use_bias=True,
name='conv_sm_object')(sm_mish)
det_bgobj = Conv2D(filters=last_conv_filters[0],
kernel_size=1,
strides=1,
padding='same',
data_format=data_format,
activation=None,
bias_regularizer=bias_regularizer,
kernel_regularizer=kernel_regularizer,
use_bias=True,
name='conv_big_object')(bg_mish)
det_mdobj = Conv2D(filters=last_conv_filters[1],
kernel_size=1,
strides=1,
padding='same',
data_format=data_format,
activation=None,
bias_regularizer=bias_regularizer,
kernel_regularizer=kernel_regularizer,
use_bias=True,
name='conv_mid_object')(md_mish)
if len(num_anchors) == 2:
return [det_bgobj, det_mdobj]
return [det_bgobj, det_mdobj, det_smobj]
def YOLO(input_tensor, # pylint: disable=W0102
arch,
nlayers,
kernel_regularizer=None,
bias_regularizer=None,
freeze_blocks=None,
freeze_bn=None,
anchors=[[(0.279, 0.216), (0.375, 0.476), (0.897, 0.784)],
[(0.072, 0.147), (0.149, 0.108), (0.142, 0.286)],
[(0.024, 0.031), (0.038, 0.072), (0.079, 0.055)]],
grid_scale_xy=[1.05, 1.1, 1.1],
num_classes=80,
qat=True,
force_relu=False,
activation="leaky_relu"):
'''
Build YOLO v4 Network.
Args:
input_tensor: Keras tensor created by Input layer
arch: architecture of feature extractors. E.g. resnet18, resnet10, darknet53
kernel_regularizer, bias_regularizer: keras regularizer object or None
freeze_blocks: blocks to freeze during training. The meaning of `block` is arch-specific
freeze_bn: whether to freeze batch norm layer **for feature extractors**
        anchors: List of 2 or 3 elements indicating the anchor boxes shape on feature maps. The
            first element is for the smallest feature map (i.e. to detect large objects). The last
            element is for the largest feature map (i.e. to detect small objects). Each element is
            a list of tuples of size 2, in the format of (w, h). The length of each list can be any
            integer larger than 0.
        grid_scale_xy: List of 3 floats indicating how much the grid scale should be (to eliminate
            grid sensitivity; see the YOLOv4 paper for details).
num_classes: Number of all possible classes. E.g. if you have `person, bag, face`, the value
should be 3
        qat (bool): If `True`, build a quantization-aware model.
force_relu(bool): If `True`, change all LeakyReLU to ReLU.
activation(str): Activation type.
Returns:
model: A keras YOLO v4 model with encoded box detections as output.
'''
assert len(anchors) in [2, 3]
num_anchors = [len(i) for i in anchors]
feature_layers = get_base_model(
input_tensor, arch, nlayers, kernel_regularizer,
bias_regularizer, freeze_blocks, freeze_bn, force_relu,
activation=activation
)
is_tiny = bool(arch in ["cspdarknet_tiny", "cspdarknet_tiny_3l"])
if is_tiny:
yolo_fcn = YOLO_FCN_Tiny(
feature_layers,
data_format='channels_first',
use_batch_norm=True,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
use_bias=False,
num_anchors=num_anchors,
num_classes=num_classes,
force_relu=force_relu,
activation=activation
)
else:
yolo_fcn = YOLO_FCN(
feature_layers,
data_format='channels_first',
use_batch_norm=True,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
use_bias=False,
num_anchors=num_anchors,
num_classes=num_classes,
force_relu=force_relu,
activation=activation
)
if qat:
raw_model = Model(inputs=input_tensor, outputs=yolo_fcn)
qat_model = create_quantized_keras_model(raw_model)
if len(anchors) == 3:
yolo_fcn = [qat_model.get_layer('conv_big_object').output,
qat_model.get_layer('conv_mid_object').output,
qat_model.get_layer('conv_sm_object').output]
else:
yolo_fcn = [qat_model.get_layer('conv_big_object').output,
qat_model.get_layer('conv_mid_object').output]
# [pred_y, pred_x, pred_h, pred_w, object, cls...]
bgdet = Reshape((-1, num_classes + 5),
name="bg_reshape")(Permute((2, 3, 1), name="bg_permute")(yolo_fcn[0]))
bgdet = BBoxPostProcessingLayer(grid_scale_xy[0], name="bg_bbox_processor")(bgdet)
mddet = Reshape((-1, num_classes + 5),
name="md_reshape")(Permute((2, 3, 1), name="md_permute")(yolo_fcn[1]))
mddet = BBoxPostProcessingLayer(grid_scale_xy[1], name="md_bbox_processor")(mddet)
if len(anchors) == 3:
smdet = Reshape((-1, num_classes + 5),
name="sm_reshape")(Permute((2, 3, 1), name="sm_permute")(yolo_fcn[2]))
smdet = BBoxPostProcessingLayer(grid_scale_xy[2], name="sm_bbox_processor")(smdet)
# build YOLO v3 anchor layers for corresponding feature maps. Anchor shapes are defined in args.
bg_anchor = YOLOAnchorBox(anchors[0], name="bg_anchor")(yolo_fcn[0])
md_anchor = YOLOAnchorBox(anchors[1], name="md_anchor")(yolo_fcn[1])
if len(anchors) == 3:
sm_anchor = YOLOAnchorBox(anchors[2], name="sm_anchor")(yolo_fcn[2])
bgdet = Concatenate(axis=-1, name="encoded_bg")([bg_anchor, bgdet])
mddet = Concatenate(axis=-1, name="encoded_md")([md_anchor, mddet])
if len(anchors) == 3:
smdet = Concatenate(axis=-1, name="encoded_sm")([sm_anchor, smdet])
if len(anchors) == 3:
results = Concatenate(axis=-2, name="encoded_detections")([bgdet, mddet, smdet])
else:
results = Concatenate(axis=-2, name="encoded_detections")([bgdet, mddet])
return Model(inputs=input_tensor, outputs=results, name="YOLOv4")
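# Illustrative sketch (not called anywhere in the library): building an encoded
# YOLOv4 detection graph directly with YOLO(). The input resolution, backbone and
# anchor shapes below are assumptions for demonstration only; in practice they
# come from the experiment spec and the kmeans anchor tool.
def _example_build_yolo_graph():
    """Build a CSPDarkNet-53 YOLOv4 graph for a hypothetical 3x416x416 input."""
    from keras.layers import Input
    image = Input(shape=(3, 416, 416), name="example_input")
    # Anchor (w, h) pairs are relative to the input size; the first list serves
    # the coarsest feature map (large objects), the last the finest (small objects).
    example_anchors = [[(0.279, 0.216), (0.375, 0.476), (0.897, 0.784)],
                       [(0.072, 0.147), (0.149, 0.108), (0.142, 0.286)],
                       [(0.024, 0.031), (0.038, 0.072), (0.079, 0.055)]]
    model = YOLO(image,
                 arch='cspdarknet',
                 nlayers=53,
                 anchors=example_anchors,
                 num_classes=3,
                 qat=False)
    # The output is the "encoded_detections" tensor; the eval/inference builders
    # decode it into final boxes.
    return model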
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/architecture/yolo_arch.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test yolo arch builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Input, LeakyReLU
from nvidia_tao_tf1.cv.yolo_v4.architecture.yolo_arch import YOLO
def test_arch():
it = Input(shape=(3, 64, 32), name="Input")
model = YOLO(it,
'resnet', 18,
kernel_regularizer=None,
bias_regularizer=None,
freeze_blocks=[0],
freeze_bn=None,
qat=True)
assert model.get_layer('conv1').trainable is False
assert model.get_layer('encoded_detections').output_shape[-2:] == (126, 91)
model = YOLO(it,
'resnet', 18,
kernel_regularizer=None,
bias_regularizer=None,
freeze_blocks=[0],
freeze_bn=None,
qat=True)
assert model.get_layer('conv1').trainable is False
assert model.get_layer('encoded_detections').output_shape[-2:] == (126, 91)
for layer in model.layers:
assert type(layer) != LeakyReLU
model = YOLO(it,
'darknet', 19,
kernel_regularizer=None,
bias_regularizer=None,
freeze_blocks=None,
freeze_bn=None,
qat=False,
force_relu=True)
assert model.get_layer('conv1').trainable is True
for layer in model.layers:
assert type(layer) != LeakyReLU
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/architecture/tests/test_arch.py |
"""Implementation of the tasks for YOLOv4.""" | tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/scripts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Export a yolov4 model."""
# import build_command_line_parser as this is needed by entrypoint
from nvidia_tao_tf1.cv.common.export.app import build_command_line_parser # noqa pylint: disable=W0611
from nvidia_tao_tf1.cv.common.export.app import launch_export
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.yolo_v4.export.yolov4_exporter import YOLOv4Exporter as Exporter
if __name__ == "__main__":
try:
launch_export(Exporter, None, "onnx")
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Export finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Export was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/scripts/export.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to calculate YOLOv4 anchor config."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# import build_command_line_parser as this is needed by entrypoint
from nvidia_tao_tf1.cv.yolo_v3.scripts.kmeans import build_command_line_parser # noqa pylint: disable=W0611
from nvidia_tao_tf1.cv.yolo_v3.scripts.kmeans import main
if __name__ == "__main__":
main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/scripts/kmeans.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert KITTI dataset to TFRecords for YOLOv4 TLT model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.detectnet_v2.scripts.dataset_convert import ( # noqa pylint: disable=unused-import
build_command_line_parser,
main,
)
if __name__ == "__main__":
try:
main(sys.argv[1:])
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Dataset convert finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Dataset convert was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/scripts/dataset_convert.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perform continuous YOLO training on a tfrecords or keras dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import os
import warnings
from keras import backend as K
import tensorflow as tf
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.common.utils import check_tf_oom, hvd_keras, initialize
from nvidia_tao_tf1.cv.yolo_v3.utils.tensor_utils import get_init_ops
from nvidia_tao_tf1.cv.yolo_v4.models.utils import build_training_pipeline
from nvidia_tao_tf1.cv.yolo_v4.utils.spec_loader import load_experiment_spec
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level='INFO')
logger = logging.getLogger(__name__)
verbose = 0
warnings.filterwarnings(action="ignore", category=UserWarning)
def run_experiment(config_path, results_dir, key):
"""
Launch experiment that trains the model.
NOTE: Do not change the argument names without verifying that cluster submission works.
Args:
config_path (str): Path to a text file containing a complete experiment configuration.
results_dir (str): Path to a folder where various training outputs will be written.
If the folder does not already exist, it will be created.
"""
hvd = hvd_keras()
hvd.init()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = str(hvd.local_rank())
sess = tf.Session(config=config)
K.set_session(sess)
K.set_image_data_format('channels_first')
K.set_learning_phase(1)
verbose = 1 if hvd.rank() == 0 else 0
is_master = hvd.rank() == 0
if is_master and not os.path.exists(results_dir):
os.makedirs(results_dir)
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=is_master,
verbosity=1,
append=True
)
)
status_logging.get_status_logger().write(
data=None,
status_level=status_logging.Status.STARTED,
message="Starting Yolo_V4 Training job"
)
# Load experiment spec.
spec = load_experiment_spec(config_path)
initialize(spec.random_seed, hvd)
# build training model and dataset
model = build_training_pipeline(
spec,
results_dir,
key,
hvd,
sess,
verbose
)
if hvd.rank() == 0:
model.summary()
sess.run(get_init_ops())
model.train(verbose)
status_logging.get_status_logger().write(
data=None,
status_level=status_logging.Status.SUCCESS,
message="YOLO_V4 training finished successfully."
)
def build_command_line_parser(parser=None):
'''build parser.'''
if parser is None:
        parser = argparse.ArgumentParser(prog='train', description='Train a YOLOv4 model.')
parser.add_argument(
'-e',
'--experiment_spec_file',
type=str,
required=True,
        help='Path to the experiment spec file. Absolute path or relative to the working directory.')
parser.add_argument(
'-r',
'--results_dir',
type=str,
required=True,
help='Path to a folder where experiment outputs should be written.'
)
parser.add_argument(
'-k',
'--key',
type=str,
default="",
required=False,
help='Key to save or load a .tlt model.'
)
return parser
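# Example invocation of this script's command-line interface (the spec path,
# results directory and key below are hypothetical placeholders):
#   train -e /workspace/specs/yolo_v4_train.txt -r /workspace/results/yolo_v4 -k nvidia_tlt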
def parse_command_line(args):
"""Simple function to parse command line arguments."""
parser = build_command_line_parser()
return parser.parse_args(args)
@check_tf_oom
def main(args=None):
"""Run the training process."""
args = parse_command_line(args)
try:
run_experiment(
config_path=args.experiment_spec_file,
results_dir=args.results_dir,
key=args.key
)
logger.info("Training finished successfully.")
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Training was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
logger.info("Training was interrupted.")
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/scripts/train.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MagNet pruning wrapper for classification/detection models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from datetime import datetime as dt
import logging
import os
from nvidia_tao_tf1.core.pruning.pruning import prune
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.common.utils import (
get_model_file_size,
get_num_params
)
from nvidia_tao_tf1.cv.yolo_v4.layers.split import Split
from nvidia_tao_tf1.cv.yolo_v4.utils.model_io import load_model, save_model
from nvidia_tao_tf1.cv.yolo_v4.utils.spec_loader import load_experiment_spec
logger = logging.getLogger(__name__)
def build_command_line_parser(parser=None):
'''build parser.'''
if parser is None:
parser = argparse.ArgumentParser(description="TLT pruning script")
parser.add_argument("-m",
"--model",
type=str,
help="Path to the target model for pruning",
required=True,
default=None)
parser.add_argument("-o",
"--output_file",
type=str,
help="Output file path for pruned model",
required=True,
default=None)
parser.add_argument("-e",
"--experiment_spec_path",
type=str,
help="Path to experiment spec file",
required=True)
parser.add_argument('-k',
'--key',
required=False,
type=str,
default="",
help='Key to load a .tlt model')
parser.add_argument('-n',
'--normalizer',
type=str,
default='max',
help="`max` to normalize by dividing each norm by the \
maximum norm within a layer; `L2` to normalize by \
dividing by the L2 norm of the vector comprising all \
kernel norms. (default: `max`)")
parser.add_argument('-eq',
'--equalization_criterion',
type=str,
default='union',
help="Criteria to equalize the stats of inputs to an \
element wise op layer. Options are \
[arithmetic_mean, geometric_mean, union, \
intersection]. (default: `union`)")
parser.add_argument("-pg",
"--pruning_granularity",
type=int,
help="Pruning granularity: number of filters to remove \
at a time. (default:8)",
default=8)
parser.add_argument("-pth",
"--pruning_threshold",
type=float,
help="Threshold to compare normalized norm against \
(default:0.1)", default=0.1)
parser.add_argument("-nf",
"--min_num_filters",
type=int,
help="Minimum number of filters to keep per layer. \
(default:16)", default=16)
parser.add_argument("-el",
"--excluded_layers", action='store',
type=str, nargs='*',
help="List of excluded_layers. Examples: -i item1 \
item2", default=[])
parser.add_argument("--results_dir",
type=str,
default=None,
help="Path to the files where the logs are stored.")
parser.add_argument("-v",
"--verbose",
action='store_true',
help="Include this flag in command line invocation for \
verbose logs.")
return parser
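# Example invocation of this script's command-line interface (all paths, the key
# and the threshold below are hypothetical placeholders):
#   prune -m /workspace/results/weights/yolov4_resnet18_epoch_080.hdf5 \
#         -o /workspace/results/yolov4_resnet18_pruned.hdf5 \
#         -e /workspace/specs/yolo_v4_train.txt \
#         -k nvidia_tlt -pth 0.5 -eq intersection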
def parse_command_line(args):
"""Simple function to parse command line arguments."""
parser = build_command_line_parser()
return parser.parse_args(args)
def run_pruning(args=None):
"""Prune an encrypted Keras model."""
results_dir = args.results_dir
if results_dir is not None:
if not os.path.exists(results_dir):
os.makedirs(results_dir)
timestamp = int(dt.timestamp(dt.now()))
filename = "status.json"
if results_dir == "/workspace/logs":
filename = f"status_prune_{timestamp}.json"
status_file = os.path.join(results_dir, filename)
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=True
)
)
# Set up logger verbosity.
verbosity = 'INFO'
if args.verbose:
verbosity = 'DEBUG'
# Configure the logger.
logging.basicConfig(
format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level=verbosity
)
assert args.equalization_criterion in \
['arithmetic_mean', 'geometric_mean', 'union', 'intersection'], \
"Equalization criterion are [arithmetic_mean, geometric_mean, union, \
intersection]."
assert args.normalizer in ['L2', 'max'], \
"normalizer options are [L2, max]."
experiment_spec = load_experiment_spec(args.experiment_spec_path)
img_height = experiment_spec.augmentation_config.output_height
img_width = experiment_spec.augmentation_config.output_width
n_channels = experiment_spec.augmentation_config.output_channel
final_model = load_model(
args.model,
experiment_spec,
(n_channels, img_height, img_width),
key=args.key
)
if verbosity == 'DEBUG':
# Printing out the loaded model summary
logger.debug("Model summary of the unpruned model:")
logger.debug(final_model.summary())
    # Excluded layers for YOLOv3 / v4
force_excluded_layers = ['conv_big_object', 'conv_mid_object',
'conv_sm_object']
for layer in final_model.layers:
if type(layer) == Split:
basename = layer.name[:-8]
name = basename + "_conv_0"
force_excluded_layers.append(name)
force_excluded_layers += final_model.output_names
# Pruning trained model
pruned_model = prune(
model=final_model,
method='min_weight',
normalizer=args.normalizer,
criterion='L2',
granularity=args.pruning_granularity,
min_num_filters=args.min_num_filters,
threshold=args.pruning_threshold,
equalization_criterion=args.equalization_criterion,
excluded_layers=args.excluded_layers + force_excluded_layers)
if verbosity == 'DEBUG':
# Printing out pruned model summary
logger.debug("Model summary of the pruned model:")
logger.debug(pruned_model.summary())
pruning_ratio = pruned_model.count_params() / final_model.count_params()
logger.info("Pruning ratio (pruned model / original model): {}".format(
pruning_ratio
)
)
# Save the encrypted pruned model
save_model(pruned_model, args.output_file, args.key, save_format='.hdf5')
if results_dir is not None:
s_logger = status_logging.get_status_logger()
s_logger.kpi = {
"pruning_ratio": pruning_ratio,
"size": get_model_file_size(args.output_file),
"param_count": get_num_params(pruned_model)
}
s_logger.write(
message="Pruning ratio (pruned model / original model): {}".format(
pruning_ratio
)
)
def main(args=None):
"""Wrapper function for pruning."""
try:
# parse command line
args = parse_command_line(args)
run_pruning(args)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Pruning finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Pruning was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/scripts/prune.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple Stand-alone inference script for YOLO models trained using modulus."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import os
import keras.backend as K
import numpy as np
from nvidia_tao_tf1.cv.common.inferencer.inferencer import Inferencer
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.common.utils import check_tf_oom
from nvidia_tao_tf1.cv.yolo_v4.builders import eval_builder
from nvidia_tao_tf1.cv.yolo_v4.utils.model_io import load_model
from nvidia_tao_tf1.cv.yolo_v4.utils.spec_loader import load_experiment_spec
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level='INFO')
image_extensions = ['.jpg', '.jpeg', '.JPG', '.JPEG', '.png', '.PNG']
def build_command_line_parser(parser=None):
'''build parser.'''
if parser is None:
parser = argparse.ArgumentParser(description='Keras YOLO Inference Tool')
parser.add_argument('-m',
'--model_path',
type=str,
required=True,
help='Path to a TLT model or TensorRT engine.')
parser.add_argument('-i',
'--image_dir',
required=True,
type=str,
help='The path to input image or directory.')
parser.add_argument('-k',
'--key',
type=str,
default="",
                        help='Key to save or load a .tlt model. Must be present if -m is a TLT model.')
parser.add_argument('-e',
'--experiment_spec',
required=True,
type=str,
help='Path to an experiment spec file for training.')
parser.add_argument('-t',
'--threshold',
type=float,
default=0.3,
help='Confidence threshold for inference.')
parser.add_argument("-r",
"--results_dir",
type=str,
default=None,
help="Path to the files where the logs are stored.")
parser.add_argument('-b',
'--batch_size',
type=int,
required=False,
default=1,
help=argparse.SUPPRESS)
return parser
def parse_command_line(args):
"""Simple function to parse command line arguments."""
parser = build_command_line_parser()
return parser.parse_args(args)
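# Example invocation through the yolo_v4 entrypoint (paths are illustrative only):
#   yolo_v4 inference -e /path/to/spec.txt -m /path/to/model.tlt -k $KEY \
#       -i /path/to/images -r /path/to/results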
def keras_output_process_fn(inferencer, y_encoded):
"function to process keras model output."
# xmin
y_encoded[..., -4] = y_encoded[..., -4] * inferencer.model_input_width
# ymin
y_encoded[..., -3] = y_encoded[..., -3] * inferencer.model_input_height
# xmax
y_encoded[..., -2] = y_encoded[..., -2] * inferencer.model_input_width
# ymax
y_encoded[..., -1] = y_encoded[..., -1] * inferencer.model_input_height
return y_encoded
def trt_output_process_fn(inferencer, y_encoded):
"function to process TRT model output."
keep_k, boxes, scores, cls_id = y_encoded
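    # BatchedNMS plugin outputs: keep_k holds the number of valid detections per
    # image; boxes, scores and cls_id hold the kept boxes (normalized coordinates),
    # their confidences and class indices, padded up to the configured top_k.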
result = []
for idx, k in enumerate(keep_k.reshape(-1)):
mul = np.array([[inferencer.model_input_width,
inferencer.model_input_height,
inferencer.model_input_width,
inferencer.model_input_height]])
loc = boxes[idx].reshape(-1, 4)[:k] * mul
cid = cls_id[idx].reshape(-1, 1)[:k]
conf = scores[idx].reshape(-1, 1)[:k]
result.append(np.concatenate((cid, conf, loc), axis=-1))
return result
def inference(arguments):
    '''Run inference on a folder of images.'''
# Set up status logging
if arguments.results_dir:
if not os.path.exists(arguments.results_dir):
os.makedirs(arguments.results_dir)
status_file = os.path.join(arguments.results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=True,
verbosity=1,
append=True
)
)
s_logger = status_logging.get_status_logger()
s_logger.write(
status_level=status_logging.Status.STARTED,
message="Starting YOLOv4(-Tiny) inference."
)
config_path = arguments.experiment_spec
experiment_spec = load_experiment_spec(config_path)
K.clear_session() # Clear previous models from memory.
K.set_learning_phase(0)
classes = sorted({str(x).lower() for x in
experiment_spec.dataset_config.target_class_mapping.values()})
class_mapping = dict(zip(range(len(classes)), classes))
img_mean = experiment_spec.augmentation_config.image_mean
# Number of bits per pixel per channel, 0 defaults to 8
image_depth = int(experiment_spec.augmentation_config.output_depth) or 8
if experiment_spec.augmentation_config.output_channel == 3:
assert image_depth == 8, (
f"RGB images only support 8-bit depth, got {image_depth}, "
"please check `augmentation_config.output_depth` in spec file"
)
if img_mean:
img_mean = [img_mean['b'], img_mean['g'], img_mean['r']]
else:
img_mean = [103.939, 116.779, 123.68]
else:
if img_mean:
img_mean = [img_mean['l']]
elif image_depth == 8:
img_mean = [117.3786]
elif image_depth == 16:
# 117.3786 * 256
img_mean = [30048.9216]
else:
raise ValueError(
f"Unsupported image depth: {image_depth}, should be 8 or 16, "
"please check `augmentation_config.output_depth` in spec file"
)
if os.path.splitext(arguments.model_path)[1] in ['.tlt', '.hdf5']:
img_height = experiment_spec.augmentation_config.output_height
img_width = experiment_spec.augmentation_config.output_width
n_channels = experiment_spec.augmentation_config.output_channel
model = load_model(arguments.model_path,
experiment_spec,
(n_channels, img_height, img_width),
key=arguments.key)
# Load evaluation parameters
conf_th = experiment_spec.nms_config.confidence_threshold
iou_th = experiment_spec.nms_config.clustering_iou_threshold
top_k = experiment_spec.nms_config.top_k
nms_on_cpu = True
# Build evaluation model
model = eval_builder.build(
model, conf_th, iou_th, top_k, nms_on_cpu=nms_on_cpu
)
inferencer = Inferencer(keras_model=model,
batch_size=experiment_spec.eval_config.batch_size,
infer_process_fn=keras_output_process_fn,
class_mapping=class_mapping,
img_mean=img_mean,
threshold=arguments.threshold,
image_depth=image_depth)
print("Using TLT model for inference, setting batch size to the one in eval_config:",
experiment_spec.eval_config.batch_size)
else:
inferencer = Inferencer(trt_engine_path=arguments.model_path,
infer_process_fn=trt_output_process_fn,
batch_size=experiment_spec.eval_config.batch_size,
class_mapping=class_mapping,
img_mean=img_mean,
threshold=arguments.threshold,
image_depth=image_depth)
print("Using TensorRT engine for inference, setting batch size to the one in eval_config:",
experiment_spec.eval_config.batch_size)
print("If your engine maximum batch size is smaller, change eval_config in spec file!")
out_image_path = os.path.join(arguments.results_dir, "images_annotated")
out_label_path = os.path.join(arguments.results_dir, "labels")
os.makedirs(out_image_path, exist_ok=True)
os.makedirs(out_label_path, exist_ok=True)
inferencer.infer(arguments.image_dir, out_image_path, out_label_path)
if arguments.results_dir:
s_logger.write(
status_level=status_logging.Status.SUCCESS,
message="Inference finished successfully."
)
@check_tf_oom
def main(args=None):
"""Run the inference process."""
try:
args = parse_command_line(args)
inference(args)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Inference was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/scripts/inference.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple Stand-alone evaluate script for YOLO models trained using modulus."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import os
import sys
import keras.backend as K
from keras.utils.data_utils import OrderedEnqueuer
import numpy as np
import tensorflow as tf
from tqdm import trange
from nvidia_tao_tf1.cv.common.evaluator.ap_evaluator import APEvaluator
from nvidia_tao_tf1.cv.common.inferencer.inferencer import Inferencer
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.common.utils import check_tf_oom
from nvidia_tao_tf1.cv.yolo_v3.utils.tensor_utils import get_init_ops
from nvidia_tao_tf1.cv.yolo_v4.builders import eval_builder
from nvidia_tao_tf1.cv.yolo_v4.dataio.data_sequence import YOLOv4DataSequence
from nvidia_tao_tf1.cv.yolo_v4.dataio.tf_data_pipe import YOLOv4TFDataPipe
from nvidia_tao_tf1.cv.yolo_v4.utils.model_io import load_model
from nvidia_tao_tf1.cv.yolo_v4.utils.spec_loader import (
load_experiment_spec,
validation_labels_format
)
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level='INFO')
def build_command_line_parser(parser=None):
    '''Build the command line parser.'''
if parser is None:
parser = argparse.ArgumentParser(description='TLT YOLOv4 Evaluation Tool')
parser.add_argument('-m',
'--model_path',
                        help='Path to a YOLOv4 TLT model or TensorRT engine.',
required=True,
type=str)
parser.add_argument('-k',
'--key',
type=str,
default="",
help='Key to load a .tlt model.')
parser.add_argument('-e',
'--experiment_spec',
required=True,
type=str,
help='Experiment spec file for training and evaluation.')
parser.add_argument("-r",
"--results_dir",
type=str,
default=None,
help="Path to the files where the logs are stored.")
parser.add_argument('-i',
'--image_dir',
type=str,
required=False,
default=None,
help=argparse.SUPPRESS)
parser.add_argument('-l',
'--label_dir',
type=str,
required=False,
help=argparse.SUPPRESS)
parser.add_argument('-b',
'--batch_size',
type=int,
required=False,
default=1,
help=argparse.SUPPRESS)
return parser
def parse_command_line(args):
"""Simple function to parse command line arguments."""
parser = build_command_line_parser()
return parser.parse_args(args)
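# Example invocation through the yolo_v4 entrypoint (paths are illustrative only):
#   yolo_v4 evaluate -e /path/to/spec.txt -m /path/to/model.tlt -k $KEY -r /path/to/results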
def keras_output_process_fn(inferencer, y_encoded):
"function to process keras model output."
return y_encoded
def trt_output_process_fn(inferencer, y_encoded):
"function to process TRT model output."
keep_k, boxes, scores, cls_id = y_encoded
result = []
for idx, k in enumerate(keep_k.reshape(-1)):
loc = boxes[idx].reshape(-1, 4)[:k]
cid = cls_id[idx].reshape(-1, 1)[:k]
conf = scores[idx].reshape(-1, 1)[:k]
result.append(np.concatenate((cid, conf, loc), axis=-1))
return result
def evaluate(arguments):
    '''Run evaluation.'''
# Set up status logging
if arguments.results_dir:
if not os.path.exists(arguments.results_dir):
os.makedirs(arguments.results_dir)
status_file = os.path.join(arguments.results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=True,
verbosity=1,
append=True
)
)
s_logger = status_logging.get_status_logger()
s_logger.write(
status_level=status_logging.Status.STARTED,
message="Starting YOLOv4(-Tiny) evaluation."
)
config_path = arguments.experiment_spec
experiment_spec = load_experiment_spec(config_path)
val_labels_format = validation_labels_format(experiment_spec)
if experiment_spec.eval_config.visualize_pr_curve:
vis_path = os.path.dirname(arguments.model_path)
else:
vis_path = None
classes = sorted({str(x).lower() for x in
experiment_spec.dataset_config.target_class_mapping.values()})
ap_mode = experiment_spec.eval_config.average_precision_mode
matching_iou = experiment_spec.eval_config.matching_iou_threshold
matching_iou = matching_iou if matching_iou > 0 else 0.5
ap_mode_dict = {0: "sample", 1: "integrate"}
average_precision_mode = ap_mode_dict[ap_mode]
K.clear_session() # Clear previous models from memory.
evaluator = APEvaluator(len(classes),
conf_thres=experiment_spec.nms_config.confidence_threshold,
matching_iou_threshold=matching_iou,
average_precision_mode=average_precision_mode)
if os.path.splitext(arguments.model_path)[1] in ['.tlt', '.hdf5']:
K.set_learning_phase(0)
img_height = experiment_spec.augmentation_config.output_height
img_width = experiment_spec.augmentation_config.output_width
n_channels = experiment_spec.augmentation_config.output_channel
model = load_model(
arguments.model_path,
experiment_spec,
(n_channels, img_height, img_width),
key=arguments.key
)
# Load evaluation parameters
conf_th = experiment_spec.nms_config.confidence_threshold
iou_th = experiment_spec.nms_config.clustering_iou_threshold
top_k = experiment_spec.nms_config.top_k
nms_on_cpu = False
if val_labels_format == "tfrecords":
nms_on_cpu = True
# Build evaluation model
model = eval_builder.build(
model, conf_th, iou_th, top_k, nms_on_cpu=nms_on_cpu
)
model.summary()
inferencer = Inferencer(keras_model=model,
batch_size=experiment_spec.eval_config.batch_size,
infer_process_fn=keras_output_process_fn,
class_mapping=None,
threshold=experiment_spec.nms_config.confidence_threshold)
print("Using TLT model for inference, setting batch size to the one in eval_config:",
experiment_spec.eval_config.batch_size)
else:
# Works in python 3.6
cpu_cnt = os.cpu_count()
if cpu_cnt is None:
cpu_cnt = 1
session_config = tf.compat.v1.ConfigProto(
            device_count={'GPU': 0, 'CPU': cpu_cnt}
)
session = tf.Session(config=session_config)
# Pin TF to CPU to avoid TF & TRT CUDA context conflict
K.set_session(session)
inferencer = Inferencer(trt_engine_path=arguments.model_path,
infer_process_fn=trt_output_process_fn,
batch_size=experiment_spec.eval_config.batch_size,
class_mapping=None,
threshold=experiment_spec.nms_config.confidence_threshold)
print("Using TensorRT engine for inference, setting batch size to engine's one:",
inferencer.batch_size)
# Prepare labels
sess = K.get_session()
if val_labels_format == "tfrecords":
h_tensor = tf.constant(
experiment_spec.augmentation_config.output_height,
dtype=tf.int32
)
w_tensor = tf.constant(
experiment_spec.augmentation_config.output_width,
dtype=tf.int32
)
val_dataset = YOLOv4TFDataPipe(
experiment_spec,
label_encoder=None,
training=False,
h_tensor=h_tensor,
w_tensor=w_tensor,
sess=sess
)
num_samples = val_dataset.num_samples
num_steps = num_samples // experiment_spec.eval_config.batch_size
tr = trange(num_steps, file=sys.stdout)
sess.run(get_init_ops())
else:
eval_sequence = YOLOv4DataSequence(
experiment_spec.dataset_config,
experiment_spec.augmentation_config,
experiment_spec.eval_config.batch_size,
is_training=False,
encode_fn=None
)
enqueuer = OrderedEnqueuer(eval_sequence, use_multiprocessing=False)
enqueuer.start(workers=max(os.cpu_count() - 1, 1), max_queue_size=20)
output_generator = enqueuer.get()
tr = trange(len(eval_sequence), file=sys.stdout)
tr.set_description('Producing predictions')
gt_labels = []
pred_labels = []
# Loop over all batches.
for _ in tr:
# Generate batch.
if val_labels_format == "tfrecords":
batch_X, batch_labs = val_dataset.get_array()
else:
batch_X, batch_labs = next(output_generator)
y_pred = inferencer._predict_batch(batch_X)
gt_labels.extend(batch_labs)
conf_thres = experiment_spec.nms_config.confidence_threshold
for i in range(len(y_pred)):
y_pred_valid = y_pred[i][y_pred[i][:, 1] > conf_thres]
pred_labels.append(y_pred_valid)
results = evaluator(
gt_labels,
pred_labels,
verbose=True,
class_names=classes if experiment_spec.eval_config.visualize_pr_curve else None,
vis_path=vis_path
)
mean_average_precision, average_precisions = results
print("*******************************")
for i in range(len(average_precisions)):
print("{:<14}{:<6}{}".format(classes[i], 'AP', round(average_precisions[i], 5)))
print("{:<14}{:<6}{}".format('', 'mAP', round(mean_average_precision, 5)))
print("*******************************")
if arguments.results_dir:
s_logger.kpi.update({'mAP': float(mean_average_precision)})
s_logger.write(
status_level=status_logging.Status.SUCCESS,
message="Evaluation finished successfully."
)
@check_tf_oom
def main(args=None):
"""Run the evaluation process."""
try:
args = parse_command_line(args)
evaluate(args)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Evaluation was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/scripts/evaluate.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""TLT YOLOv4 entrypoint."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/entrypoint/__init__.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""TLT command line wrapper to invoke CLI scripts."""
import sys
from nvidia_tao_tf1.cv.common.entrypoint.entrypoint import launch_job
import nvidia_tao_tf1.cv.yolo_v4.scripts
def main():
"""Function to launch the job."""
launch_job(nvidia_tao_tf1.cv.yolo_v4.scripts, "yolo_v4", sys.argv[1:])
if __name__ == "__main__":
main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/entrypoint/yolo_v4.py |
tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/experiment_specs/__init__.py |
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to export a trained YOLO model to an ETLT file for deployment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/export/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class to export trained .tlt models to etlt file."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from keras import backend as K
from keras.models import Model
import numpy as np
import onnx
import onnx_graphsurgeon as onnx_gs
import tensorflow as tf
from nvidia_tao_tf1.core.export._onnx import keras_to_onnx
# Import quantization layer processing.
from nvidia_tao_tf1.core.export._quantized import (
check_for_quantized_layers,
process_quantized_layers,
)
from nvidia_tao_tf1.core.export._uff import _reload_model_for_inference
from nvidia_tao_tf1.core.templates.utils import swish
from nvidia_tao_tf1.cv.common.export.keras_exporter import KerasExporter as Exporter
from nvidia_tao_tf1.cv.yolo_v3.layers.yolo_anchor_box_layer import YOLOAnchorBox
from nvidia_tao_tf1.cv.yolo_v4.layers.bbox_postprocessing_layer import BBoxPostProcessingLayer
from nvidia_tao_tf1.cv.yolo_v4.layers.export_layers import BoxLayer, ClsLayer
from nvidia_tao_tf1.cv.yolo_v4.layers.split import Split
from nvidia_tao_tf1.cv.yolo_v4.utils.model_io import load_model
from nvidia_tao_tf1.cv.yolo_v4.utils.spec_loader import load_experiment_spec
CUSTOM_OBJ = {'YOLOAnchorBox': YOLOAnchorBox,
'BBoxPostProcessingLayer': BBoxPostProcessingLayer,
'BoxLayer': BoxLayer,
'ClsLayer': ClsLayer,
'Split': Split,
# use swish as a placeholder as keras2onnx does not support mish
# handle this later with onnx-gs
"mish": swish}
class YOLOv4Exporter(Exporter):
"""Exporter class to export a trained yolo model."""
def __init__(self, model_path=None,
key=None,
data_type="fp32",
strict_type=False,
experiment_spec_path="",
backend="onnx",
**kwargs):
"""Instantiate the yolo exporter to export a trained yolo .tlt model.
Args:
model_path(str): Path to the yolo model file.
key (str): Key to decode the model.
data_type (str): Backend data-type for the optimized TensorRT engine.
strict_type(bool): Apply TensorRT strict_type_constraints or not for INT8 mode.
experiment_spec_path (str): Path to yolo experiment spec file.
backend (str): Type of intermediate backend parser to be instantiated.
"""
super(YOLOv4Exporter, self).__init__(model_path=model_path,
key=key,
data_type=data_type,
strict_type=strict_type,
backend=backend,
**kwargs)
self.experiment_spec_path = experiment_spec_path
assert os.path.isfile(self.experiment_spec_path), \
"Experiment spec file not found at {}.".format(self.experiment_spec_path)
self.experiment_spec = None
def load_model(self):
"""Simple function to load the yolo Keras model."""
experiment_spec = load_experiment_spec(self.experiment_spec_path)
K.clear_session()
K.set_learning_phase(0)
img_channel = experiment_spec.augmentation_config.output_channel
img_height = experiment_spec.augmentation_config.output_height
img_width = experiment_spec.augmentation_config.output_width
model = load_model(
self.model_path,
experiment_spec,
(img_channel, img_height, img_width),
key=self.key
)
last_layer_out = model.layers[-1].output
r_boxes = BoxLayer(name="box")(last_layer_out)
r_cls = ClsLayer(name="cls")(last_layer_out)
model = Model(inputs=model.inputs, outputs=[r_boxes, r_cls])
if check_for_quantized_layers(model):
model, self.tensor_scale_dict = process_quantized_layers(
model, self.backend,
calib_cache=None,
calib_json=None)
# plugin nodes will have different names in TRT
nodes = list(self.tensor_scale_dict.keys())
for k in nodes:
if k.find('Input') != -1:
self.tensor_scale_dict['Input'] = self.tensor_scale_dict.pop(k)
# ZeroPadding is fused with its following conv2d/depthwiseconv2d, collapse them.
padding_nodes = []
for k in self.tensor_scale_dict:
if '/Pad' in k:
# this is a ZeroPadding node
padding_nodes.append(k)
for n in padding_nodes:
self.tensor_scale_dict.pop(n)
self.experiment_spec = experiment_spec
img_mean = experiment_spec.augmentation_config.image_mean
# Number of bits per pixel per channel
image_depth = int(experiment_spec.augmentation_config.output_depth) or 8
self.image_depth = image_depth
if experiment_spec.augmentation_config.output_channel == 3:
assert image_depth == 8, (
f"RGB images only support 8-bit depth, got {image_depth}, "
"please check `augmentation_config.output_depth` in spec file"
)
if img_mean:
self.img_mean = [img_mean['b'], img_mean['g'], img_mean['r']]
else:
self.img_mean = [103.939, 116.779, 123.68]
else:
if img_mean:
self.img_mean = [img_mean['l']]
elif image_depth == 8:
self.img_mean = [117.3786]
elif image_depth == 16:
# 117.3786 * 256
self.img_mean = [30048.9216]
else:
raise ValueError(
f"Unsupported image depth: {image_depth}, should be 8 or 16, "
"please check `augmentation_config.output_depth` in spec file"
)
# @zeyuz: must reload so the tensor name won't have _1 suffixed.
model = _reload_model_for_inference(model, custom_objects=CUSTOM_OBJ)
return model
def save_exported_file(self, model, output_file_name):
"""Save the exported model file.
This routine converts a keras model to onnx/uff model
based on the backend the exporter was initialized with.
Args:
model (keras.model.Model): Decoded keras model to be exported.
output_file_name (str): Path to the output file.
Returns:
output_file_name (str): Path to the output ONNX file.
"""
if self.backend == "onnx":
keras_to_onnx(model,
output_file_name,
custom_objects=CUSTOM_OBJ,
target_opset=self.target_opset)
tf.reset_default_graph()
onnx_model = onnx.load(output_file_name)
onnx_model = self.node_process(onnx_model)
os.remove(output_file_name)
onnx.save(onnx_model, output_file_name)
return output_file_name
# @zeyuz: UFF export not supported in YOLOv4 due to Mish activation.
raise NotImplementedError("Invalid backend provided. {}".format(self.backend))
def set_input_output_node_names(self):
"""Set input output node names."""
self.output_node_names = ["BatchedNMS"]
self.input_node_names = ["Input"]
def process_nms_node(self, onnx_graph):
"""Process the NMS ONNX node."""
spec = self.experiment_spec
box_data = self._get_onnx_node_by_name(onnx_graph, 'box/concat_concat').outputs[0]
cls_data = self._get_onnx_node_by_name(onnx_graph, 'cls/mul').outputs[0]
nms_out_0 = onnx_gs.Variable(
"BatchedNMS",
dtype=np.int32
)
nms_out_1 = onnx_gs.Variable(
"BatchedNMS_1",
dtype=np.float32
)
nms_out_2 = onnx_gs.Variable(
"BatchedNMS_2",
dtype=np.float32
)
nms_out_3 = onnx_gs.Variable(
"BatchedNMS_3",
dtype=np.float32
)
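        # Output order follows the TensorRT BatchedNMS plugin convention:
        # BatchedNMS (num detections), BatchedNMS_1 (boxes), BatchedNMS_2 (scores),
        # BatchedNMS_3 (class IDs).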
nms_attrs = dict()
nms_attrs["shareLocation"] = 1
nms_attrs["backgroundLabelId"] = -1
nms_attrs["scoreThreshold"] = spec.nms_config.confidence_threshold
nms_attrs["iouThreshold"] = spec.nms_config.clustering_iou_threshold
nms_attrs["topK"] = 2*spec.nms_config.top_k
nms_attrs["keepTopK"] = spec.nms_config.top_k
nms_attrs["numClasses"] = len(
{str(x) for x in spec.dataset_config.target_class_mapping.values()}
)
nms_attrs["clipBoxes"] = 1
nms_attrs["isNormalized"] = 1
nms_attrs["scoreBits"] = spec.nms_config.infer_nms_score_bits
nms_plugin = onnx_gs.Node(
op="BatchedNMSDynamic_TRT",
name="BatchedNMS_N",
inputs=[box_data, cls_data],
outputs=[nms_out_0, nms_out_1, nms_out_2, nms_out_3],
attrs=nms_attrs
)
onnx_graph.nodes.append(nms_plugin)
onnx_graph.outputs = nms_plugin.outputs
onnx_graph.cleanup().toposort()
def _sigmoid_to_tanh_softplus(self, onnx_graph, node):
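        """Rewrite a Sigmoid node as Tanh(Softplus(x)), so x * sigmoid(x) becomes mish(x)."""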
softplus_output = onnx_gs.Variable(
"softplus_output_" + node.name,
dtype=np.float32
)
softplus_node = onnx_gs.Node(
op="Softplus",
name="softplus_" + node.name,
inputs=node.inputs,
outputs=[softplus_output]
)
tanh_node = onnx_gs.Node(
op="Tanh",
name="tanh_" + node.name,
inputs=[softplus_output],
outputs=[node.outputs[0]]
)
node.outputs.clear()
onnx_graph.nodes.append(softplus_node)
onnx_graph.nodes.append(tanh_node)
onnx_graph.cleanup().toposort()
def _check_mish(self, node):
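        """Return True if `node` is a Mul computing x * sigmoid(x), i.e. a mish placeholder."""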
if (
node.op != "Mul" or
len(node.inputs) != 2 or
len(node.inputs[0].inputs) != 1 or
len(node.inputs[1].inputs) != 1
):
return False
inp1 = node.inputs[0].inputs[0]
inp2 = node.inputs[1].inputs[0]
if inp1.op == "Sigmoid":
if inp1.i() == inp2:
return True
if inp2.op == "Sigmoid":
if inp2.i() == inp1:
return True
return False
def replace_activations(self, onnx_graph):
"""Replace swish activations with mish activations(if any)."""
nodes = [n for n in onnx_graph.nodes if n.op == "Sigmoid"]
mish_nodes = []
for n in nodes:
if len(n.outputs[0].outputs) != 1:
continue
if self._check_mish(n.o()):
mish_nodes.append(n)
for n in mish_nodes:
self._sigmoid_to_tanh_softplus(onnx_graph, n)
def node_process(self, yolo_graph):
"""Manipulating the yolo dynamic graph to make it compatible with TRT.
Args:
yolo_graph (onnx_gs.DynamicGraph): Dynamic graph of the yolo model from the TF Proto
file.
Returns:
yolo_graph (onnx_gs.DynamicGraph): Post processed dynamic graph which is ready to be
serialized as a ONNX file.
"""
graph = onnx_gs.import_onnx(yolo_graph)
self.process_nms_node(graph)
self.replace_activations(graph)
self._fix_onnx_paddings(graph)
return onnx_gs.export_onnx(graph)
def get_class_labels(self):
"""Get list of class labels to serialize to a labels.txt file."""
classes = sorted({str(x) for x in
self.experiment_spec.dataset_config.target_class_mapping.values()})
return classes
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v4/export/yolov4_exporter.py |
tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/mask_rcnn/__init__.py |
|
"""MaskRCNN entry point."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from maglev_sdk.docker_container.entrypoint import main
if __name__ == '__main__':
main('mask_rcnn', 'nvidia_tao_tf1/cv/mask_rcnn/scripts')
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/mask_rcnn/docker/mask_rcnn.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MaskRCNN custom layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow import keras
from nvidia_tao_tf1.cv.mask_rcnn.ops import roi_ops
class MultilevelProposal(keras.layers.Layer):
'''A custom Keras layer to generate RoIs.'''
def __init__(self,
rpn_pre_nms_topn=2000,
rpn_post_nms_topn=1000,
rpn_nms_threshold=0.7,
rpn_min_size=0,
bbox_reg_weights=None,
use_batched_nms=True,
**kwargs):
'''Init function.'''
self.rpn_pre_nms_topn = rpn_pre_nms_topn
self.rpn_post_nms_topn = rpn_post_nms_topn
self.rpn_nms_threshold = rpn_nms_threshold
self.rpn_min_size = rpn_min_size
self.bbox_reg_weights = bbox_reg_weights
self.use_batched_nms = use_batched_nms
super(MultilevelProposal, self).__init__(**kwargs)
def call(self, inputs):
"""Proposes RoIs given a group of candidates from different FPN levels."""
scores_outputs = inputs[0:5]
box_outputs = inputs[5:10]
anchor_boxes = inputs[10:15]
image_info = inputs[-1]
# turn into dict
k_order = list(range(2, 7))
scores_outputs = dict(zip(k_order, scores_outputs))
box_outputs = dict(zip(k_order, box_outputs))
anchor_boxes = dict(zip(k_order, anchor_boxes))
if isinstance(self.rpn_pre_nms_topn, tuple):
self.rpn_pre_nms_topn = self.rpn_pre_nms_topn[0]
if isinstance(self.rpn_post_nms_topn, tuple):
self.rpn_post_nms_topn = self.rpn_post_nms_topn[0]
return roi_ops.multilevel_propose_rois(
scores_outputs=scores_outputs,
box_outputs=box_outputs,
anchor_boxes=anchor_boxes,
image_info=image_info,
rpn_pre_nms_topn=self.rpn_pre_nms_topn,
rpn_post_nms_topn=self.rpn_post_nms_topn,
rpn_nms_threshold=self.rpn_nms_threshold,
rpn_min_size=self.rpn_min_size,
bbox_reg_weights=self.bbox_reg_weights,
use_batched_nms=self.use_batched_nms
)
def get_config(self):
'''Keras layer get config.'''
config = {
'rpn_pre_nms_topn': self.rpn_pre_nms_topn,
'rpn_post_nms_topn': self.rpn_post_nms_topn,
'rpn_nms_threshold': self.rpn_nms_threshold,
'rpn_min_size': self.rpn_min_size,
'bbox_reg_weights': self.bbox_reg_weights,
'use_batched_nms': self.use_batched_nms,
}
base_config = super(MultilevelProposal, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/mask_rcnn/layers/multilevel_proposal_layer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MaskRCNN custom layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow import keras
class MaskInput(keras.layers.InputLayer):
    '''A custom Keras layer for mask input.'''
def __init__(self,
input_shape=None,
batch_size=None,
dtype=None,
input_tensor=None,
sparse=False,
name="mask_input",
ragged=False,
**kwargs):
'''Init function.'''
super(MaskInput, self).__init__(input_shape=input_shape,
batch_size=batch_size,
dtype=dtype,
input_tensor=input_tensor,
sparse=sparse,
name=name,
ragged=ragged,
**kwargs)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/mask_rcnn/layers/mask_input_layer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MaskRCNN."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow import keras
from nvidia_tao_tf1.cv.mask_rcnn.ops import training_ops
class BoxTargetEncoder(keras.layers.Layer):
'''A custom Keras layer to encode box targets.'''
def __init__(self,
bbox_reg_weights=None,
**kwargs):
'''Init function.'''
self.bbox_reg_weights = bbox_reg_weights
super(BoxTargetEncoder, self).__init__(**kwargs)
def call(self, inputs):
"""Generate box target."""
boxes, gt_boxes, gt_labels = inputs
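        # Encode the matched ground-truth boxes as regression deltas relative to the
        # sampled proposals, scaled by bbox_reg_weights.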
return training_ops.encode_box_targets(boxes, gt_boxes, gt_labels, self.bbox_reg_weights)
def get_config(self):
'''Keras layer get config.'''
config = {
'bbox_reg_weights': self.bbox_reg_weights,
}
base_config = super(BoxTargetEncoder, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/mask_rcnn/layers/box_target_encoder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MaskRCNN custom layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow import keras
class ClassInput(keras.layers.InputLayer):
'''A custom Keras layer for class label input.'''
def __init__(self,
input_shape=None,
batch_size=None,
dtype=None,
input_tensor=None,
sparse=False,
name="class_input",
ragged=False,
**kwargs):
'''Init function.'''
super(ClassInput, self).__init__(input_shape=input_shape,
batch_size=batch_size,
dtype=dtype,
input_tensor=input_tensor,
sparse=sparse,
name=name,
ragged=ragged,
**kwargs)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/mask_rcnn/layers/class_input_layer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MaskRCNN custom layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow import keras
from nvidia_tao_tf1.cv.mask_rcnn.ops import postprocess_ops
class GPUDetections(keras.layers.Layer):
'''A Keras layer to generate final prediction output.'''
def __init__(self,
pre_nms_num_detections=1000,
post_nms_num_detections=100,
nms_threshold=0.5,
bbox_reg_weights=(10., 10., 5., 5.),
**kwargs):
'''Init function.'''
self.pre_nms_num_detections = pre_nms_num_detections
self.post_nms_num_detections = post_nms_num_detections
self.nms_threshold = nms_threshold
self.bbox_reg_weights = bbox_reg_weights
super(GPUDetections, self).__init__(**kwargs)
def call(self, inputs):
"""Generate the final detections given the model outputs (GPU version)."""
class_outputs, box_outputs, anchor_boxes, image_info = inputs
return postprocess_ops.generate_detections_gpu(
class_outputs=class_outputs,
box_outputs=box_outputs,
anchor_boxes=anchor_boxes,
image_info=image_info,
pre_nms_num_detections=self.pre_nms_num_detections,
post_nms_num_detections=self.post_nms_num_detections,
nms_threshold=self.nms_threshold,
bbox_reg_weights=self.bbox_reg_weights
)
def get_config(self):
'''Keras layer get config.'''
config = {
'pre_nms_num_detections': self.pre_nms_num_detections,
'post_nms_num_detections': self.post_nms_num_detections,
'nms_threshold': self.nms_threshold,
'bbox_reg_weights': self.bbox_reg_weights,
}
base_config = super(GPUDetections, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/mask_rcnn/layers/gpu_detection_layer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MaskRCNN custom layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow import keras
class MaskPostprocess(keras.layers.Layer):
'''A custom Keras layer to generate processed mask output.'''
def __init__(self,
batch_size,
num_rois,
mrcnn_resolution,
num_classes,
is_gpu_inference,
**kwargs):
'''Init function.'''
self.batch_size = batch_size
self.num_rois = num_rois
self.mrcnn_resolution = mrcnn_resolution
self.num_classes = num_classes
self.is_gpu_inference = is_gpu_inference
super(MaskPostprocess, self).__init__(**kwargs)
def call(self, inputs):
"""Proposes RoIs given a group of candidates from different FPN levels."""
mask_outputs, class_indices = inputs
if not self.is_gpu_inference:
class_indices = tf.cast(class_indices, dtype=tf.int32)
mask_outputs = tf.reshape(
mask_outputs,
[-1, self.num_rois, self.num_classes, self.mrcnn_resolution, self.mrcnn_resolution]
)
with tf.name_scope('masks_post_processing'):
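            # For each RoI keep only the mask channel of its predicted class,
            # reducing [batch, num_rois, num_classes, H, W] to [batch, num_rois, H, W].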
indices_dtype = tf.float32 if self.is_gpu_inference else tf.int32
if self.batch_size == 1:
indices = tf.reshape(
tf.reshape(
tf.range(self.num_rois, dtype=indices_dtype),
[self.batch_size, self.num_rois, 1]
) * self.num_classes + tf.expand_dims(class_indices, axis=-1),
[self.batch_size, -1]
)
indices = tf.cast(indices, dtype=tf.int32)
mask_outputs = tf.gather(
tf.reshape(mask_outputs,
[self.batch_size, -1, self.mrcnn_resolution, self.mrcnn_resolution]),
indices,
axis=1
)
mask_outputs = tf.squeeze(mask_outputs, axis=1)
mask_outputs = tf.reshape(
mask_outputs,
[self.batch_size, self.num_rois, self.mrcnn_resolution, self.mrcnn_resolution])
else:
batch_indices = (
tf.expand_dims(tf.range(self.batch_size, dtype=indices_dtype), axis=1) *
tf.ones([1, self.num_rois], dtype=indices_dtype)
)
mask_indices = (
tf.expand_dims(tf.range(self.num_rois, dtype=indices_dtype), axis=0) *
tf.ones([self.batch_size, 1], dtype=indices_dtype)
)
gather_indices = tf.stack([batch_indices, mask_indices, class_indices],
axis=2)
if self.is_gpu_inference:
gather_indices = tf.cast(gather_indices, dtype=tf.int32)
mask_outputs = tf.gather_nd(mask_outputs, gather_indices)
return mask_outputs
def get_config(self):
'''Keras layer get config.'''
config = {
'batch_size': self.batch_size,
'num_rois': self.num_rois,
'mrcnn_resolution': self.mrcnn_resolution,
'num_classes': self.num_classes,
'is_gpu_inference': self.is_gpu_inference
}
base_config = super(MaskPostprocess, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/mask_rcnn/layers/mask_postprocess_layer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MaskRCNN custom layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow import keras
class InfoInput(keras.layers.InputLayer):
'''A custom Keras layer for image info input.'''
def __init__(self,
input_shape=None,
batch_size=None,
dtype=None,
input_tensor=None,
sparse=False,
name="info_input",
ragged=False,
**kwargs):
'''Init function.'''
super(InfoInput, self).__init__(input_shape=input_shape,
batch_size=batch_size,
dtype=dtype,
input_tensor=input_tensor,
sparse=sparse,
name=name,
ragged=ragged,
**kwargs)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/mask_rcnn/layers/info_input_layer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MaskRCNN custom layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow import keras
from nvidia_tao_tf1.cv.mask_rcnn.models import anchors
class AnchorLayer(keras.layers.Layer):
'''A custom Keras layer to generate anchors.'''
def __init__(self,
min_level,
max_level,
num_scales,
aspect_ratios,
anchor_scale,
**kwargs):
'''Init function.'''
self.min_level = min_level
self.max_level = max_level
self.num_scales = num_scales
self.aspect_ratios = aspect_ratios
self.anchor_scale = anchor_scale
super(AnchorLayer, self).__init__(**kwargs)
def call(self, inputs):
"""Get unpacked multiscale Mask-RCNN anchors."""
_, _, image_height, image_width = inputs.get_shape().as_list()
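        # Input is assumed to be in NCHW layout, so dims 2 and 3 give the image
        # height and width used to build the multi-level anchor grid.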
all_anchors = anchors.Anchors(
self.min_level, self.max_level,
self.num_scales, self.aspect_ratios,
self.anchor_scale,
(image_height, image_width))
return all_anchors.get_unpacked_boxes()
def get_config(self):
'''Keras layer get config.'''
config = {
'min_level': self.min_level,
'max_level': self.max_level,
'num_scales': self.num_scales,
'aspect_ratios': self.aspect_ratios,
'anchor_scale': self.anchor_scale
}
base_config = super(AnchorLayer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/mask_rcnn/layers/anchor_layer.py |
"""Module containing custom layers for MaskRCNN.""" | tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/mask_rcnn/layers/__init__.py |