python_code (string, 0-679k chars) | repo_name (string, 9-41 chars) | file_path (string, 6-149 chars) |
---|---|---|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default dataloader for FpeNet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
import os
import six
import tensorflow as tf
from nvidia_tao_tf1.blocks.dataloader import DataLoader
import nvidia_tao_tf1.core as tao_core
from nvidia_tao_tf1.core import distribution
from nvidia_tao_tf1.core.processors import ColorTransform
from nvidia_tao_tf1.core.processors import SpatialTransform
from nvidia_tao_tf1.core.processors.augment.color import get_random_color_transformation_matrix
from nvidia_tao_tf1.core.processors.augment.spatial import get_random_spatial_transformation_matrix
from nvidia_tao_tf1.cv.core.augment import RandomBlur
from nvidia_tao_tf1.cv.core.augment import RandomGamma
from nvidia_tao_tf1.cv.core.augment import RandomShift
# tf.compat.v1.enable_eager_execution()
def _lrange(*args, **kwargs):
"""Used for Python 3 compatibility since range() no longer returns a list."""
return list(range(*args, **kwargs))
class FpeNetDataloader(DataLoader):
"""
Dataloader with online augmentation for Fpe datasets.
The dataloader reads labels and frame id from TFRecord files and compiles
image and ground truth tensors used in training and validation.
"""
ITERATOR_INIT_OP_NAME = "iterator_init"
@tao_core.coreobject.save_args
def __init__(self, batch_size, image_info, dataset_info, kpiset_info,
augmentation_info, num_keypoints, **kwargs):
"""
Instantiate the dataloader.
Args:
batch_size (int): Size of minibatch.
image_info (dict): Basic information of input images.
dataset_info (dict): Basic information of datasets used for training.
kpiset_info (dict): Basic information of KPI set.
augmentation_info (dict): Parameters information for augmentation.
num_keypoints (int): Number of facial keypoints.
"""
super(FpeNetDataloader, self).__init__(**kwargs)
self.batch_size = batch_size
# Get data information from experiment specs.
# Image information.
self.image_width = image_info['image']['width']
self.image_height = image_info['image']['height']
self.image_channel = image_info['image']['channel']
# Dataset information.
self.image_extension = dataset_info['image_extension']
self.root_path = dataset_info['root_path']
self.tfrecords_directory_path = dataset_info['tfrecords_directory_path']
# Occlusion specific sets
self.no_occ_masksets = dataset_info['no_occlusion_masking_sets']
self.tfrecords_set_id_train = dataset_info['tfrecords_set_id_train']
self.tfrecord_folder_name = dataset_info['tfrecord_folder_name']
self.file_name = dataset_info['tfrecord_file_name']
# validation info
self.tfrecords_set_id_val = dataset_info['tfrecords_set_id_val']
# KPI testing dataset information.
self.tfrecords_set_id_kpi = kpiset_info['tfrecords_set_id_kpi']
self.num_keypoints = num_keypoints
# assert self.num_keypoints in [68, 80, 104], \
# "Expect number of keypoints one of 68, 80 or 104"
# Augmentation info
self.augmentation_config = build_augmentation_config(augmentation_info)
self.enable_online_augmentation = self.augmentation_config[
"enable_online_augmentation"]
self.enable_occlusion_augmentation = self.augmentation_config[
"enable_occlusion_augmentation"]
self.enable_resize_augmentation = self.augmentation_config[
"enable_resize_augmentation"]
self.augmentation_resize_scale = self.augmentation_config[
"augmentation_resize_scale"]
self.augmentation_resize_probability = self.augmentation_config[
"augmentation_resize_probability"]
self.patch_probability = self.augmentation_config["patch_probability"]
self.size_to_image_ratio = self.augmentation_config["size_to_image_ratio"]
self.mask_aug_patch = self.augmentation_config["mask_augmentation_patch"]
# Flipping augmentation
if self.augmentation_config['modulus_spatial_augmentation']['hflip_probability'] != 0:
assert self.num_keypoints in [68, 80, 104], \
("Horizontal flip augmentation can only be applied to 68, 80, 104 face landmarks."
"Please set hflip_probability to be 0.0")
self._flip_lm_ind_map = self._get_flip_landmark_mapping(num_keypoints=self.num_keypoints)
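# Build the augmentation processors (spatial, color, blur, gamma and bbox-shift ops) once
# at init; the spatial and gamma ops are applied per sample in _parse_records.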
frame_shape = [self.image_height, self.image_width, self.image_channel]
frame_shape = list(map(float, frame_shape))
self._stm_op, self._ctm_op, self._blur_op, \
self._gamma_op, self._shift_op = \
get_transformation_ops(self.augmentation_config, frame_shape)
# Get the proto parser.
self._proto_parser = self._tfrecord_parser(self.num_keypoints)
def __call__(self, repeat=True, phase='training'):
"""
Get input images and ground truth labels as tensors for training and validation.
Also returns the total number of samples across the selected datasets.
Args:
repeat (bool): Whether the dataset can be looped over multiple times or only once.
phase (str): Current phase, one of 'training', 'validation' or 'kpi_testing'.
Returns:
images (Tensor): Decoded input images of shape (NCHW).
ground_truth_labels (list): Two tensors: keypoints of shape (batch_size, num_keypoints, 2)
and occlusion masks of shape (batch_size, num_keypoints).
num_samples (int): Total number of loaded data points.
masking_occ_info (Tensor): Per-sample flags of shape (batch_size,) marking samples from
sets where occlusion masking is disabled.
face_bbox (Tensor): Face bounding boxes, returned only for kpi data.
image_names (Tensor): Image names, returned only for kpi data.
"""
# load and encode tfrecords.
records, num_samples = self._loading_dataset(repeat=repeat, phase=phase)
# Generate input images and ground truth labels.
images, ground_truth_labels, masking_occ_info, face_bbox, \
image_names = self._parse_records(records,
self.num_keypoints, phase)
if phase == 'kpi_testing':
return images, ground_truth_labels, num_samples, \
masking_occ_info, face_bbox, image_names
return images, ground_truth_labels, num_samples, \
masking_occ_info
def _loading_dataset(self, repeat, phase):
"""Get TFRecordsIterator for a given set of TFRecord files.
Args:
repeat (bool): Whether the dataset can be looped over multiple times or only once.
phase (str): Current phase, one of 'training', 'validation' or 'kpi_testing'.
Returns:
records (Dict<tf.Tensor>): Dict of tensors representing a batch of samples.
num_samples: Number of samples in the training/validation dataset.
"""
tfrecords_filename_list = []
tf_folder_name = self.tfrecord_folder_name
file_name = self.file_name
if phase == 'kpi_testing':
set_ids = self.tfrecords_set_id_kpi.split(' ')
elif phase == 'validation':
set_ids = self.tfrecords_set_id_val.split(' ')
elif phase == 'training':
set_ids = self.tfrecords_set_id_train.split(' ')
else:
raise NameError("Invalid phase")
assert len(set_ids) != 0, \
'Expects at least one dataset id in experiment_spec.'
for set_id in set_ids:
recordrootpath = self.tfrecords_directory_path
folder_name = os.path.join(recordrootpath, set_id, tf_folder_name)
tfrecord_filename = [os.path.join(folder_name, file_name)]
tfrecords_filename_list.extend(tfrecord_filename)
# Check validity of each file.
for filename in tfrecords_filename_list:
assert tf.data.TFRecordDataset(filename), \
('Expects each file to be valid!', filename)
# Print number of files
num_samples = 0
for filename in tfrecords_filename_list:
num_samples_set = sum(1 for _ in tf.python_io.tf_record_iterator(filename))
num_samples += num_samples_set
print(filename+': '+str(num_samples_set))
print("Total Samples: {}".format(num_samples))
# Create different iterators based on different phases.
if phase == 'kpi_testing':
shuffle_buffer_size = 0
shuffle = False
repeat = True
else:
shuffle_buffer_size = num_samples
shuffle = True
dataset = tf.data.TFRecordDataset(
tfrecords_filename_list,
num_parallel_reads=multiprocessing.cpu_count()
)
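# Pipeline: shard across workers -> shuffle -> optionally repeat -> parse protos ->
# batch -> load and decode images -> prefetch.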
# Shard dataset in multi-gpu cases
rank = distribution.get_distributor().rank()
size = distribution.get_distributor().size()
dataset = dataset.shard(size, rank)
if shuffle:
dataset = dataset.shuffle(
buffer_size=shuffle_buffer_size, reshuffle_each_iteration=True
)
if repeat:
dataset = dataset.repeat()
dataset = dataset.map(self._proto_parser, num_parallel_calls=multiprocessing.cpu_count())
dataset = dataset.batch(self.batch_size, drop_remainder=True)
dataset = dataset.map(self._load_and_decode,
num_parallel_calls=multiprocessing.cpu_count())
dataset = dataset.prefetch(3)
iterator = tf.compat.v1.data.Iterator.from_structure(
dataset.output_types, dataset.output_shapes
)
iterator_init_op = iterator.make_initializer(dataset)
tf.compat.v1.add_to_collection(
self.ITERATOR_INIT_OP_NAME, iterator_init_op
)
# Pull the records from tensorflow dataset.
records = iterator.get_next()
return records, num_samples
@staticmethod
def _tfrecord_parser(num_keypoints):
"""
Load and set up Modulus TFRecord features parsers.
Args:
num_keypoints (int): Number of keypoints.
Returns:
A dict of tensors with the same keys as the features dict, and dense tensor.
"""
# Processor for parsing meta `features`
features = {
'train/image_frame_name': tf.FixedLenFeature([], dtype=tf.string),
'train/image_frame_width': tf.FixedLenFeature([], dtype=tf.int64),
'train/image_frame_height': tf.FixedLenFeature([], dtype=tf.int64),
'train/facebbx_x': tf.FixedLenFeature([], dtype=tf.int64),
'train/facebbx_y': tf.FixedLenFeature([], dtype=tf.int64),
'train/facebbx_w': tf.FixedLenFeature([], dtype=tf.int64),
'train/facebbx_h': tf.FixedLenFeature([], dtype=tf.int64),
'train/landmarks': tf.FixedLenFeature([num_keypoints * 2], dtype=tf.float32),
'train/landmarks_occ': tf.FixedLenFeature([num_keypoints], dtype=tf.int64)
}
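# 'train/landmarks' holds the keypoint coordinates flattened as [x0, y0, x1, y1, ...] in
# image pixels; 'train/landmarks_occ' holds one flag per keypoint (1 = occluded, 0 = visible).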
proto_parser = tao_core.processors.ParseExampleProto(features=features, single=True)
return proto_parser
def _load_and_decode(self, records):
"""
Load and decode images.
Args:
records (tf.Tensor): Records from dataset to process.
Returns:
records (tf.Tensor): Records contains loaded images.
"""
file_loader = tao_core.processors.LoadFile(prefix=self.root_path)
train_frames = []
train_kpts = []
for index in range(self.batch_size):
image_frame_name = records['train/image_frame_name'][index]
image_frame = self._read_image_frame(file_loader, image_frame_name)
cropped_face, kpts_norm = self._crop_image(image_frame,
records['train/facebbx_x'][index],
records['train/facebbx_y'][index],
records['train/facebbx_w'][index],
records['train/facebbx_h'][index],
records['train/landmarks'][index],
self.image_height,
self.image_width,
channels=self.image_channel,
num_keypoints=self.num_keypoints)
train_frames.append(cropped_face)
train_kpts.append(kpts_norm)
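# Helper to stack the per-sample tensors into a batch; the constant fallback only
# triggers when the list is empty (i.e. a batch size of 0).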
def _stack_frames(frames):
if len(frames) > 0:
return tf.stack(frames, 0)
return tf.constant(0, shape=[self.batch_size, 0])
records.update({
'train/cropped_face': _stack_frames(train_frames),
'train/kpts_norm': _stack_frames(train_kpts)
})
return records
@staticmethod
def _crop_image(image,
facebbox_x,
facebbox_y,
facebbox_width,
facebbox_height,
landmarks,
target_width,
target_height,
channels=3,
num_keypoints=80):
"""
Crop the face bounding box from the image and scale the keypoints to the target resolution.
Args:
image (Tensor): Input image tensor.
facebbox_x (scalar Tensor): top-left X pixel location of face bounding box.
facebbox_y (scalar Tensor): top-left Y pixel location of face bounding box.
facebbox_width (scalar Tensor): width of face bounding box.
facebbox_height (scalar Tensor): height of face bounding box.
landmarks (Tensor): Input keypoint (x,y) locations [num_keypoints X 2]
target_width (int): Target width of bounding box.
target_height (int): Target height of bounding box.
channels (int): Number of channels in image.
num_keypoints (int): Number of keypoints.
Returns:
image (Tensor): Output cropped image, resized to the target resolution and transposed to channels-first (CHW).
kpts_target (Tensor): image keypoints after cropping and scaling.
"""
kpts = landmarks[:2 * num_keypoints]
kpts = tf.cast(kpts, dtype=tf.int32)
x = tf.cast(facebbox_x, dtype=tf.int32)
y = tf.cast(facebbox_y, dtype=tf.int32)
h = tf.cast(facebbox_height, dtype=tf.int32)
w = tf.cast(facebbox_width, dtype=tf.int32)
img = tf.image.crop_to_bounding_box(image, y, x, h, w)
img_shape = tf.stack([h, w, channels])
image = tf.reshape(img, img_shape)
image = tf.image.resize(image,
(target_height, target_width),
method=tf.image.ResizeMethod.BILINEAR)
# make it channel first (channel, height, width)
image = tf.transpose(image, (2, 0, 1))
kpts_shape = tf.stack([num_keypoints, 2])
kpts_norm = tf.reshape(kpts, kpts_shape)
kpts_x = tf.cast(kpts_norm[:, 0], dtype=tf.float32)
kpts_y = tf.cast(kpts_norm[:, 1], dtype=tf.float32)
x = tf.cast(x, dtype=tf.float32)
y = tf.cast(y, dtype=tf.float32)
w = tf.cast(w, dtype=tf.float32)
h = tf.cast(h, dtype=tf.float32)
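# Normalize keypoints to [0, 1] relative to the face box, then scale them to the target resolution.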
kpts_norm_x = (kpts_x - x) / w
kpts_norm_y = (kpts_y - y) / h
kpts_x_target = kpts_norm_x * target_width
kpts_y_target = kpts_norm_y * target_height
kpts_target = tf.stack([kpts_x_target, kpts_y_target], axis=1)
return image, kpts_target
def _parse_records(self, records, num_keypoints, phase='validation'):
"""
Return generators for input image and output target tensors.
Args:
records (Dict<tf.Tensor>): Dict of tf.Tensor representing training samples.
num_keypoints (int): Number of keypoints.
phase (string): training, validation, kpi_testing.
Returns:
images (Tensor): 4D image tensors with shape (NCHW).
labels (list): Two tensors: keypoints of shape (batch_size, num_keypoints, 2) and occlusion masks of shape (batch_size, num_keypoints).
"""
# Unstack the batched tensors into lists of per-sample tensors.
records = {
key: tf.unstack(value, axis=0)
for key, value in records.items()
}
records = [
{
key: value[idx]
for key, value in records.items()
}
for idx in range(self.batch_size)
]
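# records is now a list of per-sample feature dicts so that augmentations can be
# applied independently to each sample.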
# Augmentation only enabled during training.
enable_augmentation = phase == 'training' and self.enable_online_augmentation
enable_occlude = enable_augmentation and self.enable_occlusion_augmentation
enable_resize_augment = enable_augmentation and self.enable_resize_augmentation
# Initialize lists for each input and output.
data_image = []
labels_kpts = []
labels_occ = []
face_bbox = []
image_names = []
masking_occ_info = []
for record in records:
image_frame_name = record['train/image_frame_name']
cropped_face = tf.cast(record['train/cropped_face'], tf.float32)
kpts_norm = tf.cast(record['train/kpts_norm'], tf.float32)
kpts_occ = record['train/landmarks_occ']
face_bbox.append((record['train/facebbx_x'],
record['train/facebbx_y'],
record['train/facebbx_w'],
record['train/facebbx_h']))
kpts_mask = 1.0 - tf.cast(kpts_occ, dtype=tf.float32)[:num_keypoints]
# 1-visible, 0-occluded
if enable_augmentation:
if enable_resize_augment:
cropped_face = self.resize_augmentations(
cropped_face,
self.augmentation_resize_scale,
self.augmentation_resize_probability)
if enable_occlude:
cropped_face, kpts_mask = self.random_patches(
tf.transpose(cropped_face),
kpts_norm,
kpts_mask,
probability=self.patch_probability,
size_to_image_ratio=self.size_to_image_ratio)
# obtain random spatial transformation matrices.
sm, _ = get_all_transformations_matrices(self.augmentation_config,
self.image_height,
self.image_width,
enable_augmentation=enable_augmentation)
# Apply augmentations to frame tensors.
cropped_face = self._apply_augmentations_to_frame(cropped_face, sm)
cropped_face = tf.transpose(cropped_face, perm=[2, 0, 1])
# Apply gamma augmentation
self._gamma_op.build()
cropped_face = self._gamma_op(cropped_face)
# Apply augmentations to keypoints
kpts_norm = self._apply_augmentations_to_kpts(kpts_norm, num_keypoints, sm)
# Apply flipping augmentation
# if image is flipped then x value of landmark is flipped.
flip_lr_flag = tf.equal(tf.sign(sm[0][0]), -1)
kpts_norm, kpts_mask = self._flip_landmarks(kpts_norm, kpts_mask, flip_lr_flag)
data_image.append(cropped_face)
labels_kpts.append(kpts_norm)
labels_occ.append(kpts_mask)
image_names.append(image_frame_name)
# occlusion masking exception handling
masking_info = []
for no_occ_set in self.no_occ_masksets.split(' '):
regex_pattern = tf.compat.v1.string_join(['.*', no_occ_set, '.*'])
masking_info.append(
tf.compat.v1.strings.regex_full_match(image_frame_name, regex_pattern)
)
masking_occ_info.append(tf.cast(tf.reduce_any(masking_info), tf.float32))
# Batch together list of tensors.
input_images = tf.stack(data_image)
datalabels = [tf.stack(labels_kpts), tf.stack(labels_occ)]
masking_occ_info = tf.stack(masking_occ_info)
face_bbox = tf.stack(face_bbox)
image_names = tf.stack(image_names)
return input_images, datalabels, masking_occ_info, face_bbox, image_names
def _read_image_frame(self, load_func, image_name):
"""Read and decode a single image on disk to a tensor.
Args:
load_func (tao_core.processors.LoadFile): File loading function.
image_name (str): Name of the image.
Returns:
image (Tensor): A decoded 3D image tensor (HWC).
"""
data = load_func(image_name)
image = tf.image.decode_png(data, channels=self.image_channel)
return image
def _get_flip_landmark_mapping(self, num_keypoints=80):
"""
Compute order of facial landmarks for horizontally flipped image.
Face keypoints ordering listed here-
https://docs.google.com/document/d/13q8NciZtGyx5TgIgELkCbXGfE7PstKZpI3cENBGWkVw/edit#
Args:
num_keypoints (int): Number of keypoints. Options- 68, 80, 104.
Returns:
flip_lm_ind_map (list): order of facial landmarks for flipped image.
"""
# common face regions for 68 points
chin_ind_flip = _lrange(17)[::-1] + _lrange(17, 27)[::-1]
nose_ind_flip = _lrange(27, 31) + _lrange(31, 36)[::-1]
eye_ind_flip = [45, 44, 43, 42, 47, 46, 39, 38, 37, 36, 41, 40]
mouth_ind_flip = (_lrange(48, 55)[::-1] + _lrange(55, 60)[::-1] + _lrange(60, 65)[::-1]
+ _lrange(65, 68)[::-1])
# For 80 points
pupil_ind_flip = [74, 73, 72, 75, 70, 69, 68, 71]
ear_ind_flip = [78, 79, 76, 77]
# For 104 points
extra_ind_flip = ([91, 92, 93] +
_lrange(94, 101)[::-1] +
[101] + [80, 81, 82] +
_lrange(83, 90)[::-1] +
[90] + [103, 102])
# collect all face regions
flip_lm_ind_map = (chin_ind_flip + nose_ind_flip + eye_ind_flip + mouth_ind_flip)
if num_keypoints == 80:
flip_lm_ind_map = (flip_lm_ind_map + pupil_ind_flip + ear_ind_flip)
if num_keypoints == 104:
flip_lm_ind_map = (flip_lm_ind_map + pupil_ind_flip + ear_ind_flip + extra_ind_flip)
return flip_lm_ind_map
def _apply_augmentations_to_frame(self, input_tensor, sm):
"""
Apply spatial and color transformations to an image.
The spatial transform op maps a destination pixel P to a source image location Q
via a matrix M: Q = P M. The augmentation matrix sm maps source to destination,
so its inverse is computed below and passed to the op.
Args:
input_tensor (Tensor): Input image frame tensor (CHW).
sm (Tensor): 3x3 spatial transformation/augmentation matrix.
Returns:
image (Tensor): Augmented image tensor (HWC); the caller transposes it back to CHW.
"""
# Convert image to float if needed (stm_op requirement).
if input_tensor.dtype != tf.float32:
input_tensor = tf.cast(input_tensor, tf.float32)
dm = tf.matrix_inverse(sm)
# NOTE: Image and matrix need to be reshaped into a batch of one for this op.
# Apply spatial transformations.
input_tensor = tf.transpose(input_tensor, perm=[1, 2, 0])
image = self._stm_op(images=tf.stack([tf.image.grayscale_to_rgb(input_tensor)]),
stms=tf.stack([dm]))
image = tf.image.rgb_to_grayscale(image)
image = tf.reshape(image, [self.image_height, self.image_width,
self.image_channel])
return image
def _apply_augmentations_to_kpts(self, key_points, num_keypoints, mapMatrix):
"""
Apply augmentation to keypoints.
This method takes a matrix of keypoint locations and returns a matrix of
their affine-transformed locations.
Args:
key_points: matrix of keypoint locations in the format (#key-points, 2).
num_keypoints: number of keypoints.
mapMatrix: 3x3 spatial transformation matrix.
Returns:
A matrix of affine transformed key_point location in the
format (#key-points, 2)
"""
kpts = tf.concat([tf.transpose(key_points),
tf.ones([1, num_keypoints],
dtype=tf.float32)], axis=0)
new_kpt_points = tf.matmul(tf.transpose(mapMatrix), kpts)
new_kpt_points = tf.slice(new_kpt_points, [0, 0], [2, -1])
return tf.transpose(new_kpt_points)
def resize_augmentations(self,
cropped_face,
augmentation_resize_scale,
augmentation_resize_probability):
"""
Obtain resize augmentations.
This method takes a cropped face image and applies resize augmentation.
Args:
cropped_face (Tensor): Tensor of cropped image.
augmentation_resize_scale (float): scale for resize image.
augmentation_resize_probability (float): probability for applying augmentation.
Returns:
cropped_face (Tensor): The face image, downscaled by the given factor and resized back
to the original resolution when the augmentation is applied, otherwise unchanged.
"""
def resize_aug(cropped_face, augmentation_resize_scale):
resize_height = int(self.image_height * augmentation_resize_scale)
resize_width = int(self.image_width * augmentation_resize_scale)
resize_shape = (resize_height, resize_width)
cropped_face = tf.image.resize(tf.transpose(cropped_face), resize_shape)
cropped_face = tf.image.resize(cropped_face, (self.image_height, self.image_width))
cropped_face = tf.transpose(cropped_face)
return cropped_face
def no_resize_aug(cropped_face):
return cropped_face
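# Apply the resize augmentation only when the random draw exceeds the configured
# probability threshold; otherwise the face is returned unchanged.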
prob = tf.random.uniform([1], minval=0, maxval=1.0, dtype=tf.float32)
augmentation_prob_condition = tf.reshape(tf.greater(prob,
tf.constant(augmentation_resize_probability)), [])
cropped_face = tf.cond(augmentation_prob_condition,
lambda: resize_aug(cropped_face, augmentation_resize_scale),
lambda: no_resize_aug(cropped_face))
return cropped_face
def random_patches(self,
image,
kpts,
kpts_mask,
probability=0.5,
size_to_image_ratio=0.15):
"""
Overlay a random sized patch on the image.
Args:
image (Tensor): Input image frame tensors.
kpts (Tensor): Ground truth facial keypoints.
kpts_mask (Tensor): Ground truth facial keypoints occlusion flags.
probability: Probability to add occlusion.
size_to_image_ratio: Maximum scale of occlusion.
Returns:
Image with an occluded region.
"""
def occlusion(image, kpts, kpts_mask, size_to_image_ratio=0.15):
image_shape = tf.shape(image)
# choose a random patch size (the patch is centered by resize_with_crop_or_pad below)
min_size = 10 # min pixel size of occlusion boxes
max_size = tf.multiply(tf.cast(image_shape[0],
dtype=tf.float32), tf.constant(size_to_image_ratio))
size_x = tf.random.uniform([], minval=min_size, maxval=max_size)
size_y = tf.random.uniform([], minval=min_size, maxval=max_size)
# get box with ones
ones_box = tf.ones([tf.cast(size_x, tf.int32), tf.cast(size_y, tf.int32), 1])
# pad box to image size with zeros
mask = tf.image.resize_with_crop_or_pad(ones_box, image_shape[0], image_shape[1])
mask_zeros = tf.cast(-1.0 * (mask - 1.0), tf.float32)
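# mask is 1 inside the synthetic patch and 0 elsewhere; mask_zeros is its complement and
# is used both to zero out the patched pixels and to clear the visibility flags of
# keypoints that fall inside the patch.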
# apply masking to newly occluded points
occ_aug_mask = tf.gather_nd(mask_zeros, tf.cast(kpts, tf.int32))
kpts_mask_new = tf.math.multiply(kpts_mask, occ_aug_mask[:, 0])
# multiply box with image
mask_image = tf.multiply(image, mask_zeros)
# get random color
color_mask = tf.multiply(mask, tf.random.uniform([],
minval=0,
maxval=255,
dtype=tf.float32))
# add box to image
image = tf.add(mask_image, color_mask)
return tf.transpose(image), kpts_mask_new
def no_occlusion(image, kpts_mask):
return tf.transpose(image), kpts_mask
prob = tf.random.uniform([1], minval=0, maxval=1.0, dtype=tf.float32)
image, kpts_mask_new = tf.cond(tf.reshape(tf.greater(prob, tf.constant(probability)), []),
lambda: occlusion(image,
kpts,
kpts_mask,
size_to_image_ratio),
lambda: no_occlusion(image, kpts_mask))
if self.mask_aug_patch:
kpts_mask = kpts_mask_new
return image, kpts_mask
def _flip_landmarks(self, kpts_norm, kpts_mask, flip_lr_flag):
"""
Utility to flip landmarks and occlusion masks.
Args:
kpts_norm (Tensor): Original keypoints.
kpts_mask (Tensor): Original occlusion mask.
flip_lr_flag (Bool): Bool flag for flipping keypoints.
Returns:
kpts_norm (Tensor): flipped keypoints.
kpts_mask (Tensor): flipped occlusion mask.
"""
kpts_norm = tf.cond(
pred=flip_lr_flag,
true_fn=lambda: tf.gather(kpts_norm, self._flip_lm_ind_map),
false_fn=lambda: kpts_norm)
kpts_mask = tf.cond(
pred=flip_lr_flag,
true_fn=lambda: tf.gather(kpts_mask, self._flip_lm_ind_map),
false_fn=lambda: kpts_mask)
return kpts_norm, kpts_mask
def build_augmentation_config(augmentation_info):
"""
Creates a default augmentation config and overrides it with user-provided augmentation info.
Fields that the user does not specify keep their default values.
Args:
augmentation_info (dict): generated from yaml spec.
Returns:
config (dict): augmentation information with default values for unspecified.
"""
modulus_spatial_augmentation = {
'hflip_probability': 0.0,
'zoom_min': 1.0,
'zoom_max': 1.0,
'translate_max_x': 0.0,
'translate_max_y': 0.0,
'rotate_rad_max': 0.0
}
modulus_color_augmentation = {
'hue_rotation_max': 0.0,
'saturation_shift_max': 0.0,
'contrast_scale_max': 0.0,
'contrast_center': 127.5, # Should be 127.5 if images are in [0,255].
'brightness_scale_max': 0,
'brightness_uniform_across_channels': True,
}
gamma_augmentation = {
'gamma_type': 'uniform',
'gamma_mu': 1.0,
'gamma_std': 0.3,
'gamma_max': 1.0,
'gamma_min': 1.0,
'gamma_probability': 0.0
}
blur_augmentation = {
'kernel_sizes': [],
'blur_probability': 0.0,
'channels': 1
}
random_shift_bbx_augmentation = {
'shift_percent_max': 0.0,
'shift_probability': 0.0
}
config = {
'modulus_spatial_augmentation': modulus_spatial_augmentation,
'modulus_color_augmentation': modulus_color_augmentation,
'gamma_augmentation': gamma_augmentation,
'enable_online_augmentation': False,
'blur_augmentation': blur_augmentation,
'random_shift_bbx_augmentation': random_shift_bbx_augmentation,
}
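# The user-provided dict is merged recursively into these defaults by _update below;
# fields absent from augmentation_info keep their default values.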
def _update(d, u):
"""Update nested dictionaries.
Args:
d (dict): Nested dictionary.
u (dict): Nested dictionary.
Returns:
d (dict): Nested dictionary that has been updated.
"""
for k, v in six.iteritems(u):
if isinstance(v, dict):
d[k] = _update(d.get(k, {}), v)
else:
d[k] = v
return d
config = _update(config, augmentation_info)
return config
def get_transformation_ops(augmentation_config, frame_shape):
"""
Generate ops which will apply spatial / color transformations, custom blur and gamma ops.
Args:
augmentation_config (dict): Contains configuration for augmentation.
frame_shape (list): Shape of frame (HWC).
Returns:
stm_op (Modulus Processor): Spatial transformation op.
ctm_op (Modulus Processor): Color transformation op.
blur_op (Modulus Processor): Custom blur op.
gamma_op (Modulus Processor): Custom gamma correction op.
shift_op (Modulus Processor): Custom bounding box shifting op.
"""
# Set up spatial transform op.
stm_op = SpatialTransform(method='bilinear', background_value=0.0, data_format="channels_last")
# Set up color transform op.
# NOTE: Output is always normalized to [0,255] range.
ctm_op = ColorTransform(min_clip=0.0, max_clip=255.0, data_format="channels_last")
# Set up random blurring op.
blur_choices = augmentation_config["blur_augmentation"]["kernel_sizes"]
blur_probability = augmentation_config["blur_augmentation"]["blur_probability"]
channels = augmentation_config["blur_augmentation"]["channels"]
blur_op = RandomBlur(blur_choices=blur_choices,
blur_probability=blur_probability,
channels=channels)
# Set up random gamma op.
gamma_type = augmentation_config["gamma_augmentation"]["gamma_type"]
gamma_mu = augmentation_config["gamma_augmentation"]["gamma_mu"]
gamma_std = augmentation_config["gamma_augmentation"]["gamma_std"]
gamma_max = augmentation_config["gamma_augmentation"]["gamma_max"]
gamma_min = augmentation_config["gamma_augmentation"]["gamma_min"]
gamma_probability = augmentation_config["gamma_augmentation"]["gamma_probability"]
gamma_op = RandomGamma(gamma_type=gamma_type, gamma_mu=gamma_mu, gamma_std=gamma_std,
gamma_max=gamma_max, gamma_min=gamma_min,
gamma_probability=gamma_probability)
# Set up random shift op.
shift_percent_max = augmentation_config["random_shift_bbx_augmentation"]["shift_percent_max"]
shift_probability = augmentation_config["random_shift_bbx_augmentation"]["shift_probability"]
shift_op = RandomShift(shift_percent_max=shift_percent_max, shift_probability=shift_probability,
frame_shape=frame_shape)
return stm_op, ctm_op, blur_op, gamma_op, shift_op
def get_spatial_transformations_matrix(spatial_augmentation_config, image_width, image_height):
"""Generate a spatial transformations matrix that applies both preprocessing and augmentations.
Args:
spatial_augmentation_config (dict): Contains configuration for spatial augmentation:
'hflip_probability' (float)
'translate_max_x' (int)
'translate_max_y' (int)
'zoom_min' (float)
'zoom_max' (float)
'rotate_rad_max' (float)
image_width (int): Width of image canvas
image_height (int): Height of image canvas
Returns:
stm (Tensor 3x3): Matrix that transforms from original image space to augmented space.
"""
hflip_probability = spatial_augmentation_config["hflip_probability"]
translate_max_x = int(spatial_augmentation_config["translate_max_x"])
translate_max_y = int(spatial_augmentation_config["translate_max_y"])
zoom_ratio_min = spatial_augmentation_config["zoom_min"]
zoom_ratio_max = spatial_augmentation_config["zoom_max"]
rotate_rad_max = spatial_augmentation_config["rotate_rad_max"]
# Create spatial transformation matrices on CPU.
# NOTE: Creating matrices on GPU is much much slower.
with tf.device('/CPU'):
stm = get_random_spatial_transformation_matrix(
image_width, image_height,
flip_lr_prob=hflip_probability,
translate_max_x=translate_max_x,
translate_max_y=translate_max_y,
zoom_ratio_min=zoom_ratio_min,
zoom_ratio_max=zoom_ratio_max,
rotate_rad_max=rotate_rad_max)
return stm
def get_color_augmentation_matrix(color_augmentation_config):
"""Generate a color transformations matrix applying augmentations.
Args:
color_augmentation_config (dict): Contains configuration for color augmentation:
'hue_rotation_max' (float)
'saturation_shift_max' (float)
'contrast_scale_max' (float)
'contrast_center' (float)
'brightness_scale_max' (float)
'brightness_uniform_across_channels' (bool)
Returns:
ctm (Tensor 4x4): Matrix describing the color transformation to be applied.
"""
hue_rotation_max = color_augmentation_config["hue_rotation_max"]
saturation_shift_max = color_augmentation_config["saturation_shift_max"]
contrast_scale_max = color_augmentation_config["contrast_scale_max"]
contrast_center = color_augmentation_config["contrast_center"]
brightness_scale_max = color_augmentation_config["brightness_scale_max"]
brightness_uniform = color_augmentation_config["brightness_uniform_across_channels"]
# Create color transformation matrices on CPU.
# NOTE: Creating matrices on GPU is much much slower.
with tf.device('/CPU'):
ctm = get_random_color_transformation_matrix(
hue_rotation_max=hue_rotation_max,
saturation_shift_max=saturation_shift_max,
contrast_scale_max=contrast_scale_max,
contrast_center=contrast_center,
brightness_scale_max=brightness_scale_max,
brightness_uniform_across_channels=brightness_uniform)
return ctm
def get_all_transformations_matrices(augmentation_config, image_height, image_width,
enable_augmentation=False):
"""Generate all the color and spatial transformations as defined in augmentation_config.
Input image values are assumed to be in the [0, 1] range.
Args:
augmentation_config (dict): Contains augmentation configuration for
'modulus_spatial_augmentation',
'modulus_color_augmentation'.
image_height (int): Height of image canvas.
image_width (int): Width of image canvas.
enable_augmentation (bool): Toggle to turn off augmentations during non-training phases.
Returns:
stm (Tensor 3x3): matrix that transforms from original image space to augmented space.
ctm (Tensor 4x4): color transformation matrix.
"""
if not enable_augmentation:
# Default Spatial and Color Transformation matrices.
stm = tf.eye(3, dtype=tf.float32)
ctm = tf.eye(4, dtype=tf.float32)
return stm, ctm
spatial_augmentation_config = augmentation_config["modulus_spatial_augmentation"]
color_augmentation_config = augmentation_config["modulus_color_augmentation"]
# Compute spatial transformation matrix.
stm = get_spatial_transformations_matrix(spatial_augmentation_config, image_width, image_height)
# Compute color transformation matrix.
ctm = get_color_augmentation_matrix(color_augmentation_config)
return stm, ctm
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/dataloader/fpenet_dataloader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IVA MultiTaskNet root module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/multitask_classification/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Store protobuf definitions for MClassification."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/multitask_classification/proto/__init__.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/multitask_classification/proto/experiment.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from nvidia_tao_tf1.cv.common.proto import training_config_pb2 as nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_training__config__pb2
from nvidia_tao_tf1.cv.makenet.proto import model_config_pb2 as nvidia__tao__tf1_dot_cv_dot_makenet_dot_proto_dot_model__config__pb2
from nvidia_tao_tf1.cv.multitask_classification.proto import dataset_config_pb2 as nvidia__tao__tf1_dot_cv_dot_multitask__classification_dot_proto_dot_dataset__config__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/multitask_classification/proto/experiment.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\nAnvidia_tao_tf1/cv/multitask_classification/proto/experiment.proto\x1a\x34nvidia_tao_tf1/cv/common/proto/training_config.proto\x1a\x32nvidia_tao_tf1/cv/makenet/proto/model_config.proto\x1a\x45nvidia_tao_tf1/cv/multitask_classification/proto/dataset_config.proto\"\x94\x01\n\nExperiment\x12#\n\x0e\x64\x61taset_config\x18\x01 \x01(\x0b\x32\x0b.DataSource\x12\"\n\x0cmodel_config\x18\x02 \x01(\x0b\x32\x0c.ModelConfig\x12(\n\x0ftraining_config\x18\x03 \x01(\x0b\x32\x0f.TrainingConfig\x12\x13\n\x0brandom_seed\x18\x04 \x01(\rb\x06proto3')
,
dependencies=[nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_training__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_makenet_dot_proto_dot_model__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_multitask__classification_dot_proto_dot_dataset__config__pb2.DESCRIPTOR,])
_EXPERIMENT = _descriptor.Descriptor(
name='Experiment',
full_name='Experiment',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dataset_config', full_name='Experiment.dataset_config', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='model_config', full_name='Experiment.model_config', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='training_config', full_name='Experiment.training_config', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='random_seed', full_name='Experiment.random_seed', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=247,
serialized_end=395,
)
_EXPERIMENT.fields_by_name['dataset_config'].message_type = nvidia__tao__tf1_dot_cv_dot_multitask__classification_dot_proto_dot_dataset__config__pb2._DATASOURCE
_EXPERIMENT.fields_by_name['model_config'].message_type = nvidia__tao__tf1_dot_cv_dot_makenet_dot_proto_dot_model__config__pb2._MODELCONFIG
_EXPERIMENT.fields_by_name['training_config'].message_type = nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_training__config__pb2._TRAININGCONFIG
DESCRIPTOR.message_types_by_name['Experiment'] = _EXPERIMENT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Experiment = _reflection.GeneratedProtocolMessageType('Experiment', (_message.Message,), dict(
DESCRIPTOR = _EXPERIMENT,
__module__ = 'nvidia_tao_tf1.cv.multitask_classification.proto.experiment_pb2'
# @@protoc_insertion_point(class_scope:Experiment)
))
_sym_db.RegisterMessage(Experiment)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/multitask_classification/proto/experiment_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/multitask_classification/proto/dataset_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/multitask_classification/proto/dataset_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\nEnvidia_tao_tf1/cv/multitask_classification/proto/dataset_config.proto\"X\n\nDataSource\x12\x16\n\x0etrain_csv_path\x18\x01 \x01(\t\x12\x1c\n\x14image_directory_path\x18\x02 \x01(\t\x12\x14\n\x0cval_csv_path\x18\x03 \x01(\tb\x06proto3')
)
_DATASOURCE = _descriptor.Descriptor(
name='DataSource',
full_name='DataSource',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='train_csv_path', full_name='DataSource.train_csv_path', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='image_directory_path', full_name='DataSource.image_directory_path', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='val_csv_path', full_name='DataSource.val_csv_path', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=73,
serialized_end=161,
)
DESCRIPTOR.message_types_by_name['DataSource'] = _DATASOURCE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DataSource = _reflection.GeneratedProtocolMessageType('DataSource', (_message.Message,), dict(
DESCRIPTOR = _DATASOURCE,
__module__ = 'nvidia_tao_tf1.cv.multitask_classification.proto.dataset_config_pb2'
# @@protoc_insertion_point(class_scope:DataSource)
))
_sym_db.RegisterMessage(DataSource)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/multitask_classification/proto/dataset_config_pb2.py |
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/multitask_classification/utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper function to load model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from nvidia_tao_tf1.cv.common.utils import (
CUSTOM_OBJS,
load_keras_model
)
from nvidia_tao_tf1.encoding import encoding
def load_model(model_path, key=None):
"""Load a model either in .tlt format or .hdf5 format."""
_, ext = os.path.splitext(model_path)
if ext == '.hdf5':
model = load_keras_model(model_path, custom_objects=CUSTOM_OBJS)
elif ext == '.tlt':
os_handle, temp_file_name = tempfile.mkstemp(suffix='.hdf5')
os.close(os_handle)
with open(temp_file_name, 'wb') as temp_file, open(model_path, 'rb') as encoded_file:
encoding.decode(encoded_file, temp_file, key)
encoded_file.close()
temp_file.close()
# recursive call
model = load_model(temp_file_name, None)
os.remove(temp_file_name)
else:
raise NotImplementedError("{0} file is not supported!".format(ext))
return model
def save_model(keras_model, model_path, key, save_format=None):
"""Save a model to either .tlt or .hdf5 format."""
_, ext = os.path.splitext(model_path)
if (save_format is not None) and (save_format != ext):
# recursive call to save a correct model
return save_model(keras_model, model_path + save_format, key, None)
if ext == '.hdf5':
keras_model.save(model_path, overwrite=True, include_optimizer=True)
else:
raise NotImplementedError("{0} file is not supported!".format(ext))
return model_path
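# Example usage (hypothetical paths and key):
#   model = load_model('/workspace/model.tlt', key='nvidia_tlt')
#   new_path = save_model(model, '/workspace/model_retrained', key='nvidia_tlt', save_format='.hdf5')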
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/multitask_classification/utils/model_io.py |
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
"""Load an experiment spec file to run MultiTaskNet training, evaluation, pruning."""
from google.protobuf.text_format import Merge as merge_text_proto
import nvidia_tao_tf1.cv.multitask_classification.proto.experiment_pb2 as experiment_pb2
def load_experiment_spec(spec_path=None):
"""Load experiment spec from a .txt file and return an experiment_pb2.Experiment object.
Args:
spec_path (str): location of a file containing the custom experiment spec proto.
Returns:
experiment_spec: protocol buffer instance of type experiment_pb2.Experiment.
"""
experiment_spec = experiment_pb2.Experiment()
merge_text_proto(open(spec_path, "r").read(), experiment_spec)
return experiment_spec
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/multitask_classification/utils/spec_loader.py |
"""Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/multitask_classification/scripts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates confusion matrix on evaluation dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import keras
from keras.applications.imagenet_utils import preprocess_input
import numpy as np
import pandas as pd
from PIL import Image
from tqdm import tqdm
from nvidia_tao_tf1.cv.common.utils import check_tf_oom
from nvidia_tao_tf1.cv.multitask_classification.utils.model_io import load_model
def build_command_line_parser(parser=None):
"""Build a command line parser for confmat generation."""
if parser is None:
parser = argparse.ArgumentParser(description="TLT MultiTask Confusion Matrix Generator")
parser.add_argument("--model",
"-m",
type=str,
help="TLT model file")
parser.add_argument("--img_root",
"-i",
type=str,
help="test image dir")
parser.add_argument("--target_csv",
"-l",
type=str,
help="Target CSV file")
parser.add_argument("--key",
"-k",
default="",
type=str,
help="TLT model key")
return parser
def parse_command_line_arguments(args=None):
"""Parse command line arguments for confmat."""
parser = build_command_line_parser()
return vars(parser.parse_known_args(args)[0])
def confmat(model_file, image_dir, csv_file, key):
"""Get prediction confusion matrix."""
# get class mapping
df = pd.read_csv(csv_file)
tasks_header = sorted(df.columns.tolist()[1:])
class_num = []
class_mapping = []
conf_matrix = {}
for task in tasks_header:
unique_vals = sorted(df.loc[:, task].unique())
class_num.append(len(unique_vals))
class_mapping.append(dict(zip(range(len(unique_vals)), unique_vals)))
# initialize confusion matrix
conf_matrix[task] = pd.DataFrame(0, index=unique_vals, columns=unique_vals)
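# Rows of each confusion matrix are indexed by the predicted label and columns by the
# ground-truth label (matching the note printed in __main__ below).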
# get model
# set custom_object to arbitrary function to avoid not_found error.
keras.backend.set_learning_phase(0)
model = load_model(model_file, key=key)
# Use list() so tqdm knows total size
for _, row in tqdm(list(df.iterrows())):
true_label = [row[l] for l in tasks_header]
pred_label = [class_mapping[i][val] for i, val in enumerate(
inference(model, os.path.join(image_dir, row.values[0]), class_num))]
for i in range(len(true_label)):
conf_matrix[tasks_header[i]].at[pred_label[i], true_label[i]] += 1
return conf_matrix
@check_tf_oom
def inference(model, img_path, class_num):
"""Performing Inference."""
# extracting the data format parameter to detect input shape
data_format = model.layers[1].data_format
# Computing shape of input tensor
image_shape = model.layers[0].input_shape[1:4]
# Setting input shape
if data_format == "channels_first":
image_height, image_width = image_shape[1:3]
else:
image_height, image_width = image_shape[0:2]
# Open image and preprocessing
image = Image.open(img_path)
image = image.resize((image_width, image_height), Image.ANTIALIAS).convert('RGB')
inference_input = preprocess_input(np.array(image).astype(np.float32).transpose(2, 0, 1))
inference_input.shape = (1, ) + inference_input.shape
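# Add a leading batch dimension so the single image can be passed to model.predict.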
# Keras inference
raw_predictions = model.predict(inference_input, batch_size=1)
return [np.argmax(x.reshape(-1)) for x in raw_predictions]
if __name__ == "__main__":
arguments = parse_command_line_arguments()
# Do not omit rows / cols
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
conf_matrix = confmat(arguments['model'], image_dir=arguments['img_root'],
csv_file=arguments['target_csv'], key=arguments['key'])
print('Row corresponds to predicted label and column corresponds to ground-truth')
for task, table in list(conf_matrix.items()):
print("********")
print("For task", task)
print(table)
print("Accuracy:", table.values.trace() / table.values.sum())
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/multitask_classification/scripts/confmat.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to export a trained TLT model to an ETLT file for deployment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from datetime import datetime as dt
import json
import logging
import os
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.multitask_classification.export.mclassification_exporter import (
MClassificationExporter
)
logger = logging.getLogger(__name__)
DEFAULT_MAX_WORKSPACE_SIZE = 2 * (1 << 30)
DEFAULT_MAX_BATCH_SIZE = 1
def build_command_line_parser(parser=None):
"""Simple function to parse arguments."""
if parser is None:
parser = argparse.ArgumentParser(description='Export a TLT model.')
parser.add_argument("-m",
"--model",
help="Path to the model file.",
type=str,
required=True,
default=None)
parser.add_argument("-k",
"--key",
help="Key to load the model.",
type=str,
default="")
parser.add_argument("-o",
"--output_file",
type=str,
default=None,
help="Output file (defaults to $(input_filename).etlt)")
parser.add_argument("--force_ptq",
action="store_true",
default=False,
help=argparse.SUPPRESS)
# Int8 calibration arguments.
parser.add_argument("--cal_data_file",
default="",
type=str,
help=argparse.SUPPRESS)
parser.add_argument("--cal_image_dir",
default="",
type=str,
help=argparse.SUPPRESS)
parser.add_argument("--data_type",
type=str,
default="fp32",
help=argparse.SUPPRESS,
choices=["fp32", "fp16", "int8"])
parser.add_argument("-s",
"--strict_type_constraints",
action="store_true",
default=False,
help=argparse.SUPPRESS)
parser.add_argument('--cal_cache_file',
default='./cal.bin',
type=str,
help=argparse.SUPPRESS)
parser.add_argument("--batches",
type=int,
default=10,
help=argparse.SUPPRESS)
parser.add_argument("--max_workspace_size",
type=int,
default=DEFAULT_MAX_WORKSPACE_SIZE,
help=argparse.SUPPRESS)
parser.add_argument("--max_batch_size",
type=int,
default=DEFAULT_MAX_BATCH_SIZE,
help=argparse.SUPPRESS)
parser.add_argument("--batch_size",
type=int,
default=16,
help=argparse.SUPPRESS)
parser.add_argument("--backend",
type=str,
default="onnx",
help=argparse.SUPPRESS,
choices=["onnx", "uff"])
parser.add_argument("-cm",
"--class_map",
type=str,
help="Path to the classmap JSON file.")
parser.add_argument("--gen_ds_config",
action="store_true",
default=False,
help="Generate a template DeepStream related configuration elements. "
"This config file is NOT a complete configuration file and requires "
"the user to update the sample config files in DeepStream with the "
"parameters generated from here.")
parser.add_argument("--engine_file",
type=str,
default=None,
help=argparse.SUPPRESS)
parser.add_argument("--results_dir",
type=str,
default=None,
help="Path to the files where the logs are stored.")
parser.add_argument("-v",
"--verbose",
action="store_true",
default=False,
help="Verbosity of the logger.")
return parser
def parse_command_line(args=None):
"""Simple function to parse command line arguments."""
parser = build_command_line_parser()
return vars(parser.parse_known_args(args)[0])
def run_export(args):
"""Wrapper to run export of tlt models.
Args:
args (dict): Dictionary of parsed arguments to run export.
Returns:
No explicit returns.
"""
# Parsing command line arguments.
model_path = args['model']
key = args['key']
# Calibrator configuration.
cal_cache_file = args['cal_cache_file']
cal_image_dir = args['cal_image_dir']
cal_data_file = args['cal_data_file']
batch_size = args['batch_size']
n_batches = args['batches']
data_type = args['data_type']
strict_type = args['strict_type_constraints']
output_file = args['output_file']
engine_file_name = args['engine_file']
max_workspace_size = args["max_workspace_size"]
max_batch_size = args["max_batch_size"]
force_ptq = args["force_ptq"]
# Status logger for the UI.
results_dir = args.get("results_dir", None)
gen_ds_config = args["gen_ds_config"]
backend = args["backend"]
save_engine = False
if engine_file_name is not None:
save_engine = True
log_level = "INFO"
if args['verbose']:
log_level = "DEBUG"
# Status logger initialization
if results_dir is not None:
if not os.path.exists(results_dir):
os.makedirs(results_dir)
timestamp = int(dt.timestamp(dt.now()))
filename = "status.json"
if results_dir == "/workspace/logs":
filename = f"status_export_{timestamp}.json"
status_file = os.path.join(results_dir, filename)
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=True
)
)
status_logger = status_logging.get_status_logger()
# Configure the logger.
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level=log_level)
# Set default output filename if the filename
# isn't provided over the command line.
if output_file is None:
split_name = os.path.splitext(model_path)[0]
output_file = f"{split_name}.{backend}"
if not (backend in output_file):
output_file = f"{output_file}.{backend}"
logger.info("Saving exported model to {}".format(output_file))
# Warn the user if an exported file already exists.
assert not os.path.exists(output_file), "Default output file {} already "\
"exists".format(output_file)
# Make an output directory if necessary.
output_root = os.path.dirname(os.path.realpath(output_file))
if not os.path.exists(output_root):
os.makedirs(output_root)
output_tasks = json.load(open(args['class_map'], 'r'))['tasks']
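# The classmap JSON is expected to provide a 'tasks' list naming each output head;
# these task names are handed to the exporter.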
# Build exporter instance
status_logger.write(message="Building exporter object.")
exporter = MClassificationExporter(output_tasks, model_path, key,
backend=backend,
data_type=data_type,
strict_type=strict_type)
exporter.set_session()
exporter.set_keras_backend_dtype()
# Export the model to etlt file and build the TRT engine.
status_logger.write(message="Exporting the model.")
exporter.export(output_file, backend,
data_file_name=cal_data_file,
calibration_cache=os.path.realpath(cal_cache_file),
n_batches=n_batches,
batch_size=batch_size,
save_engine=save_engine,
engine_file_name=engine_file_name,
calibration_images_dir=cal_image_dir,
max_batch_size=max_batch_size,
max_workspace_size=max_workspace_size,
force_ptq=force_ptq,
gen_ds_config=gen_ds_config)
status_logger.write(
status_level=status_logging.Status.SUCCESS,
message="Exporting finished.")
if __name__ == "__main__":
try:
args = parse_command_line()
run_export(args)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Export was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/multitask_classification/scripts/export.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perform continuous MultitaskNet training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import logging
from multiprocessing import cpu_count
import os
import keras
from keras import backend as K
from keras.applications.imagenet_utils import preprocess_input
from keras.callbacks import TerminateOnNaN
import tensorflow as tf
from nvidia_tao_tf1.cv.common.callbacks.enc_model_saver_callback import KerasModelSaver
from nvidia_tao_tf1.cv.common.callbacks.loggers import TAOStatusLogger
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.common.mlops.clearml import get_clearml_task
from nvidia_tao_tf1.cv.common.utils import build_lrs_from_config
from nvidia_tao_tf1.cv.common.utils import (
build_optimizer_from_config,
build_regularizer_from_config
)
from nvidia_tao_tf1.cv.common.utils import (
check_tf_oom,
hvd_keras,
initialize,
parse_model_load_from_config
)
from nvidia_tao_tf1.cv.common.utils import OneIndexedCSVLogger as CSVLogger
from nvidia_tao_tf1.cv.common.utils import TensorBoard
from nvidia_tao_tf1.cv.multitask_classification.data_loader.data_generator import (
MultiClassDataGenerator
)
from nvidia_tao_tf1.cv.multitask_classification.model.model_builder import get_model
from nvidia_tao_tf1.cv.multitask_classification.utils.model_io import load_model
from nvidia_tao_tf1.cv.multitask_classification.utils.spec_loader import load_experiment_spec
logger = logging.getLogger(__name__)
verbose = 0
def build_command_line_parser(parser=None):
"""Build a command line parser for inference."""
if parser is None:
parser = argparse.ArgumentParser(
description="TAO Toolkit Multitask Classification training."
)
parser.add_argument(
'-e',
'--experiment_spec_file',
type=str,
required=True,
help='Path to the experiment spec file.')
parser.add_argument(
'-r',
'--results_dir',
required=True,
type=str,
help='Path to a folder where experiment outputs should be written.'
)
parser.add_argument(
'-k',
'--key',
required=False,
default="",
type=str,
help='Key to save or load a .tlt model.'
)
parser.add_argument(
"-v",
"--verbose",
action='store_true',
default=False,
help="Flag to enable verbose logging."
)
return parser
def parse_command_line_arguments(args=None):
"""Parse command line arguments for training."""
parser = build_command_line_parser()
return parser.parse_args(args)
def _load_pretrain_weights(pretrain_model, train_model):
"""Load weights in pretrain model to model."""
strict_mode = True
for layer in train_model.layers[1:]:
# Strict weight loading applies only up to the first multitask head layer.
if layer.name.find('multitask_') != -1:
strict_mode = False
try:
l_return = pretrain_model.get_layer(layer.name)
except ValueError:
if strict_mode and layer.name[-3:] != 'qdq' and len(layer.get_weights()) != 0:
raise ValueError(layer.name + ' not found in pretrained model.')
# Ignore QDQ
continue
try:
layer.set_weights(l_return.get_weights())
except ValueError:
if strict_mode:
raise ValueError(layer.name + ' has incorrect shape in pretrained model.')
continue
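# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the original source):
# a minimal example of the name-based weight transfer performed by
# _load_pretrain_weights above, using two toy Keras models. Layer names and
# sizes are hypothetical.
# ---------------------------------------------------------------------------
def _example_name_based_weight_transfer():
    """Copy weights between two toy models by matching layer names."""
    import numpy as np
    from keras.layers import Dense, Input
    from keras.models import Model

    inp = Input(shape=(4,))
    x = Dense(8, name='block_1')(inp)
    out = Dense(2, name='multitask_task_a')(x)
    pretrain_model = Model(inp, out)

    inp2 = Input(shape=(4,))
    x2 = Dense(8, name='block_1')(inp2)
    out2 = Dense(3, name='multitask_task_b')(x2)  # new head, no match expected
    train_model = Model(inp2, out2)

    for layer in train_model.layers[1:]:
        try:
            source = pretrain_model.get_layer(layer.name)
        except ValueError:
            continue  # unmatched layers (e.g. the new head) are skipped
        layer.set_weights(source.get_weights())

    # The shared block now carries identical weights in both models.
    assert np.allclose(train_model.get_layer('block_1').get_weights()[0],
                       pretrain_model.get_layer('block_1').get_weights()[0])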
def construct_model(model_config, training_config, nclasses_dict, key):
'''
Construct a model according to spec file.
Args:
model_config: model_config of parsed spec file
training_config: training_config of parsed spec file
nclasses_dict: dictionary with task / class information from data loader
key: TLT encryption / decryption key
Returns:
model: built model
init_epoch: training should start from this epoch
'''
# load_path, load_graph, reset_optim, init_epoch = load_config
load_config = parse_model_load_from_config(training_config)
load_model_path = load_config[0]
load_graph = load_config[1]
reset_optim = load_config[2]
nchannels, im_height, im_width = map(int, model_config.input_image_size.split(','))
# Creating model
ka = dict()
ka['nlayers'] = model_config.n_layers if model_config.n_layers else 18
ka['use_batch_norm'] = model_config.use_batch_norm
ka['use_pooling'] = model_config.use_pooling
ka['freeze_bn'] = model_config.freeze_bn
ka['use_bias'] = model_config.use_bias
ka['all_projections'] = model_config.all_projections
ka['dropout'] = model_config.dropout if model_config.dropout else 0.0
ka['freeze_blocks'] = model_config.freeze_blocks if model_config.freeze_blocks else None
ka['arch'] = model_config.arch if model_config.arch else "resnet"
ka['data_format'] = 'channels_first'
ka['nclasses_dict'] = nclasses_dict
ka['input_shape'] = (nchannels, im_height, im_width)
ka['kernel_regularizer'] = build_regularizer_from_config(training_config.regularizer)
if (not load_model_path) or (not load_graph):
# needs to build a training model
train_model = get_model(**ka)
if load_model_path:
# load pretrain weights
pretrain_model = load_model(load_model_path, key=key)
_load_pretrain_weights(pretrain_model, train_model)
else:
train_model = load_model(load_model_path, key=key)
if reset_optim:
train_model_config = train_model.get_config()
for layer, layer_config in zip(train_model.layers, train_model_config['layers']):
if hasattr(layer, 'kernel_regularizer'):
layer_config['config']['kernel_regularizer'] = ka['kernel_regularizer']
reg_model = keras.Model.from_config(train_model_config)
reg_model.set_weights(train_model.get_weights())
train_model = reg_model
if (not load_model_path) or reset_optim:
optim = build_optimizer_from_config(training_config.optimizer)
train_model.compile(loss=len(nclasses_dict)*["categorical_crossentropy"],
loss_weights=len(nclasses_dict)*[1.0],
metrics=["accuracy"], optimizer=optim)
return train_model, load_config[3]
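# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the original source):
# how a model with one softmax output per task is compiled with a list of
# per-task losses, mirroring the compile call in construct_model. Task names
# and class counts are hypothetical.
# ---------------------------------------------------------------------------
def _example_multitask_compile():
    """Compile a toy two-task classifier the way construct_model does."""
    from keras.layers import Dense, Input
    from keras.models import Model

    nclasses_dict = {'base_color': 4, 'category': 10}  # hypothetical tasks
    inp = Input(shape=(16,))
    shared = Dense(32, activation='relu')(inp)
    outputs = [Dense(n, activation='softmax', name=task)(shared)
               for task, n in sorted(nclasses_dict.items())]
    model = Model(inp, outputs)

    # One categorical cross-entropy per task, equally weighted.
    model.compile(loss=len(nclasses_dict) * ["categorical_crossentropy"],
                  loss_weights=len(nclasses_dict) * [1.0],
                  metrics=["accuracy"],
                  optimizer="sgd")
    return model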
def run_experiment(config_path, results_dir, key, verbose=False):
"""
Launch experiment that trains the model.
NOTE: Do not change the argument names without verifying that cluster submission works.
Args:
config_path (str): Path to a text file containing a complete experiment configuration.
results_dir (str): Path to a folder where various training outputs will be written.
If the folder does not already exist, it will be created.
"""
hvd = hvd_keras()
hvd.init()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = str(hvd.local_rank())
sess = tf.Session(config=config)
K.set_session(sess)
verbose = 1 if hvd.rank() == 0 else 0
# Load experiment spec.
experiment_spec = load_experiment_spec(config_path)
initialize(experiment_spec.random_seed, hvd)
# Setting up keras backend and keras environment
K.set_image_data_format("channels_first")
logging.basicConfig(
format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level='DEBUG' if verbose else 'INFO'
)
is_master = hvd.rank() == 0
if is_master and not os.path.exists(results_dir):
os.makedirs(results_dir)
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=is_master,
verbosity=logger.getEffectiveLevel(),
append=True
)
)
# keras.backend.set_learning_phase(1)
# get channel, height and width of the input image
model_config = experiment_spec.model_config
training_config = experiment_spec.training_config
if is_master:
if training_config.HasField("visualizer"):
if training_config.visualizer.HasField("clearml_config"):
logger.info("Integrating with clearml.")
clearml_config = training_config.visualizer.clearml_config
get_clearml_task(
clearml_config,
"multitask_classification"
)
nchannels, im_height, im_width = map(int, model_config.input_image_size.split(','))
if nchannels == 1:
color_mode = 'grayscale'
elif nchannels == 3:
color_mode = 'rgb'
else:
raise ValueError("number of channels must be 1 or 3")
# Initializing data generator : Train
train_datagen = MultiClassDataGenerator(preprocessing_function=preprocess_input,
horizontal_flip=True,
featurewise_center=False,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2)
# Initializing data iterator: Train
data_config = experiment_spec.dataset_config
batch_size = training_config.batch_size_per_gpu
train_iterator = train_datagen.flow_from_singledirectory(data_config.image_directory_path,
data_config.train_csv_path,
target_size=(im_height, im_width),
batch_size=batch_size,
color_mode=color_mode)
if hvd.rank() == 0:
print('Processing dataset (train): {}'.format(data_config.train_csv_path))
# Initializing data generator: Val
val_datagen = MultiClassDataGenerator(preprocessing_function=preprocess_input,
horizontal_flip=False)
# Initializing data iterator: Val
val_iterator = val_datagen.flow_from_singledirectory(data_config.image_directory_path,
data_config.val_csv_path,
target_size=(im_height, im_width),
batch_size=batch_size,
color_mode=color_mode)
if hvd.rank() == 0:
print('Processing dataset (validation): {}'.format(data_config.val_csv_path))
# Check if the number of classes is consistent
assert train_iterator.class_dict == val_iterator.class_dict, \
"Num of classes at train and val don't match"
nclasses_dict = train_iterator.class_dict
final_model, init_epoch = construct_model(model_config, training_config, nclasses_dict, key)
final_model.optimizer = hvd.DistributedOptimizer(final_model.optimizer)
# Load training parameters
num_epochs = training_config.num_epochs
ckpt_interval = training_config.checkpoint_interval
# Setup callbacks
iters_per_epoch = len(train_iterator) // hvd.size()
max_iterations = num_epochs * iters_per_epoch
lr_scheduler = build_lrs_from_config(training_config.learning_rate, max_iterations, hvd.size())
init_step = init_epoch * iters_per_epoch
lr_scheduler.reset(init_step)
callbacks = [hvd.callbacks.BroadcastGlobalVariablesCallback(0),
hvd.callbacks.MetricAverageCallback(),
lr_scheduler,
TerminateOnNaN()]
# Writing out class-map file for inference mapping
if hvd.rank() == 0:
# Printing model summary
final_model.summary()
reverse_mapping = {task: {v: k for k, v in classes.items()}
for task, classes in train_iterator.class_mapping.items()}
save_dict = {'tasks': train_iterator.tasks_header,
'class_mapping': reverse_mapping}
json.dump(save_dict, open(os.path.join(results_dir, 'class_mapping.json'), 'w'))
if not os.path.exists(os.path.join(results_dir, 'weights')):
os.mkdir(os.path.join(results_dir, 'weights'))
arch_name = model_config.arch
if model_config.arch in ['resnet', 'darknet', 'cspdarknet', 'vgg']:
# append n_layers to arch_name
arch_name += str(model_config.n_layers)
ckpt_path = str(os.path.join(results_dir, 'weights',
'multitask_cls_' + arch_name + '_epoch_{epoch:03d}.hdf5'))
# This callback will update model_eval and save the model.
model_checkpoint = KerasModelSaver(ckpt_path, key, ckpt_interval, verbose=verbose)
csv_path = os.path.join(results_dir, 'multitask_cls_training_log_' + arch_name + '.csv')
csv_logger = CSVLogger(filename=csv_path,
separator=',',
append=False)
callbacks.append(model_checkpoint)
callbacks.append(csv_logger)
# Setting up TAO status logger.
status_logger = TAOStatusLogger(
results_dir,
append=True,
num_epochs=num_epochs,
is_master=hvd.rank() == 0,
)
callbacks.append(status_logger)
# Setting up Tensorboard visualizer.
tensorboard_dir = os.path.join(results_dir, "events")
if not os.path.exists(tensorboard_dir):
os.makedirs(tensorboard_dir)
weight_histograms = False
if training_config.HasField("visualizer"):
weight_histograms = training_config.visualizer.weight_histograms
tensorboard = TensorBoard(
log_dir=tensorboard_dir,
weight_hist=weight_histograms
)
callbacks.append(tensorboard)
# Commencing Training
final_model.fit_generator(train_iterator,
steps_per_epoch=iters_per_epoch,
epochs=num_epochs,
verbose=verbose,
workers=max(int((cpu_count() - 1) / hvd.size() + 0.5), 1),
validation_data=val_iterator,
validation_steps=len(val_iterator),
callbacks=callbacks,
max_queue_size=20,
initial_epoch=init_epoch)
status_logging.get_status_logger().write(message="Final model evaluation in progress.")
score = hvd.allreduce(
final_model.evaluate_generator(val_iterator,
len(val_iterator),
workers=training_config.n_workers))
status_logging.get_status_logger().write(message="Model evaluation in complete.")
if verbose:
print('Total Val Loss: {}'.format(score[0]))
print('Tasks: {}'.format(val_iterator.tasks_header))
print('Val loss per task: {}'.format(score[1:1 + val_iterator.num_tasks]))
print('Val acc per task: {}'.format(score[1 + val_iterator.num_tasks:]))
tasks = val_iterator.tasks_header
val_accuracies = score[1 + val_iterator.num_tasks:]
kpi_dict = {key: float(value) for key, value in zip(tasks, val_accuracies)}
kpi_dict["mean accuracy"] = sum(val_accuracies) / len(val_accuracies)
status_logging.get_status_logger().kpi.update(kpi_dict)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Multi-Task classification finished successfully"
)
@check_tf_oom
def main(args=None):
"""Run the training process."""
try:
args = parse_command_line_arguments(args)
run_experiment(config_path=args.experiment_spec_file,
results_dir=args.results_dir,
key=args.key,
verbose=args.verbose)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Training was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/multitask_classification/scripts/train.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to prune the multitask_classification TLT model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.common.magnet_prune import ( # noqa pylint: disable=unused-import
build_command_line_parser,
main,
)
if __name__ == "__main__":
try:
main(sys.argv[1:])
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Pruning finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Pruning was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/multitask_classification/scripts/prune.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Inference and metrics computation code using a loaded model.
Arguments:
image: image to run inference on
classmap: class map generated by the training script
Returns:
Network predictions
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import os
import keras
from keras.applications.imagenet_utils import preprocess_input
import numpy as np
from PIL import Image
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.common.utils import check_tf_oom
from nvidia_tao_tf1.cv.multitask_classification.utils.model_io import load_model
def build_command_line_parser(parser=None):
"""Build a command line parser for inference."""
if parser is None:
parser = argparse.ArgumentParser(description="Multitask classification inference script")
parser.add_argument("--model_path",
"-m",
type=str,
help="TLT model file")
parser.add_argument("--image_dir",
"-i",
type=str,
help="inference image")
parser.add_argument("--classmap",
"-cm",
type=str,
help="Class map file generated from train example")
parser.add_argument("--key",
"-k",
default="",
type=str,
help="TLT model key")
parser.add_argument("-r",
"--results_dir",
type=str,
default=None,
help="Path to results directory")
return parser
def parse_command_line_arguments(args=None):
"""Parse command line arguments for inference."""
parser = build_command_line_parser()
return vars(parser.parse_known_args(args)[0])
@check_tf_oom
def inference(model_file, image_file=None, classmap=None, key=None, results_file=None):
"""Inference on an image using a pretrained model file.
Args:
model_file : .hdf5 keras model file containing weights and topology
image_file : image to be inferenced
classmap : path to json file containing classmap output generated from the train script
results_file: Path to store predicted outputs (result.txt)
Returns:
None
Log:
Image Mode:
print classifier output
Directory Mode:
Classifier accuracy for given KPI dataset
"""
s_logger = status_logging.get_status_logger()
s_logger.write(
status_level=status_logging.Status.STARTED,
message="Starting inference."
)
# Retrieve model using the pretrained file
# set custom_object to arbitrary function to avoid not_found error.
keras.backend.set_learning_phase(0)
model = load_model(model_file, key=key)
# extracting the data format parameter to detect input shape
data_format = model.layers[1].data_format
# Computing shape of input tensor
image_shape = model.layers[0].input_shape[1:4]
# Printing summary of retrieved model
model.summary()
# Setting input shape
if data_format == "channels_first":
image_height, image_width = image_shape[1:3]
else:
image_height, image_width = image_shape[0:2]
if image_file is not None:
# Open image and preprocessing
image = Image.open(image_file)
image = image.resize((image_width, image_height), Image.ANTIALIAS).convert('RGB')
inference_input = preprocess_input(np.array(image).astype(np.float32).transpose(2, 0, 1))
inference_input.shape = (1, ) + inference_input.shape
# Keras inference
raw_predictions = model.predict(inference_input, batch_size=1)
with open(classmap, "r") as cm:
class_dict = json.load(cm)
cm.close()
task_name = class_dict['tasks']
class_map = class_dict['class_mapping']
for idx, task in enumerate(task_name):
pred = raw_predictions[idx].reshape(-1)
print("Task {}:".format(task))
print("Predictions: {}".format(pred))
class_name = class_map[task][str(np.argmax(pred))]
print("Class name = {}".format(class_name))
print('********')
# Log into results_file if exists
if results_file:
with open(results_file, "a") as f:
f.write("Task {}:\n".format(task))
f.write("Predictions: {}\n".format(pred))
f.write("Class name = {}\n".format(class_name))
f.write('********\n')
s_logger.write(
status_level=status_logging.Status.SUCCESS,
message="Inference finished successfully."
)
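# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the original source):
# decoding raw per-task softmax outputs with the structure written to
# class_mapping.json by the training script. The task/class names and the
# prediction values below are made up.
# ---------------------------------------------------------------------------
def _example_decode_predictions():
    """Map per-task softmax vectors back to class names."""
    import numpy as np

    class_dict = {
        "tasks": ["base_color", "category"],
        "class_mapping": {
            "base_color": {"0": "black", "1": "blue", "2": "red"},
            "category": {"0": "sedan", "1": "suv"},
        },
    }
    raw_predictions = [np.array([[0.1, 0.7, 0.2]]), np.array([[0.9, 0.1]])]

    decoded = {}
    for idx, task in enumerate(class_dict["tasks"]):
        pred = raw_predictions[idx].reshape(-1)
        decoded[task] = class_dict["class_mapping"][task][str(np.argmax(pred))]
    return decoded  # {'base_color': 'blue', 'category': 'sedan'}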
if __name__ == "__main__":
arguments = parse_command_line_arguments()
results_file = None
# Create results directory and init status.json
if arguments["results_dir"]:
if not os.path.exists(arguments["results_dir"]):
os.makedirs(arguments["results_dir"])
status_file = os.path.join(arguments["results_dir"], "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=True,
verbosity=1,
append=True
)
)
results_file = os.path.join(arguments["results_dir"], "result.txt")
try:
inference(arguments['model_path'], image_file=arguments['image_dir'],
classmap=arguments['classmap'], key=arguments['key'],
results_file=results_file)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Inference was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/multitask_classification/scripts/inference.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perform Evaluation of the trained models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from multiprocessing import cpu_count
import os
from keras import backend as K
from keras.applications.imagenet_utils import preprocess_input
import numpy as np
import tensorflow as tf
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.common.utils import check_tf_oom
from nvidia_tao_tf1.cv.multitask_classification.data_loader.data_generator import (
MultiClassDataGenerator
)
from nvidia_tao_tf1.cv.multitask_classification.utils.model_io import load_model
from nvidia_tao_tf1.cv.multitask_classification.utils.spec_loader import load_experiment_spec
def build_command_line_parser(parser=None):
"""Build a command line parser for eval."""
if parser is None:
parser = argparse.ArgumentParser(description="TLT MultiTask Classification Evaluator")
parser.add_argument("--model_path",
"-m",
type=str,
required=True,
help="Path to TLT model file")
parser.add_argument("--experiment_spec",
"-e",
type=str,
required=True,
help="Path to experiment spec file")
parser.add_argument("--key",
"-k",
type=str,
default="",
help="TLT model key")
parser.add_argument("-r",
"--results_dir",
type=str,
default=None,
help="Path to results directory")
# Dummy arguments for Deploy
parser.add_argument('-i',
'--image_dir',
type=str,
required=False,
default=None,
help=argparse.SUPPRESS)
parser.add_argument('-b',
'--batch_size',
type=int,
required=False,
default=1,
help=argparse.SUPPRESS)
return parser
def parse_command_line_arguments(args=None):
"""Parse command line arguments for eval."""
parser = build_command_line_parser()
return vars(parser.parse_known_args(args)[0])
@check_tf_oom
def evaluate(model_file, img_root, target_csv, key, batch_size):
"""Wrapper function for evaluating MClassification application.
Args:
model_file, img_root, target_csv, key and batch_size, as parsed from the command line and the experiment spec.
"""
s_logger = status_logging.get_status_logger()
s_logger.write(
status_level=status_logging.Status.STARTED,
message="Starting evaluation."
)
# Horovod: pin GPU to be used to process local rank (one GPU per process)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
K.set_session(tf.Session(config=config))
K.set_learning_phase(0)
model = load_model(model_file, key=key)
# extracting the data format parameter to detect input shape
data_format = model.layers[1].data_format
# Computing shape of input tensor
image_shape = model.layers[0].input_shape[1:4]
# Setting input shape
if data_format == "channels_first":
image_height, image_width = image_shape[1:3]
else:
image_height, image_width = image_shape[0:2]
target_datagen = MultiClassDataGenerator(preprocessing_function=preprocess_input,
horizontal_flip=False
)
# Initializing data iterator: Val
target_iterator = target_datagen.flow_from_singledirectory(img_root,
target_csv,
target_size=(image_height,
image_width),
batch_size=batch_size)
print('Processing dataset (evaluation): {}'.format(target_csv))
nclasses_list = list(target_iterator.class_dict.values())
assert all(np.array(nclasses_list) > 0), "Invalid target dataset."
# If number of classes does not match the new data
assert np.sum(nclasses_list) == \
np.sum([l.get_shape().as_list()[-1] for l in model.output]), \
"The number of classes of the loaded model doesn't match the target dataset."
# Printing summary of retrieved model
model.summary()
# Evaluate the model on the full data set.
score = model.evaluate_generator(target_iterator,
len(target_iterator),
workers=cpu_count() - 1)
print('Total Val Loss:', score[0])
print('Tasks:', target_iterator.tasks_header)
print('Val loss per task:', score[1:1 + target_iterator.num_tasks])
print('Val acc per task:', score[1 + target_iterator.num_tasks:])
# Write val accuracy per task into kpi
tasks = target_iterator.tasks_header
val_accuracies = score[1 + target_iterator.num_tasks:]
kpi_dict = {key: float(value) for key, value in zip(tasks, val_accuracies)}
kpi_dict["mean accuracy"] = sum(val_accuracies) / len(val_accuracies)
s_logger.kpi.update(kpi_dict)
s_logger.write(
status_level=status_logging.Status.SUCCESS,
message="Evalation finished successfully."
)
if __name__ == '__main__':
args = parse_command_line_arguments()
experiment_spec = load_experiment_spec(args['experiment_spec'])
if args["results_dir"]:
if not os.path.exists(args["results_dir"]):
os.makedirs(args["results_dir"])
status_file = os.path.join(args["results_dir"], "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=True,
verbosity=1,
append=True
)
)
try:
evaluate(args['model_path'],
experiment_spec.dataset_config.image_directory_path,
experiment_spec.dataset_config.val_csv_path,
args['key'],
experiment_spec.training_config.batch_size_per_gpu)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Evaluation was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/multitask_classification/scripts/evaluate.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""TLT Multitask Classification entrypoint."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/multitask_classification/entrypoint/__init__.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""TLT command line wrapper to invoke CLI scripts."""
import sys
from nvidia_tao_tf1.cv.common.entrypoint.entrypoint import launch_job
import nvidia_tao_tf1.cv.multitask_classification.scripts
def main():
"""Function to launch the job."""
launch_job(
nvidia_tao_tf1.cv.multitask_classification.scripts,
"multitask_classification",
sys.argv[1:]
)
if __name__ == "__main__":
main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/multitask_classification/entrypoint/multitask_classification.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IVA Colornet model construction wrapper functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import AveragePooling2D, Dense, Flatten
from keras.layers import Input
from keras.models import Model
from nvidia_tao_tf1.core.templates.utils import _leaky_conv, arg_scope
from nvidia_tao_tf1.cv.common.models.backbones import get_backbone
def add_classification_branch(nclasses_dict, base_model, data_format,
kernel_regularizer, bias_regularizer):
"""Add classification branches for multitasknet."""
output = base_model.output
output_shape = output.get_shape().as_list()
all_out = []
for class_task, nclasses in sorted(nclasses_dict.items(), key=lambda x: x[0]):
# Implement a trick to use _leaky_conv for Conv+BN+ReLU.
with arg_scope([_leaky_conv],
use_batch_norm=True,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
alpha=0,
kernel=(3, 3),
padding='same',
freeze_bn=False,
use_bias=False,
force_relu=True):
attribute_output = _leaky_conv(output, 128,
name='multitask_'+class_task+'_conv1')
attribute_output = _leaky_conv(attribute_output, 64,
name='multitask_'+class_task+'_conv2')
if data_format == 'channels_first':
pool_size = (output_shape[-2], output_shape[-1])
else:
pool_size = (output_shape[-3], output_shape[-2])
attribute_output = AveragePooling2D(pool_size=pool_size,
data_format=data_format,
padding='valid',
name='multitask_'+class_task+'_pool')(attribute_output)
attribute_output = Flatten(name='multitask_'+class_task+'_flatten')(attribute_output)
attribute_output = Dense(nclasses, activation='softmax',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
name=class_task)(attribute_output)
all_out.append(attribute_output)
final_model = Model(inputs=base_model.input, outputs=all_out,
name='multitask_' + base_model.name)
return final_model
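# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the original source):
# the layer pattern each task head follows (conv -> conv -> average pool over
# the full feature map -> flatten -> softmax dense), written with stock Keras
# layers instead of the repo's _leaky_conv helper. Shapes are hypothetical.
# ---------------------------------------------------------------------------
def _example_single_task_head():
    """Build one toy classification head on top of a fake backbone output."""
    from keras.layers import AveragePooling2D, Conv2D, Dense, Flatten, Input
    from keras.models import Model

    # Pretend backbone output: 256 channels on a 7x7 grid (channels_first).
    feat = Input(shape=(256, 7, 7))
    x = Conv2D(128, (3, 3), padding='same', activation='relu',
               data_format='channels_first')(feat)
    x = Conv2D(64, (3, 3), padding='same', activation='relu',
               data_format='channels_first')(x)
    x = AveragePooling2D(pool_size=(7, 7), data_format='channels_first')(x)
    x = Flatten()(x)
    out = Dense(5, activation='softmax', name='example_task')(x)
    return Model(feat, out)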
def get_model(nclasses_dict,
arch="squeezenet",
input_shape=(3, 224, 224),
data_format=None,
kernel_regularizer=None,
bias_regularizer=None,
freeze_blocks=None,
**kwargs):
'''Wrapper function to construct a model.'''
input_image = Input(shape=input_shape)
base_model = get_backbone(backbone=arch,
input_tensor=input_image,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
freeze_blocks=freeze_blocks,
**kwargs)
final_model = add_classification_branch(nclasses_dict, base_model, data_format,
kernel_regularizer, bias_regularizer)
return final_model
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/multitask_classification/model/model_builder.py |
tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/multitask_classification/model/__init__.py |
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class to export trained .tlt models to etlt file."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from nvidia_tao_tf1.cv.common.export.keras_exporter import KerasExporter as Exporter
from nvidia_tao_tf1.cv.common.types.base_ds_config import BaseDSConfig
logger = logging.getLogger(__name__)
class MClassificationExporter(Exporter):
"""Define an exporter for classification models."""
def __init__(self,
output_tasks,
model_path=None,
key=None,
data_type="fp32",
strict_type=False,
backend="uff",
**kwargs):
"""Initialize the classification exporter.
Args:
output_tasks (list of str): The task names for the output heads.
model_path (str): Path to the model file.
key (str): Key to load the model.
data_type (str): TensorRT backend data type.
strict_type(bool): Apply TensorRT strict_type_constraints or not for INT8 mode.
backend (str): TensorRT parser to be used.
Returns:
None.
"""
super(MClassificationExporter, self).__init__(model_path=model_path,
key=key,
data_type=data_type,
strict_type=strict_type,
backend=backend)
self.output_tasks = output_tasks
def set_input_output_node_names(self):
"""Set input output node names."""
self.output_node_names = [n+"/Softmax" for n in self.output_tasks]
self.input_node_names = ["input_1"]
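# Illustrative note (editor's addition): for a hypothetical task list such as
# ['base_color', 'category'], the comprehension above yields the UFF output
# node names ['base_color/Softmax', 'category/Softmax'], i.e. the softmax op
# of each task's Dense head, while the graph input node stays 'input_1'.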
def generate_ds_config(self, input_dims, num_classes=None):
"""Generate Deepstream config element for the exported model."""
if input_dims[0] == 1:
color_format = "l"
else:
color_format = "bgr" if self.preprocessing_arguments["flip_channel"] else "rgb"
kwargs = {
"data_format": self.data_format,
"backend": self.backend,
# Setting network_type to 1 since this is a classification network.
"network_type": 1
}
if num_classes:
kwargs["num_classes"] = num_classes
if self.backend == "uff":
kwargs.update({
"input_names": self.input_node_names,
"output_names": self.output_node_names
})
ds_config = BaseDSConfig(
self.preprocessing_arguments["scale"],
self.preprocessing_arguments["means"],
input_dims,
color_format,
self.key,
**kwargs
)
return ds_config
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/multitask_classification/export/mclassification_exporter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to export a trained Mclassification model to an ETLT file for deployment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/multitask_classification/export/__init__.py |
tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/multitask_classification/data_loader/__init__.py |
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IVA MultiTask model data generator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from keras import backend as K
from keras.preprocessing.image import array_to_img, img_to_array, load_img
from keras.preprocessing.image import ImageDataGenerator, Iterator
import numpy as np
import pandas as pd
class SingleDirectoryIterator(Iterator):
"""Iterator capable of reading images from a directory (no subdir) on disk.
# Arguments
directory: Path to the directory to read images from.
All images should be under this directory.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
class_table: A pandas table containing the true label of the images
target_size: tuple of integers, dimensions to resize input images to.
color_mode: One of `"rgb"`, `"grayscale"`. Color mode to read images.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
"""
def __init__(self, directory, image_data_generator,
class_table, target_size=(256, 256),
color_mode='rgb', batch_size=32, shuffle=True,
seed=None, data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
follow_links=False, subset=None, interpolation='nearest'):
"""init function for the Iterator.
# Code largely from Keras DirectoryIterator
# https://github.com/keras-team/keras/blob/master/keras/preprocessing/image.py#L1507
"""
if data_format is None:
data_format = K.image_data_format()
self.directory = directory
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
if color_mode not in {'rgb', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb" or "grayscale".')
self.color_mode = color_mode
self.data_format = data_format
if self.color_mode == 'rgb':
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (3,)
else:
self.image_shape = (3,) + self.target_size
else:
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (1,)
else:
self.image_shape = (1,) + self.target_size
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
self.interpolation = interpolation
self.subset = subset
self._generate_class_mapping(class_table)
self.num_tasks = len(self.tasks_header)
self.data_df = class_table
# print total # of images with K tasks
print('Found %d images with %d tasks (%s)' %
(self.samples, self.num_tasks, self.class_dict))
super(SingleDirectoryIterator, self).__init__(self.samples,
batch_size,
shuffle,
seed)
def _generate_class_mapping(self, class_table):
"""Prepare task dictionary and class mapping."""
self.filenames = class_table.iloc[:, 0].values
self.samples = len(self.filenames)
self.tasks_header = sorted(class_table.columns.tolist()[1:])
self.class_dict = {}
self.class_mapping = {}
for task in self.tasks_header:
unique_vals = sorted(class_table.loc[:, task].unique())
self.class_dict[task] = len(unique_vals)
self.class_mapping[task] = dict(zip(unique_vals, range(len(unique_vals))))
# convert class dictionary to a sorted list
self.class_dict_list_sorted = sorted(self.class_dict.items(), key=lambda x: x[0])
self.class_values_list_sorted = list(zip(*list(self.class_dict_list_sorted)))[1]
def _get_batches_of_transformed_samples(self, index_array):
"""Prepare input and the groundtruth for a batch of data."""
batch_x = np.zeros(
(len(index_array),) + self.image_shape,
dtype=K.floatx())
grayscale = self.color_mode == 'grayscale'
# build batch of image data
for i, j in enumerate(index_array):
fname = self.filenames[j]
img = load_img(os.path.join(self.directory, fname),
grayscale=grayscale,
target_size=self.target_size,
interpolation=self.interpolation)
x = img_to_array(img, data_format=self.data_format)
x = self.image_data_generator.random_transform(x)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
# optionally save augmented images to disk for debugging purposes
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e7),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
# build batch of labels
# one-hot encoding
batch_y = []
for _, cls_cnt in self.class_dict_list_sorted:
batch_y.append(np.zeros((len(index_array), cls_cnt), dtype=K.floatx()))
index = 0
for _, row in self.data_df.iloc[index_array, :].iterrows():
for i, (c, _) in enumerate(self.class_dict_list_sorted):
batch_y[i][index, self.class_mapping[c][row[c]]] = 1.
index += 1
return batch_x, batch_y
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
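# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the original source):
# the task/class bookkeeping derived by
# SingleDirectoryIterator._generate_class_mapping, reproduced on a tiny
# hypothetical label table.
# ---------------------------------------------------------------------------
def _example_class_mapping():
    """Build class_dict and class_mapping from a toy label table."""
    import pandas as pd

    class_table = pd.DataFrame(
        [['a.jpg', 'red', 'suv'],
         ['b.jpg', 'blue', 'suv'],
         ['c.jpg', 'red', 'sedan']],
        columns=['filename', 'base_color', 'category'])
    tasks_header = sorted(class_table.columns.tolist()[1:])
    class_dict, class_mapping = {}, {}
    for task in tasks_header:
        unique_vals = sorted(class_table.loc[:, task].unique())
        class_dict[task] = len(unique_vals)
        class_mapping[task] = dict(zip(unique_vals, range(len(unique_vals))))
    # class_dict    -> {'base_color': 2, 'category': 2}
    # class_mapping -> {'base_color': {'blue': 0, 'red': 1},
    #                   'category': {'sedan': 0, 'suv': 1}}
    return class_dict, class_mapping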
class MultiClassDataGenerator(ImageDataGenerator):
"""Generate batches of tensor image data with real-time data augmentation.
Code based on ImageDataGenerator in Keras.
"""
def flow_from_singledirectory(self, directory, label_csv,
target_size=(256, 256), color_mode='rgb',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
"""Get flow from a single directory with all labels in a separate CSV."""
df = pd.read_csv(label_csv)
return SingleDirectoryIterator(
directory, self, df,
target_size=target_size, color_mode=color_mode,
data_format=self.data_format,
batch_size=batch_size, shuffle=shuffle, seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
follow_links=follow_links,
subset=subset,
interpolation=interpolation)
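# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the original source):
# wiring the generator to an image directory and a label CSV. The paths,
# target size and batch size below are hypothetical, and the function is not
# called anywhere; it only documents the intended usage.
# ---------------------------------------------------------------------------
def _example_flow_from_singledirectory():
    """Sketch of creating a training iterator from a single image directory."""
    from keras.applications.imagenet_utils import preprocess_input

    datagen = MultiClassDataGenerator(preprocessing_function=preprocess_input,
                                      horizontal_flip=True)
    iterator = datagen.flow_from_singledirectory(
        '/data/images',            # hypothetical image root
        '/data/labels_train.csv',  # hypothetical label CSV
        target_size=(224, 224),
        batch_size=32)
    # Each batch is an image tensor plus one one-hot label array per task.
    batch_x, batch_y = next(iterator)
    return batch_x, batch_y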
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/multitask_classification/data_loader/data_generator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BpNet module."""
from nvidia_tao_tf1.cv.bpnet import dataloaders
from nvidia_tao_tf1.cv.bpnet import learning_rate_schedules
from nvidia_tao_tf1.cv.bpnet import losses
from nvidia_tao_tf1.cv.bpnet import models
from nvidia_tao_tf1.cv.bpnet import optimizers
from nvidia_tao_tf1.cv.bpnet import trainers
__all__ = (
"dataloaders",
"learning_rate_schedules",
"losses",
"models",
"optimizers",
"trainers",
)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BpNet Losses."""
from nvidia_tao_tf1.cv.bpnet.losses.bpnet_loss import BpNetLoss
__all__ = ('BpNetLoss', )
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/losses/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for BpNet loss."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras import backend as K
import numpy as np
import pytest
import tensorflow as tf
from nvidia_tao_tf1.cv.bpnet.losses.bpnet_loss import BpNetLoss
class Config():
"""Mock config class."""
def __init__(self, num_paf_layers, num_cmap_layers):
"""__init__ method.
Args:
num_paf_layers (int): number of channels in part-affinity fields head.
num_cmap_layers (int): number of channels in heatmap head.
"""
self.gt_slice_index = {
'paf': [0, num_paf_layers],
'heatmap_with_background': [num_paf_layers, num_cmap_layers + num_paf_layers]
}
# Number of output channels for the network heads
NUM_PAF_CHANNELS = 38
NUM_CMAP_CHANNELS = 19
# Ground truth feature map shape [batchsize, height, width, channels]
GT_SHAPE = [2, 64, 64, NUM_PAF_CHANNELS + NUM_CMAP_CHANNELS]
# Part affinity field head output shape [batchsize, height, width, channels]
# where num_connections
PAF_SHAPE = [2, 64, 64, NUM_PAF_CHANNELS]
# Confidence map head output shape [batchsize, height, width, channels]
CMAP_SHAPE = [2, 64, 64, NUM_CMAP_CHANNELS]
test_inputs = [
# Different sequences
([('cmap', 1), ('paf', 1)], [1, 1], [38, 19]),
([('cmap', 1), ('cmap', 2), ('paf', 1)], [1, 1], [38, 19]),
([('cmap', 1), ('cmap', 2), ('cmap', 3)], [1, 1], [38, 19]),
# Different masks [mask_paf, mask_cmap]
([('cmap', 1), ('cmap', 2), ('paf', 1), ('paf', 2)], [0, 0], [38, 19]),
([('cmap', 1), ('cmap', 2), ('paf', 1), ('paf', 2)], [0, 1], [38, 19]),
([('cmap', 1), ('cmap', 2), ('paf', 1), ('paf', 2)], [1, 0], [38, 19]),
([('cmap', 1), ('cmap', 2), ('paf', 1), ('paf', 2)], [1, 1], [38, 19]),
# Different feature map sizes [num_paf_layers, num_cmap_layers]
([('cmap', 1), ('cmap', 2), ('cmap', 3)], [1, 1], [20, 20]),
([('cmap', 1), ('cmap', 2), ('cmap', 3)], [1, 1], [0, 20]),
]
def make_np_predictions(output_sequence):
"""Calculate the predictions in numpy.
Args:
output_sequence (list): specifies the feature map type for each prediction.
ex. [('cmap', 1), ('cmap', 2), ('paf', 1), ('paf', 2)] for 2 stage model
where 'cmap' is confidence map (or) heatmap and,
'paf' is part affinity fields.
"""
np_pred = []
for output in output_sequence:
if output[0] == 'cmap':
np_pred.append(
np.random.uniform(size=tuple(CMAP_SHAPE)).astype('float32'))
elif output[0] == 'paf':
np_pred.append(
np.random.uniform(size=tuple(PAF_SHAPE)).astype('float32'))
else:
raise ValueError("output_sequence must be in ['cmap', 'paf']")
return np_pred
def make_np_masks(mask_configuration, feat_size):
"""Calculate the masks in numpy.
Args:
mask_configuration (list): list of int specifying which output head to mask
[paf_mask_val, cmap_mask_val]
feat_size (list): list of int specifying number of channels in each output
heads [num_paf_features, num_cmap_features]
"""
np_mask = np.ones(shape=tuple(GT_SHAPE)).astype('float32')
np_mask[:, :, :, :feat_size[0]] = float(mask_configuration[0])
np_mask[:, :, :, feat_size[1]:] = float(mask_configuration[1])
return np_mask
def calculate_np_loss(np_cmap_gt, np_paf_gt, np_preds, np_paf_mask,
np_cmap_mask, output_sequence):
"""Calculate the expected losses in numpy.
Args:
np_cmap_gt (np.ndarray): ground truth cmap (NHWC).
np_paf_gt (np.ndarray): ground truth pafmap (NHWC).
np_preds (list): list of predictions of each head and stage (np.ndarray)
np_paf_mask (np.ndarray): mask for pafmap (NHWC).
np_cmap_mask (np.ndarray): mask for cmap (NHWC).
output_sequence (list): specifies the feature map type for each prediction.
ex. [('cmap', 1), ('cmap', 2), ('paf', 1), ('paf', 2)] for 2 stage model
where 'cmap' is confidence map (or) heatmap and,
'paf' is part affinity fields.
"""
losses = []
for idx, pred in enumerate(np_preds):
if output_sequence[idx][0] == 'cmap':
loss = np.sum(np.square(((pred - np_cmap_gt) * np_cmap_mask))) / 2
if output_sequence[idx][0] == 'paf':
loss = np.sum(np.square(((pred - np_paf_gt) * np_paf_mask))) / 2
losses.append(loss)
return losses
def assert_equal_losses(losses, expected_losses):
"""Assert if the losses calculated match the expected losses.
Args:
losses (list): list of tensors containing loss for each stage/prediction
expected_losses (list): list of np.ndarray containing expected loss for
each stage/prediction.
"""
assert len(losses) == len(expected_losses)
for idx in range(len(losses)):
np.testing.assert_almost_equal(np.mean(losses[idx]),
np.mean(expected_losses[idx]),
decimal=2)
@pytest.mark.parametrize("output_sequence, mask_configuration, feat_size",
test_inputs)
def test_bpnet_loss(output_sequence, mask_configuration, feat_size):
"""Function to test BpNet loss.
Args:
output_sequence (list): specifies the feature map type for each prediction.
ex. [('cmap', 1), ('cmap', 2), ('paf', 1), ('paf', 2)] for 2 stage model
where 'cmap' is confidence map (or) heatmap and,
'paf' is part affinity fields.
mask_configuration (list): list of int specifying which output head to mask
[paf_mask_val, cmap_mask_val]
feat_size (list): list of int specifying number of channels in each output
heads [num_paf_features, num_cmap_features]
"""
# Update params based on test case
GT_SHAPE[3] = np.sum(feat_size)
PAF_SHAPE[3] = feat_size[0]
CMAP_SHAPE[3] = feat_size[1]
config = Config(feat_size[0], feat_size[1])
# Generate the groundtruth and prediction.
np_gt = np.random.uniform(size=tuple(GT_SHAPE)).astype('float32')
np_preds = make_np_predictions(output_sequence)
np_mask = make_np_masks(mask_configuration, feat_size)
np_paf_gt = np_gt[:, :, :, :feat_size[0]]
np_cmap_gt = np_gt[:, :, :, feat_size[0]:]
np_paf_mask = np_mask[:, :, :, :feat_size[0]]
np_cmap_mask = np_mask[:, :, :, feat_size[0]:]
expected_losses = calculate_np_loss(np_cmap_gt, np_paf_gt, np_preds,
np_paf_mask, np_cmap_mask,
output_sequence)
# Build loss
losses_tensor = BpNetLoss()(K.constant(np_gt),
[K.constant(np_pred) for np_pred in np_preds],
K.constant(np_mask), output_sequence, config.gt_slice_index)
with tf.Session() as sess:
losses = sess.run(losses_tensor)
assert_equal_losses(losses, expected_losses)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/losses/test_bpnet_loss.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loss Functions used by BpNet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nvidia_tao_tf1.blocks.losses.loss import Loss
import nvidia_tao_tf1.core as tao_core
import tensorflow as tf
class BpNetLoss(Loss):
"""Loss Class for BpNet."""
@tao_core.coreobject.save_args
def __init__(self,
focal_loss_params=None,
**kwargs):
"""__init__ method.
Args:
focal_loss_params (dict): Focal L2 loss parameters; expects the keys
'use_focal_loss', 'alpha', 'beta', 'gamma' and 'bkg_thresh'. If None, the plain L2 loss is used.
"""
super(BpNetLoss, self).__init__(**kwargs)
if not focal_loss_params:
self._use_focal_loss = False
else:
self._use_focal_loss = focal_loss_params["use_focal_loss"]
self._focal_loss_params = focal_loss_params
def __call__(self,
labels,
predictions,
masks,
output_sequence,
gt_slice_index):
"""__call__ method.
Calculate the loss.
Args:
labels (tf.Tensor): labels.
predictions (tf.Tensor): predictions.
masks (tf.Tensor): regions over which loss is ignored.
output_sequence (list): specifies the feature map type for each prediction.
ex. [('cmap', 1), ('cmap', 2), ('paf', 1), ('paf', 2)] for 2 stage model
where 'cmap' is confidence map (or) heatmap and,
'paf' is part affinity fields.
gt_slice_index (dict): start and end channel indices for 'paf' and
'heatmap_with_background' in the packed label tensor.
Returns:
losses (list): list of tensors containing loss for each stage/prediction
"""
paf_end_idx = gt_slice_index['paf'][1]
paf_start_idx = gt_slice_index['paf'][0]
cmap_end_idx = gt_slice_index['heatmap_with_background'][1]
cmap_start_idx = gt_slice_index['heatmap_with_background'][0]
# split the mask into paf_mask and heat_mask
paf_mask = masks[:, :, :, paf_start_idx:paf_end_idx]
cmap_mask = masks[:, :, :, cmap_start_idx:cmap_end_idx]
# split the label into paf_label and heat_label
paf_labels = labels[:, :, :, paf_start_idx:paf_end_idx]
cmap_labels = labels[:, :, :, cmap_start_idx:cmap_end_idx]
assert len(output_sequence) == len(predictions)
if self._use_focal_loss:
losses = self.focal_loss(
predictions,
cmap_labels,
paf_labels,
cmap_mask,
paf_mask,
output_sequence
)
else:
losses = self.l2_loss(
predictions,
cmap_labels,
paf_labels,
cmap_mask,
paf_mask,
output_sequence
)
return losses
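# Illustrative note (editor's addition): with a hypothetical
# gt_slice_index = {'paf': [0, 38], 'heatmap_with_background': [38, 57]}
# and labels/masks of shape (N, H, W, 57), the slicing above yields
# paf tensors of shape (N, H, W, 38) and heatmap tensors of shape (N, H, W, 19).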
def l2_loss(self,
predictions,
cmap_labels,
paf_labels,
cmap_mask,
paf_mask,
output_sequence):
"""Function to compute l2 loss.
Args:
predictions (tf.Tensor): model predictions.
cmap_labels (tf.Tensor): heatmap ground truth labels.
paf_labels (tf.Tensor): part affinity field ground truth labels.
cmap_mask (tf.Tensor): heatmap regions over which loss is ignored.
paf_mask (tf.Tensor): paf regions over which loss is ignored.
output_sequence (list): specifies the feature map type for each prediction.
ex. [('cmap', 1), ('cmap', 2), ('paf', 1), ('paf', 2)] for 2 stage model
where 'cmap' is confidence map (or) heatmap and,
'paf' is part affinity fields.
Returns:
losses (list): list of tensors containing loss for each stage/prediction
"""
losses = []
for idx, pred in enumerate(predictions):
if output_sequence[idx][0] == 'cmap':
loss = tf.nn.l2_loss((pred - cmap_labels) * cmap_mask)
tf.summary.scalar(name='cmap_loss_stage{}'.format(
output_sequence[idx][1]),
tensor=loss)
elif output_sequence[idx][0] == 'paf':
loss = tf.nn.l2_loss((pred - paf_labels) * paf_mask)
tf.summary.scalar(name='paf_loss_stage{}'.format(
output_sequence[idx][1]),
tensor=loss)
else:
raise ValueError("output_sequence must be in ['cmap', 'paf']")
losses.append(loss)
return losses
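# Illustrative note (editor's addition): tf.nn.l2_loss(x) evaluates to
# sum(x ** 2) / 2, so each per-stage loss above equals the NumPy expression
# np.sum(np.square((pred - label) * mask)) / 2, which is also the reference
# formula used in the accompanying unit test.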
def focal_loss(self,
predictions,
cmap_labels,
paf_labels,
cmap_mask,
paf_mask,
output_sequence):
"""Function to focal l2 loss.
Args:
predictions (tf.Tensor): model predictions.
cmap_labels (tf.Tensor): heatmap ground truth labels.
paf_labels (tf.Tensor): part affinity field ground truth labels.
cmap_mask (tf.Tensor): heatmap regions over which loss is ignored.
paf_mask (tf.Tensor): paf regions over which loss is ignored.
output_sequence (list): specifies the feature map type for each prediction.
ex. [('cmap', 1), ('cmap', 2), ('paf', 1), ('paf', 2)] for 2 stage model
where 'cmap' is confidence map (or) heatmap and,
'paf' is part affinity fields.
Returns:
losses (list): list of tensors containing loss for each stage/prediction
"""
cmap_bkg_mask = tf.greater(cmap_labels, self._focal_loss_params['bkg_thresh'])
paf_bkg_mask = tf.greater(paf_labels, self._focal_loss_params['bkg_thresh'])
losses = []
for idx, pred in enumerate(predictions):
if output_sequence[idx][0] == 'cmap':
focal_scaling = self.compute_focal_loss_factor(
cmap_bkg_mask, cmap_labels, pred)
loss = tf.nn.l2_loss((pred - cmap_labels) * cmap_mask * focal_scaling)
tf.summary.scalar(name='cmap_loss_stage{}'.format(
output_sequence[idx][1]),
tensor=loss)
elif output_sequence[idx][0] == 'paf':
focal_scaling = self.compute_focal_loss_factor(
paf_bkg_mask, paf_labels, pred)
loss = tf.nn.l2_loss((pred - paf_labels) * paf_mask * focal_scaling)
tf.summary.scalar(name='paf_loss_stage{}'.format(
output_sequence[idx][1]),
tensor=loss)
else:
raise ValueError("output_sequence must be in ['cmap', 'paf']")
losses.append(loss)
return losses
def compute_focal_loss_factor(self, bkg_mask, label, prediction):
"""Function to compute focal loss factor.
Args:
bkg_mask (tf.Tensor): binary mask with background pixels as `False`.
prediction (tf.Tensor): model prediction.
label (tf.Tensor): ground truth label.
Returns:
scale (tf.Tensor): scale factor to use for loss
"""
adjusted_scores = tf.where(
bkg_mask,
prediction - self._focal_loss_params['alpha'],
1 - prediction - self._focal_loss_params['beta']
)
scale = tf.pow(tf.abs(1 - adjusted_scores), self._focal_loss_params['gamma'])
return scale
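    # The focal scaling above is scale = |1 - s| ** gamma, where s is the
    # "adjusted score": (prediction - alpha) on foreground pixels and
    # (1 - prediction - beta) on background pixels. A minimal numeric sketch,
    # not part of the original file, with assumed values alpha=0.1, gamma=2.0:
    #
    #   import numpy as np
    #   alpha, gamma = 0.1, 2.0
    #   # Confident foreground prediction -> small scale -> loss down-weighted.
    #   easy_fg = np.abs(1.0 - (0.9 - alpha)) ** gamma   # 0.04
    #   # Poor foreground prediction -> scale close to 1 -> loss kept as-is.
    #   hard_fg = np.abs(1.0 - (0.1 - alpha)) ** gamma   # 1.0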
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/losses/bpnet_loss.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""A launcher script for DriveIX BpNet tasks inside a runtime container."""
from maglev_sdk.docker_container.entrypoint import main
if __name__ == '__main__':
main('bpnet', 'nvidia_tao_tf1/cv/bpnet/scripts')
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/docker/bpnet.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converts a COCO pose estimation dataset to TFRecords."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import numpy as np
from six.moves import range as xrange
import tensorflow as tf
import tqdm
from nvidia_tao_tf1.core.dataloader.tfrecord.converter_lib import _bytes_feature
from nvidia_tao_tf1.core.dataloader.tfrecord.converter_lib import _int64_feature
from nvidia_tao_tf1.cv.bpnet.dataio.coco_dataset import COCODataset
from nvidia_tao_tf1.cv.bpnet.dataio.dataset_converter_lib import DatasetConverter
logger = logging.getLogger(__name__)
class COCOConverter(DatasetConverter):
"""Converts a COCO dataset to TFRecords."""
def __init__(self,
dataset_spec,
root_directory_path,
num_partitions,
num_shards,
output_filename,
mode,
generate_masks=False,
check_if_images_and_masks_exist=False):
"""Initialize the converter.
Keypoint Ordering
=================
"keypoints": {
0: "nose",
1: "left_eye",
2: "right_eye",
3: "left_ear",
4: "right_ear",
5: "left_shoulder",
6: "right_shoulder",
7: "left_elbow",
8: "right_elbow",
9: "left_wrist",
10: "right_wrist",
11: "left_hip",
12: "right_hip",
13: "left_knee",
14: "right_knee",
15: "left_ankle",
16: "right_ankle"
}
====================
Args:
root_directory_path (string): Dataset root directory path.
dataset_spec (dict): Specifications and parameters to be used for
the dataset export
num_partitions (int): Number of partitions (folds).
num_shards (int): Number of shards.
output_filename (str): Path for the output file.
mode (string): train/test.
generate_masks (bool): Generate and save masks of regions with unlabeled people
check_if_images_and_masks_exist (bool): check if the required files
exist in the data location.
"""
super(COCOConverter, self).__init__(
root_data_directory_path=root_directory_path,
num_partitions=num_partitions,
num_shards=num_shards,
output_filename=output_filename)
# Create an instance of COCODataset class
self.dataset = COCODataset(dataset_spec, parse_annotations=True)
self.dataset_name = self.dataset.dataset_name
# Get the person category config
self.num_joints = self.dataset.pose_config["num_joints"]
self.duplicate_data_with_each_person_as_center = \
dataset_spec["duplicate_data_with_each_person_as_center"]
self._check_if_images_and_masks_exist = check_if_images_and_masks_exist
if mode == 'train':
self.images_root_dir_path = self.dataset.train_images_root_dir_path
self.mask_root_dir_path = self.dataset.train_masks_root_dir_path
self.data = self.dataset.train_data
else:
self.images_root_dir_path = self.dataset.test_images_root_dir_path
self.mask_root_dir_path = self.dataset.test_masks_root_dir_path
self.data = self.dataset.test_data
# Generate and save binary masks
if generate_masks:
mask_root_dir = os.path.join(self.root_dir, self.mask_root_dir_path)
self.dataset.process_segmentation_masks(self.data, mask_root_dir)
# Reformat data so it can be converted to tfrecords
# by the base class `DatasetConverter`
self.reformatted_data = self._reformat_dataset()
# make output directory if not already existing
if not os.path.exists(os.path.dirname(output_filename)):
os.makedirs(os.path.dirname(output_filename))
def _reformat_dataset(self):
"""Reformat the dataset as required before writing into tfrecords.
This function compiles the necessary keys that would be stored in the tfrecords.
        It also provides an option to duplicate images that contain multiple
        people, emitting one data point per person of interest (so that later,
        during augmentation, the crop can be centered on that person).
"""
reformatted_data = []
for data_point in tqdm.tqdm(self.data):
for _, main_person in enumerate(data_point['main_persons']):
reformatted_data_point = {}
reformatted_data_point['image_id'] = data_point['image_id']
reformatted_data_point['image_meta'] = data_point['image_meta']
image_path = os.path.join(
self.images_root_dir_path, data_point['image_meta']['file_name'])
mask_path = os.path.join(
self.mask_root_dir_path, data_point['image_meta']['file_name'])
reformatted_data_point['image_path'] = image_path
reformatted_data_point['mask_path'] = mask_path
                # This path check is slow, so it is optional and only run when verification is requested.
if self._check_if_images_and_masks_exist:
if not os.path.exists(os.path.join(self.root_dir, image_path)):
logger.warning(
"Skipping data point. Image doesn't exist: {}".format(image_path))
break
if not os.path.exists(os.path.join(self.root_dir, mask_path)):
logger.warning(
"Skipping data point. Mask doesn't exist: {}".format(mask_path))
break
num_other_people = 0
joints = [main_person["joint"]]
scales = [main_person["scale_provided"]]
centers = [main_person["objpos"]]
iscrowd_flags = [main_person["iscrowd"]]
segmentation_masks = [main_person["segmentation"]]
num_keypoints = [main_person["num_keypoints"]]
for oidx, other_person in enumerate(data_point['all_persons']):
if main_person is other_person:
person_idx = oidx
continue
if other_person["num_keypoints"] == 0:
continue
joints.append(other_person["joint"])
scales.append(other_person["scale_provided"])
centers.append(other_person["objpos"])
iscrowd_flags.append(other_person["iscrowd"])
segmentation_masks.append(other_person["segmentation"])
num_keypoints.append(other_person["num_keypoints"])
num_other_people += 1
reformatted_data_point['person_idx'] = person_idx
reformatted_data_point['num_other_people'] = num_other_people
reformatted_data_point['joints'] = np.asarray(joints, dtype=np.float64)
reformatted_data_point['scales'] = np.asarray(scales, dtype=np.float64)
reformatted_data_point['centers'] = np.asarray(centers, dtype=np.float64)
reformatted_data_point['iscrowd_flags'] = iscrowd_flags
reformatted_data_point['segmentation_masks'] = segmentation_masks
reformatted_data_point['num_keypoints'] = num_keypoints
reformatted_data.append(reformatted_data_point)
if not self.duplicate_data_with_each_person_as_center:
break
return reformatted_data
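    # Illustrative structure of one reformatted data point produced above
    # (values are hypothetical, not from the original file):
    #
    #   {
    #       'image_id': 42,
    #       'image_meta': {...},
    #       'image_path': 'images/val2017/000000000042.jpg',
    #       'mask_path': 'masks/val2017/000000000042.jpg',
    #       'person_idx': 0,           # index of the main person in all_persons
    #       'num_other_people': 2,     # other labeled people kept in the sample
    #       'joints': <np.ndarray of shape (1 + num_other_people, num_joints, 3)>,
    #       'scales': <np.ndarray>, 'centers': <np.ndarray>,
    #       'iscrowd_flags': [...], 'segmentation_masks': [...],
    #       'num_keypoints': [...]
    #   }
    #
    # When `duplicate_data_with_each_person_as_center` is True, one such entry
    # is emitted per main person in the image.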
def _partition(self):
"""Partition dataset to self.output_partitions partitions based on sequences.
Returns:
partitions (list): A list of lists of data points, one list per partition.
"""
if self.output_partitions > 1:
partitions = [[] for _ in xrange(self.output_partitions)]
# TODO: Sort the dataset based on imageid?
for counter, data_point in enumerate(self.reformatted_data):
partition_idx = counter % self.output_partitions
partitions[partition_idx].append(data_point)
else:
partitions = [self.reformatted_data]
return partitions
def _create_example_proto(self, data_point):
"""Generate the example proto for this frame.
Args:
data_point (dict): Dictionary containing the details about the data sample.
Returns:
example (tf.train.Example): An Example containing all labels for the frame.
"""
# Create proto for the training example. Populate with frame attributes.
example = self._example_proto(data_point)
if example is not None:
# Add labels.
self._add_person_labels(example, data_point)
return example
def _add_person_labels(self, example, data_point):
"""Add joint labels of all persons in the image to the Example protobuf.
Args:
data_point (dict): Dictionary containing the details about the data sample.
example (tf.train.Example): An Example containing all labels for the frame.
"""
joints = data_point['joints']
scales = data_point['scales']
centers = data_point['centers']
person_idx = data_point['person_idx']
num_other_people = data_point['num_other_people']
f = example.features.feature
f['person/joints'].MergeFrom(_bytes_feature(joints.tostring()))
f['person/scales'].MergeFrom(_bytes_feature(scales.tostring()))
f['person/centers'].MergeFrom(_bytes_feature(centers.tostring()))
f['person/person_idx'].MergeFrom(_int64_feature(person_idx))
f['person/num_other_people'].MergeFrom(_int64_feature(num_other_people))
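    # A minimal decoding sketch (an assumption, not part of the original file)
    # showing how the serialized person features written above can be recovered
    # when parsing the tfrecord; joints/scales/centers were serialized with
    # ndarray.tostring() from float64 arrays:
    #
    #   import numpy as np
    #   # `features` is the parsed feature dict of one example.
    #   joints = np.frombuffer(features['person/joints'], dtype=np.float64)
    #   joints = joints.reshape(-1, num_joints, 3)  # num_joints from dataset spec
    #   centers = np.frombuffer(features['person/centers'],
    #                           dtype=np.float64).reshape(-1, 2)
    #   scales = np.frombuffer(features['person/scales'], dtype=np.float64)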
def _example_proto(self, data_point):
"""Generate a base Example protobuf to which COCO-specific features are added.
Args:
data_point (dict): Dictionary containing the details about the data sample.
"""
image_id = data_point['image_id']
width = data_point['image_meta']['width']
height = data_point['image_meta']['height']
image_path = data_point['image_path']
mask_path = data_point['mask_path']
example = tf.train.Example(features=tf.train.Features(feature={
'frame/image_id': _int64_feature(image_id),
'frame/height': _int64_feature(height),
'frame/width': _int64_feature(width),
'frame/image_path': _bytes_feature(str.encode(image_path)),
'frame/mask_path': _bytes_feature(str.encode(mask_path)),
'dataset': _bytes_feature(str.encode(self.dataset_name))
}))
return example
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/dataio/coco_converter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command line interface for converting pose datasets to TFRecords."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import logging
from nvidia_tao_tf1.cv.bpnet.dataio.build_converter import build_converter
def main(args=None):
"""
    Convert a pose estimation dataset to TFRecords.
Args:
args(list): list of arguments to be parsed if called from another module.
"""
parser = argparse.ArgumentParser(prog='dataset_converter',
description='Convert pose datasets to TFRecords')
parser.add_argument(
'-d',
'--dataset_spec',
required=True,
help='Path to the dataset spec containing config for exporting .tfrecords.')
parser.add_argument(
'-o',
'--output_filename',
required=True,
help='Output file name.')
parser.add_argument(
'-m',
'--mode',
required=False,
default='train',
help='Converter mode: train/test.')
    parser.add_argument(
        '-p',
        '--num_partitions',
        required=False,
        type=int,
        default=1,
        help='Number of partitions (folds).')
    parser.add_argument(
        '-s',
        '--num_shards',
        required=False,
        type=int,
        default=0,
        help='Number of shards.')
parser.add_argument(
'--generate_masks',
action='store_true',
help='Generate and save masks of regions with unlabeled people - used for training.')
parser.add_argument(
'--check_files',
action='store_true',
help='Check if the files including images and masks exist in the given root data dir.')
args = parser.parse_args(args)
# Load config file
with open(args.dataset_spec, "r") as f:
dataset_spec_json = json.load(f)
converter = build_converter(
dataset_spec_json,
args.output_filename,
mode=args.mode,
num_partitions=args.num_partitions,
num_shards=args.num_shards,
generate_masks=args.generate_masks,
check_if_images_and_masks_exist=args.check_files)
converter.convert()
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
main()
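# Example invocation (paths are hypothetical; the dataset spec json follows the
# format under nvidia_tao_tf1/cv/bpnet/dataio/dataset_specs):
#
#   python dataset_converter.py \
#       -d dataset_specs/coco_spec.json \
#       -o /workspace/tfrecords/coco/train \
#       -m train -p 1 -s 0 --generate_masks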
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/dataio/dataset_converter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BpNet DataIO definitions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/dataio/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""COCO pose estimation dataset."""
import json
import logging
import multiprocessing
import os
import cv2
import matplotlib
# Use Agg to avoid installing an additional backend package like python-tk.
# This call (mpl.use('Agg')) needs to happen before importing pyplot.
matplotlib.use('Agg')  # noqa # pylint-disable: To stop static test complaints.
import numpy as np
import pandas
from pycocotools.coco import COCO
# TODO: Use custom script for COCO eval to support different combination
from pycocotools.cocoeval import COCOeval
from scipy.spatial.distance import cdist
import tqdm
from nvidia_tao_tf1.cv.bpnet.inferencer.bpnet_inferencer import BpNetInferencer
from nvidia_tao_tf1.cv.bpnet.utils import dataio_utils
logger = logging.getLogger(__name__)
class COCODataset(object):
"""COCO Dataset helper class."""
def __init__(self,
dataset_spec,
parse_annotations=False):
"""Init function.
Args:
dataset_spec (dict): Specifications and parameters to be used for
                the dataset export
            parse_annotations (bool): If True, parse the per-image person
                annotations when loading the dataset.
        """
self.dataset_name = dataset_spec["dataset"]
self.pose_config = self._get_category(dataset_spec, 'person')
self.num_joints = self.pose_config["num_joints"]
self.parts = self.pose_config["keypoints"]
self.parts2idx = dict(zip(self.parts, range(self.num_joints)))
assert self.num_joints == len(self.parts), "Assertion error: num_joints and \
the number of keypoints are not matching! Please check dataset_spec config"
self.root_dir = dataset_spec['root_directory_path']
self.train_anno_path = os.path.join(
self.root_dir, dataset_spec["train_data"]['annotation_root_dir_path'])
self.test_anno_path = os.path.join(
self.root_dir, dataset_spec["test_data"]['annotation_root_dir_path'])
self.train_images_root_dir_path = dataset_spec["train_data"]["images_root_dir_path"]
self.train_masks_root_dir_path = dataset_spec["train_data"]["mask_root_dir_path"]
self.test_images_root_dir_path = dataset_spec["test_data"]["images_root_dir_path"]
self.test_masks_root_dir_path = dataset_spec["test_data"]["mask_root_dir_path"]
# Data filtering parameters
self.min_acceptable_kpts = dataset_spec["data_filtering_params"]["min_acceptable_kpts"]
self.min_acceptable_width = dataset_spec["data_filtering_params"]["min_acceptable_width"]
self.min_acceptable_height = dataset_spec["data_filtering_params"]["min_acceptable_height"]
self.min_acceptable_area = self.min_acceptable_width * self.min_acceptable_height
self.min_acceptable_interperson_dist_ratio = \
dataset_spec["data_filtering_params"]["min_acceptable_interperson_dist_ratio"]
# Load train and test data
self.train_data, self.train_images, self.train_coco, self.train_image_ids = \
self.load_dataset(self.train_anno_path, parse_annotations)
self.test_data, self.test_images, self.test_coco, self.test_image_ids = \
self.load_dataset(self.test_anno_path, parse_annotations)
def load_dataset(self, annotation_path, parse_annotations):
"""Function to load the dataset.
Args:
annotation_path (str): Path to the annotations json
parse_annotations (bool): If enabled, it would parse
through the annotations and extract individual annos.
Returns:
data (dict): Dictionary containing parsed image and anno info
images (dict): Dictionary containing image info
coco (COCO): Object of type COCO initialized with the anno file
image_ids (list): List of image ids in the annotations
"""
coco = COCO(annotation_path)
image_ids = list(coco.imgs.keys())
images = []
for _, image_id in enumerate(image_ids):
data_point = {}
data_point['image_id'] = image_id
data_point['image_meta'] = coco.imgs[image_id]
data_point['full_image_path'] = data_point['image_meta']['file_name']
# append only image related information
images.append(data_point)
data = []
# Parse annotations of all the images in coco dataset
if parse_annotations:
for _, image_id in enumerate(tqdm.tqdm(image_ids)):
data_point = {}
data_point['image_id'] = image_id
data_point['image_meta'] = coco.imgs[image_id]
annotation_ids = coco.getAnnIds(imgIds=image_id)
image_annotation = coco.loadAnns(annotation_ids)
all_persons, main_persons = self._parse_annotation(image_annotation)
# If no keypoint labeling in this image, skip it
if not len(all_persons):
continue
data_point['all_persons'] = all_persons
data_point['main_persons'] = main_persons
data.append(data_point)
return data, images, coco, image_ids
@staticmethod
def _get_category(data, cat_name):
"""Get the configuration corresponding to the given category name.
TODO: Move to utils
Args:
cat_name (str): category name
Return:
(dict): meta information about the category
"""
return [c for c in data['categories'] if c['name'] == cat_name][0]
@staticmethod
def get_image_name(coco, image_id):
"""Get the image path.
Args:
coco (COCO): Object of type COCO
image_id (int): id of the image to retrieve filepath
Returns:
(str): filepath
"""
return coco.imgs[image_id]['file_name']
def _parse_annotation(self, img_anns):
"""Parse the given annotations in the image and compile the info.
Args:
img_anns (list): list of annotations associated with the current image.
Returns:
all_persons (list): list consisting of all the annotated people in the image
main_persons (list): filtered list of annotated people in the image
based on certain criteria.
"""
num_people = len(img_anns)
all_persons = []
for p in range(num_people):
pers = dict()
person_center = [img_anns[p]["bbox"][0] + img_anns[p]["bbox"][2] / 2,
img_anns[p]["bbox"][1] + img_anns[p]["bbox"][3] / 2]
pers["objpos"] = person_center
pers["bbox"] = img_anns[p]["bbox"]
pers["iscrowd"] = img_anns[p]["iscrowd"]
pers["segment_area"] = img_anns[p]["area"]
pers["segmentation"] = img_anns[p]["segmentation"]
pers["num_keypoints"] = img_anns[p]["num_keypoints"]
kpts = img_anns[p]["keypoints"]
pers["joint"] = np.zeros((self.num_joints, 3))
for part in range(self.num_joints):
                # The convention for visibility flags used in COCO is as follows:
# 0: not labeled (in which case x=y=0)
# 1: labeled but not visible,
# 2: labeled and visible.
pers["joint"][part, 0] = kpts[part * 3]
pers["joint"][part, 1] = kpts[part * 3 + 1]
pers["joint"][part, 2] = kpts[part * 3 + 2]
pers["scale_provided"] = img_anns[p]["bbox"][3]
all_persons.append(pers)
main_persons = []
prev_center = []
        # Filter out the "main people" based on the following criteria:
# 1. Number of keypoints less than `min_acceptable_kpts`.
# 2. Pixel Area is less than `min_acceptable_area`
# This is used later during training to augment the data around the main persons
for pers in all_persons:
            # Filter out persons with too few visible/annotated keypoints or
            # too small a segment area (relative to the scale of the person).
if pers["num_keypoints"] < self.min_acceptable_kpts:
continue
if pers["segment_area"] < self.min_acceptable_area:
continue
person_center = pers["objpos"]
            # Skip persons whose center is very close to a person already
            # added to `main_persons`.
flag = 0
for pc in prev_center:
a = np.expand_dims(pc[:2], axis=0)
b = np.expand_dims(person_center, axis=0)
dist = cdist(a, b)[0]
if dist < pc[2] * self.min_acceptable_interperson_dist_ratio:
flag = 1
continue
if flag == 1:
continue
main_persons.append(pers)
prev_center.append(
                np.append(person_center, max(pers["bbox"][2], pers["bbox"][3]))
)
return all_persons, main_persons
def process_segmentation_masks(self, data, mask_root_dir):
"""Generate and save binary mask to disk.
Args:
data (list): List of groundtruth annotations
with corresponding meta data.
mask_root_dir (str): Root directory path to save the
generated binary masks.
"""
pool = multiprocessing.Pool()
total_samples = len(data)
for idx, _ in enumerate(
pool.imap(
COCODataset._pool_process_segmentation_mask,
[(data_point, mask_root_dir) for data_point in data])):
if idx % 1000 == 0:
logger.info('Mask Generation: {}/{}'.format(idx, total_samples))
@staticmethod
def _pool_process_segmentation_mask(args):
"""Wrapper for process_single_segmentation_mask for multiprocessing.
Args:
args (list): List of all args that needs to be forwarded
to process_single_segmentation_mask fn.
"""
COCODataset.process_single_segmentation_mask(*args)
@staticmethod
def process_single_segmentation_mask(data_point, mask_root_dir):
"""Generate and save binary mask to disk.
Args:
data_point (dict): Dictionary containing the groundtruth
annotation for one image with corresponding meta data.
mask_root_dir (str): Root directory path to save the
generated binary masks.
"""
image_meta = data_point['image_meta']
height = image_meta['height']
width = image_meta['width']
annotations = data_point['all_persons']
mask_out, _, _, _ = COCODataset.get_segmentation_masks(
height, width, annotations)
mask_out = mask_out.astype(np.uint8)
mask_out *= 255
# TODO: How to handle IX dataset? Maybe add keys for replacement
# of path? Provide mask path as pattern?
filename = image_meta['file_name']
mask_path = os.path.join(mask_root_dir, filename)
# Create directory if it doesn't already exist
if not os.path.exists(os.path.dirname(mask_path)):
os.makedirs(os.path.dirname(mask_path), exist_ok=True)
cv2.imwrite(mask_path, mask_out)
@staticmethod
def get_segmentation_masks(height, width, annotations):
"""Generate and save binary mask to disk.
Args:
height (int): Height of the image.
width (int): Width of the image.
annotations (dict): Dictionary containing the groundtruth
annotation for one image.
Returns:
mask_out (np.ndarray): Binary mask of regions to mask
out during training.
crowd_region (np.ndarray): Binary mask of regions labeled
as `crowd`.
unlabeled_region (np.ndarray): Binary mask of regions without
keypoint labeling for persons.
labeled_region (np.ndarray): Binary mask of regions with
keypoint labeling for persons.
"""
labeled_region = np.zeros((height, width), dtype=np.uint8)
unlabeled_region = np.zeros((height, width), dtype=np.uint8)
crowd_region = np.zeros((height, width), dtype=np.uint8)
mask_out = np.zeros((height, width), dtype=np.uint8)
        # Iterate through the annotations
for annotation in annotations:
            # Convert the segmentation annotation to a binary mask
binary_mask = dataio_utils.annotation_to_mask(
annotation['segmentation'], height, width)
if annotation["iscrowd"]:
crowd_region = np.bitwise_or(crowd_region, binary_mask)
elif annotation["num_keypoints"] <= 0:
unlabeled_region = np.bitwise_or(unlabeled_region, binary_mask)
else:
labeled_region = np.bitwise_or(labeled_region, binary_mask)
# Remove the overlap region from `crowd_region` to ensure the
# labeled person is not masked out.
# TODO: Should we be doing the same for `unlabeled_region_mask`?
overlap_region = np.bitwise_and(labeled_region, crowd_region)
mask_out = crowd_region - overlap_region
# Union of crowd region and unlabeled region should be masked out
mask_out = np.bitwise_or(mask_out, unlabeled_region)
# Invert the mask to ensure valid regions are 1s.
mask_out = np.logical_not(mask_out)
return mask_out, crowd_region, unlabeled_region, labeled_region
# TODO: Move to utils?
@staticmethod
def convert_kpts_format(kpts, target_parts2idx, source_parts2idx):
"""Convert keypoints from source to target format.
Args:
kpts (np.ndarray): source keypoints that needs to be converted
to target ordering.
target_parts2idx (dict): Dict with mapping from target keypoint
names to keypoint index
source_parts2idx (dict): Dict with mapping from source keypoint
names to keypoint index
Returns:
converted_kpts (np.ndarray): converted keypoints
"""
converted_kpts = np.zeros(
(kpts.shape[0], len(target_parts2idx.keys()), kpts.shape[-1]), dtype=np.float32)
for part in source_parts2idx:
source_part_id = source_parts2idx[part]
if part in target_parts2idx:
target_part_id = target_parts2idx[part]
converted_kpts[:, target_part_id, :] = kpts[:, source_part_id, :]
return converted_kpts
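    # A small usage sketch for the conversion above (the part names and values
    # are hypothetical, not from the original file):
    #
    #   source_parts2idx = {'nose': 0, 'left_eye': 1}
    #   target_parts2idx = {'left_eye': 0, 'nose': 1, 'neck': 2}
    #   kpts = np.array([[[10., 20., 2.], [30., 40., 2.]]])  # (1 person, 2 parts, 3)
    #   out = COCODataset.convert_kpts_format(kpts, target_parts2idx, source_parts2idx)
    #   # out[0, 1] == [10., 20., 2.] ('nose'), out[0, 0] == [30., 40., 2.] ('left_eye'),
    #   # out[0, 2] stays all-zero because 'neck' has no source counterpart.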
def dump_detection_in_coco_format(self, results, detections_parts2idx, results_dir):
"""Function to dump results as expected by COCO AP evaluation.
Args:
results (dict): Keypoint results in BpNetInferencer format.
detections_parts2idx (dict): Meta data about the pose configuration
            results_dir (str): Directory to save the formatted results.
Returns:
output_path (str): Path to the final saved results json.
"""
detections = []
for result_dict in results['images']:
keypoints_list = result_dict['keypoints']
scores = result_dict['scores']
if len(keypoints_list) == 0:
continue
converted_kpts_list = self.convert_kpts_format(
np.array(keypoints_list), self.parts2idx, detections_parts2idx).tolist()
for keypoints, score in zip(converted_kpts_list, scores):
format_keypoint_list = []
for x, y in keypoints:
# Append visibility index (0 if kpt was not detected)
for fkpts in [int(x), int(y), 1 if x > 0 or y > 0 else 0]:
format_keypoint_list.append(fkpts)
detections.append({
"image_id": result_dict['image_id'],
"category_id": 1,
"keypoints": format_keypoint_list,
# TODO: derive score from the model
"score": score,
})
# Dump the formatted detection dict for COCO evaluation
output_path = os.path.join(results_dir, 'detections_coco_format.json')
if os.path.exists(output_path):
logger.warning(
"File already exists: {}. Overwritting into same file.".format(output_path))
with open(output_path, 'w') as f:
json.dump(detections, f, indent=2)
return output_path
@staticmethod
def dump_coco_results_summary(results, results_dir):
"""Function to dump evaluation results summary into a csv.
Args:
results (COCOeval): evaluation results
results_dir (str): Directory to save the formatted results.
"""
columns = [
'metric', 'IoU', 'area', 'maxDets', 'score'
]
stats = results.stats
num_rows = len(stats)
df = pandas.DataFrame(columns=columns, index=np.arange(0, num_rows))
df.loc[0:4, 'metric'] = 'AP'
df.loc[5:10, 'metric'] = 'AR'
df.loc[[0, 3, 4, 5, 8, 9], 'IoU'] = '0.50:0.95'
df.loc[[1, 6], 'IoU'] = '0.50'
df.loc[[2, 7], 'IoU'] = '0.75'
df.loc[[0, 1, 2, 5, 6, 7], 'area'] = 'all'
df.loc[[3, 8], 'area'] = 'medium'
df.loc[[4, 9], 'area'] = 'large'
df.loc[:, 'maxDets'] = 20
for i in range(num_rows):
df.loc[i, 'score'] = stats[i]
# dump as csv
results_file = os.path.join(results_dir, 'results.csv')
df.to_csv(results_file)
def infer(self,
model_path,
inference_spec,
experiment_spec,
results_dir,
key=None,
visualize=False):
"""Run inference on the validation set and save results.
Args:
model_path (str): Path to model.
inference_spec (dict): Inference specification.
experiment_spec (dict): Training experiment specification.
results_dir (str): Directory to save results.
visualize (bool): Option to enable visualization
Returns:
output_path (str): Path to the formatted detections json
"""
# init BpNet Inferencer
inferencer = BpNetInferencer(
model_path,
inference_spec,
experiment_spec,
key=key
)
# Run inference
data = dict(images=self.test_images)
image_root_path = os.path.join(self.root_dir, self.test_images_root_dir_path)
results = inferencer.run(
data,
results_dir=results_dir,
image_root_path=image_root_path,
visualize=visualize)
# Also save in format expected by COCO eval
output_path = self.dump_detection_in_coco_format(
results,
inferencer.bpnet_pose_config.parts2idx,
results_dir)
return output_path
@staticmethod
def evaluate(coco_gt, detections_path, results_dir):
"""Run evaluation on the detections and save results.
Args:
coco_gt (COCO): COCO object initialized with annotations
of the test/val set.
detections_path (str): Path to the formatted detections json
results_dir (str): Directory to save results.
Returns:
results (COCOeval): evaluation results
"""
annotation_type = 'keypoints'
print('Running test for {} results.'.format(annotation_type))
coco_dt = coco_gt.loadRes(detections_path)
results = COCOeval(coco_gt, coco_dt, annotation_type)
results.evaluate()
results.accumulate()
results.summarize()
# Save results
COCODataset.dump_coco_results_summary(results, results_dir)
return results
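    # Typical flow tying `infer` and `evaluate` together (a sketch only; the
    # spec dicts, paths and model key are assumptions):
    #
    #   dataset = COCODataset(dataset_spec, parse_annotations=False)
    #   detections_json = dataset.infer(model_path, inference_spec,
    #                                   experiment_spec, results_dir, key=model_key)
    #   COCODataset.evaluate(dataset.test_coco, detections_json, results_dir)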
def visualize(self):
"""Visualize annotations."""
pass
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/dataio/coco_dataset.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build a dataset converter object based on dataset spec config."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nvidia_tao_tf1.cv.bpnet.dataio.coco_converter import COCOConverter
def build_converter(
dataset_spec,
output_filename,
mode='train',
num_partitions=1,
num_shards=0,
generate_masks=False,
check_if_images_and_masks_exist=False
):
"""Build a DatasetConverter object.
Build and return an object of desired subclass of DatasetConverter based on
given dataset export configuration.
Args:
dataset_spec (dict): Dataset export configuration spec
output_filename (string): Path for the output file.
mode (string): train/test.
num_partitions (int): Number of folds
num_shards (int): Number of shards
generate_masks (bool): Flag to generate and save masks of regions with unlabeled people
check_if_images_and_masks_exist (bool): check if the required files exist in data location.
Return:
converter (DatasetConverter): An object of desired subclass of DatasetConverter.
"""
# Fetch the dataset configuration object first
# TODO: Handle more datasets.
# dataset_type = dataset_spec["dataset"]
root_directory_path = dataset_spec["root_directory_path"]
converter = COCOConverter(
root_directory_path=root_directory_path,
dataset_spec=dataset_spec,
num_partitions=num_partitions,
num_shards=num_shards,
output_filename=output_filename,
mode=mode,
generate_masks=generate_masks,
check_if_images_and_masks_exist=check_if_images_and_masks_exist)
return converter
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/dataio/build_converter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base Class Implementation to convert a dataset to TFRecords."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABCMeta
from abc import abstractmethod
import logging
import random
import six
import tensorflow as tf
import tqdm
from nvidia_tao_tf1.core.dataloader.tfrecord.converter_lib import _shard
from nvidia_tao_tf1.core.dataloader.tfrecord.converter_lib import _shuffle
logger = logging.getLogger(__name__)
@six.add_metaclass(ABCMeta)
class DatasetConverter(object):
"""Base Class Implementation to convert a dataset to TFRecords.
This class needs to be subclassed, and the convert() and
create_example_proto() methods overridden to do the dataset
conversion. Splitting of partitions to shards, shuffling and
writing TFRecords are implemented here, as well as counting
of written targets.
"""
@abstractmethod
def __init__(self, root_data_directory_path, num_partitions, num_shards,
output_filename):
"""Initialize the converter.
Args:
root_data_directory_path (string): Dataset root directory path.
num_partitions (int): Number of partitions (folds).
num_shards (int): Number of shards.
output_filename (str): Path for the output file.
"""
self.root_dir = root_data_directory_path
self.output_partitions = num_partitions
self.output_shards = num_shards
self.output_filename = output_filename
# Set a fixed seed to get a reproducible sequence.
random.seed(42)
def convert(self):
"""Do the dataset conversion."""
# Divide dataset into partitions and shuffle them.
partitions = self._partition()
_shuffle(partitions)
# Shard and write the partitions to tfrecords.
object_count = self._write_partitions(partitions)
# Log how many objects got written in total.
        log_str = 'Wrote the following number of objects: {}\n'.format(object_count)
logger.info(log_str)
def _write_partitions(self, partitions):
"""Shard and write partitions into tfrecords.
Args:
partitions (list): A list of list of frame IDs.
Returns:
object_count (Counter): The total number of objects per target class.
"""
# Divide partitions into shards.
sharded_partitions = _shard(partitions, self.output_shards)
# Write .tfrecords to disk for each partition and shard.
# Also count the target objects per partition and over the whole dataset.
object_count = 0
for p, partition in enumerate(sharded_partitions):
partition_object_count = 0
for s, shard in enumerate(partition):
shard_object_count = self._write_shard(shard, p, s)
partition_object_count += shard_object_count
# Log the count in this partition and increase total
# object count.
object_count += partition_object_count
return object_count
def _write_shard(self, shard, partition_number, shard_number):
"""Write a single shard into the tfrecords file.
Note that the dataset-specific part is captured in function
create_example_proto() which needs to be overridden for each
specific dataset.
Args:
shard (list): A list of frame IDs for this shard.
partition_number (int): Current partition (fold) index.
shard_number (int): Current shard index.
Returns:
object_count (int): The number of written objects.
"""
logger.info('Writing partition {}, shard {}'.format(partition_number, shard_number))
output = self.output_filename
if self.output_partitions != 0:
output = '{}-fold-{:03d}-of-{:03d}'.format(output, partition_number,
self.output_partitions)
if self.output_shards != 0:
output = '{}-shard-{:05d}-of-{:05d}'.format(output, shard_number, self.output_shards)
object_count = 0
# Store all the data for the shard.
writer = tf.io.TFRecordWriter(output)
for frame_id in tqdm.tqdm(shard):
# Create the Example with all labels for this frame_id.
example = self._create_example_proto(frame_id)
# The example might be skipped e.g. due to missing labels.
if example is not None:
# TODO: add option to sample/filter data
# Serialize the example.
writer.write(example.SerializeToString())
object_count += 1
writer.close()
        log_str = 'Wrote the following number of objects: {}\n'.format(object_count)
logger.info(log_str)
return object_count
@abstractmethod
def _partition(self):
"""Return dataset partitions."""
pass
@abstractmethod
def _create_example_proto(self, frame_id):
"""Generate the example for this frame."""
pass
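# A minimal sketch of a concrete converter subclass (illustrative only; the
# `frame_ids` list and feature name are assumptions, not part of the original
# file):
#
#   class ExampleConverter(DatasetConverter):
#       def __init__(self, frame_ids, **kwargs):
#           super(ExampleConverter, self).__init__(**kwargs)
#           self.frame_ids = frame_ids
#
#       def _partition(self):
#           # One list of frame ids per fold.
#           return [self.frame_ids[i::self.output_partitions]
#                   for i in range(self.output_partitions)]
#
#       def _create_example_proto(self, frame_id):
#           return tf.train.Example(features=tf.train.Features(feature={
#               'frame/id': tf.train.Feature(
#                   int64_list=tf.train.Int64List(value=[frame_id]))}))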
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/dataio/dataset_converter_lib.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""A launcher script for DriveIX BpNet tasks inside a runtime container."""
from maglev_sdk.docker_container.entrypoint import main
if __name__ == '__main__':
main('bpnet_dataio', 'nvidia_tao_tf1/cv/bpnet/dataio')
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/dataio/docker/bpnet_dataio.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the COCOConverter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import pytest
from nvidia_tao_tf1.cv.bpnet.dataio.build_converter import build_converter
BPNET_ROOT = os.getenv(
"CI_PROJECT_DIR",
"/workspace/tao-tf1"
)
TEST_DATA_ROOT_PATH = os.path.join(
BPNET_ROOT,
"nvidia_tao_tf1/cv/bpnet"
)
@pytest.mark.parametrize("dataset_spec", [
('coco_spec.json'),
])
def test_coco_converter(dataset_spec):
"""Check that COCOConverter outputs a tfrecord as expected."""
dataset_spec_path = \
os.path.join('nvidia_tao_tf1/cv/bpnet/dataio/dataset_specs', dataset_spec)
# Load config file
with open(dataset_spec_path, "r") as f:
dataset_spec_json = json.load(f)
# Update the paths based on test data
dataset_spec_json["train_data"] = {
"images_root_dir_path": "coco/images",
"mask_root_dir_path": "coco/masks",
"annotation_root_dir_path": "coco/person_keypoints_test.json"
}
dataset_spec_json["test_data"] = {
"images_root_dir_path": "coco/images",
"mask_root_dir_path": "coco/masks",
"annotation_root_dir_path": "coco/person_keypoints_test.json"
}
dataset_spec_json['root_directory_path'] = os.path.join(
TEST_DATA_ROOT_PATH, "dataio/tests/test_data")
converter = build_converter(dataset_spec=dataset_spec_json,
output_filename='./test')
converter.convert()
tf_records_path = './test-fold-000-of-001'
assert os.path.exists(tf_records_path)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/dataio/tests/test_coco_converter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the dataset converter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import pytest
from nvidia_tao_tf1.cv.bpnet.dataio.build_converter import build_converter
from nvidia_tao_tf1.cv.bpnet.dataio.coco_converter import COCOConverter
BPNET_ROOT = os.getenv(
"CI_PROJECT_DIR",
"/workspace/tao-tf1"
)
TEST_DATA_ROOT_PATH = os.path.join(
BPNET_ROOT,
"nvidia_tao_tf1/cv/bpnet"
)
@pytest.mark.parametrize("dataset_spec,converter_type", [
('coco_spec.json', COCOConverter),
])
def test_build_converter(dataset_spec, converter_type):
"""Check that build_converter returns a DatasetConverter of the desired type."""
dataset_spec_path = \
os.path.join('nvidia_tao_tf1/cv/bpnet/dataio/dataset_specs', dataset_spec)
# Load config file
with open(dataset_spec_path, "r") as f:
dataset_spec_json = json.load(f)
# Update the paths based on test data
dataset_spec_json["train_data"] = {
"images_root_dir_path": "coco/images",
"mask_root_dir_path": "coco/masks",
"annotation_root_dir_path": "coco/person_keypoints_test.json"
}
dataset_spec_json["test_data"] = {
"images_root_dir_path": "coco/images",
"mask_root_dir_path": "coco/masks",
"annotation_root_dir_path": "coco/person_keypoints_test.json"
}
dataset_spec_json['root_directory_path'] = os.path.join(
TEST_DATA_ROOT_PATH, "dataio/tests/test_data")
print(f"Test data root path {TEST_DATA_ROOT_PATH}")
converter = build_converter(dataset_spec_json, 'test/coco')
assert isinstance(converter, converter_type)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/dataio/tests/test_dataset_converter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset config class for BpNet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import numpy as np
from nvidia_tao_tf1.cv.bpnet.dataloaders.pose_config import Joints
from nvidia_tao_tf1.cv.bpnet.dataloaders.pose_config import JointVisibility
class DatasetConfig(object):
"""Class to hold all dataset related parameters."""
def __init__(self,
dataset_spec_path,
bpnet_pose_config,
**kwargs):
"""Constructor.
        Args:
            dataset_spec_path (str): Path to the dataset spec json file.
            bpnet_pose_config (BpNetPoseConfig): Pose configuration used by BpNet.
"""
self.pose_config = bpnet_pose_config
# Load the dataset config
self.dataset_config = json.load(open(dataset_spec_path))
# Get the person category config
self.person_config = self._get_category(self.dataset_config, 'person')
# Get number of joints and edges in the skeleton and assert correctness
self.num_parts = self.person_config["num_joints"]
self.parts = self.person_config["keypoints"]
self.parts2idx = dict(zip(self.parts, range(self.num_parts)))
assert self.num_parts == len(self.parts)
self.skeleton = self.person_config["skeleton_edge_names"]
self.skeleton2idx = self.person_config["skeleton"]
self.num_connections = len(self.skeleton)
# Check whether to estimate and add neck joint
self._add_neck_joint = False
neck_needed = (Joints.NECK in self.pose_config.parts2idx) \
and (Joints.NECK not in self.parts2idx)
shoulders_labeled = (Joints.RIGHT_SHOULDER in self.parts2idx) \
and (Joints.LEFT_SHOULDER in self.parts2idx)
if neck_needed and shoulders_labeled:
self._add_neck_joint = True
self.visibility_flag_map = DatasetConfig.get_visibility_flag_mapping(
self.dataset_config['visibility_flags'])
@staticmethod
def _get_category(data, cat_name):
"""Get the configuration corresponding to the given category name."""
return [c for c in data['categories'] if c['name'] == cat_name][0]
@staticmethod
def get_visibility_flag_mapping(visibility_flags):
"""Map visiblity flag convention in the dataset to JointVisiblity.
Args:
visibility_flags (dict): visibility flag convention used in dataset
Returns:
            mapdict (dict): mapping between dataset and JointVisibility convention.
"""
mapdict = {}
dest_str2var = JointVisibility.map_visibiltiy_str
for key in visibility_flags['value'].keys():
# source value of the corresponding occlusion flag
source_val = visibility_flags['value'].get(key)
# desired value of the corresponding occlusion flag
dest_flag = visibility_flags['mapping'].get(key)
if dest_flag is None:
raise Exception("Visibility flag is missing {} key in `mapping`".format(key))
dest_val = dest_str2var.get(dest_flag)
if dest_val is None:
raise Exception("Visibility flag is missing {} key in `mapping`".format(key))
mapdict[source_val] = dest_val
return mapdict
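    # Illustrative input/output for the mapping above (the flag names/values are
    # hypothetical; the real ones come from the dataset spec json and the flags
    # registered in JointVisibility.map_visibiltiy_str):
    #
    #   visibility_flags = {
    #       'value':   {'visible': 2, 'occluded': 1, 'not_labeled': 0},
    #       'mapping': {'visible': 'visible', 'occluded': 'occluded',
    #                   'not_labeled': 'not_labeled'}
    #   }
    #   # get_visibility_flag_mapping(visibility_flags) then returns a dict of
    #   # the form {2: <JointVisibility value for 'visible'>,
    #   #           1: <JointVisibility value for 'occluded'>,
    #   #           0: <JointVisibility value for 'not_labeled'>}.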
def transform_labels(self, joint_labels):
"""Map the keypoints from current dataset format to bpnet format.
Args:
joint_labels (np.ndarray): Keypoints in current dataset format
Returns:
formatted_joint_labels (np.ndarray): Mapped keypoints in bpnet format
"""
# Reshape the joints to (num_persons, num_joints in dataset, 3)
joint_labels = np.reshape(joint_labels, (-1, self.num_parts, 3))
        # Initialize an array for formatted keypoints with shape (num_persons, num_bpnet_joints, 3)
formatted_joint_labels = np.zeros(
(joint_labels.shape[0], self.pose_config.num_parts, 3), dtype=np.float
)
# Set default visibility flag to not labeled in the dataset
formatted_joint_labels[:, :, 2] = JointVisibility.NL_IN_DATASET
# Iterate through each part and map them to the corresponding index in bpnet joint format
for part in self.parts:
dataset_part_id = self.parts2idx[part]
if part in self.pose_config.parts2idx:
bpnet_part_id = self.pose_config.parts2idx[part]
formatted_joint_labels[:, bpnet_part_id, :] = joint_labels[:, dataset_part_id, :]
# Convert the visibility flags
tmp_array = np.copy(formatted_joint_labels)
for flag in self.visibility_flag_map:
formatted_joint_labels[:, :, 2] = np.where(
np.isclose(tmp_array[:, :, 2], flag),
self.visibility_flag_map[flag],
formatted_joint_labels[:, :, 2]
)
# Add neck joint if it isn't part of the current dataset
if self._add_neck_joint:
formatted_joint_labels = self._estimate_neck_joint(joint_labels, formatted_joint_labels)
return formatted_joint_labels
def _estimate_neck_joint(self, joint_labels, formatted_joint_labels):
"""Estimate the neck joint using the average of left and right shoulders.
Args:
joint_labels (np.ndarray): Keypoints in current dataset format
formatted_joint_labels (np.ndarray): Mapped keypoints in bpnet format
Returns:
formatted_joint_labels (np.ndarray): Mapped keypoints in bpnet format
with neck keypoint added based on lsho and rsho keypoints.
"""
# Get the relevant part indices
neck_idx = self.pose_config.parts2idx[Joints.NECK]
rsho_idx = self.parts2idx[Joints.RIGHT_SHOULDER]
lsho_idx = self.parts2idx[Joints.LEFT_SHOULDER]
# Get the visibility flags of the left and right shoulders
lsho_labeled = joint_labels[:, lsho_idx, 2] < JointVisibility.L_TRUNCATED
rsho_labeled = joint_labels[:, rsho_idx, 2] < JointVisibility.L_TRUNCATED
both_sho_labeled = lsho_labeled & rsho_labeled
# Update neck joint as average of the left and right shoulder joints
formatted_joint_labels[:, neck_idx, 0:2] = \
(joint_labels[:, lsho_idx, 0:2] + joint_labels[:, rsho_idx, 0:2]) / 2
# Update the visibility of the neck joint based on the visibilities of the
# left and right shoulders. Only when both are visible/labeled, the neck
# is considered to be labeled.
formatted_joint_labels[~both_sho_labeled, neck_idx, 2] = JointVisibility.NL_FOR_PERSON
formatted_joint_labels[both_sho_labeled, neck_idx, 2] = np.minimum(
joint_labels[both_sho_labeled, rsho_idx, 2],
joint_labels[both_sho_labeled, lsho_idx, 2]
)
return formatted_joint_labels
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/dataloaders/dataset_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BpNet Dataloader definitions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nvidia_tao_tf1.cv.bpnet.dataloaders.bpnet_dataloader \
import BpNetDataloader
__all__ = (
'BpNetDataloader',
)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/dataloaders/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataloader for BpNet datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict, namedtuple
import logging
import os
import tensorflow as tf
from nvidia_tao_tf1.blocks.dataloader import DataLoader
import nvidia_tao_tf1.core as tao_core
from nvidia_tao_tf1.core.processors import SpatialTransform
from nvidia_tao_tf1.cv.bpnet.dataloaders.dataset_config import DatasetConfig
from nvidia_tao_tf1.cv.bpnet.dataloaders.processors.augmentation import BpNetSpatialTransformer
from nvidia_tao_tf1.cv.bpnet.dataloaders.processors.label_processor import LabelProcessor
logger = logging.getLogger(__name__)
BpData = namedtuple('BpData', ['images', 'masks', 'labels'])
class BpNetDataloader(DataLoader):
"""Dataloader Class for BpNet.
The dataloader parses the given TFRecord files and processes the inputs
and ground truth labels to be used for training and validation.
"""
TRAIN = "train"
VAL = "val"
SUPPORTED_MODES = [TRAIN, VAL]
ITERATOR_INIT_OP_NAME = "iterator_init"
@tao_core.coreobject.save_args
def __init__(self,
batch_size,
pose_config,
image_config,
dataset_config,
augmentation_config,
label_processor_config,
normalization_params,
shuffle_buffer_size=None,
**kwargs):
"""Init function for the dataloader.
Args:
batch_size (int): Size of minibatch.
pose_config (BpNetPoseConfig)
image_config (dict): Basic information of input images.
dataset_config (dict): Basic information of datasets used.
augmentation_config (AugmentationConfig): Parameters used for input augmentations.
label_processor_config (dict): Parameters used for transforming kpts
to final label tensors consumable by the training pipeline.
normalization_params (dict): Parameter values to be used for input normalization
shuffle_buffer_size (int): Size of the shuffle buffer for feeding in data.
If None, it will be 20000. Default None
"""
super(BpNetDataloader, self).__init__(**kwargs)
self.batch_size = batch_size
self.image_dims = image_config['image_dims']
self.image_encoding = image_config['image_encoding']
# Dataset config
self._root_data_path = dataset_config['root_data_path']
self._train_records_folder_path = dataset_config['train_records_folder_path']
self._train_records_path = dataset_config['train_records_path']
self._val_records_folder_path = dataset_config['val_records_folder_path']
self._val_records_path = dataset_config['val_records_path']
self.dataset2spec_map = dataset_config['dataset_specs']
# Get the relative paths for the tfrecords filelist
self.tfrecords_filename_list = [os.path.join(
self._train_records_folder_path, train_record_path)
for train_record_path in self._train_records_path]
# If shuffle buffer size is not provided, set default.
if shuffle_buffer_size is None:
            # Currently set to roughly 20% of the COCO dataset size (after the
            # key-person-centric duplication in dataio).
shuffle_buffer_size = 20000
# Normalization parameters
self._normalization_params = {}
self._normalization_params["image_scale"] = tf.constant(
normalization_params["image_scale"])
self._normalization_params["image_offset"] = tf.constant(
normalization_params["image_offset"])
self._normalization_params["mask_scale"] = tf.constant(
normalization_params["mask_scale"])
self._normalization_params["mask_offset"] = tf.constant(
normalization_params["mask_offset"])
# TFRecords Iterator Attributes
        # NOTE: This will no longer be needed after switching to the parallel optimized loader.
# But the issue remains that the images are of varying sizes and hence tricky to
# stack together.
#
        # TODO: One option is to have augmentations and resizing before the batching and after
        # load and decode. This ensures that all the images and masks are of the same size.
#
# For Example:
# dataset = dataset.map(load_image) // load single image
# dataset = dataset.map(augment_image) // augment and resize image
# dataset = dataset.batch(batch_size) // Batch
self._tfrecords_iterator_attributes = {
"batch_size": self.batch_size,
"batch_as_list": True,
"shuffle": True,
"shuffle_buffer_size": shuffle_buffer_size,
"repeat": True
}
# Get the file loader object
self._load_file = tao_core.processors.LoadFile(
prefix=self._root_data_path)
# Get the image decoder object
# TODO: Encoding type might vary with datasets. How to choose?
# Figure out encoding based on datasets and switch decoders
# accordingly
self._decode_image, self._decode_mask = self._image_decoder()
# Get the image shape
self.image_shape = [self.image_dims['height'],
self.image_dims['width'],
self.image_dims['channels']]
# Get the proto parser.
self._proto_parser = self._tfrecord_parser()
# Initialize the Dataset parsers for each supported dataset.
self.pose_config = pose_config
self.dataset_cfgs = {}
for dataset in self.dataset2spec_map:
self.dataset_cfgs[dataset] = DatasetConfig(
self.dataset2spec_map[dataset], pose_config)
        # Initialize Label Processor
self.label_processor = LabelProcessor(
pose_config=pose_config,
image_shape=self.image_shape,
target_shape=pose_config.label_tensor_shape,
**label_processor_config
)
# Initialize Spatial Transformer
self.augmentation_config = augmentation_config
# NOTE: This is currently used for obtaining the spatial transformer
# matrix and to transform the keypoints. Images and masks are transformed
# using the modulus spatial transformer on the GPU
self.bpnet_spatial_transformer = BpNetSpatialTransformer(
aug_params=augmentation_config.spatial_aug_params,
identity_aug_params=augmentation_config.identity_spatial_aug_params,
image_shape=self.image_shape,
pose_config=pose_config,
augmentation_mode=augmentation_config.spatial_augmentation_mode)
# To transform images and masks
self.modulus_spatial_transformer = SpatialTransform(
method='bilinear',
data_format="channels_last"
)
# TODO: Enable when training and disable during validation
self.enable_augmentation = tf.constant(True)
def __call__(self):
"""Get input images and ground truth labels as tensors for training and validation.
Returns:
            A BpData namedtuple which contains the following tensors in 'NHWC' format:
1. images (4D tensor): model input, images.
2. masks (4D tensor): regions to mask out for training/validation loss.
3. labels (4D tensor): gaussian peaks and part affinity fields representing
the human skeleton ground truth.
"""
# Load and encode tfrecords.
self._tfrecord_iterator = self._get_tfrecords_iterator()
masks = []
images = []
labels = []
# Iterate through each record in the batch
for record in self._tfrecord_iterator():
# Parse the tfrecord
example = self._proto_parser(record)
# Load and decode images, masks and labels.
# This also converts the kpts from dataset format to bpnet format
image, mask, joints, meta_data = self._load_and_decode(example)
# Obtain the transformation matrix for desired augmentation
# and apply transformation to the keypoints.
            # TODO: Move keypoint transformation and flipping outside numpy_function
            # so that it can be done on the GPU.
joints, stm = tf.numpy_function(
self.bpnet_spatial_transformer.call,
[image, joints, meta_data['scales'],
meta_data['centers'], self.enable_augmentation],
[tf.float64, tf.float32]
)
# Apply spatial transformations to the image
image_crop_size = (self.image_shape[0], self.image_shape[1])
image = self._apply_augmentations_to_image(
image, stm, self.image_shape, crop_size=image_crop_size, background_value=127.0)
# Apply spatial transformations to the mask
mask_target_shape = [self.image_shape[0], self.image_shape[1], 1]
            # The mask is the same size as the image. The transformation happens in image space
            # and the mask is cropped to the network input shape, just like the image.
mask_crop_size = (self.image_shape[0], self.image_shape[1])
mask = self._apply_augmentations_to_image(
mask, stm, mask_target_shape, crop_size=mask_crop_size, background_value=255.0)
# Resize mask to target label shape
mask = tf.image.resize_images(
mask,
size=(
self.pose_config.label_tensor_shape[0],
self.pose_config.label_tensor_shape[1]),
method=tf.image.ResizeMethod.BILINEAR,
)
# Normalize image
image = tf.compat.v1.math.divide(
image, self._normalization_params["image_scale"])
image = tf.compat.v1.math.subtract(
image, self._normalization_params["image_offset"])
# Normalize mask
mask = tf.compat.v1.math.divide(
mask, self._normalization_params["mask_scale"])
mask = tf.compat.v1.math.subtract(
mask, self._normalization_params["mask_offset"])
# Repeat mask to match the number of channels in the labels tensor
mask = tf.tile(
mask, [1, 1, self.pose_config.label_tensor_shape[-1]])
            # Transform the keypoints using the LabelProcessor
# Converts them to heatmaps and part affinity fields
label = self._transform_labels(joints)
images.append(image)
masks.append(mask)
labels.append(label)
images = tf.stack(images)
masks = tf.stack(masks)
labels = tf.stack(labels)
# Set the shapes of the final tensors used for training.
# NOTE: This is needed because of `tf.numpy_function`. TensorShape is unknown for tensors
# computed within the `tf.numpy_function`.
images.set_shape((
self.batch_size,
self.image_shape[0],
self.image_shape[1],
self.image_shape[2]
))
masks.set_shape((
self.batch_size,
self.pose_config.label_tensor_shape[0],
self.pose_config.label_tensor_shape[1],
self.pose_config.label_tensor_shape[2]
))
labels.set_shape((
self.batch_size,
self.pose_config.label_tensor_shape[0],
self.pose_config.label_tensor_shape[1],
self.pose_config.label_tensor_shape[2]
))
# Cast the tensors to tf.float32 (Is this required here?)
images = tf.cast(images, tf.float32)
masks = tf.cast(masks, tf.float32)
labels = tf.cast(labels, tf.float32)
return BpData(images, masks, labels)
def _apply_augmentations_to_image(
self,
input_tensor,
stm,
target_shape,
crop_size=None,
background_value=0.0):
"""
Apply spatial and color transformations to an image.
        Spatial transform op maps destination image pixel P into source image location Q
        by matrix M: Q = P M. The input `stm` describes the forward (source to destination)
        mapping, so it is inverted here before being passed to the spatial transform op.
Args:
input_tensor (Tensor): Input image frame tensors (HWC).
            stm (Tensor): 3x3 spatial transformation/augmentation matrix.
target_shape (list): output shape of the augmented tensor
crop_size (tuple): (height, width) of the crop area.
It crops the region: top-left (0, 0) to bottom-right (h, w)
background_value (float): The value the background canvas should have.
Returns:
image_augmented (Tensor, HWC): Augmented input tensor.
"""
# Convert image to float if needed (stm_op requirement).
if input_tensor.dtype != tf.float32:
input_tensor = tf.cast(input_tensor, tf.float32)
if stm.dtype != tf.float32:
stm = tf.cast(stm, tf.float32)
dm = tf.matrix_inverse(stm)
# update background value
self.modulus_spatial_transformer.background_value = background_value
# Apply spatial transformations.
# NOTE: Image and matrix need to be reshaped into a batch of one for
# this op.
        image_augmented = self.modulus_spatial_transformer(
            images=tf.stack([input_tensor]),
            stms=tf.stack([dm]),
            shape=crop_size
        )[0, ...]
image_augmented.set_shape(target_shape)
return image_augmented
def _get_tfrecords_iterator(self):
"""Get TFRecordsIterator for a given set of TFRecord files.
Returns:
tfrecords_iterator (tao_core.processors.TFRecordsIterator)
"""
# Check validity of each tfrecord file.
for filename in self.tfrecords_filename_list:
assert tf.data.TFRecordDataset(filename), \
('Expects each file to be valid!', filename)
        # Print the number of samples per tfrecord file.
num_samples = 0
for filename in self.tfrecords_filename_list:
num_samples_set = sum(
1 for _ in tf.python_io.tf_record_iterator(filename))
num_samples += num_samples_set
print(filename + ': ' + str(num_samples_set))
print("Total Samples: {}".format(num_samples))
# Load and set up modulus TFRecordIterator Processor.
tfrecords_iterator = tao_core.processors.TFRecordsIterator(
file_list=self.tfrecords_filename_list,
batch_size=self.batch_size,
shuffle_buffer_size=self._tfrecords_iterator_attributes['shuffle_buffer_size'],
shuffle=self._tfrecords_iterator_attributes['shuffle'],
repeat=self._tfrecords_iterator_attributes['repeat'],
batch_as_list=self._tfrecords_iterator_attributes['batch_as_list'])
return tfrecords_iterator
def _load_and_decode(self, example):
"""Reads and decodes the data within each record in the tfrecord.
Args:
example (dict): Contains the data encoded in the tfrecords.
Returns:
image (tf.Tensor): Tensor of shape (height, width, 3)
mask (tf.Tensor): Tensor of shape (height, width, 1)
joints (tf.Tensor): Tensor of shape (num_people, num_joints, 3)
meta_data (dict): Contains meta data required for processing
images and labels.
"""
# Get the dataset name
dataset = tf.cast(example['dataset'], tf.string)
# Get the person scales
scales = tf.decode_raw(example['person/scales'], tf.float64)
# Get the person centers
centers = tf.decode_raw(example['person/centers'], tf.float64)
# Create a dict to store the metadata
meta_data = defaultdict()
meta_data['dataset'] = dataset
meta_data['width'] = example['frame/width']
meta_data['height'] = example['frame/height']
meta_data['image_id'] = example['frame/image_id']
meta_data['scales'] = tf.reshape(scales, [-1])
meta_data['centers'] = tf.reshape(centers, [-1, 2])
# Get the labeled joints and transform dataset joints to BpNet joints
# format
joints = tf.decode_raw(example['person/joints'], tf.float64)
joints = self._convert_kpts_to_bpnet_format(dataset, joints)
# Load and decode image frame
image = self._read_image_frame(
self._load_file, self._decode_image, example['frame/image_path'])
# Load and decode mask
mask = self._read_image_frame(
self._load_file, self._decode_mask, example['frame/mask_path'])
return image, mask, joints, meta_data
def _convert_kpts_to_bpnet_format(self, dataset_name_tensor, joints):
"""Converts the keypoints to bpnet format using the dataset parser.
Args:
dataset_name_tensor (tf.Tensor): Name of the dataset
joints (tf.Tensor): Labeled joints in the current dataset format.
Returns:
result (tf.Tensor): Tensor of shape (num_people, num_joints, 3)
"""
# Iterate through all the supported datasets
for dataset_name in self.dataset2spec_map:
# Check if the current dataset matches with any of the supported
# datasets
tf_logical_check = tf.math.equal(
dataset_name_tensor, tf.constant(dataset_name, tf.string))
# If there is a match, the corresponding dataset parser is used to transform
# the labels to bpnet format. Else, it stores a constant zero to
# the result.
result = tf.cond(
tf_logical_check,
lambda dataset_name=dataset_name: tf.numpy_function(
self.dataset_cfgs[dataset_name].transform_labels,
[joints],
tf.float64),
lambda: tf.constant(
0.0,
dtype=tf.float64))
# TODO: Throw error if the dataset is not supported (tf.Assert)
return result
def _transform_labels(self, joint_labels):
"""Transform keypoints to final label tensors with heatmap and pafmap.
Args:
joint_labels (np.ndarray): Ground truth keypoint annotations with shape
(num_persons, num_joints, 3).
Returns:
labels (tf.Tensor): Final label tensors used for training with
shape (TARGET_HEIGHT, TARGET_WIDTH, num_channels).
"""
labels = tf.numpy_function(
self.label_processor.transform_labels, [joint_labels], tf.float64
)
return labels
@property
def num_samples(self):
"""Return number of samples in all label files."""
num_samples = sum([sum(1 for _ in tf.python_io.tf_record_iterator(
filename)) for filename in self.tfrecords_filename_list])
return num_samples
@staticmethod
def _tfrecord_parser():
"""Load and set up Modulus TFRecord features parsers.
Returns:
A dict of tensors with the same keys as the features dict, and dense tensor.
"""
# Processor for parsing meta `features`
features = {
"frame/image_path": tf.FixedLenFeature([], tf.string),
"frame/mask_path": tf.FixedLenFeature([], tf.string),
"frame/width": tf.FixedLenFeature([], tf.int64),
"frame/height": tf.FixedLenFeature([], tf.int64),
"frame/image_id": tf.FixedLenFeature([], tf.int64),
"person/joints": tf.FixedLenFeature([], tf.string),
"person/scales": tf.FixedLenFeature([], tf.string),
"person/centers": tf.FixedLenFeature([], tf.string),
"dataset": tf.FixedLenFeature([], tf.string)
}
proto_parser = tao_core.processors.ParseExampleProto(
features=features, single=True)
return proto_parser
def _image_decoder(self):
"""Create the image decoder.
Returns:
            decode_frame_image (tao_core.processors.DecodeImage): Frame DecodeImage Processor
                object for decoding frame inputs.
            decode_frame_mask (tao_core.processors.DecodeImage): Mask DecodeImage Processor
                object for decoding mask inputs.
"""
# Create the image decoder.
decode_frame_image = tao_core.processors.DecodeImage(
encoding=self.image_encoding,
data_format='channels_last',
channels=self.image_dims['channels']
)
# Create the mask decoder.
decode_frame_mask = tao_core.processors.DecodeImage(
encoding=self.image_encoding,
data_format='channels_last',
channels=1
)
return decode_frame_image, decode_frame_mask
@staticmethod
def _read_image_frame(load_func, decode_func, image_name):
"""Read and decode a single image on disk to a tensor.
Args:
load_func (tao_core.processors.LoadFile): File loading function.
decode_func (tao_core.processors.DecodeImage): Image decoding function.
image_name (str): Name of the image.
Returns:
image (Tensor): A decoded 3D image tensor (HWC).
"""
data = load_func(image_name)
image = decode_func(data)
image = tf.cast(image, tf.float32)
return image
@staticmethod
def _resize_image(image, output_height, output_width):
"""Pre-process the image by resizing.
Args:
image (Tensor): Input image (HWC) to be processed.
output_height (int): Output image height.
output_width (int): Output image width.
Returns:
image (Tensor): The image tensor (HWC) after resizing.
"""
image = tf.image.resize_images(image, (output_height, output_width),
method=tf.image.ResizeMethod.BILINEAR)
return image
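# Example (illustrative sketch): the normalization applied in `__call__` divides pixel
# values by `image_scale` and then subtracts `image_offset`. With the commonly used values
# of 256.0 and 0.5 (assumed here purely for illustration), an 8-bit image is mapped roughly
# into the range [-0.5, 0.5]. The snippet below mirrors that arithmetic with plain numpy.
if __name__ == "__main__":
    import numpy as np
    demo_pixels = np.array([0.0, 127.0, 255.0], dtype=np.float32)   # example pixel values
    normalized = demo_pixels / 256.0 - 0.5                          # divide, then subtract
    print(normalized)                                               # approx. [-0.5, -0.004, 0.496]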
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/dataloaders/bpnet_dataloader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pose config class for BpNet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from enum import Enum
import json
import numpy as np
import nvidia_tao_tf1.core as tao_core
from nvidia_tao_tf1.core.coreobject import TAOObject
class Joints(str, Enum):
"""Enum class containing the superset of body joints."""
NOSE = "nose"
NECK = "neck"
RIGHT_SHOULDER = "right_shoulder"
RIGHT_ELBOW = "right_elbow"
RIGHT_WRIST = "right_wrist"
LEFT_SHOULDER = "left_shoulder"
LEFT_ELBOW = "left_elbow"
LEFT_WRIST = "left_wrist"
RIGHT_HIP = "right_hip"
RIGHT_KNEE = "right_knee"
RIGHT_ANKLE = "right_ankle"
LEFT_HIP = "left_hip"
LEFT_KNEE = "left_knee"
LEFT_ANKLE = "left_ankle"
RIGHT_EYE = "right_eye"
LEFT_EYE = "left_eye"
RIGHT_EAR = "right_ear"
LEFT_EAR = "left_ear"
class JointVisibility:
"""Class containing the visiblity flags of body joints."""
# labeled and visible
L_VISIBLE = 0
# labeled and occluded,
# but easy to annotate with good accuracy
L_ANNOTATABLE = 1
# labeled and occluded,
# harder to annotate with good accuracy
L_OCCLUDED = 2
# truncated
L_TRUNCATED = 3
# not labeled for this person
NL_FOR_PERSON = 4
# not labeled in this dataset
NL_IN_DATASET = 5
# Define str->var mapping
map_visibiltiy_str = {
'visible': L_VISIBLE,
'annotatable': L_ANNOTATABLE,
'occluded': L_OCCLUDED,
'truncated': L_TRUNCATED,
'not_labeled': NL_FOR_PERSON,
'not_labeled_in_dataset': NL_IN_DATASET
}
class BpNetPoseConfig(TAOObject):
"""Class to hold all BpNet pose related parameters."""
BACKGROUND = "background"
@tao_core.coreobject.save_args
def __init__(self,
target_shape,
pose_config_path,
**kwargs):
"""Constructor.
Args:
target_shape (list): List containing the dimensions of target label [height, width]
pose_config_path (string): Absolute path to the pose config file.
"""
# Load the pose config file
self.pose_config_path = pose_config_path
self.pose_config = self._load_pose_config(pose_config_path)
# Get the person category config
self.person_config = self._get_category(self.pose_config, 'person')
# Get number of joints and edges in the skeleton and assert correctness
self.num_parts = self.person_config["num_joints"]
self.parts = self.person_config["keypoints"]
self.parts2idx = dict(zip(self.parts, range(self.num_parts)))
assert self.num_parts == len(self.parts)
self.skeleton = self.person_config["skeleton_edge_names"]
        # The skeleton is 1-indexed, so convert it to a 0-indexed list.
self.skeleton2idx = [[jidx_1 - 1, jidx_2 - 1]
for (jidx_1, jidx_2) in self.person_config["skeleton"]]
self.num_connections = len(self.skeleton)
        # Get left and right joints.
        # These pairs are used to swap the left/right joints when an image is flipped.
self._left_parts = [self.parts2idx[part] for part in self.parts if 'left_' in part]
self._right_parts = [self.parts2idx[part] for part in self.parts if 'right_' in part]
self.flip_pairs = np.array(
[[left_part, right_part]
for left_part, right_part in zip(self._left_parts, self._right_parts)]
)
# TODO: assert flip pairs are correctly matched. The part name excluding the left/right
# should be the same.
# Add background layer
self.parts += [self.BACKGROUND]
self.num_parts_with_background = len(self.parts)
self.num_paf_channels = 2 * self.num_connections
self.num_heatmap_channels = self.num_parts_with_background
self.num_total_channels = self.num_paf_channels + self.num_heatmap_channels
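        # For example (illustrative numbers only), an 18-joint skeleton with 19 edges would
        # yield 2 * 19 = 38 paf channels plus 18 + 1 = 19 heatmap channels (joints plus
        # background), i.e. 57 channels in total.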
self.label_slice_indices = {
'paf': [0, self.num_paf_channels],
'heatmap': [self.num_paf_channels,
self.num_paf_channels + self.num_parts],
'heatmap_with_background': [self.num_paf_channels,
self.num_paf_channels + self.num_parts_with_background],
'background': [self.num_paf_channels + self.num_parts,
self.num_paf_channels + self.num_parts_with_background]
}
# Final label tensor shape
self.label_tensor_shape = (target_shape[0], target_shape[1], self.num_total_channels)
# Topology
self.topology = self.pose_config_to_topology(self.pose_config, 'person')
@staticmethod
def _get_category(data, cat_name):
"""Get the configuration corresponding to the given category name."""
return [c for c in data['categories'] if c['name'] == cat_name][0]
@staticmethod
def _load_pose_config(pose_config_path):
"""Load the pose config based on the pose config typpe provided.
Args:
pose_config_path (string): Absolute path to the pose config file.
This determines the skeleton structure and joints to use.
Returns:
result (dict): Dictionary containing the bpnet pose params to use.
"""
if pose_config_path is None:
raise ValueError("Enter a valid path for the pose config.")
return json.load(open(pose_config_path))
@staticmethod
def pose_config_to_topology(pose_config, category):
"""Gets topology tensor from bpnet pose config.
Args:
pose_config (dict): Dictionary containing the bpnet pose params to use.
"""
category_config = BpNetPoseConfig._get_category(pose_config, category)
skeleton = category_config['skeleton']
K = len(skeleton)
topology = np.zeros((K, 4), dtype=np.int)
for k in range(K):
topology[k][0] = 2 * k
topology[k][1] = 2 * k + 1
topology[k][2] = skeleton[k][0] - 1
topology[k][3] = skeleton[k][1] - 1
return topology
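# Example (illustrative sketch): builds the topology tensor for a tiny hypothetical
# two-edge skeleton. Each row maps one edge to its pair of paf channel indices
# (columns 0-1) and its pair of 0-indexed joint ids (columns 2-3). The category and
# joint names below are made up purely for this example.
if __name__ == "__main__":
    demo_pose_config = {
        "categories": [{
            "name": "person",
            "num_joints": 3,
            "keypoints": ["nose", "neck", "right_shoulder"],
            "skeleton": [[1, 2], [2, 3]],  # 1-indexed edges: nose-neck, neck-right_shoulder
            "skeleton_edge_names": [["nose", "neck"], ["neck", "right_shoulder"]]
        }]
    }
    print(BpNetPoseConfig.pose_config_to_topology(demo_pose_config, "person"))
    # Expected output:
    # [[0 1 0 1]
    #  [2 3 1 2]]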
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/dataloaders/pose_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for BpNet dataloader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import pytest
import tensorflow as tf
from nvidia_tao_tf1.core.graph import get_init_ops
from nvidia_tao_tf1.cv.bpnet.dataloaders.bpnet_dataloader import BpData
from nvidia_tao_tf1.cv.bpnet.dataloaders.bpnet_dataloader import BpNetDataloader
from nvidia_tao_tf1.cv.bpnet.dataloaders.pose_config import BpNetPoseConfig
from nvidia_tao_tf1.cv.bpnet.dataloaders.processors.augmentation import AugmentationConfig
TEST_DATA_ROOT_PATH = os.getenv("CI_DATA_DIR", "/media/scratch.metropolis2/tao_ci/tao_tf1/data/bpnet")
def build_augmentation_config(augmentation_mode='person_centric', augmentation_dict=None):
"""Initialize and return object of type AugmentationConfig."""
if augmentation_dict is None:
augmentation_dict = {
'spatial_aug_params': {
'flip_lr_prob': 0.5,
'flip_tb_prob': 0.0,
'rotate_deg_max': 40.0,
'rotate_deg_min': None,
'zoom_prob': 0.0,
'zoom_ratio_min': 0.5,
'zoom_ratio_max': 1.1,
'translate_max_x': 40.0,
'translate_min_x': None,
'translate_max_y': 40.0,
'translate_min_y': None,
'use_translate_ratio': False,
'translate_ratio_max': 0.2,
'translate_ratio_min': 0.2,
'target_person_scale': 0.6
},
'spatial_augmentation_mode': augmentation_mode
}
augmentation_config = AugmentationConfig(**augmentation_dict)
return augmentation_config
def build_pose_config(pose_config_path, target_shape=(46, 46)):
"""Initialize and return object of type BpNetPoseConfig."""
pose_config = BpNetPoseConfig(target_shape, pose_config_path)
return pose_config
def build_image_config(image_shape=None, image_encoding='jpg'):
"""Initialize and return dict with image related parameters."""
if image_shape is None:
image_dims = {
"channels": 3,
"height": 368,
"width": 368
}
else:
image_dims = {
"channels": image_shape[2],
"height": image_shape[0],
"width": image_shape[1]
}
image_config = {
'image_dims': image_dims,
'image_encoding': image_encoding
}
return image_config
def build_normalization_config():
"""Initialize and return dict with normalization related parameters."""
normalization_config = {
'image_scale': [256.0, 256.0, 256.0],
'image_offset': [0.5, 0.5, 0.5],
'mask_scale': [255.0],
'mask_offset': [0.0]
}
return normalization_config
def build_dataset_config(
train_records_path=None,
val_records_folder_path=None,
val_records_path=None
):
"""Initialize and return dict with dataset related parameters."""
root_data_path = os.path.join(TEST_DATA_ROOT_PATH, 'test_data/')
train_records_folder_path = os.path.join(TEST_DATA_ROOT_PATH, 'test_data/')
if train_records_path is None:
train_records_path = ['coco/coco_sample.tfrecords']
dataset_config = {
'root_data_path': root_data_path,
'train_records_folder_path': train_records_folder_path,
'train_records_path': train_records_path,
'val_records_folder_path': val_records_folder_path,
'val_records_path': val_records_path,
'dataset_specs': {
'coco': 'nvidia_tao_tf1/cv/bpnet/dataio/dataset_specs/coco_spec.json'
}
}
return dataset_config
def build_label_processor_config():
"""Initialize and return dict with label processor related parameters."""
label_processor_config = {
'paf_gaussian_sigma': 0.03,
'heatmap_gaussian_sigma': 0.15,
'paf_ortho_dist_thresh': 1.0
}
return label_processor_config
def build_dataloader(
batch_size=3,
image_shape=None,
target_shape=None,
train_records_path=None,
pose_config_path=None,
normalization_params=None,
augmentation_mode=None
):
"""Initialize and return object of type BpNetDataloader."""
# Set default values
if image_shape is None:
image_shape = [368, 368, 3]
if target_shape is None:
target_shape = [46, 46]
if train_records_path is None:
train_records_path = ['coco/sample.tfrecords']
if pose_config_path is None:
pose_config_path = \
'nvidia_tao_tf1/cv/bpnet/dataloaders/pose_configurations/bpnet_18joints.json'
# Build BpNetPoseConfig
pose_config = build_pose_config(
pose_config_path=pose_config_path,
target_shape=target_shape
)
# Build image config
image_config = build_image_config(image_shape)
# Build dataset config
dataset_config = build_dataset_config(train_records_path)
# Build augmentation config with default params
augmentation_config = build_augmentation_config(augmentation_mode=augmentation_mode)
# Build label processor config with default params
label_processor_config = build_label_processor_config()
# Build normalization params
normalization_params = build_normalization_config()
dataloader = BpNetDataloader(
batch_size=batch_size,
pose_config=pose_config,
image_config=image_config,
dataset_config=dataset_config,
augmentation_config=augmentation_config,
label_processor_config=label_processor_config,
normalization_params=normalization_params
)
return dataloader, pose_config
def test_dataloader_return_type():
"""Test for correct type."""
dataloader, _ = build_dataloader()
fetches = dataloader()
sess = tf.compat.v1.Session()
sess.run(get_init_ops())
example = sess.run(fetches)
assert type(example) == BpData
@pytest.mark.parametrize(
"pose_config_path",
['nvidia_tao_tf1/cv/bpnet/dataloaders/pose_configurations/bpnet_18joints.json']
)
@pytest.mark.parametrize("image_shape", [[368, 368, 3], [256, 256, 3], [224, 320, 3]])
@pytest.mark.parametrize("target_shape", [[46, 46], [32, 32], [28, 40]])
@pytest.mark.parametrize(
"augmentation_mode", ['person_centric', 'standard', 'standard_with_fixed_aspect_ratio'])
def test_dataloader_shapes(pose_config_path, image_shape, target_shape, augmentation_mode):
"""Test for correct shape of dataloder objects."""
batch_size = 2
# Check if the dataloader should throw an exception for the given parameters
exception_expected = (image_shape[0] // target_shape[0]) != (image_shape[1] // target_shape[1])
# If an exception is expected, use `pytest.raises(Exception)` to assert
if exception_expected:
with pytest.raises(Exception):
dataloader, pose_config = build_dataloader(
batch_size=batch_size,
image_shape=image_shape,
target_shape=target_shape,
pose_config_path=pose_config_path,
augmentation_mode=augmentation_mode
)
else:
dataloader, pose_config = build_dataloader(
batch_size=batch_size,
image_shape=image_shape,
target_shape=target_shape,
pose_config_path=pose_config_path,
augmentation_mode=augmentation_mode
)
fetches = dataloader()
sess = tf.compat.v1.Session()
sess.run(get_init_ops())
example = sess.run(fetches)
# Assert that the shapes of all tensors are as expected.
label_tensor_shape = pose_config.label_tensor_shape
assert np.shape(example.images) == \
(batch_size, image_shape[0], image_shape[1], image_shape[2])
assert np.shape(example.masks) == \
(batch_size, label_tensor_shape[0], label_tensor_shape[1], label_tensor_shape[2])
assert np.shape(example.labels) == \
(batch_size, label_tensor_shape[0], label_tensor_shape[1], label_tensor_shape[2])
# Assert that the images and mask values are within the range after normalization
assert np.equal(np.sum(example.images > 0.500001), 0)
assert np.equal(np.sum(example.images < -0.500001), 0)
assert np.equal(np.sum(example.masks > 1.000001), 0)
assert np.equal(np.sum(example.masks < -0.00001), 0)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/dataloaders/tests/test_bpnet_dataloader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Processor for applying augmentations and other transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
import nvidia_tao_tf1.core as tao_core
from nvidia_tao_tf1.core.coreobject import TAOObject
from nvidia_tao_tf1.cv.bpnet.dataloaders.processors.augmentation_utils import \
AugmentationModes, get_spatial_transformation_matrix_np
class AugmentationConfig(TAOObject):
"""Class to hold and process all Augmentation related parameters."""
# Please refer to `AugmentationModes` definition for details about each
SUPPORTED_MODES = [
AugmentationModes.PERSON_CENTRIC,
AugmentationModes.STANDARD,
AugmentationModes.STANDARD_WITH_FIXED_ASPECT_RATIO
]
@tao_core.coreobject.save_args
def __init__(self,
spatial_aug_params,
color_aug_params=None,
spatial_augmentation_mode=None,
**kwargs):
"""__init__ method.
Args:
            spatial_aug_params (dict): Ranges of the spatial transformation parameters.
            color_aug_params (dict): Ranges of the color transformation parameters.
            spatial_augmentation_mode (AugmentationModes): Augmentation mode to apply for the
                images. Refer to the Enum class for details about the augmentation modes.
"""
self.color_aug_params = color_aug_params
self.spatial_aug_params = spatial_aug_params
        # Build the spatial and color augmentation parameters
# This will update the default parameters with the ones specified
# by the user.
self.spatial_aug_params, self.color_aug_params = \
self.build_augmentation_config(spatial_aug_params, color_aug_params)
# Build the params for identity transformation (no augmentation)
self.identity_spatial_aug_params, self.identity_color_aug_params = \
self.build_augmentation_config()
# If the augmentation mode is not specified, use default
self.spatial_augmentation_mode = spatial_augmentation_mode
if spatial_augmentation_mode is None:
self.spatial_augmentation_mode = AugmentationModes.PERSON_CENTRIC
@staticmethod
def get_default_augmentation_config():
"""Get the default the augmentation config.
Returns:
spatial_aug_params_default (dict): default params to use for spatial transformations
color_aug_params_default (dict): default params to use for color transformations
"""
spatial_aug_params_default = {
'flip_lr_prob': 0.0,
'flip_tb_prob': 0.0,
'rotate_deg_max': 0.0,
'rotate_deg_min': None,
'zoom_prob': 0.0,
'zoom_ratio_min': 1.0,
'zoom_ratio_max': 1.0,
'translate_max_x': 0.0,
'translate_min_x': None,
'translate_max_y': 0.0,
'translate_min_y': None,
'use_translate_ratio': False,
'translate_ratio_max': 0.0,
'translate_ratio_min': 0.0,
'target_person_scale': 0.6
}
# TODO (color augmentations not implemented yet)
color_aug_params_default = {}
return spatial_aug_params_default, color_aug_params_default
@staticmethod
def build_augmentation_config(spatial_aug_params=None, color_aug_params=None):
"""Builds a default augmentation dict and updates with the user provided params.
Args:
spatial_aug_params (dict): User provided params for spatial transformations
color_aug_params (dict): User provided params for color transformations
Returns:
spatial_aug_params_updated (dict): Updated spatial transformations params
color_aug_params_updated (dict): Updated color transformations params
"""
# Get the default spatial and color augmentation parameters
spatial_aug_params_default, color_aug_params_default = \
AugmentationConfig.get_default_augmentation_config()
def _update(d, u):
"""Update the dictionaries.
Args:
d (dict): Dictionary to be updated.
u (dict): Dictionary that is used to update.
Returns:
d (dict): Updated dictionary.
"""
if u is not None:
d.update(u)
return d
sparams_updated = _update(spatial_aug_params_default, spatial_aug_params)
cparams_updated = _update(color_aug_params_default, color_aug_params)
        # If min of the translation range is None, use a range symmetric about the center.
if sparams_updated['translate_min_x'] is None:
sparams_updated['translate_min_x'] = -sparams_updated['translate_max_x']
if sparams_updated['translate_min_y'] is None:
sparams_updated['translate_min_y'] = -sparams_updated['translate_max_y']
        # If min of the rotation range is None, use a range symmetric about 0 deg.
if sparams_updated['rotate_deg_min'] is None:
sparams_updated['rotate_deg_min'] = -sparams_updated['rotate_deg_max']
return sparams_updated, cparams_updated
@staticmethod
def generate_random_spatial_aug_params(aug_params, image_height, image_width):
"""Generates final params to be used for spatial augmentation using the provided ranges.
Args:
aug_params (dict): Spatial transformations param ranges
Returns:
final_params (dict): Final spatial transformations params to be
used for the current sample.
"""
# Determine whether to flip the image in this iteration
flip_lr_flag = np.less(np.random.uniform(0.0, 1.0), aug_params['flip_lr_prob'])
flip_tb_flag = np.less(np.random.uniform(0.0, 1.0), aug_params['flip_tb_prob'])
# Determine the random translation along x and y in this iteration.
# Check whether to use translation range as ratio of image or absolute values
if aug_params['use_translate_ratio']:
if aug_params['translate_ratio_min'] is None:
aug_params['translate_ratio_min'] = -aug_params['translate_ratio_max']
translate_x = np.random.uniform(
aug_params['translate_ratio_min'], aug_params['translate_ratio_max']) * image_width
translate_y = np.random.uniform(
aug_params['translate_ratio_min'], aug_params['translate_ratio_max']) * image_height
else:
translate_x = np.random.random_integers(
aug_params['translate_min_x'], aug_params['translate_max_x'])
translate_y = np.random.random_integers(
aug_params['translate_min_y'], aug_params['translate_max_y'])
# Determine the random scale/zoom
zoom_flag = np.less(np.random.uniform(0.0, 1.0), aug_params['zoom_prob'])
zoom_ratio = np.random.uniform(
aug_params['zoom_ratio_min'], aug_params['zoom_ratio_max']
) if zoom_flag else 1.0
# Determine the random rotation angle based on the specified range.
        # If min of the range is None, use a range symmetric about 0 deg.
if aug_params['rotate_deg_min'] is None:
aug_params['rotate_deg_min'] = -aug_params['rotate_deg_max']
rotate_deg = np.random.uniform(aug_params['rotate_deg_min'], aug_params['rotate_deg_max'])
rotate_rad = np.radians(rotate_deg)
# Build the final params
final_params = {
'flip_lr': flip_lr_flag,
'flip_tb': flip_tb_flag,
'translate_x': translate_x,
'translate_y': translate_y,
'zoom_ratio': zoom_ratio,
'rotate_rad': rotate_rad
}
return final_params
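# Example (illustrative sketch): merging user overrides with the default spatial
# augmentation parameters. The override values below are arbitrary; unspecified minimum
# values default to ranges symmetric about zero, as handled in build_augmentation_config.
if __name__ == "__main__":
    user_sparams = {'rotate_deg_max': 40.0, 'translate_max_x': 40.0, 'flip_lr_prob': 0.5}
    sparams, _ = AugmentationConfig.build_augmentation_config(user_sparams)
    print(sparams['rotate_deg_min'])    # -40.0, mirrored from rotate_deg_max
    print(sparams['translate_min_x'])   # -40.0, mirrored from translate_max_x
    print(sparams['zoom_ratio_max'])    # 1.0, untouched default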
class BpNetSpatialTransformer(object):
"""Processor that obtains the spatial transformation matrix and applies to kpts."""
def __init__(self,
aug_params,
identity_aug_params,
image_shape,
pose_config,
augmentation_mode=None,
**kwargs):
"""__init__ method.
Args:
aug_params (dict): Spatial transformations param ranges
identity_aug_params (dict): Spatial transformations param ranges to
use when augmentation is disabled.
image_shape (list): List containing the shape of input image. [H, W, C]
pose_config (BpNetPoseConfig):
augmentation_mode (AugmentationModes): Augmentation mode to apply for the
images. Refer to the Enum class for details about the augmentation modes.
"""
self.IMAGE_WIDTH = image_shape[1]
self.IMAGE_HEIGHT = image_shape[0]
self.pose_config = pose_config
self.aug_params = aug_params
self.identity_aug_params = identity_aug_params
self.augmentation_mode = augmentation_mode
        # Check if the augmentation mode is within the supported modes.
        if augmentation_mode not in AugmentationConfig.SUPPORTED_MODES:
            raise ValueError(
                "Augmentation mode must be one of {}.".format(
                    AugmentationConfig.SUPPORTED_MODES))
def call(self, image, joint_labels, scales, centers, enable_aug=True):
"""Obtains transormation matrix and transforms labels using the given parameters.
        Args:
            image (np.ndarray): Input image of shape (height, width, channels).
            joint_labels (np.ndarray): Ground truth keypoint annotations with shape
                (num_persons, num_joints, 3).
scales (np.ndarray): Contains the bbox widths of all labeled persons
in the image.
centers (np.ndarray): Contains the center of bbox of all labeled persons
in the image.
enable_aug (bool): Boolean flag to toggle augmentation.
Returns:
            joint_labels (np.ndarray): Output keypoints after spatial transformation.
            stm (np.ndarray): 3x3 spatial transformation matrix used for the image
                (transposed for the modulus spatial transformer).
"""
# Get image shape
image_height, image_width = image.shape[0:2]
# Get the final augmentation params to build the transformation matrix
if enable_aug:
# If augmentation is enabled, generate final params using the random param ranges.
random_aug_params = \
AugmentationConfig.generate_random_spatial_aug_params(
self.aug_params, image_height, image_width)
else:
# If augmentation is disabled, generate final params using param ranges
# for identity augmentation.
random_aug_params = \
AugmentationConfig.generate_random_spatial_aug_params(
self.identity_aug_params, image_height, image_width)
# Update the config according to the augmentation mode
if self.augmentation_mode == AugmentationModes.PERSON_CENTRIC:
random_aug_params = \
self._update_with_person_centric_params(scales, centers, random_aug_params)
# Get the spatial transformation matrix
stm = get_spatial_transformation_matrix_np(
**random_aug_params,
image_width=image_width,
image_height=image_height,
target_width=self.IMAGE_WIDTH,
target_height=self.IMAGE_HEIGHT,
augmentation_mode=self.augmentation_mode
)
# Transform keypoints
joint_labels = self.apply_affine_transform_kpts(
joint_labels, stm, self.pose_config, random_aug_params['flip_lr'])
# NOTE: This is needed as the spatial transformer in modulus expects
# transposed version of the matrix expected by cv2.warpAffine
stm = np.transpose(stm)
return joint_labels, stm
def _update_with_person_centric_params(self, scales, centers, random_aug_params):
"""Update the spatial augmentation params with PERSON_CENTRIC params.
This ensures that the augmentations are about the person of interest in the
image. So the POI is centered about the image and the scaling is adjusted
such that the POI is approximately scale normalized with respect to the image.
NOTE: The way the data is generated for this mode of augmentation, each image
is repeated `n` times where `n` is the number of persons of interest. Hence,
every time, the image is augmented about each of these POIs. Please refer to
`dataio` to understand how the data is generated for this mode.
Args:
scales (np.ndarray): Contains the bbox widths of all labeled persons
in the image.
centers (np.ndarray): Contains the center of bbox of all labeled persons
in the image.
random_aug_params (dict): Params drawn from a uniform distribution out of
the specified ranges in the config.
Returns:
random_aug_params (dict): Updated `random_aug_params` considering the person
of interest meta data.
"""
# Update the random zoom ratio by considering the desired scale
# of the person of interest (poi).
poi_width = scales[0]
poi_width_ratio = poi_width / self.IMAGE_WIDTH
random_zoom_ratio = random_aug_params['zoom_ratio']
# TODO: Check in what case the denominator is zero.
zoom_ratio = \
self.aug_params['target_person_scale'] / (poi_width_ratio * random_zoom_ratio + 0.00001)
# Update the translation considering the center of the person of interest
poi_center = centers[0]
translate_x = -(poi_center[0] + random_aug_params['translate_x'])
translate_y = -(poi_center[1] + random_aug_params['translate_y'])
random_aug_params['zoom_ratio'] = zoom_ratio
random_aug_params['translate_x'] = translate_x
random_aug_params['translate_y'] = translate_y
return random_aug_params
@staticmethod
def apply_affine_transform_image(image, size, affine_transform, border_value):
"""Apply affine transformation to the image given the transformation matrix.
Args:
image (np.ndarray): Input image to be augmented.
            size (tuple): Destination image size (width, height), as expected by cv2.warpAffine.
affine_transform (np.ndarray): Spatial transformation matrix (stm)
that is used for image augmentation
border_value (tuple/int): Value used in case of a constant border
Returns:
result (np.ndarray): Transformed image.
"""
return cv2.warpAffine(
image, affine_transform[0:2], size, flags=cv2.INTER_CUBIC,
borderMode=cv2.BORDER_CONSTANT, borderValue=border_value
)
@staticmethod
def apply_affine_transform_kpts(keypoints, affine_transform, pose_config, flip_lr_flag):
"""Apply affine transformation to the keypoints given the transformation matrix.
Args:
            keypoints (np.ndarray): Input keypoints to be transformed.
affine_transform (np.ndarray): Spatial transformation matrix (stm)
that is used for the image augmentation.
pose_config (BpNetPoseConfig): Needed to get the pairs of joints
                that need to be swapped when the image is flipped.
flip_lr_flag (bool): Indicates whether the image was flipped
Returns:
keypoints (np.ndarray): Transformed keypoints.
"""
# Convert the keypoints to homogeneous coordinates
num_persons, num_joints, _ = keypoints.shape
homogeneous_coors = np.concatenate((
keypoints[:, :, 0:2], np.ones((num_persons, num_joints, 1))), axis=-1)
        # Project the ground truth keypoints to the new space using the affine transformation
        transformed_coords = np.matmul(
            affine_transform[0:2], homogeneous_coors.transpose([0, 2, 1])
        )
        # Copy the transformed keypoints back into the `keypoints` array to retain
        # the visibility flags
        keypoints[:, :, 0:2] = transformed_coords.transpose([0, 2, 1])
# If image was flipped, swap the left/right joint pairs accordingly
if flip_lr_flag:
temp_left_kpts = keypoints[:, pose_config.flip_pairs[:, 0], :]
temp_right_kpts = keypoints[:, pose_config.flip_pairs[:, 1], :]
keypoints[:, pose_config.flip_pairs[:, 0], :] = temp_right_kpts
keypoints[:, pose_config.flip_pairs[:, 1], :] = temp_left_kpts
return keypoints
class BpNetColorTranformer(object):
"""Processor that transforms images using a given color transformation matrix."""
# TODO: Implement the Color transformer module.
pass
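# Example (illustrative sketch): applying a pure-translation affine matrix to keypoints with
# `apply_affine_transform_kpts`. Only the (x, y) columns are transformed; the visibility flag
# in the third column is preserved. Passing `pose_config=None` is safe here only because
# `flip_lr_flag` is False, so the flip-pair swap is never reached. All numbers are arbitrary.
if __name__ == "__main__":
    demo_kpts = np.array([[[10.0, 20.0, 1.0],
                           [30.0, 40.0, 0.0]]])      # shape (1 person, 2 joints, 3)
    demo_stm = np.array([[1.0, 0.0, 5.0],
                         [0.0, 1.0, -5.0],
                         [0.0, 0.0, 1.0]])           # translate x by +5, y by -5
    print(BpNetSpatialTransformer.apply_affine_transform_kpts(
        demo_kpts, demo_stm, pose_config=None, flip_lr_flag=False))
    # [[[15. 15.  1.]
    #   [35. 35.  0.]]]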
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/dataloaders/processors/augmentation.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License."""
"""BpNet Processor definitions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/dataloaders/processors/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Label transformer for BpNet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import numpy as np
from nvidia_tao_tf1.cv.bpnet.dataloaders.pose_config import JointVisibility
logger = logging.getLogger(__name__)
class LabelProcessor(object):
"""Label Processor class.
This sets up the labels so it can be consumed for training. It transforms the keypoint
labels coming from the dataset into heatmaps (gaussian heatblobs) and part affinity maps
(vector fields showing the limb connections).
"""
HEATMAP_THRESHOLD = 0.001
def __init__(self,
pose_config,
image_shape,
target_shape,
paf_gaussian_sigma=0.03,
heatmap_gaussian_sigma=0.15,
paf_ortho_dist_thresh=1.0):
"""__init__ method.
Args:
pose_config (PoseConfig)
image_shape (list): List containing the shape of input image. [H, W, C]
target_shape (list): List containing the shape of target labels. [H, W]
paf_gaussian_sigma (float): Sigma value to be used for gaussian weighting
of vector fields.
heatmap_gaussian_sigma (float): Sigma value to be used for gaussian weighting
of heatblobs.
paf_ortho_dist_thresh (float): Orthogonal distance threshold for part affinity
fields. Note that this will be multiplied by the stride.
"""
self.TARGET_WIDTH = target_shape[1]
self.TARGET_HEIGHT = target_shape[0]
self.IMAGE_WIDTH = image_shape[1]
self.IMAGE_HEIGHT = image_shape[0]
# Get the stride of the network using the target and image shapes
self.STRIDE = image_shape[0] // target_shape[0]
assert (image_shape[0] // target_shape[0]) == (image_shape[1] // target_shape[1])
# Get the heatmap exponential factor used for adding gaussian heatblobs
self.HEATMAP_EXP_FACTOR = 1 / (2.0 * heatmap_gaussian_sigma * heatmap_gaussian_sigma)
# Get the paf exponential factor used for adding part affinity fields
        # TODO: (This is currently not being used) Implement an exp-weighted
        # pafmap instead of a binary mask based on orthogonal distance.
self.PAF_EXP_FACTOR = 1 / (2.0 * paf_gaussian_sigma * paf_gaussian_sigma)
# Get the orthogonal distance threshold for part affinity fields.
self.PAF_ORTHO_DIST_THRESH = paf_ortho_dist_thresh * self.STRIDE
# Get the slice indices for paf, heatmap and background within the label tensor
self.pose_config = pose_config
self.paf_slice_indices = self.pose_config.label_slice_indices['paf']
self.heatmap_slice_indices = self.pose_config.label_slice_indices['heatmap']
self.background_slice_indices = self.pose_config.label_slice_indices['background']
        # Initialize the vectors and meshgrid mapping target space to input space.
self.x_vec, self.y_vec, \
self.mgrid_x, self.mgrid_y = self._initialize_grids()
def _initialize_grids(self):
"""Initialize the grid locations mapping indices in target labels to the input space.
Returns:
x_vec (np.ndarray): x indices mapped from target space to input space
y_vec (np.ndarray): y indices mapped from target space to input space
mgrid_x (np.ndarray): row repeated matrix returned by meshgrid
mgrid_y (np.ndarray): column repeated matrix returned by meshgrid
"""
# Vector of indices mapping from target space to input space (spacing equal to stride)
x_vec = np.arange(self.TARGET_WIDTH) * self.STRIDE
y_vec = np.arange(self.TARGET_HEIGHT) * self.STRIDE
# Shifting to center indices of the input strides from top left indices
x_vec = x_vec + (self.STRIDE - 1) / 2
y_vec = y_vec + (self.STRIDE - 1) / 2
# NOTE (Different from prev implementation):
        # Here we use centered grid locations as distances for adding the part affinity fields
# and heatmaps, whereas earlier we used top left locations for paf.
# mgrid_y, mgrid_x = np.meshgrid(y_vec, x_vec, indexing='ij')
# NEW NOTE: Switching to top-left location instead of center of grid temporarily (FIXME)
mgrid_y, mgrid_x = np.mgrid[0:self.IMAGE_HEIGHT:self.STRIDE, 0:self.IMAGE_WIDTH:self.STRIDE]
return x_vec, y_vec, mgrid_x, mgrid_y
def transform_labels(self, joint_labels):
"""Transform the joint labels into final label tensors that can be used for training.
Consists of joint heatmaps, background heatmaps and part affinity maps.
Args:
joint_labels (np.ndarray): Ground truth keypoint annotations with shape
(num_persons, num_joints, 3).
Returns:
labels_arr (np.ndarray): Final label tensors used for training with
shape (TARGET_HEIGHT, TARGET_WIDTH, num_channels).
"""
# Initialize labels array
labels_arr = np.zeros(self.pose_config.label_tensor_shape, dtype=np.float)
# Check if the number of parts in the config match the joint labels
assert self.pose_config.num_parts == joint_labels.shape[1]
# TODO: add support to have different weights for heatmaps and part affinity fields
# for keypoints with different occlusion flags
# Split the label array into pafmaps, heatmaps, and background
part_affinity_fields = \
labels_arr[:, :, self.paf_slice_indices[0]:self.paf_slice_indices[1]]
joint_heatmaps = \
labels_arr[:, :, self.heatmap_slice_indices[0]:self.heatmap_slice_indices[1]]
# For each part, find the joints to use (or the valid joints) based on
        # the visibility flag.
selected_joints_list = []
for jidx in range(self.pose_config.num_parts):
selected_joints = joint_labels[:, jidx, 2] == JointVisibility.L_VISIBLE
selected_joints = np.logical_or(
selected_joints, joint_labels[:, jidx, 2] == JointVisibility.L_ANNOTATABLE)
# TODO: Should handle L_OCCLUDED differently from L_VISIBLE and L_ANNOTATABLE.
selected_joints = np.logical_or(
selected_joints, joint_labels[:, jidx, 2] == JointVisibility.L_OCCLUDED)
selected_joints_list.append(selected_joints)
# TODO: Take channel-wise mask as input to this function
# When JointVisibility is NL_IN_DATASET, the corresponding mask channels
# needs to be masked.
# Generate Joint heatmaps using the joint labels.
labels_arr[:, :, self.heatmap_slice_indices[0]:self.heatmap_slice_indices[1]] = \
self.generate_joint_heatmaps(
joint_heatmaps,
joint_labels,
selected_joints_list
)
# Generate Background heatmap using the joint heatmaps.
labels_arr[:, :, self.background_slice_indices[0]:self.background_slice_indices[1]] = \
self.generate_background_heatmap(
joint_heatmaps
)
# Generate Part Affinity fields using the joint labels.
labels_arr[:, :, self.paf_slice_indices[0]:self.paf_slice_indices[1]] = \
self.generate_part_affinity_fields(
part_affinity_fields,
joint_labels,
selected_joints_list
)
return labels_arr
def generate_joint_heatmaps(self, heatmaps, joint_labels, selected_joints_list):
"""Transform keypoints / joint labels into gaussian heatmaps on lable array.
Args:
heatmaps (np.ndarray): Array of shape (TARGET_WIDTH, TARGET_HEIGHT, num_parts)
initialized with zeroes.
joint_labels (np.ndarray): Ground truth keypoint annotations with shape
(num_persons, num_joints, 3).
selected_joints_list (list): List of Boolean masks for each part that
represent the joints to be used for label generation.
Returns:
            heatmaps (np.ndarray): Array of shape (TARGET_HEIGHT, TARGET_WIDTH, num_parts)
containing the gaussian heatblobs representing the joints as peaks.
"""
num_joints = self.pose_config.num_parts
for jidx in range(num_joints):
# Get labels of the people for whom the current joint (jidx) should
# be selected (or is marked/labeled).
joints_to_use = joint_labels[selected_joints_list[jidx], jidx, 0:2]
# TODO: Vectorize this part of the code.
# Iterate over the selected persons with the current joint and add
# them to the heatmap
for _, joint_label in enumerate(joints_to_use):
jx, jy = joint_label
# Compute 1-D gaussian centered around jx and jy respectively.
gaussian1d_x = np.exp(-np.power((self.x_vec - jx), 2) * self.HEATMAP_EXP_FACTOR)
gaussian1d_y = np.exp(-np.power((self.y_vec - jy), 2) * self.HEATMAP_EXP_FACTOR)
# Outer product of the 1-D gaussian results in 2-D gaussian
# centered around (jx, jy) to get the desired heatblob for
# the current joint
gaussian_heatblob = np.outer(gaussian1d_y, gaussian1d_x)
# Combine the current heatblob with the heatmap of the corresponding joint
# by computing element-wise maximum. This is how the overlapping peaks are
# handled.
heatmaps[:, :, jidx] = np.maximum(heatmaps[:, :, jidx], gaussian_heatblob)
# NOTE (Different from prev implementation):
# Clip the values that are below threshold.
# TODO: Check if necessary to clip the max of heatmaps to 1.
heatmaps[:, :, jidx] = np.where(
heatmaps[:, :, jidx] < LabelProcessor.HEATMAP_THRESHOLD, 0.0, heatmaps[:, :, jidx])
return heatmaps
def generate_background_heatmap(self, joint_heatmaps):
"""Generate the background heatmap using joint heatmaps.
Args:
            joint_heatmaps (np.ndarray): Array of shape (TARGET_HEIGHT, TARGET_WIDTH, num_parts)
containing the gaussian heatblobs representing the joints as peaks.
Returns:
            heatmap (np.ndarray): Array of shape (TARGET_HEIGHT, TARGET_WIDTH, 1)
containing the background heatmap representing the background area
in the image.
"""
# The idea is to represent everything that is not joint labels as background
# So the joint heatmaps are inverted
# NOTE (Different from prev implementation):
# Clip the values so that they are between 0 and 1.
heatmap = np.clip(1.0 - np.amax(joint_heatmaps, axis=2), 0.0, 1.0)
return np.expand_dims(heatmap, axis=-1)
def generate_part_affinity_fields(self, pafmaps, joint_labels, selected_joints_list):
"""Transform keypoints / joint labels into part affinity fields on lable array.
Args:
pafmaps (np.ndarray): Array of shape (TARGET_WIDTH, TARGET_HEIGHT, num_connections * 2)
initialized with zeroes.
joint_labels (np.ndarray): Ground truth keypoint annotations with shape
(num_persons, num_joints, 3).
selected_joints_list (list): List of Boolean masks for each part that
represent the joints to be used for label generation.
Returns:
            pafmaps (np.ndarray): Array of shape (TARGET_HEIGHT, TARGET_WIDTH, num_connections * 2)
containing the part affinity vector fields representing the connections b/w joints.
"""
num_connections = len(self.pose_config.skeleton2idx)
# Initialize the counter to keep track of overlapping paf vectors
counter = np.zeros((
self.TARGET_HEIGHT, self.TARGET_WIDTH, num_connections), dtype=np.int16)
# Iterate through the joint connections
for cidx, (jidx_1, jidx_2) in enumerate(self.pose_config.skeleton2idx):
# Get labels of the people for whom the current connection involving
# joints (jidx_1, jidx_2) should be selected.
valid_connections = selected_joints_list[jidx_1] & selected_joints_list[jidx_2]
connections_start = joint_labels[valid_connections, jidx_1, 0:2]
connections_end = joint_labels[valid_connections, jidx_2, 0:2]
# Iterate over the selected persons with the current connection and add
# them to the pafmap
for (cstart_x, cstart_y), (cend_x, cend_y) \
in zip(connections_start, connections_end):
# X and Y components of the connection
vec_x = cend_x - cstart_x
vec_y = cend_y - cstart_y
conn_length = np.sqrt(np.power(vec_x, 2) + np.power(vec_y, 2))
if conn_length == 0:
logger.warning("Limb length is zeo. Skipping part affinity label.")
continue
# Compute the unit vectors of the connection along x and y directions
norm_x = vec_x / conn_length
norm_y = vec_y / conn_length
# Compute the location of the start of the connections in the target
# space. Ensure they are within the target image bounds.
min_x = max(0, int(round((
min(cstart_x, cend_x) - self.PAF_ORTHO_DIST_THRESH) / self.STRIDE)))
min_y = max(0, int(round((
min(cstart_y, cend_y) - self.PAF_ORTHO_DIST_THRESH) / self.STRIDE)))
# Compute the location of the end of the connections in the target
# space. Ensure they are within the target image bounds.
max_x = min(self.TARGET_WIDTH, int(round((
max(cstart_x, cend_x) + self.PAF_ORTHO_DIST_THRESH) / self.STRIDE)))
max_y = min(self.TARGET_HEIGHT, int(round((
max(cstart_y, cend_y) + self.PAF_ORTHO_DIST_THRESH) / self.STRIDE)))
if max_x < 0 or max_y < 0:
continue
# Crop the region of interest where the paf vectors will be added
mgrid_y_roi = self.mgrid_y[min_y:max_y, min_x:max_x]
mgrid_x_roi = self.mgrid_x[min_y:max_y, min_x:max_x]
# Compute the distance matrix comprising of orthogonal distance of every
# location in the ROI from the vector representing the connection.
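                # This is the standard point-to-line distance: for each grid point P and the
                # limb segment starting at A = (cstart_x, cstart_y) with direction
                # v = (vec_x, vec_y), |(A - P) x v| / |v| is the perpendicular distance of P
                # from the limb line (2-D cross product magnitude divided by the limb length).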
dist_matrix = np.abs(
((cstart_y - mgrid_y_roi) * vec_x - (cstart_x - mgrid_x_roi) * vec_y)
/ conn_length
)
# Generate the filter matrix which is a binary array representing the
# locations where the normalized vectors are to be added. Any location
# having an orthogonal distance greater than the threshold is masked.
filter_matrix = np.where(dist_matrix > self.PAF_ORTHO_DIST_THRESH, 0, 1)
# NOTE (Different from prev implementation):
# Use of filter_matrix to mask the pixels farther from the vector
# than the given orthogonal paf distance threshold.
counter[min_y:max_y, min_x:max_x, cidx] += filter_matrix
pafmaps[min_y:max_y, min_x:max_x, cidx * 2 + 0] = norm_x * filter_matrix
pafmaps[min_y:max_y, min_x:max_x, cidx * 2 + 1] = norm_y * filter_matrix
# TODO: Normalize/Average the PAF (otherwise longer limb gives stronger
# absolute strength)
return pafmaps
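# Example (illustrative sketch): mirrors the heatmap construction in
# LabelProcessor.generate_joint_heatmaps on a tiny 5x5 grid. A 2-D gaussian centered on a
# keypoint is formed as the outer product of two 1-D gaussians; overlapping peaks would be
# merged with an element-wise maximum. Grid size and sigma below are arbitrary examples.
if __name__ == "__main__":
    x_vec = np.arange(5, dtype=np.float32)                # grid x coordinates
    y_vec = np.arange(5, dtype=np.float32)                # grid y coordinates
    jx, jy = 2.0, 3.0                                     # example keypoint location
    exp_factor = 1.0 / (2.0 * 1.0 * 1.0)                  # sigma = 1.0
    gaussian1d_x = np.exp(-np.power(x_vec - jx, 2) * exp_factor)
    gaussian1d_y = np.exp(-np.power(y_vec - jy, 2) * exp_factor)
    heatblob = np.outer(gaussian1d_y, gaussian1d_x)       # 2-D gaussian peaked at (x=2, y=3)
    print(heatblob.round(2))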
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/dataloaders/processors/label_processor.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Augmentation Utils."""
from enum import Enum
import math
import cv2
import numpy as np
class AugmentationModes(str, Enum):
"""Enum class containing the augmentation modes.
    1. PERSON_CENTRIC: Augmentations are centered around each person in the image.
When the train data is compiled for this mode, each image is replicated
N times where N is the number of people in image with certain size criteria.
2. STANDARD: Augmentations are standard, i.e centered around the center of the image
and the aspect ratio of the image is retained.
3. STANDARD_WITH_FIXED_ASPECT_RATIO: Augmentations are standard, i.e centered around
the center of the image. But the aspect ratio is fixed to the network input aspect
ratio.
"""
PERSON_CENTRIC = "person_centric"
STANDARD = "standard"
STANDARD_WITH_FIXED_ASPECT_RATIO = "standard_with_fixed_aspect_ratio"
def flip_matrix_np(horizontal, vertical, width=None, height=None):
"""Construct a spatial transformation matrix that flips.
Note that if width and height are supplied, it will move the object back into the canvas
together with the flip.
Args:
horizontal (bool): If the flipping should be horizontal. Scalar.
vertical (bool): If the flipping should be vertical. Scalar.
width (int): the width of the canvas. Used for translating the coordinates into the canvas.
Defaults to None (no added translation).
height (int): the height of the canvas. Used for translating the coordinates back into the
canvas. Defaults to None (no added translation).
Returns:
(np.ndarray): A fp32 array of shape (3, 3), spatial transformation matrix if horizontal and
vertical are scalars.
"""
if (width is None) ^ (height is None):
raise ValueError(
"Variables `width` and `height` should both be defined, or both `None`."
)
elif width is not None and height is not None:
x_t = horizontal * width
y_t = vertical * height
else:
x_t = 0.0
y_t = 0.0
m = [[1 - 2.0 * horizontal, 0.0, x_t],
[0.0, 1 - 2.0 * vertical, y_t],
[0.0, 0.0, 1.0]]
return np.array(m, dtype=np.float32)
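# Example (illustrative, not part of the original module): a horizontal flip on a
# 640x480 canvas maps x -> (width - x) and leaves y unchanged.
#   m = flip_matrix_np(horizontal=True, vertical=False, width=640, height=480)
#   m.dot(np.array([10.0, 20.0, 1.0]))  # -> array([630., 20., 1.])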
def rotation_matrix_np(theta, width=None, height=None):
"""Construct a rotation transformation matrix.
Note that if width and height are supplied, it will rotate the coordinates around the canvas
center-point, so there will be a translation added to the rotation matrix.
Args:
theta (float): the rotation radian. Scalar.
width (int): the width of the canvas. Used for center rotation. Defaults to None
(no center rotation).
height (int): the height of the canvas. Used for center rotation. Defaults to None
(no center rotation).
Returns:
        (np.ndarray): A fp32 array of shape (3, 3); the spatial transformation matrix if theta
            is a scalar.
"""
cos_t = math.cos(theta)
sin_t = math.sin(theta)
if (width is None) ^ (height is None):
raise ValueError(
"Variables `width` and `height` should both be defined, or both `None`."
)
elif width is not None and height is not None:
x_t = height * sin_t / 2.0 - width * cos_t / 2.0 + width / 2.0
y_t = -1 * height * cos_t / 2.0 + height / 2.0 - width * sin_t / 2.0
else:
x_t = 0.0
y_t = 0.0
m = [[cos_t, -sin_t, x_t],
[sin_t, cos_t, y_t],
[0.0, 0.0, 1.0]]
return np.array(m, dtype=np.float32)
def translation_matrix_np(x, y):
"""Construct a spatial transformation matrix for translation.
Args:
x (float): the horizontal translation. Scalar.
y (float): the vertical translation. Scalar.
Returns:
        (np.ndarray): A fp32 array of shape (3, 3); the spatial transformation matrix if x and y
            are scalars.
"""
m = [[1.0, 0.0, x],
[0.0, 1.0, y],
[0.0, 0.0, 1.0]]
return np.array(m, dtype=np.float32)
def zoom_matrix_np(ratio, width=None, height=None):
"""Construct a spatial transformation matrix for zooming.
Note that if width and height are supplied, it will perform a center-zoom by translation.
Args:
ratio (float or tuple(2) of float): the zoom ratio. If a tuple of length 2 is supplied,
they distinguish between the horizontal and vertical zooming. Scalar or
a tuple of scalars.
width (int): the width of the canvas. Used for center-zooming. Defaults to None (no added
translation).
height (int): the height of the canvas. Used for center-zooming. Defaults to None (no added
translation).
Returns:
        (np.ndarray): A fp32 array of shape (3, 3); the spatial transformation matrix.
"""
if isinstance(ratio, tuple) and len(ratio) == 2:
r_x, r_y = ratio
else:
r_x, r_y = ratio, ratio
if (width is None) ^ (height is None):
raise ValueError(
"Variables `width` and `height` should both be defined, or both `None`."
)
elif width is not None and height is not None:
x_t = (width - width * r_x) * 0.5
y_t = (height - height * r_y) * 0.5
else:
x_t = 0.0
y_t = 0.0
m = [[r_x, 0.0, x_t],
[0.0, r_y, y_t],
[0.0, 0.0, 1.0]]
return np.array(m, dtype=np.float32)
def get_spatial_transformation_matrix_np(
target_width,
target_height,
image_width=None,
image_height=None,
stm=None,
flip_lr=False,
flip_tb=False,
translate_x=0,
translate_y=0,
zoom_ratio=1.0,
rotate_rad=0.0,
shear_ratio_x=0.0,
shear_ratio_y=0.0,
augmentation_mode=None,
batch_size=None,
):
"""
The spatial transformation matrix (stm) generator used for augmentation.
This function creates a spatial transformation matrix (stm) that can be used for
generic data augmentation, usually images or coordinates.
Args:
target_width (int): the width of the destination image canvas.
target_height (int): the height of the destination image canvas.
image_width (int): the width of the source image.
image_height (int): the height of the source image.
stm ((3,3) fp32 np.ndarray or None): A spatial transformation matrix produced in this
            function that will be used to transform images and coordinates spatially.
If ``None`` (default), an identity matrix will be generated.
flip_lr (bool): Flag to indicate whether to flip the image left/right or not.
flip_tb (bool): Flag to indicate whether to flip the image top/bottom or not.
translate_x (int): The amount by which to translate the image horizontally.
translate_y (int): The amount by which to translate the image vertically.
zoom_ratio (float): The ratio by which to zoom into the image. A zooming ratio of 1.0
will not affect the image, while values higher than 1 will result in 'zooming out'
(image gets rendered smaller than the canvas), and vice versa for values below 1.0.
rotate_rad (float): The rotation in radians.
shear_ratio_x (float): The amount to shear the horizontal direction per y row.
shear_ratio_y (float): The amount to shear the vertical direction per x column.
augmentation_mode (AugmentationModes): Augmentation mode to apply for the
images. Refer to the Enum class for details about the augmentation modes.
batch_size (int): If None, return a single matrix, else return a batch of matrices.
Returns:
(np.ndarray): If batch_size is None, a spatial transformation matrix of shape (3,3)
and type np.float32. If batch_size is not None, a tensor of shape (batch_size,3,3).
"""
# Initialize the spatial transform matrix as a 3x3 identity matrix
if stm is None:
stm = np.eye(3, dtype=np.float32)
if augmentation_mode == AugmentationModes.PERSON_CENTRIC:
# Align the center of the person of interest with the origin of the image.
# NOTE: This also includes a random shift in addition to the center of
# POI.
translate_transformation = translation_matrix_np(
translate_x, translate_y
)
stm = np.matmul(translate_transformation, stm)
# Apply rotation transform.
rotation_transformation = rotation_matrix_np(rotate_rad)
stm = np.matmul(rotation_transformation, stm)
# Apply zoom/scale transform.
zoom_transformation = zoom_matrix_np(zoom_ratio)
stm = np.matmul(zoom_transformation, stm)
# Apply horizontal flipping.
flip_transformation = flip_matrix_np(flip_lr, flip_tb)
stm = np.matmul(flip_transformation, stm)
# Align the origin back with the center of the image (once all the
# transformations are applied).
translate_transformation_2 = translation_matrix_np(
target_width // 2, target_height // 2
)
stm = np.matmul(translate_transformation_2, stm)
elif augmentation_mode in (
AugmentationModes.STANDARD, AugmentationModes.STANDARD_WITH_FIXED_ASPECT_RATIO):
# If mode is standard, retain aspect ratio of the original image
if augmentation_mode == AugmentationModes.STANDARD:
aspect_ratio = float(image_width) / float(image_height)
else:
aspect_ratio = 1.0
        # Estimate the spatial transformation matrix using quad -> rect mapping
quad_stm = get_quad_tranformation_matrix_np(
image_width,
image_height,
target_width,
target_height,
flip_lr=flip_lr,
flip_tb=flip_tb,
translate_x=translate_x,
translate_y=translate_y,
zoom_ratio=zoom_ratio,
rotate_rad=rotate_rad,
shear_ratio_x=shear_ratio_x,
shear_ratio_y=shear_ratio_y,
aspect_ratio=aspect_ratio
)
quad_stm = np.asarray(quad_stm, np.float32)
stm = np.matmul(quad_stm, stm)
return stm
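# Example (illustrative; the person center and canvas size below are made up):
# a person-centric matrix that moves the person center (150, 200) to the origin,
# rotates, zooms, flips and finally re-centers the result on a 368x368 canvas.
#   stm = get_spatial_transformation_matrix_np(
#       target_width=368, target_height=368,
#       translate_x=-150, translate_y=-200,
#       zoom_ratio=1.25, rotate_rad=0.1, flip_lr=True,
#       augmentation_mode=AugmentationModes.PERSON_CENTRIC)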
def get_quad_tranformation_matrix_np(
image_width,
image_height,
target_width,
target_height,
flip_lr=False,
flip_tb=False,
translate_x=0,
translate_y=0,
zoom_ratio=1.0,
rotate_rad=0.0,
shear_ratio_x=0.0,
shear_ratio_y=0.0,
aspect_ratio=1.0
):
"""
The spatial transformation matrix (stm) generated from quad -> rect mapping.
Args:
target_width (int): the width of the destination image canvas.
target_height (int): the height of the destination image canvas.
image_width (int): the width of the source image.
image_height (int): the height of the source image.
flip_lr (bool): Flag to indicate whether to flip the image left/right or not.
flip_tb (bool): Flag to indicate whether to flip the image top/bottom or not.
translate_x (int): The amount by which to translate the image horizontally.
translate_y (int): The amount by which to translate the image vertically.
zoom_ratio (float): The ratio by which to zoom into the image. A zooming ratio of 1.0
will not affect the image, while values higher than 1 will result in 'zooming out'
(image gets rendered smaller than the canvas), and vice versa for values below 1.0.
rotate_rad (float): The rotation in radians.
shear_ratio_x (float): The amount to shear the horizontal direction per y row.
shear_ratio_y (float): The amount to shear the vertical direction per x column.
aspect_ratio (float): The desired aspect ratio of the image in target canvas
Returns:
(np.ndarray): a spatial transformation matrix of shape (3,3) and type np.float32.
"""
# TODO: Add support for shearing
# NOTE: The quad is being estimated for unit scale
translate_x_ratio = translate_x / image_width
translate_y_ratio = translate_y / image_height
quad_ratio = get_quad_ratio_np(
flip_lr=flip_lr,
flip_tb=flip_tb,
translate=(translate_x_ratio, translate_y_ratio),
zoom_ratio=zoom_ratio,
rotate_rad=rotate_rad,
aspect_ratio=aspect_ratio)
# Convert to original image space
quad = np.zeros(quad_ratio.shape, quad_ratio.dtype)
quad[:, 0] = quad_ratio[:, 0] * image_width
quad[:, 1] = quad_ratio[:, 1] * image_height
# Convert to (top-left, top-right, bottom-right, bottom-left)
# cv2.getPerspectiveTransform expects in clockwise format
quad = np.array([
[quad[0][0], quad[0][1]],
[quad[3][0], quad[3][1]],
[quad[2][0], quad[2][1]],
[quad[1][0], quad[1][1]]], dtype=np.float32)
dst_rect = np.array([
[0, 0],
[target_width, 0],
[target_width, target_height],
[0, target_height]], dtype=np.float32)
# Compute perspective transformation from the source and destination points
stm = cv2.getPerspectiveTransform(quad, dst_rect)
return stm
def get_quad_ratio_np(
flip_lr=False,
flip_tb=False,
translate=(0.0, 0.0),
zoom_ratio=1.0,
rotate_rad=0.0,
shear_ratio_x=0.0,
shear_ratio_y=0.0,
aspect_ratio=1.0
):
"""
    The source quad for the quad -> rectangle mapping, generated using the desired augmentation params.
Note that the points of the quad are returned in unit scale and need to be scaled
to image space.
Args:
flip_lr (bool): Flag to indicate whether to flip the image left/right or not.
flip_tb (bool): Flag to indicate whether to flip the image top/bottom or not.
translate (tuple): (translate_x_ratio, translate_y_ratio).
zoom_ratio (float): The ratio by which to zoom into the image. A zooming ratio of 1.0
will not affect the image, while values higher than 1 will result in 'zooming out'
(image gets rendered smaller than the canvas), and vice versa for values below 1.0.
rotate_rad (float): The rotation in radians.
shear_ratio_x (float): The amount to shear the horizontal direction per y row.
shear_ratio_y (float): The amount to shear the vertical direction per x column.
aspect_ratio (float): The desired aspect ratio of the image in target canvas
Returns:
(np.ndarray): a quad array of shape (4,2) and type np.float32.
"""
if aspect_ratio > 1.0:
# Scenario where image_width > image_height
# Increase height region
quad = np.array([
[0.0, 0.5 - 0.5 * aspect_ratio],
[0.0, 0.5 + 0.5 * aspect_ratio],
[1.0, 0.5 + 0.5 * aspect_ratio],
[1.0, 0.5 - 0.5 * aspect_ratio]])
elif aspect_ratio < 1.0:
# Scenario where image_width < image_height
# Increase width region
quad = np.array([
[0.5 - 0.5 / aspect_ratio, 0.0],
[0.5 - 0.5 / aspect_ratio, 1.0],
[0.5 + 0.5 / aspect_ratio, 1.0],
[0.5 + 0.5 / aspect_ratio, 0.0]])
else:
# Scenario where image_width = image_height
# No change to target height and width
quad = np.array([
[0.0, 0.0],
[0.0, 1.0],
[1.0, 1.0],
[1.0, 0.0],
])
    # Shift the origin to the center of the image
    # to perform augmentations about the center
quad -= 0.5
    # Get the rotation matrix and apply rotation
R = np.array([
[np.cos(rotate_rad), -np.sin(rotate_rad)],
[np.sin(rotate_rad), np.cos(rotate_rad)]
])
quad = np.dot(quad, R)
# Apply translation
quad -= np.array(translate)
# Apply scaling
quad /= zoom_ratio
# Shift origin back to top left
quad += 0.5
    # Flip left-right if true
if flip_lr:
quad = np.array([
[quad[3][0], quad[3][1]],
[quad[2][0], quad[2][1]],
[quad[1][0], quad[1][1]],
[quad[0][0], quad[0][1]]])
# Flip top-bottom if true
if flip_tb:
quad = np.array([
[quad[1][0], quad[1][1]],
[quad[0][0], quad[0][1]],
[quad[3][0], quad[3][1]],
[quad[2][0], quad[2][1]]])
return quad
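if __name__ == "__main__":
    # Minimal self-test sketch (not part of the original module); the image size and
    # augmentation values below are made up for illustration only.
    image = np.zeros((480, 640, 3), dtype=np.uint8)
    stm = get_spatial_transformation_matrix_np(
        target_width=368,
        target_height=368,
        image_width=640,
        image_height=480,
        zoom_ratio=1.1,
        rotate_rad=0.05,
        augmentation_mode=AugmentationModes.STANDARD,
    )
    # Images are warped with the matrix directly; keypoints would use the same matrix
    # applied to homogeneous (x, y, 1) coordinates.
    warped = cv2.warpPerspective(image, stm, (368, 368))
    print(warped.shape)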
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/dataloaders/processors/augmentation_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BpNet Model utils."""
import os
import keras
def get_step_from_filename(path):
"""Gets the step number from a checkpoint filename.
Args:
path (str): path to the checkpoint.
Returns:
int: the step number.
"""
return int(os.path.basename(path).split('.')[1].split('-')[1])
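# Example (illustrative): a checkpoint saved as "model.keras-5000.hdf5" yields step 5000.
#   get_step_from_filename("/results/model.keras-5000.hdf5")  # -> 5000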
def get_latest_keras_model(results_dir):
"""Get the latest checkpoint path from a given results directory.
Parses through the directory to look for the latest keras file
and returns the path to this file.
Args:
results_dir (str): Path to the results directory.
Returns:
latest_checkpoint (str): Path to the latest checkpoint.
"""
trainable_ckpts = []
for item in os.listdir(results_dir):
if item.endswith(".hdf5"):
try:
step_num = get_step_from_filename(item)
trainable_ckpts.append(step_num)
except IndexError:
continue
num_ckpts = len(trainable_ckpts)
if num_ckpts == 0:
return None
latest_step = sorted(trainable_ckpts, reverse=True)[0]
latest_checkpoint = os.path.join(results_dir, "model.keras-{}.hdf5".format(latest_step))
return latest_checkpoint
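# Usage sketch (illustrative; the results directory below is hypothetical):
#   latest = get_latest_keras_model("/workspace/results")
#   # -> "/workspace/results/model.keras-12000.hdf5", or None if no .hdf5 checkpoints exist.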
def _print_model_summary_recurse(model):
"""Print model summary recursively.
    Helper function for printing nested models (i.e. models that have models as layers).
Args:
model: Keras model to print.
"""
model.summary()
for l in model.layers:
if isinstance(l, keras.engine.training.Model):
print('where %s is' % l.name)
_print_model_summary_recurse(l)
def print_model_summary(keras_model):
"""Print model summary.
Args:
        keras_model: Keras model to print.
"""
_print_model_summary_recurse(keras_model)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/utils/model_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BpNet Export utils."""
import keras
from keras import backend as K
import numpy as np
def get_upsample_kernel(shape, dtype=np.float32):
"""Get a nearest neighbour upsampling kernel.
Args:
shape (tuple): shape of the upsampling kernel.
dtype (np.dtype): kernel data type
Returns:
kernel: generated upsampling kernel.
"""
kernel = np.zeros((shape), dtype=dtype)
for i in range(shape[-1]):
kernel[:, :, i, i] = np.ones((shape[0], shape[1]), dtype=dtype)
return kernel
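# Example (illustrative): a 4x upsampling kernel for a 38-channel PAF tensor has shape
# (4, 4, 38, 38) and copies each input channel to the matching output channel, so a
# stride-4 Conv2DTranspose initialized with it acts as nearest-neighbour upsampling.
#   kernel = get_upsample_kernel((4, 4, 38, 38))
#   assert kernel.shape == (4, 4, 38, 38)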
class CustomUpsampleKernelInitializer:
"""Class for upsample kernel initialization."""
def __call__(self, shape, dtype=None):
"""Function to invoke kernel initializer."""
        return get_upsample_kernel(shape, dtype=dtype)
def update_model(model, sdk_compatible_model=False, upsample_ratio=4, use_conv_transpose=True):
"""Update the model with additonal/custom layers.
Args:
model (KerasModel): trained model
upsample_ratio (int): specifies the upsampling ratio for the upsample layer
Returns:
model (KerasModel): update model
custom_objects: Keras custom objects that are added to model
"""
# Check if the model is a pruned model.
    # If the given model has only 2 layers and one of the layers is an instance
# of keras.engine.training.Model, it is a pruned model. If so, extract
# the internal model for final export.
num_layers = len(model.layers)
if num_layers == 2:
for layer in model.layers:
if isinstance(layer, keras.engine.training.Model):
model = layer
num_stages = int(len(model.outputs) / 2)
heatmap_out = model.outputs[num_stages - 1]
paf_out = model.outputs[num_stages * 2 - 1]
custom_objects = None
if sdk_compatible_model:
if K.image_data_format() == 'channels_first':
num_paf_channels = paf_out.shape[1]
else:
num_paf_channels = int(paf_out.shape[-1])
# Add upsampling layer for paf
if use_conv_transpose:
paf_out = keras.layers.Conv2DTranspose(
num_paf_channels,
(upsample_ratio, upsample_ratio),
strides=(upsample_ratio, upsample_ratio),
kernel_initializer=get_upsample_kernel,
padding='same'
)(paf_out)
custom_objects = {
'get_upsample_kernel': CustomUpsampleKernelInitializer
}
else:
paf_out = keras.layers.UpSampling2D(
size=(upsample_ratio, upsample_ratio),
data_format=None,
interpolation='nearest',
name="paf_out"
)(paf_out)
updated_model = keras.models.Model(inputs=model.inputs, outputs=[paf_out, heatmap_out])
return updated_model, custom_objects
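# Usage sketch (illustrative; the checkpoint path below is hypothetical):
#   model = keras.models.load_model("/results/model.keras-12000.hdf5", compile=False)
#   export_model, custom_objects = update_model(
#       model, sdk_compatible_model=True, upsample_ratio=4, use_conv_transpose=True)
#   # `custom_objects` (if not None) must be supplied when reloading the exported model.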
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/utils/export_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BpNet Utils."""
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BpNet DataIO utils."""
from pycocotools import mask as mask_utils
def annotation_to_rle(segmentation, height, width):
"""
Convert annotation which can be polygons, uncompressed RLE to RLE.
Reference: https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/coco.py
Args:
segmentation: Can be polygons, uncompressed RLE, or RLE
height (int): Height of the image
width (int): Width of the image
Returns:
rle (list): Run length encoding
"""
if type(segmentation) == list:
# polygon -- a single object might consist of multiple parts
# we merge all parts into one mask rle code
rles = mask_utils.frPyObjects(segmentation, height, width)
rle = mask_utils.merge(rles)
elif type(segmentation['counts']) == list:
# uncompressed RLE
rle = mask_utils.frPyObjects(segmentation, height, width)
else:
# rle
rle = segmentation
return rle
def annotation_to_mask(segmentation, height, width):
"""
Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.
Reference: https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/coco.py
Args:
segmentation: Can be polygons, uncompressed RLE, or RLE
height (int): Height of the image
width (int): Width of the image
Returns:
binary mask (np.ndarray): Binary mask generated using the given annotation
"""
rle = annotation_to_rle(segmentation, height, width)
binary_mask = mask_utils.decode(rle)
return binary_mask
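if __name__ == "__main__":
    # Minimal self-check sketch (not part of the original module): a made-up triangular
    # polygon in COCO segmentation format, rasterized into a 64x64 binary mask.
    toy_polygon = [[10.0, 10.0, 50.0, 10.0, 30.0, 40.0]]
    mask = annotation_to_mask(toy_polygon, height=64, width=64)
    print(mask.shape, mask.sum())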
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/utils/dataio_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BpNet Model definitions."""
from nvidia_tao_tf1.cv.bpnet.models.bpnet_lite_model import BpNetLiteModel
from nvidia_tao_tf1.cv.bpnet.models.bpnet_model import BpNetModel
__all__ = (
'BpNetModel',
'BpNetLiteModel',
)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/models/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for BpNetLite model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from nvidia_tao_tf1.core.models.templates.utils import count_layers_by_class_name
from nvidia_tao_tf1.cv.bpnet.models.bpnet_lite_model import BpNetLiteModel
from nvidia_tao_tf1.cv.bpnet.models.templates.utils import add_input
NUM_DENSE_LAYERS = 0
NUM_RESHAPE_LAYERS = 0
NUM_DROPOUT_LAYERS = 0
def test_bpnet_lite_model_builder():
"""Test BpNetLite model builder."""
input_tensor = add_input(name='input', data_format='channels_last')
##################################
# BpNetLiteModel default params
##################################
default_params = {
'backbone_attributes': {
'architecture': 'vgg',
'mtype': 'default',
'use_bias': False
},
'heat_channels': 19,
'paf_channels': 38,
'stages': 6,
'stage1_kernel_sizes': [3, 3, 3],
'stageT_kernel_sizes': [7, 7, 7, 7, 7],
'use_self_attention': False,
'regularization_type': 'l2',
'kernel_regularization_factor': 5e-4,
'bias_regularization_factor': 0
}
model = BpNetLiteModel(**default_params)
model.build(input_tensor)
keras_model = model._keras_model
assert count_layers_by_class_name(keras_model, ['InputLayer']) == 1
assert count_layers_by_class_name(keras_model, ['Conv2D']) == 64
assert count_layers_by_class_name(keras_model,
['Dense']) == NUM_DENSE_LAYERS
assert count_layers_by_class_name(keras_model,
['Reshape']) == NUM_RESHAPE_LAYERS
assert count_layers_by_class_name(keras_model,
['Dropout']) == NUM_DROPOUT_LAYERS
assert count_layers_by_class_name(keras_model, ['MaxPooling2D']) == 0
assert count_layers_by_class_name(
keras_model, ['Concatenate']) == (default_params['stages'] - 1)
assert keras_model.count_params() == 30015638
#################################
# BpNetLiteModel with 3 stages
#################################
model_params = copy.deepcopy(default_params)
model_params['stages'] = 3
model = BpNetLiteModel(**model_params)
model.build(input_tensor)
keras_model = model._keras_model
assert count_layers_by_class_name(keras_model, ['InputLayer']) == 1
assert count_layers_by_class_name(keras_model, ['Conv2D']) == 37
assert count_layers_by_class_name(keras_model,
['Dense']) == NUM_DENSE_LAYERS
assert count_layers_by_class_name(keras_model,
['Reshape']) == NUM_RESHAPE_LAYERS
assert count_layers_by_class_name(keras_model,
['Dropout']) == NUM_DROPOUT_LAYERS
assert count_layers_by_class_name(keras_model, ['MaxPooling2D']) == 0
assert count_layers_by_class_name(
keras_model, ['Concatenate']) == (model_params['stages'] - 1)
assert keras_model.count_params() == 16777835
#####################################
# BpNetLiteModel with helnet18 base
#####################################
model_params = copy.deepcopy(default_params)
model_params['backbone_attributes'] = {
'architecture': 'helnet',
'mtype': 's8_3rdblock',
"nlayers": 18,
'use_batch_norm': False
}
model_params['stages'] = 3
model = BpNetLiteModel(**model_params)
model.build(input_tensor)
keras_model = model._keras_model
assert count_layers_by_class_name(keras_model, ['InputLayer']) == 1
assert count_layers_by_class_name(keras_model, ['Conv2D']) == 39
assert count_layers_by_class_name(keras_model,
['Dense']) == NUM_DENSE_LAYERS
assert count_layers_by_class_name(keras_model,
['Reshape']) == NUM_RESHAPE_LAYERS
assert count_layers_by_class_name(keras_model,
['Dropout']) == NUM_DROPOUT_LAYERS
assert count_layers_by_class_name(keras_model, ['MaxPooling2D']) == 0
assert count_layers_by_class_name(
keras_model, ['Concatenate']) == (model_params['stages'] - 1)
assert keras_model.count_params() == 12463531
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/models/test_bpnet_lite_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BpNetLiteModel model definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import keras
from keras.initializers import constant
from keras.initializers import glorot_uniform
from keras.initializers import random_normal
from keras.layers import Activation
from keras.layers import BatchNormalization
from keras.layers import Concatenate
from keras.layers import Conv2D
from keras.layers import Input
from keras.layers import MaxPooling2D
from keras.layers import Multiply
from keras.models import Model
import tensorflow as tf
from nvidia_tao_tf1.blocks.models.keras_model import KerasModel
import nvidia_tao_tf1.core as tao_core
from nvidia_tao_tf1.core.models.templates.utils import get_batchnorm_axis
from nvidia_tao_tf1.core.templates.resnet import ResNet
from nvidia_tao_tf1.cv.bpnet.models.templates.helnet import HelNet
from nvidia_tao_tf1.cv.bpnet.models.templates.vgg import VggNet
from nvidia_tao_tf1.cv.common.utilities.tlt_utils import encode_from_keras
class BpNetLiteModel(KerasModel):
"""BpNetLiteModel model definition.
    This model is a faster alternative to the BpNet model.
    Some differences/optimizations include:
        - Fused initial and refinement networks
        - Lighter interface layers between backbone and refinement stages
        - Self-attention layers
More information can be found here: https://confluence.nvidia.com/x/85mwFg
"""
@tao_core.coreobject.save_args
def __init__(self,
backbone_attributes,
stages=6,
heat_channels=19,
paf_channels=38,
stage1_kernel_sizes=None,
stageT_kernel_sizes=None,
use_self_attention=False,
data_format='channels_last',
use_bias=True,
regularization_type='l1',
kernel_regularization_factor=1e-9,
bias_regularization_factor=1e-9,
kernel_initializer='random_normal',
**kwargs):
"""Initialize the model.
Args:
            backbone_attributes (dict): Backbone configuration (architecture such as
                vgg/helnet/resnet and related options).
            stages (int): Number of stages of refinement in the network.
            heat_channels (int): Number of confidence map (heatmap) output channels.
            paf_channels (int): Number of part affinity field output channels.
            data_format (str): Channel ordering.
            regularization_type (str): 'l1', 'l2' or 'l1_l2'.
            kernel_regularization_factor (float): Kernel regularization weight.
            bias_regularization_factor (float): Bias regularization weight.
"""
super(BpNetLiteModel, self).__init__(**kwargs)
self._backbone_attributes = backbone_attributes
self._data_format = data_format
self._stages = stages
self._paf_stages = stages
self._cmap_stages = stages
self._heat_channels = heat_channels
self._paf_channels = paf_channels
if stage1_kernel_sizes is None:
self._stage1_kernel_sizes = [3, 3, 3]
else:
self._stage1_kernel_sizes = stage1_kernel_sizes
if stageT_kernel_sizes is None:
self._stageT_kernel_sizes = [7, 7, 7, 7, 7]
else:
self._stageT_kernel_sizes = stageT_kernel_sizes
self._use_self_attention = use_self_attention
self._use_bias = use_bias
if kernel_initializer == 'xavier':
self._kernel_initializer = glorot_uniform()
else:
self._kernel_initializer = random_normal(stddev=0.01)
self._bias_initializer = constant(0.0)
self._regularization_type = regularization_type
self._kernel_regularization_factor = kernel_regularization_factor
self._bias_regularization_factor = bias_regularization_factor
self._set_regularizer()
def _set_regularizer(self):
"""Return regularization function."""
if self._regularization_type == 'l1':
kernel_regularizer = keras.regularizers.l1(
self._kernel_regularization_factor)
bias_regularizer = keras.regularizers.l1(
self._bias_regularization_factor)
elif self._regularization_type == 'l2':
kernel_regularizer = keras.regularizers.l2(
self._kernel_regularization_factor)
bias_regularizer = keras.regularizers.l2(
self._bias_regularization_factor)
elif self._regularization_type == 'l1_l2':
kernel_regularizer = keras.regularizers.l1_l2(
self._kernel_regularization_factor)
bias_regularizer = keras.regularizers.l1_l2(
self._bias_regularization_factor)
else:
raise NotImplementedError(
"Regularization type: {} is not supported.".format(
self._regularization_type))
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
def regularization_losses(self):
"""Get the regularization losses.
Returns:
Scalar (tensor fp32) with the model dependent (regularization) losses.
"""
return tf.reduce_sum(self.keras_model.losses)
def _maxpool(self, input_tensor, name, kernel_size=(2, 2), stride=2):
"""Add MaxPool layer to the network.
Args:
input_tensor (Tensor): An input tensor object.
kernel_size (int): Size of the kernel.
stride (int): Size of the stride.
name (str): Name of the maxpool layer.
Returns:
tensor (Tensor): The output Tensor object after construction.
"""
return MaxPooling2D(kernel_size, (stride, stride),
padding='same',
name=name,
data_format=self._data_format)(input_tensor)
def _conv2d_block(self,
input_tensor,
num_filters,
kernel_size,
name,
stride=1,
activation_type=None,
use_bn=False):
"""Construct a convolution layer to the network.
Args:
input_tensor (Tensor): An input tensor object.
num_filters (int): Number of filters.
kernel_size (int): Size of the kernel.
stride (int): Size of the stride.
name (str): Name of the conv block.
Returns:
tensor (Tensor): The output Tensor object after construction.
"""
conv_layer = Conv2D(num_filters,
kernel_size=kernel_size,
strides=(stride, stride),
padding='same',
data_format=self._data_format,
use_bias=self._use_bias,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
name=name)
tensor = conv_layer(input_tensor)
if use_bn:
tensor = BatchNormalization(axis=get_batchnorm_axis(
self._data_format),
name=name + "/BN")(tensor)
if activation_type is not None:
tensor = Activation(activation_type,
name=name + "/" + activation_type)(tensor)
return tensor
def _build_vgg_backbone(self, x):
"""Build a VGG backbone network.
Args:
x (Tensor): Input tensor.
Returns:
x (Tensor): Output Tensor (feature map)
"""
# Block 1
x = self._conv2d_block(x,
64, (3, 3),
name='block1_conv1',
activation_type='relu')
x = self._conv2d_block(x,
64, (3, 3),
name='block1_conv2',
activation_type='relu')
x = self._maxpool(x, name='block1_pool')
# Block 2
x = self._conv2d_block(x,
128, (3, 3),
name='block2_conv1',
activation_type='relu')
x = self._conv2d_block(x,
128, (3, 3),
name='block2_conv2',
activation_type='relu')
x = self._maxpool(x, name='block2_pool')
# Block 3
x = self._conv2d_block(x,
256, (3, 3),
name='block3_conv1',
activation_type='relu')
x = self._conv2d_block(x,
256, (3, 3),
name='block3_conv2',
activation_type='relu')
x = self._conv2d_block(x,
256, (3, 3),
name='block3_conv3',
activation_type='relu')
x = self._conv2d_block(x,
256, (3, 3),
name='block3_conv4',
activation_type='relu')
x = self._maxpool(x, name='block3_pool')
# Block 4
x = self._conv2d_block(x,
512, (3, 3),
name='block4_conv1',
activation_type='relu')
x = self._conv2d_block(x,
512, (3, 3),
name='block4_conv2',
activation_type='relu')
# Non-VGG layers
x = self._conv2d_block(x,
256, (3, 3),
name='interface/conv4_3',
activation_type='relu')
x = self._conv2d_block(x,
128, (3, 3),
name='interface/conv4_4',
activation_type='relu')
return x
def _build_stage1(self,
x,
paf_channels,
cmap_channels,
num_channels,
kernel_sizes,
scope_name='stage1/'):
"""Build the first stage of body pose estimation network.
Args:
x (Tensor): Input tensor.
            paf_channels (int): Number of part affinity field output channels.
            cmap_channels (int): Number of confidence map output channels.
            num_channels (int): Number of channels in the intermediate conv layers.
            kernel_sizes (list): Kernel sizes of the stage's conv layers.
            scope_name (str): Scope name for the stage (ex. stage1/)
        Returns:
            paf_out (Tensor): Output PAF tensor.
            cmap_out (Tensor): Output confidence map tensor.
"""
for cid, kernel_size in enumerate(kernel_sizes):
x = self._conv2d_block(x,
num_channels,
kernel_size,
name=scope_name + 'conv{}'.format(cid + 1),
activation_type='relu')
# Split into PAF and CMAP branches
paf_out = self._conv2d_block(
x,
512, (1, 1),
name=scope_name + 'paf/conv{}'.format(len(kernel_sizes) + 1),
activation_type='relu')
cmap_out = self._conv2d_block(
x,
512, (1, 1),
name=scope_name + 'cmap/conv{}'.format(len(kernel_sizes) + 1),
activation_type='relu')
paf_out = self._conv2d_block(paf_out,
paf_channels, (1, 1),
name=scope_name + 'paf_branch/out')
cmap_out = self._conv2d_block(cmap_out,
cmap_channels, (1, 1),
name=scope_name + 'heat_branch/out')
return paf_out, cmap_out
def _build_stageT(self,
x,
paf_channels,
cmap_channels,
scope_name,
kernel_sizes,
num_channels=128,
is_final_block=False):
"""Build the first stage of body pose estimation network.
Args:
x (Tensor): Input tensor.
out_channels (int): Number of final output channels
(depends on number of parts and branch)
scope_name (str): Scope name for the stage
(ex. stage2/heat_branch, stage3/paf_branch etc.)
Returns:
x (Tensor): Output Tensor
"""
for cid, kernel_size in enumerate(kernel_sizes):
x = self._conv2d_block(x,
num_channels,
kernel_size,
name=scope_name + 'conv{}'.format(cid + 1),
activation_type='relu')
# Split into PAF and CMAP branches
paf_out = self._conv2d_block(
x,
num_channels, (1, 1),
name=scope_name + 'paf/conv{}'.format(len(kernel_sizes) + 1),
activation_type='relu')
cmap_out = self._conv2d_block(
x,
num_channels, (1, 1),
name=scope_name + 'cmap/conv{}'.format(len(kernel_sizes) + 1),
activation_type='relu')
# Self attention block
if self._use_self_attention:
paf_att = self._conv2d_block(paf_out,
num_channels, (3, 3),
name=scope_name +
'paf/attention_conv',
activation_type='tanh')
cmap_att = self._conv2d_block(cmap_out,
num_channels, (3, 3),
name=scope_name +
'cmap/attention_conv',
activation_type='sigmoid')
# apply attention maps
paf_out = Multiply()([paf_out, paf_att])
cmap_out = Multiply()([cmap_out, cmap_att])
if is_final_block:
last_paf_layer_name = 'paf_out'
last_cmap_layer_name = 'heatmap_out'
else:
last_paf_layer_name = scope_name + 'paf_branch/out'
last_cmap_layer_name = scope_name + 'heat_branch/out'
paf_out = self._conv2d_block(paf_out,
paf_channels, (1, 1),
name=last_paf_layer_name)
cmap_out = self._conv2d_block(cmap_out,
cmap_channels, (1, 1),
name=last_cmap_layer_name)
return paf_out, cmap_out
def build(self, input_image):
"""Create a Keras model to perform body pose estimation.
Args:
inputs (4D tensor): the input images.
"""
cmap_outputs = []
paf_outputs = []
# Define the inputs to the network
input_layer = Input(tensor=input_image,
shape=(None, None, 3),
name='input_1')
# Add Backbone network
# VGG backbone
if self._backbone_attributes["architecture"] == 'vgg':
self._backbone_attributes["nlayers"] = self._backbone_attributes.get("nlayers", 19)
assert self._backbone_attributes["nlayers"] == 19, "Only VGG19 is supported currently."
use_bias = self._backbone_attributes["use_bias"]
model = VggNet(self._backbone_attributes["nlayers"],
input_layer,
use_batch_norm=True,
data_format=self._data_format,
use_pooling=False,
use_bias=use_bias,
use_modified_vgg=True,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
feat = model.outputs[0]
# Helnet backbone
elif self._backbone_attributes["architecture"] == 'helnet':
model = HelNet(
self._backbone_attributes["nlayers"],
input_layer,
self._backbone_attributes["mtype"],
use_last_block=False,
use_batch_norm=self._backbone_attributes["use_batch_norm"],
data_format=self._data_format)
feat = model.outputs[0]
# Resnet backbone
elif self._backbone_attributes["architecture"] == 'resnet':
model = ResNet(
self._backbone_attributes["nlayers"],
input_layer,
use_batch_norm=self._backbone_attributes["use_batch_norm"],
data_format=self._data_format)
feat = model.outputs[0]
# Else raise error
else:
raise NotImplementedError(
"Backbone network: {} is not supported.".format(
self._backbone_attributes["architecture"]))
        # Add a convolution with 128 kernels to reduce the depth
        # of the backbone feature map before the refinement stages.
feat = self._conv2d_block(feat,
128, (3, 3),
name='channel_reduction_conv',
activation_type='relu')
# Add Stage 1 network
paf_out, cmap_out = self._build_stage1(
feat,
self._paf_channels,
self._heat_channels,
num_channels=128,
kernel_sizes=self._stage1_kernel_sizes,
scope_name="stage1/")
paf_outputs.append(paf_out)
cmap_outputs.append(cmap_out)
# Add Stages >= 2
for stage_idx in range(2, self._stages + 1):
x = Concatenate()([feat, cmap_outputs[-1], paf_outputs[-1]])
paf_out, cmap_out = self._build_stageT(
x,
self._paf_channels,
self._heat_channels,
"stage{}/".format(stage_idx),
kernel_sizes=self._stageT_kernel_sizes,
num_channels=128,
is_final_block=(stage_idx == self._stages))
paf_outputs.append(paf_out)
cmap_outputs.append(cmap_out)
model = Model(inputs=input_layer, outputs=cmap_outputs + paf_outputs)
self._keras_model = model
return self._keras_model.outputs
def get_lr_multipiers(self):
"""Get the Learning rate multipliers for different stages of the model."""
# setup lr multipliers for conv layers
lr_mult = dict()
for layer in self._keras_model.layers:
if isinstance(layer, Conv2D):
# stage = 1
if re.match("stage1.*", layer.name):
kernel_name = layer.weights[0].name.split(':')[0]
lr_mult[kernel_name] = 1
if len(layer.weights) > 1:
bias_name = layer.weights[1].name.split(':')[0]
lr_mult[bias_name] = 2
# stage > 1
elif re.match("stage.*", layer.name):
kernel_name = layer.weights[0].name.split(':')[0]
lr_mult[kernel_name] = 4
if len(layer.weights) > 1:
bias_name = layer.weights[1].name.split(':')[0]
lr_mult[bias_name] = 8
# output nodes
elif re.match(".*out", layer.name):
kernel_name = layer.weights[0].name.split(':')[0]
lr_mult[kernel_name] = 4
if len(layer.weights) > 1:
bias_name = layer.weights[1].name.split(':')[0]
lr_mult[bias_name] = 8
# vgg
else:
# Commented for TLT branch
# logger.info("Layer matched as backbone layer: {}".format(layer.name))
kernel_name = layer.weights[0].name.split(':')[0]
lr_mult[kernel_name] = 1
if len(layer.weights) > 1:
bias_name = layer.weights[1].name.split(':')[0]
lr_mult[bias_name] = 2
return lr_mult
def save_model(self, file_name, enc_key=None):
"""Save the model to disk.
Args:
file_name (str): Model file name.
enc_key (str): Key string for encryption.
"""
self.keras_model.save(file_name, overwrite=True)
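# Usage sketch (illustrative; mirrors the unit tests, parameter values are examples only):
#   from nvidia_tao_tf1.cv.bpnet.models.templates.utils import add_input
#   input_tensor = add_input(name='input', data_format='channels_last')
#   model = BpNetLiteModel(
#       backbone_attributes={'architecture': 'vgg', 'mtype': 'default', 'use_bias': False},
#       stages=6, heat_channels=19, paf_channels=38,
#       regularization_type='l2', kernel_regularization_factor=5e-4,
#       bias_regularization_factor=0)
#   outputs = model.build(input_tensor)  # confidence map outputs followed by PAF outputs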
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/models/bpnet_lite_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BpNetModel model definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import re
import keras
from keras.initializers import constant
from keras.initializers import glorot_uniform
from keras.initializers import random_normal
from keras.layers import Activation
from keras.layers import BatchNormalization
from keras.layers import Concatenate
from keras.layers import Conv2D
from keras.layers import Input
from keras.layers import MaxPooling2D
from keras.layers import Multiply
from keras.models import Model
import tensorflow as tf
import nvidia_tao_tf1.core as tao_core
from nvidia_tao_tf1.blocks.models.keras_model import KerasModel
from nvidia_tao_tf1.core.models.templates.utils import get_batchnorm_axis
from nvidia_tao_tf1.core.templates.resnet import ResNet
from nvidia_tao_tf1.cv.bpnet.models.templates.helnet import HelNet
from nvidia_tao_tf1.cv.bpnet.models.templates.vgg import VggNet
from nvidia_tao_tf1.cv.common.utilities.tlt_utils import encode_from_keras
logger = logging.getLogger(__name__)
class BpNetModel(KerasModel):
"""BpNet model definition."""
@tao_core.coreobject.save_args
def __init__(self,
backbone_attributes,
stages=6,
heat_channels=19,
paf_channels=38,
use_self_attention=False,
data_format='channels_last',
use_bias=True,
regularization_type='l1',
kernel_regularization_factor=1e-9,
bias_regularization_factor=1e-9,
kernel_initializer='random_normal',
**kwargs):
"""Initialize the model.
Args:
            backbone_attributes (dict): Backbone configuration (architecture such as
                vgg/helnet/resnet and related options).
            stages (int): Number of stages of refinement in the network.
            heat_channels (int): Number of confidence map (heatmap) output channels.
            paf_channels (int): Number of part affinity field output channels.
            data_format (str): Channel ordering.
            regularization_type (str): 'l1', 'l2' or 'l1_l2'.
            kernel_regularization_factor (float): Kernel regularization weight.
            bias_regularization_factor (float): Bias regularization weight.
"""
super(BpNetModel, self).__init__(**kwargs)
self._backbone_attributes = backbone_attributes
self._data_format = data_format
self._stages = stages
self._paf_stages = stages
self._cmap_stages = stages
self._heat_channels = heat_channels
self._paf_channels = paf_channels
self._use_self_attention = use_self_attention
self._use_bias = use_bias
if kernel_initializer == 'xavier':
self._kernel_initializer = glorot_uniform()
else:
self._kernel_initializer = random_normal(stddev=0.01)
self._bias_initializer = constant(0.0)
self._regularization_type = regularization_type
self._kernel_regularization_factor = kernel_regularization_factor
self._bias_regularization_factor = bias_regularization_factor
self._set_regularizer()
# TODO: move to utils
def _set_regularizer(self):
"""Return regularization function."""
if self._regularization_type == 'l1':
kernel_regularizer = keras.regularizers.l1(
self._kernel_regularization_factor)
bias_regularizer = keras.regularizers.l1(
self._bias_regularization_factor)
elif self._regularization_type == 'l2':
kernel_regularizer = keras.regularizers.l2(
self._kernel_regularization_factor)
bias_regularizer = keras.regularizers.l2(
self._bias_regularization_factor)
elif self._regularization_type == 'l1_l2':
kernel_regularizer = keras.regularizers.l1_l2(
self._kernel_regularization_factor)
bias_regularizer = keras.regularizers.l1_l2(
self._bias_regularization_factor)
else:
raise NotImplementedError(
"Regularization type: {} is not supported.".format(
self._regularization_type))
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
def regularization_losses(self):
"""Get the regularization losses.
Returns:
Scalar (tensor fp32) with the model dependent (regularization) losses.
"""
return tf.reduce_sum(self.keras_model.losses)
# TODO (sakthivels): move to layers.py
def _maxpool(self, input_tensor, name, kernel_size=(2, 2), stride=2):
"""Add MaxPool layer to the network.
Args:
input_tensor (Tensor): An input tensor object.
kernel_size (int): Size of the kernel.
stride (int): Size of the stride.
name (str): Name of the maxpool layer.
Returns:
tensor (Tensor): The output Tensor object after construction.
"""
return MaxPooling2D(kernel_size, (stride, stride),
padding='same',
name=name,
data_format=self._data_format)(input_tensor)
    # TODO (sakthivels): move to layers.py (and remove regularizers
    # and initializers. use update_regularizers in utils.py)
def _conv2d_block(self,
input_tensor,
num_filters,
kernel_size,
name,
stride=1,
activation_type=None,
use_bn=False):
"""Construct a convolution layer to the network.
Args:
input_tensor (Tensor): An input tensor object.
num_filters (int): Number of filters.
kernel_size (int): Size of the kernel.
stride (int): Size of the stride.
name (str): Name of the conv block.
Returns:
tensor (Tensor): The output Tensor object after construction.
"""
conv_layer = Conv2D(num_filters,
kernel_size=kernel_size,
strides=(stride, stride),
padding='same',
data_format=self._data_format,
use_bias=self._use_bias,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
name=name)
tensor = conv_layer(input_tensor)
if use_bn:
tensor = BatchNormalization(axis=get_batchnorm_axis(
self._data_format),
name=name + "/BN")(tensor)
if activation_type is not None:
tensor = Activation(activation_type,
name=name + "/" + activation_type)(tensor)
return tensor
# TODO (sakthivels): move to base_models and add get_customvgg to cut off at layer 10
def _build_vgg_backbone(self, x):
"""Build a VGG backbone network.
Args:
x (Tensor): Input tensor.
Returns:
x (Tensor): Output Tensor (feature map)
"""
# Block 1
x = self._conv2d_block(x,
64, (3, 3),
name='block1_conv1',
activation_type='relu')
x = self._conv2d_block(x,
64, (3, 3),
name='block1_conv2',
activation_type='relu')
x = self._maxpool(x, name='block1_pool')
# Block 2
x = self._conv2d_block(x,
128, (3, 3),
name='block2_conv1',
activation_type='relu')
x = self._conv2d_block(x,
128, (3, 3),
name='block2_conv2',
activation_type='relu')
x = self._maxpool(x, name='block2_pool')
# Block 3
x = self._conv2d_block(x,
256, (3, 3),
name='block3_conv1',
activation_type='relu')
x = self._conv2d_block(x,
256, (3, 3),
name='block3_conv2',
activation_type='relu')
x = self._conv2d_block(x,
256, (3, 3),
name='block3_conv3',
activation_type='relu')
x = self._conv2d_block(x,
256, (3, 3),
name='block3_conv4',
activation_type='relu')
x = self._maxpool(x, name='block3_pool')
# Block 4
x = self._conv2d_block(x,
512, (3, 3),
name='block4_conv1',
activation_type='relu')
x = self._conv2d_block(x,
512, (3, 3),
name='block4_conv2',
activation_type='relu')
# Non-VGG layers
x = self._conv2d_block(x,
256, (3, 3),
name='interface/conv4_3',
activation_type='relu')
x = self._conv2d_block(x,
128, (3, 3),
name='interface/conv4_4',
activation_type='relu')
return x
    # TODO (sakthivels): add as a separate head (initial_stages)
def _build_stage1(self,
x,
out_channels,
num_channels=128,
scope_name='stage1/'):
"""Build the first stage of body pose estimation network.
Args:
x (Tensor): Input tensor.
out_channels (int): Number of final output channels
(depends on number of parts and branch)
scope_name (str): Scope name for the stage (ex. stage1/heat_branch)
Returns:
x (Tensor): Output Tensor
"""
x = self._conv2d_block(x,
num_channels, (3, 3),
name=scope_name + 'conv1',
activation_type='relu')
x = self._conv2d_block(x,
num_channels, (3, 3),
name=scope_name + 'conv2',
activation_type='relu')
x = self._conv2d_block(x,
num_channels, (3, 3),
name=scope_name + 'conv3',
activation_type='relu')
x = self._conv2d_block(x,
512, (1, 1),
name=scope_name + 'conv4',
activation_type='relu')
x = self._conv2d_block(x,
out_channels, (1, 1),
name=scope_name + 'out')
return x
    # TODO (sakthivels): add as a separate head (refinement_stages)
def _build_stageT(self,
x,
out_channels,
scope_name,
branch_type,
num_channels=128,
kernel_size=(7, 7),
is_final_block=False):
"""Build the first stage of body pose estimation network.
Args:
x (Tensor): Input tensor.
out_channels (int): Number of final output channels
(depends on number of parts and branch)
scope_name (str): Scope name for the stage
(ex. stage2/heat_branch, stage3/paf_branch etc.)
Returns:
x (Tensor): Output Tensor
"""
x = self._conv2d_block(x,
num_channels,
kernel_size,
name=scope_name + 'conv1',
activation_type='relu')
x = self._conv2d_block(x,
num_channels,
kernel_size,
name=scope_name + 'conv2',
activation_type='relu')
x = self._conv2d_block(x,
num_channels,
kernel_size,
name=scope_name + 'conv3',
activation_type='relu')
x = self._conv2d_block(x,
num_channels,
kernel_size,
name=scope_name + 'conv4',
activation_type='relu')
x = self._conv2d_block(x,
num_channels,
kernel_size,
name=scope_name + 'conv5',
activation_type='relu')
x = self._conv2d_block(x,
num_channels, (1, 1),
name=scope_name + 'conv6',
activation_type='relu')
# Self attention block
        # TODO (sakthivels): add as a separate head (self_attention_head)
if self._use_self_attention:
if branch_type == "paf":
activation_type = 'tanh'
else:
activation_type = 'sigmoid'
out_att = self._conv2d_block(x,
num_channels, (3, 3),
name=scope_name + 'attention_conv',
activation_type=activation_type)
# apply attention maps
x = Multiply()([x, out_att])
# Final layer naming
if is_final_block:
last_layer_name = "{}_{}".format(branch_type, 'out')
else:
last_layer_name = "{}{}".format(scope_name, 'out')
x = self._conv2d_block(x,
out_channels, (1, 1),
name=last_layer_name)
return x
# TODO (sakthivels): build model by adding blocks to base in loop (in config as list)
def build(self, input_image):
"""Create a Keras model to perform body pose estimation.
Args:
inputs (4D tensor): the input images.
"""
heat_outputs = []
paf_outputs = []
# Define the inputs to the network
input_layer = Input(tensor=input_image,
shape=(None, None, 3),
name='input_1')
# Add Backbone network
# VGG backbone
if self._backbone_attributes["architecture"] == 'vgg':
self._backbone_attributes["nlayers"] = self._backbone_attributes.get("nlayers", 19)
assert self._backbone_attributes["nlayers"] == 19, "Only VGG19 is supported currently."
use_bias = self._backbone_attributes["use_bias"]
model = VggNet(self._backbone_attributes["nlayers"],
input_layer,
use_batch_norm=True,
data_format=self._data_format,
use_pooling=False,
use_bias=use_bias,
use_modified_vgg=True,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
feat = model.outputs[0]
# Helnet backbone
elif self._backbone_attributes["architecture"] == 'helnet':
model = HelNet(
self._backbone_attributes["nlayers"],
input_layer,
self._backbone_attributes["mtype"],
use_last_block=False,
use_batch_norm=self._backbone_attributes["use_batch_norm"],
data_format=self._data_format)
feat = model.outputs[0]
# Resnet backbone
elif self._backbone_attributes["architecture"] == 'resnet':
model = ResNet(
self._backbone_attributes["nlayers"],
input_layer,
use_batch_norm=self._backbone_attributes["use_batch_norm"],
data_format=self._data_format)
feat = model.outputs[0]
# Else raise error
else:
raise NotImplementedError(
"Backbone network: {} is not supported.".format(
self._backbone_attributes["architecture"]))
        # Add a convolution with 128 kernels to reduce the depth
        # of the backbone feature map before the refinement stages.
feat = self._conv2d_block(feat,
128, (3, 3),
name='channel_reduction_conv',
activation_type='relu')
# Add Stage 1 network
paf_outputs.append(
self._build_stage1(feat,
self._paf_channels,
num_channels=128,
scope_name="stage1/paf_branch/"))
heat_outputs.append(
self._build_stage1(feat,
self._heat_channels,
num_channels=128,
scope_name="stage1/heat_branch/"))
# Add Stages >= 2
for stage_idx in range(2, self._stages + 1):
x = Concatenate()([feat, heat_outputs[-1], paf_outputs[-1]])
paf_outputs.append(
self._build_stageT(x, self._paf_channels,
"stage{}/paf_branch/".format(stage_idx),
"paf", is_final_block=(stage_idx == self._stages)))
heat_outputs.append(
self._build_stageT(x, self._heat_channels,
"stage{}/heat_branch/".format(stage_idx),
"heatmap", is_final_block=(stage_idx == self._stages)))
model = Model(inputs=input_layer, outputs=heat_outputs + paf_outputs)
self._keras_model = model
return self._keras_model.outputs
def get_lr_multipiers(self):
"""Get the Learning rate multipliers for different stages of the model."""
# setup lr multipliers for conv layers
lr_mult = dict()
for layer in self._keras_model.layers:
if isinstance(layer, Conv2D):
# stage = 1
if re.match("stage1.*", layer.name):
kernel_name = layer.weights[0].name.split(':')[0]
lr_mult[kernel_name] = 1
if len(layer.weights) > 1:
bias_name = layer.weights[1].name.split(':')[0]
lr_mult[bias_name] = 2
# stage > 1
elif re.match("stage.*", layer.name):
kernel_name = layer.weights[0].name.split(':')[0]
lr_mult[kernel_name] = 4
if len(layer.weights) > 1:
bias_name = layer.weights[1].name.split(':')[0]
lr_mult[bias_name] = 8
# output nodes
elif re.match(".*out", layer.name):
kernel_name = layer.weights[0].name.split(':')[0]
lr_mult[kernel_name] = 4
if len(layer.weights) > 1:
bias_name = layer.weights[1].name.split(':')[0]
lr_mult[bias_name] = 8
# vgg
else:
# Commented for TLT branch
# logger.info("Layer matched as backbone layer: {}".format(layer.name))
kernel_name = layer.weights[0].name.split(':')[0]
lr_mult[kernel_name] = 1
if len(layer.weights) > 1:
bias_name = layer.weights[1].name.split(':')[0]
lr_mult[bias_name] = 2
return lr_mult
def save_model(self, file_name, enc_key=None):
"""Save the model to disk.
Args:
file_name (str): Model file name.
enc_key (str): Key string for encryption.
"""
self.keras_model.save(file_name, overwrite=True)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/models/bpnet_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for BpNet model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from nvidia_tao_tf1.core.models.templates.utils import count_layers_by_class_name
from nvidia_tao_tf1.cv.bpnet.models.bpnet_model import BpNetModel
from nvidia_tao_tf1.cv.bpnet.models.templates.utils import add_input
NUM_DENSE_LAYERS = 0
NUM_RESHAPE_LAYERS = 0
NUM_DROPOUT_LAYERS = 0
def test_bpnet_model_builder():
"""Test BpNet model builder."""
input_tensor = add_input(name='input', data_format='channels_last')
#############################
# BpNetModel default params
#############################
default_params = {
'backbone_attributes': {
'architecture': 'vgg',
'mtype': 'default',
'use_bias': False
},
'use_self_attention': False,
'stages': 6,
'regularization_type': 'l2',
'kernel_regularization_factor': 5e-4,
'bias_regularization_factor': 0,
'heat_channels': 19,
'paf_channels': 38
}
model = BpNetModel(**default_params)
model.build(input_tensor)
keras_model = model._keras_model
assert count_layers_by_class_name(keras_model, ['InputLayer']) == 1
assert count_layers_by_class_name(keras_model, ['Conv2D']) == 92
assert count_layers_by_class_name(keras_model,
['Dense']) == NUM_DENSE_LAYERS
assert count_layers_by_class_name(keras_model,
['Reshape']) == NUM_RESHAPE_LAYERS
assert count_layers_by_class_name(keras_model,
['Dropout']) == NUM_DROPOUT_LAYERS
assert count_layers_by_class_name(keras_model, ['MaxPooling2D']) == 0
assert count_layers_by_class_name(
keras_model, ['Concatenate']) == (default_params['stages'] - 1)
assert keras_model.count_params() == 52319510
############################
# BpNetModel with 3 stages
############################
model_params = copy.deepcopy(default_params)
model_params['stages'] = 3
model = BpNetModel(**model_params)
model.build(input_tensor)
keras_model = model._keras_model
assert count_layers_by_class_name(keras_model, ['InputLayer']) == 1
assert count_layers_by_class_name(keras_model, ['Conv2D']) == 50
assert count_layers_by_class_name(keras_model,
['Dense']) == NUM_DENSE_LAYERS
assert count_layers_by_class_name(keras_model,
['Reshape']) == NUM_RESHAPE_LAYERS
assert count_layers_by_class_name(keras_model,
['Dropout']) == NUM_DROPOUT_LAYERS
assert count_layers_by_class_name(keras_model, ['MaxPooling2D']) == 0
assert count_layers_by_class_name(
keras_model, ['Concatenate']) == (model_params['stages'] - 1)
assert keras_model.count_params() == 25965035
################################
# BpNetModel with helnet10 base
################################
model_params = copy.deepcopy(default_params)
model_params['backbone_attributes'] = {
'architecture': 'helnet',
'mtype': 's8_3rdblock',
"nlayers": 10,
'use_batch_norm': False
}
model = BpNetModel(**model_params)
model.build(input_tensor)
keras_model = model._keras_model
assert count_layers_by_class_name(keras_model, ['InputLayer']) == 1
assert count_layers_by_class_name(keras_model, ['Conv2D']) == 88
assert count_layers_by_class_name(keras_model,
['Dense']) == NUM_DENSE_LAYERS
assert count_layers_by_class_name(keras_model,
['Reshape']) == NUM_RESHAPE_LAYERS
assert count_layers_by_class_name(keras_model,
['Dropout']) == NUM_DROPOUT_LAYERS
assert count_layers_by_class_name(keras_model, ['MaxPooling2D']) == 0
assert count_layers_by_class_name(
keras_model, ['Concatenate']) == (model_params['stages'] - 1)
assert keras_model.count_params() == 46456022
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/models/test_bpnet_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Maglev model templates for VGG16/19."""
from keras import backend as K
from keras.layers import Dense, Dropout, Flatten
from keras.layers import MaxPooling2D
from keras.models import Model
from nvidia_tao_tf1.core.templates.utils import arg_scope
from nvidia_tao_tf1.core.templates.utils import CNNBlock
def VggNet(nlayers, inputs, use_batch_norm=False, data_format=None, add_head=False,
nclasses=None, kernel_regularizer=None, bias_regularizer=None, activation_type='relu',
use_pooling=True, freeze_bn=False, freeze_blocks=None, use_bias=True,
dropout=0.5, use_modified_vgg=False):
"""
Construct a fixed-depth VggNet, based on the architectures from the original paper [1].
Args:
nlayers (int): the number of layers in the desired VGG (e.g. 16, 19).
inputs (tensor): the input tensor.
use_batch_norm (bool): whether batchnorm should be added after each convolution.
data_format (str): either 'channels_last' (NHWC) or 'channels_first' (NCHW).
add_head (bool): whether to add the original [1] classification head. Note that if you
don't include the head, the actual number of layers in the model produced by this
function is `nlayers - 3`, as it does not include the last 3 FC layers.
nclasses (int): the number of classes to be added to the classification head. Can be `None`
if unused.
kernel_regularizer: regularizer to apply to kernels.
bias_regularizer: regularizer to apply to biases.
activation_type (str): activation function to use after each convolution, e.g. 'relu'.
use_pooling (bool): whether to downsample with a MaxPooling2D layer after each block,
or with a stride of 2 in the first convolution of the subsequent block instead.
freeze_bn(bool): Whether or not to freeze the BN layers.
freeze_blocks(list): the list of blocks in the model to be frozen.
use_bias(bool): whether or not to use bias for the conv layers.
dropout(float): The drop rate for dropout.
use_modified_vgg(bool): whether to use the modified VGG19 variant (a truncated block 4
followed by an extra 3x3, 256-filter block) instead of the original block 4/5 layout.
Returns:
Model: the output model after applying the VggNet on top of input `x`.
[1] Very Deep Convolutional Networks for Large-Scale Image Recognition
(https://arxiv.org/abs/1409.1556)
"""
if data_format is None:
data_format = K.image_data_format()
if freeze_blocks is None:
freeze_blocks = []
# Perform strided convolutions if pooling disabled.
first_stride = 1
stride = 2
if use_pooling:
# Disable strided convolutions with pooling enabled.
stride = 1
# Define a block functor which can create blocks.
with arg_scope([CNNBlock],
use_batch_norm=use_batch_norm,
use_shortcuts=False,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activation_type=activation_type,
freeze_bn=freeze_bn,
use_bias=use_bias):
# Implementing VGG 16 architecture.
if nlayers == 16:
# Block - 1.
x = CNNBlock(repeat=2, stride=first_stride, subblocks=[(3, 64)], index=1,
freeze_block=(1 in freeze_blocks))(inputs)
if use_pooling:
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same',
data_format=data_format, name='block1_pool')(x)
# Block - 2.
x = CNNBlock(repeat=2, stride=stride, subblocks=[(3, 128)], index=2,
freeze_block=(2 in freeze_blocks))(x)
if use_pooling:
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same',
data_format=data_format, name='block2_pool')(x)
# Block - 3.
x = CNNBlock(repeat=3, stride=stride, subblocks=[(3, 256)], index=3,
freeze_block=(3 in freeze_blocks))(x)
if use_pooling:
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same',
data_format=data_format, name='block3_pool')(x)
# Block - 4.
x = CNNBlock(repeat=3, stride=stride, subblocks=[(3, 512)], index=4,
freeze_block=(4 in freeze_blocks))(x)
if use_pooling:
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same',
data_format=data_format, name='block4_pool')(x)
# Block - 5.
x = CNNBlock(repeat=3, stride=stride, subblocks=[(3, 512)], index=5,
freeze_block=(5 in freeze_blocks))(x)
# Implementing VGG 19 architecture.
elif nlayers == 19:
# Block - 1.
x = CNNBlock(repeat=2, stride=first_stride, subblocks=[(3, 64)], index=1,
freeze_block=(1 in freeze_blocks))(inputs)
if use_pooling:
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same',
data_format=data_format, name='block1_pool')(x)
# Block - 2.
x = CNNBlock(repeat=2, stride=stride, subblocks=[(3, 128)], index=2,
freeze_block=(2 in freeze_blocks))(x)
if use_pooling:
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same',
data_format=data_format, name='block2_pool')(x)
# Block - 3.
x = CNNBlock(repeat=4, stride=stride, subblocks=[(3, 256)], index=3,
freeze_block=(3 in freeze_blocks))(x)
if use_pooling:
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same',
data_format=data_format, name='block3_pool')(x)
if use_modified_vgg:
# Block - 4.
# Repeat is 2 unlike original VGG19
x = CNNBlock(repeat=2, stride=stride, subblocks=[(3, 512)], index=4,
freeze_block=(4 in freeze_blocks))(x)
if use_pooling:
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same',
data_format=data_format, name='block4_pool')(x)
# Non-VGG layers
# Renaming index to 6 for new naming
x = CNNBlock(repeat=1, stride=1, subblocks=[(3, 256)], index=6,
freeze_block=(4 in freeze_blocks))(x)
else:
# Block - 4.
x = CNNBlock(repeat=4, stride=stride, subblocks=[(3, 512)], index=4,
freeze_block=(4 in freeze_blocks))(x)
if use_pooling:
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same',
data_format=data_format, name='block4_pool')(x)
# Block - 5.
x = CNNBlock(repeat=4, stride=stride, subblocks=[(3, 512)], index=5,
freeze_block=(5 in freeze_blocks))(x)
else:
raise NotImplementedError('A VGG with nlayers=%d is not implemented.' % nlayers)
if add_head:
# Add final Max Pooling layer if there are FC layers. Otherwise return the
# feature extractor trunk with a stride of 16
if use_pooling:
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same',
data_format=data_format, name='block5_pool')(x)
# Classification block.
x = Flatten(name='flatten')(x)
x = Dense(4096, activation='relu', name='fc1')(x)
if dropout > 0:
x = Dropout(dropout)(x)
x = Dense(4096, activation='relu', name='fc2')(x)
if dropout > 0:
x = Dropout(dropout)(x)
x = Dense(nclasses, activation='softmax', name='output_fc')(x)
# Naming model.
model_name = 'vgg%d' % nlayers
if not use_pooling:
model_name += '_nopool'
if use_batch_norm:
model_name += '_bn'
# Set up keras model object.
model = Model(inputs=inputs, outputs=x, name=model_name)
return model
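# Minimal usage sketch (the input size, data format and flag values below are
# illustrative assumptions, not values prescribed by this module):
if __name__ == '__main__':
    from keras.layers import Input
    sample_input = Input(shape=(3, 256, 256))
    # Headless backbone: add_head defaults to False, so no FC layers are appended.
    backbone = VggNet(19, sample_input, data_format='channels_first',
                      use_pooling=False, use_modified_vgg=True)
    backbone.summary()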
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/models/templates/vgg.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model templates for BpNet HelNets (modified versions of original HelNet)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from keras import backend as K
from keras.layers import BatchNormalization
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.models import Model
from keras.utils.data_utils import get_file
from nvidia_tao_tf1.core.decorators.arg_scope import arg_scope
from nvidia_tao_tf1.core.models.templates.utils import add_activation
from nvidia_tao_tf1.core.models.templates.utils import get_batchnorm_axis
from nvidia_tao_tf1.core.models.templates.utils import performance_test_model
from nvidia_tao_tf1.cv.bpnet.models.templates.utils import CNNBlock
logger = logging.getLogger(__name__)
def HelNet(nlayers,
inputs,
mtype='default',
pooling=False,
use_last_block=True,
use_batch_norm=False,
data_format=None,
kernel_regularizer=None,
bias_regularizer=None,
activation_type='relu',
activation_kwargs=None,
block_widths=(64, 128, 256, 512),
weights=None):
"""
Construct a HelNet with a set amount of layers.
The HelNet family is very similar, and in its convolutional core identical, to the ResNet family
described in [1]. The main differences are: the absence of shortcuts (skip connections); the use
of a different head; and usually one or two changes in the striding. We've also made the second
layer (max pool) optional, though it was standard for ResNets described in the paper [1].
Args:
nlayers (int): the number of layers desired for this HelNet (e.g. 6, 10, ..., 34).
inputs (tensor): the input tensor `x`.
mtype (str): model variant that controls where striding/dilation is applied; one of
'default', 's8_3rdblock', 's8_3rdblock_wdilation', 's8_1stlayer' or 's8_1stlayer_wdilation'.
pooling (bool): whether max-pooling with a stride of 2 should be used as the second layer.
If `False`, this stride will be added to the next convolution instead.
use_last_block (bool): whether to append the final convolutional block (block 4).
use_batch_norm (bool): whether batchnorm should be added after each convolution.
data_format (str): either 'channels_last' (NHWC) or 'channels_first' (NCHW).
kernel_regularizer (float): regularizer to apply to kernels.
bias_regularizer (float): regularizer to apply to biases.
activation_type (str): Type of activation.
activation_kwargs (dict): Additional activation keyword arguments to be fed to
the add_activation function.
weights (str): download and load pretrained weights, e.g. 'imagenet'.
block_widths (tuple of ints): width, i.e. the number of feature maps, in each
convolutional block of the model.
Returns:
Model: the output model after applying the HelNet on top of input `x`.
[1] Deep Residual Learning for Image Recognition (https://arxiv.org/abs/1512.03385)
"""
if data_format is None:
data_format = K.image_data_format()
activation_kwargs = activation_kwargs or {}
# Create HelNet-0 model for training diagnostics.
if nlayers == 0:
return performance_test_model(inputs, data_format, activation_type)
if mtype == 'default':
fl_stride = (2, 2)
fl_drate = (1, 1)
third_stride = 2
third_drate = (1, 1)
elif mtype == 's8_3rdblock_wdilation':
fl_stride = (2, 2)
fl_drate = (1, 1)
third_stride = 1
third_drate = (2, 2)
elif mtype == 's8_3rdblock':
fl_stride = (2, 2)
fl_drate = (1, 1)
third_stride = 1
third_drate = (1, 1)
elif mtype == 's8_1stlayer_wdilation':
fl_stride = (1, 1)
fl_drate = (2, 2)
third_stride = 2
third_drate = (1, 1)
elif mtype == 's8_1stlayer':
fl_stride = (1, 1)
fl_drate = (1, 1)
third_stride = 2
third_drate = (1, 1)
else:
raise NotImplementedError(
"Helnet type: {} is not supported.".format(mtype))
x = Conv2D(64, (7, 7),
strides=fl_stride,
dilation_rate=fl_drate,
padding='same',
data_format=data_format,
use_bias=not (use_batch_norm),
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
name='conv1')(inputs)
if use_batch_norm:
x = BatchNormalization(axis=get_batchnorm_axis(data_format),
name='bn_conv1')(x)
x = add_activation(activation_type, **activation_kwargs)(x)
if pooling:
x = MaxPooling2D(pool_size=(2, 2),
strides=(2, 2),
padding='same',
data_format=data_format)(x)
first_stride = 1
else:
first_stride = 2
# Define a block functor which can create blocks
with arg_scope([CNNBlock],
use_batch_norm=use_batch_norm,
use_shortcuts=False,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activation_type=activation_type,
activation_kwargs=activation_kwargs,
use_bias=not (use_batch_norm)):
if nlayers == 6:
x = CNNBlock(repeat=1,
stride=first_stride,
subblocks=[(3, block_widths[0])],
index=1)(x)
x = CNNBlock(repeat=1,
stride=2,
subblocks=[(3, block_widths[1])],
index=2)(x)
x = CNNBlock(repeat=1,
stride=third_stride,
subblocks=[(3, block_widths[2])],
index=3,
first_subblock_dilation_rate=third_drate)(x)
if use_last_block:
x = CNNBlock(repeat=1,
stride=1,
subblocks=[(3, block_widths[3])],
index=4)(x)
elif nlayers == 10:
x = CNNBlock(repeat=1,
stride=first_stride,
subblocks=[(3, block_widths[0])] * 2,
index=1)(x)
x = CNNBlock(repeat=1,
stride=2,
subblocks=[(3, block_widths[1])] * 2,
index=2)(x)
x = CNNBlock(repeat=1,
stride=third_stride,
subblocks=[(3, block_widths[2])] * 2,
index=3,
first_subblock_dilation_rate=third_drate)(x)
if use_last_block:
x = CNNBlock(repeat=1,
stride=1,
subblocks=[(3, block_widths[3])] * 2,
index=4)(x)
elif nlayers == 12:
x = CNNBlock(repeat=1,
stride=first_stride,
subblocks=[(3, block_widths[0])] * 2,
index=1)(x)
x = CNNBlock(repeat=1,
stride=2,
subblocks=[(3, block_widths[1])] * 2,
index=2)(x)
x = CNNBlock(repeat=2,
stride=third_stride,
subblocks=[(3, block_widths[2])] * 2,
index=3,
first_subblock_dilation_rate=third_drate)(x)
if use_last_block:
x = CNNBlock(repeat=1,
stride=1,
subblocks=[(3, block_widths[3])] * 2,
index=4)(x)
elif nlayers == 18:
x = CNNBlock(repeat=2,
stride=first_stride,
subblocks=[(3, block_widths[0])] * 2,
index=1)(x)
x = CNNBlock(repeat=2,
stride=2,
subblocks=[(3, block_widths[1])] * 2,
index=2)(x)
x = CNNBlock(repeat=2,
stride=third_stride,
subblocks=[(3, block_widths[2])] * 2,
index=3,
first_subblock_dilation_rate=third_drate)(x)
if use_last_block:
x = CNNBlock(repeat=2,
stride=1,
subblocks=[(3, block_widths[3])] * 2,
index=4)(x)
elif nlayers == 26:
x = CNNBlock(repeat=3,
stride=first_stride,
subblocks=[(3, block_widths[0])] * 2,
index=1)(x)
x = CNNBlock(repeat=4,
stride=2,
subblocks=[(3, block_widths[1])] * 2,
index=2)(x)
x = CNNBlock(repeat=3,
stride=third_stride,
subblocks=[(3, block_widths[2])] * 2,
index=3,
first_subblock_dilation_rate=third_drate)(x)
if use_last_block:
x = CNNBlock(repeat=2,
stride=1,
subblocks=[(3, block_widths[3])] * 2,
index=4)(x)
elif nlayers == 34:
x = CNNBlock(repeat=3,
stride=first_stride,
subblocks=[(3, block_widths[0])] * 2,
index=1)(x)
x = CNNBlock(repeat=4,
stride=2,
subblocks=[(3, block_widths[1])] * 2,
index=2)(x)
x = CNNBlock(repeat=6,
stride=third_stride,
subblocks=[(3, block_widths[2])] * 2,
index=3,
first_subblock_dilation_rate=third_drate)(x)
if use_last_block:
x = CNNBlock(repeat=3,
stride=1,
subblocks=[(3, block_widths[3])] * 2,
index=4)(x)
else:
raise NotImplementedError(
'A Helnet with nlayers=%d is not implemented.' % nlayers)
model_name = 'helnet%d_s16' % nlayers
if not pooling:
model_name += '_nopool'
if use_batch_norm:
model_name += '_bn'
model = Model(inputs=inputs, outputs=x, name=model_name)
if weights == 'imagenet':
logger.warning(
"Imagenet weights can not be used for production models.")
if nlayers == 18:
if use_batch_norm:
weights_path = get_file(
'imagenet_helnet18-bn_weights_20170729.h5',
'https://s3-us-west-2.amazonaws.com/'
'9j2raan2rcev-ai-infra-models/'
'imagenet_helnet18-bn_weights_20170729.h5',
cache_subdir='models',
md5_hash='6a2d59e48d8b9f0b41a2b02a2f3c018e')
else:
weights_path = get_file(
'imagenet_helnet18-no-bn_weights_20170729.h5',
'https://s3-us-west-2.amazonaws.com/'
'9j2raan2rcev-ai-infra-models/'
'imagenet_helnet18-no-bn_weights_20170729.h5',
cache_subdir='models',
md5_hash='3282b1e5e7f8e769a034103c455968e6')
model.load_weights(weights_path, by_name=True)
return model
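# Minimal usage sketch (the input shape and model type below are illustrative
# assumptions; per the accompanying tests, mtype='s8_3rdblock' gives an overall stride of 8):
if __name__ == '__main__':
    from keras.layers import Input
    sample_input = Input(shape=(3, 256, 256))
    trunk = HelNet(10, sample_input, mtype='s8_3rdblock', pooling=False,
                   use_batch_norm=False, data_format='channels_first')
    trunk.summary()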
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/models/templates/helnet.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BpNet base model definitions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/models/templates/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import keras
import pytest
from nvidia_tao_tf1.core.models.templates.utils import count_layers_by_class_name
from nvidia_tao_tf1.cv.bpnet.models.templates.helnet import HelNet
mtype_stride_mapping = {
"default": 16,
"s8_3rdblock": 8,
"s8_3rdblock_wdilation": 8,
"s8_1stlayer": 8,
"s8_1stlayer_wdilation": 8
}
topologies = [
# Test the different nlayers
(6, False, True, "channels_first", "default"),
(10, False, True, "channels_first", "default"),
(12, False, True, "channels_first", "default"),
(18, False, True, "channels_first", "default"),
# Without BN
(18, False, False, "channels_first", "default"),
# With pooling
(18, True, True, "channels_first", "default"),
# channels_last:
# With BN, with pooling
(18, True, True, "channels_last", "default"),
# No BN, no pooling
(18, False, False, "channels_last", "default"),
# test cases for model types
(18, False, False, "channels_last", "s8_3rdblock"),
(18, True, False, "channels_first", "s8_3rdblock_wdilation"),
(18, True, True, "channels_first", "s8_1stlayer"),
(18, False, True, "channels_last", "s8_1stlayer_wdilation"),
]
@pytest.mark.parametrize("nlayers,pooling,use_batch_norm,data_format, mtype",
topologies)
def test_helnet(nlayers, pooling, use_batch_norm, data_format, mtype):
"""Test headless Helnets for a variety of topologies and parameters."""
w, h = 960, 480
expected_stride = mtype_stride_mapping[mtype]
if data_format == "channels_last":
shape = (w, h, 3)
elif data_format == "channels_first":
shape = (3, w, h)
inputs = keras.layers.Input(shape=shape)
model = HelNet(
nlayers,
inputs,
mtype=mtype,
pooling=pooling,
use_batch_norm=use_batch_norm,
data_format=data_format,
)
# Batchnorm check
n_batchnorms = count_layers_by_class_name(model, ["BatchNormalization"])
if use_batch_norm:
assert n_batchnorms > 0
else:
assert n_batchnorms == 0
# There should be no bias if batch norm is on (~5% gain in training speed with DGX-1 Volta).
for layer in model.layers:
if isinstance(layer, keras.layers.Conv2D):
assert layer.get_config()["use_bias"] == (not use_batch_norm)
# Layer count check
n_layers_counted = count_layers_by_class_name(model, ["Conv2D", "Dense"])
expected_nlayers = nlayers - 1 # subtract one because it's headless
assert n_layers_counted == expected_nlayers
# Check model output shape
output_shape = model.outputs[0].get_shape()
expected_spatial_shape = (int(w / expected_stride),
int(h / expected_stride))
if data_format == "channels_last":
assert output_shape[1:3] == expected_spatial_shape
elif data_format == "channels_first":
assert output_shape[2:4] == expected_spatial_shape
def test_helnet_variable_feature_maps():
"""Test headless Helnets for a change in number of feature maps."""
shape = (3, 960, 480)
block_widths = (64, 128, 64, 128)
inputs = keras.layers.Input(shape=shape)
# create a HelNet34 network and change width of blocks 3 and 4
model = HelNet(34,
inputs,
pooling=False,
use_batch_norm=True,
block_widths=block_widths)
for layer in model.layers:
config = layer.get_config()
# check if layer is a convolutional layer
if type(layer) == keras.layers.convolutional.Conv2D:
layer_name = config["name"]
# check if layer is a block layer
if layer_name.split("_")[0] == "block":
block_num = int(layer_name.split("_")[1][0])
assert config["filters"] == block_widths[block_num - 1]
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/models/templates/test_helnet.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modulus utilities for model templates."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import keras
from nvidia_tao_tf1.core.decorators.arg_scope import add_arg_scope
from nvidia_tao_tf1.core.models.templates.quantized_conv2d import QuantizedConv2D
from nvidia_tao_tf1.core.models.templates.utils import add_activation, get_batchnorm_axis
from nvidia_tao_tf1.core.models.templates.utils import SUBBLOCK_IDS
from nvidia_tao_tf1.core.utils import get_uid
logger = logging.getLogger(__name__)
def add_input(channels=3,
height=256,
width=256,
name='inputs',
data_format='channels_first'):
"""
Build sample input for testing.
Args:
name (str): Name of the input tensor. Default value is 'inputs'
data_format (str): Expected tensor format, either `channels_first` or `channels_last`.
Default value is `channels_first`.
channels, height, width (all int): Input image dimensions.
"""
# Set sample inputs.
if data_format == 'channels_first':
shape = (channels, height, width)
elif data_format == 'channels_last':
shape = (height, width, channels)
else:
raise ValueError(
'Provide either `channels_first` or `channels_last` for `data_format`.'
)
input_tensor = keras.layers.Input(shape=shape, name=name)
return input_tensor
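# Example (a sketch): a channels_last RGB input tensor for a 256x256 crop.
#   image_input = add_input(channels=3, height=256, width=256, data_format='channels_last')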
class CNNBlock(object):
"""A functor for creating a block of layers.
Modified version of the modulus CNNBlock. The difference is in how the dilation rate is
used: the modulus block applies it to all layers, whereas this block adds a
'first_subblock_dilation_rate' argument, passed to the subblock function, so that
dilation is applied only to the first subblock, similar to stride.
Dilations are set to 'dilation_rate' for all layers beyond the first subblock.
"""
@add_arg_scope
def __init__(
self,
use_batch_norm,
use_shortcuts,
data_format,
kernel_regularizer,
bias_regularizer,
repeat,
stride,
subblocks,
index=None,
activation_type='relu',
activation_kwargs=None,
dilation_rate=(1, 1),
first_subblock_dilation_rate=None,
all_projections=False,
use_bias=True,
name_prefix=None,
quantize=False,
bitwidth=8,
):
"""
Initialization of the block functor object.
Args:
use_batch_norm (bool): whether batchnorm should be added after each convolution.
use_shortcuts (bool): whether shortcuts should be used. A typical ResNet by definition
uses shortcuts, but these can be toggled off to use the same ResNet topology without
the shortcuts.
data_format (str): either 'channels_last' (NHWC) or 'channels_first' (NCHW).
kernel_regularizer (float): regularizer to apply to kernels.
bias_regularizer (float): regularizer to apply to biases.
repeat (int): repeat number.
stride (int): The filter stride to be applied only to the first subblock (typically used
for downsampling). Strides are set to 1 for all layers beyond the first subblock.
subblocks (list of tuples): A list of tuples defining settings for each consecutive
convolution. Example:
`[(3, 64), (3, 64)]`
The two items in each tuple represents the kernel size and the amount of filters in
a convolution, respectively. The convolutions are added in the order of the list.
index (int): the index of the block to be created.
activation_type (str): activation function type.
activation_kwargs (dict): Additional activation keyword arguments to be fed to
the add_activation function.
dilation_rate (int or (int, int)): An integer or tuple/list of n integers, specifying
the dilation rate to use for dilated convolution.
first_subblock_dilation_rate (int): The dilation to be applied only to first subblock
(typically used instead of downsampling). Dilations are set to 'dilation_rate'
for all layers beyond the first subblock.
all_projections (bool): A boolean flag to determine whether all shortcut connections
should be implemented as projection layers to facilitate full pruning or not.
use_bias (bool): whether the layer uses a bias vector.
name_prefix (str): Prefix the name with this value.
quantize (bool): A boolean flag to determine whether to use quantized conv2d or not.
bitwidth (integer): quantization bitwidth.
"""
self.use_batch_norm = use_batch_norm
self.use_shortcuts = use_shortcuts
self.all_projections = all_projections
self.data_format = data_format
self.kernel_regularizer = kernel_regularizer
self.bias_regularizer = bias_regularizer
self.activation_type = activation_type
self.activation_kwargs = activation_kwargs or {}
self.dilation_rate = dilation_rate
self.first_subblock_dilation_rate = first_subblock_dilation_rate
self.repeat = repeat
self.stride = stride
self.use_bias = use_bias
self.subblocks = subblocks
self.subblock_ids = SUBBLOCK_IDS()
self.quantize = quantize
self.bitwidth = bitwidth
if index is not None:
self.name = "block_%d" % index
else:
self.name = "block_%d" % (get_uid("block") + 1)
if name_prefix is not None:
self.name = name_prefix + "_" + self.name
def __call__(self, x):
"""Build the block.
Args:
x (tensor): input tensor.
Returns:
tensor: the output tensor after applying the block on top of input `x`.
"""
for i in range(self.repeat):
name = '%s%s_' % (self.name, self.subblock_ids[i])
if i == 0:
# Set the stride only on the first layer.
stride = self.stride
first_subblock_dilation_rate = self.first_subblock_dilation_rate
dimension_changed = True
else:
stride = 1
first_subblock_dilation_rate = None
dimension_changed = False
x = self._subblocks(x,
stride,
first_subblock_dilation_rate,
dimension_changed,
name_prefix=name)
return x
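# Note on naming (derived from the code above): each repeat produces layers named like
# 'block_<index><subblock_id>_conv_<n>', e.g. 'block_1a_conv_1'; the HelNet tests rely on
# this convention when parsing block numbers from layer names.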
def _subblocks(self,
x,
stride,
first_subblock_dilation_rate,
dimension_changed,
name_prefix=None):
"""
Stack several convolutions in a specific sequence given by a list of subblocks.
Args:
x (tensor): the input tensor.
stride (int): The filter stride to be applied only to the first subblock (typically used
for downsampling). Strides are set to 1 for all layers beyond the first subblock.
first_subblock_dilation_rate (int): The dilation to be applied only to first subblock
(typically used instead of downsampling). Dilations are set to 'dilation_rate'
for all layers beyond the first subblock.
dimension_changed (bool): This indicates whether the dimension has been changed for this
block. If this is true, then we need to account for the change, or else we will be
unable to re-add the shortcut tensor due to incompatible dimensions. This can be
solved by applying a (1x1) convolution [1]. (The paper also notes the possibility of
zero-padding the shortcut tensor to match any larger output dimension, but this is
not implemented.)
name_prefix (str): name prefix for all the layers created in this function.
Returns:
tensor: the output tensor after applying the ResNet block on top of input `x`.
"""
bn_axis = get_batchnorm_axis(self.data_format)
shortcut = x
nblocks = len(self.subblocks)
for i in range(nblocks):
kernel_size, filters = self.subblocks[i]
if i == 0:
strides = (stride, stride)
else:
strides = (1, 1)
if i == 0 and first_subblock_dilation_rate is not None:
# if first subblock, use the dilation rate passed in for the first subblock
dilation_rate = first_subblock_dilation_rate
else:
# if not the first subblock, use the common dilation rate
dilation_rate = self.dilation_rate
# Keras doesn't support dilation_rate != 1 if stride != 1.
if strides != (1, 1) and dilation_rate != (1, 1):
dilation_rate = (1, 1)
logger.warning(
"Dilation rate {} is incompatible with stride {}. "
"Setting dilation rate to {} for layer {}conv_{}.".format(
self.dilation_rate, strides, dilation_rate,
name_prefix, i + 1))
if self.quantize:
x = QuantizedConv2D(
filters,
(kernel_size, kernel_size),
strides=strides,
padding="same",
dilation_rate=dilation_rate,
data_format=self.data_format,
use_bias=self.use_bias,
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
bitwidth=self.bitwidth,
name="%sconv_%d" % (name_prefix, i + 1),
)(x)
else:
x = keras.layers.Conv2D(
filters,
(kernel_size, kernel_size),
strides=strides,
padding="same",
dilation_rate=dilation_rate,
data_format=self.data_format,
use_bias=self.use_bias,
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
name="%sconv_%d" % (name_prefix, i + 1),
)(x)
if self.use_batch_norm:
x = keras.layers.BatchNormalization(axis=bn_axis,
name="%sbn_%d" %
(name_prefix, i + 1))(x)
if i != nblocks - 1: # All except last conv in block.
x = add_activation(self.activation_type,
**self.activation_kwargs)(x)
if self.use_shortcuts:
if self.all_projections:
# Implementing shortcut connections as 1x1 projection layers irrespective of
# dimension change.
if self.quantize:
shortcut = QuantizedConv2D(
filters,
(1, 1),
strides=(stride, stride),
data_format=self.data_format,
dilation_rate=self.dilation_rate,
use_bias=self.use_bias,
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
bitwidth=self.bitwidth,
name="%sconv_shortcut" % name_prefix,
)(shortcut)
else:
shortcut = keras.layers.Conv2D(
filters,
(1, 1),
strides=(stride, stride),
data_format=self.data_format,
dilation_rate=self.dilation_rate,
use_bias=self.use_bias,
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
name="%sconv_shortcut" % name_prefix,
)(shortcut)
if self.use_batch_norm:
shortcut = keras.layers.BatchNormalization(
axis=bn_axis,
name="%sbn_shortcut" % name_prefix)(shortcut)
else:
# Add projection layers to shortcut only if there is a change in dimension.
if dimension_changed: # Dimension changed.
if self.quantize:
shortcut = QuantizedConv2D(
filters,
(1, 1),
strides=(stride, stride),
data_format=self.data_format,
dilation_rate=self.dilation_rate,
use_bias=self.use_bias,
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
bitwidth=self.bitwidth,
name="%sconv_shortcut" % name_prefix,
)(shortcut)
else:
shortcut = keras.layers.Conv2D(
filters,
(1, 1),
strides=(stride, stride),
data_format=self.data_format,
dilation_rate=self.dilation_rate,
use_bias=self.use_bias,
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
name="%sconv_shortcut" % name_prefix,
)(shortcut)
if self.use_batch_norm:
shortcut = keras.layers.BatchNormalization(
axis=bn_axis,
name="%sbn_shortcut" % name_prefix)(shortcut)
x = keras.layers.add([x, shortcut])
x = add_activation(self.activation_type, **self.activation_kwargs)(x)
return x
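# Usage sketch (illustrative values; this mirrors the arg_scope pattern used by the
# backbone templates that consume this block):
if __name__ == '__main__':
    from nvidia_tao_tf1.core.decorators.arg_scope import arg_scope
    image_input = add_input(channels=3, height=128, width=128, data_format='channels_first')
    with arg_scope([CNNBlock],
                   use_batch_norm=False,
                   use_shortcuts=False,
                   data_format='channels_first',
                   kernel_regularizer=None,
                   bias_regularizer=None,
                   use_bias=True):
        # Two repeats of a pair of 3x3, 64-filter convolutions; stride 2 on the first subblock.
        features = CNNBlock(repeat=2, stride=2, subblocks=[(3, 64)] * 2, index=1)(image_input)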
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/models/templates/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the Weighted Momementum Optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import pytest
import tensorflow as tf
from nvidia_tao_tf1.blocks.learning_rate_schedules import ConstantLearningRateSchedule
from nvidia_tao_tf1.core.training import enable_deterministic_training
from nvidia_tao_tf1.cv.bpnet.optimizers.weighted_momentum_optimizer import \
WeightedMomentumOptimizer
def _set_seeds(seed):
"""Set seeds for reproducibility.
Args:
seed (int): random seed value.
"""
np.random.seed(seed)
tf.compat.v1.set_random_seed(seed)
@pytest.fixture(scope="module", autouse=True)
def determinism():
enable_deterministic_training()
_set_seeds(42)
class SimpleConvModel(object):
"""Simple 2-layer convolutional model to test with."""
def __init__(self, initial_value=0.125):
"""__init__ method.
Args:
initial_value (float): weights initializer value.
"""
self.initial_value = initial_value
self._built = False
self.W1 = None
self.b1 = None
self.W2 = None
self.b2 = None
@property
def trainable_vars(self):
"""Return trainable variables."""
return [self.W1, self.b1, self.W2, self.b2]
def _build(self, x):
"""Build weights.
Args:
x (tensor): input tensor to the model.
"""
if self._built:
return
# Build W1 and b1.
in_channels = x.shape[1] # NCHW
out_channels = 7
W1_initial_value = np.ones([3, 3, in_channels, out_channels],
dtype=np.float32)
W1_initial_value *= self.initial_value
self.W1 = tf.Variable(initial_value=W1_initial_value, trainable=True)
self.b1 = tf.Variable(initial_value=np.zeros([out_channels],
dtype=np.float32),
trainable=True)
# Build W2 and b2.
in_channels = out_channels
out_channels = 13
W2_initial_value = np.ones([5, 5, in_channels, out_channels],
dtype=np.float32)
W2_initial_value *= self.initial_value
self.W2 = tf.Variable(initial_value=W2_initial_value, trainable=True)
self.b2 = tf.Variable(initial_value=np.zeros([out_channels],
dtype=np.float32),
trainable=True)
def __call__(self, x):
"""Call method.
Args:
x (tensor): input tensor to the model.
Returns:
y (tensor): output tensor.
"""
self._build(x)
# h = W1*x + b1
h = tf.nn.conv2d(input=x,
filters=self.W1,
strides=1,
padding="VALID",
data_format="NCHW")
h = tf.nn.bias_add(value=h, bias=self.b1, data_format="NCHW")
# y = W2*h + b2
y = tf.nn.conv2d(input=h,
filters=self.W2,
strides=1,
padding="VALID",
data_format="NCHW")
y = tf.nn.bias_add(value=y, bias=self.b2, data_format="NCHW")
return y
class TestScenarios(object):
"""Tests with different weighting cases."""
NUM_STEPS = 3
LEARNING_RATE = 1e-5
@staticmethod
def get_loss(model):
"""Loss tensor to optimize.
Args:
model (SimpleConvModel): model to get predictions to compute loss.
Returns:
loss (tensor): computed loss based on the given ground truth.
"""
x = tf.ones([3, 5, 16, 32]) # NCHW.
y = model(x)
y_true = tf.ones_like(y)
loss = tf.reduce_sum(
input_tensor=tf.compat.v1.squared_difference(y_true, y))
return loss
def _init_helper(self, weight_default_value=1.0):
"""Intialize the model, loss and optimizer.
Args:
weight_default_value (float): default weight value to be used to
initialize WeightedMomentumOptimizer.
"""
# Reset the graph so the variable names remain consistent
tf.compat.v1.reset_default_graph()
# Get model
model = SimpleConvModel()
# Get losses
loss = self.get_loss(model)
# Get LR scheduler
lr_scheduler = ConstantLearningRateSchedule(self.LEARNING_RATE)
# Get weighted momentum optimizer
weighted_optimizer = WeightedMomentumOptimizer(
lr_scheduler, weight_default_value=weight_default_value)
return model, loss, weighted_optimizer
def test_zero_weight_all_variables(self):
"""Test that if zero weight is applied to every variable, nothing changes."""
model, loss, weighted_optimizer = self._init_helper(
weight_default_value=0.0)
weighted_optimizer.build()
min_op = weighted_optimizer.minimize(loss, var_list=None)
model_var_fetches = model.trainable_vars
with tf.compat.v1.Session() as session:
session.run(tf.compat.v1.global_variables_initializer())
# Run training for a few steps. Variables should never be updated.
initial_values = session.run(model_var_fetches)
for _ in range(self.NUM_STEPS):
session.run(min_op)
final_values = session.run(model_var_fetches)
for old, new in zip(initial_values, final_values):
np.testing.assert_array_equal(old, new)
assert not np.isnan(old).any()
@pytest.fixture
def regularly_optimized_values(self):
"""First train a model without any gradient weigting, ie. weight=1.0."""
model, loss, weighted_optimizer = self._init_helper(
weight_default_value=1.0)
weighted_optimizer.build()
min_op = weighted_optimizer.minimize(loss, var_list=None)
with tf.compat.v1.Session() as session:
session.run(tf.compat.v1.global_variables_initializer())
# Run training for a few steps.
for _ in range(self.NUM_STEPS):
session.run(min_op)
final_values = session.run(model.trainable_vars)
tf.compat.v1.reset_default_graph()
return final_values
def test_grad_weights_dict_with_zero_weights(self):
"""Test if setting zero weights using the grad_weights_dict yields unchanged varaibles."""
model, loss, weighted_optimizer = self._init_helper(
weight_default_value=1.0)
model_var_fetches = model.trainable_vars
grad_weights_dict = {}
for var in model_var_fetches:
grad_weights_dict[var.op.name] = 0.0
weighted_optimizer.set_grad_weights_dict(grad_weights_dict)
weighted_optimizer.build()
min_op = weighted_optimizer.minimize(loss, var_list=None)
with tf.compat.v1.Session() as session:
session.run(tf.compat.v1.global_variables_initializer())
# Run training for a few steps. Variables should never be updated.
initial_values = session.run(model_var_fetches)
for _ in range(self.NUM_STEPS):
session.run(min_op)
final_values = session.run(model_var_fetches)
for old, new in zip(initial_values, final_values):
np.testing.assert_array_equal(old, new)
assert not np.isnan(old).any()
def test_grad_weights_dict_with_altzero_weights(self):
"""Test if setting alternate zero weights using the grad_weights_dict yields
alternate unchanged variables."""
model, loss, weighted_optimizer = self._init_helper(
weight_default_value=1.0)
model_var_fetches = model.trainable_vars
grad_weights_dict = {}
for idx, var in enumerate(model_var_fetches):
grad_weights_dict[var.op.name] = 0.0 if idx % 2 == 0 else 0.5
weighted_optimizer.set_grad_weights_dict(grad_weights_dict)
weighted_optimizer.build()
min_op = weighted_optimizer.minimize(loss, var_list=None)
with tf.compat.v1.Session() as session:
session.run(tf.compat.v1.global_variables_initializer())
# Run training for a few steps. Zero-weighted variables should stay unchanged.
initial_values = session.run(model_var_fetches)
for _ in range(self.NUM_STEPS):
session.run(min_op)
final_values = session.run(model_var_fetches)
for idx, (old, new) in enumerate(zip(initial_values, final_values)):
if idx % 2 == 0:
np.testing.assert_array_equal(old[idx], new[idx])
else:
assert not np.isclose(old[idx], new[idx]).any()
assert not np.isnan(old).any()
# TODO: @vpraveen: re-enable this test after you understand
# why identity test is failing.
@pytest.mark.skipif(
os.getenv("RUN_ON_CI", "0") == "1",
reason="Temporarily skipping from CI execution."
)
def test_training_with_identity_weight_matches_default_training(
self, regularly_optimized_values):
"""Test training with var grad weights set to 1.0.
Args:
regularly_optimized_values (pytest.fixture): to train a model
without any gradient weighting, for comparison.
"""
model, loss, weighted_optimizer = self._init_helper(
weight_default_value=1.0)
model_var_fetches = model.trainable_vars
grad_weights_dict = {}
for var in model_var_fetches:
grad_weights_dict[var.op.name] = 1.0
weighted_optimizer.set_grad_weights_dict(grad_weights_dict)
weighted_optimizer.build()
min_op = weighted_optimizer.minimize(loss, var_list=None)
with tf.compat.v1.Session() as session:
session.run(tf.compat.v1.global_variables_initializer())
# Run training for a few steps.
for _ in range(self.NUM_STEPS):
session.run(min_op)
weighted_final_values = session.run(model.trainable_vars)
for expected, actual in zip(regularly_optimized_values,
weighted_final_values):
np.testing.assert_array_equal(
expected,
actual)
assert not np.isnan(expected).any()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/optimizers/test_weighted_momentum_optimizer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Weighted Momentum Optimizer with suport for layerwise gradient weighting/masking."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nvidia_tao_tf1.blocks.optimizers.optimizer import Optimizer
from nvidia_tao_tf1.core.coreobject import save_args
import numpy as np
import tensorflow as tf
class WeightedMomentumOptimizer(Optimizer):
"""
WeightedMomentumOptimizer class.
Has support for layerwise gradient weighting/masking for kernels and biases.
"""
@save_args
def __init__(self,
learning_rate_schedule,
grad_weights_dict=None,
weight_default_value=1.0,
momentum=0.9,
use_nesterov=False,
**kwargs):
"""__init__ method.
learning_rate_schedule (LearningRateSchedule): The object from which we obtain the
learning rate scalar tensor.
momentum (float): A float value or a constant float tensor. The momentum factor. The method
falls back into gradient descend optimizer when momentum is set to 0.
use_nesterov (bool): If True, use the Nesterov momentum.
"""
super(WeightedMomentumOptimizer,
self).__init__(learning_rate_schedule=learning_rate_schedule,
**kwargs)
self._learning_rate_schedule = learning_rate_schedule
self._momentum = momentum
self._use_nesterov = use_nesterov
self._grad_weights_dict = grad_weights_dict if grad_weights_dict is not None else {}
self._weight_default_value = weight_default_value
self._optimizer_built = False
def set_grad_weights_dict(self, grad_weights_dict):
"""Build the optimizer."""
self._grad_weights_dict = grad_weights_dict
def build(self):
"""Build the optimizer."""
self._learning_rate_tensor = self._learning_rate_schedule.get_tensor()
self._optimizer = tf.compat.v1.train.MomentumOptimizer(
learning_rate=self._learning_rate_tensor,
momentum=self._momentum,
use_nesterov=self._use_nesterov,
)
self._optimizer_built = True
if not tf.executing_eagerly():
self._distribute()
@property
def vars_and_grad_weights(self):
"""Return a handle on the trainable variables and their corresponding weights.
Returns:
(list): Returns a list of (variable, gradient_weights) tuples.
Raises:
RuntimeError: If the gradient weights / variables have yet to be defined.
"""
if len(self._grad_weights) == 0 or len(self._var_list) == 0:
raise RuntimeError(
"Please call `minimize` or `compute_gradients` beforehand.")
return list(zip(self._var_list, self._grad_weights))
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""Apply gradients.
Args:
grads_and_vars (list): List of (gradient, variable) pairs as returned by
`compute_gradients()`.
global_step (tf.Variable): Optional variable to increment by one after the variables
have been updated.
name (str): Optional name for the returned operation. Default to the name passed to the
constructor.
Returns:
(tf.Operation): An operation that applies the specified gradients. If `global_step`
was not `None`, that operation also increments `global_step`.
"""
return self._optimizer.apply_gradients(grads_and_vars=grads_and_vars,
global_step=global_step,
name=name)
def compute_gradients(self, loss, var_list=None, **kwargs):
"""Compute gradients and apply gradient weights.
Args:
loss (tf.Tensor): A tensor containing the value to compute gradients for.
var_list (list): Optional list or tuple of `tf.Variable` to compute gradients for.
Defaults to the list of variables collected in the graph under the key
`GraphKeys.TRAINABLE_VARIABLES`.
gate_gradients: How to gate the computation of gradients. Can be
`tf.compat.v1.<GATE_NONE, GATE_OP, GATE_GRAPH>`.
aggregation_method: Specifies the method used to combine gradient terms. Valid values
are defined in the class `AggregationMethod`.
colocate_gradients_with_ops (bool): If `True`, try colocating gradients with the
corresponding op.
grad_loss (tf.Tensor): Optional. A tensor holding the gradient computed for loss.
Returns:
A list of (gradient, variable) pairs. Variable is always present, but gradient can
be `None`.
"""
self._build_weights(var_list=var_list)
if not self._optimizer_built:
self.build()
# Compute gradients as you would normally.
grads_and_vars = self._optimizer.compute_gradients(loss=loss,
var_list=var_list,
**kwargs)
# Apply the weights.
weighted_grads_and_vars = []
for i, (grad, var) in enumerate(grads_and_vars):
weighted_grad = None
if grad is not None:
weighted_grad = grad * self._grad_weights[i]
weighted_grads_and_vars.append((weighted_grad, var))
self._grads_and_vars = grads_and_vars
self._weighted_grads_and_vars = weighted_grads_and_vars
return weighted_grads_and_vars
def minimize(
self,
loss,
global_step=None,
var_list=None,
gate_gradients=tf.compat.v1.train.Optimizer.GATE_OP,
aggregation_method=None,
colocate_gradients_with_ops=False,
name=None,
grad_loss=None,
):
"""Minimize op.
Args:
loss (tf.Tensor): A tensor containing the value to minimize.
global_step (tf.Variable): Optional variable to increment by one after the variables
have been updated.
var_list (list): Optional list or tuple of `tf.Variable` objects to update when
minimizing the `loss`. Defaults to the list of variables collected in the graph
under the key `GraphKeys.TRAINABLE_VARIABLES`.
gate_gradients: How to gate the computation of gradients. Can be
`tf.compat.v1.<GATE_NONE, GATE_OP, GATE_GRAPH>`.
aggregation_method: Specifies the method used to combine gradient terms. Valid values
are defined in the class `AggregationMethod`.
colocate_gradients_with_ops (bool): If `True`, try colocating gradients with the
corresponding op.
name (str): Optional named for the returned operation.
grad_loss (tf.Tensor): Optional. A tensor holding the gradient computed for loss.
Returns:
(tuple): The op that updates the variables in `var_list` (and increments `global_step`
if it was not `None`), followed by the weighted (gradient, variable) pairs that were
applied.
"""
# Compute the weighted gradients.
grads_and_vars = self.compute_gradients(
loss=loss,
var_list=var_list,
gate_gradients=gate_gradients,
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops,
grad_loss=grad_loss,
)
# Apply the weighted gradients.
optimize_op = self.apply_gradients(grads_and_vars=grads_and_vars,
global_step=global_step,
name=name)
return optimize_op, grads_and_vars, grads_and_vars
def _build_weights(self, var_list=None):
"""Helper that defines the weights associated with the variables' gradients."""
# Reset.
self._grad_weights = []
if var_list is None:
var_list = tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES)
with tf.compat.v1.variable_scope("grad_weight",
reuse=tf.compat.v1.AUTO_REUSE):
# This scope allows us to reuse gradient weights that may already have been defined.
# This is useful in e.g. the context of multi-task training, where each task may have
# its own optimizer on a different set of variables, but some of which are common. For
# those variables that are common (e.g. belong to the "feature extractor"), we want
# to reuse the same gradient weights (which may also turn out to save on memory usage).
# For those variables that are not common, the AUTO_REUSE results in the creation of
# a new gradient weight.
for var in var_list:
# Give the weight variable a name. The string manipulations are necessary for TF not
# to complain.
weight_name = var.name[:var.name.find(":")]
initial_value = np.ones(var.shape.as_list(),
dtype=var.dtype.as_numpy_dtype)
if weight_name in self._grad_weights_dict:
initial_value = self._grad_weights_dict[
weight_name] * initial_value
else:
initial_value = self._weight_default_value * initial_value
self._grad_weights.append(
tf.compat.v1.get_variable(
name=weight_name,
dtype=var.dtype.base_dtype,
initializer=initial_value,
trainable=False,
))
self._var_list = var_list
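# Usage sketch (hedged; the schedule value, variable name and `loss` below are placeholders):
#   from nvidia_tao_tf1.blocks.learning_rate_schedules import ConstantLearningRateSchedule
#   optimizer = WeightedMomentumOptimizer(ConstantLearningRateSchedule(1e-5),
#                                         momentum=0.9, weight_default_value=1.0)
#   optimizer.set_grad_weights_dict({'conv1/kernel': 0.0})  # mask out updates to one variable
#   optimizer.build()
#   min_op = optimizer.minimize(loss)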
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/optimizers/weighted_momentum_optimizer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BpNet Optimizers."""
from nvidia_tao_tf1.cv.bpnet.optimizers.weighted_momentum_optimizer import \
WeightedMomentumOptimizer
__all__ = ('WeightedMomentumOptimizer', )
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/optimizers/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BpNet Trainers."""
from nvidia_tao_tf1.cv.bpnet.trainers.bpnet_trainer import BpNetTrainer
__all__ = (
'BpNetTrainer',
)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/trainers/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BpNet Trainer."""
import glob
import logging
import os
import shutil
import keras
import tensorflow as tf
from nvidia_tao_tf1.blocks.trainer import Trainer
import nvidia_tao_tf1.core as tao_core
from nvidia_tao_tf1.core import distribution
from nvidia_tao_tf1.core.utils import mkdir_p, set_random_seed
from nvidia_tao_tf1.cv.common.utilities.serialization_listener import \
EpochModelSerializationListener
from nvidia_tao_tf1.cv.common.utilities.tlt_utils \
import get_latest_checkpoint, get_step_from_ckzip, get_tf_ckpt, load_pretrained_weights
from nvidia_tao_tf1.cv.common.utilities.tlt_utils import get_latest_tlt_model, load_model
from nvidia_tao_tf1.cv.detectnet_v2.tfhooks.task_progress_monitor_hook import (
TaskProgressMonitorHook
)
from nvidia_tao_tf1.cv.detectnet_v2.tfhooks.utils import get_common_training_hooks
logger = logging.getLogger(__name__)
MODEL_EXTENSION = ".hdf5"
class BpNetTrainer(Trainer):
"""BpNet Trainer class for building training graph and graph execution."""
@tao_core.coreobject.save_args
def __init__(self,
checkpoint_dir,
log_every_n_secs=1,
checkpoint_n_epoch=1,
num_epoch=2,
summary_every_n_steps=1,
infrequent_summary_every_n_steps=0,
validation_every_n_epoch=20,
max_ckpt_to_keep=5,
pretrained_weights=None,
load_graph=False,
inference_spec=None,
finetuning_config=None,
use_stagewise_lr_multipliers=False,
evaluator=None,
random_seed=42,
key=None,
final_model_name='bpnet_model',
**kwargs):
"""__init__ method.
Args:
checkpoint_dir (str): path to directory containing checkpoints.
log_every_n_secs (int): log every n secs.
checkpoint_n_epoch (int): how often to save checkpoint.
num_epoch (int): Number of epochs to train for.
summary_every_n_steps (int): summary every n steps.
infrequent_summary_every_n_steps (int): infrequent summary every n steps.
validation_every_n_epoch (int): run validation every n epochs.
max_ckpt_to_keep (int): How many checkpoints to keep.
pretrained_weights (str): Pretrained weights path.
load_graph (bool): whether to load the model graph as-is from the pretrained model
file instead of building it from the model definition.
inference_spec (str): path to the inference spec file.
finetuning_config (dict): fine-tuning configuration with the keys 'is_finetune_exp',
'checkpoint_path' and, when fine-tuning, 'ckpt_epoch_num'.
use_stagewise_lr_multipliers (bool): Option to enable use of
stagewise learning rate multipliers using WeightedMomentumOptimizer.
evaluator (TAOObject): evaluate predictions and save statistics.
random_seed (int): random seed.
key (str): encryption key used when loading pretrained models and checkpoints.
final_model_name (str): name used for the final saved model.
"""
super(BpNetTrainer, self).__init__(**kwargs)
assert checkpoint_n_epoch <= num_epoch, "Checkpoint_n_epochs must be \
<= num_epochs"
assert (num_epoch % checkpoint_n_epoch) == 0, "Checkpoint_n_epoch should\
be a divisor of num_epoch"
self._checkpoint_dir = checkpoint_dir
self._pretrained_weights = pretrained_weights
self._load_graph = load_graph
self._log_every_n_secs = log_every_n_secs
self._checkpoint_n_epoch = checkpoint_n_epoch
self._num_epoch = num_epoch
self._infrequent_summary_every_n_steps = infrequent_summary_every_n_steps
self._evaluator = evaluator
self._random_seed = random_seed
self._summary_every_n_steps = summary_every_n_steps
self._max_ckpt_to_keep = max_ckpt_to_keep
self._validation_every_n_epoch = validation_every_n_epoch
self._steps_per_epoch = self._total_loss = self._train_op = None
self.inference_spec = inference_spec
self._finetuning_config = finetuning_config
if self._finetuning_config is None:
self._finetuning_config = {
'is_finetune_exp': False,
'checkpoint_path': None,
}
self.use_stagewise_lr_multipliers = use_stagewise_lr_multipliers
self._key = key
self._generate_output_sequence()
self.final_model_name = final_model_name
# Checks
if self._load_graph:
assert self._pretrained_weights is not None, "Load graph is True,\
please specify pretrained model to use to load the graph."
assert self.inference_spec is not None, "Please specify inference spec\
path in the config file."
@property
def train_op(self):
"""Return train op of Trainer."""
return self._train_op
def _check_if_first_run(self):
files = [
file for file in glob.glob(
self._checkpoint_dir +
'/model.epoch-*')]
return (not bool(len(files)))
def _generate_output_sequence(self):
"""Generates required output sequence."""
stages = self._model._stages
cmaps = [('cmap', i) for i in range(1, stages + 1)]
pafs = [('paf', i) for i in range(1, stages + 1)]
output_seq = []
output_seq.extend(cmaps)
output_seq.extend(pafs)
self.output_seq = output_seq
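# For example, with a 3-stage model the sequence built above is:
# [('cmap', 1), ('cmap', 2), ('cmap', 3), ('paf', 1), ('paf', 2), ('paf', 3)]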
def update_regularizers(self, keras_model, kernel_regularizer=None,
bias_regularizer=None):
"""Update regularizers for models that are being loaded."""
model_config = keras_model.get_config()
for layer, layer_config in zip(keras_model.layers, model_config['layers']):
# Updating regularizer parameters for conv2d, depthwise_conv2d and dense layers.
if type(layer) in [keras.layers.convolutional.Conv2D,
keras.layers.core.Dense,
keras.layers.DepthwiseConv2D]:
if hasattr(layer, 'kernel_regularizer'):
layer_config['config']['kernel_regularizer'] = kernel_regularizer
if hasattr(layer, 'bias_regularizer'):
layer_config['config']['bias_regularizer'] = bias_regularizer
prev_model = keras_model
keras_model = keras.models.Model.from_config(model_config)
keras_model.set_weights(prev_model.get_weights())
return keras_model
def _build_distributed(self):
"""Build the training and validation graph, with Horovod Distributer enabled."""
# Use Horovod distributor for multi-gpu training.
self._ngpus = distribution.get_distributor().size()
# Set random seed for distributed training.
seed = distribution.get_distributor().distributed_seed(self._random_seed)
set_random_seed(seed)
# Must set the correct learning phase, `1` is training mode.
keras.backend.set_learning_phase(1)
with tf.name_scope("DataLoader"):
# Prepare data for training and validation.
data = self._dataloader()
# Total training samples and steps per epoch.
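            # Example (illustrative numbers): 120,000 samples with batch size 10 on
            # 4 GPUs gives 120000 // (10 * 4) = 3000 steps per epoch.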
self._samples_per_epoch = self._dataloader.num_samples
self._steps_per_epoch = \
self._samples_per_epoch // (self._dataloader.batch_size * self._ngpus)
self._last_step = self._num_epoch * self._steps_per_epoch
with tf.name_scope("Model"):
if self._load_graph:
logger.info(("Loading pretrained model graph as is from {}...").format(
self._pretrained_weights))
# Load the model
loaded_model = load_model(self._pretrained_weights, self._key)
logger.warning("Ignoring regularization factors for pruning exp..!")
# WAR is to define an input layer explicitly with data.images as
# tensor. This resolves the input type/shape mismatch error.
# But this creates a submodel within the model. And currently,
# there are two input layers.
# TODO: See if the layers can be expanded or better solution.
input_layer = keras.layers.Input(
tensor=data.images,
shape=(None, None, 3),
name='input_1')
# TODO: Enable once tested.
# loaded_model = self.update_regularizers(
# loaded_model, self._model._kernel_regularizer, self._model._bias_regularizer
# )
loaded_model = self.update_regularizers(loaded_model)
predictions = loaded_model(input_layer)
self._model._keras_model = keras.models.Model(
inputs=input_layer, outputs=predictions)
else:
logger.info("Building model graph from model defintion ...")
predictions = self._model(inputs=data.images)
# Print out model summary.
# print_model_summary(self._model._keras_model) # Disable for TLT
if self._check_if_first_run(
) and not self._finetuning_config["is_finetune_exp"]:
logger.info("First run ...")
# Initialize model with pre-trained weights
if self._pretrained_weights is not None and not self._load_graph:
logger.info(
("Intializing model with pre-trained weights {}...").format(
self._pretrained_weights))
load_pretrained_weights(
self._model._keras_model,
self._pretrained_weights,
key=self._key,
logger=None)
elif self._finetuning_config["is_finetune_exp"]:
logger.info(
("Finetuning started -> Loading from {} checkpoint...").format(
self._finetuning_config["checkpoint_path"]))
# NOTE: The last step here might be different because of the difference in
# dataset sizes - steps_per_epoch might be small for a smaller
# dataset
current_step = get_step_from_ckzip(self._finetuning_config["checkpoint_path"])
if "epoch" in self._finetuning_config["checkpoint_path"]:
current_step *= self._steps_per_epoch
self._last_step = current_step + (
self._num_epoch - self._finetuning_config["ckpt_epoch_num"]
) * self._steps_per_epoch
logger.info("Updated last_step: {}".format(self._last_step))
else:
logger.info(
"Not first run and not finetuning experiment -> \
Loading from latest checkpoint...")
if self.use_stagewise_lr_multipliers:
lr_mult = self._model.get_lr_multipiers()
else:
lr_mult = {}
with tf.name_scope("Loss"):
label_slice_indices = self._dataloader.pose_config.label_slice_indices
self._losses = self._loss(data.labels,
predictions,
data.masks,
self.output_seq,
label_slice_indices)
self._model_loss = self._model.regularization_losses()
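            # Total loss = sum of the per-stage cmap/paf losses averaged over the batch,
            # plus the model regularization loss.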
self._total_loss = tf.reduce_sum(
self._losses) / self._dataloader.batch_size + self._model_loss
tf.summary.scalar(name='total_loss', tensor=self._total_loss)
with tf.name_scope("Optimizer"):
# Update decay steps
_learning_rate_scheduler_type = type(self._optimizer._learning_rate_schedule).__name__
if 'SoftstartAnnealingLearningRateSchedule' in _learning_rate_scheduler_type:
self._optimizer._learning_rate_schedule.last_step = self._last_step
elif 'BpNetExponentialDecayLRSchedule' in _learning_rate_scheduler_type:
self._optimizer._learning_rate_schedule.update_decay_steps(
self._steps_per_epoch)
self._optimizer.build()
self._optimizer.set_grad_weights_dict(lr_mult)
self._train_op = self._optimizer.minimize(
loss=self._total_loss,
global_step=tf.compat.v1.train.get_global_step())[0]
def build(self):
"""Build the training and validation graph."""
self._build_distributed()
def train(self):
"""Run training."""
is_master = distribution.get_distributor().is_master()
if not is_master:
checkpoint_dir = None
checkpoint_path = None
else:
checkpoint_dir = self._checkpoint_dir
checkpoint_path = self._finetuning_config["checkpoint_path"]
mkdir_p(checkpoint_dir)
# TODO: tensorboard visualization of sample outputs at each stage
# TODO: CSV Logger like in Keras for epoch wise loss summary
# TODO: Add more log_tensors: stagewise_loss etc.
log_tensors = {
'step': tf.compat.v1.train.get_global_step(),
'loss': self._total_loss,
'epoch': tf.compat.v1.train.get_global_step() / self._steps_per_epoch}
serialization_listener = EpochModelSerializationListener(
checkpoint_dir=checkpoint_dir,
model=self._model,
key=self._key,
steps_per_epoch=self._steps_per_epoch,
max_to_keep=None
)
listeners = [serialization_listener]
common_hooks = get_common_training_hooks(
log_tensors=log_tensors,
log_every_n_secs=self._log_every_n_secs,
checkpoint_n_steps=self._checkpoint_n_epoch *
self._steps_per_epoch,
model=None,
last_step=self._last_step,
checkpoint_dir=checkpoint_dir,
scaffold=self.scaffold,
steps_per_epoch=self._steps_per_epoch,
summary_every_n_steps=self._summary_every_n_steps,
infrequent_summary_every_n_steps=0,
listeners=listeners,
key=self._key,
)
# Add hook to stop training if the loss becomes nan
self._hooks = self._hooks + [tf.train.NanTensorHook(
self._total_loss, fail_on_nan_loss=True
)]
if is_master:
self._hooks.append(TaskProgressMonitorHook(log_tensors,
checkpoint_dir,
self._num_epoch,
self._steps_per_epoch))
hooks = common_hooks + self._hooks
# If specific checkpoint path provided, then pick up the params from that
# Otherwise, use the latest checkpoint from the checkpoint dir
if self._finetuning_config["is_finetune_exp"]:
latest_step = get_step_from_ckzip(checkpoint_path)
if "epoch" in checkpoint_path:
latest_step *= self._steps_per_epoch
checkpoint_filename = get_tf_ckpt(checkpoint_path, self._key, latest_step)
else:
checkpoint_filename = get_latest_checkpoint(checkpoint_dir, self._key)
self.run_training_loop(
train_op=self._train_op,
hooks=hooks,
checkpoint_filename_with_path=checkpoint_filename
)
        # Once training is completed, copy the latest model to the weights directory
if is_master:
latest_tlt_model_path = get_latest_tlt_model(checkpoint_dir, extension=MODEL_EXTENSION)
if latest_tlt_model_path and os.path.exists(latest_tlt_model_path):
final_model_path = os.path.join(checkpoint_dir, self.final_model_name + MODEL_EXTENSION)
logger.info("Saving the final step model to {}".format(final_model_path))
shutil.copyfile(latest_tlt_model_path, final_model_path)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/trainers/bpnet_trainer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test BpNet Trainer."""
from collections import namedtuple
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import numpy as np
import tensorflow as tf
from nvidia_tao_tf1.core.utils import get_all_simple_values_from_event_file
from nvidia_tao_tf1.cv.bpnet.dataloaders.pose_config import BpNetPoseConfig
from nvidia_tao_tf1.cv.bpnet.dataloaders.processors.augmentation import AugmentationConfig
from nvidia_tao_tf1.cv.bpnet.learning_rate_schedules.exponential_decay_schedule import \
BpNetExponentialDecayLRSchedule
from nvidia_tao_tf1.cv.bpnet.losses.bpnet_loss import BpNetLoss
from nvidia_tao_tf1.cv.bpnet.models.bpnet_model import BpNetModel
from nvidia_tao_tf1.cv.bpnet.optimizers.weighted_momentum_optimizer import \
WeightedMomentumOptimizer
from nvidia_tao_tf1.cv.bpnet.trainers.bpnet_trainer import BpNetTrainer
BpData = namedtuple('BpData', ['images', 'masks', 'labels'])
class SyntheticDataloader:
def __init__(self, batch_size, image_shape, label_shape):
"""init funtion for Synthetic Dataloader
Args:
batch_size (int): batch size to use for training
image_shape (list): HWC ordering
label_shape (list): HWC ordering
"""
self.images = tf.convert_to_tensor(np.random.randn(
batch_size, image_shape[0], image_shape[1], image_shape[2]
),
dtype=tf.float32)
self.masks = tf.convert_to_tensor(np.random.randn(
batch_size, label_shape[0], label_shape[1], label_shape[2]
),
dtype=tf.float32)
self.labels = tf.convert_to_tensor(np.random.randn(
batch_size, label_shape[0], label_shape[1], label_shape[2]
),
dtype=tf.float32)
self.num_samples = batch_size
self.batch_size = batch_size
self.pose_config = create_pose()
def __call__(self):
return BpData(self.images, self.masks, self.labels)
def create_pose():
"""
Create bpnet pose config object.
Returns:
(BpNetPoseConfig)
"""
pose_config_root = "nvidia_tao_tf1/cv/bpnet/dataloaders"
pose_config_path = os.path.join(
pose_config_root,
"pose_configurations/bpnet_18joints.json"
)
pose_config_spec = {
'target_shape': [32, 32],
'pose_config_path': pose_config_path
}
return BpNetPoseConfig(**pose_config_spec)
def create_augmentation():
"""
Create bpnet augmentation config object.
Returns:
(AugmentationConfig)
"""
augmentation_config_spec = {'spatial_aug_params': {
'flip_lr_prob': 0.5,
'rotate_deg_max': 40.0,
'rotate_deg_min': 15.0,
'zoom_prob': 0.0,
'zoom_ratio_min': 0.5,
'zoom_ratio_max': 1.1,
'translate_max_x': 40.0,
'translate_min_x': 10,
'translate_max_y': 40.0,
'translate_min_y': 10,
'target_person_scale': 0.7},
'identity_spatial_aug_params': None,
'spatial_augmentation_mode': 'person_centric'
}
return AugmentationConfig(**augmentation_config_spec)
def create_optimizer():
"""
Create bpnet weighted momentum optimizer object.
Returns:
(WeightedMomentumOptimizer)
"""
learning_rate_spec = {
'learning_rate': 2e-5,
'decay_epochs': 17,
'decay_rate': 0.333,
'min_learning_rate': 8.18938e-08
}
lr_scheduler = BpNetExponentialDecayLRSchedule(**learning_rate_spec)
optimizer_spec = {
'learning_rate_schedule': lr_scheduler,
'grad_weights_dict': None,
'weight_default_value': 1.0,
'momentum': 0.9,
'use_nesterov': False
}
optimizer = WeightedMomentumOptimizer(**optimizer_spec)
return optimizer
def create_model():
"""
Create bpnet model object.
Returns:
(BpNetModel)
"""
backbone_attr = {
'architecture': 'vgg',
'mtype': 'default',
'use_bias': False
}
model_spec = {
'backbone_attributes': backbone_attr,
'stages': 3,
'heat_channels': 19,
'paf_channels': 38,
'use_self_attention': False,
'data_format': 'channels_last',
'use_bias': True,
'regularization_type': 'l2',
'kernel_regularization_factor': 5e-4,
'bias_regularization_factor': 0.0
}
return BpNetModel(**model_spec)
def create_trainer(checkpoint_dir):
"""
Create trainer object.
Args:
checkpoint_dir (str): folder path for model.
"""
optimizer = create_optimizer()
model = create_model()
dataloader = SyntheticDataloader(2, [256, 256, 3], [32, 32, 57])
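    # Label shape of 57 channels = 19 heatmap channels + 38 PAF channels
    # (matching heat_channels and paf_channels in create_model above).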
loss = BpNetLoss()
inference_spec = "nvidia_tao_tf1/cv/bpnet/experiment_specs/infer_default.yaml"
trainer_specs = {
'checkpoint_dir': checkpoint_dir,
'optimizer': optimizer,
'model': model,
'dataloader': dataloader,
'loss': loss,
'key': '0',
"inference_spec": inference_spec,
"num_epoch": 5
}
trainer = BpNetTrainer(**trainer_specs)
return trainer
def test_trainer_train(tmpdir):
"""Test whether trainer trains correctly."""
trainer = create_trainer(str(tmpdir))
trainer.build()
trainer.train()
train_op = trainer.train_op
assert train_op is not None
assert isinstance(train_op, tf.Operation)
tensorboard_log_dir = os.path.join(str(tmpdir), "events")
assert os.path.isdir(tensorboard_log_dir), (
f"Tensorboard log directory not found at {tensorboard_log_dir}"
)
values_dict = get_all_simple_values_from_event_file(tensorboard_log_dir)
loss_key = 'Loss/total_loss'
assert loss_key in values_dict.keys()
# Get loss values as a list for all steps.
loss_values = [loss_tuple[1] for loss_tuple in values_dict[loss_key].items()]
# Form a list to determine whether loss has decreased across each step.
is_loss_reduced = [loss_values[i] >= loss_values[i+1]
for i in range(len(loss_values)-1)]
loss_reduced_percentage = sum(is_loss_reduced) / len(is_loss_reduced)
assert loss_reduced_percentage >= 0.5
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/trainers/test_bpnet_trainer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BpNet Exponential Decay Learning Rate Schedule."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nvidia_tao_tf1.blocks.learning_rate_schedules.exponential_decay_schedule import (
ExponentialDecayLearningRateSchedule
)
from nvidia_tao_tf1.core.coreobject import save_args
class BpNetExponentialDecayLRSchedule(ExponentialDecayLearningRateSchedule):
"""BpNetExponentialDecayLRSchedule class.
    Derived from ExponentialDecayLearningRateSchedule to accommodate the
    option to use `decay_epochs` instead of (or in addition to) `decay_steps`.
    This avoids having to manually calculate `decay_steps` for different
    settings such as multi-GPU training, dataset sizes, batch sizes, etc.
"""
@save_args
def __init__(self,
decay_epochs,
decay_steps=None,
**kwargs):
"""__init__ method.
decayed_learning_rate = learning_rate *
decay_rate ^ (global_step / decay_steps)
Args:
decay_epochs (int): number of epochs before next decay.
decay_steps (int): number of steps before next decay.
"""
super(BpNetExponentialDecayLRSchedule, self).__init__(decay_steps=decay_steps,
**kwargs)
self._decay_epochs = decay_epochs
def update_decay_steps(self, steps_per_epoch):
"""Update the decay steps using decay_epochs and steps_per_epoch."""
self._decay_steps = self._decay_epochs * steps_per_epoch
print("Decay Steps: {}".format(self._decay_steps))
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/learning_rate_schedules/exponential_decay_schedule.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BpNet Learning Rate Schedules."""
from nvidia_tao_tf1.cv.bpnet.learning_rate_schedules.exponential_decay_schedule import (
BpNetExponentialDecayLRSchedule,
)
__all__ = (
'BpNetExponentialDecayLRSchedule',
)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/learning_rate_schedules/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pytest
import tensorflow as tf
from nvidia_tao_tf1.cv.bpnet.learning_rate_schedules.exponential_decay_schedule import (
BpNetExponentialDecayLRSchedule,
)
STEPS_PER_EPOCH = 25
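# With decay_epochs=1 the schedule uses decay_steps = 1 * STEPS_PER_EPOCH = 25, so in the
# continuous (non-staircase) case lr = learning_rate * decay_rate ** (step / 25);
# e.g. after ~50 steps (2 epochs), lr = 1.0 * 0.1 ** 2 = 0.01, as encoded in the
# parametrized expected_values below.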
@pytest.mark.parametrize(
"learning_rate, decay_epochs, decay_rate, staircase, min_learning_rate,"
"expected_steps, expected_values",
[
(
1.0,
1,
0.1,
False,
0.0,
[26, 51, 71, 101],
[0.1, 0.01, 0.0015848934, 0.0001],
),
(
1.0,
1,
0.1,
False,
0.0005,
[26, 51, 71, 101],
[0.1, 0.01, 0.0015848934, 0.0005],
),
(1.0, 1, 0.1, True, 0.0, [26, 51, 71, 101], [0.1, 0.01, 0.01, 0.0001]),
(1.0, 1, 0.1, True, 0.0005, [26, 51, 71, 101], [0.1, 0.01, 0.01, 0.0005]),
],
)
def test_exponential_decay_schedule(
learning_rate,
decay_epochs,
decay_rate,
staircase,
min_learning_rate,
expected_steps,
expected_values,
):
"""
Test ExponentialDecayLearningRateSchedule class.
Args:
learning_rate (float): initial learning rate to be used.
decay_epochs (int): number of epochs before next decay.
decay_rate (float): the decay rate.
staircase (bool): whether to apply decay in a discrete staircase as opposed to
continuous fashion.
min_learning_rate (float): the minimum learning rate to be used.
expected_steps (list): the steps over which the lr decay is evaluated.
expected_values (list): the expected learning rates at the expected_steps.
"""
global_step_tensor = tf.compat.v1.train.get_or_create_global_step()
increment_global_step_op = tf.compat.v1.assign(
global_step_tensor, global_step_tensor + 1
)
initializer = tf.compat.v1.global_variables_initializer()
schedule = BpNetExponentialDecayLRSchedule(
decay_epochs,
learning_rate=learning_rate,
decay_rate=decay_rate,
staircase=staircase,
min_learning_rate=min_learning_rate
)
# update the decay_steps
schedule.update_decay_steps(STEPS_PER_EPOCH)
lr_tensor = schedule.get_tensor()
result_values = []
sess = tf.compat.v1.Session()
with sess.as_default():
sess.run(initializer)
for _ in range(110):
global_step, lr = sess.run([increment_global_step_op, lr_tensor])
if global_step in expected_steps:
result_values.append(lr)
assert np.allclose(result_values, expected_values)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/learning_rate_schedules/test_exponential_decay_schedule.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BpNet Scripts."""
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/scripts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Export Keras model to etlt format."""
import argparse
import logging
import os
from nvidia_tao_tf1.cv.bpnet.exporter.bpnet_exporter import BpNetExporter
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
import nvidia_tao_tf1.cv.common.utilities.path_processing as io_utils
logger = logging.getLogger(__name__)
DEFAULT_MAX_WORKSPACE_SIZE = 2 * (1 << 30)
DEFAULT_MAX_BATCH_SIZE = 1
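# Example invocation (paths below are illustrative):
#   python export.py -m /path/to/bpnet_model.tlt -k $KEY \
#       -o /path/to/bpnet_model.onnx --backend onnx --data_type fp32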
def build_command_line_parser(parser=None):
"""Simple function to parse arguments."""
if parser is None:
parser = argparse.ArgumentParser(description='Export a Bpnet TLT model.')
parser.add_argument("-m",
"--model",
help="Path to the model file.",
type=str,
required=True,
default=None)
parser.add_argument("-k",
"--key",
help="Key to load the model.",
type=str,
required=False,
default="")
parser.add_argument("-o",
"--output_file",
type=str,
default=None,
help="Output file (defaults to $(input_filename).etlt)")
parser.add_argument("--force_ptq",
action="store_true",
default=False,
help="Flag to force post training quantization for QAT models.")
# Int8 calibration arguments.
parser.add_argument("--cal_data_file",
default="",
type=str,
help="Tensorfile to run calibration for int8 optimization.")
parser.add_argument("--cal_image_dir",
default="",
type=str,
help="Directory of images to run int8 calibration if "
"data file is unavailable")
parser.add_argument("--data_type",
type=str,
default="fp32",
help="Data type for the TensorRT export.",
choices=["fp32", "fp16", "int8"])
parser.add_argument("-s",
"--strict_type_constraints",
action="store_true",
default=False,
help="Apply TensorRT strict_type_constraints or not for INT8 mode.")
parser.add_argument('--cal_cache_file',
default='./cal.bin',
type=str,
help='Calibration cache file to write to.')
parser.add_argument("--batches",
type=int,
default=10,
help="Number of batches to calibrate over.")
parser.add_argument("--max_workspace_size",
type=int,
default=DEFAULT_MAX_WORKSPACE_SIZE,
help="Max size of workspace to be set for TensorRT engine builder.")
parser.add_argument("--max_batch_size",
type=int,
default=DEFAULT_MAX_BATCH_SIZE,
help="Max batch size for TensorRT engine builder.")
parser.add_argument("--batch_size",
type=int,
default=1,
help="Number of images per batch.")
parser.add_argument("-e",
"--experiment_spec",
type=str,
default=None,
help="Path to the experiment spec file.")
parser.add_argument("--engine_file",
type=str,
default=None,
help="Path to the exported TRT engine.")
parser.add_argument("--static_batch_size",
type=int,
default=-1,
help="Set a static batch size for exported etlt model. \
Default is -1(dynamic batch size).")
parser.add_argument("-v",
"--verbose",
action="store_true",
default=False,
help="Verbosity of the logger.")
parser.add_argument('-d',
'--input_dims',
type=str,
default='256,256,3',
help='Input dims: channels_first(CHW) or channels_last (HWC).')
parser.add_argument('--sdk_compatible_model',
action='store_true',
help='Generate SDK (TLT CV Infer / DS) compatible model.')
parser.add_argument('-u',
'--upsample_ratio',
type=int,
default=4,
help='[NMS][CustomLayers] Upsampling factor.')
parser.add_argument('-i',
'--data_format',
choices=['channels_last', 'channels_first'],
type=str,
default='channels_last',
help='Channel Ordering, channels_first(NCHW) or channels_last (NHWC).')
parser.add_argument('-t',
'--backend',
choices=['onnx', 'uff', 'tfonnx'],
type=str,
default='onnx',
help="Model type to export to.")
parser.add_argument('--opt_batch_size',
type=int,
default=1,
help="Optimium batch size to use for int8 calibration.")
parser.add_argument('-r',
'--results_dir',
type=str,
default=None,
help='Path to a folder where experiment outputs will be created, \
or specify in spec file.')
return parser
def parse_command_line(args=None):
"""Simple function to parse command line arguments."""
parser = build_command_line_parser()
return vars(parser.parse_known_args(args)[0])
def run_export(Exporter, args):
"""Wrapper to run export of tlt models.
Args:
        Exporter (class): The exporter class to instantiate and run.
        args (dict): Dictionary of parsed arguments to run export. The export
            backend ('uff', 'onnx' or 'tfonnx') is read from args['backend'].
Returns:
No explicit returns.
"""
# Parsing command line arguments.
model_path = args['model']
key = args['key']
# Calibrator configuration.
cal_cache_file = args['cal_cache_file']
cal_image_dir = args['cal_image_dir']
cal_data_file = args['cal_data_file']
batch_size = args['batch_size']
n_batches = args['batches']
data_type = args['data_type']
strict_type = args['strict_type_constraints']
output_file = args['output_file']
experiment_spec = args['experiment_spec']
engine_file_name = args['engine_file']
max_workspace_size = args['max_workspace_size']
max_batch_size = args['max_batch_size']
static_batch_size = args['static_batch_size']
opt_batch_size = args['opt_batch_size']
force_ptq = args['force_ptq']
sdk_compatible_model = args['sdk_compatible_model']
upsample_ratio = args['upsample_ratio']
data_format = args['data_format']
backend = args['backend']
results_dir = args['results_dir']
input_dims = [int(i) for i in args["input_dims"].split(',')]
assert len(input_dims) == 3, "Input dims need to have three values."
save_engine = False
if engine_file_name is not None:
save_engine = True
# Make results dir if it doesn't already exist
if results_dir:
io_utils.mkdir_p(results_dir)
# Writing out status file for TAO.
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=True,
verbosity=1,
append=True
)
)
status_logging.get_status_logger().write(
data=None,
status_level=status_logging.Status.STARTED,
message="Starting export."
)
log_level = "INFO"
if args['verbose']:
log_level = "DEBUG"
# Configure the logger.
logging.basicConfig(
format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level=log_level
)
# Set default output filename if the filename
# isn't provided over the command line.
output_extension = backend
if backend in ["onnx", "tfonnx"]:
output_extension = "onnx"
if output_file is None:
split_name = os.path.splitext(model_path)[0]
output_file = f"{split_name}.{output_extension}"
if not output_file.endswith(output_extension):
output_file = f"{output_file}.{output_extension}"
logger.info("Saving exported model to {}".format(output_file))
# Warn the user if an exported file already exists.
assert not os.path.exists(output_file), "Default output file {} already "\
"exists".format(output_file)
# Make an output directory if necessary.
output_root = os.path.dirname(os.path.realpath(output_file))
if not os.path.exists(output_root):
os.makedirs(output_root)
# Build exporter instance
exporter = Exporter(model_path, key,
backend=backend,
experiment_spec_path=experiment_spec,
data_type=data_type,
strict_type=strict_type,
data_format=data_format)
# Export the model to etlt file and build the TRT engine.
exporter.export(input_dims,
output_file,
backend,
data_file_name=cal_data_file,
calibration_cache=os.path.realpath(cal_cache_file),
n_batches=n_batches,
batch_size=batch_size,
save_engine=save_engine,
engine_file_name=engine_file_name,
calibration_images_dir=cal_image_dir,
max_batch_size=max_batch_size,
static_batch_size=static_batch_size,
max_workspace_size=max_workspace_size,
force_ptq=force_ptq,
sdk_compatible_model=sdk_compatible_model,
upsample_ratio=upsample_ratio,
opt_batch_size=opt_batch_size)
def main(cl_args=None):
"""Run exporting."""
try:
args = parse_command_line(cl_args)
run_export(BpNetExporter, args)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Export finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Export was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/scripts/export.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command line interface for converting pose datasets to TFRecords."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import logging
import os
import yaml
from nvidia_tao_tf1.cv.bpnet.dataio.build_converter import build_converter
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
import nvidia_tao_tf1.cv.common.utilities.path_processing as io_utils
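# Example invocation (paths are illustrative):
#   python dataset_convert.py -d /path/to/coco_spec.json -o /path/to/output.tfrecords \
#       -m train --generate_masks --results_dir /path/to/results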
def build_command_line_parser(parser=None):
"""
    Build the command line parser for converting a pose dataset to TFRecords.
Args:
parser (subparser): Provided from the wrapper script to build a chained
parser mechanism.
Returns:
parser
"""
if parser is None:
parser = argparse.ArgumentParser(prog='dataset_convert',
description='Convert pose datasets to TFRecords')
parser.add_argument(
'-d',
'--dataset_spec',
required=True,
help='Path to the dataset spec containing config for exporting .tfrecords.')
parser.add_argument(
'-o',
'--output_filename',
required=True,
help='Output file name.')
parser.add_argument(
'-m',
'--mode',
required=False,
default='train',
help='Converter mode: train/test.')
parser.add_argument(
'-p',
'--num_partitions',
type=int,
required=False,
default=1,
help='Number of partitions (folds).')
parser.add_argument(
'-s',
'--num_shards',
type=int,
required=False,
default=0,
help='Number of shards.')
parser.add_argument(
'--generate_masks',
action='store_true',
help='Generate and save masks of regions with unlabeled people - used for training.')
parser.add_argument(
'--check_files',
action='store_true',
help='Check if the files including images and masks exist in the given root data dir.')
parser.add_argument(
'-r',
'--results_dir',
type=str,
default=None,
help='Path to a folder where experiment outputs will be created, or specify in spec file.')
return parser
def parse_command_line_args(cl_args=None):
"""Parser command line arguments to the trainer.
Args:
cl_args (list): List of strings used as command line arguments.
Returns:
args_parsed: Parsed arguments.
"""
parser = build_command_line_parser()
args = parser.parse_args(cl_args)
return args
def main(cl_args=None):
"""Generate tfrecords based on user arguments.
Args:
args(list): list of arguments to be parsed if called from another module.
"""
args = parse_command_line_args(cl_args)
results_dir = args.results_dir
# Make results dir if it doesn't already exist
if results_dir:
io_utils.mkdir_p(results_dir)
# Writing out status file for TAO.
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=True,
verbosity=1,
append=True
)
)
status_logging.get_status_logger().write(
data=None,
status_level=status_logging.Status.STARTED,
message="Starting Dataset convert."
)
# Load config file
if args.dataset_spec.endswith(".json"):
with open(args.dataset_spec, "r") as f:
dataset_spec = json.load(f)
elif args.dataset_spec.endswith(".yaml"):
with open(args.dataset_spec, 'r') as f:
dataset_spec = yaml.load(f.read())
else:
raise ValueError("Experiment spec file extension not supported.")
converter = build_converter(
dataset_spec,
args.output_filename,
mode=args.mode,
num_partitions=args.num_partitions,
num_shards=args.num_shards,
generate_masks=args.generate_masks,
check_if_images_and_masks_exist=args.check_files)
converter.convert()
if __name__ == '__main__':
try:
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
main()
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Dataset convert finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Dataset convert was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/scripts/dataset_convert.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BpNet training script."""
import argparse
import logging
import os
import tensorflow as tf
import yaml
import nvidia_tao_tf1.core as tao_core
from nvidia_tao_tf1.core import distribution
import nvidia_tao_tf1.cv.bpnet # noqa # pylint: disable=unused-import
from nvidia_tao_tf1.cv.bpnet.dataio.coco_dataset import COCODataset
from nvidia_tao_tf1.cv.bpnet.trainers.bpnet_trainer import MODEL_EXTENSION
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
import nvidia_tao_tf1.cv.common.utilities.path_processing as io_utils
from nvidia_tao_tf1.cv.common.utilities.path_processing import mkdir_p
from nvidia_tao_tf1.cv.common.utilities.tlt_utils import get_latest_tlt_model
formatter = logging.Formatter(
"%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s") # noqa
handler = logging.StreamHandler() # noqa
handler.setFormatter(formatter) # noqa
logging.basicConfig(
level='INFO'
) # noqa
# Replace existing handlers with ours to avoid duplicate messages.
logging.getLogger().handlers = [] # noqa
logging.getLogger().addHandler(handler) # noqa
logger = logging.getLogger(__name__)
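# Example invocation (paths are illustrative):
#   python train.py -e nvidia_tao_tf1/cv/bpnet/experiment_specs/experiment_spec.yaml \
#       -r /path/to/results -k $KEY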
def build_command_line_parser(parser=None):
"""
Parse command-line flags passed to the training script.
Args:
args (list of str): Command line arguments list.
Returns:
Namespace with members for all parsed arguments.
"""
if parser is None:
parser = argparse.ArgumentParser(prog='train', description='Run BpNet train.')
parser.add_argument(
'-e',
'--experiment_spec_file',
type=str,
default='nvidia_tao_tf1/cv/bpnet/experiment_specs/experiment_spec.yaml',
help='Path to a single file containing a complete experiment spec.')
parser.add_argument(
'-r',
'--results_dir',
type=str,
default=None,
help='Path to a folder where experiment outputs will be created, or specify in spec file.')
parser.add_argument(
'-ll',
'--log_level',
type=str,
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
default='INFO',
help='Set logging level.')
parser.add_argument(
'-k',
'--key',
default="",
type=str,
required=False,
        help='The key to load pretrained weights and save intermediate snapshots and final model.'
)
return parser
def parse_command_line(args=None):
"""Simple function to parse command line arguments.
Args:
args (list): List of strings used as command line arguments.
If None, sys.argv is used.
Returns:
args_parsed: Parsed arguments.
"""
parser = build_command_line_parser()
args_parsed = parser.parse_args(args)
return args_parsed
def main(cl_args=None):
"""Launch the training process."""
tf.logging.set_verbosity(tf.logging.INFO)
args = parse_command_line(cl_args)
config_path = args.experiment_spec_file
results_dir = args.results_dir
key = args.key
# Build logger file.
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s')
logger = logging.getLogger(__name__)
logger_tf = logging.getLogger('tensorflow')
# If not on DEBUG, set logging level to 'WARNING' to suppress outputs from other processes.
level = 'DEBUG' if args.log_level == 'DEBUG' else 'WARNING'
logger.setLevel(level)
# Load experiment spec.
if not os.path.isfile(config_path):
raise ValueError("Experiment spec file cannot be found.")
with open(config_path, 'r') as yaml_file:
spec = yaml.load(yaml_file.read())
# Build the model saving directory.
if results_dir is not None:
spec['checkpoint_dir'] = results_dir
elif spec['checkpoint_dir']:
results_dir = spec['checkpoint_dir']
else:
        raise ValueError('Checkpoint directory not specified, please specify it through -r or '
                         'through the checkpoint_dir field in your model config.')
mkdir_p(results_dir)
# Add key
if key is not None:
spec['key'] = key
distribution.set_distributor(distribution.HorovodDistributor())
is_master = distribution.get_distributor().is_master()
if is_master:
logger.setLevel(args.log_level)
logger_tf.setLevel(args.log_level)
# Writing out status file for TAO.
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=is_master,
verbosity=1,
append=True
)
)
# Build trainer from spec.
trainer = tao_core.coreobject.deserialize_tao_object(spec)
logger.info('done')
trainer.build()
logger.info('training')
status_logging.get_status_logger().write(
data=None,
status_level=status_logging.Status.STARTED,
message="Starting BPnet training."
)
trainer.train()
logger.info('Training has finished...')
status_logging.get_status_logger().write(
data=None,
status_level=status_logging.Status.RUNNING,
message="BPnet training loop finished."
)
# Save the training spec in the results directory.
if is_master:
trainer.to_yaml(os.path.join(results_dir, 'experiment_spec.yaml'))
status_logging.get_status_logger().write(
data=None,
status_level=status_logging.Status.SUCCESS,
message="BPnet training experimenent finished successfully."
)
if __name__ == "__main__":
try:
main()
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Training was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/scripts/train.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BpNet pruning wrapper."""
import argparse
import logging
import os
from nvidia_tao_tf1.core.pruning.pruning import prune
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
import nvidia_tao_tf1.cv.common.utilities.path_processing as io_utils
from nvidia_tao_tf1.cv.common.utilities.tlt_utils import model_io
logger = logging.getLogger(__name__)
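# Example invocation (paths and threshold are illustrative):
#   python prune.py -m /path/to/bpnet_model.tlt -o /path/to/bpnet_pruned.hdf5 \
#       -k $KEY -pth 0.05 -eq union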
def build_command_line_parser(parser=None):
"""Build the command line parser using argparse.
Args:
parser (subparser): Provided from the wrapper script to build a chained
parser mechanism.
Returns:
parser
"""
if parser is None:
parser = argparse.ArgumentParser(description="Run BpNet pruning.")
parser.add_argument("-m",
"--model",
type=str,
help="Path to the target model for pruning",
required=True,
default=None)
parser.add_argument("-o",
"--output_file",
type=str,
help="Output file path for pruned model",
required=True,
default=None)
parser.add_argument('-k',
'--key',
required=False,
default="",
type=str,
help='Key to load a .tlt model')
parser.add_argument('-n',
'--normalizer',
type=str,
default='max',
help="`max` to normalize by dividing each norm by the \
maximum norm within a layer; `L2` to normalize by \
dividing by the L2 norm of the vector comprising all \
kernel norms. (default: `max`)")
parser.add_argument('-eq',
'--equalization_criterion',
type=str,
default='union',
help="Criteria to equalize the stats of inputs to an \
element wise op layer. Options are \
[arithmetic_mean, geometric_mean, union, \
intersection]. (default: `union`)")
parser.add_argument("-pg",
"--pruning_granularity",
type=int,
help="Pruning granularity: number of filters to remove \
at a time. (default:8)",
default=8)
parser.add_argument("-pth",
"--pruning_threshold",
type=float,
help="Threshold to compare normalized norm against \
(default:0.1)", default=0.1)
parser.add_argument("-nf",
"--min_num_filters",
type=int,
help="Minimum number of filters to keep per layer. \
(default:16)", default=16)
parser.add_argument("-el",
"--excluded_layers", action='store',
type=str, nargs='*',
help="List of excluded_layers. Examples: -i item1 \
item2", default=[])
parser.add_argument("-v",
"--verbose",
action='store_true',
help="Include this flag in command line invocation for \
verbose logs.")
parser.add_argument('-r',
'--results_dir',
type=str,
default=None,
help='Path to a folder where experiment outputs will be created, \
or specify in spec file.')
return parser
def parse_command_line(args):
"""Simple function to parse command line arguments."""
parser = build_command_line_parser()
return parser.parse_args(args)
def run_pruning(args=None):
"""Prune an encrypted Keras model."""
results_dir = args.results_dir
output_file = args.output_file
# Make results dir if it doesn't already exist
if results_dir:
io_utils.mkdir_p(results_dir)
# Writing out status file for TAO.
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=True,
verbosity=1,
append=True
)
)
status_logging.get_status_logger().write(
data=None,
status_level=status_logging.Status.STARTED,
message="Starting pruning."
)
# Set up logger verbosity.
verbosity = 'INFO'
if args.verbose:
verbosity = 'DEBUG'
# Configure the logger.
logging.basicConfig(
format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level=verbosity)
assert args.equalization_criterion in \
['arithmetic_mean', 'geometric_mean', 'union', 'intersection'], \
"Equalization criterion are [arithmetic_mean, geometric_mean, union, \
intersection]."
assert args.normalizer in ['L2', 'max'], \
"normalizer options are [L2, max]."
final_model = model_io(args.model, enc_key=args.key)
# Fix bug 3869039
try:
final_model = final_model.get_layer("model_1")
except Exception:
pass
# Make results dir if it doesn't already exist
if not os.path.exists(os.path.dirname(output_file)):
io_utils.mkdir_p(os.path.dirname(output_file))
# TODO: Set shape and print summary to understand
# the reduction in channels after pruning
# logger.info("Unpruned model summary")
# final_model.summary() # Disabled for TLT release
# Printing out the loaded model summary
force_excluded_layers = []
force_excluded_layers += final_model.output_names
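    # Output layers are always excluded from pruning so the pruned model
    # keeps its original output heads.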
# Pruning trained model
pruned_model = prune(
model=final_model,
method='min_weight',
normalizer=args.normalizer,
criterion='L2',
granularity=args.pruning_granularity,
min_num_filters=args.min_num_filters,
threshold=args.pruning_threshold,
equalization_criterion=args.equalization_criterion,
excluded_layers=args.excluded_layers + force_excluded_layers,
output_layers_with_outbound_nodes=force_excluded_layers)
# logger.info("Model summary of the pruned model")
# pruned_model.summary() # Disabled for TLT release
logger.info("Number of params in original model): {}".format(
final_model.count_params()))
logger.info("Number of params in pruned model): {}".format(
pruned_model.count_params()))
logger.info("Pruning ratio (pruned model / original model): {}".format(
pruned_model.count_params() / final_model.count_params()))
# Save the encrypted pruned model
if not output_file.endswith(".hdf5"):
output_file = f"{output_file}.hdf5"
# Save decrypted pruned model.
pruned_model.save(output_file, overwrite=True)
def main(args=None):
"""Wrapper function for pruning."""
try:
# parse command line
args = parse_command_line(args)
run_pruning(args)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Pruning finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Pruning was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/scripts/prune.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Export Keras model to other formats."""
import argparse
import logging
import os
import keras
from keras import backend as K
from nvidia_tao_tf1.core.export import keras_to_caffe, keras_to_onnx, keras_to_uff
import nvidia_tao_tf1.cv.bpnet.utils.export_utils as export_utils
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.common.utilities.export_utils import convertKeras2TFONNX
import nvidia_tao_tf1.cv.common.utilities.path_processing as io_utils
def build_command_line_parser(parser=None):
"""Build the command line parser using argparse.
Args:
parser (subparser): Provided from the wrapper script to build a chained
parser mechanism.
Returns:
parser
"""
if parser is None:
parser = argparse.ArgumentParser(prog='export', description='Encrypted UFF exporter.')
parser.add_argument(
'-m',
'--model_filename',
type=str,
required=True,
default=None,
help="Absolute path to Keras model file \
(could be .h5, .hdf5 format).")
parser.add_argument('-o',
'--output_filename',
required=False,
type=str,
default=None,
help='Path to the output file (without extension).')
parser.add_argument(
'-t',
'--export_type',
choices=['onnx', 'tfonnx', 'uff', 'caffe'],
type=str,
default='uff',
help="Model type to export to."
)
parser.add_argument(
'-ll',
'--log_level',
type=str,
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
default='INFO',
help='Set logging level.'
)
parser.add_argument(
'--sdk_compatible_model',
action='store_true',
help='Generate SDK compatible model.'
)
parser.add_argument(
'-ur',
'--upsample_ratio',
type=int,
default=4,
help='[NMS][CustomLayers] Upsampling factor.'
)
parser.add_argument(
'-df',
'--data_format',
type=str,
default='channels_last',
help='Channel Ordering, channels_first(NCHW) or channels_last (NHWC).'
)
parser.add_argument(
'-s',
'--target_opset',
required=False,
type=int,
default=10,
help='Target opset version to use for onnx conversion.'
)
parser.add_argument(
'-r',
'--results_dir',
type=str,
default=None,
help='Path to a folder where experiment outputs will be created, \
or specify in spec file.')
return parser
def parse_command_line(args=None):
"""Simple function to parse command line arguments.
Args:
args (list): List of strings used as command line arguments.
If None, sys.argv is used.
Returns:
args_parsed: Parsed arguments.
"""
parser = build_command_line_parser()
args_parsed = parser.parse_args(args)
return args_parsed
def main(cl_args=None):
"""Run exporting."""
args_parsed = parse_command_line(cl_args)
results_dir = args_parsed.results_dir
# Make results dir if it doesn't already exist
if results_dir:
io_utils.mkdir_p(results_dir)
# Writing out status file for TAO.
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=True,
verbosity=1,
append=True
)
)
status_logging.get_status_logger().write(
data=None,
status_level=status_logging.Status.STARTED,
message="Starting export."
)
model_name = args_parsed.model_filename
if args_parsed.output_filename is None:
output_filename_noext = model_name
else:
output_filename_noext = args_parsed.output_filename
target_opset = args_parsed.target_opset
# Build logger file.
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s')
logger = logging.getLogger(__name__)
logger.setLevel(args_parsed.log_level)
# Set channels ordering in keras backend
K.set_image_data_format(args_parsed.data_format)
# Load Keras model from file.
model = keras.models.load_model(model_name)
logger.info('model summary:')
model.summary()
model, custom_objects = export_utils.update_model(
model,
sdk_compatible_model=args_parsed.sdk_compatible_model,
upsample_ratio=args_parsed.upsample_ratio
)
# Export to UFF.
if args_parsed.export_type == 'uff':
output_filename = output_filename_noext + '.uff'
_, out_tensor_name, _ = keras_to_uff(model,
output_filename,
None,
custom_objects=custom_objects)
logger.info('Output tensor names are: ' + ', '.join(out_tensor_name))
# Export to Caffe.
if args_parsed.export_type == 'caffe':
output_filename = output_filename_noext + '.caffe'
prototxt_filename = output_filename_noext + '.proto'
_, out_tensor_name = keras_to_caffe(
model,
prototxt_filename,
output_filename,
output_node_names=None)
logger.info('Output tensor names are: ' + ', '.join(out_tensor_name))
# Export to onnx
if args_parsed.export_type == 'onnx':
output_filename = output_filename_noext + '.onnx'
(in_tensor, out_tensor, in_tensor_shape) = \
keras_to_onnx(
model,
output_filename,
custom_objects=custom_objects,
target_opset=target_opset)
logger.info('In: "%s" dimension of %s Out "%s"' % (in_tensor, in_tensor_shape, out_tensor))
# Export through keras->tf->onnx path
if args_parsed.export_type == 'tfonnx':
        # Create frozen graph as .pb file and convert it to ONNX.
convertKeras2TFONNX(model,
output_filename_noext,
output_node_names=None,
target_opset=target_opset,
custom_objects=custom_objects,
logger=logger)
if __name__ == "__main__":
try:
main()
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Export finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Export was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/scripts/export_keras.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BpNet inference script."""
import argparse
import logging
import os
from nvidia_tao_tf1.cv.bpnet.inferencer.bpnet_inferencer import BpNetInferencer
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
import nvidia_tao_tf1.cv.common.utilities.path_processing as io_utils
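# Example invocation (paths are illustrative):
#   python inference.py -i /path/to/infer_spec.yaml -m /path/to/bpnet_model.tlt -k $KEY \
#       --input_type dir --input /path/to/images --results_dir /path/to/results \
#       --dump_visualizations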
def build_command_line_parser(parser=None):
"""Build the command line parser using argparse.
Args:
parser (subparser): Provided from the wrapper script to build a chained
parser mechanism.
Returns:
parser
"""
if parser is None:
parser = argparse.ArgumentParser(prog='infer', description='Run BpNet inference.')
parser.add_argument(
'-i',
'--inference_spec',
type=str,
default='nvidia_tao_tf1/cv/bpnet/experiment_specs/infer_default.yaml',
help='Path to inference spec.'
)
parser.add_argument(
'-m',
'--model_filename',
type=str,
required=False,
default=None,
help='Path to model file to use for inference. If None, \
the model path from the inference spec will be used.'
)
parser.add_argument(
'--input_type',
default="image",
type=str,
choices=["image", "dir", "json"],
help='Input type that you want to specify.'
)
parser.add_argument(
'--input',
default=None,
type=str,
help='Path to image / dir / json to run inference on.'
)
parser.add_argument(
'--image_root_path',
default='',
type=str,
help='Root dir path to image(s). If specified, \
image paths are assumed to be relative to this.'
)
parser.add_argument(
'-k',
'--key',
default="",
type=str,
required=False,
help="The API key to decrypt the model."
)
parser.add_argument(
'--results_dir',
default=None,
type=str,
help='Results directory for inferences. Inference result visualizations \
will be dumped in this directory if --dump_visualizations is set.',
)
parser.add_argument(
'--dump_visualizations',
action='store_true',
default=False,
help='If enabled, saves images with inference visualization to \
`results/images_annotated` directory.'
)
parser.add_argument(
'-ll',
'--log_level',
type=str,
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
default='INFO',
help='Set logging level.')
return parser
def parse_command_line_args(cl_args=None):
"""Parser command line arguments to the trainer.
Args:
cl_args(sys.argv[1:]): Arg from the command line.
Returns:
args: Parsed arguments using argparse.
"""
parser = build_command_line_parser(parser=None)
args = parser.parse_args(cl_args)
return args
def main(cl_args=None):
"""Launch the model inference process."""
args = parse_command_line_args(cl_args)
enc_key = args.key
inference_spec_path = args.inference_spec
model_filename = args.model_filename
input_type = args.input_type
_input = args.input
image_root_path = args.image_root_path
results_dir = args.results_dir
dump_visualizations = args.dump_visualizations
# Make results dir if it doesn't already exist
if results_dir:
io_utils.mkdir_p(results_dir)
# Writing out status file for TAO.
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=True,
verbosity=1,
append=True
)
)
status_logging.get_status_logger().write(
data=None,
status_level=status_logging.Status.STARTED,
message="Starting inference."
)
# Init logging
# tf.logging.set_verbosity(tf.logging.INFO)
logging.basicConfig(
format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s')
logger = logging.getLogger(__name__)
logger_tf = logging.getLogger('tensorflow')
logger.setLevel(args.log_level)
logger_tf.setLevel(args.log_level)
# Read input
if input_type == "image":
logger.info("Reading from image: {}".format(_input))
io_utils.check_file(_input)
data = [_input]
elif input_type == "dir":
io_utils.check_dir(_input)
logger.info("Reading from directory: {}".format(_input))
data = os.listdir(_input)
image_root_path = _input
elif input_type == "json":
io_utils.check_file(_input)
logger.info("Reading from json file: {}".format(_input))
data = io_utils.load_json_file(_input)
else:
raise ValueError("Unsupported input type: {}".format(input_type))
# Load inference spec file
inference_spec = io_utils.load_yaml_file(inference_spec_path)
    # Load the train/experiment spec file referenced by the inference spec
experiment_spec = io_utils.load_yaml_file(inference_spec['train_spec'])
# Enforce results_dir value if dump_visualizations is true.
if dump_visualizations and not results_dir:
raise ValueError("--results_dir must be specified if dumping visualizations.")
# Load model
if model_filename is None:
model_full_path = inference_spec['model_path']
logger.warning("No model provided! Using model_path from inference spec file.")
else:
model_full_path = model_filename
logger.info("Loading {} for inference.".format(model_full_path))
# logger.info(model.summary()) # Disabled for TLT
# Initialize BpNetInferencer
inferencer = BpNetInferencer(
model_full_path,
inference_spec,
experiment_spec,
key=enc_key
)
# Run inference
inferencer.run(
data,
results_dir=results_dir,
image_root_path=image_root_path,
dump_visualizations=dump_visualizations
)
if __name__ == "__main__":
try:
main()
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Inference finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Inference was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
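# Illustrative usage (not part of the original script): assuming a trained
# .tlt model and a directory of test images, inference could be launched
# roughly as follows; every path and the $KEY value are placeholders.
#
#   python nvidia_tao_tf1/cv/bpnet/scripts/inference.py \
#       --inference_spec nvidia_tao_tf1/cv/bpnet/experiment_specs/infer_default.yaml \
#       -m /workspace/models/bpnet_model.tlt \
#       --input_type dir \
#       --input /workspace/data/test_images \
#       --results_dir /workspace/results \
#       --dump_visualizations \
#       -k $KEY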
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/scripts/inference.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BpNet evaluation script."""
import argparse
import logging
import os
from nvidia_tao_tf1.cv.bpnet.dataio.coco_dataset import COCODataset
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
import nvidia_tao_tf1.cv.common.utilities.path_processing as io_utils
def build_command_line_parser(parser=None):
"""Build the command line parser using argparse.
Args:
parser (subparser): Provided from the wrapper script to build a chained
parser mechanism.
Returns:
parser
"""
if parser is None:
parser = argparse.ArgumentParser(prog='evaluate', description='Run BpNet evaluation.')
parser.add_argument(
'-i',
'--inference_spec',
type=str,
default='nvidia_tao_tf1/cv/bpnet/experiment_specs/infer_default.yaml',
help='Path to inference spec.'
)
parser.add_argument(
'-m',
'--model_filename',
type=str,
required=False,
default=None,
help='Path to model file to use for evaluation. If None, \
the model path from the inference spec will be used.'
)
parser.add_argument(
'--dataset',
default="coco",
type=str,
choices=["coco"],
help='Dataset to run evaluation on.'
)
parser.add_argument(
'-d',
'--dataset_spec',
required=True,
help='Path to the dataset spec.'
)
parser.add_argument(
'-k',
'--key',
default="",
type=str,
required=False,
help="The API key to decrypt the model."
)
parser.add_argument(
'--results_dir',
default='/workspace/tlt-experiments/bpnet/',
type=str,
help='Results directory',
)
parser.add_argument(
'-ll',
'--log_level',
type=str,
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
default='INFO',
help='Set logging level.')
return parser
def parse_command_line_args(cl_args=None):
"""Parser command line arguments to the trainer.
Args:
cl_args(sys.argv[1:]): Arg from the command line.
Returns:
args: Parsed arguments using argparse.
"""
parser = build_command_line_parser(parser=None)
args = parser.parse_args(cl_args)
return args
def main(cl_args=None):
"""Launch the model evaluation process."""
args = parse_command_line_args(cl_args)
enc_key = args.key
inference_spec_path = args.inference_spec
model_filename = args.model_filename
dataset_spec = args.dataset_spec
results_dir = args.results_dir
# Init logging
# tf.logging.set_verbosity(tf.logging.INFO)
logging.basicConfig(
format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s')
logger = logging.getLogger(__name__)
logger_tf = logging.getLogger('tensorflow')
logger.setLevel(args.log_level)
logger_tf.setLevel(args.log_level)
# Load dataset_spec file
dataset_spec = io_utils.load_json_file(dataset_spec)
# Load inference spec file
inference_spec = io_utils.load_yaml_file(inference_spec_path)
# Load the training experiment spec referenced by the inference spec
experiment_spec = io_utils.load_yaml_file(inference_spec['train_spec'])
# Make results dir if it doesn't already exist
io_utils.mkdir_p(results_dir)
# Writing out status file for TAO.
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=True,
verbosity=1,
append=True
)
)
status_logging.get_status_logger().write(
data=None,
status_level=status_logging.Status.STARTED,
message="Starting Evaluation."
)
# Load model
if model_filename is None:
model_full_path = inference_spec['model_path']
logger.warning("No model provided! Using model_path from inference spec file.")
else:
model_full_path = model_filename
logger.info("Loading {} for evaluation".format(model_full_path))
# logger.info(model.summary()) # Disabled for TLT
# Initialize COCODataset
dataset = COCODataset(dataset_spec)
# Run inference
detections_path = dataset.infer(
model_full_path,
inference_spec,
experiment_spec,
results_dir,
key=enc_key
)
# Run evaluation
eval_results = COCODataset.evaluate(dataset.test_coco, detections_path, results_dir).stats
eval_dict = {"AP_0.50:0.95_all": eval_results[0],
"AP_0.5": eval_results[1],
"AP_0.75": eval_results[2],
"AP_0.50:0.95_medium": eval_results[3],
"AP_0.50:0.95_large": eval_results[4],
"AR_0.50:0.95_all": eval_results[5],
"AR_0.5": eval_results[6],
"AR_0.75": eval_results[7],
"AR_0.50:0.95_medium": eval_results[8],
"AR_0.50:0.95_large": eval_results[9]
}
status_logging.get_status_logger().kpi = eval_dict
status_logging.get_status_logger().write(
status_level=status_logging.Status.RUNNING,
message="Evaluation metrics generated."
)
if __name__ == "__main__":
try:
main()
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Evaluation finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Evaluation was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
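# Illustrative usage (not part of the original script): assuming a COCO-style
# dataset spec and a trained model, evaluation could be launched roughly as
# follows; every path and the $KEY value are placeholders.
#
#   python nvidia_tao_tf1/cv/bpnet/scripts/evaluate.py \
#       -i nvidia_tao_tf1/cv/bpnet/experiment_specs/infer_default.yaml \
#       -m /workspace/models/bpnet_model.tlt \
#       --dataset coco \
#       -d /workspace/specs/coco_spec.json \
#       --results_dir /workspace/results \
#       -k $KEY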
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/scripts/evaluate.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TLT command line wrapper to invoke CLI scripts."""
import sys
import nvidia_tao_tf1.cv.bpnet.scripts
from nvidia_tao_tf1.cv.common.entrypoint.entrypoint import launch_job
def main():
"""Function to launch the job."""
launch_job(nvidia_tao_tf1.cv.bpnet.scripts, "bpnet", sys.argv[1:])
if __name__ == "__main__":
main()
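# Illustrative usage (not part of the original file): `launch_job` dispatches
# the first positional argument to the matching script packaged under
# nvidia_tao_tf1/cv/bpnet/scripts, so commands of the rough form
#
#   bpnet inference --inference_spec <spec.yaml> -m <model.tlt> ...
#   bpnet evaluate -i <spec.yaml> -d <dataset_spec.json> ...
#
# resolve to the inference and evaluation scripts shown earlier.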
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/entrypoint/bpnet.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bpnet entrypoint."""
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/entrypoint/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BpNet Export definitions."""
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/exporter/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bpnet exporter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import logging
import os
from shutil import copyfile
import keras
try:
import tensorrt # noqa pylint: disable=W0611 pylint: disable=W0611
except Exception as e:
logger = logging.getLogger(__name__)
logger.warning(
"Failed to import TensorRT package, exporting TLT to a TensorRT engine "
"will not be available."
)
from numba import cuda
import numpy as np
try:
from nvidia_tao_tf1.core.export._tensorrt import Engine, ONNXEngineBuilder, UFFEngineBuilder
except Exception as e:
logger = logging.getLogger(__name__)
logger.warning(
"Cannot import TensorRT packages, exporting to TLT to a TensorRT engine "
"will not be available."
)
from nvidia_tao_tf1.cv.bpnet.utils import export_utils
from nvidia_tao_tf1.cv.common.utilities import path_processing as io_utils
from nvidia_tao_tf1.cv.core.export.base_exporter import BaseExporter
logger = logging.getLogger(__name__)
class BpNetExporter(BaseExporter):
"""Exporter class to export a trained BpNet model."""
def __init__(self,
model_path=None,
key=None,
data_type="fp32",
strict_type=False,
experiment_spec_path="",
backend="onnx",
data_format="channels_last"):
"""Instantiate the BpNet exporter to export etlt model.
Args:
model_path(str): Path to the BpNet model file.
key (str): Key to decode the model.
data_type (str): Backend data-type for the optimized TensorRT engine.
experiment_spec_path (str): Path to BpNet experiment spec file.
backend (str): Type of intermediate backend parser to be instantiated.
"""
super(BpNetExporter, self).__init__(model_path=model_path,
key=key,
data_type=data_type,
strict_type=strict_type,
backend=backend,
data_format=data_format)
self.experiment_spec_path = experiment_spec_path
self.experiment_spec = io_utils.load_yaml_file(experiment_spec_path)
# Set keras backend format
keras.backend.set_image_data_format(data_format)
# Reformat preprocessing params to use with preprocessing block in
# BaseExporter
self.preprocessing_params = self._reformat_data_preprocessing_parameters(
self.experiment_spec['dataloader']['normalization_params'])
def _reformat_data_preprocessing_parameters(self, normalization_params=None):
"""Reformat normalization params to be consumed by pre-processing block.
Args:
normalization_params (dict): Normalization params used for training
Returns:
preprocessing_params (dict): Preprocessing parameters including mean, scale
and flip_channel keys.
"""
if normalization_params is None:
logger.warning("Using default normalization params!")
means = [0.5, 0.5, 0.5]
scale = [256.0, 256.0, 256.0]
else:
means = normalization_params['image_offset']
scale = normalization_params['image_scale']
# Reformat as expected by the preprocessing function.
means = np.array(means) * np.array(scale)
scale = 1.0 / np.array(scale)
# Network is trained in RGB format
flip_channel = False
preprocessing_params = {"scale": scale,
"means": means,
"flip_channel": flip_channel}
return preprocessing_params
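# Worked example (illustrative, assuming the preprocessing block applies
# (pixel - mean) * scale): with the default values above, image_offset
# [0.5, 0.5, 0.5] and image_scale [256.0, 256.0, 256.0] are reformatted to
# means = [128.0, 128.0, 128.0] and scale = [1/256, 1/256, 1/256], i.e.
# (pixel - 128.0) / 256.0 per channel, which matches the training-time
# normalization pixel / 256.0 - 0.5.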
def export_to_etlt(self, output_filename, sdk_compatible_model=False, upsample_ratio=4.0):
"""Function to export model to etlt.
Args:
output_filename (str): Output .etlt filename
sdk_compatible_model (bool): Generate SDK (TLT CV Infer/DS/IX) compatible model
upsample_ratio (int): [NMS][CustomLayers] Upsampling factor.
Returns:
tmp_file_name (str): Temporary unencrypted file
in_tensor_names (list): List of input tensor names
out_tensor_names (list): List of output tensor names
"""
# Load Keras model from file.
model = self.load_model(self.model_path)
# model.summary() # Disable for TLT release
model, custom_objects = export_utils.update_model(
model, sdk_compatible_model=sdk_compatible_model, upsample_ratio=upsample_ratio)
output_filename, in_tensor_names, out_tensor_names = self.save_exported_file(
model,
output_filename,
custom_objects=custom_objects,
delete_tmp_file=False,
target_opset=10
)
# Trigger garbage collector to clear memory of the deleted loaded model
del model
gc.collect()
# NOTE: sometimes cuda doesn't release the GPU memory even after graph
# is cleared. So explicitly clear GPU memory.
cuda.close()
return output_filename, in_tensor_names, out_tensor_names
def export(self,
input_dims,
output_filename,
backend,
calibration_cache="",
data_file_name="",
n_batches=1,
batch_size=1,
verbose=True,
calibration_images_dir="",
save_engine=False,
engine_file_name="",
max_workspace_size=1 << 30,
max_batch_size=1,
force_ptq=False,
static_batch_size=None,
sdk_compatible_model=False,
upsample_ratio=4.0,
save_unencrypted_model=False,
validate_trt_engine=False,
opt_batch_size=1):
"""Export.
Args:
ETLT export
input_dims (list): Input dims with channels_first(CHW) or channels_last (HWC)
output_filename (str): Output .etlt filename
backend (str): Model type to export to
Calibration and TRT export
calibration_cache (str): Calibration cache file to write to or read from.
data_file_name (str): Tensorfile to run calibration for int8 optimization
n_batches (int): Number of batches to calibrate over
batch_size (int): Number of images per batch
verbose (bool): Verbosity of the logger
calibration_images_dir (str): Directory of images to run int8 calibration if
data file is unavailable.
save_engine (bool): If True, saves trt engine file to `engine_file_name`
engine_file_name (str): Output trt engine file
max_workspace_size (int): Max size of workspace to be set for trt engine builder.
max_batch_size (int): Max batch size for trt engine builder
force_ptq (bool): Flag to force post training quantization for QAT models
static_batch_size (int): Set a static batch size for exported etlt model.
Default is -1(dynamic batch size)
opt_batch_size (int): Optimum batch size to use for model conversion.
Deployment model
sdk_compatible_model (bool): Generate SDK (TLT CV Infer/DS/IX) compatible model
upsample_ratio (int): [NMS][CustomLayers] Upsampling factor.
Debugging
save_unencrypted_model (bool): Flag to save unencrypted model (debug purpose)
validate_trt_engine (bool): Flag to enable trt engine execution for validation.
"""
if force_ptq:
print("BpNet doesn't support QAT. Post training quantization is used by default.")
# set dynamic_batch flag
dynamic_batch = bool(static_batch_size <= 0)
_, in_tensor_name, out_tensor_names = self.export_to_etlt(
output_filename,
sdk_compatible_model=sdk_compatible_model,
upsample_ratio=upsample_ratio
)
# Get int8 calibrator
calibrator = None
max_batch_size = max(batch_size, max_batch_size)
data_format = self.data_format
preprocessing_params = self.preprocessing_params
input_dims = tuple(input_dims)
logger.debug("Input dims: {}".format(input_dims))
if self.backend == "tfonnx":
backend = "onnx"
keras.backend.clear_session()
if self.data_type == "int8":
# no tensor scale, take traditional INT8 calibration approach
# use calibrator to generate calibration cache
calibrator = self.get_calibrator(calibration_cache=calibration_cache,
data_file_name=data_file_name,
n_batches=n_batches,
batch_size=batch_size,
input_dims=input_dims,
calibration_images_dir=calibration_images_dir,
preprocessing_params=preprocessing_params)
logger.info("Calibration takes time especially if number of batches is large.")
# Assuming single input node graph for uff engine creation.
if not isinstance(input_dims, dict):
input_dims_dict = {in_tensor_name: input_dims}
# Verify with engine generation / run calibration.
if backend == "uff":
engine_builder = UFFEngineBuilder(output_filename,
in_tensor_name,
input_dims_dict,
out_tensor_names,
max_batch_size=max_batch_size,
max_workspace_size=max_workspace_size,
dtype=self.data_type,
strict_type=self.strict_type,
verbose=verbose,
calibrator=calibrator,
tensor_scale_dict=self.tensor_scale_dict,
data_format=data_format)
elif backend == "onnx":
tensor_scale_dict = None if force_ptq else self.tensor_scale_dict
engine_builder = ONNXEngineBuilder(output_filename,
max_batch_size=max_batch_size,
max_workspace_size=max_workspace_size,
dtype=self.data_type,
strict_type=self.strict_type,
verbose=verbose,
calibrator=calibrator,
tensor_scale_dict=tensor_scale_dict,
dynamic_batch=dynamic_batch,
input_dims=input_dims_dict,
opt_batch_size=opt_batch_size)
else:
raise NotImplementedError("Invalid backend.")
trt_engine = engine_builder.get_engine()
if save_engine:
with open(engine_file_name, "wb") as outf:
outf.write(trt_engine.serialize())
if validate_trt_engine:
try:
engine = Engine(trt_engine)
dummy_input = np.ones((1,) + input_dims)
trt_output = engine.infer(dummy_input)
logger.info("TRT engine outputs: {}".format(trt_output.keys()))
for output_name in trt_output.keys():
out = trt_output[output_name]
logger.info("{}: {}".format(output_name, out.shape))
except Exception:
logger.error("TRT engine validation error!")
if trt_engine:
del trt_engine
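# Illustrative usage (not part of the original module): a rough sketch of
# driving the exporter directly; the paths, the key and the input dims are
# placeholders, and the export script normally handles argument parsing.
#
#   exporter = BpNetExporter(
#       model_path='/workspace/models/bpnet_model.tlt',
#       key='<encryption key>',
#       data_type='fp32',
#       experiment_spec_path='/workspace/specs/bpnet_train_spec.yaml',
#       backend='onnx')
#   exporter.export(
#       input_dims=[256, 256, 3],  # HWC, assuming channels_last
#       output_filename='/workspace/models/bpnet_model.etlt',
#       backend='onnx')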
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/exporter/bpnet_exporter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BpNet Inference definition."""
import copy
import json
import logging
import os
import cv2
import numpy as np
import tqdm
from nvidia_tao_tf1.core.coreobject import TAOObject
from nvidia_tao_tf1.cv.bpnet.dataloaders.pose_config import BpNetPoseConfig
from nvidia_tao_tf1.cv.bpnet.inferencer.postprocessor import BpNetPostprocessor
import nvidia_tao_tf1.cv.bpnet.inferencer.utils as inferencer_utils
from nvidia_tao_tf1.cv.common.utilities.tlt_utils import model_io
# Setup logger.
formatter = logging.Formatter(
'%(levelname)-8s%(asctime)s | %(name)s: %(message)s')
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logging.basicConfig(
level='INFO'
)
logging.getLogger().handlers = []
logging.getLogger().addHandler(handler)
logger = logging.getLogger(__name__)
class BpNetInferencer(TAOObject):
"""BpNet Inferencer Class."""
def __init__(self,
model_full_path,
inference_spec,
experiment_spec,
key=None,
**kwargs):
"""Init.
Args:
model_full_path (string): Full path to the model.
inference_spec (dict): Inference specification.
experiment_spec (dict): Training experiment specification.
"""
# Load the model.
if model_full_path.endswith('.engine'):
# Use TensorRT for inference
# import TRTInferencer only if it's a TRT Engine.
from nvidia_tao_tf1.cv.core.inferencer.trt_inferencer import TRTInferencer
self.model = TRTInferencer(model_full_path)
self.is_trt_model = True
# NOTE: There is a bug when using the tensorflow graph alongside tensorrt engine execution.
# Model inference gives completely wrong results, so the tf section is disabled when using
# a tensorrt engine for inference.
self.use_tf_postprocess = False
else:
if key is None:
raise ValueError("Missing key argument needed to load model!")
self.model = model_io(model_full_path, enc_key=key)
self.is_trt_model = False
self.use_tf_postprocess = True
# logger.info(self.model.summary())
logger.info(model_full_path)
logger.info("Successfully loaded {}".format(model_full_path))
self._experiment_spec = experiment_spec
self._input_shape = inference_spec.get('input_shape', [256, 256])
self._keep_aspect_ratio_mode = inference_spec.get(
'keep_aspect_ratio_mode', 'adjust_network_input')
# Output blob that should be used for evaluation. The network may consist
# of multiple stages of refinement and this option lets us pick the stage
# to evaluate our results. If None, last stage is picked.
self._output_stage_to_use = inference_spec.get(
'output_stage_to_use', None)
# Threshold value to use for filtering peaks after non-max suppression.
self.heatmap_threshold = inference_spec.get('heatmap_threshold', 0.1)
# Threshold value to use for suppressing connection in part affinity
# fields.
self.paf_threshold = inference_spec.get('paf_threshold', 0.05)
# Read params from the experiment spec
self._channel_format = experiment_spec['model']['data_format']
# Normalization params
normalization_params = experiment_spec['dataloader']['normalization_params']
self.normalization_scale = normalization_params['image_scale']
self.normalization_offset = normalization_params['image_offset']
# Output shape and stride
self._train_model_target_shape = \
experiment_spec['dataloader']['pose_config']['target_shape']
image_dims = experiment_spec['dataloader']['image_config']['image_dims']
self._train_model_input_shape = [
image_dims['height'], image_dims['width']]
self._model_stride = self._train_model_input_shape[0] // self._train_model_target_shape[0]
# (fy, fy) factors by which the output blobs need to upsampled before post-processing.
# If None, this will be the same as the stride value of the model.
self.output_upsampling_factor = inference_spec.get(
'output_upsampling_factor', [self._model_stride, self._model_stride])
# Get pose config to generate the topology
pose_config_path = experiment_spec['dataloader']['pose_config']['pose_config_path']
self.bpnet_pose_config = BpNetPoseConfig(
self._train_model_target_shape,
pose_config_path
)
self.pose_config = self.bpnet_pose_config.pose_config
self.topology = self.bpnet_pose_config.topology
# Initialize visualization object
self.visualizer = inferencer_utils.Visualizer(self.topology)
# List of scales to use for multi-scale evaluation. Only used
# when `multi_scale_inference` is True.
self.multi_scale_inference = inference_spec.get(
'multi_scale_inference', False)
self.scales = inference_spec.get('scales', None)
if self.scales is None:
self.scales = [1.0]
# Initialize results dictionary with pose config and inference spec
self.results = copy.deepcopy(self.pose_config)
self.results['inference_spec'] = inference_spec
# Initialize post-processor
self.bpnet_postprocessor = BpNetPostprocessor(
self.topology,
self.bpnet_pose_config.num_parts,
use_tf_postprocess=self.use_tf_postprocess
)
self.valid_image_ext = ['jpg', 'jpeg', 'png']
# Check valid cases for trt inference
if self.is_trt_model:
if self.multi_scale_inference:
logger.warning("Multi-scale inference not supported for trt inference. "
"Switching to single scale!!")
self.multi_scale_inference = False
if self._keep_aspect_ratio_mode == "adjust_network_input":
logger.warning("Keep aspect ratio mode `adjust_network_input` not supported"
" for trt inference. Switching to `pad_image_input`!!")
self._keep_aspect_ratio_mode = "pad_image_input"
def infer(self, input_tensor):
"""Run model prediction.
Args:
input_tensor (numpy.ndarray): Model input
Returns:
heatmap (numpy.ndarray): heatmap tensor of shape (H, W, C1)
paf (numpy.ndarray): part affinity field tensor of shape (H, W, C2)
"""
# create input tensor (1 x H x W x C)
# NOTE: Assumes channels_last.
input_tensor = np.transpose(np.float32(
input_tensor[:, :, :, np.newaxis]), (3, 0, 1, 2))
if self.is_trt_model:
# Run model prediction using trt engine
try:
output_blobs = self.model.predict(input_tensor)
except Exception as error:
logger.error("TRT execution failed. Please ensure that the `input_shape` "
"matches the model input dims")
logger.error(error)
raise error
output_blobs = list(output_blobs.values())
assert len(output_blobs) == 2, "Number of outputs more than 2. Please verify."
heatmap_idx, paf_idx = (-1, -1)
for idx in range(len(output_blobs)):
if output_blobs[idx].shape[-1] == self.bpnet_pose_config.num_heatmap_channels:
heatmap_idx = idx
if output_blobs[idx].shape[-1] == self.bpnet_pose_config.num_paf_channels:
paf_idx = idx
if heatmap_idx == -1 or paf_idx == -1:
raise Exception("Please verify model outputs!")
heatmap = np.squeeze(output_blobs[heatmap_idx])
paf = np.squeeze(output_blobs[paf_idx])
else:
# Run model prediction using keras model
output_blobs = self.model.predict(input_tensor)
total_stages = len(output_blobs) // 2
if self._output_stage_to_use is None:
self._output_stage_to_use = total_stages
heatmap = np.squeeze(output_blobs[(self._output_stage_to_use - 1)][0])
paf = np.squeeze(
output_blobs[total_stages + (self._output_stage_to_use - 1)][0])
try:
assert heatmap.shape[:-1] == paf.shape[:-1]
except AssertionError as error:
logger.error("Model outputs are not as expected. "
"The heatmaps and part affinity maps have the following "
"dimensions: {} and {}, whereas, the height and width of "
"both should be same. Ensure the model has been exported "
"correctly. (Hint: use --sdk_compatible_model only for "
"deployment)".format(heatmap.shape[:-1], paf.shape[:-1]))
raise error
return heatmap, paf
def postprocess(self,
heatmap,
paf,
image,
scale_factor,
offset_factor):
"""Postprocess function.
Args:
heatmap (numpy.ndarray): heatmap tensor of shape (H, W, C1)
paf (numpy.ndarray): part affinity field tensor of shape (H, W, C2)
image (numpy.ndarray): Input image
scale_factor (list): scale factor with format (fx, fy)
offset_factor (list): offset factor with format (oy, ox)
Returns:
keypoints (list): List of lists consisting of the keypoints of every
detected person.
viz_image (numpy.ndarray): Image with skeleton overlay
"""
# Find peak candidates
peaks, _ = self.bpnet_postprocessor.find_peaks(heatmap)
# Find connection candidates
connection_all = self.bpnet_postprocessor.find_connections(
peaks, paf, image.shape[1])
# Connect the parts
humans, candidate_peaks = self.bpnet_postprocessor.connect_parts(
connection_all, peaks, self.topology)
# Get final keypoint list and scores
keypoints, scores = self.bpnet_postprocessor.get_final_keypoints(
humans, candidate_peaks, scale_factor, offset_factor)
# Visualize the results on the image
viz_image = self.visualizer.keypoints_viz(image.copy(), keypoints)
return keypoints, scores, viz_image
def run_pipeline(self, image):
"""Run bodypose infer pipeline.
Args:
image (np.ndarray): Input image to run inference on.
It is in BGR and (H, W, C) format.
Returns:
heatmap (np.ndarray): heatmap tensor of shape (H, W, C1)
where C1 corresponds to num_parts + 1 (for background)
paf (np.ndarray): part affinity field tensor of shape
(H, W, C2) where C2 corresponds to num_connections * 2
scale_factor (list): scale factor with format (fx, fy)
offset_factor (list): offset factor with format (oy, ox)
"""
# Preprocess the input with desired input shape and aspect ratio mode
# Normalize the image with coeffs from training
preprocessed_image, preprocess_params = inferencer_utils.preprocess(
image,
self._input_shape,
self.normalization_offset,
self.normalization_scale,
keep_aspect_ratio_mode=self._keep_aspect_ratio_mode,
)
# Infer on the preprocessed input tensor
heatmap, paf = self.infer(preprocessed_image)
fy, fx = (
self.output_upsampling_factor[0], self.output_upsampling_factor[1])
heatmap = cv2.resize(
heatmap, (0, 0), fx=fx, fy=fy, interpolation=cv2.INTER_CUBIC)
paf = cv2.resize(
paf, (0, 0), fx=fx, fy=fy, interpolation=cv2.INTER_CUBIC)
# Compute the scale factor and offset factor to bring the
# keypoints back to original image space.
# NOTE: scale format is (fx, fy)
scale_factor = [
1. / preprocess_params['scale'][0],
1. / preprocess_params['scale'][1]]
if self.output_upsampling_factor is not None:
scale_factor[0] = scale_factor[0] * \
(self._model_stride / self.output_upsampling_factor[1])
scale_factor[1] = scale_factor[1] * \
(self._model_stride / self.output_upsampling_factor[0])
offset_factor = [-preprocess_params['offset']
[0], -preprocess_params['offset'][1]]
return heatmap, paf, scale_factor, offset_factor
def run_multi_scale_pipeline(self, image):
"""Run bodypose multi-scale infer pipeline.
Args:
image (np.ndarray): Input image to run inference on.
It is in BGR and (H, W, C) format.
Returns:
final_heatmaps (np.ndarray): heatmap tensor of shape (H, W, C1)
where C1 corresponds to num_parts + 1 (for background)
final_pafs (np.ndarray): part affinity field tensor of shape
(H, W, C2) where C2 corresponds to num_connections * 2
scale_factor (list): scale factor with format (fx, fy)
offset_factor (list): offset factor with format (oy, ox)
"""
# Sort the scales
self.scales.sort()
max_scale_idx = len(self.scales) - 1
results = {}
# Iterate over the scales
for idx, scale in enumerate(self.scales):
# Get the scaled input shape
scaled_input_shape = [
self._input_shape[0] * scale,
self._input_shape[1] * scale]
# Preprocess the input with desired input shape and aspect ratio mode
# Normalize the image with coeffs from training
preprocessed_image, preprocess_params = inferencer_utils.preprocess(
image,
scaled_input_shape,
self.normalization_offset,
self.normalization_scale,
keep_aspect_ratio_mode=self._keep_aspect_ratio_mode,
)
# Pad the image to account for stride
padded_image, padding = inferencer_utils.pad_bottom_right(
preprocessed_image, self._model_stride, (0, 0, 0))
# Infer on the preprocessed input tensor
heatmap, paf = self.infer(padded_image)
results[idx] = {
'scale': scale,
'preprocessed_image_shape': preprocessed_image.shape[:2],
'padded_image_shape': padded_image.shape[:2],
'padding': padding,
'preprocess_params': preprocess_params.copy(),
'heatmap': heatmap,
'paf': paf
}
# Resize the output layers to the largest scale network input size
output_blob_shapes = results[max_scale_idx]['preprocessed_image_shape']
# NOTE: For multi-scale inference, the output_upsampling_factor is fixed to the model stride
# of the largest scale.
output_upsampling_factor = [self._model_stride, self._model_stride]
# Initialize final heatmaps.pafs that will be computed as a combination
# of heatmaps/pafs at different scales.
final_heatmaps = np.zeros(
(output_blob_shapes[0], output_blob_shapes[1], 19), dtype=np.float32)
final_pafs = np.zeros(
(output_blob_shapes[0],
output_blob_shapes[1],
38),
dtype=np.float32)
for idx in results.keys():
scale = results[idx]['scale']
padded_image_shape = results[idx]['padded_image_shape']
padding = results[idx]['padding']
# Resize the heatmap and paf x the model stride
fy, fx = (output_upsampling_factor[0], output_upsampling_factor[1])
heatmap = cv2.resize(
results[idx]['heatmap'],
(0, 0),
fx=fx,
fy=fy,
interpolation=cv2.INTER_CUBIC)
paf = cv2.resize(
results[idx]['paf'],
(0, 0),
fx=fx,
fy=fy,
interpolation=cv2.INTER_CUBIC)
# Remove the padding from the heatmaps and paf.
# This is equivalent to what was added to the image
heatmap = heatmap[:padded_image_shape[0] - padding[2],
:padded_image_shape[1] - padding[3], :]
paf = paf[:padded_image_shape[0] - padding[2],
:padded_image_shape[1] - padding[3], :]
# Resize the heatmap and paf to the shape of preprocessed input
# for the largest scale
heatmap = cv2.resize(
heatmap,
(output_blob_shapes[1], output_blob_shapes[0]),
interpolation=cv2.INTER_CUBIC)
paf = cv2.resize(
paf,
(output_blob_shapes[1], output_blob_shapes[0]),
interpolation=cv2.INTER_CUBIC)
# Compute the average heatmaps and pafs
final_heatmaps = final_heatmaps + heatmap / len(self.scales)
final_pafs = final_pafs + paf / len(self.scales)
# Compute the scale factor and offset factor to bring the
# keypoints back to original image space
preprocess_params = results[max_scale_idx]['preprocess_params']
scale_factor = [
1. / preprocess_params['scale'][0],
1. / preprocess_params['scale'][1]]
offset_factor = [-preprocess_params['offset']
[0], -preprocess_params['offset'][1]]
return final_heatmaps, final_pafs, scale_factor, offset_factor
def dump_results(self, results_dir, results):
"""Save the results.
Args:
results_dir (str): Path to the directory to save results.
results (dict): results that is to be saved as json.
"""
results_path = os.path.join(results_dir, 'detections.json')
with open(results_path, "w") as f:
json.dump(results, f, indent=2)
f.close()
def run(self,
data,
results_dir=None,
image_root_path='',
visualize=False,
dump_visualizations=False):
"""Run bodypose infer pipeline.
Args:
data: Input data to run inference on. This could be
a list or a dictionary.
results_dir (str): directory path to save the results
image_root_path (str): Root path of the images. If specified,
it is appended in front of the image paths.
visualize (bool): Option to enable visualization.
dump_visualizations (bool): If enabled, saves images with
inference visualization to `results/images_annotated` directory
Returns:
results (dict): Dictionary containing image paths and results.
"""
if isinstance(data, list):
self.results['images'] = [
dict(full_image_path=img_path) for img_path in data]
elif isinstance(data, dict):
self.results.update(data)
if data.get('images') is None:
raise Exception("Verify input json format!")
# Check whether to dump image visualizations.
if dump_visualizations and not results_dir:
logger.warning("No results_dir provided. Ignoring visualization dumping!")
dump_visualizations = False
if dump_visualizations:
visualization_dir = os.path.join(results_dir, "images_annotated")
if not os.path.exists(visualization_dir):
os.makedirs(visualization_dir)
for idx, data_point in enumerate(tqdm.tqdm(self.results['images'])):
# Get full path and verify extension
image_path = data_point['full_image_path']
full_path = os.path.join(image_root_path, image_path)
if not full_path.split('.')[-1].lower() in self.valid_image_ext:
continue
# Read image
image = cv2.imread(full_path)
if image is None:
logger.error("Error reading image: {}".format(full_path))
continue
# Check if to do multi-scale inference
if self.multi_scale_inference:
heatmap, paf, scale_factor, offset_factor = self.run_multi_scale_pipeline(
image)
else:
heatmap, paf, scale_factor, offset_factor = self.run_pipeline(
image)
# Post-process the heatmap and the paf to obtain the final parsed
# skeleton results
keypoints, scores, viz_image = self.postprocess(
heatmap,
paf,
image,
scale_factor,
offset_factor
)
# Visualize the image
if visualize:
cv2.imshow('output', viz_image)
cv2.waitKey(0)
# Add the results to the results dict
self.results['images'][idx]['keypoints'] = keypoints
self.results['images'][idx]['scores'] = scores
if dump_visualizations:
self.results['images'][idx]['viz_id'] = idx
# Save annotated image
cv2.imwrite(os.path.join(visualization_dir, "{}.png".format(idx)), viz_image)
# Dump the results to results dir
if results_dir:
self.dump_results(results_dir, self.results)
return self.results
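# Illustrative usage (not part of the original module): the inference script
# shown earlier builds these objects from command line arguments; a minimal
# sketch with placeholder paths is
#
#   inferencer = BpNetInferencer(model_full_path, inference_spec,
#                                experiment_spec, key=key)
#   results = inferencer.run(['/path/to/image.jpg'],
#                            results_dir='/workspace/results')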
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/inferencer/bpnet_inferencer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BpNet Post-processor."""
import numpy as np
import tensorflow as tf
from nvidia_tao_tf1.core.coreobject import TAOObject
import nvidia_tao_tf1.cv.bpnet.inferencer.utils as inferencer_utils
# Enable eager execution
# tf.compat.v1.enable_eager_execution()
class BpNetPostprocessor(TAOObject):
"""BpNet Postprocessor class."""
def __init__(
self,
topology,
num_parts,
heatmap_threshold=0.1,
paf_threshold=0.05,
heatmap_gaussian_sigma=3,
heatmap_gaussian_kernel=5,
line_integral_samples_num=10,
line_integral_count_threshold=0.8,
num_parts_thresh=4,
overall_score_thresh=0.4,
use_tf_postprocess=True):
"""Init.
Args:
topology (np.ndarray): N x 4 array where N is the number of
connections, and the columns are (start_paf_idx, end_paf_idx,
start_conn_idx, end_conn_idx)
num_parts (int): number of keypoints in the given model
heatmap_threshold (float): Threshold value to use for
filtering peaks after Non-max supression.
paf_threshold (float): Threshold value to use for
suppressing connection in part affinity fields.
heatmap_gaussian_sigma (float): sigma to use for gaussian filter
used for smoothing the heatmap
heatmap_gaussian_kernel (int): kernel size to use for gaussian filter
used for smoothing the heatmap
line_integral_samples_num (int): number of sampling points (N) along each vector
line_integral_count_threshold (float): Threshold on the ratio of qualified points
to total sample points (line_integral_samples_num)
num_parts_thresh (int): Minimum number of parts needed to qualify as a detection
overall_score_thresh (float): Minimum overall score needed to qualify as a detection
use_tf_postprocess (bool): Enable use of tensorflow based find peaks.
If False, reverts to numpy and cv2 based ops.
"""
self.topology = topology
self.num_parts = num_parts
self.num_connections = topology.shape[0]
self.heatmap_threshold = heatmap_threshold
self.paf_threshold = paf_threshold
self.num_parts_thresh = num_parts_thresh
self.overall_score_thresh = overall_score_thresh
self.heatmap_gaussian_sigma = heatmap_gaussian_sigma
self.heatmap_gaussian_kernel = heatmap_gaussian_kernel
self.line_integral_count_threshold = line_integral_count_threshold
self.line_integral_samples_num = line_integral_samples_num
self.use_tf_postprocess = use_tf_postprocess
# array of points used to sample along each vector and
# it has shape (1, N, 1)
self.line_integral_samples = np.arange(
line_integral_samples_num,
dtype=np.float32).reshape(1, -1, 1)
# Initialize gaussian kernel used for peak smoothing
self.gaussian_kernel = inferencer_utils.get_gaussian_kernel(
heatmap_gaussian_kernel, self.num_parts, sigma=self.heatmap_gaussian_sigma)
# Initialize peak nms graph
# NOTE: There is a bug when using the tensorflow graph alongside tensorrt engine execution.
# Model inference gives completely wrong results, so this section is disabled when using
# a tensorrt engine for inference.
if use_tf_postprocess:
self.graph = tf.compat.v1.get_default_graph()
self.persistent_sess = tf.Session(graph=self.graph)
self.heatmap_tf = tf.placeholder(
tf.float32,
shape=(1, None, None, self.num_parts))
self.peaks_map_tf = self.peak_nms_tf(self.heatmap_tf)
# Dry run with dummy input
self.persistent_sess.run(tf.global_variables_initializer())
self.persistent_sess.run(
self.peaks_map_tf,
feed_dict={
self.heatmap_tf: [np.ndarray(shape=(256, 256, self.num_parts),
dtype=np.float32)]
}
)
def peak_nms_tf(self, heatmap):
"""Peak non-max suppresion using tensorflow.
Steps:
a. Refine the heatmap using gaussian smoothing
b. Find the local maximums using window of size K
and substitute the center pixel with max using maxpool
c. Compare this with the smoothed heatmap and retain the
original heatmap values where they match. Other pixel
locations (non-maximum) are suppressed to 0.
Args:
heatmap (tf.Tensor): heatmap tensor with shape (N, H, W, C)
where C is the number of parts.
Returns:
peaks_map (tf.Tensor): heatmap after NMS
"""
# Define gaussian kernel
with tf.compat.v1.variable_scope('postprocess'):
gaussian_kernel_tf = tf.Variable(
tf.convert_to_tensor(self.gaussian_kernel),
name='gauss_kernel')
# Apply depthwise conv with gaussian kernel
smoothed_heatmap = tf.nn.depthwise_conv2d(
heatmap,
filter=gaussian_kernel_tf,
strides=[1, 1, 1, 1],
padding='SAME')
# Non-max suppression by using maxpool on the smoothed heatmap
maxpool_kernel_size = (3, 3)
maxpooled_heatmap = tf.nn.pool(
smoothed_heatmap,
window_shape=maxpool_kernel_size,
pooling_type='MAX',
padding='SAME')
peaks_map = tf.where(
tf.equal(smoothed_heatmap, maxpooled_heatmap),
heatmap,
tf.zeros_like(heatmap))
return peaks_map
def peak_nms(self, heatmap):
"""Peak non-max suppresion.
Steps:
a. Refine the heatmap using gaussian smoothing
b. Find the local maximums using window of size K
and substitute the center pixel with max using maxpool
c. Compare this with the smoothed heatmap and retain the
original heatmap values where they match. Other pixel
locations (non-maximum) are suppressed to 0.
Args:
heatmap (np.ndarray): heatmap tensor with shape (H, W, C)
where C is the number of parts.
Returns:
peaks_map (np.ndarray): heatmap after NMS
"""
smoothed_heatmap = inferencer_utils.apply_gaussian_smoothing(
heatmap,
kernel_size=self.heatmap_gaussian_kernel,
sigma=self.heatmap_gaussian_sigma,
backend="cv")
return inferencer_utils.nms_np(smoothed_heatmap)
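# Illustrative sketch of the NMS step above on a hypothetical 3x3 single-part
# heatmap: a 3x3 max-pool replaces every pixel of the smoothed map with its
# local maximum, so only locations where the smoothed value equals the pooled
# value keep their original heatmap score and all other locations become 0:
#   [[0.1, 0.2, 0.1],          [[0.0, 0.0, 0.0],
#    [0.2, 0.9, 0.2],   --->    [0.0, 0.9, 0.0],
#    [0.1, 0.2, 0.1]]           [0.0, 0.0, 0.0]]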
def find_peaks(self, heatmap):
"""Find peak candidates using the heatmap.
Steps:
a. Smooth the heatmap and apply Non-max suppression
b. Further suppress the peaks with scores below defined
`heatmap_threshold`
c. Gather peaks according to keypoint ordering
Args:
heatmap (np.ndarray): heatmap array with shape (H, W, C)
where C is the number of parts.
Returns:
peaks (list): List of candidate peaks per keypoint
"""
if self.use_tf_postprocess:
# Drop the last channel which corresponds to background
# Expand dims before passing into the tensorflow graph
heatmap = np.expand_dims(heatmap[:, :, :-1], axis=0)
# Run non-max suppression using tensorflow ops
peaks_map = self.persistent_sess.run(
[self.peaks_map_tf], feed_dict={self.heatmap_tf: heatmap})
peaks_map = np.squeeze(peaks_map)
heatmap = heatmap[0]
else:
# Drop the last channel which corresponds to background
heatmap = heatmap[:, :, :-1]
# Run non-max suppression
peaks_map = self.peak_nms(heatmap)
# Further suppress the peaks with scores below defined threshold
peak_ys, peak_xs, kpt_idxs = np.where(
peaks_map > self.heatmap_threshold)
# Sort the peaks based on the kpt ordering
sorted_indices = kpt_idxs.argsort()
kpt_idxs = kpt_idxs[sorted_indices]
peak_ys = peak_ys[sorted_indices]
peak_xs = peak_xs[sorted_indices]
# Gather the peaks according to their keypoint index
peak_counter = 0
peaks = [[] for kpt_idx in range(0, self.num_parts)]
for (kpt_idx, peak_y, peak_x) in zip(kpt_idxs, peak_ys, peak_xs):
peak_score = heatmap[peak_y, peak_x, kpt_idx]
peaks[kpt_idx].append((peak_x, peak_y, peak_score, peak_counter))
peak_counter += 1
return peaks, peak_counter
@staticmethod
def get_bipartite_graph(conn_start, conn_end, n_start, n_end):
"""Get the bipartite graph for candidate limb connections.
The vertices represent the keypoint candidates and the edges
represent the connection candidates.
Args:
conn_start (np.ndarray): keypoint candidates for source keypoint
conn_end (np.ndarray): keypoint candidates for end keypoint
n_start (int): number of keypoint candidates for source keypoint
n_end (int): number of keypoint candidates for end keypoint
Returns:
out (np.ndarray): bipartite graph of shape (n_end, n_start, 2)
"""
# Expand dims to (1, nA, 2)
kpts_start = np.expand_dims(conn_start[:, :2], axis=0)
# Expand dims to (nB, 1, 2)
kpts_end = np.expand_dims(conn_end[:, :2], axis=1)
# Broadcast nB times along first dim
kpts_start = np.broadcast_to(kpts_start, (n_end, n_start, 2))
# Return the bipartite graph of vectors
return (kpts_end - kpts_start), kpts_start
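# Shape sketch (illustrative): with n_start = 2 and n_end = 3 candidates,
# kpts_start is broadcast from (1, 2, 2) to (3, 2, 2) and kpts_end has shape
# (3, 1, 2), so the returned graph (kpts_end - kpts_start) holds one (dx, dy)
# vector for each of the 3 x 2 candidate limbs.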
def compute_line_integral(
self, bipartite_graph, connection_paf, kpts_start):
"""Compute the line integral along the vector of each candidate limbs.
This gives each connection a score which will be used for
the assignment step.
Args:
bipartite_graph (np.ndarray): contains candidate limbs
of shape (nB, nA, 2)
connection_paf (np.ndarray): part affinity field for candidate limb
connecting two keypoints with shape (H, W, 2)
Returns:
weighted_bipartite_graph (np.ndarray): scores of the candidate connections
"""
# Calculate unit step size along the vector
bipartite_graph = bipartite_graph.reshape(-1, 1, 2)
steps = (1 / (self.line_integral_samples_num - 1) * bipartite_graph)
# Sample N points along every candidate limb vector
points = steps * self.line_integral_samples + \
kpts_start.reshape(-1, 1, 2)
points = points.round().astype(dtype=np.int32)
x = points[..., 0].ravel()
y = points[..., 1].ravel()
# Get part affinity vector fields along the limb sample points
paf_vectors = connection_paf[y, x].reshape(
-1, self.line_integral_samples_num, 2)
# Compute the candidate limb unit vectors
limb_vec_norm = np.linalg.norm(
bipartite_graph, ord=2, axis=-1, keepdims=True)
limb_unit_vec = bipartite_graph / (limb_vec_norm + 1e-6)
# Compute the dot product at each location of the candidate limbs
# with the part affinity vectors at that location
scores = (paf_vectors * limb_unit_vec).sum(-1).reshape(-1,
self.line_integral_samples_num)
# Suppress scores below given threshold
valid_scores_mask = scores > self.paf_threshold
num_qualified_points = valid_scores_mask.sum(1)
# Compute the line integral / weighted bipartite graph by summing
# over the scores of valid points
weighted_bipartite_graph = (
scores * valid_scores_mask).sum(1) / (num_qualified_points + 1e-6)
return weighted_bipartite_graph, num_qualified_points
@staticmethod
def assignment(valid_candidate_limb_pairs, weighted_bipartite_graph):
"""Assignment algorithm to obtain final connections with maximum score.
Steps:
a. Sort each possible connection by its score.
b. The connection with the highest score is chosen as final connection.
c. Move to the next possible connection. If no parts of this connection
have been assigned to a final connection before, this is a final connection.
d. Repeat step (c) until we are done.
Args:
valid_candidate_limb_pairs (list): list of arrays with start and end conn indices
weighted_bipartite_graph (np.ndarray): scores of the candidate connections
Returns:
conn_start_idx (np.ndarray): start indices of the final connections
conn_end_idx (np.ndarray): end indices of the final connections
weighted_bipartite_graph (np.ndarray): scores of the final connections
"""
# Sort based on scores
order = weighted_bipartite_graph.argsort()[::-1]
weighted_bipartite_graph = weighted_bipartite_graph[order]
conn_start_idx = valid_candidate_limb_pairs[1][order]
conn_end_idx = valid_candidate_limb_pairs[0][order]
idx = []
has_start_kpt = set()
has_end_kpt = set()
# Start assignment from the largest score
for t, (i, j) in enumerate(zip(conn_start_idx, conn_end_idx)):
if i not in has_start_kpt and j not in has_end_kpt:
idx.append(t)
has_start_kpt.add(i)
has_end_kpt.add(j)
idx = np.asarray(idx, dtype=np.int32)
return conn_start_idx[idx], conn_end_idx[idx], weighted_bipartite_graph[idx]
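# Worked example (illustrative): suppose three candidate connections with
# scores [0.9, 0.8, 0.7], where the 0.8 pair shares its start keypoint with
# the 0.9 pair. After sorting by score, the 0.9 pair is kept, the 0.8 pair is
# skipped because its start keypoint is already assigned, and the 0.7 pair is
# kept only if both of its keypoints are still free.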
def find_connections(self, peaks, paf, image_width):
"""Find connection candidates using the part affinity fields.
Steps:
a. Obtain the bipartite graph vectors between each pairs of
keypoint connection
b. Compute line integral over the part affinity fileds along
the candidate connection vectors to obtain the weighted
bipartite graph
c. Suppress the candidates that don't meet the given criteria
d. Assignment: find the connections that maximize the total
score when matching the bipartite graph.
Args:
peaks (list): List of candidate peaks per keypoint
paf (np.ndarray): part affinity fields with shape (H, W, C)
where C is the (number of connections * 2)
image_width (int): width of the image
Returns:
connection_all (list): List of all detected connections for
each part/limb.
"""
connection_all = []
for k in range(self.num_connections):
connection_paf = paf[:, :, self.topology[k][:2]]
conn_start = np.array(peaks[self.topology[k][2]])
conn_end = np.array(peaks[self.topology[k][3]])
n_start = len(conn_start)
n_end = len(conn_end)
if (n_start == 0 or n_end == 0):
connection_all.append([])
continue
# Get the bipartite graph - all possible connections between two
# types of candidate keypoints
bipartite_graph, kpts_start = self.get_bipartite_graph(
conn_start, conn_end, n_start, n_end)
# Get weighted bipartite graph using line integral over the part
# affinity fields
weighted_bipartite_graph, num_qualified_points = self.compute_line_integral(
bipartite_graph, connection_paf, kpts_start)
# Suppress the candidate limbs that don't meet the following
# criterion
num_thresh_points = self.line_integral_count_threshold * \
self.line_integral_samples_num
is_criterion_met = np.logical_and(
weighted_bipartite_graph > 0,
num_qualified_points > num_thresh_points)
valid_condidate_limb_idxs = np.where(is_criterion_met)[0]
if len(valid_condidate_limb_idxs) == 0:
connection_all.append([])
continue
valid_candidate_limb_pairs = np.divmod(
valid_condidate_limb_idxs, n_start)
weighted_bipartite_graph = weighted_bipartite_graph[valid_condidate_limb_idxs]
# Assignment algorithm to get final connections
conn_start_idx, conn_end_idx, connection_scores = self.assignment(
valid_candidate_limb_pairs, weighted_bipartite_graph)
connections = list(zip(conn_start[conn_start_idx, 3].astype(np.int32),
conn_end[conn_end_idx, 3].astype(np.int32),
connection_scores))
connection_all.append(np.array(connections))
return connection_all
def connect_parts(self, connection_all, peaks_all, topology):
"""Connect the parts to build the final skeleton(s).
Steps:
a. Iterate through every connection and start with assigning to a
new human every time.
b. This initial human will be updated with the connection end
every time a start part in the current connection is already
part of the human.
c. If two humans share the same part index with the same coordinates,
but have disjoint connections, they are merged into one.
d. We iterate b and c until all connections are exhausted.
e. Suppress the humans that don't meet certain criteria.
Args:
connection_all (list): List of all detected connections for
each part/limb.
peaks_all (list): List of all candidate peaks per keypoint
topology (np.ndarray): N x 4 array where N is the number of
connections, and the columns are (start_paf_idx, end_paf_idx,
start_conn_idx, end_conn_idx)
Returns:
humans (np.ndarray): array with final skeletons of shape (N, M)
where N is the number of skeletons and M is (num_parts + 2)
candidate_peaks (np.ndarray): array with all candidate peaks of
shape (N, 4) where N is number of peaks
"""
# Initialize humans array with (N, num_parts + 2)
# Last column: total parts for person corresponding to that row
# Second last column: score of the overall configuration
humans = -1 * np.ones((0, self.num_parts + 2))
# Concat all peaks into an (N x 4) array
candidate_peaks = np.array(
[item for sublist in peaks_all for item in sublist])
# Iterate through each edge
for pidx in range(self.num_connections):
if not len(connection_all[pidx]):
continue
kpts_start = connection_all[pidx][:, 0]
kpts_end = connection_all[pidx][:, 1]
start_idx, end_idx = np.array(topology[pidx][2:])
# Iterate through all connections corresponding to current edge
for cidx in range(len(connection_all[pidx])):
# Check if multiple humans share the same connection
humans_sharing_kpts = []
for j in range(len(humans)):
is_present = (humans[j][start_idx] == kpts_start[cidx] or
humans[j][end_idx] == kpts_end[cidx])
if is_present:
humans_sharing_kpts.append(j)
# If only one row/human shares the part index, assign the end part
# index to that human
if (len(humans_sharing_kpts) == 1):
j = humans_sharing_kpts[0]
if (humans[j][end_idx] != kpts_end[cidx]):
humans[j][end_idx] = kpts_end[cidx]
humans[j][-1] += 1
humans[j][-2] += \
candidate_peaks[kpts_end[cidx]
.astype(int), 2] + connection_all[pidx][cidx][2]
# If two rows/humans share the part index, and the union of
# the connections are disjoint, then merge them
# else, just assign the end keypoint to current row
# similar to the case 1
elif (len(humans_sharing_kpts) == 2):
j1, j2 = humans_sharing_kpts
membership = ((humans[j1] >= 0).astype(
int) + (humans[j2] >= 0).astype(int))[:-2]
if len(np.nonzero(membership == 2)[0]) == 0: # merge
humans[j1][:-2] += (humans[j2][:-2] + 1)
humans[j1][-2:] += humans[j2][-2:]
humans[j1][-2] += connection_all[pidx][cidx][2]
humans = np.delete(humans, j2, 0)
else:
humans[j1][end_idx] = kpts_end[cidx]
humans[j1][-1] += 1
humans[j1][-2] += \
candidate_peaks[kpts_end[cidx]
.astype(int), 2] + connection_all[pidx][cidx][2]
# If the start index is not in any row/humans, create a new row/human
# The idea is that every time there is a new connection, we assign it to a
# new human, and later merge them together as above.
elif not len(humans_sharing_kpts) and (pidx < self.num_connections - 2):
row = -1 * np.ones((self.num_parts + 2))
row[start_idx] = kpts_start[cidx]
row[end_idx] = kpts_end[cidx]
row[-1] = 2
row[-2] = sum(
candidate_peaks[connection_all[pidx][cidx, :2]
.astype(int), 2]) + connection_all[pidx][cidx][2]
humans = np.vstack([humans, row])
# Suppress the humans/rows based on the following criteria:
# 1. Parts fewer than the given threshold
# 2. Overall score lower than the given threshold
invalid_idx = []
for hidx in range(len(humans)):
if humans[hidx][-1] < self.num_parts_thresh or \
humans[hidx][-2] / humans[hidx][-1] < self.overall_score_thresh:
invalid_idx.append(hidx)
humans = np.delete(humans, invalid_idx, axis=0)
return humans, candidate_peaks
def get_final_keypoints(
self,
humans,
candidate_peaks,
scale_factor,
offset_factor):
"""Get final scaled keypoints.
Args:
humans (np.ndarray): array with final skeletons of shape (N, M)
where N is the number of skeletons and M is (num_parts + 2)
candidate_peaks (np.ndarray): array with all candidate peaks of
shape (N, 4) where N is number of peaks
scale_factor (list): scale factor with format (fx, fy)
offset_factor (list): offset factor with format (oy, ox)
Returns:
keypoints (list): List of lists containing keypoints per skeleton
scores (list): List of scores per skeleton
"""
keypoints = []
scores = []
for human in humans:
keypoint_indexes = human[0:self.num_parts]
person_keypoint_coordinates = []
# This is the sum of all keypoint and paf scores
person_score = human[-2]
for index in keypoint_indexes:
# No candidates for keypoint
if index == -1:
X, Y = 0, 0
else:
X = scale_factor[1] * \
candidate_peaks[index.astype(int), 0] + offset_factor[0]
Y = scale_factor[0] * \
candidate_peaks[index.astype(int), 1] + offset_factor[1]
person_keypoint_coordinates.append([X, Y])
keypoints.append(person_keypoint_coordinates)
# Scale the scores between 0 and 1
# TODO: how is this used by COCO eval
scores.append(1 - 1.0 / person_score)
return keypoints, scores
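# Worked example (illustrative): with a uniform preprocessing scale of 0.5
# (so scale_factor = (2.0, 2.0)) and no padding (offset_factor = (0, 0)),
# a candidate peak at network-space coordinates (x, y) = (64, 32) maps back
# to (X, Y) = (128, 64) in the original image.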
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/inferencer/postprocessor.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BpNet Inferencer definitions."""
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/inferencer/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BpNet Inference utils."""
from enum import Enum
import cv2
import numpy as np
from scipy.ndimage import gaussian_filter
import scipy.stats as st
class KeepAspectRatioMode(str, Enum):
"""Enum class containing the different modes to keep aspect ratio."""
PAD_IMAGE_INPUT = "pad_image_input"
ADJUST_NETWORK_INPUT = "adjust_network_input"
def pad_image_input(input_shape, raw_frame):
"""Pad raw input to maintain aspect ratio for inference.
Args:
input_shape (tuple): desired (height, width) of the
network input
raw_frame (np.ndarray): Unprocessed frame in HWC format.
Returns:
res (np.ndarray): Padded frame.
offset (list): (x, y) offsets used during padding
"""
image_height = raw_frame.shape[0]
image_width = raw_frame.shape[1]
# Offset for width, height.
offset = [0, 0]
desired_aspect_ratio = input_shape[1] / input_shape[0]
if image_width / image_height == desired_aspect_ratio:
return raw_frame, offset
# Need to pad height.
if image_width / image_height > desired_aspect_ratio:
pad_length = int(
(image_width / desired_aspect_ratio - image_height)
)
pad_length_half = int(pad_length / 2.0)
offset[1] = pad_length_half
# Border order: top, bottom, left, right
res = cv2.copyMakeBorder(
raw_frame,
pad_length_half,
pad_length - pad_length_half,
0,
0,
cv2.BORDER_CONSTANT,
value=(128, 128, 128),
)
# Need to pad width.
else:
pad_length = int(
(image_height * desired_aspect_ratio - image_width)
)
pad_length_half = int(pad_length / 2.0)
offset[0] = pad_length_half
# Border order: top, bottom, left, right
res = cv2.copyMakeBorder(
raw_frame,
0,
0,
pad_length_half,
pad_length - pad_length_half,
cv2.BORDER_CONSTANT,
value=(128, 128, 128),
)
return res, offset
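# Illustrative usage sketch for pad_image_input(); the frame size and network
# input shape below are hypothetical.
def _example_pad_image_input():
    """Pad a 720x1280 frame so it matches a 256x320 network-input aspect ratio."""
    frame = np.zeros((720, 1280, 3), dtype=np.uint8)
    padded, offset = pad_image_input((256, 320), frame)
    # 1280/720 (~1.78) > 320/256 (1.25), so rows are padded top and bottom.
    return padded.shape, offset  # ((1024, 1280, 3), [0, 152])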
def adjust_network_input(input_shape, image_shape):
"""Pad raw input to maintain aspect ratio for inference.
Args:
input_shape (tuple): desired (height, width) of the
network input
image_shape (tuple): (height, width) of the image
Returns:
scale (tuple): tuple containing the scaling factors.
offset (list): list containing the x and y offset values
"""
image_height = image_shape[0]
image_width = image_shape[1]
offset = [0, 0]
desired_aspect_ratio = input_shape[1] / input_shape[0]
    # If the image aspect ratio is greater than the desired aspect ratio,
    # fix the scale as the ratio of the heights; otherwise fix it as the ratio
    # of the widths. The other side gets adjusted by the same amount.
if image_width / image_height > desired_aspect_ratio:
scale = (input_shape[0] / image_height, input_shape[0] / image_height)
else:
scale = (input_shape[1] / image_width, input_shape[1] / image_width)
return scale, offset
def convert_color_format(image, input_color_format, desired_color_format):
"""Convert from one image color format, to another.
Args:
image (np.ndarray): input image
input_color_format (str): color format of input
desired_color_format (str): color format to convert to
Returns:
        image (np.ndarray): processed image
"""
# Enforce BGR (currently, doesn't support other formats)
assert (
"B" in input_color_format
and "G" in input_color_format
and "R" in input_color_format
), "Color order must have B,G,R"
if desired_color_format == "RGB":
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
return image
def resize_image(image, input_shape, keep_aspect_ratio_mode=None):
"""Resize the input image based on the given mode.
Args:
image (np.ndarray): input image
        input_shape (tuple): desired (height, width) of the network input
keep_aspect_ratio_mode (str): determines how the image is
resized. Choices include [`adjust_network_input`,
`pad_image_input`, None]
Returns:
        image (np.ndarray): processed image
scale (tuple): scale used to resize the image (fy, fx)
offset (list): (x, y) offsets used during padding
"""
image_shape = image.shape
# Estimate the scale and offset needed
# NOTE: scale is (fy, fx)
# No need to retain aspect ratio
if keep_aspect_ratio_mode is None:
offset = [0, 0]
scale = (
input_shape[0] / image_shape[0],
input_shape[1] / image_shape[1]
)
# Retain aspect ratio by padding the network input accordingly
elif keep_aspect_ratio_mode == KeepAspectRatioMode.ADJUST_NETWORK_INPUT:
scale, offset = adjust_network_input(input_shape, image_shape)
# Retain aspect ratio by padding the image input accordingly
elif keep_aspect_ratio_mode == KeepAspectRatioMode.PAD_IMAGE_INPUT:
image, offset = pad_image_input(input_shape, image)
padded_image_shape = image.shape
scale = (
input_shape[0] / padded_image_shape[0],
input_shape[1] / padded_image_shape[1]
)
else:
raise ValueError("keep aspect ratio mode: {} not supported. Please \
choose in [`pad_image_input`, `adjust_network_input`, None]".format(
keep_aspect_ratio_mode))
# Resize image using the given scale
image = cv2.resize(
image,
(0, 0),
fx=scale[1],
fy=scale[0],
interpolation=cv2.INTER_CUBIC)
return image, scale, offset
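# Illustrative usage sketch for resize_image() in `pad_image_input` mode; the
# shapes below are hypothetical.
def _example_resize_image():
    """Resize a 720x1280 frame to a 256x320 network input while padding."""
    image = np.zeros((720, 1280, 3), dtype=np.uint8)
    resized, scale, offset = resize_image(
        image, (256, 320),
        keep_aspect_ratio_mode=KeepAspectRatioMode.PAD_IMAGE_INPUT)
    # The frame is first padded to (1024, 1280, 3) and then scaled by (0.25, 0.25).
    return resized.shape, scale, offset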
def normalize_image(image, scale, offset):
"""Normalize image.
Args:
image (np.ndarray): input image
scale (list): normalization scale used in training
offset (list): normalization offset used in training
Returns:
        (np.ndarray): processed image
"""
return np.subtract(np.divide(image, scale), offset)
def preprocess(orig_image,
input_shape,
normalization_offset,
normalization_scale,
input_color_format="BGR",
desired_color_format="RGB",
keep_aspect_ratio_mode=None):
"""Preprocess image.
TODO: Ideally should be using augmentation module
Args:
        orig_image (np.ndarray): input image in HWC format
        input_shape (tuple): desired (height, width) of the network input
keep_aspect_ratio_mode (str): determines how the image is
resized. Choices include [`adjust_network_input`,
`pad_image_input`, None]
normalization_scale (list): normalization scale used in training
normalization_offset (list): normalization offset used in training
input_color_format (str): color format of input
desired_color_format (str): color format to convert to
Returns:
        preprocessed_image (np.ndarray): processed image
preprocess_params (dict): contains the params used for pre-processing
"""
image = orig_image.copy()
image, scale, offset = resize_image(
image, input_shape, keep_aspect_ratio_mode)
# Convert to desired color format
# NOTE: currently supports only BGR as input
image = convert_color_format(
image,
input_color_format,
desired_color_format)
# Normalize image
preprocessed_image = normalize_image(
image, normalization_scale, normalization_offset)
# preprocessed_image = image
preprocess_params = {
'scale': scale,
'offset': offset
}
return preprocessed_image, preprocess_params
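# Illustrative usage sketch for preprocess(); the normalization constants and
# input shape below are hypothetical and would normally come from the spec.
def _example_preprocess():
    """Run the full pre-processing chain on a dummy BGR frame."""
    frame = np.zeros((720, 1280, 3), dtype=np.uint8)
    preprocessed, params = preprocess(
        frame,
        input_shape=(256, 320),
        normalization_offset=[0.5, 0.5, 0.5],
        normalization_scale=[255.0, 255.0, 255.0],
        keep_aspect_ratio_mode=KeepAspectRatioMode.PAD_IMAGE_INPUT)
    return preprocessed.shape, params['scale'], params['offset']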
def pad_bottom_right(image, stride, pad_value):
"""Pad image on the bottom right side.
Args:
        image (np.ndarray): input image in HWC format
stride (int): stride size of the model
pad_value (tuple): pixel value to use for padded regions
Returns:
        img_padded (np.ndarray): processed image
pad (dict): contains the padding values
"""
h = image.shape[0]
w = image.shape[1]
# Pad ordering: [top, left, bottom, right]
pad = 4 * [None]
pad[0] = 0
pad[1] = 0
pad[2] = 0 if (h % stride == 0) else stride - (h % stride)
pad[3] = 0 if (w % stride == 0) else stride - (w % stride)
img_padded = cv2.copyMakeBorder(image, pad[0], pad[2], pad[1], pad[3],
cv2.BORDER_CONSTANT, value=pad_value)
return img_padded, pad
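# Illustrative usage sketch for pad_bottom_right(); the image size and stride
# below are hypothetical.
def _example_pad_bottom_right():
    """Pad a 250x330 image so both sides become multiples of stride 8."""
    image = np.zeros((250, 330, 3), dtype=np.uint8)
    padded, pad = pad_bottom_right(image, stride=8, pad_value=(128, 128, 128))
    # Height 250 -> 256 and width 330 -> 336, padded only on the bottom and right.
    return padded.shape, pad  # ((256, 336, 3), [0, 0, 6, 6])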
def get_gaussian_kernel(kernlen, num_channels, sigma=3, dtype=np.float32):
"""Get gaussian kernel to use as weights.
Args:
        kernlen (int): kernel size to use
num_channels (int): number of channels to filter
dtype (dtype): data type to use for the gaussian kernel
sigma (float): sigma value to use for the gaussian kernel
Returns:
out_filter (np.ndarray): gaussian kernel of shape
(kernlen, kernlen, num_channels, 1)
"""
interval = (2 * sigma + 1.) / (kernlen)
x = np.linspace(-sigma - interval / 2., sigma + interval / 2., kernlen + 1)
kern1d = np.diff(st.norm.cdf(x))
kernel_raw = np.sqrt(np.outer(kern1d, kern1d))
kernel = kernel_raw / kernel_raw.sum()
out_filter = np.array(kernel, dtype=dtype)
out_filter = out_filter.reshape((kernlen, kernlen, 1, 1))
out_filter = np.repeat(out_filter, num_channels, axis=2)
return out_filter
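# Illustrative usage sketch for get_gaussian_kernel(); the kernel size and
# channel count below are hypothetical.
def _example_gaussian_kernel():
    """Build a 5x5 kernel replicated across 19 channels."""
    kernel = get_gaussian_kernel(kernlen=5, num_channels=19, sigma=3)
    return kernel.shape  # (5, 5, 19, 1)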
def apply_gaussian_smoothing(input_array, kernel_size=5, sigma=3, backend="cv"):
"""Apply gaussian smoothing.
Args:
input_array (np.ndarray): input array to apply gaussian
smoothing with shape (H, W, C)
kernel_size (int): kernel size to use
sigma (float): sigma value to use for the gaussian kernel
Returns:
output_array (np.ndarray): output after gaussian smoothing
with same shape as `input_array`
"""
output_array = np.zeros(shape=input_array.shape)
num_channels = input_array.shape[-1]
if backend == "cv":
for channel_idx in range(0, num_channels):
output_array[:, :, channel_idx] = cv2.GaussianBlur(
input_array[:, :, channel_idx],
ksize=(kernel_size, kernel_size),
sigmaX=sigma,
sigmaY=sigma
)
elif backend == "scipy":
for channel_idx in range(0, num_channels):
output_array[:, :, channel_idx] = gaussian_filter(
input_array[:, :, channel_idx],
sigma=sigma
)
else:
raise ValueError("Unsupported backend for gaussian smoothing.")
return output_array
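# Illustrative usage sketch for apply_gaussian_smoothing() on a hypothetical
# single-peak heatmap.
def _example_gaussian_smoothing():
    """Smooth a one-channel heatmap with a single peak at its center."""
    heatmap = np.zeros((32, 32, 1), dtype=np.float32)
    heatmap[16, 16, 0] = 1.0
    smoothed = apply_gaussian_smoothing(heatmap, kernel_size=5, sigma=3, backend="cv")
    # The peak energy is spread over the neighborhood around (16, 16).
    return smoothed[16, 16, 0], smoothed.sum()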
def nms_np(input_array, threshold=0.0):
"""Apply non-max suppression.
Args:
input_array (np.ndarray): Input array to apply nms
with shape (H, W, C)
        threshold (float): Optional threshold to suppress
            values below it.
Returns:
output_array (np.ndarray): output after nms
with same shape as `input_array` and retaining
only the peaks.
"""
shift_val = 1
output_array = np.zeros(shape=input_array.shape)
num_channels = input_array.shape[-1]
zeros_arr = np.zeros(input_array.shape[:-1])
for channel_idx in range(0, num_channels):
center_arr = input_array[:, :, channel_idx]
# init shifted array placeholders with zeros
shift_left, shift_right, shift_up, shift_down = np.tile(zeros_arr, (4, 1, 1))
# shift input down by shift value
shift_down[shift_val:, :] = center_arr[:-shift_val, :]
# shift input up by shift value
shift_up[:-shift_val, :] = center_arr[shift_val:, :]
# shift input to the right by shift value
shift_right[:, shift_val:] = center_arr[:, :-shift_val]
# shift input to the left by shift value
shift_left[:, :-shift_val] = center_arr[:, shift_val:]
        # Check where the center values are the maxima within the given
        # local window size.
peaks_binary = np.logical_and.reduce(
(center_arr >= shift_left,
center_arr >= shift_right,
center_arr >= shift_up,
center_arr >= shift_down,
center_arr > threshold))
        # Copy over only the peaks to the output array; the rest are suppressed.
output_array[:, :, channel_idx] = peaks_binary * center_arr
return output_array
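# Illustrative usage sketch for nms_np() on a hypothetical heatmap with one
# dominant peak and a weaker neighbor.
def _example_nms():
    """Keep only the local maximum of a small heatmap."""
    heatmap = np.zeros((5, 5, 1), dtype=np.float32)
    heatmap[2, 2, 0] = 0.9   # peak, retained
    heatmap[2, 3, 0] = 0.4   # weaker neighbor, suppressed
    peaks = nms_np(heatmap, threshold=0.1)
    return np.argwhere(peaks[:, :, 0] > 0)  # [[2, 2]]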
class Visualizer(object):
"""Visualizer class definitions."""
# TODO: Move this to separate visualizer module
def __init__(self, topology):
"""Init.
Args:
topology (np.ndarray): N x 4 array where N is the number of
connections, and the columns are (start_paf_idx, end_paf_idx,
start_conn_idx, end_conn_idx)
"""
self.topology = topology
def keypoints_viz(self, image, keypoints):
"""Function to visualize the given keypoints.
Args:
image (np.ndarray): Input image
keypoints (list): List of lists containing keypoints per skeleton
Returns:
image (np.ndarray): image with result overlay
"""
topology = self.topology
peak_color = (255, 150, 0)
edge_color = (254, 0, 190)
stick_width = 2
for i in range(topology.shape[0]):
start_idx = topology[i][2]
end_idx = topology[i][3]
for n in range(len(keypoints)):
start_joint = keypoints[n][start_idx]
end_joint = keypoints[n][end_idx]
if 0 in start_joint or 0 in end_joint:
continue
cv2.circle(
image, (int(
start_joint[0]), int(
start_joint[1])), 4, peak_color, thickness=-1)
cv2.circle(
image, (int(
end_joint[0]), int(
end_joint[1])), 4, peak_color, thickness=-1)
cv2.line(
image, (int(
start_joint[0]), int(
start_joint[1])), (int(
end_joint[0]), int(
end_joint[1])), edge_color, thickness=stick_width)
return image
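# Illustrative usage sketch for Visualizer.keypoints_viz(); the topology and
# keypoints below are hypothetical.
def _example_keypoints_viz():
    """Draw a single limb connecting two keypoints on a blank image."""
    topology = np.array([[0, 1, 0, 1]])            # one connection: keypoint 0 -> 1
    image = np.zeros((100, 100, 3), dtype=np.uint8)
    keypoints = [[[20.0, 20.0], [80.0, 60.0]]]     # one skeleton with two keypoints
    return Visualizer(topology).keypoints_viz(image, keypoints)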
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/bpnet/inferencer/utils.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""IVA DetectNet V2 root module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DetectNet V2 model visualization routines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/visualization/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DetectNet V2 visualization utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import tensorflow as tf
import nvidia_tao_tf1.core as tao_core
from nvidia_tao_tf1.cv.common.visualizer.base_visualizer import Descriptor
from nvidia_tao_tf1.cv.common.visualizer.tensorboard_visualizer import TensorBoardVisualizer
from nvidia_tao_tf1.cv.detectnet_v2.rasterizers.bbox_rasterizer import BboxRasterizer
logger = logging.getLogger(__name__)
class TargetClassConfig(object):
"""Target class config."""
def __init__(self, coverage_threshold):
"""Constructor."""
self.coverage_threshold = coverage_threshold
class DetectNetTBVisualizer(TensorBoardVisualizer):
"""Visualizer implemented as a static class."""
target_class_config = Descriptor("_target_class_config")
@classmethod
def build(cls, coverage_thresholds, enabled, num_images):
"""Build the Visualizer.
        Arguments:
            coverage_thresholds (dict): Per-class coverage thresholds for visualization.
            enabled (bool): Boolean to enable the visualizer.
            num_images (int): Number of images to be rendered.
"""
# Create / set the properties.
cls._target_class_config = {
class_name: TargetClassConfig(coverage_threshold)
for class_name, coverage_threshold in coverage_thresholds.items()
}
super().build(enabled,
num_images=num_images)
@classmethod
def build_from_config(cls, visualizer_config):
"""Build visualizer from config.
Arguments:
visualizer_config (visualizer_config_pb2.VisualizerConfig).
"""
coverage_thresholds = {
class_name: visualizer_config.target_class_config[class_name].coverage_threshold
for class_name in visualizer_config.target_class_config
}
cls.build(
coverage_thresholds, visualizer_config.enabled,
visualizer_config.num_images
)
@classmethod
def visualize_elliptical_bboxes(cls, target_class_names, input_images, coverage,
abs_bboxes):
"""Visualize bboxes as ellipses on tensorboard.
Args:
target_class_names: List of target class names.
input_images: Input images to visualize.
coverage: Coverage predictions, shape
[batch_size, num_classes, 1, grid_height, grid_width].
abs_bboxes: Bounding box predictions in absolute coordinates, shape
[batch_size, num_classes, 4, grid_height, grid_width].
"""
# Compute the number of images to visualize as the minimum of the user
# parameter and the actual minibatch size.
batch_size = min(cls.num_images, input_images.shape[0])
# Initially we have one bbox per grid cell.
num_bboxes = [tf.cast(abs_bboxes.shape[3] * abs_bboxes.shape[4], tf.int32)]
# Get visualization image size.
image_height = tf.cast(input_images.shape[2], tf.int32)
image_width = tf.cast(input_images.shape[3], tf.int32)
# Constants.
deadzone_radius = 1.0
draw_mode = tao_core.processors.BboxRasterizer.DRAW_MODE_ELLIPSE
# Loop over each image, and add predicted bboxes for each class to lists, sorted
# by ascending coverage value.
bboxes_per_image = []
bbox_class_ids = []
bbox_matrices = []
bbox_gradients = []
bbox_coverage_radii = []
bbox_flags = []
for n in range(batch_size):
bboxes_per_class = 0
for target_class_index, target_class_name in enumerate(target_class_names):
# Extract input arrays and flatten.
coverages = tf.reshape(coverage[n, target_class_index], num_bboxes)
xmin = tf.reshape(abs_bboxes[n, target_class_index, 0], num_bboxes)
ymin = tf.reshape(abs_bboxes[n, target_class_index, 1], num_bboxes)
xmax = tf.reshape(abs_bboxes[n, target_class_index, 2], num_bboxes)
ymax = tf.reshape(abs_bboxes[n, target_class_index, 3], num_bboxes)
zero = tf.zeros(shape=num_bboxes)
                one = tf.ones(shape=num_bboxes)
# Bbox color comes from its coverage value.
gradients = tf.transpose([[zero, zero, coverages]], (2, 0, 1))
# Compute bbox matrices based on bbox coordinates.
# Use constants for bbox params.
matrices, coverage_radii, _ =\
BboxRasterizer.bbox_from_rumpy_params(xmin=xmin, ymin=ymin,
xmax=xmax, ymax=ymax,
cov_radius_x=tf.fill(num_bboxes, 1.0),
cov_radius_y=tf.fill(num_bboxes, 1.0),
bbox_min_radius=tf.fill(num_bboxes, 0.0),
cov_center_x=tf.fill(num_bboxes, 0.5),
cov_center_y=tf.fill(num_bboxes, 0.5),
deadzone_radius=deadzone_radius)
flags = tf.fill(num_bboxes, tf.cast(draw_mode, tf.uint8))
# Filter out bboxes with min > max.
xdiff_mask = tf.cast(xmax > xmin, tf.float32)
ydiff_mask = tf.cast(ymax > ymin, tf.float32)
coverages *= xdiff_mask * ydiff_mask
# Sort bboxes by ascending coverage.
sort_value = -coverages
_, sorted_indices = tf.nn.top_k(input=sort_value, k=num_bboxes[0])
# Cut down work by throwing away bboxes with too small coverage.
coverage_threshold = 0.
if target_class_name in cls.target_class_config:
coverage_threshold =\
cls.target_class_config[target_class_name].coverage_threshold
half = tf.cast(tf.reduce_sum(tf.where(tf.less(coverages, coverage_threshold),
one, zero)), tf.int32)
sorted_indices = sorted_indices[half:]
# Rearrange data arrays into sorted order, and append to lists.
bboxes_per_class += tf.size(sorted_indices)
bbox_class_ids.append(tf.fill(num_bboxes, target_class_index))
bbox_matrices.append(tf.gather(matrices, sorted_indices))
bbox_gradients.append(tf.gather(gradients, sorted_indices))
bbox_coverage_radii.append(tf.gather(coverage_radii, sorted_indices))
bbox_flags.append(tf.gather(flags, sorted_indices))
bboxes_per_image += [bboxes_per_class]
# Rasterize everything in one go.
gradient_flags = [tao_core.processors.BboxRasterizer.GRADIENT_MODE_MULTIPLY_BY_COVERAGE]
rasterizer = tao_core.processors.BboxRasterizer()
images = rasterizer(num_images=batch_size,
num_classes=len(target_class_names),
num_gradients=1,
image_height=image_height,
image_width=image_width,
bboxes_per_image=bboxes_per_image,
bbox_class_ids=tf.concat(bbox_class_ids, axis=0),
bbox_matrices=tf.concat(bbox_matrices, axis=0),
bbox_gradients=tf.concat(bbox_gradients, axis=0),
bbox_coverage_radii=tf.concat(bbox_coverage_radii, axis=0),
bbox_flags=tf.concat(bbox_flags, axis=0),
gradient_flags=gradient_flags)
# Add target classes dimension and tile it as many times as there are classes.
inputs_tiled = tf.tile(tf.stack([input_images], axis=1),
[1, len(target_class_names), 1, 1, 1])
# Show image through at spots where we have a strong prediction.
images += inputs_tiled * images
# Add images to Tensorboard.
for target_class_index, target_class_name in enumerate(target_class_names):
cls.image(
'%s_preds' % target_class_name,
images[:, target_class_index],
collections=[tao_core.hooks.utils.INFREQUENT_SUMMARY_KEY])
@classmethod
def _draw_bboxes(cls, input_images, coverage, abs_bboxes, coverage_threshold=0.005):
"""
Visualize bbox rectangles.
Args:
input_images: Tensor holding the input images (NCHW).
coverage: Coverage predictions, shape [batch_size, 1, grid_height, grid_width].
abs_bboxes: Bounding box predictions in absolute coordinates, shape
[batch_size, 4, grid_height, grid_width].
coverage_threshold (float32): Threshold value for coverage values to visualize.
Returns:
            Images with drawn bounding boxes in NHWC format.
"""
# Reshape the bbox predictions into [batch_size, num_bboxes, 4].
batch_size = tf.cast(abs_bboxes.shape[0], tf.int32)
num_bboxes = tf.cast(abs_bboxes.shape[2] * abs_bboxes.shape[3], tf.int32)
bboxes = tf.transpose(tf.reshape(abs_bboxes, [batch_size, 4, num_bboxes]), [0, 2, 1])
# Normalize bboxes to [0,1] range.
height = tf.cast(input_images.shape[2], tf.float32)
width = tf.cast(input_images.shape[3], tf.float32)
xmin = bboxes[:, :, 0] / width
ymin = bboxes[:, :, 1] / height
xmax = bboxes[:, :, 2] / width
ymax = bboxes[:, :, 3] / height
# Convert to [y_min, x_min, y_max, x_max] order. Bboxes tensor shape is
# [batch_size, num_bboxes, 4].
bboxes = tf.stack([ymin, xmin, ymax, xmax], 2)
# Mask out bboxes with coverage below a threshold or min > max.
coverage = tf.reshape(coverage, [batch_size, num_bboxes, 1])
coverage_mask = tf.cast(coverage > coverage_threshold, tf.float32)
xdiff_mask = tf.reshape(tf.cast(xmax > xmin, tf.float32), [batch_size, num_bboxes, 1])
ydiff_mask = tf.reshape(tf.cast(ymax > ymin, tf.float32), [batch_size, num_bboxes, 1])
mask = coverage_mask * xdiff_mask * ydiff_mask
bboxes = tf.multiply(bboxes, mask)
# Convert input image to NHWC.
input_images = tf.transpose(input_images, [0, 2, 3, 1])
# Draw bboxes.
output_images = tf.image.draw_bounding_boxes(input_images, bboxes)
return output_images
@classmethod
def visualize_rectangular_bboxes(cls, target_class_names, input_images, coverage,
abs_bboxes):
"""Visualize bboxes as rectangles on tensorboard.
Args:
target_class_names: List of target class names.
input_images: Input images to visualize.
coverage: Coverage predictions, shape [batch_size, 1, grid_height, grid_width].
abs_bboxes: Bounding box predictions in absolute coordinates, shape
[batch_size, 4, grid_height, grid_width].
"""
# Loop over each target class and call visualization of the bounding boxes.
for target_class_index, target_class_name in enumerate(target_class_names):
# Look up coverage threshold for this class.
coverage_threshold = 0.
if target_class_name in cls.target_class_config:
coverage_threshold =\
cls.target_class_config[target_class_name].coverage_threshold
output_images = cls._draw_bboxes(input_images,
coverage[:, target_class_index],
abs_bboxes[:, target_class_index],
coverage_threshold)
cls.image(
'%s_rectangle_bbox_preds' % target_class_name,
output_images,
data_format='channels_last',
collections=[tao_core.hooks.utils.INFREQUENT_SUMMARY_KEY]
)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/visualization/visualizer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test visualizations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import pytest
import tensorflow as tf
import nvidia_tao_tf1.core
from nvidia_tao_tf1.cv.detectnet_v2.proto.visualizer_config_pb2 import VisualizerConfig
from nvidia_tao_tf1.cv.detectnet_v2.visualization.visualizer import \
DetectNetTBVisualizer as Visualizer
def test_build_visualizer():
"""Test visualizer config parsing."""
config = VisualizerConfig()
# Default values should pass.
Visualizer.build_from_config(config)
config.enabled = True
config.num_images = 3
config.target_class_config['car'].coverage_threshold = 1.0
Visualizer.build_from_config(config)
assert Visualizer.enabled is True
assert Visualizer.num_images == 3
assert Visualizer.target_class_config['car'].coverage_threshold == 1.0
def test_draw_bboxes():
"""Test superposition of bounding boxes and input images."""
batch_size = 1
width = 6
height = 6
grid_width = 2
grid_height = 2
channels = 3
# Get zero images as the input.
input_images = np.zeros([batch_size, channels, width, height], dtype=np.float32)
# All coverage 0 except 2.
preds = np.zeros([batch_size, 1, grid_height, grid_width], dtype=np.float32)
preds[0, 0, 0, 1] = 1.
preds[0, 0, 1, 0] = 1.
# Absolute coordinates.
bbox_preds = np.zeros([batch_size, 4, grid_height, grid_width], dtype=np.float32)
# A large bbox in the bottom left (cov 1).
bbox_preds[0, :, 1, 0] = [1, 1, 6, 6] # These indices start from 1 on the image plane.
# A bbox on the top right (coverage 1).
bbox_preds[0, :, 0, 1] = [1, 3, 3, 5]
# Note that this bounding box has 0 coverage.
bbox_preds[0, :, 1, 1] = [2, 2, 4, 4]
output_images = Visualizer._draw_bboxes(input_images, preds, bbox_preds,
coverage_threshold=0.5)
with tf.Session().as_default():
bbox_images = output_images.eval()
# Check the function returns [NHWC] format.
assert bbox_images.shape == (batch_size, height, width, channels)
# Collapse the bounding box raster on single channel.
# We do not know what color the tf bounding box drawing will assign to them, so we will
# only check the binary values.
bbox_black_black_white = np.int32(np.sum(bbox_images, 3) > 0)
    # One large bounding box spanning pixels 1-6 and a small one framed at pixels (1,3,3,5) are
    # expected; the bounding box (2,2,4,4) is dropped due to the coverage threshold.
bounding_box_expected = np.array([[[1, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 0, 1],
[1, 1, 1, 0, 0, 1],
[1, 0, 1, 0, 0, 1],
[1, 1, 1, 0, 0, 1],
[1, 1, 1, 1, 1, 1]]], dtype=np.int32)
np.testing.assert_array_equal(bounding_box_expected, bbox_black_black_white)
@pytest.mark.skipif(os.getenv("RUN_ON_CI", "0") == "1", reason="Cannot be run on CI")
def test_visualize_elliptical_bboxes():
"""Test the visualize_elliptical_bboxes staticmethod."""
target_class_names = ['class_1', 'class_2']
num_classes = len(target_class_names)
H, W = 5, 7
input_image_shape = (2, 3, 2 * H, 2 * W) # NCHW.
input_images = \
tf.constant(np.random.randn(*input_image_shape), dtype=np.float32)
coverage_shape = (2, num_classes, 1, H, W)
coverage = tf.constant(np.random.uniform(low=0.0, high=1.0, size=coverage_shape),
dtype=tf.float32)
abs_bboxes_shape = (2, num_classes, 4, H, W)
abs_bboxes = tf.constant(np.random.uniform(low=-1.0, high=1.0, size=abs_bboxes_shape),
dtype=tf.float32)
# Build the Visualizer.
config = VisualizerConfig()
config.enabled = True
config.num_images = 3
Visualizer.build_from_config(config)
Visualizer.visualize_elliptical_bboxes(
target_class_names=target_class_names,
input_images=input_images,
coverage=coverage,
abs_bboxes=abs_bboxes)
# Check that we added the expected visualizations to Tensorboard.
summaries = tf.get_collection(nvidia_tao_tf1.core.hooks.utils.INFREQUENT_SUMMARY_KEY)
assert len(summaries) == num_classes
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/visualization/tests/test_visualizer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""A launcher script for Dashnet tasks inside a runtime container."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from maglev_sdk.docker_container.entrypoint import main
if __name__ == '__main__':
main('detecnet_v2', 'nvidia_tao_tf1/cv/detectnet_v2/scripts')
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/docker/detectnet_v2.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converts a KITTI detection dataset to TFRecords."""
from __future__ import absolute_import
from __future__ import print_function
from collections import Counter
import json
import logging
import os
import random
import numpy as np
from PIL import Image
from six.moves import range
import tensorflow as tf
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.detectnet_v2.common.dataio.converter_lib import _bytes_feature
from nvidia_tao_tf1.cv.detectnet_v2.common.dataio.converter_lib import _float_feature
from nvidia_tao_tf1.cv.detectnet_v2.common.dataio.converter_lib import _int64_feature
from nvidia_tao_tf1.cv.detectnet_v2.dataio.dataset_converter_lib import DatasetConverter
logger = logging.getLogger(__name__)
class KITTIConverter(DatasetConverter):
"""Converts a KITTI detection dataset to TFRecords."""
def __init__(self, root_directory_path, num_partitions, num_shards,
output_filename,
sample_modifier,
image_dir_name=None,
label_dir_name=None,
kitti_sequence_to_frames_file=None,
point_clouds_dir=None,
calibrations_dir=None,
extension='.png',
partition_mode='sequence',
val_split=None,
use_dali=False,
class2idx=None):
"""Initialize the converter.
Args:
root_directory_path (string): Dataset directory path relative to data root.
num_partitions (int): Number of partitions (folds).
num_shards (int): Number of shards.
output_filename (str): Path for the output file.
sample_modifier(SampleModifier): An instance of sample modifier
that does e.g. duplication and filtering of samples.
image_dir_name (str): Name of the subdirectory containing images.
label_dir_name (str): Name of the subdirectory containing the label files for the
respective images in image_dir_name
kitti_sequence_to_frames_file (str): name of the kitti sequence to frames map file in
root directory path. This file contains a mapping of the sequences to images in
image_dir_name.
            point_clouds_dir (str): Path to the point cloud data within root_directory_path.
            calibrations_dir (str): Path to the calibration data within root_directory_path.
extension (str): Extension of the images in the dataset directory.
            partition_mode (str): Mode to partition the dataset. We only support sequence or
                random split mode. In the sequence mode, it is mandatory to instantiate the
                kitti sequence to frames file. Also, any arbitrary number of partitions may be
                used. However, for random split, the sequence map file is ignored and only 2
                partitions can ever be used. Here, the data is divided into two folds:
1. validation fold
2. training fold
Validation fold (defaults to fold=0) contains val_split% of data, while train
fold contains (100-val_split)% of data.
val_split (int): Percentage split for validation. This is used with the random
partition mode only.
"""
super(KITTIConverter, self).__init__(
root_directory_path=root_directory_path,
num_partitions=num_partitions,
num_shards=num_shards,
output_filename=output_filename,
sample_modifier=sample_modifier)
# KITTI defaults.
self.images_dir = image_dir_name
self.labels_dir = label_dir_name
self.point_clouds_dir = point_clouds_dir
self.calibrations_dir = calibrations_dir
self.extension = extension
self.partition_mode = partition_mode
self.sequence_to_frames_file = kitti_sequence_to_frames_file
self.val_split = val_split / 100.
self.use_dali = use_dali
self.class2idx = class2idx
self.idx2class = None
def _partition(self):
"""Partition KITTI dataset to self.output_partitions partitions based on sequences.
The following code is a modified version of the KITTISplitter class in Rumpy.
Returns:
partitions (list): A list of lists of frame ids, one list per partition.
"""
logger.debug("Generating partitions")
s_logger = status_logging.get_status_logger()
s_logger.write(message="Generating partitions")
partitions = [[] for _ in range(self.output_partitions)]
        # Sequence-wise partition into n partitions.
if self.partition_mode == 'sequence':
if not self.sequence_to_frames_file:
raise ValueError("Kitti sequence to frames file is required for "
"sequence wise paritioning. Please set this as the relative "
"path to the file from `root_directory_path`")
# Create sequence to frames mapping.
self.sequence_to_frames_map = self._read_sequence_to_frames_file()
if self.output_partitions > 1:
sorted_sequences = sorted(iter(self.sequence_to_frames_map.items()),
key=lambda k_v: (-len(k_v[1]), k_v[0]))
total_frames = 0
for counter, (_, frame_ids) in enumerate(sorted_sequences):
total_frames += len(frame_ids)
partition_idx = counter % self.output_partitions
partitions[partition_idx].extend(frame_ids)
logger.debug("Total number of frames: {}".format(total_frames))
s_logger.kpi = {
"num_images": total_frames
}
s_logger.write(
message=f"Total number of images: {total_frames}"
)
# in Rumpy with 5 folds, the first validation bucket contains the fifth sequence.
# Similarly, the second validation bucket contains samples from the fourth sequence,
# and so on. Thus, the partition order needs to be reversed to match the Rumpy
# validation buckets.
partitions = partitions[::-1]
else:
partitions = [[frame for frames in list(self.sequence_to_frames_map.values())
for frame in frames]]
s_logger.kpi = {
"num_images": len(partitions[0])
}
s_logger.write(
message=f"Total number of images: {len(partitions[0])}"
)
        # Partitioning data randomly into train and val splits.
elif self.partition_mode == 'random':
assert self.output_partitions == 2, "Invalid number of partitions ({}) "\
"for random split mode.".format(self.output_partitions)
assert 0 <= self.val_split < 1, (
"Validation split must satisfy the criteria, 0 <= val_split < 100. "
)
images_root = os.path.join(self.root_dir, self.images_dir)
images_list = [os.path.splitext(imfile)[0] for imfile in
sorted(os.listdir(images_root)) if
imfile.endswith(self.extension)]
total_num_images = len(images_list)
num_val_images = (int)(self.val_split * total_num_images)
logger.debug("Validation percentage: {}".format(self.val_split))
partitions[0].extend(images_list[:num_val_images])
partitions[1].extend(images_list[num_val_images:])
for part in partitions:
random.shuffle(part)
logger.info("Num images in\nTrain: {}\tVal: {}".format(len(partitions[1]),
len(partitions[0])))
s_logger.kpi = {
"num_images": total_num_images
}
s_logger.write(
message="Num images in\nTrain: {}\tVal: {}".format(
len(partitions[1]),
len(partitions[0])
)
)
if self.val_split == 0:
logger.info("Skipped validation data...")
s_logger.write(message="Skipped validation data.")
else:
validation_note = (
"Validation data in partition 0. Hence, while choosing the validation"
"set during training choose validation_fold 0."
)
logger.info(validation_note)
s_logger.write(message=validation_note)
else:
raise NotImplementedError("Unknown partition mode. Please stick to either "
"random or sequence")
return partitions
def _create_example_proto(self, frame_id):
"""Generate the example proto for this frame.
Args:
frame_id (string): The frame id.
Returns:
example (tf.train.Example): An Example containing all labels for the frame.
"""
# Create proto for the training example. Populate with frame attributes.
example = self._example_proto(frame_id)
if self.use_dali:
width, height = self._get_image_size(frame_id)
self._add_image(example, frame_id)
self._add_targets(example, frame_id, width, height)
# Add labels.
else:
self._add_targets(example, frame_id)
self._add_point_cloud(example, frame_id)
self._add_calibrations(example, frame_id)
return example
def _add_image(self, example, frame_id):
"""Add encoded image to example."""
image_file = os.path.join(self.root_dir, self.images_dir, frame_id + self.extension)
image_string = open(image_file, "rb").read()
f = example.features.feature
f['frame/encoded'].MergeFrom(_bytes_feature(image_string))
def _add_point_cloud(self, example, frame_id):
"""Add path to the point cloud file in the Example protobuf."""
if self.point_clouds_dir is not None:
frame_id = os.path.join(self.point_clouds_dir, frame_id)
f = example.features.feature
f['point_cloud/id'].MergeFrom(_bytes_feature(frame_id.encode('utf-8')))
f['point_cloud/num_input_channels'].MergeFrom(_int64_feature(4))
def _add_calibrations(self, example, frame_id):
"""Add calibration matrices in the Example protobuf."""
if self.calibrations_dir is not None:
calibration_file = os.path.join(self.root_dir,
self.calibrations_dir, '{}.txt'.format(frame_id))
self._add_calibration_matrices(example, calibration_file)
def _read_sequence_to_frames_file(self):
with open(os.path.join(self.root_dir, self.sequence_to_frames_file), 'r') as f:
sequence_to_frames_map = json.load(f)
return sequence_to_frames_map
def _get_image_size(self, frame_id):
"""Read image size from the image file, image sizes vary in KITTI."""
image_file = os.path.join(self.root_dir, self.images_dir, frame_id + self.extension)
width, height = Image.open(image_file).size
return width, height
def _example_proto(self, frame_id):
"""Generate a base Example protobuf to which KITTI-specific features are added."""
width, height = self._get_image_size(frame_id)
# Add the image directory name to the frame id so that images and
# point clouds can be easily stored in separate folders.
frame_id = os.path.join(self.images_dir, frame_id)
example = tf.train.Example(features=tf.train.Features(feature={
'frame/id': _bytes_feature(frame_id.encode('utf-8')),
'frame/height': _int64_feature(height),
'frame/width': _int64_feature(width),
}))
return example
def _add_targets(self, example, frame_id, width=None, height=None):
"""Add KITTI target features such as bbox to the Example protobuf.
Reads labels from KITTI txt files with following fields:
(From Kitti devkit's README)
1 type Describes the type of object: 'Car', 'Van',
'Truck', 'Pedestrian', 'Person_sitting', 'Cyclist',
'Tram', 'Misc' or 'DontCare'
1 truncated Float from 0 (non-truncated) to 1 (truncated),
where truncated refers to the object leaving image
boundaries
1 occluded Integer (0,1,2,3) indicating occlusion state:
0 = fully visible, 1 = partly occluded
2 = largely occluded, 3 = unknown
1 alpha Observation angle of object, ranging [-pi..pi]
4 bbox 2D bounding box of object in the image (0-based
index): contains left, top, right, bottom pixel
coordinates
3 dimensions 3D object dimensions: height, width, length (in
meters)
3 location 3D object location x,y,z in camera coordinates (in
meters)
1 rotation_y Rotation ry around Y-axis in camera coordinates
[-pi..pi]
Args:
example (tf.train.Example): The Example protobuf for this frame.
frame_id (string): Frame id.
"""
object_classes = []
truncation = []
occlusion = []
observation_angle = []
coordinates_x1 = []
coordinates_y1 = []
coordinates_x2 = []
coordinates_y2 = []
world_bbox_h = []
world_bbox_w = []
world_bbox_l = []
world_bbox_x = []
world_bbox_y = []
world_bbox_z = []
world_bbox_rot_y = []
object_class_ids = []
# reads the labels as a list of tuples
label_file = os.path.join(self.root_dir, self.labels_dir, '{}.txt'.format(frame_id))
        # np.genfromtxt will fail if the class name is an integer literal, e.g. '1'.
with open(label_file) as lf:
labels = lf.readlines()
labels = [l.strip() for l in labels if l.strip()]
labels = [l.split() for l in labels]
# Following steps require the class names to be bytes
labels = [[l[0].encode("utf-8")] + [float(x) for x in l[1:]] for l in labels]
for label in labels:
assert len(label) == 15, 'Ground truth kitti labels should have only 15 fields.'
x1 = int(label[4])
y1 = int(label[5])
x2 = int(label[6])
y2 = int(label[7])
# Check to make sure the coordinates are 'ltrb' format.
error_string = "Top left coordinate must be less than bottom right."\
"Error in object {} of label_file {}. \nCoordinates: "\
"x1 = {}, x2 = {}, y1: {}, y2: {}".format(labels.index(label),
label_file,
x1, x2, y1, y2)
if not (x1 < x2 and y1 < y2):
logger.debug(error_string)
logger.debug("Skipping this object")
# @scha: KITTI does not have annotation id
self.log_warning[f"{label_file}_{labels.index(label)}"] = [x1, y1, x2, y2]
continue
# Map object classes as they are in the dataset to target classes of the model
self.class_map[label[0]] = label[0].lower()
object_class = label[0].lower()
if self.use_dali:
if (object_class.decode() not in self.class2idx):
logger.debug("Skipping the class {} in dataset".format(object_class))
continue
object_classes.append(object_class)
truncation.append(label[1])
occlusion.append(int(label[2]))
observation_angle.append(label[3])
if self.use_dali:
# @tylerz: DALI requires relative coordinates and integer
coordinates_x1.append(float(label[4]) / width)
coordinates_y1.append(float(label[5]) / height)
coordinates_x2.append(float(label[6]) / width)
coordinates_y2.append(float(label[7]) / height)
object_class_id = self.class2idx[object_class.decode()]
object_class_ids.append(object_class_id)
else:
coordinates_x1.append(label[4])
coordinates_y1.append(label[5])
coordinates_x2.append(label[6])
coordinates_y2.append(label[7])
world_bbox_h.append(label[8])
world_bbox_w.append(label[9])
world_bbox_l.append(label[10])
world_bbox_x.append(label[11])
world_bbox_y.append(label[12])
world_bbox_z.append(label[13])
world_bbox_rot_y.append(label[14])
f = example.features.feature
if self.use_dali:
f['target/object_class_id'].MergeFrom(_float_feature(*object_class_ids))
else:
f['target/object_class'].MergeFrom(_bytes_feature(*object_classes))
f['target/truncation'].MergeFrom(_float_feature(*truncation))
f['target/occlusion'].MergeFrom(_int64_feature(*occlusion))
f['target/observation_angle'].MergeFrom(_float_feature(*observation_angle))
f['target/coordinates_x1'].MergeFrom(_float_feature(*coordinates_x1))
f['target/coordinates_y1'].MergeFrom(_float_feature(*coordinates_y1))
f['target/coordinates_x2'].MergeFrom(_float_feature(*coordinates_x2))
f['target/coordinates_y2'].MergeFrom(_float_feature(*coordinates_y2))
f['target/world_bbox_h'].MergeFrom(_float_feature(*world_bbox_h))
f['target/world_bbox_w'].MergeFrom(_float_feature(*world_bbox_w))
f['target/world_bbox_l'].MergeFrom(_float_feature(*world_bbox_l))
f['target/world_bbox_x'].MergeFrom(_float_feature(*world_bbox_x))
f['target/world_bbox_y'].MergeFrom(_float_feature(*world_bbox_y))
f['target/world_bbox_z'].MergeFrom(_float_feature(*world_bbox_z))
f['target/world_bbox_rot_y'].MergeFrom(_float_feature(*world_bbox_rot_y))
def _add_calibration_matrices(self, example, filename):
"""Add KITTI calibration matrices to the Example protobuf.
Adds the following matrices to the Example protobuf:
- 4x4 transformation matrix from Lidar coordinates to camera coordinates.
- 3x4 projection matrix from Lidar coordinates to image plane.
Args:
example: Protobuf to which the matrices are added.
filename: Absolute path to the calibration file.
"""
# KITTI calibration file has the following format (each matrix is given on a separate
# line in the file in the following order):
# P0: 3x4 projection matrix after rectification for camera 0 (12 floats)
# P1: 3x4 projection matrix after rectification for camera 1 (12 floats)
# P2: 3x4 projection matrix after rectification for camera 2 (12 floats)
# P3: 3x4 projection matrix after rectification for camera 3 (12 floats)
# R0_rect: 3x3 rectifying rotation matrix (9 floats)
# Tr_velo_to_cam: 3x4 transformation matrix from Lidar to reference camera (12 floats)
# Tr_imu_to_velo: 3x4 transformation matrix from GPS/IMU to Lidar (12 floats)
if os.path.isfile(filename):
# Camera projection matrix after rectification. Projects a 3D point X = (x, y, z, 1)^T
# in rectified (rotated) camera coordinates to a point Y = (u, v, 1)^T in the camera
# image with Y = P2*X. P2 corresponds to the left color image camera.
P2 = np.genfromtxt(filename, dtype=np.float32, skip_header=2,
skip_footer=4, usecols=tuple(range(1, 13)))
# Rectifying rotation matrix
R0_rect = np.genfromtxt(filename, dtype=np.float32, skip_header=4,
skip_footer=2, usecols=tuple(range(1, 10)))
# Rigid body transformation matrix from Lidar coordinates to camera coordinates
Tr_velo_to_cam = np.genfromtxt(filename, dtype=np.float32, skip_header=5,
skip_footer=1, usecols=tuple(range(1, 13)))
else:
raise IOError("Calibration file %s not found." % filename)
P2 = P2.reshape((3, 4))
# Expand R0_rect by appending 4th row and column of zeros, and setting R0[3, 3] = 1
R0_rect = R0_rect.reshape((3, 3))
R0 = np.eye(4)
R0[:3, :3] = R0_rect
# Expand Tr_velo_to_cam by appending 4th row of zeros, and setting Tr[3, 3] = 1
Tr_velo_to_cam = Tr_velo_to_cam.reshape((3, 4))
Tr = np.eye(4)
Tr[:3, :4] = Tr_velo_to_cam
# Transformation matrix T_lidar_to_camera = R0*Tr_velo_to_cam from Lidar coordinates
# (x, y, z, 1)^T to reference camera coordinates (u, v, w, q)^T
T_lidar_to_camera = np.dot(R0, Tr)
# Projection matrix P_lidar_to_image = P2*T_lidar_to_camera from Lidar coordinates
# (x, y, z, 1)^T to image coordinates (u, v, w)^T
P_lidar_to_image = np.dot(P2, T_lidar_to_camera)
f = example.features.feature
f['calibration/T_lidar_to_camera'].MergeFrom(_float_feature(*T_lidar_to_camera.flatten()))
f['calibration/P_lidar_to_image'].MergeFrom(_float_feature(*P_lidar_to_image.flatten()))
def _count_targets(self, example):
"""Count the target objects in the given example protobuf.
Args:
example (tf.train.Example): Example protobuf containing the labels for a frame.
Returns:
object_count (Counter): Number of objects per target class.
"""
target_classes = example.features.feature['target/object_class'].bytes_list.value
if len(target_classes) == 0:
target_classes_id = example.features.feature['target/object_class_id'].float_list.value
if len(target_classes_id) != 0:
if self.idx2class is None:
self.idx2class = {self.class2idx[k] : k for k in self.class2idx}
target_classes = []
for idx in target_classes_id:
target_classes.append(self.idx2class[idx].encode("ascii"))
object_count = Counter(target_classes)
return object_count
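# Illustrative sketch of the matrix composition performed in
# _add_calibration_matrices() above, using hypothetical calibration values.
def _example_lidar_to_image_projection():
    """Compose T_lidar_to_camera and P_lidar_to_image from toy calibration data."""
    P2 = np.hstack([np.eye(3, dtype=np.float32), np.zeros((3, 1), dtype=np.float32)])
    R0 = np.eye(4, dtype=np.float32)      # rectifying rotation, expanded to 4x4
    Tr = np.eye(4, dtype=np.float32)      # Lidar-to-camera rigid transform
    Tr[:3, 3] = [0.0, 0.0, -0.27]         # hypothetical translation
    T_lidar_to_camera = np.dot(R0, Tr)                 # 4x4
    P_lidar_to_image = np.dot(P2, T_lidar_to_camera)   # 3x4
    return T_lidar_to_camera.shape, P_lidar_to_image.shape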
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/dataio/kitti_converter_lib.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modify samples before writing them to .tfrecords."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import Counter
from six.moves import range
class SampleModifier(object):
"""Modify samples to be exported to .tfrecords.
Currently, sample duplication and filtering are implemented.
"""
def __init__(self, filter_samples_containing_only, dominant_target_classes,
minimum_target_class_imbalance, num_duplicates, max_training_samples,
source_to_target_class_mapping, validation_fold, num_folds=None):
"""Initialize a SampleModifier.
Args:
filter_samples_containing_only (list): List of strings indicating such target classes
that will be filtered if the sample contains only that class.
dominant_target_classes (list): List of strings indicating the dominant target classes:
Target classes to be considered as dominant when determining whether to duplicate a
sample.
minimum_target_class_imbalance (dict): Target class - float pairs indicating the
minimum imbalance determining when to duplicate. Basically if the class imbalance
within the frame is larger than this, duplicate. E.g. if
#bicycles / #dominant class objects > minimum_target_class_imbalance[bicycle],
duplicate. Default value for a class is 1.0 if not given.
num_duplicates (int): Number of duplicate samples to be added when the duplication
condition above is fulfilled.
max_training_samples (int): Maximum number of training samples. The number of training
samples is capped to this number, i.e. any samples beyond it are filtered out.
source_to_target_class_mapping (dict): Mapping from label/source classes to
target classes.
validation_fold (int): Validation fold number (0-based).
num_folds (int): The total number of folds.
"""
self.filter_samples_containing_only = filter_samples_containing_only
self.dominant_target_classes = dominant_target_classes
self.minimum_target_class_imbalance = minimum_target_class_imbalance
self.num_duplicates = num_duplicates
self.source_to_target_class_mapping = source_to_target_class_mapping
self.validation_fold = validation_fold
self.filter_samples_containing_only = [bytes(f_tmp, 'utf-8') for f_tmp in
self.filter_samples_containing_only]
self.dominant_target_classes = [bytes(f_tmp, 'utf-8') for
f_tmp in self.dominant_target_classes]
# Check that these two parameters have been defined in the mapping.
assert set(self.dominant_target_classes) <= \
set(self.source_to_target_class_mapping.values())
assert set(self.filter_samples_containing_only) <= \
set(self.source_to_target_class_mapping.values())
if max_training_samples > 0:
if num_folds is None:
raise ValueError(("Number of folds must be specified if max_training_samples>0"))
self.sample_counts = [0] * num_folds
if validation_fold is not None:
self.max_samples_per_training_fold = max_training_samples // (num_folds - 1)
else:
self.max_samples_per_training_fold = max_training_samples // num_folds
else:
self.max_samples_per_training_fold = 0
def _is_in_training_set(self, fold):
"""Return True if the provided fold number is in the training set, otherwise False."""
in_training_set = True
if self.validation_fold is not None:
if fold == self.validation_fold:
in_training_set = False
return in_training_set
def modify_sample(self, example, fold):
"""Modify a sample if it belongs to the training set.
If the validation set is not defined, then no changes are made to the sample.
Args:
example: tf.train.Example instance.
fold (int): fold to add sample to.
Return:
examples: List of modified examples.
"""
# Apply modifications only to the training set, i.e., exclude the validation examples.
if self._is_in_training_set(fold):
filtered_example = self._filter_sample(example)
if filtered_example:
examples = self._duplicate_sample(filtered_example)
else:
examples = []
if self.max_samples_per_training_fold > 0:
# Filter examples out if we have reached the max number of samples per fold.
max_to_retain = self.max_samples_per_training_fold - self.sample_counts[fold]
examples = examples[:max_to_retain]
self.sample_counts[fold] += len(examples)
else:
examples = [example]
return examples
def _get_target_classes_in_sample(self, example):
"""Return target classes contained in the given sample.
Args:
example (tf.train.Example): The sample.
Returns:
target_classes_in_sample (list): List of strings
indicating the target class names present in the sample.
"""
source_classes_in_sample = \
example.features.feature['target/object_class'].bytes_list.value
src_mapping = [self.source_to_target_class_mapping.get(
source_classes_in_sample_tmp) for source_classes_in_sample_tmp
in source_classes_in_sample]
target_classes_in_sample = [x for x in src_mapping if x is not None]
return target_classes_in_sample
def _filter_sample(self, example):
"""Filter samples based on the image contents and custom rules.
Args:
example: tf.train.Example.
Return:
filtered_example: None if the sample was filtered, otherwise return example as is.
"""
filtered_example = example
if self.filter_samples_containing_only:
target_classes_in_sample = self._get_target_classes_in_sample(example)
# Check whether the sample contains only objects in a single class.
same_class = (len(set(target_classes_in_sample)) == 1 and
target_classes_in_sample[0] in self.filter_samples_containing_only)
if same_class:
filtered_example = None # Filter the example.
return filtered_example
def _duplicate_sample(self, example):
"""Duplicate samples based on the image contents and custom rules.
Args:
example: tf.train.Example object.
Return:
duplicated_examples: A list of tf.train.Example objects. The list contains multiple
copies of the sample if it is duplicated.
"""
target_classes_in_sample = self._get_target_classes_in_sample(example)
duplicated_examples = [example]
if self.dominant_target_classes and target_classes_in_sample:
# Count number of objects per target class in this sample.
target_class_counts = Counter(target_classes_in_sample)
            # Ad-hoc rules for duplicating frames. If the imbalance
# #rare class / #dominant class > minimum_imbalance[rare_class], then
# duplicate.
rare_target_classes = \
[target_class_name for target_class_name in set(target_classes_in_sample)
if target_class_name not in self.dominant_target_classes]
# Check if the minimum imbalance is exceeded for any class in this frame.
minimum_imbalance_exceeded = \
any([target_class_counts[rare_target_class] >
target_class_counts[dominant_target_class] *
self.minimum_target_class_imbalance.get(rare_target_class, 1.0)
for rare_target_class in rare_target_classes
for dominant_target_class in self.dominant_target_classes])
if minimum_imbalance_exceeded:
# Duplicate.
for _ in range(self.num_duplicates):
duplicated_examples.append(example)
return duplicated_examples
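# Illustrative sketch of the duplication criterion used in _duplicate_sample()
# above, with hypothetical class counts and imbalance thresholds.
def _example_imbalance_check():
    """Return True when a rare class outnumbers a dominant class by the set ratio."""
    target_class_counts = Counter({b'car': 2, b'bicycle': 5})
    dominant_target_classes = [b'car']
    minimum_target_class_imbalance = {b'bicycle': 2.0}
    return any(
        target_class_counts[rare] >
        target_class_counts[dominant] * minimum_target_class_imbalance.get(rare, 1.0)
        for rare in [b'bicycle']
        for dominant in dominant_target_classes)  # 5 > 2 * 2.0 -> True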
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/dataio/sample_modifier.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""Tools to convert datasets into .tfrecords."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/dataio/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Export datasets to .tfrecords files based on dataset export config."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
from nvidia_tao_tf1.cv.detectnet_v2.dataio.build_converter import build_converter
from nvidia_tao_tf1.cv.detectnet_v2.proto.dataset_config_pb2 import DataSource
TEMP_TFRECORDS_DIR = os.path.join(tempfile.gettempdir(), 'temp-tfrecords')
def _clear_temp_dir():
"""If the temporary directory exists, remove its contents. Otherwise, create the directory."""
temp_dir = TEMP_TFRECORDS_DIR
if os.path.exists(temp_dir):
shutil.rmtree(temp_dir)
os.makedirs(temp_dir)
def export_tfrecords(dataset_export_config, validation_fold):
"""A helper function to export .tfrecords files based on a list of dataset export config.
Args:
dataset_export_config: A list of DatasetExportConfig objects.
validation_fold (int): Validation fold number (0-based). Indicates which fold from the
training data to use as validation. Can be None.
Return:
data_sources: A list of DataSource proto objects.
"""
dataset_path = TEMP_TFRECORDS_DIR
_clear_temp_dir()
data_sources = []
for dataset_idx, config in enumerate(dataset_export_config):
temp_filename = str(dataset_idx)
temp_filename = os.path.join(dataset_path, temp_filename)
converter = build_converter(config, temp_filename, validation_fold)
converter.convert()
# Create a DataSource message for this dataset
data_source = DataSource()
data_source.tfrecords_path = temp_filename + '*'
data_source.image_directory_path = config.image_directory_path
data_sources.append(data_source)
return data_sources
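# --- Illustrative sketch (not part of export.py) ---
# A minimal, runnable sketch of how the temporary tfrecords paths produced by
# export_tfrecords() are composed: one filename prefix per dataset index under
# TEMP_TFRECORDS_DIR, with a trailing '*' glob stored in the DataSource proto.
# The dataset count of 2 is an illustrative assumption.
import os
import tempfile

temp_dir = os.path.join(tempfile.gettempdir(), 'temp-tfrecords')
for dataset_idx in range(2):
    print(os.path.join(temp_dir, str(dataset_idx)) + '*')
# e.g. /tmp/temp-tfrecords/0* and /tmp/temp-tfrecords/1*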
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/dataio/export.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build a dataset converter object based on dataset export config."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.detectnet_v2.dataio.build_sample_modifier import build_sample_modifier
from nvidia_tao_tf1.cv.detectnet_v2.dataio.coco_converter_lib import COCOConverter
from nvidia_tao_tf1.cv.detectnet_v2.dataio.kitti_converter_lib import KITTIConverter
logger = logging.getLogger(__name__)
def build_converter(dataset_export_config, output_filename, validation_fold=None):
"""Build a DatasetConverter object.
Build and return an object of desired subclass of DatasetConverter based on
given dataset export configuration.
Args:
dataset_export_config (DatasetExportConfig): Dataset export configuration object
output_filename (string): Path for the output file.
validation_fold (int): Optional. Validation fold number (0-based). Indicates which
partition is the validation fold. If samples are modified, then the modifications
are applied only to the training set, while the validation set remains unchanged.
Return:
converter (DatasetConverter): An object of desired subclass of DatasetConverter.
"""
convert_config_type = dataset_export_config.WhichOneof('convert_config_type')
config = getattr(dataset_export_config, convert_config_type)
if convert_config_type == "kitti_config":
# Fetch the dataset configuration object first
# This can be extended for other data formats such as Pascal VOC,
# or open Images etc. For GA, we will stick to kitti configuration.
config = getattr(dataset_export_config, "kitti_config")
constructor_kwargs = {'root_directory_path': config.root_directory_path,
'partition_mode': config.partition_mode,
'num_partitions': config.num_partitions,
'num_shards': config.num_shards,
'output_filename': output_filename}
# Create a SampleModifier
sample_modifier_config = dataset_export_config.sample_modifier_config
sample_modifier = build_sample_modifier(
sample_modifier_config=sample_modifier_config,
validation_fold=validation_fold,
num_folds=config.num_partitions)
constructor_kwargs['sample_modifier'] = sample_modifier
constructor_kwargs['image_dir_name'] = config.image_dir_name
constructor_kwargs['label_dir_name'] = config.label_dir_name
# These two directories default to empty strings in the proto.
# Check them here so that they default to None in the constructor;
# passing the empty strings directly to the constructor would raise
# an error.
if config.point_clouds_dir:
constructor_kwargs['point_clouds_dir'] = config.point_clouds_dir
if config.calibrations_dir:
constructor_kwargs['calibrations_dir'] = config.calibrations_dir
constructor_kwargs['extension'] = config.image_extension or '.png'
constructor_kwargs['val_split'] = config.val_split
if config.kitti_sequence_to_frames_file:
constructor_kwargs['kitti_sequence_to_frames_file'] = \
config.kitti_sequence_to_frames_file
logger.info("Instantiating a kitti converter")
status_logging.get_status_logger().write(
message="Instantiating a kitti converter",
status_level=status_logging.Status.STARTED)
converter = KITTIConverter(**constructor_kwargs)
return converter
if convert_config_type == "coco_config":
# Fetch the dataset configuration object first.
# This can be extended for other data formats such as Pascal VOC
# or Open Images.
config = getattr(dataset_export_config, "coco_config")
constructor_kwargs = {'root_directory_path': config.root_directory_path,
'num_partitions': config.num_partitions,
'output_filename': output_filename}
constructor_kwargs['annotation_files'] = [p for p in config.annotation_files]
constructor_kwargs['image_dir_names'] = [p for p in config.img_dir_names]
assert len(constructor_kwargs['image_dir_names']) == \
len(constructor_kwargs['annotation_files'])
assert len(constructor_kwargs['image_dir_names']) == \
config.num_partitions
# If only one value is given to num_shards, same num_shards
# will be applied to each partition
if len(config.num_shards) == 1:
constructor_kwargs['num_shards'] = list(config.num_shards) * config.num_partitions
elif len(config.num_shards) == config.num_partitions:
constructor_kwargs['num_shards'] = config.num_shards
else:
raise ValueError("Number of Shards {} do not match the size of number of partitions "
"{}.".format(len(config.num_shards), config.num_partitions))
# Create a SampleModifier
sample_modifier_config = dataset_export_config.sample_modifier_config
sample_modifier = build_sample_modifier(
sample_modifier_config=sample_modifier_config,
validation_fold=validation_fold,
num_folds=config.num_partitions)
constructor_kwargs['sample_modifier'] = sample_modifier
logger.info("Instantiating a coco converter")
status_logging.get_status_logger().write(
message="Instantiating a coco converter",
status_level=status_logging.Status.STARTED)
converter = COCOConverter(**constructor_kwargs)
return converter
raise NotImplementedError("Only supports KITTI or COCO")
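# --- Illustrative sketch (not part of build_converter.py) ---
# A standalone restatement of the num_shards broadcast rule used in the COCO
# branch above: a single value is repeated for every partition, a list whose
# length equals num_partitions is used as-is, and anything else is rejected.
# The values below are illustrative.
def resolve_num_shards(num_shards, num_partitions):
    """Broadcast or validate the per-partition shard counts."""
    if len(num_shards) == 1:
        return list(num_shards) * num_partitions
    if len(num_shards) == num_partitions:
        return list(num_shards)
    raise ValueError("Number of shards {} does not match the number of partitions "
                     "{}.".format(len(num_shards), num_partitions))

print(resolve_num_shards([10], 3))       # [10, 10, 10]
print(resolve_num_shards([4, 8, 2], 3))  # [4, 8, 2]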
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/dataio/build_converter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build sample modifier to write to tfrecords."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from nvidia_tao_tf1.cv.detectnet_v2.dataio.sample_modifier import SampleModifier
def build_sample_modifier(sample_modifier_config, validation_fold, num_folds=None):
"""Build a SampleModifier object.
Args:
sample_modifier_config(SampleModifierConfig): Configuration of sample modifier.
validation_fold (int): Validation fold number (0-based). If samples are modified, then the
modifications are applied only to the training set, while the validation set
remains unchanged.
num_folds (int): The total number of folds.
Return:
sample_modifier(SampleModifier): The created SampleModifier instance.
"""
# Convert unicode strings to python strings for class mapping.
source_to_target_class_mapping = \
{bytes(str(source_class_name), 'utf-8'): bytes(str(target_class_name), 'utf-8')
for source_class_name, target_class_name
in six.iteritems(sample_modifier_config.source_to_target_class_mapping)}
sample_modifier = SampleModifier(
filter_samples_containing_only=sample_modifier_config.filter_samples_containing_only,
dominant_target_classes=sample_modifier_config.dominant_target_classes,
minimum_target_class_imbalance=sample_modifier_config.minimum_target_class_imbalance,
num_duplicates=sample_modifier_config.num_duplicates,
max_training_samples=sample_modifier_config.max_training_samples,
source_to_target_class_mapping=source_to_target_class_mapping,
validation_fold=validation_fold,
num_folds=num_folds)
return sample_modifier
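# --- Illustrative sketch (not part of build_sample_modifier.py) ---
# A minimal example of the class-mapping conversion performed above: the proto
# map entries (unicode strings) are converted to bytes so they can be compared
# against the byte strings stored in the tfrecords. The mapping itself is an
# illustrative assumption.
import six

source_to_target = {u'Van': u'car', u'Pedestrian': u'person'}
converted = {bytes(str(source), 'utf-8'): bytes(str(target), 'utf-8')
             for source, target in six.iteritems(source_to_target)}
print(converted)  # {b'Van': b'car', b'Pedestrian': b'person'}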
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/dataio/build_sample_modifier.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converts an object detection dataset to TFRecords."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABCMeta
from abc import abstractmethod
from collections import Counter
import json
import logging
import os
import random
import six
import tensorflow as tf
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.detectnet_v2.common.dataio.converter_lib import _shard
from nvidia_tao_tf1.cv.detectnet_v2.common.dataio.converter_lib import _shuffle
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.utilities import get_data_root
logger = logging.getLogger(__name__)
class DatasetConverter(six.with_metaclass(ABCMeta, object)):
"""Converts an object detection dataset to TFRecords.
This class needs to be subclassed, and the convert() and
create_example_proto() methods overridden to do the dataset
conversion. Splitting of partitions to shards, shuffling and
writing TFRecords are implemented here, as well as counting
of written targets.
"""
@abstractmethod
def __init__(self, root_directory_path, num_partitions, num_shards,
output_filename, sample_modifier):
"""Initialize the converter.
Args:
root_directory_path (string): Dataset directory path relative to data root.
num_partitions (int): Number of partitions (folds).
num_shards (int): Number of shards.
output_filename (str): Path for the output file.
sample_modifier(SampleModifier): An instance of sample modifier
that does e.g. duplication and filtering of samples.
"""
self.root_dir = os.path.join(get_data_root(), root_directory_path)
self.root_dir = os.path.abspath(self.root_dir)
self.output_partitions = num_partitions
self.output_shards = num_shards
self.output_filename = output_filename
output_dir = os.path.dirname(self.output_filename)
# Make the output directory to write the shard.
if not os.path.exists(output_dir):
logger.info("Creating output directory {}".format(output_dir))
os.makedirs(output_dir)
self.sample_modifier = sample_modifier
self.class_map = {}
self.log_warning = {}
# Set a fixed seed to get a reproducible sequence.
random.seed(42)
def convert(self):
"""Do the dataset conversion."""
# Divide dataset into partitions and shuffle them.
partitions = self._partition()
_shuffle(partitions)
# Shard and write the partitions to tfrecords.
object_count = self._write_partitions(partitions)
# Log how many objects per class got written in total.
logger.info("Cumulative object statistics")
cumulative_count_dict = {
target_class.decode("ascii"): object_count.get(target_class)
for target_class in object_count.keys()
}
s_logger = status_logging.get_status_logger()
s_logger.categorical = {"num_objects": cumulative_count_dict}
s_logger.write(
message="Cumulative object statistics"
)
self._log_object_count(object_count)
# Print out the class map
log_str = "Class map. \nLabel in GT: Label in tfrecords file "
for key, value in six.iteritems(self.class_map):
log_str += "\n{}: {}".format(key, value)
logger.info(log_str)
s_logger.write(message=log_str)
note_string = (
"For the dataset_config in the experiment_spec, "
"please use labels in the tfrecords file, while writing the classmap.\n"
)
print(note_string)
s_logger.write(message=note_string)
logger.info("Tfrecords generation complete.")
s_logger.write(
status_level=status_logging.Status.SUCCESS,
message="TFRecords generation complete."
)
# Save labels with error to a JSON file
self._save_log_warnings()
def _write_partitions(self, partitions):
"""Shard and write partitions into tfrecords.
Args:
partitions (list): A list of list of frame IDs.
Returns:
object_count (Counter): The total number of objects per target class.
"""
# Divide partitions into shards.
sharded_partitions = _shard(partitions, self.output_shards)
# Write .tfrecords to disk for each partition and shard.
# Also count the target objects per partition and over the whole dataset.
object_count = Counter()
for p, partition in enumerate(sharded_partitions):
partition_object_count = Counter()
for s, shard in enumerate(partition):
shard_object_count = self._write_shard(shard, p, s)
partition_object_count += shard_object_count
# Log the count in this partition and increase total
# object count.
self._log_object_count(partition_object_count)
object_count += partition_object_count
return object_count
def _write_shard(self, shard, partition_number, shard_number):
"""Write a single shard into the tfrecords file.
Note that the dataset-specific part is captured in function
create_example_proto() which needs to be overridden for each
specific dataset.
Args:
shard (list): A list of frame IDs for this shard.
partition_number (int): Current partition (fold) index.
shard_number (int): Current shard index.
Returns:
object_count (Counter): The number of written objects per target class.
"""
logger.info('Writing partition {}, shard {}'.format(partition_number, shard_number))
status_logging.get_status_logger().write(
message='Writing partition {}, shard {}'.format(partition_number, shard_number)
)
output = self.output_filename
if self.output_partitions != 0:
output = '{}-fold-{:03d}-of-{:03d}'.format(output, partition_number,
self.output_partitions)
if self.output_shards != 0:
output = '{}-shard-{:05d}-of-{:05d}'.format(output, shard_number, self.output_shards)
object_count = Counter()
# Store all the data for the shard.
writer = tf.python_io.TFRecordWriter(output)
for frame_id in shard:
# Create the Example with all labels for this frame_id.
example = self._create_example_proto(frame_id)
# The example might be skipped e.g. due to missing labels.
if example is not None:
# Apply modifications to the current sample such as filtering and duplication.
# Only samples in the training set are modified.
modified_examples = self.sample_modifier.modify_sample(example, partition_number)
# Write the list of (possibly) modified samples.
frame_object_count = Counter()
for modified_example in modified_examples:
# Serialize the example.
writer.write(modified_example.SerializeToString())
# Count objects that got written per target class.
frame_object_count += self._count_targets(modified_example)
object_count += frame_object_count
writer.close()
return object_count
@abstractmethod
def _partition(self):
"""Return dataset partitions."""
pass
@abstractmethod
def _create_example_proto(self, frame_id):
"""Generate the example for this frame."""
pass
def _save_log_warnings(self):
"""Store out of bound bounding boxes to a json file."""
if self.log_warning:
logger.info("Writing the log_warning.json")
with open(f"{self.output_filename}_warning.json", "w") as f:
json.dump(self.log_warning, f, indent=2)
logger.info("There were errors in the labels. Details are logged at"
" %s_waring.json", self.output_filename)
def _count_targets(self, example):
"""Count the target objects in the given example protobuf.
Args:
example (tf.train.Example): Example protobuf containing the labels for a frame.
Returns:
object_count (Counter): Number of objects per target class.
"""
target_classes = example.features.feature['target/object_class'].bytes_list.value
object_count = Counter(target_classes)
return object_count
def _log_object_count(self, object_counts):
"""Log object counts per target class.
Args:
object_counts (Counter or dict): Number of objects per target class.
"""
log_str = '\nWrote the following numbers of objects:'
for target_class, object_count in six.iteritems(object_counts):
log_str += "\n{}: {}".format(target_class, object_count)
log_str += "\n"
logger.info(log_str)
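# --- Illustrative sketch (not part of dataset_converter_lib.py) ---
# A standalone example of the output naming scheme used by _write_shard() above.
# The base filename, fold count and shard count are illustrative assumptions.
output = '/tmp/detectnet_v2/tfrecords/kitti_trainval'
output_partitions, output_shards = 2, 10
partition_number, shard_number = 0, 3
if output_partitions != 0:
    output = '{}-fold-{:03d}-of-{:03d}'.format(output, partition_number, output_partitions)
if output_shards != 0:
    output = '{}-shard-{:05d}-of-{:05d}'.format(output, shard_number, output_shards)
print(output)  # /tmp/detectnet_v2/tfrecords/kitti_trainval-fold-000-of-002-shard-00003-of-00010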
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/dataio/dataset_converter_lib.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converts a COCO detection dataset to TFRecords."""
from __future__ import absolute_import
from __future__ import print_function
from collections import Counter
import logging
import os
from pycocotools.coco import COCO
import six
import tensorflow as tf
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.detectnet_v2.common.dataio.converter_lib import _bytes_feature
from nvidia_tao_tf1.cv.detectnet_v2.common.dataio.converter_lib import _float_feature
from nvidia_tao_tf1.cv.detectnet_v2.common.dataio.converter_lib import _int64_feature
from nvidia_tao_tf1.cv.detectnet_v2.dataio.dataset_converter_lib import DatasetConverter
logger = logging.getLogger(__name__)
class COCOConverter(DatasetConverter):
"""Converts a COCO detection dataset to TFRecords."""
def __init__(self, root_directory_path, num_partitions, num_shards,
output_filename,
sample_modifier,
image_dir_names=None,
annotation_files=None,
use_dali=False,
class2idx=None):
"""Initialize the converter.
Args:
root_directory_path (string): Dataset directory path relative to data root.
num_partitions (int): Number of partitions (folds).
num_shards (list): Number of shards for each partition.
output_filename (str): Path for the output file.
sample_modifier(SampleModifier): An instance of sample modifier
that does e.g. duplication and filtering of samples.
image_dir_names (list): List of image directories for each partition.
annotation_files (list): List of annotation files for each partition.
use_dali (bool): Whether to write DALI-compatible records with relative
coordinates and integer class ids. Defaults to False.
class2idx (dict): Optional mapping from class name to integer class id.
"""
super(COCOConverter, self).__init__(
root_directory_path=root_directory_path,
num_partitions=num_partitions,
num_shards=num_shards,
output_filename=output_filename,
sample_modifier=sample_modifier)
# COCO defaults.
self.coco = []
self.cat_idx = {}
self.img_dir_names = image_dir_names
self.annotation_files = annotation_files
self.use_dali = use_dali
self.class2idx = class2idx
self.idx2class = None
def _partition(self):
"""Load COCO annotations."""
logger.debug("Generating partitions")
s_logger = status_logging.get_status_logger()
s_logger.write(message="Generating partitions")
partitions = []
cat_idx = {}
for ann_file in self.annotation_files:
ann_file = os.path.join(self.root_dir, ann_file)
if not os.path.exists(ann_file):
raise FileNotFoundError(f"Failed to load annotation from {ann_file}")
logger.debug("Loadding annotations from {}".format(ann_file))
c = COCO(ann_file)
# Error checking on the annotation file
if len(c.anns) == 0:
raise ValueError(f"\"annotations\" field is missing in the JSON file {ann_file}")
if len(c.imgs) == 0:
raise ValueError(f"\"images\" field is missing in the JSON file {ann_file}")
if len(c.cats) == 0:
raise ValueError(f"\"categories\" field is missing in the JSON file {ann_file}")
cats = c.loadCats(c.getCatIds())
if len(cat_idx) and sorted(cat_idx.keys()) != sorted([cat['id'] for cat in cats]):
raise ValueError("The categories in your partitions don't match. "
"Please check your labels again")
for cat in cats:
# Remove any white spaces
cat_idx[cat['id']] = cat['name'].replace(" ", "")
self.coco.append(c)
partitions.append(c.getImgIds())
self.idx2class = cat_idx
if self.class2idx is None:
self.class2idx = {v: k for k, v in self.idx2class.items()}
return partitions
def _write_shard(self, shard, partition_number, shard_number):
"""Write a single shard into the tfrecords file.
Note that the dataset-specific part is captured in function
create_example_proto() which needs to be overridden for each
specific dataset.
Args:
shard (list): A list of frame IDs for this shard.
partition_number (int): Current partition (fold) index.
shard_number (int): Current shard index.
Returns:
object_count (Counter): The number of written objects per target class.
"""
logger.info('Writing partition {}, shard {}'.format(partition_number, shard_number))
status_logging.get_status_logger().write(
message='Writing partition {}, shard {}'.format(partition_number, shard_number)
)
output = self.output_filename
if self.output_partitions != 0:
output = '{}-fold-{:03d}-of-{:03d}'.format(output, partition_number,
self.output_partitions)
if self.output_shards[partition_number] != 0:
output = '{}-shard-{:05d}-of-{:05d}'.format(output, shard_number,
self.output_shards[partition_number])
object_count = Counter()
# Store all the data for the shard.
writer = tf.python_io.TFRecordWriter(output)
for frame_id in shard:
# Create the Example with all labels for this frame_id.
example = self._create_example_proto(frame_id, partition_number)
# The example might be skipped e.g. due to missing labels.
if example is not None:
# Apply modifications to the current sample such as filtering and duplication.
# Only samples in the training set are modified.
modified_examples = self.sample_modifier.modify_sample(example, partition_number)
# Write the list of (possibly) modified samples.
frame_object_count = Counter()
for modified_example in modified_examples:
# Serialize the example.
writer.write(modified_example.SerializeToString())
# Count objects that got written per target class.
frame_object_count += self._count_targets(modified_example)
object_count += frame_object_count
writer.close()
return object_count
def _shard(self, partitions):
"""Shard each partition."""
shards = []
for partition, num_shards in zip(partitions, self.output_shards):
num_shards = max(num_shards, 1) # 0 means 1 shard.
result = []
if len(partition) == 0:
continue
shard_size = len(partition) // num_shards
for i in range(num_shards):
begin = i * shard_size
end = (i + 1) * shard_size if i + 1 < num_shards else len(partition)
result.append(partition[begin:end])
shards.append(result)
return shards
def _write_partitions(self, partitions):
"""Shard and write partitions into tfrecords.
Args:
partitions (list): A list of list of frame IDs.
Returns:
object_count (Counter): The total number of objects per target class.
"""
# Divide partitions into shards.
sharded_partitions = self._shard(partitions)
# Write .tfrecords to disk for each partition and shard.
# Also count the target objects per partition and over the whole dataset.
object_count = Counter()
for p, partition in enumerate(sharded_partitions):
partition_object_count = Counter()
for s, shard in enumerate(partition):
shard_object_count = self._write_shard(shard, p, s)
partition_object_count += shard_object_count
# Log the count in this partition and increase total
# object count.
self._log_object_count(partition_object_count)
object_count += partition_object_count
return object_count
def convert(self):
"""Do the dataset conversion."""
# Load coco annotations for each partition.
partitions = self._partition()
# Shard and write the partitions to tfrecords.
object_count = self._write_partitions(partitions)
# Log how many objects per class got written in total.
logger.info("Cumulative object statistics")
cumulative_count_dict = {
target_class.decode("ascii"): object_count.get(target_class)
for target_class in object_count.keys()
}
s_logger = status_logging.get_status_logger()
s_logger.categorical = {"num_objects": cumulative_count_dict}
s_logger.write(
message="Cumulative object statistics"
)
self._log_object_count(object_count)
# Print out the class map
log_str = "Class map. \nLabel in GT: Label in tfrecords file "
for key, value in six.iteritems(self.class_map):
log_str += "\n{}: {}".format(key, value)
logger.info(log_str)
s_logger.write(message=log_str)
note_string = (
"For the dataset_config in the experiment_spec, "
"please use labels in the tfrecords file, while writing the classmap.\n"
)
print(note_string)
s_logger.write(message=note_string)
logger.info("Tfrecords generation complete.")
s_logger.write(
status_level=status_logging.Status.SUCCESS,
message="TFRecords generation complete."
)
# Save labels with error to a JSON file
self._save_log_warnings()
def _create_example_proto(self, img_id, partition_number):
"""Generate the example proto for this img.
Args:
img_id (int): The img id.
partition_number (string): The partition number.
Returns:
example (tf.train.Example): An Example containing all labels for the frame.
"""
# Create proto for the training example.
# Load the necessary dicts for the image and its annotations
img_dict = self.coco[partition_number].loadImgs(img_id)[0]
annIds = self.coco[partition_number].getAnnIds(imgIds=img_dict['id'])
ann_dict = self.coco[partition_number].loadAnns(annIds)
orig_filename = self.coco[partition_number].loadImgs(img_id)[0]['file_name']
# Need to remove the file extensions to meet KITTI format
# Prepend the image directory name of the current partition
img_id = os.path.join(self.img_dir_names[partition_number], orig_filename.rsplit(".", 1)[0])
example = self._example_proto(img_id, img_dict)
if self.use_dali:
width, height = img_dict['width'], img_dict['height']
img_full_path = os.path.join(self.root_dir, self.img_dir_names[partition_number])
self._add_image(example, img_dict, img_dir=img_full_path)
self._add_targets(example, img_dict, ann_dict, width, height)
else:
self._add_targets(example, img_dict, ann_dict)
return example
def _example_proto(self, img_id, img_dict):
"""Generate a base Example protobuf to which COCO-specific features are added."""
width, height = img_dict['width'], img_dict['height']
example = tf.train.Example(features=tf.train.Features(feature={
'frame/id': _bytes_feature(img_id.encode('utf-8')),
'frame/height': _int64_feature(height),
'frame/width': _int64_feature(width),
}))
return example
def _add_image(self, example, img_dict, img_dir):
"""Add encoded image to example."""
image_file = os.path.join(img_dir, img_dict['file_name'])
with open(image_file, "rb") as image_fp:
    image_string = image_fp.read()
f = example.features.feature
f['frame/encoded'].MergeFrom(_bytes_feature(image_string))
def _add_targets(self, example, img_dict, ann_dict, width=None, height=None):
"""Add COCO target features such as bbox to the Example protobuf.
Args:
example (tf.train.Example): The Example protobuf for this frame.
img_dict (dict): COCO image record for this frame.
ann_dict (list): COCO annotation records for this frame.
width (int): Image width, used only when use_dali is True.
height (int): Image height, used only when use_dali is True.
"""
object_classes = []
truncation = []
occlusion = []
observation_angle = []
coordinates_x1 = []
coordinates_y1 = []
coordinates_x2 = []
coordinates_y2 = []
world_bbox_h = []
world_bbox_w = []
world_bbox_l = []
world_bbox_x = []
world_bbox_y = []
world_bbox_z = []
world_bbox_rot_y = []
object_class_ids = []
# reads the labels as a list of tuples
labels = ann_dict
if isinstance(labels, tuple):
labels = [labels]
for label in labels:
# Convert x,y,w,h to x1,y1,x2,y2 format
bbox = label['bbox']
bbox = [bbox[0], bbox[1], (bbox[2] + bbox[0]), (bbox[3] + bbox[1])]
x1 = int(bbox[0])
y1 = int(bbox[1])
x2 = int(bbox[2])
y2 = int(bbox[3])
# Check to make sure the coordinates are 'ltrb' format.
error_string = "Top left coordinate must be less than bottom right."\
f"Error in Img Id {img_dict['id']} and Ann Id {label['id']}. \n"\
f"Coordinates: x1 = {x1}, x2 = {x2}, y1: {y1}, y2: {y2}"
if not (x1 < x2 and y1 < y2):
logger.debug(error_string)
logger.debug("Skipping this object")
self.log_warning[label['id']] = [x1, y1, x2, y2]
continue
# Convert category id to actual category
cat = self.idx2class[label['category_id']]
# Map object classes as they are in the dataset to target classes of the model
self.class_map[cat] = cat.lower()
object_class = cat.lower()
if self.use_dali:
if (str(object_class) not in self.class2idx):
logger.debug("Skipping the class {} in dataset".format(object_class))
continue
object_classes.append(object_class)
truncation.append(0)
occlusion.append(0)
observation_angle.append(0)
if self.use_dali:
# @tylerz: DALI requires relative coordinates and integer class ids
coordinates_x1.append(float(bbox[0]) / width)
coordinates_y1.append(float(bbox[1]) / height)
coordinates_x2.append(float(bbox[2]) / width)
coordinates_y2.append(float(bbox[3]) / height)
object_class_id = self.class2idx[str(object_class)]
object_class_ids.append(object_class_id)
else:
coordinates_x1.append(bbox[0])
coordinates_y1.append(bbox[1])
coordinates_x2.append(bbox[2])
coordinates_y2.append(bbox[3])
world_bbox_h.append(0)
world_bbox_w.append(0)
world_bbox_l.append(0)
world_bbox_x.append(0)
world_bbox_y.append(0)
world_bbox_z.append(0)
world_bbox_rot_y.append(0)
f = example.features.feature
if self.use_dali:
f['target/object_class_id'].MergeFrom(_float_feature(*object_class_ids))
else:
f['target/object_class'].MergeFrom(_bytes_feature(*object_classes))
f['target/truncation'].MergeFrom(_float_feature(*truncation))
f['target/occlusion'].MergeFrom(_int64_feature(*occlusion))
f['target/observation_angle'].MergeFrom(_float_feature(*observation_angle))
f['target/coordinates_x1'].MergeFrom(_float_feature(*coordinates_x1))
f['target/coordinates_y1'].MergeFrom(_float_feature(*coordinates_y1))
f['target/coordinates_x2'].MergeFrom(_float_feature(*coordinates_x2))
f['target/coordinates_y2'].MergeFrom(_float_feature(*coordinates_y2))
f['target/world_bbox_h'].MergeFrom(_float_feature(*world_bbox_h))
f['target/world_bbox_w'].MergeFrom(_float_feature(*world_bbox_w))
f['target/world_bbox_l'].MergeFrom(_float_feature(*world_bbox_l))
f['target/world_bbox_x'].MergeFrom(_float_feature(*world_bbox_x))
f['target/world_bbox_y'].MergeFrom(_float_feature(*world_bbox_y))
f['target/world_bbox_z'].MergeFrom(_float_feature(*world_bbox_z))
f['target/world_bbox_rot_y'].MergeFrom(_float_feature(*world_bbox_rot_y))
def _count_targets(self, example):
"""Count the target objects in the given example protobuf.
Args:
example (tf.train.Example): Example protobuf containing the labels for a frame.
Returns:
object_count (Counter): Number of objects per target class.
"""
target_classes = example.features.feature['target/object_class'].bytes_list.value
if len(target_classes) == 0:
target_classes_id = example.features.feature['target/object_class_id'].float_list.value
if len(target_classes_id) != 0:
if self.idx2class is None:
self.idx2class = {v: k for k, v in self.class2idx.items()}
target_classes = []
for idx in target_classes_id:
target_classes.append(self.idx2class[idx].encode("ascii"))
object_count = Counter(target_classes)
return object_count
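# --- Illustrative sketch (not part of coco_converter_lib.py) ---
# A minimal example of the bbox handling in _add_targets() above: COCO boxes
# are [x, y, w, h]; they are converted to [x1, y1, x2, y2], and in DALI mode
# the coordinates are additionally normalized by image width and height.
# The box and image size below are illustrative.
bbox_xywh = [100.0, 50.0, 200.0, 80.0]
width, height = 640, 480

x1, y1 = bbox_xywh[0], bbox_xywh[1]
x2, y2 = bbox_xywh[0] + bbox_xywh[2], bbox_xywh[1] + bbox_xywh[3]
print([x1, y1, x2, y2])                                    # [100.0, 50.0, 300.0, 130.0]
print([x1 / width, y1 / height, x2 / width, y2 / height])  # normalized for DALI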
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/dataio/coco_converter_lib.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the dataset converter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from google.protobuf.text_format import Merge as merge_text_proto
import pytest
from nvidia_tao_tf1.cv.detectnet_v2.dataio.build_converter import build_converter
from nvidia_tao_tf1.cv.detectnet_v2.dataio.dataset_converter_lib import DatasetConverter
from nvidia_tao_tf1.cv.detectnet_v2.dataio.kitti_converter_lib import KITTIConverter
import nvidia_tao_tf1.cv.detectnet_v2.proto.dataset_export_config_pb2 as\
dataset_export_config_pb2
import nvidia_tao_tf1.cv.detectnet_v2.scripts.dataset_convert as dataset_converter
@pytest.mark.parametrize("dataset_export_spec", ['kitti_spec.txt'])
def test_dataset_converter(mocker, dataset_export_spec):
"""Check that dataset_converter.py can be loaded and run."""
dataset_export_spec_path = os.path.dirname(os.path.realpath(__file__))
dataset_export_spec_path = \
os.path.join(dataset_export_spec_path, '..', 'dataset_specs', dataset_export_spec)
# Mock the slow functions of converters that are part of constructor.
mocker.patch.object(KITTIConverter, "_read_sequence_to_frames_file", return_value=None)
mock_converter = mocker.patch.object(DatasetConverter, "convert")
args = ['-d', dataset_export_spec_path, '-o', './tmp']
dataset_converter.main(args)
mock_converter.assert_called_once()
@pytest.mark.parametrize("dataset_export_spec,converter_type", [('kitti_spec.txt', KITTIConverter)])
def test_build_converter(mocker, dataset_export_spec, converter_type):
"""Check that build_converter returns a DatasetConverter of the desired type."""
# Mock the slow functions of converters that are part of constructor.
mocker.patch.object(KITTIConverter, "_read_sequence_to_frames_file", return_value=None)
dataset_export_spec_path = os.path.dirname(os.path.realpath(__file__))
dataset_export_spec_path = \
os.path.join(dataset_export_spec_path, '..', 'dataset_specs', dataset_export_spec)
dataset_export_config = dataset_export_config_pb2.DatasetExportConfig()
with open(dataset_export_spec_path, "r") as f:
merge_text_proto(f.read(), dataset_export_config)
converter = build_converter(dataset_export_config=dataset_export_config,
output_filename=dataset_export_spec_path)
assert isinstance(converter, converter_type)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/dataio/tests/test_dataset_convert.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the COCO converter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import inspect
import os
import numpy as np
from PIL import Image
from pycocotools.coco import COCO
import pytest
import six
import tensorflow as tf
from nvidia_tao_tf1.cv.detectnet_v2.dataio.build_converter import build_converter
import nvidia_tao_tf1.cv.detectnet_v2.proto.coco_config_pb2 as coco_config_pb2
import nvidia_tao_tf1.cv.detectnet_v2.proto.dataset_export_config_pb2 as\
dataset_export_config_pb2
def _dataset_export_config(num_partitions=1, coco_label_path="coco.json"):
"""Return a COCO dataset export configuration with given number of partitions."""
dataset_export_config = dataset_export_config_pb2.DatasetExportConfig()
coco_config = coco_config_pb2.COCOConfig()
root_dir = os.path.dirname(os.path.abspath(
inspect.getsourcefile(lambda: None)))
root_dir += "/test_data"
coco_config.root_directory_path = root_dir
coco_config.num_partitions = num_partitions
coco_config.num_shards[:] = [0]
coco_config.img_dir_names[:] = ["image_2"]
coco_config.annotation_files[:] = [coco_label_path]
get_mock_images(coco_config)
dataset_export_config.coco_config.CopyFrom(coco_config)
return dataset_export_config
def get_mock_images(coco_config):
"""Generate mock images from the image_ids."""
image_root = os.path.join(
coco_config.root_directory_path, coco_config.img_dir_names[0])
if not os.path.exists(image_root):
os.makedirs(image_root)
image_file = {'000000552775': (375, 500),
'000000394940': (426, 640),
'000000015335': (640, 480)}
for idx, sizes in six.iteritems(image_file):
image_file_name = os.path.join(image_root,
'{}{}'.format(idx, ".jpg"))
image = Image.new("RGB", sizes)
image.save(image_file_name)
return image_file
def _mock_open_image(image_file):
"""Mock image open()."""
# the images are opened to figure out their dimensions so mock the image size
mock_image = namedtuple('mock_image', ['size'])
images = {'000000552775': mock_image((375, 500)),
'000000394940': mock_image((426, 640)),
'000000015335': mock_image((640, 480))}
# return the mocked image corresponding to frame_id in the image_file path
for frame_id in images.keys():
if frame_id in image_file:
return images[frame_id]
return mock_image
def _mock_converter(mocker, tmpdir, coco_label_path):
"""Return a COCO converter with a mocked sequence to frame map."""
output_filename = os.path.join(str(tmpdir), 'coco_test.tfrecords')
# Mock image open().
mocker.patch.object(Image, 'open', _mock_open_image)
# Convert a few COCO labels to TFrecords.
dataset_export_config = _dataset_export_config(coco_label_path=coco_label_path)
converter = build_converter(dataset_export_config, output_filename)
return converter, output_filename, dataset_export_config
@pytest.mark.parametrize("coco_label_path", ['coco.json'])
@pytest.mark.parametrize("use_dali", [False, True])
def test_tfrecords_roundtrip(mocker, tmpdir, coco_label_path, use_dali):
"""Test converting a few labels to TFRecords and parsing them back to Python."""
converter, output_filename, dataset_export_config = _mock_converter(mocker,
tmpdir,
coco_label_path)
converter.use_dali = use_dali
converter.convert()
class2idx = converter.class2idx
tfrecords = tf.python_io.tf_record_iterator(output_filename + '-fold-000-of-001')
# Load small coco example annotation
coco_local_path = os.path.join(dataset_export_config.coco_config.root_directory_path,
dataset_export_config.coco_config.annotation_files[0])
coco = COCO(coco_local_path)
frame_ids = ['000000552775', '000000394940', '000000015335']
expected_frame_ids = [os.path.join("image_2", frame_id) for frame_id in frame_ids]
# Specific to each test case
for i, record in enumerate(tfrecords):
example = tf.train.Example()
example.ParseFromString(record)
features = example.features.feature
# Load expected values from the local JSON COCO annotation
frame_id = os.path.basename(expected_frame_ids[i])
expected_img = coco.loadImgs(int(frame_id))[0]
expected_annIds = coco.getAnnIds(imgIds=[int(frame_id)])
expected_anns = coco.loadAnns(expected_annIds)
expected_cats = []
expected_x1, expected_y1, expected_x2, expected_y2 = [], [], [], []
for ann in expected_anns:
cat = coco.loadCats(ann['category_id'])[0]['name']
if use_dali:
expected_x1.append(ann['bbox'][0] / expected_img['width'])
expected_y1.append(ann['bbox'][1] / expected_img['height'])
expected_x2.append((ann['bbox'][0] + ann['bbox'][2]) / expected_img['width'])
expected_y2.append((ann['bbox'][1] + ann['bbox'][3]) / expected_img['height'])
expected_cats.append(class2idx[cat.replace(" ", "").lower()])
else:
expected_x1.append(ann['bbox'][0])
expected_y1.append(ann['bbox'][1])
expected_x2.append(ann['bbox'][0] + ann['bbox'][2])
expected_y2.append(ann['bbox'][1] + ann['bbox'][3])
expected_cats.append(cat.replace(" ", "").encode('utf-8'))
bbox_values = [expected_x1, expected_y1,
expected_x2, expected_y2]
assert features['frame/id'].bytes_list.value[0] == bytes(expected_frame_ids[i], 'utf-8')
assert features['frame/width'].int64_list.value[0] == expected_img['width']
assert features['frame/height'].int64_list.value[0] == expected_img['height']
if use_dali:
assert features['target/object_class_id'].float_list.value[:] == expected_cats
else:
assert features['target/object_class'].bytes_list.value[:] == expected_cats
bbox_features = ['target/coordinates_' +
x for x in ('x1', 'y1', 'x2', 'y2')]
for feature, expected_value in zip(bbox_features, bbox_values):
np.testing.assert_allclose(
features[feature].float_list.value[:], expected_value)
return converter
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/dataio/tests/test_coco_converter_lib.py |