| python_code | repo_name | file_path |
|---|---|---|
'''
Utilities to match ground truth boxes to anchor boxes.
Copyright (C) 2018 Pierluigi Ferrari
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from __future__ import division
import numpy as np
def match_bipartite_greedy(weight_matrix):
'''
Returns a bipartite matching according to the given weight matrix.
The algorithm works as follows:
Let the first axis of `weight_matrix` represent ground truth boxes
and the second axis anchor boxes.
The ground truth box that has the greatest similarity with any
anchor box will be matched first, then out of the remaining ground
truth boxes, the ground truth box that has the greatest similarity
with any of the remaining anchor boxes will be matched second, and
so on. That is, the ground truth boxes will be matched in descending
order by maximum similarity with any of the respectively remaining
anchor boxes.
The runtime complexity is O(m^2 * n), where `m` is the number of
ground truth boxes and `n` is the number of anchor boxes.
Arguments:
weight_matrix (array): A 2D Numpy array that represents the weight matrix
for the matching process. If `(m,n)` is the shape of the weight matrix,
it must be `m <= n`. The weights can be integers or floating point
numbers. The matching process will maximize, i.e. larger weights are
preferred over smaller weights.
Returns:
A 1D Numpy array of length `weight_matrix.shape[0]` that represents
the matched index along the second axis of `weight_matrix` for each index
along the first axis.
'''
weight_matrix = np.copy(weight_matrix) # We'll modify this array.
num_ground_truth_boxes = weight_matrix.shape[0]
# Only relevant for fancy-indexing below.
all_gt_indices = list(range(num_ground_truth_boxes))
# This 1D array will contain for each ground truth box the index of
# the matched anchor box.
    matches = np.zeros(num_ground_truth_boxes, dtype=np.int32)
# In each iteration of the loop below, exactly one ground truth box
# will be matched to one anchor box.
for _ in range(num_ground_truth_boxes):
# Find the maximal anchor-ground truth pair in two steps: First, reduce
# over the anchor boxes and then reduce over the ground truth boxes.
# Reduce along the anchor box axis.
anchor_indices = np.argmax(weight_matrix, axis=1)
overlaps = weight_matrix[all_gt_indices, anchor_indices]
# Reduce along the ground truth box axis.
ground_truth_index = np.argmax(overlaps)
anchor_index = anchor_indices[ground_truth_index]
matches[ground_truth_index] = anchor_index # Set the match.
# Set the row of the matched ground truth box and the column of the matched
# anchor box to all zeros. This ensures that those boxes will not be matched again,
# because they will never be the best matches for any other boxes.
weight_matrix[ground_truth_index] = 0
weight_matrix[:, anchor_index] = 0
return matches
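# --- Usage sketch (added for illustration; not part of the original file). ---
# A minimal, hedged example of the greedy bipartite matcher on a hand-made
# 3x4 weight matrix. The numbers are arbitrary; only the call pattern and
# the greedy matching order matter.
if __name__ == '__main__':
    demo_weights = np.array([[0.9, 0.1, 0.3, 0.2],
                             [0.8, 0.2, 0.1, 0.1],
                             [0.0, 0.7, 0.4, 0.0]])
    # Greedy order: (0, 0) with 0.9 first, then (2, 1) with 0.7, then (1, 2).
    print(match_bipartite_greedy(demo_weights))  # -> [0 2 1]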
def match_multi(weight_matrix, threshold):
'''
    Matches all elements along the second axis of `weight_matrix` to their best
    matches along the first axis, subject to the constraint that the weight of a
    match must be greater than or equal to `threshold` in order to produce a match.
    If the weight matrix contains elements that should be ignored, the row or column
    representing the respective element should be set to a value below `threshold`.
Arguments:
weight_matrix (array): A 2D Numpy array that represents the weight matrix
for the matching process. If `(m,n)` is the shape of the weight matrix,
it must be `m <= n`. The weights can be integers or floating point
numbers. The matching process will maximize, i.e. larger weights are
preferred over smaller weights.
threshold (float): A float that represents the threshold (i.e. lower bound)
that must be met by a pair of elements to produce a match.
Returns:
Two 1D Numpy arrays of equal length that represent the matched indices. The first
array contains the indices along the first axis of `weight_matrix`, the second array
contains the indices along the second axis.
'''
num_anchor_boxes = weight_matrix.shape[1]
# Only relevant for fancy-indexing below.
all_anchor_indices = list(range(num_anchor_boxes))
# Find the best ground truth match for every anchor box.
# Array of shape (weight_matrix.shape[1],)
ground_truth_indices = np.argmax(weight_matrix, axis=0)
# Array of shape (weight_matrix.shape[1],)
overlaps = weight_matrix[ground_truth_indices, all_anchor_indices]
# Filter out the matches with a weight below the threshold.
anchor_indices_thresh_met = np.nonzero(overlaps >= threshold)[0]
gt_indices_thresh_met = ground_truth_indices[anchor_indices_thresh_met]
return gt_indices_thresh_met, anchor_indices_thresh_met
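# --- Usage sketch (added for illustration; not part of the original file). ---
# Hedged example of multi-matching: with threshold 0.3, only anchor columns
# whose best ground-truth overlap is >= 0.3 produce a match. Values are
# arbitrary demo numbers.
if __name__ == '__main__':
    demo_weights = np.array([[0.9, 0.1, 0.3, 0.2],
                             [0.8, 0.2, 0.1, 0.1],
                             [0.0, 0.7, 0.4, 0.0]])
    gt_idx, anchor_idx = match_multi(demo_weights, threshold=0.3)
    print(gt_idx, anchor_idx)  # -> [0 2 2] [0 1 2]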
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/box_coder/matching_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF implementation of SSD output decoder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.engine.topology import InputSpec, Layer
import tensorflow as tf
class DecodeDetections(Layer):
'''
A Keras layer to decode the raw SSD prediction output.
Input shape:
3D tensor of shape `(batch_size, n_boxes, n_classes + 12)`.
Output shape:
3D tensor of shape `(batch_size, top_k, 6)`.
'''
def __init__(self,
confidence_thresh=0.01,
iou_threshold=0.45,
top_k=200,
nms_max_output_size=400,
img_height=None,
img_width=None,
**kwargs):
'''Init function.'''
        if (img_height is None) or (img_width is None):
            raise ValueError(
                "The decoder needs the image size to convert relative box coordinates to "
                "absolute coordinates, but `img_height == {}` and `img_width == {}`.".format(
                    img_height, img_width))
# We need these members for the config.
self.confidence_thresh = confidence_thresh
self.iou_threshold = iou_threshold
self.top_k = top_k
self.img_height = img_height
self.img_width = img_width
self.nms_max_output_size = nms_max_output_size
super(DecodeDetections, self).__init__(**kwargs)
def build(self, input_shape):
'''Layer build function.'''
self.input_spec = [InputSpec(shape=input_shape)]
super(DecodeDetections, self).build(input_shape)
def call(self, y_pred, mask=None):
'''
Layer call function.
Input shape:
3D tensor of shape `(batch_size, n_boxes, n_classes + 12)`.
Returns:
3D tensor of shape `(batch_size, top_k, 6)`. The second axis is zero-padded
to always yield `top_k` predictions per batch item. The last axis contains
the coordinates for each predicted box in the format
`[class_id, confidence, xmin, ymin, xmax, ymax]`.
'''
# 1. calculate boxes location
scores = y_pred[..., 1:-12]
cx_pred = y_pred[..., -12]
cy_pred = y_pred[..., -11]
w_pred = y_pred[..., -10]
h_pred = y_pred[..., -9]
w_anchor = y_pred[..., -6] - y_pred[..., -8]
h_anchor = y_pred[..., -5] - y_pred[..., -7]
cx_anchor = tf.truediv(y_pred[..., -6] + y_pred[..., -8], 2.0)
cy_anchor = tf.truediv(y_pred[..., -5] + y_pred[..., -7], 2.0)
cx_variance = y_pred[..., -4]
cy_variance = y_pred[..., -3]
variance_w = y_pred[..., -2]
variance_h = y_pred[..., -1]
# Convert anchor box offsets to image offsets.
cx = cx_pred * cx_variance * w_anchor + cx_anchor
cy = cy_pred * cy_variance * h_anchor + cy_anchor
w = tf.exp(w_pred * variance_w) * w_anchor
h = tf.exp(h_pred * variance_h) * h_anchor
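        # Added note: the four lines above are the algebraic inverse of the SSD
        # 'centroids' encoding, i.e. the network was trained to predict
        #     cx_pred = (cx - cx_anchor) / (w_anchor * cx_variance)
        #     w_pred  = log(w / w_anchor) / variance_w
        # (and analogously for cy/h), so scaling by the variances and anchor
        # sizes, plus tf.exp for the sizes, recovers absolute boxes.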
# Convert 'centroids' to 'corners'.
xmin = cx - 0.5 * w
ymin = cy - 0.5 * h
xmax = cx + 0.5 * w
ymax = cy + 0.5 * h
xmin = tf.expand_dims(xmin * self.img_width, axis=-1)
ymin = tf.expand_dims(ymin * self.img_height, axis=-1)
xmax = tf.expand_dims(xmax * self.img_width, axis=-1)
ymax = tf.expand_dims(ymax * self.img_height, axis=-1)
# [batch_size, num_boxes, 1, 4]
boxes = tf.stack(values=[xmin, ymin, xmax, ymax], axis=-1)
# 2. apply NMS
nmsed_box, nmsed_score, nmsed_class, _ = tf.image.combined_non_max_suppression(
boxes,
scores,
max_output_size_per_class=self.nms_max_output_size,
max_total_size=self.top_k,
iou_threshold=self.iou_threshold,
score_threshold=self.confidence_thresh,
pad_per_class=False,
clip_boxes=False,
name='batched_nms')
nmsed_class += 1
nmsed_score = tf.expand_dims(nmsed_score, axis=-1)
nmsed_class = tf.expand_dims(nmsed_class, axis=-1)
outputs = tf.concat([nmsed_class, nmsed_score, nmsed_box], axis=-1)
return outputs
def compute_output_shape(self, input_shape):
'''Keras layer compute_output_shape.'''
batch_size, _, _ = input_shape
return (batch_size, self.top_k, 6) # Last axis: (cls_ID, confidence, 4 box coordinates)
def get_config(self):
'''Keras layer get config.'''
config = {
'confidence_thresh': self.confidence_thresh,
'iou_threshold': self.iou_threshold,
'top_k': self.top_k,
'nms_max_output_size': self.nms_max_output_size,
'img_height': self.img_height,
'img_width': self.img_width,
}
base_config = super(DecodeDetections, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/box_coder/output_decoder_layer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test output decoder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Input
from keras.models import Model
import numpy as np
from nvidia_tao_tf1.cv.ssd.box_coder.output_decoder_layer import DecodeDetections
def test_output_decoder_no_compression():
x = Input(shape=(2, 15))
y = DecodeDetections(top_k=2, nms_max_output_size=5, img_height=300, img_width=300)(x)
model = Model(inputs=x, outputs=y)
encoded_val = '''np.array(
[[[ 0. , 1. , 0. , -2.46207404,
-5.01084082, -21.38983255, -20.27411479, 0.25 ,
0.5 , 0.96124919, 0.96124919, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , -1.07137391,
-2.54451304, -3.64782921, -7.11356512, 0.25 ,
0.5 , 0.62225397, 1.24450793, 0.1 ,
0.1 , 0.2 , 0.2 ]]]
)'''
encoded_val = eval(encoded_val)[:, :, :]
expected = '''np.array(
[[[ 1. , 1. , 127.67308, 148.65036, 130.63277,
151.04959],
[ 1. , 0. , 0. , 0. , 0. ,
0. ]]])'''
expected = eval(expected)
pred = model.predict(encoded_val)
assert np.max(abs(pred - expected)) < 1e-5
def test_output_decoder_compression():
x = Input(shape=(10, 15))
y = DecodeDetections(top_k=5, nms_max_output_size=15, img_height=300, img_width=300)(x)
model = Model(inputs=x, outputs=y)
encoded_val = '''np.array(
[[
[ 0. , 1. , 0. , 4.36584869,
0.26784348, -1.88672624, -8.81819805, 0.05 ,
0.5 , 1.24450793, 0.62225397, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 0. , 1. , 0. , 3.56231825,
0.26784348, -1.88672624, -8.81819805, 0.15 ,
0.5 , 1.24450793, 0.62225397, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 0. , 0. , 1. , 2.75878782,
0.26784348, -1.88672624, -8.81819805, 0.25 ,
0.5 , 1.24450793, 0.62225397, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 1.95525739,
0.26784348, -1.88672624, -8.81819805, 0.35 ,
0.5 , 1.24450793, 0.62225397, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 1.15172695,
0.26784348, -1.88672624, -8.81819805, 0.45 ,
0.5 , 1.24450793, 0.62225397, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 0.34819652,
0.26784348, -1.88672624, -8.81819805, 0.55 ,
0.5 , 1.24450793, 0.62225397, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , -0.45533391,
0.26784348, -1.88672624, -8.81819805, 0.65 ,
0.5 , 1.24450793, 0.62225397, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , -1.25886435,
0.26784348, -1.88672624, -8.81819805, 0.75 ,
0.5 , 1.24450793, 0.62225397, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , -2.06239478,
0.26784348, -1.88672624, -8.81819805, 0.85 ,
0.5 , 1.24450793, 0.62225397, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , -2.86592521,
0.26784348, -1.88672624, -8.81819805, 0.95 ,
0.5 , 1.24450793, 0.62225397, 0.1 ,
0.1 , 0.2 , 0.2 ]]])'''
encoded_val = eval(encoded_val)[:, :, :]
expected = '''np.array(
[[[ 1., 1., 227.77, 166.17693, 473.4848, 172.46394],
[ 2., 1., 204.19827, 166.17693, 408.7723, 172.46394],
[ 1., 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0., 0.]]])'''
expected = eval(expected)
pred = model.predict(encoded_val)
assert np.max(abs(pred - expected)) < 1e-5
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/box_coder/tests/test_output_decoder_layer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test input encoder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from nvidia_tao_tf1.cv.ssd.box_coder.input_encoder import SSDInputEncoder
from nvidia_tao_tf1.cv.ssd.box_coder.ssd_input_encoder import SSDInputEncoderNP
def test_input_encoder_np():
encoder = SSDInputEncoderNP(img_height=300,
img_width=300,
n_classes=3,
predictor_sizes=[(1, 2), (1, 2)],
scales=[0.1, 0.88, 1.05],
aspect_ratios_per_layer=[[1.0, 2.0, 0.5, 3.0, 1.0/3.0],
[1.0, 2.0, 0.5]],
two_boxes_for_ar1=True,
steps=None,
offsets=None,
clip_boxes=False,
variances=[0.1, 0.1, 0.2, 0.2],
pos_iou_threshold=0.5,
neg_iou_limit=0.5,
normalize_coords=True)
gt = np.array([[0, 10, 10, 100, 100], [1, 2, 3, 6, 8]])
# expected value from GitHub original repo based on Numpy
expected = '''np.array(
[[ 1. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.25 ,
0.5 , 0.1 , 0.1 , 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.25 ,
0.5 , 0.29664794, 0.29664794, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.25 ,
0.5 , 0.14142136, 0.07071068, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.25 ,
0.5 , 0.07071068, 0.14142136, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.25 ,
0.5 , 0.17320508, 0.05773503, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.25 ,
0.5 , 0.05773503, 0.17320508, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.75 ,
0.5 , 0.1 , 0.1 , 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.75 ,
0.5 , 0.29664794, 0.29664794, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.75 ,
0.5 , 0.14142136, 0.07071068, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.75 ,
0.5 , 0.07071068, 0.14142136, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.75 ,
0.5 , 0.17320508, 0.05773503, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.75 ,
0.5 , 0.05773503, 0.17320508, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.25 ,
0.5 , 0.88 , 0.88 , 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 0. , 1. , 0. , -2.46207404,
-5.01084082, -21.38983255, -20.27411479, 0.25 ,
0.5 , 0.96124919, 0.96124919, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.25 ,
0.5 , 1.24450793, 0.62225397, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , -1.07137391,
-2.54451304, -3.64782921, -7.11356512, 0.25 ,
0.5 , 0.62225397, 1.24450793, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.75 ,
0.5 , 0.88 , 0.88 , 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.75 ,
0.5 , 0.96124919, 0.96124919, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.75 ,
0.5 , 1.24450793, 0.62225397, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.75 ,
0.5 , 0.62225397, 1.24450793, 0.1 ,
0.1 , 0.2 , 0.2 ]])'''
assert np.max(abs(encoder(gt)[:, :-1] - eval(expected)[:, :-1])) < 1e-5
def test_input_encoder_multimatch_np():
encoder = SSDInputEncoderNP(img_height=300,
img_width=300,
n_classes=3,
predictor_sizes=[(1, 10), (1, 10)],
scales=[0.1, 0.88, 1.05],
aspect_ratios_per_layer=[[2.0],
[2.0]],
two_boxes_for_ar1=True,
steps=None,
offsets=None,
clip_boxes=False,
variances=[0.1, 0.1, 0.2, 0.2],
pos_iou_threshold=0.01,
neg_iou_limit=0.01,
normalize_coords=True)
raw_gt = '''np.array(
[[1, 0, 139, 36, 171],
[1, 23, 139, 66, 171],
[0, 50, 139, 306, 171]])'''
gt = eval(raw_gt)
# expected value from GitHub original repo based on Numpy
expected = '''np.array(
[[ 0. , 1. , 0. , 0.70710678,
2.3570226 , -0.82126017, 2.05556056, 0.05 ,
0.5 , 0.14142136, 0.07071068, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 0. , 1. , 0. , -0.11785113,
2.3570226 , 0.06714572, 2.05556056, 0.15 ,
0.5 , 0.14142136, 0.07071068, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 0. , 1. , 0. , -7.18891894,
2.3570226 , 0.06714572, 2.05556056, 0.25 ,
0.5 , 0.14142136, 0.07071068, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 17.20626501,
2.3570226 , 8.98703236, 2.05556056, 0.35 ,
0.5 , 0.14142136, 0.07071068, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 10.1351972 ,
2.3570226 , 8.98703236, 2.05556056, 0.45 ,
0.5 , 0.14142136, 0.07071068, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 3.06412939,
2.3570226 , 8.98703236, 2.05556056, 0.55 ,
0.5 , 0.14142136, 0.07071068, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , -4.00693843,
2.3570226 , 8.98703236, 2.05556056, 0.65 ,
0.5 , 0.14142136, 0.07071068, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , -11.07800624,
2.3570226 , 8.98703236, 2.05556056, 0.75 ,
0.5 , 0.14142136, 0.07071068, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , -18.14907405,
2.3570226 , 8.98703236, 2.05556056, 0.85 ,
0.5 , 0.14142136, 0.07071068, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , -25.22014186,
2.3570226 , 8.98703236, 2.05556056, 0.95 ,
0.5 , 0.14142136, 0.07071068, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 4.36584869,
0.26784348, -1.88672624, -8.81819805, 0.05 ,
0.5 , 1.24450793, 0.62225397, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 3.56231825,
0.26784348, -1.88672624, -8.81819805, 0.15 ,
0.5 , 1.24450793, 0.62225397, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 2.75878782,
0.26784348, -1.88672624, -8.81819805, 0.25 ,
0.5 , 1.24450793, 0.62225397, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 1.95525739,
0.26784348, -1.88672624, -8.81819805, 0.35 ,
0.5 , 1.24450793, 0.62225397, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 1.15172695,
0.26784348, -1.88672624, -8.81819805, 0.45 ,
0.5 , 1.24450793, 0.62225397, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 0.34819652,
0.26784348, -1.88672624, -8.81819805, 0.55 ,
0.5 , 1.24450793, 0.62225397, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , -0.45533391,
0.26784348, -1.88672624, -8.81819805, 0.65 ,
0.5 , 1.24450793, 0.62225397, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , -1.25886435,
0.26784348, -1.88672624, -8.81819805, 0.75 ,
0.5 , 1.24450793, 0.62225397, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , -2.06239478,
0.26784348, -1.88672624, -8.81819805, 0.85 ,
0.5 , 1.24450793, 0.62225397, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , -2.86592521,
0.26784348, -1.88672624, -8.81819805, 0.95 ,
0.5 , 1.24450793, 0.62225397, 0.1 ,
0.1 , 0.2 , 0.2 ]])'''
assert np.max(abs(encoder(gt)[:, :-1] - eval(expected)[:, :-1])) < 1e-5
def test_input_encoder():
encoder = SSDInputEncoder(img_height=300,
img_width=300,
n_classes=3,
predictor_sizes=[(1, 2), (1, 2)],
scales=[0.1, 0.88, 1.05],
aspect_ratios_per_layer=[[1.0, 2.0, 0.5, 3.0, 1.0/3.0],
[1.0, 2.0, 0.5]],
two_boxes_for_ar1=True,
steps=None,
offsets=None,
clip_boxes=False,
variances=[0.1, 0.1, 0.2, 0.2],
pos_iou_threshold=0.5,
neg_iou_limit=0.5,
normalize_coords=True)
gt = tf.constant(np.array([[0, 10, 10, 100, 100], [1, 2, 3, 6, 8]]))
# expected value from GitHub original repo based on Numpy
expected = '''np.array(
[[ 1. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.25 ,
0.5 , 0.1 , 0.1 , 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.25 ,
0.5 , 0.29664794, 0.29664794, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.25 ,
0.5 , 0.14142136, 0.07071068, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.25 ,
0.5 , 0.07071068, 0.14142136, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.25 ,
0.5 , 0.17320508, 0.05773503, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.25 ,
0.5 , 0.05773503, 0.17320508, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.75 ,
0.5 , 0.1 , 0.1 , 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.75 ,
0.5 , 0.29664794, 0.29664794, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.75 ,
0.5 , 0.14142136, 0.07071068, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.75 ,
0.5 , 0.07071068, 0.14142136, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.75 ,
0.5 , 0.17320508, 0.05773503, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.75 ,
0.5 , 0.05773503, 0.17320508, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.25 ,
0.5 , 0.88 , 0.88 , 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 0. , 1. , 0. , -2.46207404,
-5.01084082, -21.38983255, -20.27411479, 0.25 ,
0.5 , 0.96124919, 0.96124919, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.25 ,
0.5 , 1.24450793, 0.62225397, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , -1.07137391,
-2.54451304, -3.64782921, -7.11356512, 0.25 ,
0.5 , 0.62225397, 1.24450793, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.75 ,
0.5 , 0.88 , 0.88 , 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.75 ,
0.5 , 0.96124919, 0.96124919, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.75 ,
0.5 , 1.24450793, 0.62225397, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.75 ,
0.5 , 0.62225397, 1.24450793, 0.1 ,
0.1 , 0.2 , 0.2 ]])'''
with tf.Session() as sess:
assert np.max(abs(sess.run(encoder([gt]))[:, :, :-1] - eval(expected)[:, :-1])) < 1e-5
def test_input_encoder_multimatch():
encoder = SSDInputEncoder(img_height=300,
img_width=300,
n_classes=3,
predictor_sizes=[(1, 10), (1, 10)],
scales=[0.1, 0.88, 1.05],
aspect_ratios_per_layer=[[2.0],
[2.0]],
two_boxes_for_ar1=True,
steps=None,
offsets=None,
clip_boxes=False,
variances=[0.1, 0.1, 0.2, 0.2],
pos_iou_threshold=0.01,
neg_iou_limit=0.01,
normalize_coords=True)
raw_gt = '''np.array(
[[1, 0, 139, 36, 171],
[1, 23, 139, 66, 171],
[0, 50, 139, 306, 171]])'''
gt = tf.constant(eval(raw_gt))
# expected value from GitHub original repo based on Numpy
expected = '''np.array(
[[ 0. , 1. , 0. , 0.70710678,
2.3570226 , -0.82126017, 2.05556056, 0.05 ,
0.5 , 0.14142136, 0.07071068, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 0. , 1. , 0. , -0.11785113,
2.3570226 , 0.06714572, 2.05556056, 0.15 ,
0.5 , 0.14142136, 0.07071068, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 0. , 1. , 0. , -7.18891894,
2.3570226 , 0.06714572, 2.05556056, 0.25 ,
0.5 , 0.14142136, 0.07071068, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 17.20626501,
2.3570226 , 8.98703236, 2.05556056, 0.35 ,
0.5 , 0.14142136, 0.07071068, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 10.1351972 ,
2.3570226 , 8.98703236, 2.05556056, 0.45 ,
0.5 , 0.14142136, 0.07071068, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 3.06412939,
2.3570226 , 8.98703236, 2.05556056, 0.55 ,
0.5 , 0.14142136, 0.07071068, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , -4.00693843,
2.3570226 , 8.98703236, 2.05556056, 0.65 ,
0.5 , 0.14142136, 0.07071068, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , -11.07800624,
2.3570226 , 8.98703236, 2.05556056, 0.75 ,
0.5 , 0.14142136, 0.07071068, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , -18.14907405,
2.3570226 , 8.98703236, 2.05556056, 0.85 ,
0.5 , 0.14142136, 0.07071068, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , -25.22014186,
2.3570226 , 8.98703236, 2.05556056, 0.95 ,
0.5 , 0.14142136, 0.07071068, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 4.36584869,
0.26784348, -1.88672624, -8.81819805, 0.05 ,
0.5 , 1.24450793, 0.62225397, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 3.56231825,
0.26784348, -1.88672624, -8.81819805, 0.15 ,
0.5 , 1.24450793, 0.62225397, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 2.75878782,
0.26784348, -1.88672624, -8.81819805, 0.25 ,
0.5 , 1.24450793, 0.62225397, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 1.95525739,
0.26784348, -1.88672624, -8.81819805, 0.35 ,
0.5 , 1.24450793, 0.62225397, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 1.15172695,
0.26784348, -1.88672624, -8.81819805, 0.45 ,
0.5 , 1.24450793, 0.62225397, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 0.34819652,
0.26784348, -1.88672624, -8.81819805, 0.55 ,
0.5 , 1.24450793, 0.62225397, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , -0.45533391,
0.26784348, -1.88672624, -8.81819805, 0.65 ,
0.5 , 1.24450793, 0.62225397, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , -1.25886435,
0.26784348, -1.88672624, -8.81819805, 0.75 ,
0.5 , 1.24450793, 0.62225397, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , -2.06239478,
0.26784348, -1.88672624, -8.81819805, 0.85 ,
0.5 , 1.24450793, 0.62225397, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , -2.86592521,
0.26784348, -1.88672624, -8.81819805, 0.95 ,
0.5 , 1.24450793, 0.62225397, 0.1 ,
0.1 , 0.2 , 0.2 ]])'''
with tf.Session() as sess:
assert np.max(abs(sess.run(encoder([gt]))[:, :, :-1] - eval(expected)[:, :-1])) < 1e-5
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/box_coder/tests/test_input_encoder.py |
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper function to load model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import keras
from nvidia_tao_tf1.cv.common.utils import load_keras_model
from nvidia_tao_tf1.cv.ssd.architecture.ssd_loss import SSDLoss
# from nvidia_tao_tf1.cv.ssd.builders import dataset_builder
# from nvidia_tao_tf1.cv.ssd.builders import model_builder
from nvidia_tao_tf1.cv.ssd.layers.anchor_box_layer import AnchorBoxes
from nvidia_tao_tf1.encoding import encoding
CUSTOM_OBJS = {
'AnchorBoxes': AnchorBoxes
}
def get_model_with_input(model_path, input_layer):
"""Implement a trick to replace input tensor."""
model = load_keras_model(model_path,
custom_objects=CUSTOM_OBJS)
optimizer = model.optimizer
_explored_layers = dict()
for l in model.layers:
_explored_layers[l.name] = [False, None]
layers_to_explore = [l for l in model.layers if (type(l) == keras.layers.InputLayer)]
model_outputs = {}
# Loop until we reach the last layer.
while layers_to_explore:
layer = layers_to_explore.pop(0)
# Skip layers that may be revisited in the graph to prevent duplicates.
if not _explored_layers[layer.name][0]:
# Check if all inbound layers explored for given layer.
if not all([
_explored_layers[l.name][0]
for n in layer._inbound_nodes
for l in n.inbound_layers
]):
continue
outputs = None
# Visit input layer.
if type(layer) == keras.layers.InputLayer:
                # Skip the input layer and use the externally provided input tensor instead.
_explored_layers[layer.name][0] = True
_explored_layers[layer.name][1] = None
layers_to_explore.extend([node.outbound_layer for
node in layer._outbound_nodes])
continue
else:
# Create new layer.
layer_config = layer.get_config()
new_layer = type(layer).from_config(layer_config)
# Add to model.
outputs = []
for node in layer._inbound_nodes:
prev_outputs = []
for idx, l in enumerate(node.inbound_layers):
if type(l) == keras.layers.InputLayer:
prev_outputs.append(input_layer)
else:
keras_layer = _explored_layers[l.name][1]
_tmp_output = keras_layer.get_output_at(node.node_indices[idx])
prev_outputs.append(_tmp_output)
assert prev_outputs, "Expected non-input layer to have inputs."
if len(prev_outputs) == 1:
prev_outputs = prev_outputs[0]
outputs.append(new_layer(prev_outputs))
if len(outputs) == 1:
outputs = outputs[0]
weights = layer.get_weights()
if weights is not None:
new_layer.set_weights(weights)
outbound_nodes = layer._outbound_nodes
if not outbound_nodes:
model_outputs[layer.output.name] = outputs
layers_to_explore.extend([node.outbound_layer for node in outbound_nodes])
# Mark current layer as visited and assign output nodes to the layer.
_explored_layers[layer.name][0] = True
_explored_layers[layer.name][1] = new_layer
else:
continue
# Create new keras model object from pruned specifications.
# only use input_image as Model Input.
output_tensors = [model_outputs[l.name] for l in model.outputs if l.name in model_outputs]
new_model = keras.models.Model(inputs=input_layer, outputs=output_tensors, name=model.name)
# Set the optimizer to the new model
new_model.optimizer = optimizer
return new_model
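# --- Usage sketch (added for illustration; not part of the original file). ---
# Hedged example of the input-replacement trick above: rebuild a saved model
# on top of a fresh Input layer. The file name and input shape below are
# hypothetical placeholders, and it assumes the saved model's custom objects
# are covered by CUSTOM_OBJS.
if __name__ == '__main__':
    new_input = keras.layers.Input(shape=(3, 300, 300), name="Input")
    rebuilt_model = get_model_with_input("ssd_model.hdf5", new_input)
    rebuilt_model.summary()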
def load_model(model_path, experiment_spec, is_dssd, input_tensor=None, key=None):
"""Load a model either in .h5 format, .tlt format or .hdf5 format."""
_, ext = os.path.splitext(model_path)
# if ext == '.h5':
# # build model and load weights
# assert experiment_spec is not None, "To load weights, spec file must be provided"
# model = model_builder.build(experiment_spec, is_dssd, model_only=True,
# input_tensor=input_tensor)
# model.load_weights(model_path)
if ext == '.hdf5':
ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)
CUSTOM_OBJS['compute_loss'] = ssd_loss.compute_loss
# directly load model, add dummy loss since loss is never required.
if input_tensor is None:
# load the model to get img width/height
model = load_keras_model(model_path, custom_objects=CUSTOM_OBJS, compile=True)
bs, im_channel, im_height, im_width = model.layers[0].input_shape[:]
if bs is not None:
new_input = keras.layers.Input(shape=(im_channel, im_height, im_width),
name="Input")
ssd_loss_ = SSDLoss(neg_pos_ratio=3, alpha=1.0)
CUSTOM_OBJS['compute_loss'] = ssd_loss_.compute_loss
model = get_model_with_input(model_path, new_input)
else:
input_layer = keras.layers.Input(tensor=input_tensor, name="Input")
model = get_model_with_input(model_path, input_layer)
elif ext == '.tlt':
os_handle, temp_file_name = tempfile.mkstemp(suffix='.hdf5')
os.close(os_handle)
with open(temp_file_name, 'wb') as temp_file, open(model_path, 'rb') as encoded_file:
encoding.decode(encoded_file, temp_file, key)
encoded_file.close()
temp_file.close()
# recursive call
model = load_model(temp_file_name, experiment_spec, is_dssd, input_tensor, None)
os.remove(temp_file_name)
else:
raise NotImplementedError("{0} file is not supported!".format(ext))
return model
def save_model(keras_model, model_path, key, save_format=None):
"""Save a model to either .h5, .tlt or .hdf5 format."""
_, ext = os.path.splitext(model_path)
if (save_format is not None) and (save_format != ext):
# recursive call to save a correct model
return save_model(keras_model, model_path + save_format, key, None)
if ext == '.hdf5':
keras_model.save(model_path, overwrite=True, include_optimizer=True)
else:
raise NotImplementedError("{0} file is not supported!".format(ext))
return model_path
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/utils/model_io.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities related to boxes/anchors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.image.python.ops import image_ops
from nvidia_tao_tf1.cv.ssd.utils.tensor_utils import tensor_strided_replace
def bipartite_match_row(similarity, scope=None):
'''
Returns a bipartite matching according to the similarity matrix.
This is a greedy bi-partite matching algorithm.
Arguments:
similarity (tensor): A 2D tensor represents the similarity of each pair
of boxes.
Returns:
matches (tensor): 1-D tensor with `matches.shape[0]==similarity.shape[0]`
that represents which column box the corresponding row box should
match to.
'''
with tf.name_scope(scope, 'BipartiteMatch'):
matches, _ = image_ops.bipartite_match(
-1.0 * similarity, num_valid_rows=tf.cast(tf.shape(similarity)[0], tf.float32))
matches = tf.reshape(matches, [-1])
matches = tf.cast(matches, tf.int32)
return matches
def multi_match(similarity, threshold, scope=None):
'''
Returns argmax match according to similarity.
Matches all elements along the second axis of `similarity` to their best
matches along the first axis subject to the constraint that the weight of
a match must be greater than or equal to `threshold` in order to produce a match.
If the weight matrix contains elements that should be ignored, the row or column
representing the respective elemet should be set to a value below `threshold`.
Arguments:
similarity (tensor): A 2D tensor represents the similarity of each pair
of boxes.
threshold (float): A float that represents the threshold (i.e. lower bound)
that must be met by a pair of elements to produce a match.
Returns:
gt_idx, anchor_idx (tensor): Two 1D tensor of equal length that represent
the matched indices. `gt_idx` contains the indices along the first axis of
`similarity`, `anchor_idx` contains the indices along the second axis.
'''
with tf.name_scope(scope, 'MultiMatch'):
gt_indices = tf.argmax(similarity, axis=0, output_type=tf.int32)
num_col = tf.shape(similarity)[1]
gt_indices_flatten = tf.range(0, num_col) + num_col * gt_indices
overlaps = tf.gather(tf.reshape(similarity, [-1]), gt_indices_flatten)
# Filter out the matches with a weight below the threshold.
anchor_indices_thresh_met = tf.reshape(tf.where(overlaps >= threshold), [-1])
gt_indices_thresh_met = tf.gather(gt_indices, anchor_indices_thresh_met)
return tf.cast(gt_indices_thresh_met, tf.int32), \
tf.cast(anchor_indices_thresh_met, tf.int32)
def area(boxlist, scope=None):
"""Computes area of boxes.
Args:
boxlist (tensor): Tensor of shape [N,4], holding xmin, ymin, xmax, ymax
scope: name scope.
Returns:
a tensor with shape [N] representing box areas.
"""
with tf.name_scope(scope, 'Area'):
return (boxlist[:, 2] - boxlist[:, 0]) * (boxlist[:, 3] - boxlist[:, 1])
def intersection(boxlist1, boxlist2, scope=None):
"""Compute pairwise intersection areas between boxes.
Args:
boxlist1 (tensor): Tensor of shape [N,4], holding xmin, ymin, xmax, ymax
boxlist2 (tensor): Tensor of shape [M,4], holding xmin, ymin, xmax, ymax
scope: name scope.
Returns:
a tensor with shape [N, M] representing pairwise intersections
"""
with tf.name_scope(scope, 'Intersection'):
x_min1, y_min1, x_max1, y_max1 = tf.split(
value=boxlist1, num_or_size_splits=4, axis=1)
x_min2, y_min2, x_max2, y_max2 = tf.split(
value=boxlist2, num_or_size_splits=4, axis=1)
all_pairs_min_ymax = tf.minimum(y_max1, tf.transpose(y_max2))
all_pairs_max_ymin = tf.maximum(y_min1, tf.transpose(y_min2))
intersect_heights = tf.maximum(0.0, all_pairs_min_ymax - all_pairs_max_ymin)
all_pairs_min_xmax = tf.minimum(x_max1, tf.transpose(x_max2))
all_pairs_max_xmin = tf.maximum(x_min1, tf.transpose(x_min2))
intersect_widths = tf.maximum(0.0, all_pairs_min_xmax - all_pairs_max_xmin)
return intersect_heights * intersect_widths
def iou(boxlist1, boxlist2, scope=None):
"""Computes pairwise intersection-over-union between box collections.
Args:
boxlist1 (tensor): Tensor of shape [N,4], holding xmin, ymin, xmax, ymax
boxlist2 (tensor): Tensor of shape [M,4], holding xmin, ymin, xmax, ymax
scope: name scope.
Returns:
a tensor with shape [N, M] representing pairwise iou scores.
"""
with tf.name_scope(scope, 'IOU'):
intersections = intersection(boxlist1, boxlist2)
areas1 = area(boxlist1)
areas2 = area(boxlist2)
unions = (
tf.expand_dims(areas1, 1) + tf.expand_dims(areas2, 0) - intersections)
return tf.where(
tf.equal(intersections, 0.0),
tf.zeros_like(intersections), tf.truediv(intersections, unions))
def np_elem_iou(boxes1, boxes2, border_pixels='half'):
'''
numpy version of element-wise iou.
Computes the intersection-over-union similarity (also known as Jaccard similarity)
of two sets of axis-aligned 2D rectangular boxes.
Let `boxes1` and `boxes2` contain `m` and `n` boxes, respectively.
    This is the element-wise variant: the shapes of `boxes1` and `boxes2` must be
    broadcast-compatible, and the IoU is computed between corresponding pairs of boxes.
Arguments:
        boxes1 (array): Either a 1D Numpy array of shape `(4, )` containing the coordinates
            for one box in corners format `(xmin, ymin, xmax, ymax)`, or a 2D Numpy array
            of shape `(m, 4)` containing the coordinates for `m` boxes. The shape must be
            broadcast-compatible with `boxes2`.
        boxes2 (array): Either a 1D Numpy array of shape `(4, )` containing the coordinates
            for one box in corners format `(xmin, ymin, xmax, ymax)`, or a 2D Numpy array
            of shape `(n, 4)` containing the coordinates for `n` boxes. The shape must be
            broadcast-compatible with `boxes1`.
border_pixels (str, optional): How to treat the border pixels of the bounding boxes.
Can be 'include', 'exclude', or 'half'. If 'include', the border pixels belong
to the boxes. If 'exclude', the border pixels do not belong to the boxes.
            If 'half', then one of each of the two horizontal and vertical borders belongs
            to the boxes, but not the other.
Returns:
        A 1D Numpy array of dtype float
containing values in [0,1], the Jaccard similarity of the boxes in `boxes1` and
`boxes2`. 0 means there is no overlap between two given boxes, 1 means their
coordinates are identical.
'''
# Make sure the boxes have the right shapes.
if boxes1.ndim > 2:
raise ValueError("boxes1 must have rank either 1 or 2, but has rank {}."
.format(boxes1.ndim))
if boxes2.ndim > 2:
raise ValueError("boxes2 must have rank either 1 or 2, but has rank {}."
.format(boxes2.ndim))
if boxes1.ndim == 1:
boxes1 = np.expand_dims(boxes1, axis=0)
if boxes2.ndim == 1:
boxes2 = np.expand_dims(boxes2, axis=0)
if not (boxes1.shape[1] == boxes2.shape[1] == 4):
raise ValueError("Boxes list last dim should be 4 but got shape {} and {}, respectively."
.format(boxes1.shape, boxes2.shape))
# Set the correct coordinate indices for the respective formats.
xmin = 0
ymin = 1
xmax = 2
ymax = 3
# Compute the union areas.
    if border_pixels == 'half':
        d = 0
    elif border_pixels == 'include':
        d = 1
    elif border_pixels == 'exclude':
        d = -1
    else:
        raise ValueError("`border_pixels` must be one of 'half', 'include', 'exclude'.")
# Compute the IoU.
min_xy = np.maximum(boxes1[:, [xmin, ymin]], boxes2[:, [xmin, ymin]])
max_xy = np.minimum(boxes1[:, [xmax, ymax]], boxes2[:, [xmax, ymax]])
# Compute the side lengths of the intersection rectangles.
side_lengths = np.maximum(0, max_xy - min_xy + d)
intersection_areas = side_lengths[:, 0] * side_lengths[:, 1]
boxes1_areas = (boxes1[:, xmax] - boxes1[:, xmin] + d) * (boxes1[:, ymax] - boxes1[:, ymin] + d)
boxes2_areas = (boxes2[:, xmax] - boxes2[:, xmin] + d) * (boxes2[:, ymax] - boxes2[:, ymin] + d)
union_areas = boxes1_areas + boxes2_areas - intersection_areas
return intersection_areas / union_areas
def corners_to_centroids(tensor, start_index, scope=None):
'''
Convert corner coordinates to centroids (tf tensor).
Arguments:
tensor (array): A tf nD tensor containing the four consecutive coordinates
to be converted somewhere in the last axis.
start_index (int): The index of the first coordinate in the last axis of `tensor`.
scope (str, optional): The scope prefix of the name_scope.
Returns:
A tf nD tensor in centroids coordinates.
'''
with tf.name_scope(scope, 'CornerCoordToCentroids'):
tt = tf.gather(tensor, tf.constant(start_index, dtype=tf.int32), axis=-1)
bb = tf.gather(tensor, tf.constant(start_index+2, dtype=tf.int32), axis=-1)
ll = tf.gather(tensor, tf.constant(start_index+1, dtype=tf.int32), axis=-1)
rr = tf.gather(tensor, tf.constant(start_index+3, dtype=tf.int32), axis=-1)
"""
original new_coords = [
tf.truediv(tensor[..., start_index] + tensor[..., start_index+2], 2.0),
tf.truediv(tensor[..., start_index+1] + tensor[..., start_index+3], 2.0),
tensor[..., start_index+2] - tensor[..., start_index],
tensor[..., start_index+3] - tensor[..., start_index+1]]
"""
new_coords = [
tf.truediv(tt + bb, 2.0),
tf.truediv(ll + rr, 2.0),
bb - tt,
rr - ll
]
return tensor_strided_replace(tensor, (start_index, start_index + 4), new_coords, -1)
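# --- Usage sketch (added for illustration; not part of the original file). ---
# Hedged numeric check: the corner box (xmin, ymin, xmax, ymax) = (0, 0, 2, 4)
# should convert to centroids (cx, cy, w, h) = (1, 2, 2, 4).
if __name__ == '__main__':
    demo_box = tf.constant([[0.0, 0.0, 2.0, 4.0]])
    with tf.Session() as sess:
        print(sess.run(corners_to_centroids(demo_box, start_index=0)))
        # -> [[1. 2. 2. 4.]]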
def np_convert_coordinates(tensor, start_index, conversion):
'''
Convert coordinates for axis-aligned 2D boxes between two coordinate formats.
Creates a copy of `tensor`, i.e. does not operate in place. Currently there are
three supported coordinate formats that can be converted from and to each other:
    1) (xmin, xmax, ymin, ymax) - the 'minmax' format
    2) (xmin, ymin, xmax, ymax) - the 'corners' format
    3) (cx, cy, w, h) - the 'centroids' format
Arguments:
tensor (array): A Numpy nD array containing the four consecutive coordinates
to be converted somewhere in the last axis.
start_index (int): The index of the first coordinate in the last axis of `tensor`.
        conversion (str): The conversion direction. Can be 'minmax2centroids',
            'centroids2minmax', 'corners2centroids', 'centroids2corners', 'minmax2corners',
            or 'corners2minmax'.
Returns:
A Numpy nD array, a copy of the input tensor with the converted coordinates
in place of the original coordinates and the unaltered elements of the original
tensor elsewhere.
'''
ind = start_index
    tensor1 = np.copy(tensor).astype(float)
if conversion == 'minmax2centroids':
tensor1[..., ind] = (tensor[..., ind] + tensor[..., ind+1]) / 2.0 # Set cx
tensor1[..., ind+1] = (tensor[..., ind+2] + tensor[..., ind+3]) / 2.0 # Set cy
tensor1[..., ind+2] = tensor[..., ind+1] - tensor[..., ind] # Set w
tensor1[..., ind+3] = tensor[..., ind+3] - tensor[..., ind+2] # Set h
elif conversion == 'centroids2minmax':
tensor1[..., ind] = tensor[..., ind] - tensor[..., ind+2] / 2.0 # Set xmin
tensor1[..., ind+1] = tensor[..., ind] + tensor[..., ind+2] / 2.0 # Set xmax
tensor1[..., ind+2] = tensor[..., ind+1] - tensor[..., ind+3] / 2.0 # Set ymin
tensor1[..., ind+3] = tensor[..., ind+1] + tensor[..., ind+3] / 2.0 # Set ymax
elif conversion == 'corners2centroids':
tensor1[..., ind] = (tensor[..., ind] + tensor[..., ind+2]) / 2.0 # Set cx
tensor1[..., ind+1] = (tensor[..., ind+1] + tensor[..., ind+3]) / 2.0 # Set cy
tensor1[..., ind+2] = tensor[..., ind+2] - tensor[..., ind] # Set w
tensor1[..., ind+3] = tensor[..., ind+3] - tensor[..., ind+1] # Set h
elif conversion == 'centroids2corners':
tensor1[..., ind] = tensor[..., ind] - tensor[..., ind+2] / 2.0 # Set xmin
tensor1[..., ind+1] = tensor[..., ind+1] - tensor[..., ind+3] / 2.0 # Set ymin
tensor1[..., ind+2] = tensor[..., ind] + tensor[..., ind+2] / 2.0 # Set xmax
tensor1[..., ind+3] = tensor[..., ind+1] + tensor[..., ind+3] / 2.0 # Set ymax
elif conversion in ['minmax2corners', 'corners2minmax']:
tensor1[..., ind+1] = tensor[..., ind+2]
tensor1[..., ind+2] = tensor[..., ind+1]
else:
raise ValueError("Unexpected conversion value.")
return tensor1
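# --- Usage sketch (added for illustration; not part of the original file). ---
# Hedged round-trip check: 'corners2centroids' followed by 'centroids2corners'
# reproduces the input box.
if __name__ == '__main__':
    demo_corners = np.array([[10.0, 20.0, 50.0, 100.0]])
    demo_centroids = np_convert_coordinates(demo_corners, 0, 'corners2centroids')
    print(demo_centroids)  # -> [[30. 60. 40. 80.]]
    print(np_convert_coordinates(demo_centroids, 0, 'centroids2corners'))
    # -> [[ 10.  20.  50. 100.]]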
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/utils/box_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensor utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def _get_non_empty_rows_2d_sparse_non_empty(input_tensor):
"""
Helper function to retrieve non-empty rows of a 2d sparse tensor.
Args:
input_tensor (tf.sparse.SparseTensor): must be 2-D and non-empty
Returns:
output_tensor (tf.sparse.SparseTensor): output tensor with all rows non-empty
"""
old_inds = input_tensor.indices
_, new_rows = tf.unique(old_inds[:, 0], out_idx=tf.int64)
num_new_rows = tf.reduce_max(new_rows) + 1
cols = old_inds[:, 1]
out_tensor = tf.sparse.SparseTensor(indices=tf.stack([new_rows, cols], axis=1),
values=input_tensor.values,
dense_shape=[num_new_rows, input_tensor.dense_shape[1]])
return out_tensor
def get_non_empty_rows_2d_sparse(input_tensor):
"""
Helper function to retrieve non-empty rows of a 2d sparse tensor.
Args:
input_tensor (tf.sparse.SparseTensor): must be 2-D
Returns:
output_tensor (tf.sparse.SparseTensor): output tensor with all rows non-empty
"""
cols = input_tensor.dense_shape[1]
empty_tensor = tf.sparse.SparseTensor(
indices=tf.zeros(dtype=tf.int64, shape=[0, 2]),
values=tf.zeros(dtype=input_tensor.dtype, shape=[0]),
dense_shape=[0, cols])
return tf.cond(tf.equal(tf.size(input_tensor.indices), 0), true_fn=lambda: empty_tensor,
false_fn=lambda: _get_non_empty_rows_2d_sparse_non_empty(input_tensor))
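# --- Usage sketch (added for illustration; not part of the original file). ---
# Hedged example: a 4x3 sparse tensor with rows 1 and 3 empty is compacted to
# a 2x3 sparse tensor that keeps only the non-empty rows.
if __name__ == '__main__':
    demo_sparse = tf.sparse.SparseTensor(indices=[[0, 1], [2, 0], [2, 2]],
                                         values=[1.0, 2.0, 3.0],
                                         dense_shape=[4, 3])
    with tf.Session() as sess:
        print(sess.run(get_non_empty_rows_2d_sparse(demo_sparse)))
        # indices -> [[0 1] [1 0] [1 2]], dense_shape -> [2 3]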
def tensor_slice_replace(a, b, a_idx, b_idx, scope=None):
'''
Returns a new tensor same as `a` but with `a[a_idx] = b[b_idx]`.
Args:
a, b (tensor): `a` and `b` must have same shape except for
the first dimension.
a_idx, b_idx (tensor): 1D tensors. `a_idx` and `b_idx` must
have the same shape and all elements in `a_idx` should
be smaller than `a.shape[0]`. Similar for `b_idx`
Returns:
        c (tensor): A tensor same as `a` but with `a[a_idx]` replaced
            by `b[b_idx]`.
'''
with tf.name_scope(scope, 'SliceReplace'):
a_all_idx = tf.range(tf.shape(a)[0])
_, a_remaining_idx = tf.setdiff1d(a_all_idx, a_idx)
return tf.dynamic_stitch([a_remaining_idx, a_idx],
[tf.gather(a, a_remaining_idx),
tf.gather(b, b_idx)])
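# --- Usage sketch (added for illustration; not part of the original file). ---
# Hedged example: copy rows 0 and 2 of `b` into rows 2 and 0 of `a`.
if __name__ == '__main__':
    demo_a = tf.constant([[1, 1], [2, 2], [3, 3]])
    demo_b = tf.constant([[7, 7], [8, 8], [9, 9]])
    with tf.Session() as sess:
        print(sess.run(tensor_slice_replace(demo_a, demo_b,
                                            a_idx=tf.constant([2, 0]),
                                            b_idx=tf.constant([0, 2]))))
        # -> [[9 9] [2 2] [7 7]]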
def tensor_strided_replace(a, a_range, b, axis=0, scope=None):
'''
Tensor strided replace.
Return a new tensor same as `a` but with `a[...,a_range,...] = b`
`b` shape on axis can be different from `a_range`.
Args:
a, b (tensor): `a` and `b` must have same shape except for the
`axis` dimension. Moreover, `b` can be a list of tensors
with no `axis` dimension, in which case tensors in `b` will
be stacked.
a_range (tuple): a tuple with 2 integers. `a[tuple[0]:tuple[1]]`
will be replaced by `b`
axis (0 or -1): along which axis to replace
Returns:
c (tensor): the replaced tensor.
'''
with tf.name_scope(scope, 'StridedReplace'):
if axis not in [0, -1]:
            raise NotImplementedError("This function only supports axis 0 or -1")
if type(b) == tuple or type(b) == list:
b = tf.stack(list(b), axis=axis)
concat_list = [None, b, None]
if a_range[0] < 0:
end = a.get_shape().as_list()[-1]
else:
end = 0
"""
axis == 0:
concat_list[0] = a[:a_range[0]]
concat_list[-1] = a[a_range[1]:]
axis == -1:
concat_list[0] = a[..., :a_range[0]]
concat_list[-1] = a[..., a_range[1]:]
"""
a0 = tf.gather(a, tf.range(0, end + a_range[0]), axis=axis)
a1 = tf.gather(a, tf.range(end + a_range[1], a.get_shape().as_list()[axis]), axis=axis)
concat_list[0] = a0
concat_list[-1] = a1
return tf.concat(concat_list, axis=axis)
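# --- Usage sketch (added for illustration; not part of the original file). ---
# Hedged example along the last axis: replace columns 1:3 of a (2, 4) tensor
# with a single new column, shrinking that axis from 4 to 3.
if __name__ == '__main__':
    demo_a = tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]])
    demo_b = tf.constant([[0], [0]])
    with tf.Session() as sess:
        print(sess.run(tensor_strided_replace(demo_a, (1, 3), demo_b, axis=-1)))
        # -> [[1 0 4] [5 0 8]]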
def get_init_ops():
"""Return all ops required for initialization."""
"""copied from dlav.common.graph.initializer"""
return tf.group(tf.local_variables_initializer(),
tf.tables_initializer(),
*tf.get_collection('iterator_init'))
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/utils/tensor_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Load an experiment spec file to run SSD training, evaluation, pruning."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
from google.protobuf.text_format import Merge as merge_text_proto
from nvidia_tao_tf1.cv.common.spec_validator import eval_str, length, SpecValidator, ValueChecker
import nvidia_tao_tf1.cv.ssd.proto.experiment_pb2 as experiment_pb2
logger = logging.getLogger(__name__)
_SSD_OPTIONAL_CHECKER = {"aspect_ratios": ValueChecker("!=", ""),
"aspect_ratios_global": ValueChecker("!=", ""),
"scales": ValueChecker("!=", ""),
"steps": ValueChecker("!=", ""),
"offsets": ValueChecker("!=", "")}
_SSD_VALUE_CHECKER_ = {"aspect_ratios": [ValueChecker(">", 0, length, "The length of "),
ValueChecker(">", 0, eval_str)],
"aspect_ratios_global": [ValueChecker(">", 0, length, "The length of "),
ValueChecker(">", 0, eval_str)],
"scales": [ValueChecker(">", 0, length, "The length of "),
ValueChecker(">", 0, eval)],
"steps": [ValueChecker(">", 0, length, "The length of "),
ValueChecker(">", 0, eval)],
"offsets": [ValueChecker(">", 0, length, "The length of "),
ValueChecker(">", 0, eval),
ValueChecker("<", 1.0, eval)],
"variances": [ValueChecker("!=", ""),
ValueChecker("=", 4, length, "The length of "),
ValueChecker(">", 0, eval)],
"arch": [ValueChecker("!=", ""),
ValueChecker("in", ["resnet",
"vgg",
"darknet",
"mobilenet_v1",
"mobilenet_v2",
"squeezenet",
"googlenet",
"efficientnet_b0",
"efficientnet_b1"])],
"nlayers": [ValueChecker(">=", 0)],
"batch_size_per_gpu": [ValueChecker(">", 0)],
"num_epochs": [ValueChecker(">", 0)],
"min_learning_rate": [ValueChecker(">", 0)],
"max_learning_rate": [ValueChecker(">", 0)],
"soft_start": [ValueChecker(">", 0), ValueChecker("<", 1.0)],
"annealing": [ValueChecker(">", 0), ValueChecker("<", 1.0)],
"validation_period_during_training": [ValueChecker(">", 0)],
"batch_size": [ValueChecker(">", 0)],
"matching_iou_threshold": [ValueChecker(">", 0),
ValueChecker("<", 1.0)],
"confidence_threshold": [ValueChecker(">", 0),
ValueChecker("<", 1.0)],
"clustering_iou_threshold": [ValueChecker(">", 0),
ValueChecker("<", 1.0)],
"top_k": [ValueChecker(">", 0)],
"output_width": [ValueChecker(">", 32)],
"output_height": [ValueChecker(">", 32)],
"output_channel": [ValueChecker("in", [1, 3])],
"monitor": [ValueChecker("in", ["loss", "validation_loss", "val_loss"])],
"min_delta": [ValueChecker(">=", 0)],
"patience": [ValueChecker(">=", 0)],
"checkpoint_interval": [ValueChecker(">=", 0)]}
TRAIN_EXP_REQUIRED_MSG = ["ssd_config", "training_config", "eval_config",
"augmentation_config", "nms_config", "dataset_config"]
EVAL_EXP_REQUIRED_MSG = ["ssd_config", "eval_config", "nms_config",
                         "augmentation_config", "dataset_config"]
INFERENCE_EXP_REQUIRED_MSG = ["ssd_config", "eval_config", "nms_config",
                              "augmentation_config", "dataset_config"]
EXPORT_EXP_REQUIRED_MSG = ["ssd_config", "nms_config"]
_REQUIRED_MSG_ = {"training_config": ["learning_rate", "regularizer"],
"learning_rate": ["soft_start_annealing_schedule"],
"soft_start_annealing_schedule": ["min_learning_rate",
"max_learning_rate",
"soft_start",
"annealing"],
"dataset_config": ["target_class_mapping"]}
def spec_validator(spec, required_msg=None, ssd_spec_validator=None):
"""do spec validation for SSD/DSSD."""
if required_msg is None:
required_msg = []
if ssd_spec_validator is None:
ssd_spec_validator = SpecValidator(required_msg_dict=_REQUIRED_MSG_,
value_checker_dict=_SSD_VALUE_CHECKER_,
                                           option_checker_dict=_SSD_OPTIONAL_CHECKER)
ssd_spec_validator.validate(spec, required_msg)
def validate_train_spec(spec):
"""do spec validation check for training spec."""
# @TODO(tylerz): workaround for one-of behavior to check train dataset existence
ssd_spec_validator = SpecValidator(required_msg_dict=_REQUIRED_MSG_,
value_checker_dict=_SSD_VALUE_CHECKER_,
                                       option_checker_dict=_SSD_OPTIONAL_CHECKER)
ssd_spec_validator.required_msg_dict["dataset_config"].append("data_sources")
if spec.dataset_config.data_sources[0].tfrecords_path == "":
ssd_spec_validator.value_checker_dict["label_directory_path"] = [ValueChecker("!=", "")]
ssd_spec_validator.value_checker_dict["image_directory_path"] = [ValueChecker("!=", "")]
# Remove the empty validation dataset to skip the check
    # Iterate in reverse so that deleting an element does not shift the
    # indices of elements not yet visited.
    for idx in reversed(range(len(spec.dataset_config.validation_data_sources))):
if spec.dataset_config.validation_data_sources[idx].image_directory_path == "" or \
spec.dataset_config.validation_data_sources[idx].label_directory_path == "":
del spec.dataset_config.validation_data_sources[idx]
spec_validator(spec, TRAIN_EXP_REQUIRED_MSG, ssd_spec_validator)
def validate_eval_spec(spec):
"""do spec validation check for training spec."""
# @TODO(tylerz): workaround for one-of behavior to check validation dataset existence
ssd_spec_validator = SpecValidator(required_msg_dict=_REQUIRED_MSG_,
value_checker_dict=_SSD_VALUE_CHECKER_,
                                       option_checker_dict=_SSD_OPTIONAL_CHECKER)
ssd_spec_validator.required_msg_dict["dataset_config"].append("validation_data_sources")
ssd_spec_validator.value_checker_dict["label_directory_path"] = [ValueChecker("!=", "")]
ssd_spec_validator.value_checker_dict["image_directory_path"] = [ValueChecker("!=", "")]
# Skip the check for label and image in data_sources
# cause we don't care training dataset in evaluation
for idx in range(len(spec.dataset_config.data_sources)):
spec.dataset_config.data_sources[idx].image_directory_path = "./fake_dir"
spec.dataset_config.data_sources[idx].label_directory_path = "./fake_dir"
spec_validator(spec, EVAL_EXP_REQUIRED_MSG, ssd_spec_validator)
def load_proto(spec_path, proto_buffer, default_spec_path=None, merge_from_default=True):
"""Load spec from file and merge with given proto_buffer instance.
Args:
spec_path (str): location of a file containing the custom spec proto.
        proto_buffer(pb2): protocol buffer instance to be loaded.
default_spec_path(str): location of default spec to use if merge_from_default is True.
merge_from_default (bool): disable default spec, if False, spec_path must be set.
Returns:
proto_buffer(pb2): protocol buffer instance updated with spec.
"""
def _load_from_file(filename, pb2):
with open(filename, "r") as f:
merge_text_proto(f.read(), pb2)
    # Setting merge_from_default to False prevents concatenating repeated fields.
if merge_from_default:
assert default_spec_path, \
"default spec path has to be defined if merge_from_default is enabled"
# Load the default spec
_load_from_file(default_spec_path, proto_buffer)
else:
assert spec_path, "spec_path has to be defined, if merge_from_default is disabled"
# Merge a custom proto on top of the default spec, if given
if spec_path:
logger.info("Merging specification from %s", spec_path)
_load_from_file(spec_path, proto_buffer)
return proto_buffer
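# --- Usage sketch (added for illustration; not part of the original file). ---
# Hedged example of load_proto: merge a custom spec on top of a default spec.
# Both paths below are hypothetical placeholders.
if __name__ == '__main__':
    demo_spec = load_proto(spec_path="my_spec.txt",
                           proto_buffer=experiment_pb2.Experiment(),
                           default_spec_path="default_spec.txt",
                           merge_from_default=True)
    print(demo_spec.WhichOneof('network'))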
def load_experiment_spec(spec_path=None, arch_check=None):
"""Load experiment spec from a .txt file and return an experiment_pb2.Experiment object.
Args:
spec_path (str): location of a file containing the custom experiment spec proto.
        arch_check (str): if given, assert that the architecture in the spec matches
            this value.
    Returns:
        experiment_spec: protocol buffer instance of type experiment_pb2.Experiment, with
            the network config always copied into the ssd_config message.
        is_dssd (bool): whether to build a DSSD network.
"""
merge_from_default = (spec_path is None)
if merge_from_default:
print("No spec file passed in. Loading default experiment spec!!!")
experiment_spec = experiment_pb2.Experiment()
file_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
default_spec_path = os.path.join(file_path, 'experiment_specs/default_spec.txt')
experiment_spec = load_proto(spec_path, experiment_spec, default_spec_path,
merge_from_default)
network_arch = experiment_spec.WhichOneof('network')
assert network_arch is not None, 'Network config missing in spec file.'
experiment_spec.ssd_config.CopyFrom(getattr(experiment_spec, network_arch))
network_arch = network_arch.split('_')[0]
    assert arch_check is None or arch_check.lower() == network_arch, \
        'The spec file specifies %s but %s was passed on the command line.' % (network_arch,
                                                                               arch_check)
return experiment_spec, network_arch == 'dssd'
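# Illustrative usage sketch (hypothetical spec path; `arch_check` must match the
# spec's `network` one-of, i.e. 'ssd' or 'dssd'):
#
#     spec, is_dssd = load_experiment_spec('/workspace/specs/dssd_train.txt',
#                                          arch_check='dssd')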
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/utils/spec_loader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test box utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from nvidia_tao_tf1.cv.ssd.utils import box_utils
def test_np_iou():
a = np.array([0, 0, 2, 2])
b = np.array([[0, 0, 2, 2], [0, 0, 2, 1], [1, 0, 2, 2], [9, 10, 11, 12], [1, 1, 3, 3]])
assert max(abs(box_utils.np_elem_iou(a, b) - np.array([1.0, 0.5, 0.5, 0.0, 1.0 / 7.0]))) < 1e-10
def test_np_iou_binary():
a = np.array([[0, 0, 2, 2], [0, 0, 2, 1], [1, 0, 2, 2]])
b = np.array([[0, 0, 2, 2], [0, 0, 2, 1], [1, 0, 2, 2]])
assert max(abs(box_utils.np_elem_iou(a, b) - np.array([1.0, 1.0, 1.0]))) < 1e-10
def test_tf_iou():
a = tf.constant(np.array([[0, 0, 2, 2]]), dtype=tf.float32)
b = tf.constant(np.array([[0, 0, 2, 2], [0, 0, 2, 1], [1, 0, 2, 2], [9, 10, 11, 12],
[1, 1, 3, 3]]), dtype=tf.float32)
with tf.Session() as sess:
result = sess.run(box_utils.iou(a, b))
assert np.max(abs(result - np.array([[1.0, 0.5, 0.5, 0.0, 1.0 / 7.0]]))) < 1e-5
def test_bipartite_match_row():
sim_matrix = [[0.9, 0.8, 0.3, 0.2, 0.1, 0.7, 0.5],
[0.6, 0.2, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.0, 0.0, 0.0, 0.0, 0.9, 0.7, 0.8],
[0.0, 0.0, 0.3, 0.2, 0.0, 0.0, 0.0]]
a = tf.constant(np.array(sim_matrix), dtype=tf.float32)
b = a - 1.0
with tf.Session() as sess:
result = sess.run(box_utils.bipartite_match_row(a))
assert max(abs(result - np.array([0, 1, 4, 2]))) < 1e-5
result = sess.run(box_utils.bipartite_match_row(b))
assert max(abs(result - np.array([0, 1, 4, 2]))) < 1e-5
def test_multi_match():
sim_matrix = [[0.9, 0.8, 0.3, 0.2, 0.1, 0.7, 0.5],
[0.6, 0.2, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.8, 0.8],
[0.0, 0.0, 0.4, 0.3, 0.0, 0.0, 0.0]]
a = tf.constant(np.array(sim_matrix), dtype=tf.float32)
with tf.Session() as sess:
gt, anchor = sess.run(box_utils.multi_match(a, 0.2))
assert set(zip(anchor, gt)) == set([(0, 0), (1, 0), (2, 3), (3, 3), (5, 2), (6, 2)])
def test_corners_to_centroids():
corner_box = [[1.0, 1.0, 2.0, 2.0],
[0.0, 0.0, 2.0, 3.0]]
tf_box = tf.constant(np.array(corner_box), dtype=tf.float32)
expected = [[1.5, 1.5, 1.0, 1.0],
[1.0, 1.5, 2.0, 3.0]]
with tf.Session() as sess:
result = sess.run(box_utils.corners_to_centroids(tf_box, 0))
assert np.max(abs(result - np.array(expected))) < 1e-5
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/utils/tests/test_box_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test mAP evaluator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from nvidia_tao_tf1.cv.ssd.utils import tensor_utils
def test_get_non_empty_rows_2d_sparse():
empty_tensor = tf.sparse.SparseTensor(
indices=tf.zeros(dtype=tf.int64, shape=[0, 2]),
values=[],
dense_shape=[10000, 9])
empty_results = tf.sparse.to_dense(tensor_utils.get_non_empty_rows_2d_sparse(empty_tensor))
non_empty_zero = tf.zeros(dtype=tf.int32, shape=[100, 1000])
non_empty_tensor = tf.sparse.from_dense(non_empty_zero)
non_empty_zero_results = tensor_utils.get_non_empty_rows_2d_sparse(non_empty_tensor)
non_empty_zero_results = tf.sparse.to_dense(non_empty_zero_results)
non_empty = tf.sparse.from_dense(tf.constant(np.array([[1, 0, 3], [0, 0, 0], [0, 0, 9]])))
non_empty_results = tf.sparse.to_dense(tensor_utils.get_non_empty_rows_2d_sparse(non_empty))
with tf.Session() as sess:
result = sess.run(empty_results)
assert result.shape == (0, 9)
result = sess.run(non_empty_zero_results)
assert result.shape == (0, 1000)
result = sess.run(non_empty_results)
assert np.max(abs(result - np.array([[1, 0, 3], [0, 0, 9]]))) < 1e-5
def test_tensor_slice_replace():
a = tf.constant(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))
b = tf.constant(np.array([[-1, -2, -3], [-4, -5, -6], [-7, -8, -9]]))
a_idx = tf.constant(np.array([1, 2]), dtype=tf.int32)
b_idx = tf.constant(np.array([1, 0]), dtype=tf.int32)
with tf.Session() as sess:
result = sess.run(tensor_utils.tensor_slice_replace(a, b, a_idx, b_idx))
assert np.max(abs(result - np.array([[1, 2, 3], [-4, -5, -6], [-1, -2, -3]]))) < 1e-5
def test_tensor_strided_replace():
a = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
b = np.array([[-1, -2, -3],
[-4, -5, -6],
[-7, -8, -9]])
a = tf.constant(a)
b = tf.constant(b)
with tf.Session() as sess:
result = sess.run(tensor_utils.tensor_strided_replace(a, (1, 2), b))
expected = np.array([[1, 2, 3],
[-1, -2, -3],
[-4, -5, -6],
[-7, -8, -9],
[7, 8, 9]])
assert np.max(abs(result - expected)) < 1e-5
result = sess.run(tensor_utils.tensor_strided_replace(a, (1, 2), b, -1))
expected = np.array([[1, -1, -2, -3, 3],
[4, -4, -5, -6, 6],
[7, -7, -8, -9, 9]])
assert np.max(abs(result - expected)) < 1e-5
result = sess.run(tensor_utils.tensor_strided_replace(a, (0, 3), b))
expected = np.array([[-1, -2, -3],
[-4, -5, -6],
[-7, -8, -9]])
assert np.max(abs(result - expected)) < 1e-5
def _test_setup_keras_backend():  # underscore-prefixed to disable: this function doesn't work
tensor_utils.setup_keras_backend('float32', True)
with tf.Session() as sess:
assert sess.run(tf.keras.backend.learning_phase()) == 1
assert tf.keras.backend.floatx() == 'float32'
tensor_utils.setup_keras_backend('float16', False)
with tf.Session() as sess:
assert sess.run(tf.keras.backend.learning_phase()) == 0
assert tf.keras.backend.floatx() == 'float16'
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/utils/tests/test_tensor_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test spec loader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import pytest
from nvidia_tao_tf1.cv.ssd.utils.spec_loader import load_experiment_spec, spec_validator,\
validate_eval_spec, validate_train_spec
def test_spec_loader():
experiment_spec, is_dssd = load_experiment_spec()
assert is_dssd
assert len(experiment_spec.ssd_config.arch) > 3
assert experiment_spec.eval_config.validation_period_during_training > 0
assert experiment_spec.training_config.num_epochs > 0
assert experiment_spec.nms_config.top_k > 0
with pytest.raises(AssertionError):
experiment_spec, is_dssd = load_experiment_spec(arch_check='random')
def catch_assert_error(spec):
with pytest.raises(AssertionError):
spec_validator(spec)
def assert_check(spec):
try:
spec_validator(spec)
except AssertionError:
return False
return True
def test_spec_validator():
experiment_spec, _ = load_experiment_spec()
# ssd_config_test:
test_spec = copy.deepcopy(experiment_spec)
test_spec.ssd_config.aspect_ratios_global = "[]"
catch_assert_error(test_spec)
test_spec = copy.deepcopy(experiment_spec)
test_spec.ssd_config.aspect_ratios = "[[1.0, 2.0, 0.5], [1.0, 2.0, 0.5, 3.0, 1.0/3.0], \
[1.0, 2.0, 0.5, 3.0, 1.0/3.0], \
[1.0, 2.0, 0.5, 3.0, 1.0/3.0], \
[1.0, 2.0, 0.5], [1.0, 2.0, 0.5]]"
test_spec.ssd_config.aspect_ratios_global = ""
assert assert_check(test_spec)
test_spec = copy.deepcopy(experiment_spec)
test_spec.ssd_config.aspect_ratios = "[[-1.0, 2.0, 0.5], [1.0, 2.0, 0.5, 3.0, 1.0/3.0], \
[1.0, 2.0, 0.5, 3.0, 1.0/3.0], \
[1.0, 2.0, 0.5, 3.0, 1.0/3.0], \
[1.0, 2.0, 0.5], [1.0, 2.0, 0.5]]"
test_spec.ssd_config.aspect_ratios_global = ""
catch_assert_error(test_spec)
test_spec = copy.deepcopy(experiment_spec)
test_spec.ssd_config.variances = "[0.1, 0.1, 0.2, 0.2, 1]"
catch_assert_error(test_spec)
test_spec = copy.deepcopy(experiment_spec)
test_spec.ssd_config.arch = "renset"
catch_assert_error(test_spec)
test_spec = copy.deepcopy(experiment_spec)
test_spec.ssd_config.arch = "mobilenet_v2"
test_spec.ssd_config.nlayers = 0
assert assert_check(test_spec)
test_spec = copy.deepcopy(experiment_spec)
test_spec.ssd_config.arch = "efficientnet_b1"
test_spec.ssd_config.nlayers = 0
assert assert_check(test_spec)
test_spec = copy.deepcopy(experiment_spec)
test_spec.ssd_config.arch = ""
catch_assert_error(test_spec)
# train_config_test:
test_spec = copy.deepcopy(experiment_spec)
test_spec.training_config.num_epochs = 0
catch_assert_error(test_spec)
test_spec = copy.deepcopy(experiment_spec)
test_spec.training_config.learning_rate.soft_start_annealing_schedule.soft_start = 0
catch_assert_error(test_spec)
test_spec.training_config.early_stopping.monitor = "losss"
catch_assert_error(test_spec)
# eval_config_test:
test_spec = copy.deepcopy(experiment_spec)
test_spec.eval_config.batch_size = 0
catch_assert_error(test_spec)
test_spec = copy.deepcopy(experiment_spec)
test_spec.eval_config.matching_iou_threshold = 1.1
catch_assert_error(test_spec)
# nms_config_test:
test_spec = copy.deepcopy(experiment_spec)
test_spec.nms_config.clustering_iou_threshold = 1.1
catch_assert_error(test_spec)
test_spec = copy.deepcopy(experiment_spec)
test_spec.nms_config.top_k = 0
catch_assert_error(test_spec)
# aug_config_test:
test_spec = copy.deepcopy(experiment_spec)
test_spec.augmentation_config.output_channel = 4
catch_assert_error(test_spec)
# dataset_config_test:
test_spec = copy.deepcopy(experiment_spec)
test_spec.dataset_config.data_sources[0].label_directory_path = ""
with pytest.raises(AssertionError):
validate_train_spec(test_spec)
test_spec = copy.deepcopy(experiment_spec)
test_spec.dataset_config.validation_data_sources[0].label_directory_path = ""
with pytest.raises(AssertionError):
validate_eval_spec(test_spec)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/utils/tests/test_spec_loader.py |
tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/models/__init__.py |
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IVA SSD model construction wrapper functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Conv2D
from nvidia_tao_tf1.cv.ssd.models.dssd_arch import attach_pred_layers, generate_dssd_layers
from nvidia_tao_tf1.cv.ssd.models.ssd_backbones import get_raw_ssd_model
def _shrink_module(fmap_outputs,
num_output_channels,
data_format,
kernel_regularizer):
'''Reduce fmaps in fmap_outputs to num_output_channels.
Args:
fmap_outputs: List of keras tensors.
        num_output_channels: Output channels each output needs; same length as fmap_outputs.
data_format: data_format
kernel_regularizer: kernel_regularizer
Returns:
        feature_maps: length n list of keras tensors, each of which is the output tensor of
            the SSD module.
'''
feature_maps = []
for idx, fmap in enumerate(fmap_outputs):
if num_output_channels[idx] != 0:
fmap = Conv2D(num_output_channels[idx],
kernel_size=1,
strides=1,
dilation_rate=1,
padding='same',
data_format=data_format,
activation=None,
kernel_regularizer=kernel_regularizer,
use_bias=False,
name='ssd_shrink_block_%d' % (idx))(fmap)
feature_maps.append(fmap)
return feature_maps
def get_base_model(input_tensor,
arch,
nlayers,
is_dssd,
pred_num_channels,
kernel_regularizer=None,
bias_regularizer=None,
freeze_blocks=None,
freeze_bn=None):
'''Wrapper function to get base model.
Args:
input_tensor: Image tensor.
arch, nlayers: feature extractor config.
is_dssd: whether dssd arch is needed
pred_num_channels: #channels for pred module
kernel_regularizer: kernel_regularizer
bias_regularizer: bias_regularizer
        freeze_blocks: blocks to set non-trainable in the extractor (their pretrained
            weights are kept unchanged).
        freeze_bn: freeze BN layers in the pretrained feature extractor.
Returns:
pred_layers: list of pred tensors.
'''
ssd_layers = get_raw_ssd_model(input_tensor,
arch,
nlayers,
kernel_regularizer,
bias_regularizer,
freeze_blocks,
freeze_bn)
if is_dssd:
shrinkage = [0, 0, 0, 0, 0, 0]
if arch in ['mobilenet_v1', 'mobilenet_v2', 'squeezenet']:
shrinkage = [256, 0, 0, 0, 0, 0]
if arch == 'resnet' and nlayers in [10]:
shrinkage = [256, 0, 0, 0, 0, 0]
ssd_layers = _shrink_module(ssd_layers, shrinkage, 'channels_first', kernel_regularizer)
# attach dssd module
ssd_layers = generate_dssd_layers(ssd_layers,
data_format='channels_first',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer)
pred_layers = attach_pred_layers(ssd_layers,
pred_num_channels,
data_format='channels_first',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer)
return pred_layers
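# Illustrative usage sketch (mirrors the unit tests for this module; the input
# shape is arbitrary):
#
#     import keras
#     image = keras.layers.Input(shape=(3, 300, 300))
#     pred_layers = get_base_model(image, arch='resnet', nlayers=18,
#                                  is_dssd=True, pred_num_channels=256)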
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/models/base_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IVA DSSD model constructor based on SSD layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Add, BatchNormalization, Conv2D, Conv2DTranspose, Multiply, \
ReLU, ZeroPadding2D
def _deconv_module(tensor_small,
tensor_large,
tensor_large_shape,
module_index=0,
data_format='channels_first',
kernel_regularizer=None,
bias_regularizer=None):
'''
Deconv module of DSSD. output is a tensor with same shape as tensor_large.
Args:
tensor_small: a keras tensor for small feature map
        tensor_large: a keras tensor for the immediately larger feature map in the backbone
tensor_large_shape: [c, h, w] of large tensor
module_index: int representing the index of the module
data_format: data format
kernel_regularizer: keras regularizer for kernel
bias_regularizer: keras regularizer for bias
Returns:
deconv_tensor: tensor representing feature maps used for prediction
'''
bn_axis = 1 if data_format == 'channels_first' else 3
x = Conv2D(int(tensor_large_shape[0]),
kernel_size=3,
strides=1,
padding='same',
data_format=data_format,
activation=None,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
use_bias=False,
name='dssd_large_conv_0_block_'+str(module_index))(tensor_large)
x = BatchNormalization(axis=bn_axis, name='dssd_large_bn_0_block_'+str(module_index))(x)
x = ReLU(name='dssd_large_relu_0_block_'+str(module_index))(x)
x = Conv2D(int(tensor_large_shape[0]),
kernel_size=3,
strides=1,
padding='same',
data_format=data_format,
activation=None,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
use_bias=False,
name='dssd_large_conv_1_block_'+str(module_index))(x)
x = BatchNormalization(axis=bn_axis, name='dssd_large_bn_1_block_'+str(module_index))(x)
y = Conv2DTranspose(int(tensor_large_shape[0]), (2, 2), strides=(2, 2),
padding='same', output_padding=None,
data_format=data_format, dilation_rate=(1, 1),
activation=None, use_bias=False,
kernel_regularizer=kernel_regularizer,
name='dssd_small_deconv_block_'+str(module_index))(tensor_small)
if data_format == "channels_first":
h_upsampled = tensor_small._keras_shape[2]*2
w_upsampled = tensor_small._keras_shape[3]*2
elif data_format == "channels_last":
h_upsampled = tensor_small._keras_shape[1]*2
w_upsampled = tensor_small._keras_shape[2]*2
    # Adjust kernel size and padding so the upsampled map matches the large map's size.
if h_upsampled == tensor_large_shape[1]:
        # keep size unchanged
h_pad = 1
h_kernel = 3
elif h_upsampled > tensor_large_shape[1]:
# make spatial size - 1
h_pad = 0
h_kernel = 2
else:
# make spatial size + 1
h_pad = 1
h_kernel = 2
if w_upsampled == tensor_large_shape[2]:
# keep size unchanged
w_pad = 1
w_kernel = 3
elif w_upsampled > tensor_large_shape[2]:
# make spatial size - 1
w_pad = 0
w_kernel = 2
else:
        # make spatial size + 1
w_pad = 1
w_kernel = 2
y = ZeroPadding2D(padding=(h_pad, w_pad), data_format=data_format,
name='dssd_pad_'+str(module_index))(y)
y = Conv2D(int(tensor_large_shape[0]),
kernel_size=(h_kernel, w_kernel),
strides=1,
padding='valid',
data_format=data_format,
activation=None,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
use_bias=False,
name='dssd_small_conv_block_'+str(module_index))(y)
y = BatchNormalization(axis=bn_axis, name='dssd_small_bn_block_'+str(module_index))(y)
    # Finally, multiply the small and large feature maps element-wise.
x = Multiply(name='dssd_mul_block_'+str(module_index))([x, y])
x = ReLU(name='dssd_relu_block_'+str(module_index))(x)
return x
def _pred_module(feature_map,
module_index=0,
num_channels=0,
data_format='channels_first',
kernel_regularizer=None,
bias_regularizer=None):
'''
    Prediction module.
Args:
feature_map: keras tensor for feature maps used for prediction.
module_index: the index of module
num_channels: the number of output feature map channels, use 0 to skip pred_module
data_format: data format
kernel_regularizer: keras regularizer for kernel
bias_regularizer: keras regularizer for bias
Returns:
pred_map: a keras tensor with channel number defined by num_channels (if not zero) and
map size same as feature_map.
'''
if num_channels == 0:
return feature_map
assert num_channels in [256, 512, 1024], "num_channels only supports 0, 256, 512, 1024"
bn_axis = 1 if data_format == 'channels_first' else 3
x = Conv2D(num_channels // 4,
kernel_size=1,
strides=1,
padding='same',
data_format=data_format,
activation=None,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
use_bias=False,
name='ssd_mpred_conv_0_block_'+str(module_index))(feature_map)
x = BatchNormalization(axis=bn_axis, name='ssd_mpred_bn_0_block_'+str(module_index))(x)
x = ReLU(name='ssd_mpred_relu_0_block_'+str(module_index))(x)
x = Conv2D(num_channels // 4,
kernel_size=1,
strides=1,
padding='same',
data_format=data_format,
activation=None,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
use_bias=False,
name='ssd_mpred_conv_1_block_'+str(module_index))(x)
x = BatchNormalization(axis=bn_axis, name='ssd_mpred_bn_1_block_'+str(module_index))(x)
x = ReLU(name='ssd_mpred_relu_1_block_'+str(module_index))(x)
x = Conv2D(num_channels,
kernel_size=1,
strides=1,
padding='same',
data_format=data_format,
activation=None,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
use_bias=False,
name='ssd_mpred_conv_2_block_'+str(module_index))(x)
x = BatchNormalization(axis=bn_axis, name='ssd_mpred_bn_2_block_'+str(module_index))(x)
y = Conv2D(num_channels,
kernel_size=1,
strides=1,
padding='same',
data_format=data_format,
activation=None,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
use_bias=False,
name='ssd_mpred_conv_3_block_'+str(module_index))(feature_map)
y = BatchNormalization(axis=bn_axis, name='ssd_mpred_bn_3_block_'+str(module_index))(y)
x = Add(name='ssd_mpred_add_block_'+str(module_index))([x, y])
x = ReLU(name='ssd_mpred_relu_3_block_'+str(module_index))(x)
return x
def generate_dssd_layers(ssd_layers,
data_format='channels_first',
kernel_regularizer=None,
bias_regularizer=None):
'''
Get DSSD layers from SSD layers.
Args:
ssd_layers: SSD layers from SSD feature maps.
data_format: data format
kernel_regularizer: keras regularizer for kernel
bias_regularizer: keras regularizer for bias
Returns:
dssd_layers: DSSD layers each of which has same shape with that in ssd_layers.
'''
# NCHW or NHWC
l_vals = list(zip(*[l.shape[1:] for l in ssd_layers]))
if data_format == 'channels_first':
l_c, l_h, l_w = l_vals
else:
l_h, l_w, l_c = l_vals
results = [ssd_layers[-1]]
for idx, i in enumerate(reversed(range(len(ssd_layers)-1))):
dssd_layer = _deconv_module(results[-1],
ssd_layers[i],
[l_c[i], l_h[i], l_w[i]], # [c, h, w]
module_index=idx,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer)
results.append(dssd_layer)
# return large fmap first.
return results[::-1]
def attach_pred_layers(dssd_layers,
num_channels,
data_format='channels_first',
kernel_regularizer=None,
bias_regularizer=None):
'''
Get pred module attached feature map.
Args:
dssd_layers: keras tensor for feature maps right before prediction module
num_channels: the number of output feature map channels, use 0 to skip pred_module
data_format: data format
kernel_regularizer: keras regularizer for kernel
bias_regularizer: keras regularizer for bias
Returns:
pred_map: a keras tensor with channel number defined by num_channels (if not zero) and
map size same as feature_map.
'''
results = []
for idx, l in enumerate(dssd_layers):
pred_layer = _pred_module(l, idx, num_channels, data_format, kernel_regularizer,
bias_regularizer)
results.append(pred_layer)
return results
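# Illustrative usage sketch (mirrors the unit tests for this module): given the
# list of six SSD feature-map tensors, build the deconvolution path and attach
# prediction modules with 256 output channels.
#
#     dssd_layers = generate_dssd_layers(ssd_layers, data_format='channels_first')
#     pred_layers = attach_pred_layers(dssd_layers, 256, data_format='channels_first')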
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/models/dssd_arch.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""Patch keras's conv2d and pool2d for handling floor mode in downsampling with kernel_size=2."""
import keras
def conv_output_length(input_length, filter_size,
padding, stride, dilation=1):
"""Determines output length of a convolution given input length.
# Arguments
input_length: integer.
filter_size: integer.
        padding: one of `"same"`, `"valid"`, `"full"`, `"causal"`.
stride: integer.
dilation: dilation rate, integer.
# Returns
The output length (integer).
"""
if input_length is None:
return None
assert padding in {'same', 'valid', 'full', 'causal'}
dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)
if padding == 'same':
_padding = (filter_size - 1) // 2
return (input_length + 2 * _padding - dilated_filter_size) // stride + 1
if padding == 'valid':
output_length = input_length - dilated_filter_size + 1
elif padding == 'causal':
output_length = input_length
elif padding == 'full':
output_length = input_length + dilated_filter_size - 1
return (output_length + stride - 1) // stride
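# Worked example of the floor behavior this patch introduces (an illustrative
# sketch, not part of the patched API): with padding='same', kernel size 2 and
# stride 2, an odd input length of 11 gives (11 + 2*0 - 2) // 2 + 1 = 5, i.e.
# the output length is floored, whereas stock Keras 'same' padding would give
# ceil(11 / 2) = 6.
#
#     conv_output_length(11, 2, 'same', 2)  # -> 5
#     conv_output_length(12, 2, 'same', 2)  # -> 6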
def conv_compute_output_shape(self, input_shape):
"""Compute the output dimension of a convolution."""
if self.data_format == 'channels_last':
space = input_shape[1:-1]
new_space = []
for i in range(len(space)):
new_dim = conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
return (input_shape[0],) + tuple(new_space) + (self.filters,)
if self.data_format == 'channels_first':
space = input_shape[2:]
new_space = []
for i in range(len(space)):
new_dim = conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
return (input_shape[0], self.filters) + tuple(new_space)
return None
def pool_compute_output_shape(self, input_shape):
"""Compute the output dimension of a pooling."""
if self.data_format == 'channels_first':
rows = input_shape[2]
cols = input_shape[3]
elif self.data_format == 'channels_last':
rows = input_shape[1]
cols = input_shape[2]
rows = conv_output_length(rows, self.pool_size[0],
self.padding, self.strides[0])
cols = conv_output_length(cols, self.pool_size[1],
self.padding, self.strides[1])
if self.data_format == 'channels_first':
return (input_shape[0], input_shape[1], rows, cols)
if self.data_format == 'channels_last':
return (input_shape[0], rows, cols, input_shape[3])
return None
def patch():
"""Apply the patches to the module."""
keras.layers.MaxPooling2D.compute_output_shape = pool_compute_output_shape
keras.layers.Conv2D.compute_output_shape = conv_compute_output_shape
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/models/patch_keras.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IVA SSD model construction wrapper functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import BatchNormalization, Conv2D, ReLU
from nvidia_tao_tf1.cv.common.models.backbones import get_backbone
def _expansion_module(input_tensor,
expansion_config,
data_format,
kernel_regularizer,
bias_regularizer):
'''Add expansion layers to input_tensor.
Args:
input_tensor: keras tensor to expand on
expansion_config: list of length n (n is the final feature maps needed). Each element is a
sublist for convs inside. Each element in this sublist is 5-element tuple (or list) for
(num_filters, kernel_size, strides, dilation, need_bn). The paper VGG16 SSD300 expansion
can be generated using `expansion_config = [[(1024, 3, 1, 6, False),
(1024, 1, 1, 1, False)], [(256, 1, 1, 1, False), (512, 3, 2, 1, False)],
[(128, 1, 1, 1, False), (256, 3, 2, 1, False)],
[(128, 1, 1, 1, False), (256, 3, 1, 1, False)],
[(128, 1, 1, 1, False), (256, 3, 1, 1, False)]]`
data_format: data_format
kernel_regularizer: kernel_regularizer
bias_regularizer: bias_regularizer
Returns:
        feature_maps: length n list of keras tensors, each of which is the output tensor
            of the last conv layer in the corresponding sublist of expansion_config.
'''
x = input_tensor
feature_maps = []
bn_axis = 1 if data_format == 'channels_first' else 3
for b_id, block in enumerate(expansion_config):
for c_id, conv in enumerate(block):
n, k, s, d, bn = conv
x = Conv2D(n,
kernel_size=k,
strides=s,
dilation_rate=d,
padding='same',
data_format=data_format,
activation=None,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
use_bias=(not bn),
name='ssd_expand_block_%d_conv_%d' % (b_id, c_id))(x)
if bn:
x = BatchNormalization(axis=bn_axis,
name='ssd_expand_block_%d_bn_%d' % (b_id, c_id))(x)
x = ReLU(name='ssd_expand_block_%d_relu_%d' % (b_id, c_id))(x)
feature_maps.append(x)
return feature_maps
def get_raw_ssd_model(input_tensor,
arch,
nlayers,
kernel_regularizer=None,
bias_regularizer=None,
freeze_blocks=None,
freeze_bn=None):
'''Return feature maps same as original SSD paper.
Args:
input_tensor: image tensor
arch: feature extractor arch
nlayers: arch layers
kernel_regularizer: kernel_regularizer
bias_regularizer: bias_regularizer
freeze_blocks: freeze_blocks
freeze_bn: freeze_bn
Returns:
layers: (default 6) keras tensors for feature maps.
'''
base_model = get_backbone(input_tensor,
arch,
data_format='channels_first',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
freeze_blocks=freeze_blocks,
freeze_bn=freeze_bn,
nlayers=nlayers,
use_batch_norm=True,
use_pooling=False,
use_bias=False,
all_projections=True,
dropout=1e-3,
force_relu=True)
large_net_expansion = [[(512, 1, 1, 1, False), (512, 3, 1, 1, True)],
[(256, 1, 1, 1, False), (512, 3, 2, 1, True)],
[(128, 1, 1, 1, False), (256, 3, 2, 1, True)],
[(128, 1, 1, 1, False), (256, 3, 2, 1, True)],
[(128, 1, 1, 1, False), (256, 3, 2, 1, True)]]
mid_net_expansion = [[(256, 1, 1, 1, False), (256, 3, 1, 1, True)],
[(128, 1, 1, 1, False), (256, 3, 2, 1, True)],
[(64, 1, 1, 1, False), (128, 3, 2, 1, True)],
[(64, 1, 1, 1, False), (128, 3, 2, 1, True)],
[(64, 1, 1, 1, False), (128, 3, 2, 1, True)]]
sm_net_expansion = [[],
[(64, 1, 1, 1, False), (128, 3, 2, 1, True)],
[(64, 1, 1, 1, False), (128, 3, 2, 1, True)],
[(64, 1, 1, 1, False), (128, 3, 2, 1, True)],
[(64, 1, 1, 1, False), (128, 3, 2, 1, True)]]
if arch == 'resnet':
exp_config = large_net_expansion
if nlayers == 10:
fmaps = ['block_2a_relu']
exp_config = sm_net_expansion
elif nlayers == 18:
fmaps = ['block_2b_relu']
exp_config = mid_net_expansion
elif nlayers == 34:
fmaps = ['block_2d_relu']
exp_config = mid_net_expansion
elif nlayers == 50:
fmaps = ['block_2d_relu']
elif nlayers == 101:
fmaps = ['block_2d_relu']
else:
raise ValueError("ResNet-{} architecture is currently not implemented\n"
"Please choose out of the following:\n{}.".
format(nlayers, '10, 18, 34, 50, 101'))
elif arch == 'vgg':
exp_config = large_net_expansion
if nlayers == 16:
fmaps = ['block_4c_relu']
elif nlayers == 19:
fmaps = ['block_4d_relu']
else:
raise ValueError("ResNet-{} architecture is currently not implemented\n"
"Please choose out of the following:\n{}.".
format(nlayers, '16, 19'))
elif arch == 'darknet':
if nlayers == 19:
exp_config = mid_net_expansion
fmaps = ['b3_conv3_lrelu', 'b4_conv5_lrelu']
elif nlayers == 53:
exp_config = large_net_expansion
fmaps = ['b3_add8', 'b4_add8']
else:
raise ValueError("DarkNet-{} architecture is currently not implemented\n"
"Please choose out of the following:\n{}.".
format(nlayers, '19, 53'))
exp_config = exp_config[:-1]
elif arch == 'mobilenet_v1':
fmaps = ['conv_pw_relu_5']
exp_config = sm_net_expansion
elif arch == 'mobilenet_v2':
fmaps = ['re_lu_7']
exp_config = sm_net_expansion
elif arch == 'squeezenet':
fmaps = ['fire8']
exp_config = sm_net_expansion
elif arch == 'googlenet':
fmaps = ['inception_3b_output']
exp_config = large_net_expansion
elif arch == 'efficientnet_b0':
fmaps = ['block4a_expand_activation', 'block6a_expand_activation']
exp_config = large_net_expansion
exp_config = exp_config[:-1]
else:
raise ValueError("{} architecture is currently not implemented\n".
format(arch))
exp_fmaps = _expansion_module(base_model.layers[-1].output,
exp_config,
data_format='channels_first',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer)
return [base_model.get_layer(l).output for l in fmaps] + exp_fmaps
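# Illustrative usage sketch (mirrors the unit tests for this module; the input
# shape is arbitrary):
#
#     import keras
#     image = keras.layers.Input(shape=(3, 300, 300))
#     feature_maps = get_raw_ssd_model(image, 'resnet', 18)  # list of 6 tensors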
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/models/ssd_backbones.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test dssd arch."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import keras
import numpy as np
import pytest
import tensorflow as tf
from nvidia_tao_tf1.cv.ssd.models.dssd_arch import attach_pred_layers, generate_dssd_layers
from nvidia_tao_tf1.cv.ssd.models.patch_keras import patch as ssd_keras_patch
# Patching SSD Keras.
ssd_keras_patch()
def build_dummy_model(tensor_shape):
data_format = 'channels_first' if tensor_shape[0] == 3 else 'channels_last'
layers = [keras.layers.Input(shape=tensor_shape)]
for _ in range(5):
new_layer = keras.layers.Conv2D(512,
kernel_size=3,
strides=2,
padding='same',
data_format=data_format,
use_bias=False)(layers[-1])
layers.append(new_layer)
return layers, data_format
def test_dssd_layers_shape():
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = 0.9
sess = tf.Session(config=config)
keras.backend.set_session(sess)
test_input_shape = [(3, 512, 512), (512, 51, 3), (1, 3, 3), (5, 3, 3), (64, 51, 3), (3, 11, 64)]
for idx, input_shape in enumerate(test_input_shape):
layers, data_format = build_dummy_model(input_shape)
new_layers = generate_dssd_layers(layers, data_format)
model = keras.models.Model(inputs=layers[0], outputs=new_layers)
pred = model.predict(np.random.rand(3, *input_shape))
for i in range(6):
assert pred[i][0].shape == layers[i].shape[1:]
if idx < 3:
continue
# test pred module
no_pred_layers = attach_pred_layers(new_layers, 0, data_format)
no_pred_model = keras.models.Model(inputs=layers[0], outputs=no_pred_layers)
# same model
assert no_pred_model.count_params() == model.count_params()
pred = no_pred_model.predict(np.random.rand(3, *input_shape))
for i in range(6):
assert pred[i][0].shape == layers[i].shape[1:]
for pred_channel in [1, 8, 9999]:
with pytest.raises(AssertionError):
attach_pred_layers(new_layers, pred_channel, data_format)
for pred_channel in [256, 512, 1024]:
pred_layers = attach_pred_layers(new_layers, pred_channel, data_format)
pred_model = keras.models.Model(inputs=layers[0], outputs=pred_layers)
pred = pred_model.predict(np.random.rand(3, *input_shape))
for i in range(6):
if data_format == 'channels_first':
assert pred[i][0].shape[1:] == layers[i].shape[2:]
assert pred[i][0].shape[0] == pred_channel
else:
assert pred[i][0].shape[:-1] == layers[i].shape[1:-1]
assert pred[i][0].shape[-1] == pred_channel
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/models/tests/test_dssd_arch.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test wrapper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import keras
import numpy as np
import tensorflow as tf
from nvidia_tao_tf1.cv.ssd.models.base_model import get_base_model
from nvidia_tao_tf1.cv.ssd.models.patch_keras import patch as ssd_keras_patch
# Patching SSD Keras.
ssd_keras_patch()
def test_base_model():
'''
Test wrapper.
    No need for an exhaustive test as that is done in test_ssd_backbones.
'''
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = 0.9
sess = tf.Session(config=config)
keras.backend.set_session(sess)
arch_configs = [[(3, 33, 128), 'resnet', 18, True, 256],
[(3, 33, 128), 'resnet', 18, False, 0],
[(3, 33, 128), 'darknet', 53, True, 1024],
[(3, 33, 128), 'resnet', 10, True, 256],
[(3, 33, 128), 'mobilenet_v2', 18, True, 256],
[(3, 33, 128), 'resnet', 50, True, 256],
[(3, 33, 128), 'resnet', 101, True, 1024]]
for arch_config in arch_configs:
input_tensor = keras.layers.Input(shape=arch_config[0])
ssd_dssd = get_base_model(input_tensor, *arch_config[1:])
pred_model = keras.models.Model(inputs=input_tensor, outputs=ssd_dssd)
pred_model.predict(np.random.rand(1, *arch_config[0]))
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/models/tests/test_base_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test ssd base."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import keras
import numpy as np
import tensorflow as tf
from nvidia_tao_tf1.cv.ssd.models.patch_keras import patch as ssd_keras_patch
from nvidia_tao_tf1.cv.ssd.models.ssd_backbones import get_raw_ssd_model
# Patching SSD Keras.
ssd_keras_patch()
def test_ssd_arch():
    '''To make sure all the SSD models work.'''
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = 0.9
sess = tf.Session(config=config)
keras.backend.set_session(sess)
input_shape = (3, 33, 32)
archs = [('resnet', x) for x in [10, 18, 34, 50, 101]] + [('darknet', 19), ('darknet', 53)] + \
[('vgg', 19), ('vgg', 16)] + [('squeezenet', 0), ('googlenet', 0)] + \
[('mobilenet_v1', 0), ('mobilenet_v2', 0)]
input_tensor = keras.layers.Input(shape=input_shape)
for arch, nlayers in archs:
out_layers = get_raw_ssd_model(input_tensor, arch, nlayers)
pred_model = keras.models.Model(inputs=input_tensor, outputs=out_layers)
pred_model.predict(np.random.rand(1, *input_shape))
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/models/tests/test_ssd_backbones.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SSD Loss for training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class SSDLoss:
'''The SSD loss, see https://arxiv.org/abs/1512.02325.'''
def __init__(self,
neg_pos_ratio=3,
n_neg_min=0,
alpha=1.0):
'''
Initialization of SSD Loss.
Arguments:
neg_pos_ratio (int, optional): The maximum ratio of negative (i.e. background)
to positive ground truth boxes to include in the loss computation.
There are no actual background ground truth boxes of course, but `y_true`
contains anchor boxes labeled with the background class. Since
the number of background boxes in `y_true` will usually exceed
the number of positive boxes by far, it is necessary to balance
their influence on the loss. Defaults to 3 following the paper.
n_neg_min (int, optional): The minimum number of negative ground truth boxes to
enter the loss computation *per batch*. This argument can be used to make
sure that the model learns from a minimum number of negatives in batches
in which there are very few, or even none at all, positive ground truth
boxes. It defaults to 0 and if used, it should be set to a value that
stands in reasonable proportion to the batch size used for training.
alpha (float, optional): A factor to weight the localization loss in the
computation of the total loss. Defaults to 1.0 following the paper.
'''
self.neg_pos_ratio = neg_pos_ratio
self.n_neg_min = n_neg_min
self.alpha = alpha
def smooth_L1_loss(self, y_true, y_pred):
'''
Compute smooth L1 loss, see references.
Arguments:
y_true (nD tensor): A TensorFlow tensor of any shape containing the ground truth data.
In this context, the expected tensor has shape `(batch_size, #boxes, 4)` and
contains the ground truth bounding box coordinates, where the last dimension
contains `(xmin, xmax, ymin, ymax)`.
y_pred (nD tensor): A TensorFlow tensor of identical structure to `y_true` containing
the predicted data, in this context the predicted bounding box coordinates.
Returns:
The smooth L1 loss, a nD-1 Tensorflow tensor. In this context a 2D tensor
of shape (batch, n_boxes_total).
References:
https://arxiv.org/abs/1504.08083
'''
absolute_loss = tf.abs(y_true - y_pred)
square_loss = 0.5 * (y_true - y_pred)**2
l1_loss = tf.where(tf.less(absolute_loss, 1.0),
square_loss, absolute_loss - 0.5)
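        # Worked example of the piecewise definition above (illustrative values):
        # a per-coordinate difference of 0.5 falls in the quadratic branch,
        # 0.5 * 0.5**2 = 0.125, while a difference of 2.0 falls in the linear
        # branch, 2.0 - 0.5 = 1.5. The four coordinate losses of each box are
        # then summed along the last axis below.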
return tf.reduce_sum(l1_loss, axis=-1)
def log_loss(self, y_true, y_pred):
'''
Compute the softmax log loss.
Arguments:
y_true (nD tensor): A TensorFlow tensor of any shape containing the ground truth data.
In this context, the expected tensor has shape (batch_size, #boxes, #classes)
and contains the ground truth bounding box categories.
y_pred (nD tensor): A TensorFlow tensor of identical structure to `y_true` containing
the predicted data, in this context the predicted bounding box categories.
Returns:
The softmax log loss, a nD-1 Tensorflow tensor. In this context a 2D tensor
of shape (batch, n_boxes_total).
'''
# Make sure that `y_pred` doesn't contain any zeros (which would break the log function)
y_pred = tf.maximum(y_pred, 1e-15)
# Compute the log loss
log_loss = -tf.reduce_sum(y_true * tf.log(y_pred), axis=-1)
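        # Worked example (illustrative values): for a one-hot target [0, 1, 0] and
        # predicted probabilities [0.2, 0.7, 0.1], the loss for that box is
        # -log(0.7) ~= 0.357.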
return log_loss
def compute_loss(self, y_true, y_pred):
'''
Compute the loss of the SSD model prediction against the ground truth.
Arguments:
y_true (array): A Numpy array of shape `(batch_size, #boxes, #classes + 12)`,
where `#boxes` is the total number of boxes that the model predicts
per image. Be careful to make sure that the index of each given
box in `y_true` is the same as the index for the corresponding
box in `y_pred`. The last axis must have length `#classes + 12` and contain
`[classes one-hot encoded, 4 ground truth box coordinate offsets,
8 arbitrary entries]`
in this order, including the background class. The last eight entries of the
last axis are not used by this function and therefore their contents are
irrelevant, they only exist so that `y_true` has the same shape as `y_pred`,
where the last four entries of the last axis contain the anchor box
coordinates, which are needed during inference. Important: Boxes that
you want the cost function to ignore need to have a one-hot
class vector of all zeros.
y_pred (Keras tensor): The model prediction. The shape is identical
to that of `y_true`,
i.e. `(batch_size, #boxes, #classes + 12)`.
The last axis must contain entries in the format
`[classes one-hot encoded, 4 predicted box coordinate offsets,
8 arbitrary entries]`.
Returns:
A scalar, the total multitask loss for classification and localization.
'''
self.neg_pos_ratio = tf.constant(self.neg_pos_ratio)
self.n_neg_min = tf.constant(self.n_neg_min)
self.alpha = tf.constant(self.alpha)
batch_size = tf.shape(y_pred)[0] # Output dtype: tf.int32
# Output dtype: tf.int32, note that `n_boxes` in this context denotes
# the total number of boxes per image, not the number of boxes per cell.
n_boxes = tf.shape(y_pred)[1]
# 1: Compute the losses for class and box predictions for every box.
# Output shape: (batch_size, n_boxes)
classification_loss = tf.to_float(self.log_loss(y_true[:, :, :-12],
y_pred[:, :, :-12]))
# Output shape: (batch_size, n_boxes)
localization_loss = tf.to_float(self.smooth_L1_loss(y_true[:, :, -12:-8],
y_pred[:, :, -12:-8]))
# 2: Compute the classification losses for the positive and negative targets.
# Create masks for the positive and negative ground truth classes.
# Tensor of shape (batch_size, n_boxes)
negatives = y_true[:, :, 0]
# Tensor of shape (batch_size, n_boxes)
positives = tf.to_float(tf.reduce_max(y_true[:, :, 1:-12], axis=-1))
# Count the number of positive boxes (classes 1 to n) in y_true across the whole batch.
n_positive = tf.reduce_sum(positives)
# Now mask all negative boxes and sum up the losses for the positive boxes PER batch item
# (Keras loss functions must output one scalar loss value PER batch item, rather than just
# one scalar for the entire batch, that's why we're not summing across all axes).
# Tensor of shape (batch_size,)
pos_class_loss = tf.reduce_sum(
classification_loss * positives, axis=-1)
# Compute the classification loss for the negative default boxes (if there are any).
# First, compute the classification loss for all negative boxes.
# Tensor of shape (batch_size, n_boxes)
neg_class_loss_all = classification_loss * negatives
n_neg_losses = tf.count_nonzero(neg_class_loss_all, dtype=tf.int32)
n_negative_keep = tf.minimum(tf.maximum(self.neg_pos_ratio * tf.to_int32(n_positive),
self.n_neg_min), n_neg_losses)
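        # Worked example of the line above (illustrative numbers): with
        # neg_pos_ratio=3, n_neg_min=0, 4 positive boxes in the batch and 100
        # non-zero negative losses, n_negative_keep = min(max(3 * 4, 0), 100) = 12,
        # i.e. only the 12 hardest negatives enter the loss.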
        # If there are no non-zero negative losses at all, the negative class loss
        # is simply zero.
        def f1():
            return tf.zeros([batch_size])
# Otherwise compute the negative loss.
def f2():
# Tensor of shape (batch_size * n_boxes,)
neg_class_loss_all_1D = tf.reshape(neg_class_loss_all, [-1])
_, indices = tf.nn.top_k(neg_class_loss_all_1D,
k=n_negative_keep,
sorted=False)
# ...and with these indices we'll create a mask...
negatives_keep = tf.scatter_nd(indices=tf.expand_dims(indices, axis=1),
updates=tf.ones_like(
indices, dtype=tf.int32),
shape=tf.shape(neg_class_loss_all_1D))
negatives_keep = tf.to_float(tf.reshape(
negatives_keep, [batch_size, n_boxes]))
neg_class_loss = tf.reduce_sum(
classification_loss * negatives_keep, axis=-1)
return neg_class_loss
neg_class_loss = tf.cond(
tf.equal(n_neg_losses, tf.constant(0)), f1, f2)
# Tensor of shape (batch_size,)
class_loss = pos_class_loss + neg_class_loss
        # 3: Compute the localization loss for the positive targets only.
        loc_loss = tf.reduce_sum(localization_loss * positives, axis=-1)
        # 4: Compute the total loss.
total_loss = (class_loss + self.alpha * loc_loss) / \
tf.maximum(1.0, n_positive)
total_loss = total_loss * tf.to_float(batch_size)
return total_loss
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/architecture/ssd_loss.py |
tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/architecture/__init__.py |
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IVA SSD base architecture."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Concatenate, Conv2D, Input, Permute, Reshape, Softmax
from keras.models import Model
import numpy as np
from nvidia_tao_tf1.core.models.quantize_keras_model import create_quantized_keras_model
from nvidia_tao_tf1.cv.ssd.layers.anchor_box_layer import AnchorBoxes
from nvidia_tao_tf1.cv.ssd.models.base_model import get_base_model
def ssd(image_size,
n_classes,
is_dssd,
kernel_regularizer=None,
freeze_blocks=None,
freeze_bn=None,
min_scale=None,
max_scale=None,
scales=None,
aspect_ratios_global=None,
aspect_ratios_per_layer=None,
two_boxes_for_ar1=False,
steps=None,
offsets=None,
clip_boxes=False,
variances=None,
confidence_thresh=0.01,
iou_threshold=0.45,
top_k=200,
nms_max_output_size=400,
arch="resnet",
nlayers=10,
pred_num_channels=0,
input_tensor=None,
qat=True):
'''
    Build a Keras model with SSD architecture, see references.
    The base network is a configurable feature extractor (selected via `arch` and
    `nlayers`), extended by the SSD architecture as described in the paper.
Most of the arguments that this function takes are only needed for the anchor
box layers. In case you're training the network, the parameters passed here must
be the same as the ones used to set up `SSDBoxEncoder`. In case you're loading
trained weights, the parameters passed here must be the same as the ones used
to produce the trained weights.
Some of these arguments are explained in more detail in the documentation of the
`SSDBoxEncoder` class.
Note: Requires Keras v2.0 or later. Currently works only with the
TensorFlow backend (v1.0 or later).
Arguments:
image_size (tuple): The input image size in the format `(channels, height, width)`.
n_classes (int): The number of positive classes plus background class,
e.g. 21 for Pascal VOC, 81 for MS COCO.
is_dssd (bool): Is it a DSSD model or SSD model.
arch (string): Network architecture
nlayers (int): Property of the arch
pred_num_channels (int): number of channels for convs inside pred module.
kernel_regularizer (float, optional): Applies to all convolutional layers.
freeze_blocks (list, optional): The indices of the freezed subblocks
freeze_bn (boolean, optional): Whether to freeze the BN layers
min_scale (float, optional): The smallest scaling factor for the size of the anchor
boxes as a fraction of the shorter side of the input images.
max_scale (float, optional): The largest scaling factor for the size of the anchor boxes
as a fraction of the shorter side of the input images. All scaling factors between
the smallest and the largest will be linearly interpolated. Note that the second to
last of the linearly interpolated scaling factors will actually be the scaling factor
for the last predictor layer, while the last scaling factor is used for the second box
for aspect ratio 1 in the last predictor layer if `two_boxes_for_ar1` is `True`.
scales (list, optional): A list of floats containing scaling factors per convolutional
predictor layer. This list must be one element longer than the number of predictor
layers. The first `k` elements are the scaling factors for the `k` predictor layers,
while the last element is used for the second box for aspect ratio 1 in the last
predictor layer if `two_boxes_for_ar1` is `True`. This additional last scaling factor
must be passed either way, even if it is not being used. If a list is passed, this
argument overrides `min_scale` and `max_scale`. All scaling factors must be greater
than zero.
aspect_ratios_global (list, optional): The list of aspect ratios for which anchor boxes are
to be generated. This list is valid for all prediction layers.
aspect_ratios_per_layer (list, optional): A list containing one aspect ratio list for each
prediction layer. This allows you to set the aspect ratios for each predictor layer
individually, which is the case for the original SSD300 implementation. If a list is
passed, it overrides `aspect_ratios_global`.
two_boxes_for_ar1 (bool, optional): Only relevant for aspect ratio lists that contain 1.
Will be ignored otherwise. If `True`, two anchor boxes will be generated for aspect
ratio 1. The first will be generated using the scaling factor for the respective layer,
the second one will be generated using geometric mean of said scaling factor and next
bigger scaling factor.
steps (list, optional): `None` or a list with as many elements as there are pred layers.
The elements can be either ints/floats or tuples of two ints/floats. These numbers
represent for each predictor layer how many pixels apart the anchor box center points
should be vertically and horizontally along the spatial grid over the image. If the
list contains ints/floats, then that value will be used for both spatial dimensions.
If the list contains tuples of two ints/floats, then they represent
`(step_height, step_width)`. If no steps are provided, then they will be computed such
that the anchor box center points will form an equidistant grid within the image
dimensions.
offsets (list, optional): `None` or a list with as many elements as there are predictor
layers. The elements can be either floats or tuples of two floats. These numbers
            represent for each predictor layer how many pixels from the top and left borders of
the image the top-most and left-most anchor box center points should be as a fraction of
`steps`. The last bit is important: The offsets are not absolute pixel values, but
fractions of the step size specified in the `steps` argument. If the list contains
floats, then that value will be used for both spatial dimensions. If the list contains
tuples of two floats, then they represent `(vertical_offset, horizontal_offset)`. If no
offsets are provided, then they will default to 0.5 of the step size.
clip_boxes (bool, optional): If `True`, clips the anchor box coordinates to stay within
image boundaries.
variances (list, optional): A list of 4 floats >0. The anchor box offset for each coordinate
will be divided by its respective variance value.
confidence_thresh (float, optional): A float in [0,1), the minimum classification confidence
in a specific positive class in order to be considered for the non-maximum suppression
stage for the respective class. A lower value will result in a larger part of the
selection process being done by the non-maximum suppression stage, while a larger value
will result in a larger part of the selection process happening in the confidence
thresholding stage.
iou_threshold (float, optional): A float in [0,1]. All boxes that have a Jaccard similarity
of greater than `iou_threshold` with a locally maximal box will be removed from the set
of predictions for a given class, where 'maximal' refers to the box's confidence score.
top_k (int, optional): The number of highest scoring predictions to be kept for each batch
item after the non-maximum suppression stage.
nms_max_output_size (int, optional): The maximal number of predictions that will be left
over after the NMS stage.
        input_tensor (Keras tensor, optional): The image input tensor to build the
            network on.
        qat (bool): If `True`, build a quantization-aware model.
Returns:
        model: The Keras SSD model.
predictor_sizes (optional): A Numpy array containing the `(height, width)` portion
of the output tensor shape for each convolutional predictor layer. During
training, the generator function needs this in order to transform
the ground truth labels into tensors of identical structure as the
output tensors of the model, which is in turn needed for the cost
function.
References:
https://arxiv.org/abs/1512.02325v5
'''
# The number of predictor conv layers is 6 for the original SSD300.
n_predictor_layers = 6
img_channels, img_height, img_width = image_size[0], image_size[1], image_size[2]
############################################################################
# Get a few exceptions out of the way.
############################################################################
if aspect_ratios_global is None and aspect_ratios_per_layer is None:
raise ValueError("`aspect_ratios_global` and `aspect_ratios_per_layer` cannot both be None.\
At least one needs to be specified.")
if aspect_ratios_per_layer:
if len(aspect_ratios_per_layer) != n_predictor_layers:
raise ValueError("It must be either aspect_ratios_per_layer is None or \
len(aspect_ratios_per_layer) == {}, but len(aspect_ratios_per_layer) == {}."
.format(n_predictor_layers, len(aspect_ratios_per_layer)))
if (min_scale is None or max_scale is None) and scales is None:
raise ValueError(
"Either `min_scale` and `max_scale` or `scales` need to be specified.")
if scales:
if len(scales) != n_predictor_layers+1:
raise ValueError("It must be either scales is None or len(scales) == {}, but \
len(scales) == {}.".format(n_predictor_layers+1, len(scales)))
else: # If no explicit list of scaling factors was passed, compute the list of scaling factors
scales = np.linspace(min_scale, max_scale, n_predictor_layers+1)
if len(variances) != 4:
raise ValueError("4 variance values must be pased, but {} values were received."
.format(len(variances)))
variances = np.array(variances)
if np.any(variances <= 0):
raise ValueError("All variances must be >0, but the variances given are {}"
.format(variances))
if (steps is not None) and (len(steps) != n_predictor_layers):
raise ValueError(
"You must provide exactly one step value per predictor layer.")
if (offsets is not None) and (len(offsets) != n_predictor_layers):
raise ValueError(
"You must provide exactly one offset value per predictor layer.")
############################################################################
# Compute the anchor box parameters.
############################################################################
# Set the aspect ratios for each predictor layer. These are needed for the anchor box layers.
if aspect_ratios_per_layer:
aspect_ratios = aspect_ratios_per_layer
else:
aspect_ratios = [aspect_ratios_global] * n_predictor_layers
# Compute the number of boxes to be predicted per cell for each predictor layer.
# We need this so that we know how many channels the predictor layers need to have.
if aspect_ratios_per_layer:
n_boxes = []
for ar in aspect_ratios_per_layer:
if (1 in ar) and two_boxes_for_ar1:
# +1 for the second box for aspect ratio 1
n_boxes.append(len(ar) + 1)
else:
n_boxes.append(len(ar))
else:
# If only a global aspect ratio list was passed, then the number of boxes is the same
# for each predictor layer
if (1 in aspect_ratios_global) and two_boxes_for_ar1:
n_boxes = len(aspect_ratios_global) + 1
else:
n_boxes = len(aspect_ratios_global)
n_boxes = [n_boxes] * n_predictor_layers
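# Example: aspect_ratios_global=[1, 0.5, 2] with two_boxes_for_ar1=False yields
# n_boxes = [3, 3, 3, 3, 3, 3], i.e. three anchors per grid cell on every
# predictor layer (see tests/test_arch.py for the resulting total box count).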
if steps is None:
steps = [None] * n_predictor_layers
if offsets is None:
offsets = [None] * n_predictor_layers
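# Illustrative sketch (not executed here): assuming the equidistant-grid
# convention described in the docstring, the anchor center points for a
# feature map of size (fm_h, fm_w) would be derived roughly as follows,
# with `step` defaulting to image_size / feature_map_size and `offset` to 0.5:
#
#     import numpy as np
#     def anchor_centers(fm_h, fm_w, img_h, img_w, offset=0.5):
#         step_y, step_x = img_h / fm_h, img_w / fm_w
#         cy = (np.arange(fm_h) + offset) * step_y  # vertical centers
#         cx = (np.arange(fm_w) + offset) * step_x  # horizontal centers
#         return np.meshgrid(cx, cy)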
############################################################################
# Build the network.
############################################################################
if input_tensor is None:
x = Input(shape=(img_channels, img_height, img_width), name="Input")
else:
x = Input(tensor=input_tensor, name="Input")
feature_map_list = get_base_model(x, arch, nlayers, is_dssd, pred_num_channels,
kernel_regularizer=kernel_regularizer,
freeze_blocks=freeze_blocks,
freeze_bn=freeze_bn)
if len(feature_map_list) != 6:
raise ValueError('Expected 6 feature maps from the base model, '
'got {}.'.format(len(feature_map_list)))
conf_list = []
loc_list = []
anchor_list = []
for idx, feature_map in enumerate(feature_map_list):
conf = Conv2D(n_boxes[idx] * n_classes, (3, 3),
padding='same',
kernel_initializer='he_normal',
kernel_regularizer=kernel_regularizer,
name='ssd_conf_'+str(idx))(feature_map)
loc = Conv2D(n_boxes[idx] * 4, (3, 3),
padding='same',
kernel_initializer='he_normal',
kernel_regularizer=kernel_regularizer,
name='ssd_loc_'+str(idx))(feature_map)
conf_list.append(conf)
loc_list.append(loc)
if qat:
raw_model = Model(inputs=x, outputs=conf_list+loc_list)
qat_model = create_quantized_keras_model(raw_model)
conf_list = []
loc_list = []
for idx in range(len(feature_map_list)):
conf_list.append(qat_model.get_layer('ssd_conf_'+str(idx)).output)
loc_list.append(qat_model.get_layer('ssd_loc_'+str(idx)).output)
for idx, loc in enumerate(loc_list):
anchor = AnchorBoxes(img_height, img_width,
this_scale=scales[idx],
next_scale=scales[idx+1],
aspect_ratios=aspect_ratios[idx],
two_boxes_for_ar1=two_boxes_for_ar1,
this_steps=steps[idx],
this_offsets=offsets[idx],
clip_boxes=clip_boxes,
variances=variances,
name='ssd_anchor_'+str(idx))(loc)
anchor_list.append(anchor)
# Use `t` as the comprehension variable to avoid shadowing the model input `x`.
conf_list = [Reshape((-1, 1, n_classes),
name='conf_reshape_'+str(idx)
)(Permute((2, 3, 1))(t)) for idx, t in enumerate(conf_list)]
loc_list = [Reshape((-1, 1, 4),
name='loc_reshape_'+str(idx)
)(Permute((2, 3, 1))(t)) for idx, t in enumerate(loc_list)]
anchor_list = [Reshape((-1, 1, 8),
name='anchor_reshape_'+str(idx)
)(t) for idx, t in enumerate(anchor_list)]
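# Per-layer shape bookkeeping after the reshapes above, with
# k_i = fm_h_i * fm_w_i * n_boxes[i]:
#   conf_list[i]:   (batch, k_i, 1, n_classes)
#   loc_list[i]:    (batch, k_i, 1, 4)
#   anchor_list[i]: (batch, k_i, 1, 8)  # 4 anchor coords + 4 variances,
#                                       # following the AnchorBoxes layer layout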
# Concatenate the predictions from the different layers.
# Axis 0 (batch) and the last two axes (the singleton axis and n_classes/4/8,
# respectively) are identical across layer predictions, so we concatenate
# along axis 1, the number of boxes per layer.
# Output shape of `mbox_conf`: (batch, n_boxes_total, 1, n_classes)
mbox_conf = Concatenate(axis=1, name='mbox_conf')(conf_list)
# Output shape of `mbox_loc`: (batch, n_boxes_total, 1, 4)
mbox_loc = Concatenate(axis=1, name='mbox_loc')(loc_list)
# Output shape of `mbox_priorbox`: (batch, n_boxes_total, 1, 8)
mbox_priorbox = Concatenate(axis=1, name='mbox_priorbox')(anchor_list)
# The box coordinate predictions will go into the loss function just the way they are,
# but for the class predictions, we'll apply a softmax activation layer first
# @TODO(tylerz): Using softmax for class prediction
mbox_conf = Permute((3, 2, 1), name="before_softmax_permute")(mbox_conf)
mbox_conf_softmax = Softmax(axis=1, name='mbox_conf_softmax_')(mbox_conf)
mbox_conf_softmax = Permute(
(3, 2, 1), name='mbox_conf_softmax')(mbox_conf_softmax)
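# Note: Keras' Softmax is applied along a fixed axis, so the class axis is
# permuted to axis 1 first and permuted back afterwards. The two permutes are
# inverses of each other, so the net effect is a softmax over the class axis;
# expressing it as an axis-1 softmax is presumably for deployment-parser
# compatibility (an assumption).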
# Concatenate the class and box predictions and the anchors to one large predictions vector
# Output shape of `predictions`: (batch, n_boxes_total, n_classes + 4 + 8)
# @TODO(tylerz): Using softmax for class prediction
predictions = Concatenate(
axis=-1)([mbox_conf_softmax, mbox_loc, mbox_priorbox])
predictions = Reshape((-1, n_classes+4+8),
name='ssd_predictions')(predictions)
return Model(inputs=x, outputs=predictions, name=('dssd_' if is_dssd else 'ssd_')+arch)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/architecture/ssd_arch.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test ssd arch builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nvidia_tao_tf1.cv.ssd.architecture.ssd_arch import ssd
import nvidia_tao_tf1.cv.ssd.models.patch_keras
nvidia_tao_tf1.cv.ssd.models.patch_keras.patch()
def test_arch():
model = ssd((3, 300, 300),
3, True,
kernel_regularizer=None,
freeze_blocks=[0],
freeze_bn=None,
min_scale=0.1,
max_scale=0.8,
scales=None,
aspect_ratios_global=[1, 0.5, 2],
aspect_ratios_per_layer=None,
two_boxes_for_ar1=False,
steps=None,
offsets=None,
clip_boxes=False,
variances=[0.1, 0.1, 0.2, 0.2],
confidence_thresh=0.01,
iou_threshold=0.45,
top_k=200,
nms_max_output_size=400,
arch="resnet",
nlayers=10,
input_tensor=None,
qat=False)
assert model.get_layer('conv1').trainable is False
assert model.get_layer('ssd_predictions').output_shape[-2:] == (5829, 15)
model = ssd((3, 300, 300),
3, False,
kernel_regularizer=None,
freeze_blocks=[0],
freeze_bn=None,
min_scale=0.1,
max_scale=0.8,
scales=None,
aspect_ratios_global=[1, 0.5, 2],
aspect_ratios_per_layer=None,
two_boxes_for_ar1=False,
steps=None,
offsets=None,
clip_boxes=False,
variances=[0.1, 0.1, 0.2, 0.2],
confidence_thresh=0.01,
iou_threshold=0.45,
top_k=200,
nms_max_output_size=400,
arch="resnet",
nlayers=10,
input_tensor=None,
qat=True)
assert model.get_layer('conv1').trainable is False
assert model.get_layer('ssd_predictions').output_shape[-2:] == (5829, 15)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/architecture/tests/test_arch.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test ssd loss."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nvidia_tao_tf1.cv.ssd.architecture.ssd_loss import SSDLoss
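# Layout assumed by the test tensors below: each box row holds 14 values --
# a 2-class one-hot vector (presumably background plus one foreground class),
# 4 box coordinate offsets, and 8 anchor-related values (4 anchor coordinates
# + 4 variances) -- matching the (n_classes + 4 + 8) layout produced by the
# SSD prediction head.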
def test_loss_zero():
ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)
y_true = [[[1, 0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.1, 0.2, 0.2]]]
y_pred = y_true
with tf.Session() as sess:
assert abs(sess.run(ssd_loss.compute_loss(tf.constant(y_true),
tf.constant(y_pred)))[0]) < 1e-5
def test_loss_non_zero_loc():
ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)
y_true = [[[1, 0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.1, 0.2, 0.2]]]
y_pred = [[[1, 0, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.1, 0.2, 0.2]]]
with tf.Session() as sess:
log_loss = sess.run(ssd_loss.log_loss(tf.constant(y_true)[:, :, :-12],
tf.constant(y_pred)[:, :, :-12]))
loc_loss = sess.run(ssd_loss.smooth_L1_loss(tf.constant(y_true)[:, :, -12:-8],
tf.constant(y_pred)[:, :, -12:-8]))
total_loss = sess.run(ssd_loss.compute_loss(tf.constant(y_true), tf.constant(y_pred)))
assert abs(log_loss[0]) < 1e-5
assert abs(loc_loss[0] - 0.00125) < 1e-5
assert abs(total_loss[0]) < 1e-5
def test_loss_non_zero():
ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)
y_true = [[[1, 0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.1, 0.2, 0.2]]]
y_pred = [[[0.3, 0, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.1, 0.2, 0.2]]]
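# With no positive boxes (the one-hot above marks background), the total loss
# is expected to be zero even though the per-term log/loc losses are non-zero,
# since SSDLoss presumably normalizes by the number of positive matches.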
with tf.Session() as sess:
log_loss = sess.run(ssd_loss.log_loss(tf.constant(y_true)[:, :, :-12],
tf.constant(y_pred)[:, :, :-12]))
loc_loss = sess.run(ssd_loss.smooth_L1_loss(tf.constant(y_true)[:, :, -12:-8],
tf.constant(y_pred)[:, :, -12:-8]))
total_loss = sess.run(ssd_loss.compute_loss(tf.constant(y_true), tf.constant(y_pred)))
assert abs(log_loss[0] - 1.2039728) < 1e-5
assert abs(loc_loss[0] - 0.00125) < 1e-5
assert abs(total_loss[0]) < 1e-5
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/architecture/tests/test_loss.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IVA SSD entrypoint scripts."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/scripts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Export an ssd model."""
# import build_command_line_parser as this is needed by entrypoint
from nvidia_tao_tf1.cv.common.export.app import build_command_line_parser # noqa pylint: disable=W0611
from nvidia_tao_tf1.cv.common.export.app import launch_export
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.ssd.export.onnx_exporter import SSDOnnxExporter as Exporter
import nvidia_tao_tf1.cv.ssd.models.patch_keras
nvidia_tao_tf1.cv.ssd.models.patch_keras.patch()
if __name__ == "__main__":
try:
launch_export(Exporter, backend='onnx')
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Export finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Export was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/scripts/export.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command line interface for converting detection datasets to TFRecords."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import glob
import logging
import os
import struct
from google.protobuf.text_format import Merge as merge_text_proto
import tensorflow as tf
from nvidia_tao_tf1.core.utils.path_utils import expand_path
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.detectnet_v2.dataio.build_converter import build_converter
import nvidia_tao_tf1.cv.detectnet_v2.proto.dataset_export_config_pb2 as dataset_export_config_pb2
logger = logging.getLogger(__name__)
def build_command_line_parser(parser=None):
"""Build command line parser for dataset_convert."""
if parser is None:
parser = argparse.ArgumentParser(
prog='dataset_converter',
description='Convert object detection datasets to TFRecords.'
)
parser.add_argument(
'-d',
'--dataset_export_spec',
required=True,
help='Path to the detection dataset spec containing config for exporting .tfrecords.')
parser.add_argument(
'-o',
'--output_filename',
required=True,
help='Output file name.')
parser.add_argument(
'-f',
'--validation_fold',
type=int,
default=argparse.SUPPRESS,
help='Indicate the validation fold in 0-based indexing. \
This is required when modifying the training set but otherwise optional.')
parser.add_argument(
'-v',
'--verbose',
action='store_true',
help="Flag to get detailed logs during the conversion process."
)
parser.add_argument(
"-r",
"--results_dir",
type=str,
default=None,
help="Path to the results directory"
)
parser.add_argument(
"-c",
"--class_names_file",
type=str,
default=None,
help="Path to file contain list of the class names. \
dataset_convert will map class names to index \
starting from 1."
)
return parser
def parse_command_line_args(cl_args=None):
"""Parse sys.argv arguments from commandline.
Args:
cl_args: List of command line arguments.
Returns:
args: list of parsed arguments.
"""
parser = build_command_line_parser()
args = parser.parse_args(cl_args)
return args
def create_tfrecord_idx(tf_record_path, idx_path):
"""
Create index file for a tfrecord.
From: https://github.com/NVIDIA/DALI/blob/master/tools/tfrecord2idx.
"""
f = open(tf_record_path, 'rb')
idx = open(idx_path, 'w')
while True:
current = f.tell()
try:
# length
byte_len = f.read(8)
if len(byte_len) == 0:
break
# crc
f.read(4)
proto_len = struct.unpack('q', byte_len)[0]
# proto
f.read(proto_len)
# crc
f.read(4)
idx.write(str(current) + ' ' + str(f.tell() - current) + '\n')
except Exception:
print("Not a valid TFRecord file")
break
f.close()
idx.close()
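# Minimal verification sketch (hypothetical helper, not part of this tool):
# each index line stores "<offset> <size>", so seeking to `offset` and reading
# `size` bytes should yield exactly one framed TFRecord entry:
#
#     def check_tfrecord_idx(tf_record_path, idx_path):
#         with open(tf_record_path, 'rb') as rec, open(idx_path) as idx:
#             for line in idx:
#                 offset, size = (int(v) for v in line.split())
#                 rec.seek(offset)
#                 assert len(rec.read(size)) == size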
def main(args=None):
"""
Convert an object detection dataset to TFRecords.
Args:
args(list): list of arguments to be parsed if called from another module.
"""
args = parse_command_line_args(cl_args=args)
verbosity = 'INFO'
if args.verbose:
verbosity = 'DEBUG'
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=verbosity)
if args.results_dir is not None:
results_dir = expand_path(args.results_dir)
if not os.path.exists(results_dir):
os.makedirs(results_dir)
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=True,
verbosity=logger.getEffectiveLevel(),
append=False
)
)
status_logging.get_status_logger().write(
data=None,
message="Starting Object Detection Dataset Convert.",
status_level=status_logging.Status.STARTED
)
# Load config from the proto file.
dataset_export_config = dataset_export_config_pb2.DatasetExportConfig()
with open(expand_path(args.dataset_export_spec), "r") as f:
merge_text_proto(f.read(), dataset_export_config)
if not dataset_export_config.target_class_mapping:
if args.class_names_file is not None:
with open(expand_path(args.class_names_file), "r") as f:
classes = sorted({x.strip().lower() for x in f.readlines()})
class_mapping = dict(zip(classes, range(1, len(classes)+1)))
else:
raise ValueError("Set target_class_mapping in dataset convert spec file or "
"specify class_names_file.")
else:
mapping_dict = dataset_export_config.target_class_mapping
classes = sorted({str(x).lower() for x in mapping_dict.values()})
val_class_mapping = dict(
zip(classes, range(1, len(classes)+1)))
class_mapping = {key.lower(): val_class_mapping[str(val.lower())]
for key, val in mapping_dict.items()}
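# Example: target classes ["car", "person"] map to {"car": 1, "person": 2};
# index 0 is implicitly reserved for the background class.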
converter = build_converter(dataset_export_config, args.output_filename, None)
converter.use_dali = True
converter.class2idx = class_mapping
converter.convert()
# Create index file for tfrecord:
data_source = expand_path(args.output_filename) + "*"
tfrecord_path_list = glob.glob(data_source)
# create index for tfrecords
for tfrecord_path in tfrecord_path_list:
root_path, tfrecord_file = os.path.split(tfrecord_path)
idx_path = os.path.join(root_path, "idx-"+tfrecord_file)
if not os.path.exists(idx_path):
create_tfrecord_idx(tfrecord_path, idx_path)
if __name__ == '__main__':
try:
main()
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Dataset convert was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
if type(e) == tf.errors.ResourceExhaustedError:
logger = logging.getLogger(__name__)
logger.error(
"Ran out of GPU memory, please lower the batch size, use a smaller input "
"resolution, or use a smaller backbone."
)
exit(1)
else:
# throw out the error as-is if they are not OOM error
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Dataset convert finished successfully."
)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/scripts/dataset_convert.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perform continuous SSD training on a tfrecords dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
from math import ceil
from multiprocessing import cpu_count
import os
import tempfile
from google.protobuf.json_format import MessageToDict
import keras
from keras import backend as K
from keras.callbacks import EarlyStopping, TerminateOnNaN
from keras.optimizers import SGD
import tensorflow as tf
from nvidia_tao_tf1.core.export._quantized import check_for_quantized_layers
from nvidia_tao_tf1.cv.common.callbacks.enc_model_saver_callback import KerasModelSaver
from nvidia_tao_tf1.cv.common.callbacks.loggers import TAOStatusLogger
from nvidia_tao_tf1.cv.common.evaluator.ap_evaluator import APEvaluator
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.common.mlops.clearml import get_clearml_task
from nvidia_tao_tf1.cv.common.mlops.wandb import check_wandb_logged_in, initialize_wandb
from nvidia_tao_tf1.cv.common.utils import check_tf_oom, hvd_keras, initialize, reg_dict
from nvidia_tao_tf1.cv.common.utils import OneIndexedCSVLogger as CSVLogger
from nvidia_tao_tf1.cv.common.utils import SoftStartAnnealingLearningRateScheduler as LRS
from nvidia_tao_tf1.cv.ssd.architecture.ssd_loss import SSDLoss
from nvidia_tao_tf1.cv.ssd.builders import dataset_builder
from nvidia_tao_tf1.cv.ssd.builders import eval_builder
from nvidia_tao_tf1.cv.ssd.builders import model_builder
from nvidia_tao_tf1.cv.ssd.callbacks.detection_metric_callback_bg import DetectionMetricCallbackBG
from nvidia_tao_tf1.cv.ssd.callbacks.tb_callback import SSDTensorBoard, SSDTensorBoardImage
import nvidia_tao_tf1.cv.ssd.models.patch_keras
from nvidia_tao_tf1.cv.ssd.utils.model_io import CUSTOM_OBJS, load_model
from nvidia_tao_tf1.cv.ssd.utils.spec_loader import (
eval_str,
load_experiment_spec,
validate_train_spec
)
from nvidia_tao_tf1.cv.ssd.utils.tensor_utils import get_init_ops
nvidia_tao_tf1.cv.ssd.models.patch_keras.patch()
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level='INFO')
logger = logging.getLogger(__name__)
verbose = 0
def load_model_as_pretrain(model_path, experiment_spec, is_dssd, use_dali=False,
local_rank=0, shard_id=0, num_shards=1,
key=None, kernel_regularizer=None,
resume_from_training=False):
"""
Load a model as pretrained weights.
If the model is pruned, just return it as-is.
Always returns two models (followed by the train/val datasets and the optimizer,
which may be None): the first is the training model, the second is a template
with an input placeholder.
"""
train_dataset, val_dataset = dataset_builder.build_dataset(experiment_spec, is_dssd,
device_id=local_rank,
shard_id=shard_id,
num_shards=num_shards)
if resume_from_training:
if use_dali:
model_load = load_model(model_path, experiment_spec, is_dssd, None, key)
model_load_train = load_model(model_path, experiment_spec, is_dssd,
train_dataset.images, key)
optimizer = model_load.optimizer
return model_load_train, model_load, train_dataset, val_dataset, optimizer
model_load = load_model(model_path, experiment_spec, is_dssd, None, key)
return model_load, model_load, train_dataset, val_dataset, model_load.optimizer
if use_dali:
input_tensor = train_dataset.images
else:
input_tensor = None
model_train, model_eval = \
model_builder.build(experiment_spec, is_dssd,
input_tensor=input_tensor,
kernel_regularizer=kernel_regularizer)
model_load = load_model(model_path, experiment_spec, is_dssd, None, key)
strict_mode = True
error_layers = []
loaded_layers = []
for layer in model_train.layers[1:]:
# The layer must match up to ssd layers.
if layer.name.find('ssd_') != -1:
strict_mode = False
try:
l_return = model_load.get_layer(layer.name)
except ValueError:
if layer.name[-3:] != 'qdq' and strict_mode:
error_layers.append(layer.name)
# Some layers are not there
continue
try:
wts = l_return.get_weights()
if len(wts) > 0:
layer.set_weights(wts)
loaded_layers.append(layer.name)
except ValueError:
if strict_mode:
# This is a pruned model
print('The shape of this layer does not match the original model:', layer.name)
print('Loading the model as a pruned model.')
model_config = model_load.get_config()
for layer, layer_config in zip(model_load.layers, model_config['layers']):
if hasattr(layer, 'kernel_regularizer'):
layer_config['config']['kernel_regularizer'] = kernel_regularizer
reg_model = keras.models.Model.from_config(model_config, custom_objects=CUSTOM_OBJS)
reg_model.set_weights(model_load.get_weights())
os_handle, temp_file_name = tempfile.mkstemp(suffix='.hdf5')
os.close(os_handle)
reg_model.save(temp_file_name, overwrite=True, include_optimizer=False)
if use_dali:
train_model = load_model(temp_file_name, experiment_spec, is_dssd,
train_dataset.images, None)
else:
train_model = load_model(temp_file_name, experiment_spec, is_dssd,
None, None)
os.remove(temp_file_name)
return train_model, model_load, train_dataset, val_dataset, None
error_layers.append(layer.name)
if len(error_layers) > 0:
print('Weights for these layers could not be loaded:', error_layers)
print('STOP training now and check the pretrained model if this is not expected!')
print("Layers that loaded weights from the pretrained model:", loaded_layers)
return model_train, model_eval, train_dataset, val_dataset, None
def run_experiment(config_path, results_dir, resume_weights,
key, check_arch=None, init_epoch=1, use_multiprocessing=False):
"""
Launch experiment that trains the model.
NOTE: Do not change the argument names without verifying that cluster submission works.
Args:
config_path (str): Path to a text file containing a complete experiment configuration.
results_dir (str): Path to a folder where various training outputs will be written.
If the folder does not already exist, it will be created.
resume_weights (str): Optional path to a pretrained model file.
key (str): Key for loading/saving encrypted .tlt models.
init_epoch (int): The epoch number from which to resume training.
check_arch (enum): choose from [None, 'ssd', 'dssd']. If not None, raise an error if the
spec file says otherwise.
"""
hvd = hvd_keras()
hvd.init()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = str(hvd.local_rank())
sess = tf.Session(config=config)
K.set_session(sess)
verbose = 1 if hvd.rank() == 0 else 0
is_master = hvd.rank() == 0
if is_master and not os.path.exists(results_dir):
os.makedirs(results_dir)
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=is_master,
verbosity=1,
append=True
)
)
# Load experiment spec.
experiment_spec, is_dssd = load_experiment_spec(config_path, check_arch)
validate_train_spec(experiment_spec)
initialize(experiment_spec.random_seed, hvd)
cls_mapping = experiment_spec.dataset_config.target_class_mapping
classes = sorted({str(x) for x in cls_mapping.values()})
if is_master:
if experiment_spec.training_config.HasField("visualizer"):
network_name = "dssd" if is_dssd else "ssd"
visualizer_config = experiment_spec.training_config.visualizer
if visualizer_config.HasField("clearml_config"):
clearml_config = visualizer_config.clearml_config
get_clearml_task(clearml_config, network_name)
if visualizer_config.HasField("wandb_config"):
wandb_config = visualizer_config.wandb_config
wandb_logged_in = check_wandb_logged_in()
wandb_name = f"{wandb_config.name}" if wandb_config.name else \
f"{network_name}_training"
wandb_stream_config = MessageToDict(
experiment_spec,
preserving_proto_field_name=True,
including_default_value_fields=True
)
initialize_wandb(
project=wandb_config.project if wandb_config.project else None,
entity=wandb_config.entity if wandb_config.entity else None,
config=wandb_stream_config,
notes=wandb_config.notes if wandb_config.notes else None,
tags=wandb_config.tags if wandb_config.tags else None,
sync_tensorboard=True,
save_code=False,
results_dir=results_dir,
wandb_logged_in=wandb_logged_in,
name=wandb_name
)
# Load training parameters
num_epochs = experiment_spec.training_config.num_epochs
batch_size_per_gpu = experiment_spec.training_config.batch_size_per_gpu
lrconfig = experiment_spec.training_config.learning_rate.soft_start_annealing_schedule
# config kernel regularizer
reg_type = experiment_spec.training_config.regularizer.type
reg_weight = experiment_spec.training_config.regularizer.weight
kr = None
if reg_type:
if reg_type > 0:
assert 0 < reg_weight < 1, \
"Weight decay should be no less than 0 and less than 1"
kr = reg_dict[reg_type](reg_weight)
if experiment_spec.ssd_config.alpha != 0.0:
alpha = experiment_spec.ssd_config.alpha
else:
alpha = 1.0
if experiment_spec.ssd_config.neg_pos_ratio != 0.0:
neg_pos_ratio = experiment_spec.ssd_config.neg_pos_ratio
else:
neg_pos_ratio = 3
use_dali = False
# @TODO(tylerz): if there is tfrecord, then use dali.
if experiment_spec.dataset_config.data_sources[0].tfrecords_path != "":
use_dali = True
# Build train/val data and model; configure the optimizer and loss.
sgd = SGD(lr=0, decay=0, momentum=0.9, nesterov=False)
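# lr=0 is a placeholder: the soft-start annealing scheduler configured below
# overrides the learning rate on every iteration.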
ssd_loss = SSDLoss(neg_pos_ratio=neg_pos_ratio, alpha=alpha)
if resume_weights is not None:
if init_epoch == 1:
resume_from_training = False
else:
resume_from_training = True
logger.info("Loading pretrained weights. This may take a while...")
model, model_eval, train_dataset, val_dataset, optimizer = \
load_model_as_pretrain(model_path=resume_weights,
experiment_spec=experiment_spec,
is_dssd=is_dssd,
use_dali=use_dali,
local_rank=hvd.local_rank(),
shard_id=hvd.rank(),
num_shards=hvd.size(),
key=key,
kernel_regularizer=kr,
resume_from_training=resume_from_training)
if use_dali:
label_tensor = [train_dataset.labels]
else:
label_tensor = None
# check if the loaded model is QAT
qat_flag = experiment_spec.training_config.enable_qat
if not qat_flag and check_for_quantized_layers(model_eval):
raise ValueError("QAT training is disabled but the pretrained model is a QAT model.")
if qat_flag and not check_for_quantized_layers(model_eval):
raise ValueError("QAT training is enabled but the pretrained model is not a QAT model.")
if init_epoch == 1:
print("Initialize optimizer")
model.compile(optimizer=hvd.DistributedOptimizer(sgd),
loss=ssd_loss.compute_loss,
target_tensors=label_tensor)
else:
print("Resume optimizer from pretrained model")
model.compile(optimizer=hvd.DistributedOptimizer(optimizer),
loss=ssd_loss.compute_loss,
target_tensors=label_tensor)
else:
train_dataset, val_dataset = dataset_builder.build_dataset(experiment_spec, is_dssd,
device_id=hvd.local_rank(),
shard_id=hvd.rank(),
num_shards=hvd.size())
if use_dali:
input_tensor = train_dataset.images
else:
input_tensor = None
model, model_eval = \
model_builder.build(experiment_spec, is_dssd,
input_tensor=input_tensor,
kernel_regularizer=kr)
print("Initialize optimizer")
if use_dali:
label_tensor = [train_dataset.labels]
else:
label_tensor = None
model.compile(optimizer=hvd.DistributedOptimizer(sgd),
loss=ssd_loss.compute_loss,
target_tensors=label_tensor)
# configure LR scheduler
total_num = train_dataset.n_samples
iters_per_epoch = int(ceil(total_num / batch_size_per_gpu / hvd.size()))
max_iterations = num_epochs * iters_per_epoch
lr_scheduler = LRS(base_lr=lrconfig.max_learning_rate * hvd.size(),
min_lr_ratio=lrconfig.min_learning_rate / lrconfig.max_learning_rate,
soft_start=lrconfig.soft_start,
annealing_start=lrconfig.annealing,
max_iterations=max_iterations)
callbacks = [hvd.callbacks.BroadcastGlobalVariablesCallback(0),
hvd.callbacks.MetricAverageCallback(),
lr_scheduler,
TerminateOnNaN()]
init_step = (init_epoch - 1) * iters_per_epoch
lr_scheduler.reset(init_step)
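# Sketch of the schedule implemented by the imported LRS callback (stated as
# an assumption about its behavior): with progress = step / max_iterations,
#   progress < soft_start:                    lr ramps up exponentially from min_lr to base_lr
#   soft_start <= progress < annealing_start: lr stays at base_lr
#   progress >= annealing_start:              lr decays exponentially from base_lr to min_lr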
sess.run(get_init_ops())
if hvd.rank() == 0:
model.summary()
logger.info("Number of images in the training dataset:\t{:>6}"
.format(train_dataset.n_samples))
logger.info("Number of images in the validation dataset:\t{:>6}"
.format(val_dataset.n_samples))
if not os.path.exists(os.path.join(results_dir, 'weights')):
os.mkdir(os.path.join(results_dir, 'weights'))
if experiment_spec.ssd_config.arch in ['resnet', 'darknet', 'vgg']:
# append nlayers into meta_arch_name
arch_name = experiment_spec.ssd_config.arch + \
str(experiment_spec.ssd_config.nlayers)
else:
arch_name = experiment_spec.ssd_config.arch
meta_arch_name = 'dssd_' if is_dssd else 'ssd_'
ckpt_path = str(os.path.join(results_dir, 'weights',
meta_arch_name + arch_name + '_epoch_{epoch:03d}.hdf5'))
save_period = experiment_spec.training_config.checkpoint_interval or 1
# This callback will update model_eval and save the model.
model_checkpoint = KerasModelSaver(ckpt_path, key, save_period, verbose=1)
csv_path = os.path.join(
results_dir, meta_arch_name + 'training_log_' + arch_name + '.csv')
csv_logger = CSVLogger(filename=csv_path,
separator=',',
append=False)
callbacks.append(model_checkpoint)
if len(val_dataset) > 0:
# Load NMS parameters
conf_th = experiment_spec.nms_config.confidence_threshold
clustering_iou = experiment_spec.nms_config.clustering_iou_threshold
top_k = experiment_spec.nms_config.top_k
nms_max_output = top_k
# build eval graph
K.set_learning_phase(0)
built_eval_model = eval_builder.build(model_eval, conf_th,
clustering_iou, top_k,
nms_max_output,
include_encoded_pred=True)
# Load evaluation parameters
every_k = experiment_spec.eval_config.validation_period_during_training
ap_mode = experiment_spec.eval_config.average_precision_mode
matching_iou = experiment_spec.eval_config.matching_iou_threshold
matching_iou = matching_iou if matching_iou > 0 else 0.5
ap_mode_dict = {0: "sample", 1: "integrate"}
average_precision_mode = ap_mode_dict[ap_mode]
evaluator = APEvaluator(len(classes)+1,
conf_thres=experiment_spec.nms_config.confidence_threshold,
matching_iou_threshold=matching_iou,
average_precision_mode=average_precision_mode)
ssd_loss_val = SSDLoss(neg_pos_ratio=neg_pos_ratio, alpha=alpha)
n_box, n_attr = model_eval.layers[-1].output_shape[1:]
op_pred = tf.placeholder(tf.float32, shape=(None, n_box, n_attr))
op_true = tf.placeholder(tf.float32, shape=(None, n_box, n_attr))
loss_ops = [op_true, op_pred,
ssd_loss_val.compute_loss(op_true, op_pred)]
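# loss_ops feeds encoded (y_true, y_pred) pairs through placeholders so the
# metric callback can evaluate the validation loss outside the training graph.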
eval_callback = DetectionMetricCallbackBG(ap_evaluator=evaluator,
built_eval_model=built_eval_model,
eval_sequence=val_dataset,
loss_ops=loss_ops,
eval_model=model_eval,
metric_interval=every_k,
verbose=verbose)
K.set_learning_phase(1)
callbacks.append(eval_callback)
if hvd.rank() == 0:
callbacks.append(csv_logger)
status_logger = TAOStatusLogger(
results_dir,
append=True,
num_epochs=num_epochs,
is_master=hvd.rank() == 0,
)
callbacks.append(status_logger)
# init EarlyStopping callback:
if experiment_spec.training_config.HasField("early_stopping"):
es_config = experiment_spec.training_config.early_stopping
# align the validation name
if es_config.monitor == "val_loss":
es_config.monitor = "validation_loss"
if es_config.monitor == "validation_loss":
if len(val_dataset) <= 0:
raise ValueError("Validation dataset is needed for "
"using validation_loss as the early stopping monitor")
if experiment_spec.eval_config.validation_period_during_training != 1:
raise ValueError("validation_period_during_training should be 1 for "
"using validation_loss as the early stopping monitor")
es_cb = EarlyStopping(monitor=es_config.monitor,
min_delta=es_config.min_delta,
patience=es_config.patience,
verbose=True)
callbacks.append(es_cb)
if hvd.rank() == 0:
if experiment_spec.training_config.visualizer.enabled:
tb_log_dir = os.path.join(results_dir, "events")
tb_cb = SSDTensorBoard(log_dir=tb_log_dir, write_graph=False)
callbacks.append(tb_cb)
variances = eval_str(experiment_spec.ssd_config.variances)
tbimg_cb = SSDTensorBoardImage(tb_log_dir, experiment_spec, variances,
experiment_spec.training_config.visualizer.num_images)
fetches = [tf.assign(tbimg_cb.img, model.inputs[0], validate_shape=False),
tf.assign(tbimg_cb.label, model.targets[0], validate_shape=False)]
model._function_kwargs = {'fetches': fetches}
callbacks.append(tbimg_cb)
if use_dali:
model.fit(steps_per_epoch=iters_per_epoch,
epochs=num_epochs,
callbacks=callbacks,
initial_epoch=init_epoch - 1,
verbose=verbose)
else:
# @TODO(tylerz): Training runs into a deadlock on P40 with
# use_multiprocessing=True on small datasets, so fall back to the
# multi-threading mode when the dataset is small.
# https://github.com/keras-team/keras/issues/10340.
workers = experiment_spec.training_config.n_workers or (cpu_count()-1)
model.fit_generator(generator=train_dataset,
steps_per_epoch=iters_per_epoch,
epochs=num_epochs,
callbacks=callbacks,
initial_epoch=init_epoch - 1,
use_multiprocessing=use_multiprocessing,
max_queue_size=experiment_spec.training_config.max_queue_size or 20,
shuffle=False,
workers=workers,
verbose=verbose)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Training finished successfully."
)
def build_command_line_parser(parser=None):
'''build parser.'''
if parser is None:
parser = argparse.ArgumentParser(
prog='train', description='Train an SSD model.')
parser.add_argument(
'-e',
'--experiment_spec_file',
type=str,
required=True,
help='Path to spec file. Absolute path or relative to working directory. \
If not specified, default spec from spec_loader.py is used.')
parser.add_argument(
'-r',
'--results_dir',
type=str,
required=True,
help='Path to a folder where experiment outputs should be written.'
)
parser.add_argument(
'-k',
'--key',
type=str,
default="",
required=False,
help='Key to save or load a .tlt model.'
)
parser.add_argument(
'-m',
'--resume_model_weights',
type=str,
default=None,
help='Path to a model to continue training.'
)
parser.add_argument(
'--initial_epoch',
type=int,
default=1,
help='Set resume epoch'
)
parser.add_argument(
'--arch',
choices=[None, 'ssd', 'dssd'],
default=None,
help='Which architecture to use.'
)
parser.add_argument(
'--use_multiprocessing',
action="store_true",
default=False
)
return parser
def parse_command_line(args):
"""Simple function to parse command line arguments."""
parser = build_command_line_parser()
return parser.parse_args(args)
@check_tf_oom
def main(args=None):
"""Run the training process."""
args = parse_command_line(args)
try:
run_experiment(config_path=args.experiment_spec_file,
results_dir=args.results_dir,
resume_weights=args.resume_model_weights,
init_epoch=args.initial_epoch,
key=args.key,
check_arch=args.arch,
use_multiprocessing=args.use_multiprocessing)
logger.info("Training finished successfully.")
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Training was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
logger.info("Training was interrupted.")
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/scripts/train.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to prune the SSD TLT model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from nvidia_tao_tf1.cv.common.magnet_prune import ( # noqa pylint: disable=unused-import
build_command_line_parser,
main,
)
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
import nvidia_tao_tf1.cv.ssd.models.patch_keras
nvidia_tao_tf1.cv.ssd.models.patch_keras.patch()
if __name__ == "__main__":
try:
main(sys.argv[1:])
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Pruning finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Pruning was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/scripts/prune.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple Stand-alone inference script for SSD models trained using TAO."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import os
import keras.backend as K
import numpy as np
from nvidia_tao_tf1.cv.common.inferencer.inferencer import Inferencer
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.common.utils import check_tf_oom
from nvidia_tao_tf1.cv.ssd.builders import eval_builder
import nvidia_tao_tf1.cv.ssd.models.patch_keras
from nvidia_tao_tf1.cv.ssd.utils.model_io import load_model
from nvidia_tao_tf1.cv.ssd.utils.spec_loader import INFERENCE_EXP_REQUIRED_MSG, \
load_experiment_spec, spec_validator
nvidia_tao_tf1.cv.ssd.models.patch_keras.patch()
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level='INFO')
def build_command_line_parser(parser=None):
'''build parser.'''
if parser is None:
parser = argparse.ArgumentParser(description='TLT SSD Inference Tool')
parser.add_argument('-m',
'--model_path',
type=str,
required=True,
help='Path to a TLT model or TensorRT engine.')
parser.add_argument('-i',
'--image_dir',
required=True,
type=str,
help='The path to input image or directory.')
parser.add_argument('-k',
'--key',
type=str,
default="",
help='Key to save or load a .tlt model. Must present if -m is a TLT model')
parser.add_argument('-e',
'--experiment_spec',
required=True,
type=str,
help='Path to an experiment spec file for training.')
parser.add_argument('-t',
'--threshold',
type=float,
default=0.3,
help='Confidence threshold for inference.')
parser.add_argument('-r',
'--results_dir',
type=str,
help='Path to a folder where the logs are stored.')
parser.add_argument('-b',
'--batch_size',
type=int,
required=False,
default=1,
help=argparse.SUPPRESS)
return parser
def parse_command_line(args):
"""Simple function to parse command line arguments."""
parser = build_command_line_parser()
return parser.parse_args(args)
def keras_output_process_fn(inferencer, y_encoded):
"function to process keras model output."
return y_encoded
def trt_output_process_fn(inferencer, y_encoded):
"function to process TRT model output."
det_out, keep_k = y_encoded
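# Assumed NMS plugin layout: each det_out row is
# [image_id, class_id, confidence, xmin, ymin, xmax, ymax] with the four box
# coordinates normalized to [0, 1]; keep_k holds the number of valid
# detections per batch item.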
result = []
for idx, k in enumerate(keep_k.reshape(-1)):
det = det_out[idx].reshape(-1, 7)[:k]
xmin = det[:, 3] * inferencer.model_input_width
ymin = det[:, 4] * inferencer.model_input_height
xmax = det[:, 5] * inferencer.model_input_width
ymax = det[:, 6] * inferencer.model_input_height
cls_id = det[:, 1]
conf = det[:, 2]
result.append(np.stack((cls_id, conf, xmin, ymin, xmax, ymax), axis=-1))
return result
def inference(arguments):
'''make inference.'''
config_path = arguments.experiment_spec
experiment_spec, is_dssd = load_experiment_spec(config_path)
spec_validator(experiment_spec, INFERENCE_EXP_REQUIRED_MSG)
classes = sorted({str(x) for x in
experiment_spec.dataset_config.target_class_mapping.values()})
class_mapping = dict(zip(range(1, len(classes)+1), classes))
results_dir = arguments.results_dir
if results_dir is not None:
if not os.path.exists(results_dir):
os.makedirs(results_dir)
log_dir = results_dir
else:
log_dir = os.path.dirname(arguments.model_path)
status_file = os.path.join(log_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=True,
verbosity=1,
append=True
)
)
s_logger = status_logging.get_status_logger()
s_logger.write(
status_level=status_logging.Status.STARTED,
message="Starting {} Inference.".format("DSSD" if is_dssd else "SSD")
)
img_mean = experiment_spec.augmentation_config.image_mean
if experiment_spec.augmentation_config.output_channel == 3:
if img_mean:
img_mean = [img_mean['b'], img_mean['g'], img_mean['r']]
else:
img_mean = [103.939, 116.779, 123.68]
else:
if img_mean:
img_mean = [img_mean['l']]
else:
img_mean = [117.3786]
K.clear_session() # Clear previous models from memory.
K.set_learning_phase(0)
if os.path.splitext(arguments.model_path)[1] in ['.h5', '.tlt', '.hdf5']:
model = load_model(arguments.model_path, experiment_spec, is_dssd, key=arguments.key)
# Load evaluation parameters
conf_th = experiment_spec.nms_config.confidence_threshold
iou_th = experiment_spec.nms_config.clustering_iou_threshold
top_k = experiment_spec.nms_config.top_k
nms_max_output = top_k
# Build evaluation model
model = eval_builder.build(model, conf_th, iou_th, top_k, nms_max_output)
inferencer = Inferencer(keras_model=model,
batch_size=experiment_spec.eval_config.batch_size,
infer_process_fn=keras_output_process_fn,
class_mapping=class_mapping,
threshold=arguments.threshold,
img_mean=img_mean,
keep_aspect_ratio=False)
print("Using TLT model for inference, setting batch size to the one in eval_config:",
experiment_spec.eval_config.batch_size)
else:
inferencer = Inferencer(trt_engine_path=arguments.model_path,
infer_process_fn=trt_output_process_fn,
class_mapping=class_mapping,
threshold=arguments.threshold,
img_mean=img_mean,
keep_aspect_ratio=False)
print("Using TensorRT engine for inference, setting batch size to engine's one:",
inferencer.batch_size)
# Fall back to log_dir (next to the model) when no --results_dir was given.
out_image_path = os.path.join(log_dir, "images_annotated")
out_label_path = os.path.join(log_dir, "labels")
os.makedirs(out_image_path, exist_ok=True)
os.makedirs(out_label_path, exist_ok=True)
inferencer.infer(arguments.image_dir, out_image_path, out_label_path)
s_logger.write(
status_level=status_logging.Status.SUCCESS,
message="Inference finished successfully."
)
@check_tf_oom
def main(args=None):
"""Run the inference process."""
try:
args = parse_command_line(args)
inference(args)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Inference was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/scripts/inference.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple Stand-alone evaluate script for SSD models trained using TAO."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import os
import sys
import keras.backend as K
from keras.utils.data_utils import OrderedEnqueuer
import numpy as np
import tensorflow as tf
from tqdm import trange
from nvidia_tao_tf1.cv.common.evaluator.ap_evaluator import APEvaluator
from nvidia_tao_tf1.cv.common.inferencer.inferencer import Inferencer
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.common.utils import check_tf_oom
from nvidia_tao_tf1.cv.ssd.builders import eval_builder
from nvidia_tao_tf1.cv.ssd.builders.data_sequence import SSDDataSequence
import nvidia_tao_tf1.cv.ssd.models.patch_keras
from nvidia_tao_tf1.cv.ssd.utils.model_io import load_model
from nvidia_tao_tf1.cv.ssd.utils.spec_loader import load_experiment_spec, validate_eval_spec
nvidia_tao_tf1.cv.ssd.models.patch_keras.patch()
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level='INFO')
def build_command_line_parser(parser=None):
'''build parser.'''
if parser is None:
parser = argparse.ArgumentParser(description='TLT SSD Evaluation Tool')
parser.add_argument('-m',
'--model_path',
help='Path to an SSD TLT model or TensorRT engine.',
required=True,
type=str)
parser.add_argument('-k',
'--key',
type=str,
default="",
help='Key to save or load a .tlt model.')
parser.add_argument('-e',
'--experiment_spec',
required=True,
type=str,
help='Experiment spec file for training and evaluation.')
parser.add_argument('-r',
'--results_dir',
type=str,
help='Path to a folder where the logs are stored.')
parser.add_argument('-i',
'--image_dir',
type=str,
required=False,
default=None,
help=argparse.SUPPRESS)
parser.add_argument('-l',
'--label_dir',
type=str,
required=False,
help=argparse.SUPPRESS)
parser.add_argument('-b',
'--batch_size',
type=int,
required=False,
default=1,
help=argparse.SUPPRESS)
return parser
def parse_command_line(args):
"""Simple function to parse command line arguments."""
parser = build_command_line_parser()
return parser.parse_args(args)
def keras_output_process_fn(inferencer, y_encoded):
"function to process keras model output."
return y_encoded
def trt_output_process_fn(inferencer, y_encoded):
"function to process TRT model output."
det_out, keep_k = y_encoded
result = []
for idx, k in enumerate(keep_k.reshape(-1)):
det = det_out[idx].reshape(-1, 7)[:k]
xmin = det[:, 3] * inferencer.model_input_width
ymin = det[:, 4] * inferencer.model_input_height
xmax = det[:, 5] * inferencer.model_input_width
ymax = det[:, 6] * inferencer.model_input_height
cls_id = det[:, 1]
conf = det[:, 2]
result.append(
np.stack((cls_id, conf, xmin, ymin, xmax, ymax), axis=-1))
return result
def evaluate(arguments):
'''make evaluation.'''
config_path = arguments.experiment_spec
experiment_spec, is_dssd = load_experiment_spec(config_path)
validate_eval_spec(experiment_spec)
classes = sorted({str(x) for x in
experiment_spec.dataset_config.target_class_mapping.values()})
K.clear_session() # Clear previous models from memory.
results_dir = arguments.results_dir
if results_dir is not None:
if not os.path.exists(results_dir):
os.makedirs(results_dir)
log_dir = results_dir
else:
log_dir = os.path.dirname(arguments.model_path)
status_file = os.path.join(log_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=True,
verbosity=1,
append=True
)
)
s_logger = status_logging.get_status_logger()
s_logger.write(
status_level=status_logging.Status.STARTED,
message="Starting {} evaluation.".format("DSSD" if is_dssd else "SSD")
)
if experiment_spec.eval_config.visualize_pr_curve:
vis_path = os.path.dirname(arguments.model_path)
else:
vis_path = None
if os.path.splitext(arguments.model_path)[1] in ['.h5', '.tlt', '.hdf5']:
K.set_learning_phase(0)
model = load_model(arguments.model_path, experiment_spec,
is_dssd, key=arguments.key)
# Load evaluation parameters
conf_th = experiment_spec.nms_config.confidence_threshold
iou_th = experiment_spec.nms_config.clustering_iou_threshold
top_k = experiment_spec.nms_config.top_k
nms_max_output = top_k
# Build evaluation model
model = eval_builder.build(
model, conf_th, iou_th, top_k, nms_max_output)
inferencer = Inferencer(keras_model=model,
batch_size=experiment_spec.eval_config.batch_size,
infer_process_fn=keras_output_process_fn,
class_mapping=None,
threshold=experiment_spec.nms_config.confidence_threshold)
print("Using TLT model for inference, setting batch size to the one in eval_config:",
experiment_spec.eval_config.batch_size)
else:
# os.cpu_count() may return None if the count cannot be determined.
cpu_cnt = os.cpu_count()
if cpu_cnt is None:
cpu_cnt = 1
session_config = tf.compat.v1.ConfigProto(
device_count={'GPU': 0, 'CPU': cpu_cnt}
)
session = tf.Session(config=session_config)
# Pin TF to CPU to avoid TF & TRT CUDA context conflict
K.set_session(session)
inferencer = Inferencer(trt_engine_path=arguments.model_path,
batch_size=experiment_spec.eval_config.batch_size,
infer_process_fn=trt_output_process_fn,
class_mapping=None,
threshold=experiment_spec.nms_config.confidence_threshold)
print("Using TensorRT engine for inference, setting batch size to",
f"{experiment_spec.eval_config.batch_size} from eval_config")
ap_mode = experiment_spec.eval_config.average_precision_mode
matching_iou = experiment_spec.eval_config.matching_iou_threshold
matching_iou = matching_iou if matching_iou > 0 else 0.5
ap_mode_dict = {0: "sample", 1: "integrate"}
average_precision_mode = ap_mode_dict[ap_mode]
evaluator = APEvaluator(len(classes)+1,
conf_thres=experiment_spec.nms_config.confidence_threshold,
matching_iou_threshold=matching_iou,
average_precision_mode=average_precision_mode)
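# "sample" evaluates precision at 11 equally spaced recall points (PASCAL VOC
# 2007 style), while "integrate" integrates the full precision-recall curve
# (VOC 2010+ style). Sketch of the sampled variant, assuming it matches
# APEvaluator's convention:
#
#     ap = np.mean([max(precision[recall >= t], default=0.0)
#                   for t in np.linspace(0, 1, 11)])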
val_dataset = SSDDataSequence(dataset_config=experiment_spec.dataset_config,
augmentation_config=experiment_spec.augmentation_config,
batch_size=experiment_spec.eval_config.batch_size,
is_training=False,
encode_fn=None)
# Prepare labels
gt_labels = []
pred_labels = []
tr = trange(len(val_dataset), file=sys.stdout)
tr.set_description('Producing predictions')
enqueuer = OrderedEnqueuer(val_dataset, use_multiprocessing=False)
enqueuer.start(workers=max((os.cpu_count() or 2) - 1, 1), max_queue_size=20)
output_generator = enqueuer.get()
output_height = val_dataset.output_height
output_width = val_dataset.output_width
# Loop over all batches.
for _ in tr:
# Generate batch.
batch_X, batch_labs = next(output_generator)
y_pred = inferencer._predict_batch(batch_X)
gt_labels.extend(batch_labs)
conf_thres = experiment_spec.nms_config.confidence_threshold
for i in range(len(y_pred)):
y_pred_valid = y_pred[i][y_pred[i][:, 1] > conf_thres]
y_pred_valid[..., 2] = np.clip(y_pred_valid[..., 2].round(), 0.0,
output_width)
y_pred_valid[..., 3] = np.clip(y_pred_valid[..., 3].round(), 0.0,
output_height)
y_pred_valid[..., 4] = np.clip(y_pred_valid[..., 4].round(), 0.0,
output_width)
y_pred_valid[..., 5] = np.clip(y_pred_valid[..., 5].round(), 0.0,
output_height)
pred_labels.append(y_pred_valid)
enqueuer.stop()
results = evaluator(
gt_labels,
pred_labels,
verbose=True,
class_names=["bg"] + classes if experiment_spec.eval_config.visualize_pr_curve else None,
vis_path=vis_path)
_, average_precisions = results
mean_average_precision = np.mean(average_precisions[1:])
print("*******************************")
for i in range(len(classes)):
print("{:<14}{:<6}{}".format(
classes[i], 'AP', round(average_precisions[i+1], 3)))
print("{:<14}{:<6}{}".format('', 'mAP', round(mean_average_precision, 3)))
print("*******************************")
s_logger.kpi.update({'mAP': float(mean_average_precision)})
s_logger.write(
status_level=status_logging.Status.SUCCESS,
message="Evaluation finished successfully."
)
@check_tf_oom
def main(args=None):
"""Run the evaluation process."""
try:
args = parse_command_line(args)
evaluate(args)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Evaluation was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise
if __name__ == "__main__":
main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/scripts/evaluate.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""TLT command line wrapper to invoke CLI scripts."""
import sys
from nvidia_tao_tf1.cv.common.entrypoint.entrypoint import launch_job
import nvidia_tao_tf1.cv.ssd.scripts
def main():
"""Function to launch the job."""
launch_job(nvidia_tao_tf1.cv.ssd.scripts, "ssd", sys.argv[1:])
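# Example invocation (hedged; the exact flags are defined by the individual
# scripts): `ssd evaluate -e <spec_file> -m <model_file> -k <key>`, where the
# first positional argument selects a script from nvidia_tao_tf1.cv.ssd.scripts.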
if __name__ == "__main__":
main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/entrypoint/ssd.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""TLT SSD entrypoint."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/entrypoint/__init__.py |
tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/experiment_specs/__init__.py |
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for exporting a trained SSD model to an ETLT file for deployment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/export/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Export an SSD model to an encrypted ONNX file."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import tempfile
import keras.backend as K
from keras.layers import Permute, Reshape
from keras.models import Model
import numpy as np
import onnx
import onnx_graphsurgeon as onnx_gs
import tensorflow as tf
from nvidia_tao_tf1.core.export._onnx import keras_to_onnx
# Import quantization layer processing.
from nvidia_tao_tf1.core.export._quantized import (
check_for_quantized_layers,
process_quantized_layers,
)
from nvidia_tao_tf1.cv.common.export.keras_exporter import KerasExporter as Exporter
from nvidia_tao_tf1.cv.ssd.layers.anchor_box_layer import AnchorBoxes
from nvidia_tao_tf1.cv.ssd.utils.model_io import load_model
from nvidia_tao_tf1.cv.ssd.utils.spec_loader import load_experiment_spec
NUM_FEATURE_MAPS = 6
logger = logging.getLogger(__name__)
class SSDOnnxExporter(Exporter):
"""Exporter class to export a trained SSD model."""
def __init__(self, model_path=None,
key=None,
data_type="fp32",
strict_type=False,
experiment_spec_path="",
backend="onnx",
**kwargs):
"""Instantiate the SSD exporter to export a trained SSD .tlt model.
Args:
model_path(str): Path to the SSD model file.
key (str): Key to decode the model.
data_type (str): Backend data-type for the optimized TensorRT engine.
strict_type(bool): Apply TensorRT strict_type_constraints or not for INT8 mode.
experiment_spec_path (str): Path to SSD experiment spec file.
backend (str): Type of intermediate backend parser to be instantiated.
"""
super(SSDOnnxExporter, self).__init__(model_path=model_path,
key=key,
data_type=data_type,
strict_type=strict_type,
backend=backend)
self.experiment_spec_path = experiment_spec_path
assert os.path.isfile(self.experiment_spec_path), \
"Experiment spec file not found at {}.".format(self.experiment_spec_path)
self.experiment_spec = None
self.is_dssd = None
self.backend = 'onnx'
def load_model(self, backend="onnx"):
"""Simple function to load the SSD Keras model."""
experiment_spec, is_dssd = load_experiment_spec(self.experiment_spec_path)
K.clear_session()
K.set_learning_phase(0)
model = load_model(self.model_path, experiment_spec,
is_dssd, key=self.key)
outputs = self.generate_trt_output(model.get_layer('mbox_loc').output,
model.get_layer('mbox_conf_softmax').output,
model.get_layer('mbox_priorbox').output)
model = Model(inputs=model.input, outputs=outputs)
if check_for_quantized_layers(model):
model, tensor_scale_dict = process_quantized_layers(
model, backend,
calib_cache=None,
calib_json=None)
nodes = list(tensor_scale_dict.keys())
for k in nodes:
if k.find('Input') != -1:
tensor_scale_dict['Input'] = tensor_scale_dict.pop(k)
# ZeroPadding is fused with its following conv2d/depthwiseconv2d, collapse them.
padding_nodes = []
for k in tensor_scale_dict:
if '/Pad' in k:
# this is a ZeroPadding node
padding_nodes.append(k)
for n in padding_nodes:
tensor_scale_dict.pop(n)
self.tensor_scale_dict = tensor_scale_dict
self.experiment_spec = experiment_spec
# Clear the session and reload the model to strip the "_1" layer-name suffixes.
# Save model to a temp file so we can reload it later.
os_handle, tmp_model_file_name = tempfile.mkstemp(suffix=".hdf5")
os.close(os_handle)
model.save(tmp_model_file_name)
# Make sure Keras session is clean and tuned for inference.
K.clear_session()
K.set_learning_phase(0)
model = load_model(tmp_model_file_name, experiment_spec,
is_dssd, key=self.key)
# Delete temp file.
os.remove(tmp_model_file_name)
img_mean = experiment_spec.augmentation_config.image_mean
self.image_mean = [103.939, 116.779, 123.68] \
if experiment_spec.augmentation_config.output_channel == 3 else [117.3786]
if img_mean:
if experiment_spec.augmentation_config.output_channel == 3:
self.image_mean = [img_mean['b'], img_mean['g'], img_mean['r']]
else:
self.image_mean = [img_mean['l']]
return model
def generate_trt_output(self, loc, conf, anchor):
"""Manipulate model outputs so we can use TRT NMS plugin."""
out_loc = Reshape((-1, 1, 1), name='loc_data')(loc)
out_conf = Reshape((-1, 1, 1), name='conf_data')(conf)
out_anchor = Reshape((-1, 2, 4), name="anchor_reshape")(anchor)
out_anchor = Permute((2, 1, 3), name="anchor_permute")(out_anchor)
out_anchor = Reshape((2, -1, 1), name='anchor_data')(out_anchor)
return [out_loc, out_conf, out_anchor]
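# Shape sketch (an illustration, assuming N anchor boxes and C classes, and
# that mbox_priorbox carries 4 coordinates plus 4 variances per box):
#   mbox_loc          -> loc_data     (batch, N*4, 1, 1)
#   mbox_conf_softmax -> conf_data    (batch, N*C, 1, 1)
#   mbox_priorbox     -> anchor_data  (batch, 2, N*4, 1)
# where anchor_data channel 0 holds the box coordinates and channel 1 the
# variances, which is the prior layout the TRT NMS plugin expects.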
def save_exported_file(self, model, output_file_name):
"""Save the exported model file.
This routine converts a keras model to onnx/uff model
based on the backend the exporter was initialized with.
Args:
model (keras.model.Model): Decoded keras model to be exported.
output_file_name (str): Path to the output file.
Returns:
output_file_name (str): Path to the output ONNX file.
"""
if self.backend == "onnx":
keras_to_onnx(model, output_file_name,
custom_objects={'AnchorBoxes': AnchorBoxes})
tf.reset_default_graph()
onnx_model = onnx.load(output_file_name)
onnx_model = self.node_process(onnx_model)
os.remove(output_file_name)
onnx.save(onnx_model, output_file_name)
logger.info("Converted model was saved into %s", output_file_name)
return output_file_name
raise NotImplementedError("Invalid backend provided. {}".format(self.backend))
def set_input_output_node_names(self):
"""Set input output node names."""
self.output_node_names = ["NMS"]
self.input_node_names = ["Input"]
def node_process(self, ssd_graph):
"""Manipulate the ONNX graph to make it compatible with TRT.
Args:
ssd_graph (onnx.ModelProto): ONNX model of the exported SSD network.
Returns:
(onnx.ModelProto): Post-processed ONNX model with the TRT NMS plugin node
appended, ready to be serialized as an onnx file.
"""
ssd_graph = onnx_gs.import_onnx(ssd_graph)
spec = self.experiment_spec
num_classes = len({str(x) for x in
spec.dataset_config.target_class_mapping.values()})
anchor_data = self._get_onnx_node_by_name(
ssd_graph, 'anchor_data').outputs[0]
loc_data = self._get_onnx_node_by_name(
ssd_graph, 'loc_data').outputs[0]
conf_data = self._get_onnx_node_by_name(
ssd_graph, 'conf_data').outputs[0]
nms_out = onnx_gs.Variable(
"NMS",
dtype=np.float32
)
nms_out_1 = onnx_gs.Variable(
"NMS_1",
dtype=np.float32
)
nms_attrs = dict()
nms_attrs["shareLocation"] = 1
nms_attrs["varianceEncodedInTarget"] = 0
nms_attrs["backgroundLabelId"] = 0
nms_attrs["confidenceThreshold"] = spec.nms_config.confidence_threshold
nms_attrs["nmsThreshold"] = spec.nms_config.clustering_iou_threshold
nms_attrs["topK"] = 2*spec.nms_config.top_k  # topK as NMS input
nms_attrs["codeType"] = 1
nms_attrs["keepTopK"] = spec.nms_config.top_k  # NMS output topK
nms_attrs["numClasses"] = num_classes + 1  # +1 for background class
nms_attrs["inputOrder"] = [1, 2, 0]
nms_attrs["confSigmoid"] = 0
nms_attrs["isNormalized"] = 1
nms_attrs["scoreBits"] = spec.nms_config.infer_nms_score_bits
nms_plugin = onnx_gs.Node(
op="NMSDynamic_TRT",
name="NMS",
inputs=[anchor_data, loc_data, conf_data],
outputs=[nms_out, nms_out_1],
attrs=nms_attrs
)
ssd_graph.nodes.append(nms_plugin)
ssd_graph.outputs = nms_plugin.outputs
ssd_graph.cleanup().toposort()
self._fix_onnx_paddings(ssd_graph)
return onnx_gs.export_onnx(ssd_graph)
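# Example usage (a minimal sketch; the paths and key below are hypothetical
# placeholders -- the export script normally drives this class):
#
#   exporter = SSDOnnxExporter(model_path="/workspace/ssd_model.tlt",
#                              key="nvidia_tlt",
#                              data_type="fp32",
#                              experiment_spec_path="/workspace/spec.txt")
#   model = exporter.load_model()
#   exporter.set_input_output_node_names()
#   exporter.save_exported_file(model, "/workspace/ssd_model.onnx")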
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/export/onnx_exporter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class to export trained .tlt models to etlt file."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import tempfile
import graphsurgeon as gs
import keras
from keras import backend as K
from keras.layers import Permute, Reshape
import tensorflow as tf
import uff
# Import quantization layer processing.
from nvidia_tao_tf1.core.export._quantized import (
check_for_quantized_layers,
process_quantized_layers,
)
from nvidia_tao_tf1.core.export._uff import keras_to_pb
from nvidia_tao_tf1.cv.common.export.keras_exporter import KerasExporter as Exporter
from nvidia_tao_tf1.cv.common.types.base_ds_config import BaseDSConfig
from nvidia_tao_tf1.cv.ssd.layers.anchor_box_layer import AnchorBoxes
from nvidia_tao_tf1.cv.ssd.utils.model_io import load_model
from nvidia_tao_tf1.cv.ssd.utils.spec_loader import EXPORT_EXP_REQUIRED_MSG, \
load_experiment_spec, spec_validator
NUM_FEATURE_MAPS = 6
logger = logging.getLogger(__name__)
class SSDExporter(Exporter):
"""Exporter class to export a trained SSD model."""
def __init__(self, model_path=None,
key=None,
data_type="fp32",
strict_type=False,
experiment_spec_path="",
backend="uff",
**kwargs):
"""Instantiate the SSD exporter to export a trained SSD .tlt model.
Args:
model_path(str): Path to the SSD model file.
key (str): Key to decode the model.
data_type (str): Backend data-type for the optimized TensorRT engine.
strict_type(bool): Apply TensorRT strict_type_constraints or not for INT8 mode.
experiment_spec_path (str): Path to SSD experiment spec file.
backend (str): Type of intermediate backend parser to be instantiated.
"""
super(SSDExporter, self).__init__(model_path=model_path,
key=key,
data_type=data_type,
strict_type=strict_type,
backend=backend)
self.experiment_spec_path = experiment_spec_path
assert os.path.isfile(self.experiment_spec_path), \
"Experiment spec file not found at {}.".format(self.experiment_spec_path)
self.experiment_spec = None
self.is_dssd = None
def load_model(self, backend="uff"):
"""Simple function to load the SSD Keras model."""
experiment_spec, is_dssd = load_experiment_spec(self.experiment_spec_path)
spec_validator(experiment_spec, EXPORT_EXP_REQUIRED_MSG)
K.clear_session()
K.set_learning_phase(0)
model = load_model(self.model_path, experiment_spec,
is_dssd, key=self.key)
outputs = self.generate_trt_output(model.get_layer('mbox_loc').output,
model.get_layer('mbox_conf_softmax').output,
model.get_layer('mbox_priorbox').output)
model = keras.models.Model(inputs=model.input,
outputs=outputs)
if check_for_quantized_layers(model):
model, tensor_scale_dict = process_quantized_layers(
model, backend,
calib_cache=None,
calib_json=None)
# ZeroPadding is fused with its following conv2d/depthwiseconv2d, collapse them.
padding_nodes = []
for k in tensor_scale_dict:
if '/Pad' in k:
# this is a ZeroPadding node
padding_nodes.append(k)
for n in padding_nodes:
tensor_scale_dict.pop(n)
self.tensor_scale_dict = tensor_scale_dict
self.experiment_spec = experiment_spec
img_mean = experiment_spec.augmentation_config.image_mean
self.image_mean = [103.939, 116.779, 123.68] \
if experiment_spec.augmentation_config.output_channel == 3 else [117.3786]
if img_mean:
if experiment_spec.augmentation_config.output_channel == 3:
self.image_mean = [img_mean['b'], img_mean['g'], img_mean['r']]
else:
self.image_mean = [img_mean['l']]
return model
def generate_trt_output(self, loc, conf, anchor):
"""Manipulate model outputs so we can use TRT NMS plugin."""
out_loc = Reshape((-1, 1, 1), name='loc_data')(loc)
out_conf = Reshape((-1, 1, 1), name='conf_data')(conf)
out_anchor = Reshape((-1, 2, 4), name="anchor_reshape")(anchor)
out_anchor = Permute((2, 1, 3), name="anchor_permute")(out_anchor)
out_anchor = Reshape((2, -1, 1), name='anchor_data')(out_anchor)
return [out_loc, out_conf, out_anchor]
def save_exported_file(self, model, output_file_name):
"""Save the exported model file.
This routine converts a keras model to onnx/uff model
based on the backend the exporter was initialized with.
Args:
model (keras.model.Model): Decoded keras model to be exported.
output_file_name (str): Path to the output file.
Returns:
output_file_name (str): Path to the output UFF model.
"""
os_handle, tmp_pb_file = tempfile.mkstemp(suffix=".pb")
os.close(os_handle)
if self.backend == "uff":
keras_to_pb(model, tmp_pb_file, None,
custom_objects={'AnchorBoxes': AnchorBoxes})
tf.reset_default_graph()
dynamic_graph = gs.DynamicGraph(tmp_pb_file)
dynamic_graph = self.node_process(dynamic_graph)
os.remove(tmp_pb_file)
uff.from_tensorflow(dynamic_graph.as_graph_def(),
['NMS'],
output_filename=output_file_name,
text=False,
quiet=True)
return output_file_name
raise NotImplementedError("Invalid backend provided. {}".format(self.backend))
def set_input_output_node_names(self):
"""Set input output node names."""
self.output_node_names = ["NMS"]
self.input_node_names = ["Input"]
def node_process(self, ssd_graph):
"""Manipulate the SSD dynamic graph to make it compatible with TRT.
Args:
ssd_graph (gs.DynamicGraph): Dynamic graph of the SSD model from the TF Proto file.
Returns:
ssd_graph (gs.DynamicGraph): Post-processed dynamic graph which is ready to be
serialized as a uff file.
"""
spec = self.experiment_spec
FirstDimTile = [
gs.create_node(name="FirstDimTile_{}".format(i),
trt_plugin=True,
op="BatchTilePlugin_TRT")
for i in range(NUM_FEATURE_MAPS)
]
num_classes = len({str(x) for x in
spec.dataset_config.target_class_mapping.values()})
# TensorRT Bug 2603572, anchor_data/Reshape must be at the very beginning!
NMS = gs.create_plugin_node(name='NMS', op='NMS_TRT',
inputs=['anchor_data/Reshape',
'loc_data/Reshape',
'conf_data/Reshape'],
shareLocation=1,
varianceEncodedInTarget=0,
backgroundLabelId=0,
confidenceThreshold=spec.nms_config.confidence_threshold,
nmsThreshold=spec.nms_config.clustering_iou_threshold,
topK=2*spec.nms_config.top_k, # topK as NMS input
codeType=1,
keepTopK=spec.nms_config.top_k, # NMS output topK
numClasses=num_classes+1, # +1 for background class
inputOrder=[1, 2, 0],
confSigmoid=0,
isNormalized=1,
scoreBits=spec.nms_config.infer_nms_score_bits)
# Create a mapping of namespace names -> plugin nodes.
namespace_plugin_map = {"ssd_anchor_{}/FirstDimTile".format(i): FirstDimTile[i] for i in
range(NUM_FEATURE_MAPS)}
softmax_remove_list = ["mbox_conf_softmax_/transpose",
"mbox_conf_softmax_/transpose_1"]
softmax_connect_list = [("mbox_conf_softmax_/Softmax", "mbox_conf_softmax/transpose"),
("before_softmax_permute/transpose", "mbox_conf_softmax_/Softmax")]
def connect(dynamic_graph, connections_list):
for node_a_name, node_b_name in connections_list:
if node_a_name not in dynamic_graph.node_map[node_b_name].input:
dynamic_graph.node_map[node_b_name].input.insert(0, node_a_name)
# Create a new graph by collapsing namespaces
ssd_graph.remove(softmax_remove_list)
connect(ssd_graph, softmax_connect_list)
ssd_graph.append(NMS)
ssd_graph.collapse_namespaces(namespace_plugin_map)
return ssd_graph
def get_class_labels(self):
"""Get list of class labels to serialize to a labels.txt file."""
classes = sorted({str(x) for x in
self.experiment_spec.dataset_config.target_class_mapping.values()})
# add background label at idx=0:
classes = ["background"] + classes
return classes
def generate_ds_config(self, input_dims, num_classes=None):
"""Generate Deepstream config element for the exported model."""
if input_dims[0] == 1:
color_format = "l"
else:
color_format = "bgr" if self.preprocessing_arguments["flip_channel"] else "rgb"
kwargs = {
"data_format": self.data_format,
"backend": self.backend,
# network-type 0 corresponds to a detector in the DeepStream config;
# this is the default since most exported models are detection networks.
"network_type": 0,
"maintain_aspect_ratio": False
}
if num_classes:
kwargs["num_classes"] = num_classes
if self.backend == "uff":
kwargs.update({
"input_names": self.input_node_names,
"output_names": self.output_node_names
})
ds_config = BaseDSConfig(
self.preprocessing_arguments["scale"],
self.preprocessing_arguments["means"],
input_dims,
color_format,
self.key,
**kwargs
)
return ds_config
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/export/ssd_exporter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Unit test for SSD model export functionality.'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import keras
import pytest
import tensorflow as tf
from nvidia_tao_tf1.cv.common.utils import encode_from_keras
from nvidia_tao_tf1.cv.ssd.architecture.ssd_arch import ssd
import nvidia_tao_tf1.cv.ssd.models.patch_keras
from nvidia_tao_tf1.cv.ssd.utils.spec_loader import eval_str, load_experiment_spec
nvidia_tao_tf1.cv.ssd.models.patch_keras.patch()
backbone_configs = [
('resnet', 10, False, False, "fp32"),
('resnet', 10, False, True, "int8"),
('resnet', 18, False, False, "int8"),
('vgg', 16, True, False, "fp16"),
]
keras.backend.set_image_data_format('channels_first')
@pytest.fixture
def _spec_file():
'''default spec file.'''
parent_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
return os.path.join(parent_dir, 'experiment_specs/default_spec.txt')
@pytest.fixture
def spec():
'''spec.'''
experiment_spec, _ = load_experiment_spec()
return experiment_spec
@pytest.mark.skipif(os.getenv("RUN_ON_CI", "0") == "1", reason="Cannot be run on CI")
@pytest.mark.script_launch_mode('subprocess')
@pytest.mark.parametrize("model_type, nlayers, is_dssd, qat, data_type",
backbone_configs)
def test_export_uff(script_runner, spec, _spec_file, model_type,
nlayers, is_dssd, qat, data_type):
'''Test to make sure the export works and the exported model can be parsed without issues.'''
# pin GPU ID 0 so it uses the newest GPU ARCH for INT8
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
keras.backend.set_session(sess)
enc_key = 'nvidia_tlt'
cls_mapping = spec.dataset_config.target_class_mapping
classes = sorted({str(x) for x in cls_mapping.values()})
# n_classes + 1 for background class
n_classes = len(classes) + 1
scales = eval_str(spec.ssd_config.scales)
aspect_ratios_global = eval_str(
spec.ssd_config.aspect_ratios_global)
aspect_ratios_per_layer = eval_str(
spec.ssd_config.aspect_ratios)
steps = eval_str(spec.ssd_config.steps)
offsets = eval_str(spec.ssd_config.offsets)
variances = eval_str(spec.ssd_config.variances)
freeze_blocks = eval_str(spec.ssd_config.freeze_blocks)
freeze_bn = eval_str(spec.ssd_config.freeze_bn)
keras.backend.clear_session()
model = ssd(image_size=(3, 300, 300),
n_classes=n_classes,
is_dssd=is_dssd,
nlayers=nlayers,
kernel_regularizer=None,
freeze_blocks=freeze_blocks,
freeze_bn=freeze_bn,
scales=scales,
min_scale=spec.ssd_config.min_scale,
max_scale=spec.ssd_config.max_scale,
aspect_ratios_global=aspect_ratios_global,
aspect_ratios_per_layer=aspect_ratios_per_layer,
two_boxes_for_ar1=spec.ssd_config.two_boxes_for_ar1,
steps=steps,
offsets=offsets,
clip_boxes=spec.ssd_config.clip_boxes,
variances=variances,
arch=model_type,
input_tensor=None,
qat=qat)
os_handle, tmp_keras_model = tempfile.mkstemp(suffix=".tlt")
os.close(os_handle)
encode_from_keras(model, tmp_keras_model, enc_key.encode())
os_handle, tmp_exported_model = tempfile.mkstemp(suffix=".onnx")
os.close(os_handle)
os.remove(tmp_exported_model)
del model
# export to etlt model
script = 'nvidia_tao_tf1/cv/ssd/scripts/export.py'
env = os.environ.copy()
# 1. export in FP32 mode
if data_type == "fp32":
args = ['-m', tmp_keras_model,
'-k', enc_key,
'--experiment_spec', _spec_file,
'-o', tmp_exported_model,
'--static_batch_size', "1"]
keras.backend.clear_session()
ret = script_runner.run(script, env=env, *args)
# before abort, remove the created temp files when exception raises
try:
assert ret.success
assert os.path.isfile(tmp_exported_model)
if os.path.exists(tmp_exported_model):
os.remove(tmp_exported_model)
except AssertionError:
# if the script runner failed, the tmp_exported_model may not be created at all
if os.path.exists(tmp_exported_model):
os.remove(tmp_exported_model)
os.remove(tmp_keras_model)
raise AssertionError(ret.stdout + ret.stderr)
# 2. export in FP16 mode
if data_type == "fp16":
args = ['-m', tmp_keras_model,
'-k', enc_key,
'--experiment_spec', _spec_file,
'-o', tmp_exported_model,
'--data_type', 'fp16',
'--static_batch_size', "1"]
keras.backend.clear_session()
ret = script_runner.run(script, env=env, *args)
try:
assert ret.success
assert os.path.isfile(tmp_exported_model)
if os.path.exists(tmp_exported_model):
os.remove(tmp_exported_model)
except AssertionError:
if os.path.exists(tmp_exported_model):
os.remove(tmp_exported_model)
os.remove(tmp_keras_model)
raise AssertionError(ret.stdout + ret.stderr)
# 3. export in INT8 mode with random data for calibration
# 4. export in INT8 mode with tensor_scale_dict
os_handle, tmp_data_file = tempfile.mkstemp()
os.close(os_handle)
os.remove(tmp_data_file)
os_handle, tmp_cache_file = tempfile.mkstemp()
os.close(os_handle)
os.remove(tmp_cache_file)
if data_type == "int8":
if qat:
args = ['-m', tmp_keras_model,
'-k', enc_key,
'--experiment_spec', _spec_file,
'-o', tmp_exported_model,
'--data_type', 'int8',
'--cal_cache_file', tmp_cache_file,
'--static_batch_size', "1"]
keras.backend.clear_session()
ret = script_runner.run(script, env=env, *args)
try:
assert ret.success
assert os.path.isfile(tmp_exported_model)
assert os.path.isfile(tmp_cache_file)
except AssertionError:
raise AssertionError(ret.stdout + ret.stderr)
else:
args = ['-m', tmp_keras_model,
'-k', enc_key,
'--experiment_spec', _spec_file,
'-o', tmp_exported_model,
'--data_type', 'int8',
'--cal_data_file', tmp_data_file,
'--cal_image_dir', "",
'--batches', '1',
'--batch_size', '1',
'--cal_cache_file', tmp_cache_file,
'--static_batch_size', "1"]
keras.backend.clear_session()
ret = script_runner.run(script, env=env, *args)
try:
# this is the last export, retain the etlt model for following check
assert ret.success
assert os.path.isfile(tmp_exported_model)
if os.path.exists(tmp_data_file):
os.remove(tmp_data_file)
if os.path.exists(tmp_cache_file):
os.remove(tmp_cache_file)
except AssertionError:
raise AssertionError(ret.stdout + ret.stderr)
# clear the tmp files
if os.path.exists(tmp_exported_model):
os.remove(tmp_exported_model)
if os.path.exists(tmp_cache_file):
os.remove(tmp_cache_file)
if os.path.exists(tmp_keras_model):
os.remove(tmp_keras_model)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/export/tests/test_uff_export.py |
"Module containing core routines for CV applications."
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/core/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for RandomShift processor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pytest
import tensorflow as tf
from nvidia_tao_tf1.cv.core.augment.random_shift import RandomShift
@pytest.mark.parametrize("shift_percent_max, shift_probability, frame_shape, message",
[(-1, 0.5, [10, 10, 3],
"RandomShift.shift_percent_max (-1) is not within the range [0, 1]."),
(0.05, 1.5, [10, 10, 3],
"RandomShift.shift_probability (1.5) is not within the range [0, 1]."),
])
def test_invalid_random_shift_parameters(shift_percent_max, shift_probability, frame_shape,
message):
"""Test RandomShift processor constructor error handling on invalid arguments."""
with pytest.raises(ValueError) as exc:
RandomShift(shift_percent_max=shift_percent_max,
shift_probability=shift_probability,
frame_shape=frame_shape)
assert message in str(exc)
def test_random_shift_call_with_no_percent():
"""Test RandomShift processor call."""
op = RandomShift(shift_percent_max=0.0,
shift_probability=1.0,
frame_shape=[25, 25, 3])
bbox = {
'x': tf.constant(10.),
'y': tf.constant(10.),
'h': tf.constant(10.),
'w': tf.constant(10.),
}
out_bbox = op(bbox)
with tf.Session() as sess:
bbox = sess.run(bbox)
out_bbox = sess.run(out_bbox)
for key in bbox:
assert bbox[key] == 10
assert bbox[key] == out_bbox[key]
def test_random_shift_call_with_no_prob():
"""Test RandomShift processor call."""
op = RandomShift(shift_percent_max=1.0,
shift_probability=0.0,
frame_shape=[25, 25, 3])
bbox = {
'x': tf.constant(10.),
'y': tf.constant(10.),
'h': tf.constant(10.),
'w': tf.constant(10.),
}
out_bbox = op(bbox)
with tf.Session() as sess:
bbox = sess.run(bbox)
out_bbox = sess.run(out_bbox)
for key in bbox:
assert bbox[key] == 10
assert bbox[key] == out_bbox[key]
def test_random_shift_call_():
"""Test RandomShift processor call."""
op = RandomShift(shift_percent_max=1.0,
shift_probability=1.0,
frame_shape=[25, 25, 3])
bbox = {
'x': tf.constant(10.),
'y': tf.constant(10.),
'h': tf.constant(10.),
'w': tf.constant(10.),
}
out_bbox = op(bbox)
with tf.Session() as sess:
bbox = sess.run(bbox)
out_bbox = sess.run(out_bbox)
for key in bbox:
assert bbox[key] == 10
assert bbox[key] != out_bbox[key]
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/core/augment/random_shift_test.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Processor for applying random shift transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nvidia_tao_tf1.core.processors.processors import Processor
import tensorflow as tf
class RandomShift(Processor):
"""Random shift processor to shift bounding boxes."""
def __init__(self, shift_percent_max, shift_probability, frame_shape, **kwargs):
"""Construct a random blur processor.
Args:
shift_percent_max (float): Maximum percent shift of bounding box
shift_probability (float): Probability that a shift will occur.
frame_shape (float): shape of frame (HWC).
"""
super(RandomShift, self).__init__(**kwargs)
if shift_percent_max < 0.0 or shift_percent_max > 1.0:
raise ValueError(
"RandomShift.shift_percent_max ({}) is not within the range [0, 1].".format(
shift_percent_max))
if shift_probability < 0.0 or shift_probability > 1.0:
raise ValueError(
"RandomShift.shift_probability ({}) is not within the range [0, 1].".format(
shift_probability))
self._shift_percent_max = shift_percent_max
self._shift_probability = shift_probability
self._frame_shape = frame_shape
def __repr__(self):
"""Return a string representation of the processor."""
return "RandomShift(shift_percent_max={}, shift_probability={})".format(
self._shift_percent_max, self._shift_probability)
def _build(self, *args, **kwargs):
"""Initialize random variables used for op.
The build function should be used when wanting to apply a consistent random shift to
multiple images. This means if a random shift is performed on one image, a random shift
will occur on other images passed through this Processor. The shift amount may vary.
"""
shift_probability = tf.random_uniform([], minval=0.0, maxval=1.0)
should_shift = tf.less(shift_probability, self._shift_probability)
self._percentage = tf.cond(should_shift, lambda: self._shift_percent_max, lambda: 0.0)
def call(self, in_bbox):
"""Return a shifted bounding box.
Args:
in_bbox (dict): contains 'x', 'y', 'w', 'h' information for bounding box.
Returns:
bbox (dict): contains modified 'x', 'y', 'w', 'h' information for bounding
box.
"""
if (self._shift_percent_max == 0.0 or self._shift_probability == 0.0):
return in_bbox
bbox = {}
for key in in_bbox:
bbox[key] = tf.identity(in_bbox[key])
# x shift is relative to width of bbox.
bound = bbox['w'] * self._percentage
coord_noise = tf.random_uniform([], minval=-1.0, maxval=1.0, dtype=tf.float32) * bound
bbox['x'] += coord_noise
# y shift is relative to height of bbox.
bound = bbox['h'] * self._percentage
coord_noise = tf.random_uniform([], minval=-1.0, maxval=1.0, dtype=tf.float32) * bound
bbox['y'] += coord_noise
# NOTE: to preserve square bbox, the same shift is applied to width and height.
bound = bbox['w'] * self._percentage
square_preserve = tf.random_uniform([], minval=-1.0, maxval=1.0, dtype=tf.float32) * bound
bbox['w'] += square_preserve
bbox['h'] += square_preserve
bbox['x'] = tf.reshape(bbox['x'], ())
bbox['y'] = tf.reshape(bbox['y'], ())
bbox['w'] = tf.reshape(bbox['w'], ())
bbox['h'] = tf.reshape(bbox['h'], ())
return self._correct_bbox_bounds(bbox, self._frame_shape)
@staticmethod
def _correct_bbox_bounds(bbox, frame_shape):
"""Fix bounding box coordinates within shape of frame.
Args:
bbox (dict): contains 'x', 'y', 'w', 'h' information for bounding box.
frame_shape (Tensor float32): shape of frame (HWC).
Returns:
bbox (dict): contains 'x', 'y', 'w', 'h' information for bounding box within
frame shape.
"""
frame_h = frame_shape[0] - 1
frame_w = frame_shape[1] - 1
bbox['x'] = tf.clip_by_value(bbox['x'], clip_value_min=0, clip_value_max=frame_w)
bbox['y'] = tf.clip_by_value(bbox['y'], clip_value_min=0, clip_value_max=frame_h)
width_large = tf.greater(bbox['x'] + bbox['w'], frame_w)
height_large = tf.greater(bbox['y'] + bbox['h'], frame_h)
new_width = tf.cond(width_large, lambda: frame_w - bbox['x'], lambda: bbox['w'])
new_height = tf.cond(height_large, lambda: frame_h - bbox['y'], lambda: bbox['h'])
max_square_dim = tf.minimum(new_width, new_height)
bbox['w'] = tf.clip_by_value(bbox['w'], clip_value_min=0, clip_value_max=max_square_dim)
bbox['h'] = tf.clip_by_value(bbox['h'], clip_value_min=0, clip_value_max=max_square_dim)
return bbox
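# Example usage (a minimal sketch; the bbox values and frame shape are
# illustrative):
#
#   import tensorflow as tf
#   op = RandomShift(shift_percent_max=0.1, shift_probability=0.5,
#                    frame_shape=[480, 640, 3])
#   bbox = {'x': tf.constant(100.), 'y': tf.constant(80.),
#           'w': tf.constant(50.), 'h': tf.constant(50.)}
#   shifted = op(bbox)  # each coordinate may move by up to 10% of the box size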
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/core/augment/random_shift.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Processor for applying random gamma transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nvidia_tao_tf1.core.processors.processors import Processor
import tensorflow as tf
class RandomGamma(Processor):
"""Random gamma processor."""
def __init__(self, gamma_type, gamma_mu, gamma_std, gamma_max, gamma_min, gamma_probability,
**kwargs):
"""Construct a random gamma processor.
Args:
gamma_type (string): Describes type of random sampling for gamma ['normal', 'uniform'].
gamma_mu (float): Mu for gamma normal distribution.
gamma_std (float): Standard deviation for gamma normal distribution.
gamma_max (float): Maximum value for gamma uniform distribution.
gamma_min (float): Minimum value for gamma uniform distribution.
gamma_probability (float): Probability that a gamma correction will occur.
"""
super(RandomGamma, self).__init__(**kwargs)
if gamma_type not in ('normal', 'uniform'):
raise ValueError("RandomGamma.gamma_type ({}) is not one of "
"['normal', 'uniform'].".format(gamma_type))
if gamma_mu < 0:
raise ValueError("RandomGamma.gamma_mu ({}) is not positive.".format(gamma_mu))
if gamma_std < 0:
raise ValueError("RandomGamma.gamma_std ({}) is not positive.".format(gamma_std))
if gamma_min < 0:
raise ValueError("RandomGamma.gamma_min ({}) is not positive.".format(gamma_min))
if gamma_max < 0:
raise ValueError("RandomGamma.gamma_max ({}) is not positive.".format(gamma_max))
if gamma_max < gamma_min:
raise ValueError("RandomGamma.gamma_max ({}) is less than "
"RandomGamma.gamma_min ({}).".format(gamma_max, gamma_min))
if gamma_max == gamma_min and gamma_max != 1.0:
raise ValueError("RandomGamma.gamma_max ({}) is equal to RandomGamma.gamma_min "
"({}) but is not 1.0.".format(gamma_max, gamma_min))
if gamma_probability < 0.0 or gamma_probability > 1.0:
raise ValueError(
"RandomGamma.gamma_probability ({}) is not within the range [0, 1].".format(
gamma_probability))
self._gamma_type = gamma_type
self._gamma_mu = float(gamma_mu)
self._gamma_std = float(gamma_std)
self._gamma_max = float(gamma_max)
self._gamma_min = float(gamma_min)
self._gamma_probability = gamma_probability
def __repr__(self):
"""Return a string representation of the processor."""
_rep = "RandomGamma(gamma_type={}, gamma_mu={}, gamma_max={}, gamma_std={}, " \
"gamma_min={}, gamma_probability={})".format(self._gamma_type,
self._gamma_mu,
self._gamma_std,
self._gamma_max,
self._gamma_min,
self._gamma_probability)
return _rep
def _build(self, *args, **kwargs):
"""Initialize random variables used for op.
The build function should be used when wanting to apply the same random gamma to multiple
images.
"""
gamma_probability = tf.random_uniform([], minval=0.0, maxval=1.0)
self._should_gamma = tf.less(gamma_probability, self._gamma_probability)
if self._gamma_type == 'uniform':
self._random_gamma = tf.random_uniform([], minval=self._gamma_min,
maxval=self._gamma_max)
elif self._gamma_type == 'normal':
val = tf.random_normal([], mean=self._gamma_mu, stddev=self._gamma_std)
# NOTE: Taking the absolute value yields more useful random gammas than a relu
# (tf.nn.relu(val)), which would collapse every negative sample to 0.
# Map each negative gamma to its absolute value instead.
self._random_gamma = tf.abs(val)
def call(self, image):
"""Return a gamma corrected image.
Args:
image (Tensor): Image to be gamma corrected (NHWC) or (HWC) or (NCHW) or (CHW).
Returns:
output_image (Tensor): Image that may be gamma corrected.
Same data format as input (NHWC) or (HWC) or (NCHW) or (CHW).
"""
if self._gamma_min == self._gamma_max == 1.0 and self._gamma_type == 'uniform':
return image
corrected_image = tf.image.adjust_gamma(image, gamma=self._random_gamma)
output_image = tf.cond(self._should_gamma, lambda: corrected_image, lambda: image)
return output_image
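# Example usage (a minimal sketch with illustrative parameter values):
#
#   import tensorflow as tf
#   op = RandomGamma(gamma_type='uniform', gamma_mu=1.0, gamma_std=0.3,
#                    gamma_max=1.5, gamma_min=0.5, gamma_probability=0.5)
#   image = tf.random_uniform([224, 224, 3])
#   maybe_corrected = op(image)  # gamma-corrected with probability 0.5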
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/core/augment/random_gamma.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for RandomBlur processor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from mock import patch
import pytest
import tensorflow as tf
from nvidia_tao_tf1.cv.core.augment.random_blur import RandomBlur
@pytest.mark.parametrize("blur_choices, blur_probability, channels, message",
[([1, 2, 3], 0.5, 1, "RandomBlur.blur_choices ([1, 2, 3]) contains "
"an even kernel size (2)."),
([1, -1, 3], 0.5, 1, "RandomBlur.blur_choices ([1, -1, 3]) contains "
"an invalid kernel size (-1)."),
([1, 5, 3], 1.5, 1, "RandomBlur.blur_probability (1.5) is not "
"within the range [0, 1]."),
([1, 2, 3], 0.5, 2, "RandomBlur.blur_choices ([1, 2, 3]) contains an "
"even kernel size (2)."),
([1, -1, 3], 0.5, 2, "RandomBlur.blur_choices ([1, -1, 3]) contains an "
"invalid kernel size (-1)."),
([1, 5, 3], 1.5, 2, "RandomBlur.blur_probability (1.5) is not within "
"the range [0, 1]."),
([1, 2, 3], 0.5, 3, "RandomBlur.blur_choices ([1, 2, 3]) contains "
"an even kernel size (2)."),
([1, -1, 3], 0.5, 3, "RandomBlur.blur_choices ([1, -1, 3]) contains an "
"invalid kernel size (-1)."),
([1, 5, 3], 1.5, 3, "RandomBlur.blur_probability (1.5) is not within "
"the range [0, 1]."),
])
def test_invalid_random_blur_parameters(blur_choices, blur_probability, channels, message):
"""Test RandomBlur processor constructor error handling on invalid arguments."""
with pytest.raises(ValueError) as exc:
RandomBlur(blur_choices=blur_choices,
blur_probability=blur_probability,
channels=channels)
assert message in str(exc)
@pytest.mark.parametrize("channels",
[1, 2, 3])
def test_random_blur_call_with_no_blur_choices(channels):
"""Test RandomBlur processor call."""
op = RandomBlur(blur_choices=[],
blur_probability=1.0,
channels=channels)
width = height = 10
image = tf.ones([height, width, 3]) * 2
out_image = op(image, height, width)
assert_op = tf.assert_equal(image, out_image)
with tf.Session() as sess:
sess.run(assert_op)
@pytest.mark.parametrize("channels",
[1, 2, 3])
@patch("nvidia_tao_tf1.cv.core.augment.random_blur.tf.nn.depthwise_conv2d",
side_effect=tf.nn.depthwise_conv2d)
def test_delegates_to_tf_depthwise_conv2d(spied_depthwise_conv2d, channels):
op = RandomBlur(blur_choices=[5],
blur_probability=1,
channels=channels)
width = height = 10
image = tf.ones([height, width, channels]) * 2
out_image = op(image, height, width)
spied_depthwise_conv2d.assert_called_once()
with tf.Session():
assert tf.shape(out_image).eval().tolist() == [10, 10, channels]
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/core/augment/random_blur_test.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Processors for transforming and augmenting data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nvidia_tao_tf1.cv.core.augment.random_blur import RandomBlur
from nvidia_tao_tf1.cv.core.augment.random_gamma import RandomGamma
from nvidia_tao_tf1.cv.core.augment.random_shift import RandomShift
__all__ = (
'RandomBlur',
'RandomGamma',
'RandomShift',
)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/core/augment/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for RandomGamma processor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from mock import patch
import pytest
import tensorflow as tf
from nvidia_tao_tf1.cv.core.augment.random_gamma import RandomGamma
@pytest.mark.parametrize("gamma_type, gamma_mu, gamma_std, gamma_max, gamma_min, gamma_probability,"
"message",
[('uniform', 1.0, 1.0, 2, -1, 0.5,
"RandomGamma.gamma_min (-1) is not positive."),
('uniform', 1.0, 1.0, -0.1, 1, 0.5,
"RandomGamma.gamma_max (-0.1) is not positive."),
('uniform', 1.0, 1.0, 0.1, 1, 0.5,
"RandomGamma.gamma_max (0.1) is less than RandomGamma.gamma_min (1)."),
('uniform', 1.0, 1.0, 2, 1, -0.1,
"RandomGamma.gamma_probability (-0.1) is not within the range [0, 1]."),
('uniform', 1.0, 1.0, 2, 1, 1.1,
"RandomGamma.gamma_probability (1.1) is not within the range [0, 1]."),
('uniform', 1.0, 1.0, 2, 2, 0.7,
"RandomGamma.gamma_max (2) is equal to RandomGamma.gamma_min (2) but "
"is not 1.0."),
('test', 1.0, 1.0, 2, 2, 0.7,
"RandomGamma.gamma_type (test) is not one of ['normal', 'uniform']."),
('normal', -1.0, 1.0, 2, 2, 0.7,
"RandomGamma.gamma_mu (-1.0) is not positive."),
('normal', 1.0, -1.0, 2, 2, 0.7,
"RandomGamma.gamma_std (-1.0) is not positive."),
])
def test_invalid_random_gamma_parameters(gamma_type, gamma_mu, gamma_std, gamma_max, gamma_min,
gamma_probability, message):
"""Test RandomGamma processor constructor error handling on invalid arguments."""
with pytest.raises(ValueError) as exc:
RandomGamma(gamma_type=gamma_type,
gamma_mu=gamma_mu,
gamma_std=gamma_std,
gamma_max=gamma_max,
gamma_min=gamma_min,
gamma_probability=gamma_probability)
assert message in str(exc)
def test_random_gamma_call_with_same_gamma_one():
"""Test RandomGamma processor call."""
op = RandomGamma(gamma_type='uniform',
gamma_mu=1.0,
gamma_std=0.3,
gamma_max=1,
gamma_min=1,
gamma_probability=1.0)
image = tf.ones([10, 10, 3]) * 2
out_image = op(image)
assert_op = tf.assert_equal(image, out_image)
with tf.Session() as sess:
sess.run(assert_op)
@patch("nvidia_tao_tf1.cv.core.augment.random_gamma.tf.image.adjust_gamma",
side_effect=tf.image.adjust_gamma)
# Patch random_uniform inside the module under test so the sampled gamma is deterministic.
@patch("nvidia_tao_tf1.cv.core.augment.random_gamma.tf.random_uniform")
def test_delegates_to_tf_image_adjust_gamma_uniform(mocked_random_uniform, spied_adjust_gamma):
mocked_random_uniform.return_value = tf.constant(2, dtype=tf.float32)
op = RandomGamma(gamma_type='uniform',
gamma_mu=1.0,
gamma_std=0.3,
gamma_max=2,
gamma_min=1,
gamma_probability=1.0)
image = tf.ones([10, 10, 3]) * 2
op(image)
spied_adjust_gamma.assert_called_with(image, gamma=mocked_random_uniform.return_value)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/core/augment/random_gamma_test.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Processor for applying random blur transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nvidia_tao_tf1.core.processors.processors import Processor
import tensorflow as tf
class RandomBlur(Processor):
"""Random blur processor."""
def __init__(self, blur_choices, blur_probability, channels, **kwargs):
"""Construct a random blur processor.
Args:
blur_choices (list): Choices of odd integer kernel size for blurring.
blur_probability (float): Probability that a blur will occur.
"""
super(RandomBlur, self).__init__(**kwargs)
for size in blur_choices:
if size % 2 == 0:
raise ValueError("RandomBlur.blur_choices ({}) contains an even "
"kernel size ({}).".format(blur_choices, size))
if size < 1:
raise ValueError("RandomBlur.blur_choices ({}) contains an invalid "
"kernel size ({}).".format(blur_choices, size))
if blur_probability < 0.0 or blur_probability > 1.0:
raise ValueError(
"RandomBlur.blur_probability ({}) is not within the range [0, 1].".format(
blur_probability))
self._blur_choices_list = list(blur_choices)
self._blur_choices = tf.convert_to_tensor(self._blur_choices_list, dtype=tf.int32)
self._blur_probability = blur_probability
self._channels = channels
def __repr__(self):
"""Return a string representation of the processor."""
return "RandomBlur(blur_choices={}, blur_probability={})".format(self._blur_choices,
self._blur_probability)
def _build(self, *args, **kwargs):
"""Initialize random variables used for op.
The build function should be used when wanting to apply the same random blur to multiple
images.
"""
self._kernel = self._get_random_kernel(self._channels)
blur_probability = tf.random_uniform([], minval=0.0, maxval=1.0)
self._should_blur = tf.less(blur_probability, self._blur_probability)
def call(self, image, output_height, output_width):
"""Return a blurred image.
Args:
image (Tensor): Image to be blurred (HWC).
output_height (int): Output image height.
output_width (int): Output image width.
Returns:
output_image (Tensor): Image that may be blurred.
"""
if self._kernel is None:
return image
batch = tf.stack([image])
blurred = tf.nn.depthwise_conv2d(batch, self._kernel, strides=[1, 1, 1, 1],
padding='VALID', data_format='NHWC')
output_image = tf.cond(self._should_blur, lambda: blurred, lambda: batch)
output_image = tf.squeeze(output_image, axis=0)
output_image = tf.image.resize_images(output_image, (output_height, output_width),
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
return output_image
def _get_random_kernel(self, channels=3):
"""Generate a random average (box) kernel.
Args:
channels (int): Number of channels the kernel is built for. Defaults to 3.
Returns:
kernel (Tensor float32): Average kernel of shape (size, size, channels, 1).
Intended to be used with depthwise_conv2d. None if there are no blur choices.
"""
if not self._blur_choices_list:
return None
random_index = tf.random_uniform([], minval=0, maxval=len(self._blur_choices_list),
dtype=tf.int32)
size = self._blur_choices[random_index]
kernel = tf.ones((size, size, channels, 1), dtype=tf.float32)
kernel /= (tf.cast(size, dtype=tf.float32) ** 2)
return kernel
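# Example usage (a minimal sketch; kernel sizes and image shape are
# illustrative). For a sampled size of 3, the kernel is a 3x3 box filter
# whose entries are all 1/9:
#
#   import tensorflow as tf
#   op = RandomBlur(blur_choices=[3, 5, 7], blur_probability=0.5, channels=3)
#   image = tf.random_uniform([300, 300, 3])
#   maybe_blurred = op(image, 300, 300)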
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/core/augment/random_blur.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Core Inferencer module."""
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/core/inferencer/__init__.py |
# Copyright 2021 NVIDIA Corporation. All rights reserved.
"""Wrapper class for performing TensorRT inference."""
import logging
import tensorrt as trt
from nvidia_tao_tf1.core.export._tensorrt import Engine
logger = logging.getLogger(__name__)
# TensorRT logger singleton
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
class TRTInferencer(object):
"""TensorRT model inference wrapper."""
def __init__(self, trt_engine):
"""Initialize the TensorRT model builder.
Args:
trt_engine (str or trt.ICudaEngine): trt engine path or
deserialized trt engine.
"""
# Initialize runtime needed for loading TensorRT engine from file
trt.init_libnvinfer_plugins(TRT_LOGGER, "")
self.trt_runtime = trt.Runtime(TRT_LOGGER)
if isinstance(trt_engine, trt.ICudaEngine):
# It's already an engine
self.trt_engine_path = None
self.trt_engine = trt_engine
else:
# Assume it's a filepath
self.trt_engine_path = trt_engine
# Deserialize the engine
self.trt_engine = self._load_trt_engine_file(self.trt_engine_path)
self.engine = Engine(self.trt_engine)
def _load_trt_engine_file(self, trt_engine_path):
"""Load serialized engine file into memory.
Args:
trt_engine_path (str): path to the tensorrt file
Returns:
trt_engine (trt.ICudaEngine): deserialized engine
"""
# Load serialized engine file into memory
with open(trt_engine_path, "rb") as f:
trt_engine = self.trt_runtime.deserialize_cuda_engine(f.read())
logger.info("Loading TensorRT engine: {}".format(trt_engine_path))
return trt_engine
def predict(self, input_data):
"""Do inference with TensorRT engine.
Args:
input_data (np.ndarray): Inputs to run inference on.
Returns:
(dict): dictionary mapping output names to output values.
"""
return self.engine.infer(input_data)
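# Example usage (a minimal sketch; the engine path and input shape are
# hypothetical and must match the engine's bindings):
#
#   import numpy as np
#   inferencer = TRTInferencer("/workspace/model.engine")
#   batch = np.random.rand(1, 3, 300, 300).astype(np.float32)
#   outputs = inferencer.predict(batch)  # dict: output name -> np.ndarray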
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/core/inferencer/trt_inferencer.py |
import gc
import logging
import os
import keras
from keras import backend as K
from keras.layers import Concatenate, Conv2D, Dense, Flatten, Input
from keras.models import Model
from numba import cuda
import numpy as np
import pytest
from nvidia_tao_tf1.blocks.models import KerasModel
from nvidia_tao_tf1.core.export._tensorrt import Engine, ONNXEngineBuilder, UFFEngineBuilder
from nvidia_tao_tf1.cv.common.utilities.tlt_utils import encode_from_keras, load_model
from nvidia_tao_tf1.cv.core.export.base_exporter import BaseExporter
logger = logging.getLogger(__name__)
logging.basicConfig(
format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level='INFO'
)
class DummyModel(KerasModel):
"""Dummy model for tests."""
def _build_dummy_model(self, dummy_tensor):
"""Build Dummy model for testing purposes.
Args:
dummy_tensor (tensor): Input tensor to model
Returns:
x_3 (tensor): Dummy model output.
"""
x_1_1 = Conv2D(32,
kernel_size=(3, 3),
strides=(1, 1),
data_format='channels_first',
name='layer-1-1')(dummy_tensor)
x_2_1 = Conv2D(32,
kernel_size=(3, 3),
strides=(1, 1),
data_format='channels_first',
name='layer-2-1')(dummy_tensor)
x_2 = Concatenate(axis=1)([x_1_1, x_2_1])
x_2_flatten = Flatten(data_format='channels_first')(x_2)
x_3 = Dense(10)(x_2_flatten)
return x_3
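# Shape sketch (assuming the (1, 1, 256, 256) dummy input used in the test
# below): each 3x3 VALID conv yields (32, 254, 254); the channel-wise concat
# gives (64, 254, 254), which is flattened and projected to 10 units.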
def build(self, key, dummy_input):
"""Build Dummy Model.
Args:
key (str): Key to decode/encode model.
dummy_input (tensor): Dummy input for model.
Returns
outputs from model.
"""
model_name = 'DummyNet'
dummy_tensor = Input(tensor=dummy_input, name='dummy_input')
dummy_output = self._build_dummy_model(dummy_tensor)
model = Model(inputs=[dummy_tensor], outputs=[dummy_output], name=model_name)
self._keras_model = model
return self._keras_model.outputs
def save_model(self, file_name, enc_key='test', encrypt=True):
"""Save Dummy Model.
Args:
file_name (str): File to save dummy model.
enc_key (str): Key to encode model.
"""
if encrypt:
encode_from_keras(
self._keras_model,
file_name,
bytes(enc_key, 'utf-8'))
else:
self._keras_model.save(file_name)
class ExporterTest(BaseExporter):
"""Exporter class for testing purposes."""
def __init__(self,
model_path=None,
key='test',
data_type='int8',
backend='tfonnx',
strict_type=False,
data_format='channels_first'):
"""Instantiate exporter for testing.
Args:
model_path (str): Path to dummy model file.
key (str): Key to decode model.
data_type (str): Backend data-type for the optimized TensorRT engine.
strict_type (bool): Apply TensorRT strict_type_constraints or not for INT8 mode.
backend (str): Type of intermediate backend parser to be instantiated.
"""
super(ExporterTest, self).__init__(model_path=model_path,
key=key,
data_type=data_type,
backend=backend,
strict_type=strict_type,
data_format=data_format)
keras.backend.set_image_data_format(data_format)
def export_to_etlt(self, output_filename, target_opset=10):
"""Function to export model to etlt.
Args:
output_filename (str): Output .etlt filename
target_opset (int): Target opset version to use for onnx conversion.
Returns:
output_onnx_filename (str): Temporary unencrypted file
in_tensor_names (list): List of input tensor names
out_tensor_names (list): List of output tensor names
"""
keras.backend.set_learning_phase(0)
model = load_model(self.model_path,
key=self.key)
output_node_names = ['dense_1/BiasAdd']
tmp_file_name, in_tensor_names, out_tensor_names = self.save_exported_file(
model,
output_filename,
output_node_names=output_node_names,
target_opset=target_opset,
delete_tmp_file=False
)
del model
gc.collect()
cuda.close()
return tmp_file_name, in_tensor_names, out_tensor_names
def export(self,
input_dims,
output_filename,
backend,
calibration_cache='',
data_file_name='',
n_batches=1,
batch_size=1,
verbose=True,
target_opset=10,
calibration_images_dir='',
save_engine=True,
engine_file_name='',
max_workspace_size=1 << 30,
max_batch_size=1,
opt_batch_size=1,
static_batch_size=1,
save_unencrypted_model=False,
validate_trt_engine=True,
tmp_file_name='',
in_tensor_name='',
out_tensor_name='',
tmp_dir=''
):
"""Export.
Args:
ETLT export
input_dims (list): Input dims with channels_first(CHW) or channels_last (HWC)
output_filename (str): Output .etlt filename
backend (str): Model type to export to
Calibration and TRT export
calibration_cache (str): Calibration cache file to write to or read from.
data_file_name (str): Tensorfile to run calibration for int8 optimization
n_batches (int): Number of batches to calibrate over
batch_size (int): Number of images per batch
verbose (bool): Verbosity of the logger
target_opset (int): Target opset version to use for onnx conversion.
calibration_images_dir (str): Directory of images to run int8 calibration if
data file is unavailable.
save_engine (bool): If True, saves trt engine file to `engine_file_name`
engine_file_name (str): Output trt engine file
max_workspace_size (int): Max size of workspace to be set for trt engine builder.
max_batch_size (int): Max batch size for trt engine builder
opt_batch_size (int): Optimum batch size to use for model conversion.
Default is 1.
            static_batch_size (int): Set a static batch size for exported etlt model.
                Default is -1 (dynamic batch size).
            save_unencrypted_model (bool): Whether to save the model unencrypted.
            validate_trt_engine (bool): Option to validate the built trt engine.
tmp_file_name (str): Temporary file name to use.
in_tensor_name (str): Input tensor name to the model.
out_tensor_name (str): Output tensor name to the model.
            tmp_dir (str): Pytest temporary directory. Used only for int8 export.
"""
# Get int8 calibrator.
calibrator = None
max_batch_size = max(batch_size, max_batch_size)
data_format = self.data_format
        input_dims = (1, 256, 256)  # Hard-coded to match the dummy model input.
if self.backend == 'tfonnx':
backend = 'onnx'
preprocessing_params = {'scale': [0.5], 'means': [0.5], 'flip_channel': False}
keras.backend.clear_session()
if self.data_type == 'int8':
calibration_cache = os.path.join(tmp_dir, 'calibration')
calibration_data = os.path.join(tmp_dir, 'calibration2')
calibrator = self.get_calibrator(
calibration_cache=calibration_cache,
data_file_name=calibration_data,
n_batches=n_batches,
batch_size=batch_size,
input_dims=input_dims,
calibration_images_dir='nvidia_tao_tf1/cv/core/export/images/',
preprocessing_params=preprocessing_params
)
if backend == 'onnx':
engine_builder = ONNXEngineBuilder(tmp_file_name,
max_batch_size=max_batch_size,
max_workspace_size=max_workspace_size,
dtype=self.data_type,
strict_type=self.strict_type,
verbose=verbose,
calibrator=calibrator,
tensor_scale_dict=self.tensor_scale_dict,
dynamic_batch=True,
input_dims=None,
opt_batch_size=opt_batch_size)
elif backend == 'uff':
engine_builder = UFFEngineBuilder(tmp_file_name,
in_tensor_name,
max_batch_size=max_batch_size,
max_workspace_size=max_workspace_size,
dtype=self.data_type,
strict_type=self.strict_type,
verbose=verbose,
calibrator=calibrator,
tensor_scale_dict=self.tensor_scale_dict,
data_format=data_format)
else:
raise NotImplementedError("Invalid backend")
trt_engine = engine_builder.get_engine()
if save_engine:
with open(engine_file_name, 'wb') as outf:
outf.write(trt_engine.serialize())
if validate_trt_engine:
try:
engine = Engine(trt_engine)
dummy_input = np.ones((1,) + input_dims)
trt_output = engine.infer(dummy_input)
logger.info('TRT engine outputs: {}'.format(trt_output.keys()))
for output_name in trt_output.keys():
out = trt_output[output_name]
logger.info('{}: {}'.format(output_name, out.shape))
except Exception as error:
logger.error('TRT engine validation error!')
logger.error(error)
if trt_engine:
del trt_engine
@pytest.mark.parametrize(
"encrypt_model",
[False]
)
def test_export(tmpdir, encrypt_model):
    '''Function to test model exports.
    Args:
        tmpdir (str): Pytest temporary directory.
        encrypt_model (bool): Whether to save the dummy model encrypted.
    Returns:
        None.
    '''
key = 'test'
if encrypt_model:
model_filename = "model.tlt"
else:
model_filename = "model.hdf5"
model_path = os.path.join(tmpdir, model_filename)
model = DummyModel()
dummy_input = np.random.randn(1, 1, 256, 256)
dummy_input = K.constant(dummy_input)
model.build(key, dummy_input)
model.save_model(model_path, key, encrypt=encrypt_model)
exporter = ExporterTest(model_path,
key=key,
backend='tfonnx',
data_type='int8')
tmp_file_name, in_tensor_name, out_tensor_name = exporter.export_to_etlt(
model_path, target_opset=10)
# Test ONNX export.
onnx_output_path = os.path.join(tmpdir, 'output_onnx')
onnx_engine_file_name = onnx_output_path
exporter.backend = 'onnx'
exporter.data_type = 'fp32'
exporter.export(input_dims=None,
output_filename=onnx_output_path,
backend='onnx',
target_opset=10,
tmp_file_name=tmp_file_name,
in_tensor_name=in_tensor_name,
out_tensor_name=out_tensor_name,
engine_file_name=onnx_engine_file_name)
assert os.path.isfile(onnx_output_path)
# Test UFF export.
uff_output_path = os.path.join(tmpdir, 'output_uff')
uff_engine_file_name = uff_output_path
exporter.backend = 'uff'
exporter.export(input_dims=None,
output_filename=uff_output_path,
backend='onnx',
engine_file_name=uff_engine_file_name,
target_opset=10,
tmp_file_name=tmp_file_name,
in_tensor_name=in_tensor_name,
out_tensor_name=out_tensor_name)
assert os.path.isfile(uff_output_path)
# Test int8 export.
int_eight_output_path = os.path.join(tmpdir, 'int_eight_output')
exporter.export(input_dims=None,
output_filename=int_eight_output_path,
backend='onnx',
target_opset=10,
tmp_file_name=tmp_file_name,
in_tensor_name=in_tensor_name,
out_tensor_name=out_tensor_name,
engine_file_name=int_eight_output_path,
tmp_dir=tmpdir)
assert os.path.isfile(int_eight_output_path)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/core/export/test_base_exporter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common export functions for DriveIX."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/core/export/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class to export trained .tlt keras models to etlt file, do int8 calibration etc."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import abstractmethod
import json
import logging
import os
import random
import struct
try:
import tensorrt # noqa pylint: disable=W0611 pylint: disable=W0611
from nvidia_tao_tf1.cv.common.export.tensorfile import TensorFile
from nvidia_tao_tf1.cv.common.export.tensorfile_calibrator import TensorfileCalibrator
from nvidia_tao_tf1.cv.common.export.trt_utils import (
NV_TENSORRT_MAJOR,
NV_TENSORRT_MINOR,
NV_TENSORRT_PATCH
)
except Exception as e:
logger = logging.getLogger(__name__)
logger.warning(
"Failed to import TensorRT package, exporting TLT to a TensorRT engine "
"will not be available."
)
import keras
import numpy as np
from PIL import Image
from six.moves import xrange
from tqdm import tqdm
from nvidia_tao_tf1.cv.common.utilities.tlt_utils import load_model, save_exported_file # noqa pylint: disable=C0412
# Define valid backend available for the exporter.
VALID_BACKEND = ["uff", "onnx", "tfonnx"]
logger = logging.getLogger(__name__)
class BaseExporter(object):
"""Base class for exporter."""
def __init__(self,
model_path=None,
key=None,
data_type="fp32",
strict_type=False,
backend="onnx",
data_format="channels_first",
**kwargs):
"""Initialize the base exporter.
Args:
model_path (str): Path to the model file.
key (str): Key to load the model.
            data_type (str): TensorRT backend data type.
strict_type(bool): Apply TensorRT strict_type_constraints or not for INT8 mode.
backend (str): TensorRT parser to be used.
data_format (str): Channel Ordering, channels_first(NCHW) or channels_last (NHWC)
Returns:
None.
"""
self.data_type = data_type
self.strict_type = strict_type
self.model_path = model_path
# if key is str, it will be converted to bytes in nvidia_tao_tf1.encoding
self.key = key
self.set_backend(backend)
self.data_format = data_format
self.tensor_scale_dict = None
self._trt_version_number = NV_TENSORRT_MAJOR * 1000 + NV_TENSORRT_MINOR * 100 + \
NV_TENSORRT_PATCH
def set_session(self):
"""Set keras backend session."""
raise NotImplementedError("To be implemented by the class being used.")
def set_keras_backend_dtype(self):
"""Set the keras backend data type."""
raise NotImplementedError(
"To be implemented by the class being used.")
@abstractmethod
def set_input_output_node_names(self):
"""Set input output node names."""
raise NotImplementedError(
"This function is not implemented in the base class.")
def extract_tensor_scale(self, model, backend):
"""Extract tensor scale from QAT trained model and de-quantize the model."""
raise NotImplementedError(
"This function is not implemented in the base class.")
def set_backend(self, backend):
"""Set keras backend.
Args:
            backend (str): Backend to be used.
                Must be one of "uff", "onnx" or "tfonnx".
"""
if backend not in VALID_BACKEND:
raise NotImplementedError(
'Invalid backend "{}" called'.format(backend))
self.backend = backend
def load_model(self, model_path):
"""Simple function to get the keras model.
Args:
model_path (str): Path to encrypted keras model
Returns:
model (keras.models.Model): Loaded model
"""
keras.backend.set_learning_phase(0)
return load_model(model_path, key=self.key)
def save_exported_file(self,
model,
output_filename,
custom_objects=None,
output_node_names=None,
target_opset=None,
delete_tmp_file=True):
"""Save the exported model file.
This routine converts a keras model to onnx/uff model
based on the backend the exporter was initialized with.
Args:
model (keras.models.Model): Keras model to be saved.
output_filename (str): Output .etlt filename
custom_objects (dict): Custom keras objects if any.
output_node_names (list): List of names of model output node.
target_opset (int): Target opset version to use for onnx conversion.
delete_tmp_file (bool): Option to toggle deletion of
temporary unencrypted backend (onnx/uff/..) file
Returns:
tmp_file_name (str): Temporary unencrypted backend file
in_tensor_names (list): List of input tensor names
out_tensor_names (list): List of output tensor names
"""
tmp_file_name, in_tensor_names, out_tensor_names = save_exported_file(
model,
output_filename,
self.key,
backend=self.backend,
custom_objects=custom_objects,
output_node_names=output_node_names,
target_opset=target_opset,
logger=logger,
delete_tmp_file=delete_tmp_file
)
return tmp_file_name, in_tensor_names, out_tensor_names
def get_calibrator(self,
calibration_cache,
data_file_name,
n_batches,
batch_size,
input_dims,
calibration_images_dir=None,
preprocessing_params=None):
"""Simple function to get an int8 calibrator.
Args:
calibration_cache (str): Path to store the int8 calibration cache file.
            data_file_name (str): Path to the TensorFile. If the tensorfile doesn't exist
                at this path, one is created, either from n_batches of random tensors or
                from the images in calibration_images_dir, with dimensions
                (batch_size,) + (input_dims).
n_batches (int): Number of batches to calibrate the model over.
batch_size (int): Number of input tensors per batch.
input_dims (tuple): Tuple of input tensor dimensions in CHW order.
calibration_images_dir (str): Path to a directory of images to generate the
data_file from.
preprocessing_params (dict): Normalization parameters including mean, scale
and flip_channel keys.
Returns:
calibrator (nvidia_tao_tf1.cv.common.export.base_calibrator.TensorfileCalibrator):
TRTEntropyCalibrator2 instance to calibrate the TensorRT engine.
"""
if not os.path.exists(data_file_name):
self.generate_tensor_file(data_file_name,
calibration_images_dir,
input_dims,
n_batches=n_batches,
batch_size=batch_size,
preprocessing_params=preprocessing_params)
calibrator = TensorfileCalibrator(data_file_name,
calibration_cache,
n_batches,
batch_size)
return calibrator
def _calibration_cache_from_dict(self, tensor_scale_dict,
calibration_cache=None,
calib_json=None):
"""Write calibration cache file for QAT model.
This function converts a tensor scale dictionary generated by processing
QAT models to TRT readable format. By default we set it as a
        trt.IInt8EntropyCalibrator2 cache file.
Args:
            tensor_scale_dict (dict): Dictionary mapping tensor names to scale values.
calibration_cache (str): Path to output calibration cache file.
Returns:
No explicit returns.
"""
if calibration_cache is not None:
cal_cache_str = "TRT-{}-EntropyCalibration2\n".format(
self._trt_version_number)
assert not os.path.exists(calibration_cache), (
"A pre-existing cache file exists. Please delete this "
"file and re-run export."
)
# Converting float numbers to hex representation.
for tensor in tensor_scale_dict:
scaling_factor = tensor_scale_dict[tensor] / 127.0
cal_scale = hex(struct.unpack(
"i", struct.pack("f", scaling_factor))[0])
assert cal_scale.startswith(
"0x"), "Hex number expected to start with 0x."
cal_scale = cal_scale[2:]
cal_cache_str += tensor + ": " + cal_scale + "\n"
with open(calibration_cache, "w") as f:
f.write(cal_cache_str)
if calib_json is not None:
calib_json_data = {"tensor_scales": {}}
for tensor in tensor_scale_dict:
calib_json_data["tensor_scales"][tensor] = float(
tensor_scale_dict[tensor])
with open(calib_json, "w") as outfile:
json.dump(calib_json_data, outfile, indent=4)
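    # A minimal sketch (not part of the original API) of how one cache-body
    # line is produced by _calibration_cache_from_dict above: the per-tensor
    # scale is divided by 127 and written as the hex form of its IEEE-754
    # float32 bits. The tensor name and scale below are hypothetical.
    @staticmethod
    def _example_cache_entry(tensor_name="conv1/Relu", tensor_scale=2.54):
        """Illustrative only: build one line of the calibration cache body."""
        scaling_factor = tensor_scale / 127.0
        cal_scale = hex(struct.unpack("i", struct.pack("f", scaling_factor))[0])[2:]
        return tensor_name + ": " + cal_scale + "\n"  # e.g. 'conv1/Relu: 3ca3d70a\n'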
def set_data_preprocessing_parameters(self, input_dims, preprocessing_params):
"""Set data pre-processing parameters for the int8 calibration.
--> preprocessed_data = (input - means) * scale
Args:
input_dims (list): Input dims with channels_first(CHW) or channels_last (HWC)
preprocessing_params (dict): Includes `means`, `scale` and `flip_channel`
params used for preprocessing for int8 calibration.
"""
if self.data_format == "channels_first":
num_channels = input_dims[0]
else:
num_channels = input_dims[-1]
if num_channels == 3:
if not preprocessing_params:
logger.warning("Using default preprocessing_params!")
means = [128.0, 128.0, 128.0]
scale = [1. / 256.0, 1. / 256.0, 1. / 256.0]
flip_channel = False
else:
means = preprocessing_params['means']
scale = preprocessing_params['scale']
flip_channel = preprocessing_params['flip_channel']
assert len(means) == 3, "Image mean should have 3 values for RGB inputs."
elif num_channels == 1:
if not preprocessing_params:
logger.warning("Using default preprocessing_params!")
means = [128.0]
scale = [1. / 256.0]
else:
means = preprocessing_params['means']
scale = preprocessing_params['scale']
                assert len(means) == 1, "Image mean should have 1 value for grayscale inputs."
flip_channel = False
else:
raise NotImplementedError(
"Invalid number of dimensions {}.".format(num_channels))
self.preprocessing_arguments = {"scale": scale,
"means": means,
"flip_channel": flip_channel}
def generate_tensor_file(self,
data_file_name,
calibration_images_dir,
input_dims,
n_batches=10,
batch_size=1,
preprocessing_params=None):
"""Generate calibration Tensorfile for int8 calibrator.
This function generates a calibration tensorfile from a directory of images, or dumps
n_batches of random numpy arrays of shape (batch_size,) + (input_dims).
Args:
data_file_name (str): Path to the output tensorfile to be saved.
calibration_images_dir (str): Path to the images to generate a tensorfile from.
input_dims (list): Input shape in CHW order.
n_batches (int): Number of batches to be saved.
batch_size (int): Number of images per batch.
preprocessing_params (dict): Includes `means`, `scale` and `flip_channel`
params used for preprocessing for int8 calibration.
Returns:
No explicit returns.
"""
if not os.path.exists(calibration_images_dir):
logger.info("Generating a tensorfile with random tensor images. This may work well as "
"a profiling tool, however, it may result in inaccurate results at "
"inference. Please generate a tensorfile using the tlt-int8-tensorfile, "
"or provide a custom directory of images for best performance.")
self.generate_random_tensorfile(data_file_name,
input_dims,
n_batches=n_batches,
batch_size=batch_size)
else:
# Preparing the list of images to be saved.
num_images = n_batches * batch_size
valid_image_ext = ['jpg', 'jpeg', 'png']
image_list = [os.path.join(calibration_images_dir, image)
for image in os.listdir(calibration_images_dir)
if image.split('.')[-1] in valid_image_ext]
if len(image_list) < num_images:
raise ValueError('Not enough number of images provided:'
' {} < {}'.format(len(image_list), num_images))
image_idx = random.sample(xrange(len(image_list)), num_images)
self.set_data_preprocessing_parameters(input_dims, preprocessing_params)
# Set input dims
if self.data_format == "channels_first":
channels, image_height, image_width = input_dims
else:
image_height, image_width, channels = input_dims
# Writing out processed dump.
with TensorFile(data_file_name, 'w') as f:
for chunk in tqdm(image_idx[x:x+batch_size] for x in xrange(0, len(image_idx),
batch_size)):
dump_data = self.prepare_chunk(chunk, image_list,
image_width=image_width,
image_height=image_height,
channels=channels,
batch_size=batch_size,
data_format=self.data_format,
**self.preprocessing_arguments)
f.write(dump_data)
@staticmethod
def generate_random_tensorfile(data_file_name, input_dims, n_batches=1, batch_size=1):
"""Generate a random tensorfile.
This function generates a random tensorfile containing n_batches of random np.arrays
of dimensions (batch_size,) + (input_dims).
Args:
data_file_name (str): Path to where the data tensorfile will be stored.
input_dims (tuple): Input blob dimensions in CHW order.
n_batches (int): Number of batches to save.
batch_size (int): Number of images per batch.
Return:
No explicit returns.
"""
sample_shape = (batch_size, ) + tuple(input_dims)
with TensorFile(data_file_name, 'w') as f:
for i in tqdm(xrange(n_batches)):
logger.debug("Writing batch: {}".format(i))
dump_sample = np.random.sample(sample_shape)
f.write(dump_sample)
@staticmethod
def prepare_chunk(image_ids,
image_list,
image_width=480,
image_height=272,
channels=3,
scale=1.0,
means=None,
flip_channel=False,
batch_size=1,
data_format="channels_first"):
"""Prepare a single batch of data to dump into a Tensorfile.
1. Convert data to desired size and color order
2. If `flip_channel`, switch channel ordering (RGB -> BGR)
3. Normalize data using means and scale
--> preprocessed_data = (input - mean) * scale
4. Batch up the processed images in chunks of size batchsize
Args:
image_ids (list): Image indexes to read as part of current chunk
image_list (list): List of all image paths
image_width (int): Input width to use for the model
image_height (int): Input height to use for the model
channels (int): Input channels to use for the model
scale (float/list): Scaling param used for image normalization
means (float/list): Offset param used for image normalization
flip_channel (bool): If True, converts image from RGB -> BGR
            batch_size (int): Batch size of current data chunk
data_format (str): Channel Ordering `channels_first`(NCHW) or `channels_last`(NHWC)
Returns:
dump_placeholder (np.ndarray): Preprocessed data chunk.
"""
if data_format == "channels_first":
dump_placeholder = np.zeros(
(batch_size, channels, image_height, image_width))
else:
dump_placeholder = np.zeros(
(batch_size, image_height, image_width, channels))
for i in xrange(len(image_ids)):
idx = image_ids[i]
im = Image.open(image_list[idx]).resize((image_width, image_height),
Image.ANTIALIAS)
if channels == 1:
logger.debug("Converting image from RGB to Grayscale")
if im.mode in ('RGBA', 'LA') or (im.mode == 'P' and 'transparency' in im.info):
bg_colour = (255, 255, 255)
# Need to convert to RGBA if LA format due to a bug in PIL
alpha = im.convert('RGBA').split()[-1]
                    # Create a new background image of our matte color.
                    # Must be RGBA because paste requires both images have the same format.
bg = Image.new("RGBA", im.size, bg_colour + (255,))
bg.paste(im, mask=alpha)
im = im.convert('L')
dump_input = np.asarray(im).astype(np.float32)
dump_input = dump_input[:, :, np.newaxis]
elif channels == 3:
dump_input = np.asarray(im.convert('RGB')).astype(np.float32)
else:
raise NotImplementedError("Unsupported channel dimensions.")
# flip channel: RGB --> BGR
if flip_channel:
dump_input = dump_input[:, :, ::-1]
# Normalization ---
# --> preprocessed_data = (input - mean) * scale
# means is a list of per-channel means, (H, W, C) - (C)
if means is not None:
dump_input -= np.array(means)
if data_format == "channels_first":
scale = np.asarray(scale)
num_channels = scale.shape[0]
if num_channels > 1:
scale = np.reshape(scale, (num_channels, 1, 1))
# (H, W, C) --> (C, H, W)
dump_input = dump_input.transpose(2, 0, 1) * scale
else:
dump_input = dump_input * scale
dump_placeholder[i, :, :, :] = dump_input
return dump_placeholder
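    # A hedged usage sketch (not part of the original API): prepare_chunk turns
    # image indices plus a path list into one normalized batch array. The image
    # path below is a hypothetical grayscale image on disk.
    @staticmethod
    def _example_prepare_chunk_usage():
        """Illustrative only: build a single-image grayscale NCHW batch."""
        image_list = ["/tmp/face_0.png"]  # hypothetical image on disk
        batch = BaseExporter.prepare_chunk([0], image_list,
                                           image_width=80,
                                           image_height=80,
                                           channels=1,
                                           scale=[1. / 256.0],
                                           means=[128.0],
                                           batch_size=1,
                                           data_format="channels_first")
        return batch.shape  # (1, 1, 80, 80)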
def get_input_dims(self, data_file_name=None, model=None):
"""Simple function to get input layer dimensions.
Args:
data_file_name (str): Path to the calibration tensor file.
model (keras.models.Model): Keras model object.
Returns:
input_dims (list): Input dimensions in CHW order.
"""
if not os.path.exists(data_file_name):
logger.debug(
"Data file doesn't exist. Pulling input dimensions from the network.")
input_dims = self.get_input_dims_from_model(model)
else:
# Read the input dims from the Tensorfile.
logger.debug("Reading input dims from tensorfile.")
with TensorFile(data_file_name, "r") as tfile:
batch = tfile.read()
# Disabling pylint for py3 in this line due to a pylint issue.
# Follow issue: https://github.com/PyCQA/pylint/issues/3139
# and remove when ready.
input_dims = np.array(batch).shape[1:] # pylint: disable=E1136
return input_dims
@staticmethod
def get_input_dims_from_model(model=None):
"""Read input dimensions from the model.
Args:
model (keras.models.Model): Model to get input dimensions from.
Returns:
input_dims (tuple): Input dimensions.
"""
if model is None:
raise IOError("Invalid model object.")
input_dims = model.layers[0].input_shape[1:]
return input_dims
@abstractmethod
def export(self, output_file_name, backend,
calibration_cache="", data_file_name="",
n_batches=1, batch_size=1, verbose=True,
calibration_images_dir="", save_engine=False,
engine_file_name="", max_workspace_size=1 << 30,
max_batch_size=1, force_ptq=False):
"""Simple function to export a model.
        This function first converts a keras graph to uff/onnx and then saves it to an etlt
        file. After that, it verifies the parsability of the etlt file by creating a TensorRT
        engine of the desired backend datatype.
Args:
output_file_name (str): Path to the output etlt file.
            backend (str): Backend parser to be used ("uff" or "onnx").
calibration_cache (str): Path to the output calibration cache file.
data_file_name (str): Path to the data tensorfile for int8 calibration.
n_batches (int): Number of batches to calibrate model for int8 calibration.
batch_size (int): Number of images per batch.
verbose (bool): Flag to set verbose logging.
calibration_images_dir (str): Path to a directory of images for custom data
to calibrate the model over.
save_engine (bool): Flag to save the engine after training.
engine_file_name (str): Path to the engine file name.
force_ptq (bool): Flag to force post training quantization using TensorRT
for a QAT trained model. This is required iff the inference platform is
a Jetson with a DLA.
Returns:
No explicit returns.
"""
raise NotImplementedError("Base Class doesn't implement this method.")
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/core/export/base_exporter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DriveIX FpeNet module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nvidia_tao_tf1.cv.fpenet import dataloader
from nvidia_tao_tf1.cv.fpenet import evaluation
from nvidia_tao_tf1.cv.fpenet import losses
from nvidia_tao_tf1.cv.fpenet import models
from nvidia_tao_tf1.cv.fpenet import trainers
__all__ = (
'dataloader',
'evaluation',
'losses',
'models',
'trainers',
)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License."""
"""Defines FpeNet Visualization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nvidia_tao_tf1.cv.fpenet.visualization.fpenet_visualization import FpeNetVisualizer
__all__ = (
'FpeNetVisualizer',
)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/visualization/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for FpeNet Visualization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from nvidia_tao_tf1.cv.fpenet.visualization.fpenet_visualization import FpeNetVisualizer
def test_visualize(tmpdir):
"""Test FpeNetVisualizer call."""
model_id = "model_42"
checkpoint_dir = os.path.join(str(tmpdir), model_id)
visualizer = FpeNetVisualizer(checkpoint_dir,
num_images=3)
image = tf.random.uniform(shape=[4, 1, 80, 80])
    kpts = tf.ones((4, 80, 2))  # batch_size, num_keypoints, num_dim (x, y)
visualizer.visualize_images(image, kpts)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/visualization/test_fpenet_visualization.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FpeNet Image Visualization utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import cv2
import numpy as np
import tensorflow as tf
class FpeNetVisualizer(object):
"""FpeNetVisualizer object."""
def __init__(self, checkpoint_dir, num_images=3):
"""Instantiate FpeNetVisualizer object.
Args:
checkpoint_dir (str): Path to directory containing checkpoints.
num_images (int): Number of data images to show on Tensorboard.
"""
if num_images < 0:
raise ValueError("FpeNetVisualizer.num_images ({}) is "
"not positive.".format(num_images))
self._checkpoint_dir = os.path.join(checkpoint_dir, 'visualization')
self._model_id = os.path.basename(os.path.normpath(checkpoint_dir))
self._num_images = int(num_images)
def visualize_images(self,
image,
kpts,
name='input_image',
data_format='channels_first',
viz_phase='training'):
"""Add a 4D tensor to Tensorboard.
Args:
image (Tensor): 4D tensor in NCHW or NHWC format.
kpts (Tensor): 3D tensor of keypoints in (batch x num_keypoints x 2).
name (str): Image name. Default is 'input_image'.
            data_format (string): Format of the input values.
                Must be either 'channels_first' (NCHW) or 'channels_last' (NHWC).
            viz_phase (str): Phase tag ('training'/'validation') appended to the
                summary names.
        """
def _visualize_image(image, kpts):
# Do the actual drawing in python
image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
for kpt in kpts:
x = int(kpt[0])
y = int(kpt[1])
cv2.circle(image, (x, y), 2, (0, 0, 255), -1)
image = np.expand_dims(image, axis=0)
return image
if self._num_images > 0:
image = tf.cast(tf.clip_by_value(image, 0., 255.), tf.uint8)
# Images must be in NHWC format. Convert as needed.
if data_format == 'channels_first':
image = tf.transpose(image, (0, 2, 3, 1))
image_kpts = tf.compat.v1.py_func(_visualize_image, [image[0], kpts[0]], tf.uint8)
tf.summary.image("kpts_image_" + viz_phase, image_kpts)
tf.summary.image(name+'_face_' + viz_phase, image, self._num_images)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/visualization/fpenet_visualization.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loss functions used by FpeNet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import keras.backend as K
import tensorflow as tf
from nvidia_tao_tf1.blocks.losses.loss import Loss
from nvidia_tao_tf1.core.coreobject import save_args
from nvidia_tao_tf1.cv.fpenet.dataloader.fpenet_dataloader import (
build_augmentation_config,
get_all_transformations_matrices,
get_transformation_ops
)
class FpeLoss(Loss):
"""Loss functions used by FpeNet."""
@save_args
def __init__(self,
loss_type='l1',
kpts_coeff=0.01,
weights_dict=None,
mask_occ=False,
elt_loss_info=None,
**kwargs):
"""Init function.
Args:
loss_type (str): Type of loss to use ('l1', 'square_euclidean', 'wing_loss').
            kpts_coeff (float): Coefficient the loss is multiplied with.
weights_dict (dict of floats): Contains the weights for the 'eyes',
the 'mouth', and the rest of the 'face'. These dict keys must be
present, and the elements must sum up to 1.
mask_occ (Boolean): If True, will mask all occluded points.
elt_loss_info (dict): Dictionary about ELT loss from experiment spec.
elt_alpha (float): Weight for ELT loss.
enable_elt_loss (Bool): Flag to enable ELT loss.
modulus_spatial_augmentation: Augmentation config.
Raises:
ValueError: If loss type is not a supported type (not 'l1',
'square_euclidean' or 'wing_loss').
"""
super(FpeLoss, self).__init__(**kwargs)
self.kpts_coeff = kpts_coeff
if weights_dict:
assert type(
weights_dict) == dict, 'Please provide a dict type object.'
assert sum(weights_dict.values()) == 1.0,\
'The sum of all class weights must be exactly 1.0.'
assert all(key in weights_dict for key in ('face', 'eyes', 'mouth')),\
'Please provide the correct dict entries as float values.'
self.weights_dict = weights_dict
self.mask_occ = mask_occ
self.elt_loss_info = elt_loss_info
self.loss_type = loss_type
if loss_type == 'l1':
self.loss_func = self.l1_loss
elif loss_type == 'square_euclidean':
self.loss_func = self.sqeuclidean_loss
elif loss_type == 'wing_loss':
self.loss_func = self.wing_loss
else:
raise ValueError('%s loss type not supported.' % loss_type)
def __call__(self,
y_true,
y_pred,
occ_true,
occ_masking_info,
num_keypoints=80,
loss_name='landmarks'):
"""Model loss __call__ method.
Args:
y_true (tensor): The real ground truth value.
y_pred (tensor): The predicted value.
occ_true (tensor): Ground truth occlusions value.
occ_masking_info (tensor): Ground truth to configure masking or
no masking per set data.
num_keypoints (int): Total number of keypoints.
loss_name (str): String for loss type to add to logging.
Returns:
            loss (tuple): Tuple of (total_loss, mouth_loss, eyes_loss) scalar tensors.
"""
        # Modify occlusions based on occ_masking_info:
        # samples flagged 1.0 are exempt from masking, samples flagged 0.0 are not,
        # so occ for all samples that must not be masked is forced to 1.0.
occ_true_masked = tf.maximum(occ_true,
tf.transpose(
tf.tile([occ_masking_info],
[num_keypoints, 1])
)
)
# Mask occluded points
if self.mask_occ:
y_true = tf.multiply(y_true,
tf.expand_dims(occ_true_masked, 2))
y_pred = tf.multiply(y_pred,
tf.expand_dims(occ_true_masked, 2))
# Compute total loss.
loss = self.loss_weighted(y_true, y_pred, self.kpts_coeff,
self.loss_func, self.loss_type,
self.weights_dict, num_keypoints, loss_name)
return loss
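    # A minimal NumPy sketch (not part of the original API) of the masking rule
    # in __call__ above: a sample whose occ_masking_info is 1.0 has all of its
    # per-keypoint occlusion flags forced to 1.0, so none of its points are
    # masked out; a sample with 0.0 keeps its original flags.
    @staticmethod
    def _example_occlusion_masking():
        """Illustrative only: 2 samples x 3 keypoints."""
        import numpy as np
        occ_true = np.array([[0., 1., 0.],
                             [0., 1., 0.]])
        occ_masking_info = np.array([0., 1.])
        occ_true_masked = np.maximum(occ_true, occ_masking_info[:, None])
        return occ_true_masked  # [[0., 1., 0.], [1., 1., 1.]]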
@staticmethod
def l1_loss(y_true, y_pred, kpts_coeff):
"""Compute l1 loss.
Args:
y_true (tensor): The real ground truth value.
y_pred (tensor): The predicted value.
            kpts_coeff (float): Coefficient the loss is multiplied with
                (dummy value here, kept so all loss functions share a signature).
Returns:
loss (tensor): A scalar l1 loss computed with y_true and y_pred.
"""
loss = K.mean(K.sum(K.abs(y_true - y_pred), axis=0))
return loss
@staticmethod
def sqeuclidean_loss(y_true, y_pred, kpts_coeff):
"""Compute squared euclidean distance.
Args:
y_true (tensor): The real ground truth value.
y_pred (tensor): The predicted value.
            kpts_coeff (float): Coefficient the loss is multiplied with.
Returns:
loss (tensor): A scalar distance error computed with y_true and y_pred.
"""
loss = kpts_coeff * K.mean(K.sum(K.square(y_true - y_pred), axis=0))
return loss
@staticmethod
def wing_loss(y_true, y_pred, kpts_coeff):
"""
        Compute wing loss as described in the paper below.
http://openaccess.thecvf.com/content_cvpr_2018/papers/Feng_Wing_Loss_for_CVPR_2018_paper.pdf.
Args:
y_true (tensor): The real ground truth value.
y_pred (tensor): The predicted value.
            kpts_coeff (float): Coefficient the loss is multiplied with.
Returns:
loss (tensor): A scalar distance error computed with y_true and y_pred.
"""
# non-negative w sets the range of the nonlinear part to (−w, w)
w = 10.0
# epsilon limits the curvature of the nonlinear region
epsilon = 2.0
x = y_true - y_pred
c = w * (1.0 - tf.math.log(1.0 + w/epsilon))
absolute_x = tf.abs(x)
losses = tf.where(
tf.greater(w, absolute_x),
w * tf.math.log(1.0 + absolute_x/epsilon),
absolute_x - c
)
wing_loss = tf.reduce_mean(tf.reduce_sum(losses, axis=[1, 2]), axis=0)
loss = kpts_coeff * wing_loss
return loss
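    # A minimal numeric sketch (not part of the original API) of the piecewise
    # wing loss above with w=10.0 and epsilon=2.0: small residuals fall on the
    # logarithmic branch, large residuals on the shifted-L1 branch.
    @staticmethod
    def _example_wing_loss_branches():
        """Illustrative only: evaluate both wing-loss branches for one point."""
        import math
        w, epsilon = 10.0, 2.0
        c = w * (1.0 - math.log(1.0 + w / epsilon))   # ~ -7.918
        small = w * math.log(1.0 + 1.0 / epsilon)     # |x| = 1  -> ~4.055
        large = 20.0 - c                              # |x| = 20 -> ~27.918
        return small, large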
@staticmethod
def loss_weighted(y_true,
y_pred,
kpts_coeff,
loss_func,
loss_type,
weights_dict=None,
num_keypoints=80,
loss_name='landmarks'):
"""Compute squared euclidean distance.
Args:
y_true (tensor): The real ground truth value.
y_pred (tensor): The predicted value.
            kpts_coeff (float): Coefficient the loss is multiplied with.
            loss_func (callable): Loss function applied to each facial region.
            loss_type (string): Type of loss - 'l1', 'square_euclidean' or 'wing_loss'.
weights_dict (dict of floats): Contains the weights for the 'eyes', the 'mouth',
the 'pupil' and the rest of the 'face'. These dict keys must be present,
and the elements must sum up to 1.
ordering of points listed here-
https://docs.google.com/document/d/13q8NciZtGyx5TgIgELkCbXGfE7PstKZpI3cENBGWkVw/edit#
num_keypoints (int): Number of facial keypoints for computing the loss.
loss_name (str): String for loss type to add to logging.
Returns:
loss (tensor): A scalar distance error computed with y_true and y_pred.
mouth_loss (tensor): Loss for the mouth only.
eyes_loss (tensor): Loss for the eyes only.
"""
# Loss for all key points except for those on the mouth and the eyelids:
eyelids_start_idx = 36
face_true = y_true[:, 0:eyelids_start_idx, :]
face_pred = y_pred[:, 0:eyelids_start_idx, :]
face_loss = loss_func(face_true, face_pred, kpts_coeff)
# Loss for 6 keypoints eyelids on each eye:
eyelids_nkpts = 6 * 2
eyelids_end_idx = eyelids_start_idx + eyelids_nkpts
eyelids_true = y_true[:, eyelids_start_idx:eyelids_end_idx, :]
eyelids_pred = y_pred[:, eyelids_start_idx:eyelids_end_idx, :]
eyes_loss = loss_func(eyelids_true, eyelids_pred, kpts_coeff)
# Loss for all keypoints on the mouth:
mouth_start_idx = eyelids_end_idx
mouth_end_idx = 68
mouth_true = y_true[:, mouth_start_idx:mouth_end_idx, :]
mouth_pred = y_pred[:, mouth_start_idx:mouth_end_idx, :]
mouth_loss = loss_func(mouth_true, mouth_pred, kpts_coeff)
# More facial points with 80 keypoints
if (num_keypoints == 80):
# Loss for pupils points
pupils_start_idx = mouth_end_idx
pupils_end_idx = 76
pupils_true = y_true[:, pupils_start_idx:pupils_end_idx, :]
pupils_pred = y_pred[:, pupils_start_idx:pupils_end_idx, :]
pupils_loss = loss_func(pupils_true, pupils_pred, kpts_coeff)
eyes_loss = eyes_loss + pupils_loss
# Loss on remaining 4 ear points
ears_start_idx = pupils_end_idx
ears_end_idx = 80
ears_true = y_true[:, ears_start_idx:ears_end_idx, :]
ears_pred = y_pred[:, ears_start_idx:ears_end_idx, :]
ears_loss = loss_func(ears_true, ears_pred, kpts_coeff)
face_loss = face_loss + ears_loss
if weights_dict:
tf.compat.v1.summary.scalar(
name=str('%s_face_loss' % loss_type), tensor=face_loss)
tf.compat.v1.summary.scalar(
name=str('%s_eyelids_loss' % loss_type), tensor=eyes_loss)
tf.compat.v1.summary.scalar(
name=str('%s_mouth_loss' % loss_type), tensor=mouth_loss)
loss = (weights_dict['face'] * face_loss +
weights_dict['eyes'] * eyes_loss +
weights_dict['mouth'] * mouth_loss)
else:
loss = loss_func(y_true, y_pred, kpts_coeff)
net_loss_name = str('%s_net_loss' % loss_type)
if weights_dict:
net_loss_name = str('weighted_%s' % net_loss_name)
if loss_name == 'elt':
net_loss_name = str('elt_%s' % net_loss_name)
tf.compat.v1.summary.scalar(name=net_loss_name, tensor=loss)
return loss, mouth_loss, eyes_loss
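    # A hedged reference sketch (not part of the original API) of the keypoint
    # index layout that loss_weighted above slices on; ranges are end-exclusive
    # and the region names follow the docstring above.
    @staticmethod
    def _example_keypoint_regions(num_keypoints=80):
        """Illustrative only: map region names to index ranges."""
        regions = {
            'face': (0, 36),
            'eyelids': (36, 48),   # 6 points per eye
            'mouth': (48, 68),
        }
        if num_keypoints == 80:
            regions['pupils'] = (68, 76)
            regions['ears'] = (76, 80)
        return regions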
class FpeNetEltLoss(FpeLoss):
"""
ELT loss used by FpeNet.
Defined in- "Improving Landmark Localization with Semi-Supervised Learning"
CVPR'2018
"""
@save_args
def __init__(self,
elt_loss_info,
image_height=80,
image_width=80,
image_channel=1,
num_keypoints=80,
**kwargs):
"""Init function.
Args:
elt_loss_info (dict): Information on ELT params.
elt_alpha (float): Weight for ELT loss.
enable_elt_loss (Bool): Flag to enable ELT loss.
modulus_spatial_augmentation: Augmentation config.
image_height (int): Image height.
image_width (int): Image width.
image_channel (int): Number of image channels.
num_keypoints (int): Number of facial keypoints.
Returns:
None
"""
self.enable_elt_loss = elt_loss_info['enable_elt_loss']
self.elt_alpha = elt_loss_info['elt_alpha']
self.image_width = image_width
self.image_height = image_height
self.image_channel = image_channel
self.num_keypoints = num_keypoints
augmentation_info = elt_loss_info['modulus_spatial_augmentation']
self.augmentation_config = build_augmentation_config(augmentation_info)
frame_shape = [self.image_height, self.image_width, self.image_channel]
frame_shape = map(float, frame_shape)
self._stm_op, self._ctm_op, self._blur_op, \
self._gamma_op, self._shift_op = \
get_transformation_ops(self.augmentation_config, frame_shape)
def transform_images(self, images):
"""Transforms the images with a random affine transformation.
Args:
images (Tensor): Decoded input images of shape (NCHW).
Returns:
transformed_images (Tensor): Transformed images tensor.
sm (Tensor): 3x3 spatial transformation/augmentation matrix.
"""
        # Get spatial transformation matrix.
sm, _ = get_all_transformations_matrices(self.augmentation_config,
self.image_height,
self.image_width,
enable_augmentation=True)
# Apply augmentations to frame tensors.
transformed_images = []
for i in range(images.shape[0]):
transformed_image = self._apply_augmentations_to_frame(images[i, :, :, :], sm)
transformed_images.append(tf.transpose(transformed_image, perm=[2, 0, 1]))
transformed_images = tf.stack(transformed_images)
# return transformed images and transform matrix
return(transformed_images, sm)
def transform_points(self, ground_truth_labels, sm):
"""
        Transforms the (x,y) keypoints using a given transformation matrix.
Args:
ground_truth_labels (Tensor) : a matrix of key_point locations (N x num_kpts x 2)
sm (Tensor): 3x3 spatial transformation/augmentation matrix.
Returns:
kpts_norm (Tensor): Transformed points matrix of key_point locations (N x num_kpts x 2).
"""
kpts_norm = []
for i in range(ground_truth_labels.shape[0]):
kpts_norm.append(self._apply_augmentations_to_kpts(ground_truth_labels[i, :, :], sm))
kpts_norm = tf.stack(kpts_norm)
return(kpts_norm)
def _apply_augmentations_to_frame(self, input_tensor, sm):
"""
Apply spatial and color transformations to an image.
Spatial transform op maps destination image pixel P into source image location Q
by matrix M: Q = P M. Here we first compute a forward mapping Q M^-1 = P, and
finally invert the matrix.
Args:
input_tensor (Tensor): Input image frame tensors (HWC).
sm (Tensor): 3x3 spatial transformation/augmentation matrix.
Returns:
image (Tensor, CHW): Augmented input tensor.
"""
# Convert image to float if needed (stm_op requirement).
if input_tensor.dtype != tf.float32:
input_tensor = tf.cast(input_tensor, tf.float32)
dm = tf.matrix_inverse(sm)
# NOTE: Image and matrix need to be reshaped into a batch of one for this op.
# Apply spatial transformations.
input_tensor = tf.transpose(input_tensor, perm=[1, 2, 0])
image = self._stm_op(images=tf.stack([tf.image.grayscale_to_rgb(input_tensor)]),
stms=tf.stack([dm]))
image = tf.image.rgb_to_grayscale(image)
image = tf.reshape(image, [self.image_height, self.image_width,
self.image_channel])
return image
def _apply_augmentations_to_kpts(self, key_points, mapMatrix):
"""
Apply augmentation to keypoints.
        This method gets a matrix of keypoints and returns a matrix of
        their affine-transformed locations.
        Args:
            key_points: a matrix of key_point locations in the format (#key-points, 2)
            mapMatrix: 3x3 affine transformation matrix
Returns:
            A matrix of affine-transformed key_point locations in the
            format (#key-points, 2).
"""
kpts = tf.concat([tf.transpose(key_points),
tf.ones([1, self.num_keypoints],
dtype=tf.float32)], axis=0)
new_kpt_points = tf.matmul(tf.transpose(mapMatrix), kpts)
new_kpt_points = tf.slice(new_kpt_points, [0, 0], [2, -1])
return tf.transpose(new_kpt_points)
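# A minimal NumPy sketch (not part of the original module) of the homogeneous
# transform performed by _apply_augmentations_to_kpts above: points are lifted
# to (x, y, 1) and multiplied by the transposed 3x3 matrix, which in row-vector
# form is simply `points_h @ matrix`.
def _example_transform_points_numpy(points, matrix):
    """Illustrative only: apply a 3x3 affine matrix to (N, 2) points."""
    import numpy as np
    ones = np.ones((points.shape[0], 1), dtype=np.float32)
    points_h = np.concatenate([points, ones], axis=1)  # (N, 3)
    return points_h.dot(matrix)[:, :2]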
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/losses/fpenet_loss.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for FpeNet loss functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pytest
import tensorflow as tf
from nvidia_tao_tf1.cv.fpenet.losses.fpenet_loss import FpeLoss
def test_fpenet_loss_call():
"""
Test call function for loss computation and exception on unsupported loss_type.
"""
# create dummy data
y_true = tf.ones((4, 80, 2)) # batch_size, num_keypoints, num_dim (x, y)
y_pred = tf.zeros((4, 80, 2)) # batch_size, num_keypoints, num_dim (x, y)
occ_true = tf.ones((4, 80)) # batch_size, num_keypoints
occ_masking_info = tf.zeros((4)) # batch_size
# Test 1: 'unknown' loss_type
with pytest.raises(ValueError):
FpeLoss('unknown')
# Test 2: 'l1' loss_type
loss, _, __ = FpeLoss('l1')(y_true=y_true,
y_pred=y_pred,
occ_true=occ_true,
occ_masking_info=occ_masking_info)
with tf.Session() as sess:
loss_np = sess.run(loss)
expected_loss_l1 = 4.0
np.testing.assert_almost_equal(loss_np, expected_loss_l1, decimal=6)
# Test 3: 'square_euclidean'
loss, _, __ = FpeLoss('square_euclidean',
kpts_coeff=1.0)(y_true=y_true,
y_pred=y_pred,
occ_true=occ_true,
occ_masking_info=occ_masking_info)
with tf.Session() as sess:
loss_np = sess.run(loss)
expected_loss_squared_euc = 4.0
np.testing.assert_almost_equal(loss_np, expected_loss_squared_euc, decimal=6)
# Test 4: 'wing_loss'
loss, _, __ = FpeLoss('wing_loss',
kpts_coeff=0.01)(y_true=y_true,
y_pred=y_pred,
occ_true=occ_true,
occ_masking_info=occ_masking_info)
with tf.Session() as sess:
loss_np = sess.run(loss)
expected_loss_wing = 6.48744
np.testing.assert_almost_equal(loss_np, expected_loss_wing, decimal=6)
# Test 5: occlusion masking
occ_true = tf.zeros((4, 80)) # batch_size, num_keypoints
mask_occ = True
loss, _, __ = FpeLoss('l1',
mask_occ=mask_occ)(y_true=y_true,
y_pred=y_pred,
occ_true=occ_true,
occ_masking_info=occ_masking_info)
with tf.Session() as sess:
loss_np = sess.run(loss)
expected_loss_l1 = 0.0
np.testing.assert_almost_equal(loss_np, expected_loss_l1, decimal=6)
# Test 6: face region losses test
y_true = tf.concat([tf.ones((4, 40, 2)), tf.zeros((4, 40, 2))], axis=1)
occ_true = tf.ones((4, 80)) # batch_size, num_keypoints
face_loss, mouth_loss, eyes_loss = FpeLoss('l1')(y_true=y_true,
y_pred=y_pred,
occ_true=occ_true,
occ_masking_info=occ_masking_info)
with tf.Session() as sess:
loss_np_face, loss_np_eyes, loss_np_mouth = sess.run([face_loss, eyes_loss, mouth_loss])
expected_loss_l1_face = 2.0
expected_loss_l1_mouth = 0.0
expected_loss_l1_eyes = 1.3333334
np.testing.assert_almost_equal(loss_np_face, expected_loss_l1_face, decimal=6)
np.testing.assert_almost_equal(loss_np_eyes, expected_loss_l1_eyes, decimal=6)
np.testing.assert_almost_equal(loss_np_mouth, expected_loss_l1_mouth, decimal=6)
# Test 7: face dictionary weights test
y_true = tf.concat([tf.ones((4, 30, 2)), tf.zeros((4, 30, 2)), tf.ones((4, 20, 2))], axis=1)
weights_dict = {'face': 0.3, 'mouth': 0.3, 'eyes': 0.4}
face_loss, mouth_loss, eyes_loss = FpeLoss('l1',
weights_dict=weights_dict)(
y_true=y_true,
y_pred=y_pred,
occ_true=occ_true,
occ_masking_info=occ_masking_info)
with tf.Session() as sess:
loss_np_face, loss_np_eyes, loss_np_mouth = sess.run([face_loss, eyes_loss, mouth_loss])
expected_loss_l1_face = 4.28
expected_loss_l1_eyes = 4.0
expected_loss_l1_mouth = 1.6
np.testing.assert_almost_equal(loss_np_face, expected_loss_l1_face, decimal=6)
np.testing.assert_almost_equal(loss_np_eyes, expected_loss_l1_eyes, decimal=6)
np.testing.assert_almost_equal(loss_np_mouth, expected_loss_l1_mouth, decimal=6)
# Test 8: 68 points landmarks
y_true = tf.ones((3, 68, 2)) # batch_size, num_keypoints, num_dim (x, y)
y_pred = tf.zeros((3, 68, 2)) # batch_size, num_keypoints, num_dim (x, y)
occ_true = tf.ones((3, 68)) # batch_size, num_keypoints
occ_masking_info = tf.zeros((3)) # batch_size
loss, _, __ = FpeLoss('l1',
mask_occ=mask_occ)(y_true=y_true,
y_pred=y_pred,
occ_true=occ_true,
occ_masking_info=occ_masking_info,
num_keypoints=68)
with tf.Session() as sess:
loss_np = sess.run(loss)
expected_loss_l1 = 3.0
np.testing.assert_almost_equal(loss_np, expected_loss_l1, decimal=6)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/losses/test_fpenet_loss.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License."""
"""Defines loss functions and classes used by Fpenet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nvidia_tao_tf1.cv.fpenet.losses.fpenet_loss import FpeLoss
__all__ = (
'FpeLoss',
)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/losses/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""A launcher script for DriveIX FPENet tasks inside a runtime container."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from maglev_sdk.docker_container.entrypoint import main
if __name__ == '__main__':
main('fpenet', 'nvidia_tao_tf1/cv/fpenet/scripts')
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/docker/fpenet.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FPE DataIO pipeline script which generates tfrecords."""
import argparse
import json
import os
from time import time
import cv2
import numpy as np
import tensorflow as tf
from yaml import load
# Color definition for stdout logs.
CRED = '\033[91m'
CGREEN = '\033[92m'
CYELLOW = '\033[93m'
CEND = '\033[0m'
def _bytes_feature(value):
'''
Returns a bytes_list from a string / byte.
Args:
value (str): String value.
Returns:
Bytes list.
'''
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _float_feature(value):
'''
Returns a float_list from a float / double.
Args:
value (float): Float value.
Returns:
Float list.
'''
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def _int64_feature(value):
'''
Returns an int64_list from a bool / enum / int / uint.
Args:
value (int64): Int64 value.
Returns:
Float list.
'''
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _int64_feature_array(value):
'''
Returns an int64_list from an array.
Args:
value (ndarray): Numpy nd array.
Returns:
int64 list.
'''
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _dtype_feature(ndarray):
'''
    Returns a float_list from an ndarray.
Args:
value (ndarray): Numpy nd array.
Returns:
Float list.
'''
assert isinstance(ndarray, np.ndarray)
return tf.train.Feature(float_list=tf.train.FloatList(value=ndarray))
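# A hedged usage sketch (not part of the original script): how the helpers
# above combine into one tf.train.Example. The feature values are made up.
def _example_feature_dict():
    '''Illustrative only: build a tiny Example with the helpers above.'''
    landmarks = np.asarray([10.0, 20.0, 30.0, 40.0])  # hypothetical flattened (x, y) pairs
    feature = {
        'train/image_frame_name': _bytes_feature('face.png'.encode()),
        'train/image_frame_width': _int64_feature(256),
        'train/landmarks': _dtype_feature(landmarks),
    }
    return tf.train.Example(features=tf.train.Features(feature=feature))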
def parse_json_contents(jsonFile, args):
'''
Function to read ground truth json file.
Args:
jsonFile (str): Path of json file.
args (dict): User arguments.
Returns:
        dataset (list): List of samples, each a dict with keys
            {img_path, landmarks, occlusions}.
'''
num_keypoints = args['num_keypoints']
json_data = json.loads(open(jsonFile, 'r').read())
dataset = list()
for img in json_data:
sample = dict()
sample['img_path'] = ''
sample['landmarks'] = np.zeros((num_keypoints, 2))
sample['occlusions'] = np.zeros((num_keypoints, 1))
try:
fname = str(img['filename'])
if not os.path.exists(os.path.join(args['image_root_path'], fname)):
print(CRED + 'Image does not exist: {}'.format(fname) + CEND)
continue
# Start collecting points information from the json file.
x = [0] * num_keypoints
y = [0] * num_keypoints
# Occlusion tags
tags = [0] * num_keypoints
for chunk in img['annotations']:
if 'fiducialpoints' not in chunk['class'].lower():
continue
points_data = (point for point in chunk if ('class' not in point and
'version' not in point))
for point in points_data:
number = int(
''.join(c for c in str(point) if c.isdigit()))
if 'x' in str(point).lower() and number <= num_keypoints:
x[number - 1] = str(int(float(chunk[point])))
if 'y' in str(point).lower() and number <= num_keypoints:
y[number - 1] = str(int(float(chunk[point])))
if 'occ' in str(point).lower() and number <= num_keypoints and chunk[point]:
tags[number - 1] = 1
sample = dict()
sample['img_path'] = fname
sample['landmarks'] = np.asarray([x, y]).T
sample['occlusions'] = np.asarray(tags).T
dataset.append(sample)
except Exception as e:
print(CRED + str(e) + CEND)
return dataset
def get_bbox(x1, y1, x2, y2):
'''
    Function to get a squared bounding box.
    This module makes the bounding box square by
    increasing the smaller of the bounding width and height.
Args:
x1 (int): x_min value of bbox.
y1 (int): y_min value of bbox.
x2 (int): x_max value of bbox.
y2 (int): y_max value of bbox.
Returns:
        Squared bounding box in the form [x_start, y_start, width, height].
'''
x_start = int(np.floor(x1))
x_end = int(np.ceil(x2))
y_start = int(np.floor(y1))
y_end = int(np.ceil(y2))
width = np.ceil(x_end - x_start)
height = np.ceil(y_end - y_start)
if width < height:
diff = height - width
x_start -= (np.ceil(diff/2.0))
x_end += (np.floor(diff/2.0))
elif width > height:
diff = width - height
y_start -= (np.ceil(diff/2.0))
y_end += (np.floor(diff/2.0))
width = x_end - x_start
height = y_end - y_start
assert width == height
rect_init_square = [int(x_start), int(y_start), int(width), int(height)]
return rect_init_square
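# A quick worked example (not part of the original script) of the squaring
# logic above: a 4-wide by 8-tall box is padded symmetrically in x to 8x8.
def _example_get_bbox():
    '''Illustrative only: get_bbox(10, 10, 14, 18) -> [8, 10, 8, 8].'''
    return get_bbox(10, 10, 14, 18)  # [x_start, y_start, width, height]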
def enlarge_bbox(bbox, ratio=1.0):
'''
Module enlarges the bounding box by a scaling factor.
Args:
        bbox (list): Bounding box in the form [x_start, y_start, width, height].
        ratio (float): Bounding box enlargement scale/ratio.
    Returns:
        Scaled bounding box in the form [x_start, y_start, width, height].
'''
x_start, y_start, width, height = bbox
x_end = x_start + width
y_end = y_start + height
assert width == height, 'width %s is not equal to height %s'\
% (width, height)
change = ratio - 1.0
shift = int((change/2.0)*width)
x_start_new = int(np.floor(x_start - shift))
x_end_new = int(np.ceil(x_end + shift))
y_start_new = int(np.floor(y_start - shift))
y_end_new = int(np.ceil(y_end + shift))
# Assertion for increase length.
width = int(x_end_new - x_start_new)
height = int(y_end_new - y_start_new)
assert height == width
rect_init_square = [x_start_new, y_start_new, width, height]
return rect_init_square
def detect_bbox(kpts, img_size, dist_ratio, num_kpts=68):
'''
Utility to get the bounding box using only kpt information.
This method gets the kpts and the original image size.
    It then gets a square encompassing all key-points and
later enlarges that by dist_ratio.
Args:
kpts: the kpts in either format of 1-dim of size #kpts * 2
or 2-dim of shape [#kpts, 2].
img_size: a 2-value tuple indicating the size of the original image
with format (width_size, height_size)
dist_ratio: the ratio by which the original key-points to be enlarged.
num_kpts (int): Number of keypoints.
Returns:
bbox with values (x_start, y_start, width, height).
'''
x_min = np.nanmin(kpts[:, 0])
x_max = np.nanmax(kpts[:, 0])
y_min = np.nanmin(kpts[:, 1])
y_max = np.nanmax(kpts[:, 1])
bbox = get_bbox(x_min, y_min, x_max, y_max)
# Enlarge the bbox by a ratio.
rect = enlarge_bbox(bbox, dist_ratio)
    # Ensure the enlarged bounding box is within image bounds.
    if((rect[0] < 0) or
       (rect[1] < 0) or
       (rect[2] + rect[0] > img_size[0]) or
       (rect[3] + rect[1] > img_size[1])):
        return None
return rect
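

# Illustrative sketch (not part of the original module): deriving a face box
# from key points. The key-point values and image size are made up.
def _example_detect_bbox():
    kpts = np.asarray([[30.0, 40.0], [70.0, 40.0], [50.0, 80.0]])
    # Returns an enlarged square box, or None when the raw key-point box
    # does not fit inside the 160x160 image.
    return detect_bbox(kpts=kpts, img_size=(160, 160), dist_ratio=1.2)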
def write_tfrecord(dataset, setid, args):
'''
Utility to dump tfrecords with all data.
Args:
        dataset (list): List of samples; each sample is a dict with keys img_path, landmarks and occlusions.
setid (str): Set name.
args (dict): User provided arguments.
Returns:
None
'''
tfRecordPath = os.path.join(args['save_root_path'],
args['save_path'],
setid,
args['tfrecord_folder'])
if not os.path.exists(tfRecordPath):
os.makedirs(tfRecordPath)
recordfile = os.path.join(tfRecordPath, args['tfrecord_name'])
writer = tf.io.TFRecordWriter(recordfile)
N = len(dataset)
count = 0
for i in range(N):
img_name = dataset[i]['img_path']
landmarks = dataset[i]['landmarks'].astype('float')
landmarks_occ = dataset[i]['occlusions'].astype(int)
image_path = os.path.join(args['image_root_path'], img_name)
image = cv2.imread(image_path)
if image is None:
            print(CRED + 'Bad image: {}'.format(image_path) + CEND)
continue
image_shape = image.shape
bbox = detect_bbox(kpts=landmarks[:args['num_keypoints'], :],
img_size=(image_shape[1], image_shape[0]),
dist_ratio=args['bbox_enlarge_ratio'],
num_kpts=args['num_keypoints'])
if bbox is None:
continue
feature_dict = {
'train/image_frame_name' : _bytes_feature(img_name.encode()),
'train/image_frame_width' : _int64_feature(image_shape[1]),
'train/image_frame_height' : _int64_feature(image_shape[0]),
'train/facebbx_x' : _int64_feature(bbox[0]),
'train/facebbx_y' : _int64_feature(bbox[1]),
'train/facebbx_w' : _int64_feature(bbox[2]),
'train/facebbx_h' : _int64_feature(bbox[3]),
'train/landmarks' : _dtype_feature(landmarks.reshape(-1)),
'train/landmarks_occ' : _int64_feature_array(landmarks_occ.reshape(-1))
}
example = tf.train.Example(
features=tf.train.Features(feature=feature_dict))
writer.write(example.SerializeToString())
count = count + 1
    print(CYELLOW + 'recordfile: {} count: {}'.format(recordfile, count) + CEND)
writer.close()
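

# Illustrative sketch (not part of the original module): reading records back
# with the same feature keys used above. The landmarks dtype is an assumption
# here (float32, matching what _dtype_feature would typically serialize).
def _example_read_tfrecord(recordfile, num_keypoints=80):
    features = {
        'train/image_frame_name': tf.io.FixedLenFeature([], tf.string),
        'train/facebbx_x': tf.io.FixedLenFeature([], tf.int64),
        'train/facebbx_y': tf.io.FixedLenFeature([], tf.int64),
        'train/facebbx_w': tf.io.FixedLenFeature([], tf.int64),
        'train/facebbx_h': tf.io.FixedLenFeature([], tf.int64),
        'train/landmarks': tf.io.FixedLenFeature([num_keypoints * 2], tf.float32),
    }
    dataset = tf.data.TFRecordDataset(recordfile)
    return dataset.map(lambda record: tf.io.parse_single_example(record, features))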
def tfrecord_manager(args):
'''
Function to read json files for all sets and create tfrecords.
Args:
args (dict): User provided arguments.
- "sets": Set IDs to extract as a list. Example- [set1, set2, set3].
- "gt_path": Ground truth json path.
- "save_path": Save path for TF Records.
- "gt_root_path": Root path for ground truth jsons.
- "save_root_path": Root path for saving tfrecords data.
- "image_root_path": Root path for the images.
- "tf_folder": TF record folder name.
- "tfrecord_name": TF record file name.
- "num_keypoints": Number of keypoints.
- "bbox_enlarge_ratio": Scale to enlarge bounding box with.
Returns:
None
'''
for setid in args['sets']:
now = time()
set_gt_path = os.path.join(args['gt_root_path'], args['gt_path'], setid)
jsonList = []
for x in os.listdir(set_gt_path):
        if x.endswith('.json'):
jsonList.append(x)
# collect data from all GT json files for the setid
dataset = list()
for jsonfile in jsonList:
jsonPath = os.path.join(set_gt_path, jsonfile)
jsondata = parse_json_contents(jsonPath, args)
print(CGREEN + 'Json {} has image count: {}'.format(jsonPath, len(jsondata)) + CEND)
dataset.extend(jsondata)
write_tfrecord(dataset, setid, args)
print(CGREEN + 'Set {} has total image count: {}'.format(setid, len(dataset)) + CEND)
set_time = round(time() - now, 2)
print(CGREEN + 'DataIO for {} done in {} sec.'.format(setid, str(set_time)) + CEND)
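

# Illustrative config sketch (not part of the original module): the YAML file
# consumed by main() below maps directly onto the args dict documented above.
# All paths and values here are hypothetical examples.
#
#   sets: ['set1']
#   gt_root_path: '/workspace/gt'
#   gt_path: 'jsons'
#   save_root_path: '/workspace/out'
#   save_path: 'tfrecords'
#   image_root_path: '/workspace/images'
#   tfrecord_folder: 'FpeTfRecords'
#   tfrecord_name: 'data.tfrecords'
#   num_keypoints: 80
#   bbox_enlarge_ratio: 1.4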
def main():
    '''Main function to parse user arguments and call the tfrecord manager.'''
parser = argparse.ArgumentParser(
description="Generate TFRecords from json ground truth")
parser.add_argument('-e', '--exp_spec',
type=str, required=True,
help='Config file with dataio inputs.')
args, _ = parser.parse_known_args()
config_path = args.exp_spec
with open(config_path, 'r') as f:
args = load(f)
tfrecord_manager(args)
if __name__ == '__main__':
main()
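# Typical invocation (illustrative; the config path is hypothetical):
#   python generate_dataset.py -e /path/to/dataio_config.yaml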
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/dataio/generate_dataset.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License."""
"""Defines DataIO for Fpenet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/dataio/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test FpeNet DataIO dataloader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import unittest
from yaml import load
from nvidia_tao_tf1.cv.fpenet.dataio.generate_dataset import tfrecord_manager
test_config_path = 'nvidia_tao_tf1/cv/fpenet/dataio/testdata/test_config.yaml'
class GenerateDatasetTest(unittest.TestCase):
def test_generate_dataset(self):
'''Test if a sample dataset is generated.'''
with open(test_config_path, 'r') as f:
args = load(f)
tfrecord_manager(args)
# tests on generated files
tfRecordPath = os.path.join(args['save_root_path'],
args['save_path'],
'testdata',
args['tfrecord_folder'])
recordfile = os.path.join(tfRecordPath, args['tfrecord_name'])
assert os.path.exists(recordfile)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/dataio/test_generate_dataset.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""A launcher script for DriveIX FPENet tasks inside a runtime container."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from maglev_sdk.docker_container.entrypoint import main
if __name__ == '__main__':
main('fpenet_dataio', 'nvidia_tao_tf1/cv/fpenet/dataio')
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/dataio/docker/fpenet_dataio.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for FpeNet model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from os import remove
from os.path import splitext
from keras.layers import Input
from keras.models import load_model
from keras.models import model_from_json
import pytest
from nvidia_tao_tf1.core.models.templates.utils import count_layers_by_class_name
from nvidia_tao_tf1.cv.fpenet.models.custom.softargmax import Softargmax
from nvidia_tao_tf1.cv.fpenet.models.fpenet import FpeNetModel
def load_keras_model(file_name, remove_file=False):
"""Load a Keras model from an HDF5 file or from a JSON file.
Args:
file_name (str): File name of the model file, must end on '.h5', '.hdf5' or '.json'.
remove_file (bool): Toggles if the file shall be removed after successful loading.
"""
_, extension = splitext(file_name.lower())
if extension in ['.h5', '.hdf5']:
model = load_model(
file_name,
custom_objects={'Softargmax': Softargmax},
compile=False)
elif extension == '.json':
with open(file_name, 'r') as json_file:
json_content = json_file.read()
model = model_from_json(
json_content, custom_objects={'Softargmax': Softargmax})
else:
        raise ValueError(
            'Can only load a model with extensions .h5, .hdf5 or .json, '
            'got %s.' % extension)
if remove_file:
remove(file_name)
return model
def save_keras_model(keras_model, base_name):
"""Save a model to JSON and HDF5 format and return their file paths.
Args:
keras_model (Model): Model to be saved.
base_name (str): Base name for the files to be written.
"""
json_string = keras_model.to_json()
json_file_name = base_name + '.json'
with open(json_file_name, 'w') as json_file:
json_file.write(json_string)
hdf5_file_name = base_name + '.h5'
keras_model.save(hdf5_file_name)
return json_file_name, hdf5_file_name
def build_sample_images(name='inputs',
data_format='channels_first',
channels=1,
height=80,
width=80):
"""Construct FpeNet model for testing.
Args:
name (str): Name of the input tensor. Default value is 'inputs'
data_format (str): Expected tensor format, either `channels_first` or `channels_last`.
Default value is `channels_first`.
        channels, height, width (all int): Input image dimensions.
"""
# Set sample inputs.
if data_format == 'channels_first':
shape = (channels, height, width)
elif data_format == 'channels_last':
shape = (height, width, channels)
else:
raise ValueError(
'Provide either `channels_first` or `channels_last` for `data_format`.'
)
image_face = Input(shape=shape, name=name)
return image_face
def model_io(keras_model, base_name, remove_file):
""" Serialize a model to HDF5 and JSON format, an then deserialize it again.
Args:
keras_model (Model): The Keras model to be tested.
base_name (str): Name of the files, without extension.
remove_file (bool): Toggles if the files shall be removed after successful loading.
"""
json_file_name, hdf5_file_name = save_keras_model(keras_model, base_name)
model = load_keras_model(json_file_name, remove_file)
model = load_keras_model(hdf5_file_name, remove_file)
return model
@pytest.mark.parametrize('data_format', ['channels_first'])
def test_layer_counts_default(
data_format,
save=False,
):
"""Test for correct layer counts of the default version with classical spatial convolutions.
Args:
data_format (str): Expected tensor format, either `channels_first` or `channels_last`.
Default value is `channels_first`.
        save (bool): Toggles if the model should be serialized in JSON and HDF5 formats.
"""
image_face = build_sample_images(
name='input_face', data_format=data_format)
blocks_decoder = [[(3, 64), (1, 64)]] * 4
model = FpeNetModel(
pooling=True,
use_batch_norm=False,
data_format=data_format,
kernel_regularizer=None,
bias_regularizer=None,
activation_type='relu',
activation_kwargs=None,
blocks_encoder=None,
block_trunk=None,
blocks_decoder=blocks_decoder,
block_post_decoder=None,
nkeypoints=80,
beta=0.1)
keras_model = model.construct(image_face)
base_name = keras_model.name
keras_model.summary()
assert count_layers_by_class_name(keras_model, ['InputLayer']) == 1
assert count_layers_by_class_name(keras_model, ['Conv2D']) == 17
assert count_layers_by_class_name(keras_model, ['Dense']) == 0
assert count_layers_by_class_name(keras_model, ['Reshape']) == 0
assert count_layers_by_class_name(keras_model, ['Dropout']) == 0
assert count_layers_by_class_name(keras_model, ['MaxPooling2D']) == 4
assert count_layers_by_class_name(keras_model, ['Concatenate']) == 4
assert count_layers_by_class_name(keras_model, ['Softargmax']) == 1
assert keras_model.count_params() == 523408
remove_file = not save
if not os.path.exists(base_name + '.h5'):
model_io(keras_model, base_name, remove_file)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/models/test_fpenet.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FpeNetBaseModel class that takes care of constructing, training and validating a model."""
import gc
import logging
import keras
from keras.layers import Input
from nvidia_tao_tf1.blocks.models import KerasModel
from nvidia_tao_tf1.core.coreobject import save_args
from nvidia_tao_tf1.cv.common.utilities.tlt_utils import encode_from_keras, model_io
from nvidia_tao_tf1.cv.fpenet.models.custom.softargmax import Softargmax
from nvidia_tao_tf1.cv.fpenet.models.fpenet import FpeNetModel
logger = logging.getLogger(__name__)
class FpeNetBaseModel(KerasModel):
"""FpeNetBaseModel class.
FpeNetBaseModel contains functions for constructing and manipulating a Keras based FpeNet model,
building training and validation graphs for the model, and visualizing predictions.
"""
@save_args
def __init__(self,
model_parameters,
visualization_parameters=None,
**kwargs):
"""__init__ method.
Args:
model_parameters (dict): A dictionary of all the parameters required in initialization.
visualization_parameters (dict): A dictionary of info required by visualization.
"""
super(FpeNetBaseModel, self).__init__(**kwargs)
self._data_format = 'channels_first'
self._regularizer_type = model_parameters.get('regularizer_type')
self._regularizer_weight = model_parameters.get('regularizer_weight')
self._pretrained_model_path = model_parameters.get('pretrained_model_path')
self._model_type = model_parameters.get('type')
self._use_upsampling_layer = model_parameters.get('use_upsampling_layer')
self._input_shape = None
self._output_shape = None
self._keras_model = None
if 'beta' in model_parameters.keys():
self._beta = model_parameters.get('beta')
else:
raise ValueError('beta value not provided.')
self._kernel_regularizer = None
self._bias_regularizer = None
# Set regularizer.
if self._regularizer_type.lower() == 'l1':
self._kernel_regularizer = keras.regularizers.l1(
self._regularizer_weight)
self._bias_regularizer = keras.regularizers.l1(
self._regularizer_weight)
elif self._regularizer_type.lower() == 'l2':
self._kernel_regularizer = keras.regularizers.l2(
self._regularizer_weight)
self._bias_regularizer = keras.regularizers.l2(
self._regularizer_weight)
else:
raise ValueError('%s regularizer type not supported.' % self._regularizer_type)
def build(self, input_images, enc_key=None, num_keypoints=80):
"""Build a FpeNet Keras model.
Args:
            input_images: Input images to FpeNet model.
            enc_key (str): Key for loading an encrypted pretrained model.
            num_keypoints (int): Number of facial key points to predict.
        """
input_tensor = Input(tensor=input_images, name="input_face_images")
        logger.info("model type is: %s" % self._model_type)
if self._keras_model is not None:
predictions = {}
predictions['landmarks'] = self._keras_model.outputs[0]
predictions['confidence'] = self._keras_model.outputs[1]
predictions['heat_map'] = self._keras_model.outputs[2]
return predictions
if self._model_type == 'FpeNet_base':
model = FpeNetModel(
data_format=self._data_format,
blocks_decoder=[[(3, 64), (1, 64)]] * 4,
nkeypoints=num_keypoints,
additional_conv_layer=True,
use_upsampling_layer=self._use_upsampling_layer,
beta=self._beta,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
elif self._model_type == 'FpeNet_base_5x5_conv':
model = FpeNetModel(
data_format=self._data_format,
blocks_decoder=[[(5, 64), (1, 64)]] * 4,
nkeypoints=num_keypoints,
additional_conv_layer=False,
use_upsampling_layer=self._use_upsampling_layer,
beta=self._beta,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
elif self._model_type == 'FpeNet_release':
model = FpeNetModel(
data_format=self._data_format,
blocks_decoder=[[(3, 64), (1, 64)]] * 4,
nkeypoints=num_keypoints,
additional_conv_layer=True,
use_upsampling_layer=True,
beta=self._beta,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
elif self._model_type == 'FpeNet_public':
model = FpeNetModel(
data_format=self._data_format,
blocks_decoder=[[(3, 64), (1, 64)]] * 4,
nkeypoints=num_keypoints,
additional_conv_layer=False,
use_upsampling_layer=False,
beta=self._beta,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
else:
raise NotImplementedError(
'A FpeNet model of type %s is not implemented.' %
self._model_type)
model = model.construct(input_tensor)
# If you have weights you've trained previously, you can load them into this model.
if self._pretrained_model_path is not None:
loaded_model = model_io(self._pretrained_model_path,
enc_key=enc_key,
custom_objects={"Softargmax": Softargmax})
loaded_model_layers = [layer.name for layer in loaded_model.layers]
logger.info("Loading weights from pretrained model file. {}".format(
self._pretrained_model_path))
for layer in model.layers:
if layer.name in loaded_model_layers:
pretrained_layer = loaded_model.get_layer(layer.name)
weights_pretrained = pretrained_layer.get_weights()
model_layer = model.get_layer(layer.name)
try:
model_layer.set_weights(weights_pretrained)
except ValueError:
continue
del loaded_model
gc.collect()
else:
logger.info('This model will be trained from scratch.')
self._keras_model = model
predictions = {}
predictions['landmarks'] = self._keras_model.outputs[0]
predictions['confidence'] = self._keras_model.outputs[1]
predictions['heat_map'] = self._keras_model.outputs[2]
return predictions
def load_model(self, model_file):
"""Load a previously saved Keras model.
Args:
model_file: Keras Model file name.
"""
self._keras_model = keras.models.load_model(model_file, compile=False)
self._input_shape = self._keras_model.input_shape
self._output_shape = self._keras_model.output_shape
@property
def keras_model(self):
"""Get Keras model as a class property."""
return self._keras_model
@keras_model.setter
def keras_model(self, model):
"""Set to a new model."""
self._keras_model = model
def save_model(self, file_name, enc_key=None):
"""Save the model to disk.
Args:
file_name (str): Model file name.
enc_key (str): Key string for encryption.
        """
self.keras_model.save(file_name, overwrite=True)
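

# Illustrative sketch (not part of the original module): building the model
# from a parameter dictionary. `input_images` is assumed to be a tensor of
# face crops, e.g. produced by a dataloader; the parameter values are examples.
def _example_build_basemodel(input_images):
    params = {
        'type': 'FpeNet_base',
        'beta': 0.01,
        'regularizer_type': 'l2',
        'regularizer_weight': 1.0e-05,
        'pretrained_model_path': None,
        'use_upsampling_layer': False,
    }
    model = FpeNetBaseModel(params)
    # Returns a dict with 'landmarks', 'confidence' and 'heat_map' tensors.
    return model.build(input_images, num_keypoints=80)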
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/models/fpenet_basemodel.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FpeNet Model definitions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nvidia_tao_tf1.cv.fpenet.models.fpenet_basemodel import FpeNetBaseModel
__all__ = ('FpeNetBaseModel', )
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/models/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for FpeNet BaseModel model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Input
from nvidia_tao_tf1.core.models.templates.utils import count_layers_by_class_name
from nvidia_tao_tf1.cv.fpenet.models.rcnet import RecombinatorNet
def build_sample_images(name='inputs',
data_format='channels_first',
channels=1,
height=80,
width=80):
"""Construct FpeNet model for testing.
Args:
name (str): Name of the input tensor. Default value is 'inputs'
data_format (str): Expected tensor format, either `channels_first` or `channels_last`.
Default value is `channels_first`.
        channels, height, width (all int): Input image dimensions.
"""
# Set sample inputs.
if data_format == 'channels_first':
shape = (channels, height, width)
elif data_format == 'channels_last':
shape = (height, width, channels)
else:
raise ValueError(
'Provide either `channels_first` or `channels_last` for `data_format`.'
)
image_face = Input(shape=shape, name=name)
return image_face
def test_fpenet_model_builder():
"""Test FpeNetBaseModel constructor."""
image_face = build_sample_images(name='input_face')
# Test: 'FpeNet_base'
block_trunk = [(3, 64), (3, 64), (1, 64)]
blocks_encoder = [[(3, 64)]] * 4
blocks_decoder = [[(3, 64), (1, 64)]] * 4
model = RecombinatorNet(inputs=image_face,
pooling=True,
use_batch_norm=False,
data_format='channels_first',
kernel_regularizer=None,
bias_regularizer=None,
activation_type='relu',
activation_kwargs=None,
blocks_encoder=blocks_encoder,
block_trunk=block_trunk,
blocks_decoder=blocks_decoder,
use_upsampling_layer=False)
assert count_layers_by_class_name(model, ['InputLayer']) == 1
assert count_layers_by_class_name(model, ['Conv2D']) == 15
assert count_layers_by_class_name(model, ['Dense']) == 0
assert count_layers_by_class_name(model, ['Reshape']) == 0
assert count_layers_by_class_name(model, ['Dropout']) == 0
assert count_layers_by_class_name(model, ['MaxPooling2D']) == 4
assert count_layers_by_class_name(model, ['Concatenate']) == 4
assert count_layers_by_class_name(model, ['Softargmax']) == 0
assert model.count_params() == 566784
# Test: channels_last
data_format = 'channels_last'
image_face = build_sample_images(name='input_face', data_format=data_format)
block_trunk = [(3, 64), (3, 64), (1, 64)]
blocks_encoder = [[(3, 64)]] * 4
blocks_decoder = [[(3, 64), (1, 64)]] * 4
model = RecombinatorNet(inputs=image_face,
pooling=True,
use_batch_norm=False,
data_format=data_format,
kernel_regularizer=None,
bias_regularizer=None,
activation_type='relu',
activation_kwargs=None,
blocks_encoder=blocks_encoder,
block_trunk=block_trunk,
blocks_decoder=blocks_decoder,
use_upsampling_layer=False)
assert count_layers_by_class_name(model, ['InputLayer']) == 1
assert count_layers_by_class_name(model, ['Conv2D']) == 15
assert count_layers_by_class_name(model, ['Dense']) == 0
assert count_layers_by_class_name(model, ['Reshape']) == 0
assert count_layers_by_class_name(model, ['Dropout']) == 0
assert count_layers_by_class_name(model, ['MaxPooling2D']) == 4
assert count_layers_by_class_name(model, ['Concatenate']) == 4
assert count_layers_by_class_name(model, ['Softargmax']) == 0
assert model.count_params() == 566784
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/models/test_rcnet.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FpeNet model definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Activation
from keras.layers import BatchNormalization
from keras.layers import Conv2D
from keras.models import Model
from nvidia_tao_tf1.core.models.templates.utils import CNNBlock
from nvidia_tao_tf1.core.models.templates.utils import get_batchnorm_axis
from nvidia_tao_tf1.cv.fpenet.models.custom.softargmax import Softargmax
from nvidia_tao_tf1.cv.fpenet.models.rcnet import RecombinatorNet
class FpeNetModel(object):
"""FpeNet model definition."""
def __init__(self,
pooling=True,
use_batch_norm=False,
data_format='channels_first',
kernel_regularizer=None,
bias_regularizer=None,
activation_type='relu',
activation_kwargs=None,
blocks_encoder=None,
block_trunk=None,
blocks_decoder=None,
block_post_decoder=None,
nkeypoints=None,
beta=0.1,
additional_conv_layer=True,
use_upsampling_layer=True):
"""__init__ method.
Construct a Fiducial Points Estimator network with a Softargmax activation function.
Based on Recombinator Networks.
Described in the paper (in particular in its appendix) [1].
Args:
pooling (bool): whether max-pooling with a stride of 2 should be used.
If `False`, this stride will be added to the next convolution instead.
use_batch_norm (bool): whether batchnorm should be added after each convolution.
data_format (str): either 'channels_last' (NHWC) or 'channels_first' (NCHW).
kernel_regularizer (float): regularizer to apply to kernels.
bias_regularizer (float): regularizer to apply to biases.
activation_type (str): Type of activation.
activation_kwargs (dict): Additional activation keyword arguments to be fed to
the add_activation function.
blocks_encoder (list of convolution blocks): A convolution block is a list of (K, C)
tuples of arbitrary length (aka convolution subblocks), where C is the number of
output channels and K the kernel size of a convolution layer. The encoder reduces
the spatial resolution horizontally and vertically by a factor of two (stride of 2)
per convolution block, respectively.
block_trunk (one convolution block): This block preserves the spatial resolution of the
final encoder output and 'refines' it.
blocks_decoder (list of convolution blocks): The decoder increases the spatial
resolution horizontally and vertically by a factor of two (upsample factor of 2) per
convolution block, respectively. blocks_encoder and blocks_decoder must have the
same length of convolution blocks, while the number of convolution layers per block
may vary.
block_post_decoder (one convolution block): This optional block preserves the spatial
resolution of the final decoder output and 'refines' it before predicting the
key-points feature maps.
nkeypoints (int): Number of key points to be predicted. A 1x1 convolution layer is
appended to the final decoder output with nkeypoints output channels, and a
corresponding Softargmax operator is added.
beta (float): Softargmax coefficient used for multiplying the key-point maps after
subtracting the channel-wise maximum.
            additional_conv_layer (bool): whether to append an additional convolution
                layer at the end.
            use_upsampling_layer (bool): whether to use an upsampling layer instead of
                a deconvolution (transposed convolution) layer.
Returns:
Model: The output model after applying the Fiducial Point Estimator net on top of
input `x`.
[1] Improving Landmark Localization with Semi-Supervised Learning
(https://arxiv.org/abs/1709.01591)
"""
# Check whether data format is supported.
if data_format not in ["channels_first", "channels_last"]:
raise ValueError("Unsupported data_format: {}.".format(data_format))
self._pooling = pooling
self._use_batch_norm = use_batch_norm
self._data_format = data_format
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._activation_type = activation_type
self._activation_kwargs = activation_kwargs or {}
self._blocks_encoder = blocks_encoder or [[(3, 64)]] * 4
self._block_trunk = block_trunk or [(3, 64), (3, 64), (1, 64)]
self._blocks_decoder = blocks_decoder or [[(3, 64), (1, 64)]] * 4
self._block_post_decoder = block_post_decoder or list()
self._nkeypoints = nkeypoints
self._beta = beta
self._additional_conv_layer = additional_conv_layer
self._use_upsampling_layer = use_upsampling_layer
def conv2D_bn_activation(
self,
x,
use_batch_norm,
filters,
kernel_size,
strides=(1, 1),
activation_type="relu",
activation_kwargs=None,
data_format=None,
kernel_regularizer=None,
bias_regularizer=None,
layer_name=None,
use_bias=True,
trainable=True
):
"""
Add a conv layer, followed by batch normalization and activation.
Args:
x (tensor): the inputs (tensor) to the convolution layer.
use_batch_norm (bool): use batch norm.
filters (int): the number of filters.
kernel_size: An integer or tuple/list of 2 integers, specifying the
width and height of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the width and height.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
activation_type (str): activation function name, e.g., 'relu'.
activation_kwargs (dict): Additional activation keyword arguments to be fed to
the add_activation function.
data_format (str): either 'channels_last' or 'channels_first'.
kernel_regularizer (`regularizer`): regularizer for the kernels.
bias_regularizer (`regularizer`): regularizer for the biases.
            layer_name (str): layer name prefix.
            use_bias (bool): whether or not to use bias in the convolutional layer.
            trainable (bool): whether the layer weights are trainable.
Returns:
x (tensor): the output tensor of the convolution layer.
"""
if layer_name is not None:
layer_name = "%s_m%d" % (layer_name, filters)
x = Conv2D(filters=filters,
kernel_size=kernel_size,
strides=strides,
padding="same",
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
name=layer_name,
use_bias=use_bias,
trainable=trainable)(x)
if use_batch_norm:
if layer_name is not None:
layer_name += "_bn"
x = BatchNormalization(axis=get_batchnorm_axis(data_format),
name=layer_name,
trainable=trainable)(x)
if activation_type:
# activation_kwargs = activation_kwargs or {}
x = Activation(activation_type)(x)
# x = add_activation(activation_type, **activation_kwargs)(x)
return x
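    # Example (illustrative, not part of the original class): the additional
    # 'conv_keypoints' head in construct() below boils down to
    #   x = self.conv2D_bn_activation(x, use_batch_norm=False, filters=64,
    #                                 kernel_size=1, activation_type='relu',
    #                                 layer_name='conv_keypoints',
    #                                 data_format=self._data_format)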
def construct(self, imageface):
"""Create a template for a Fiducial Points Estimator network.
Args:
            imageface (Tensor): Input tensor for the face image.
        Returns:
            Model: Keras model with outputs [key points, confidence, heat maps].
        """
# First construct the RCN back-bone:
rcn_model = RecombinatorNet(inputs=imageface,
pooling=self._pooling,
use_batch_norm=self._use_batch_norm,
data_format=self._data_format,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation_type=self._activation_type,
activation_kwargs=self._activation_kwargs,
blocks_encoder=self._blocks_encoder,
block_trunk=self._block_trunk,
blocks_decoder=self._blocks_decoder,
use_upsampling_layer=self._use_upsampling_layer)
# Grab the output tensor of our RCN back-bone:
rcn_output = rcn_model.outputs[0]
# If specified, add a convolution block on top of the RCN back-bone ...
if self._block_post_decoder:
block = [(3, 128), (1, 64)]
rcn_output = CNNBlock(
use_batch_norm=self._use_batch_norm,
use_shortcuts=False,
repeat=1,
stride=1,
subblocks=block,
activation_type=self._activation_type,
use_bias=not (self._use_batch_norm),
data_format=self._data_format,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation_kwargs=self._activation_kwargs)(rcn_output)
# additional conv layer as head of RCN
if self._additional_conv_layer is True:
rcn_output = self.conv2D_bn_activation(
rcn_output,
use_batch_norm=False,
filters=64,
kernel_size=1,
strides=(1, 1),
activation_type=self._activation_type,
layer_name='conv_keypoints',
use_bias=True,
data_format=self._data_format,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation_kwargs=self._activation_kwargs)
# add final 1x1 convolution for predicting the target number of key points
conv_keypoints = self.conv2D_bn_activation(
rcn_output,
use_batch_norm=False,
filters=self._nkeypoints,
kernel_size=1,
strides=(1, 1),
activation_type=None,
layer_name='conv_keypoints',
use_bias=True,
data_format=self._data_format,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation_kwargs=self._activation_kwargs)
# Grab the output shape of the tensor produced by the previous conv operator
# (important to add `as_list()` so that a snapshot of the shape is taken during build time
# and not derived from the graph during run time).
conv_keypoints_shape = conv_keypoints.get_shape().as_list()
# Add a Softargmax activation:
softargmax, confidence = Softargmax(
conv_keypoints_shape,
beta=self._beta,
data_format=self._data_format,
name='softargmax')(conv_keypoints)
# Derive a model name from the number of key points:
keypoints_model_name = 'fpe_%s_%dkpts' % (rcn_model.name,
self._nkeypoints)
outputs_list = [softargmax, confidence, conv_keypoints]
rcn_keypoints_model = Model(inputs=imageface,
outputs=outputs_list,
name=keypoints_model_name)
return rcn_keypoints_model
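

# Illustrative sketch (not part of the original module): instantiating the
# model and constructing it on an 80x80 grayscale face input.
def _example_construct_fpenet():
    from keras.layers import Input
    image_face = Input(shape=(1, 80, 80), name='input_face')
    model = FpeNetModel(data_format='channels_first',
                        blocks_decoder=[[(3, 64), (1, 64)]] * 4,
                        nkeypoints=80,
                        beta=0.1)
    keras_model = model.construct(image_face)
    # Outputs: [soft-argmax key points, confidences, raw heat maps].
    return keras_model.outputs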
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/models/fpenet.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for FpeNet BaseModel model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Input
from nvidia_tao_tf1.core.models.templates.utils import count_layers_by_class_name
from nvidia_tao_tf1.cv.fpenet.models.fpenet_basemodel import FpeNetBaseModel
def build_sample_images(name='inputs',
data_format='channels_first',
channels=1,
height=80,
width=80):
"""Construct FpeNet model for testing.
Args:
name (str): Name of the input tensor. Default value is 'inputs'
data_format (str): Expected tensor format, either `channels_first` or `channels_last`.
Default value is `channels_first`.
channels, height, width (all int): Input image dimentions.
"""
# Set sample inputs.
if data_format == 'channels_first':
shape = (channels, height, width)
elif data_format == 'channels_last':
shape = (height, width, channels)
else:
raise ValueError(
'Provide either `channels_first` or `channels_last` for `data_format`.'
)
image_face = Input(shape=shape, name=name)
return image_face
def test_fpenet_model_builder():
"""Test FpeNetBaseModel constructor."""
image_face = build_sample_images(name='input_face')
# Test: 'FpeNet_base'
model_parameters = {
'beta': 0.01,
'dropout_rate': 0.5,
'freeze_Convlayer': None,
'pretrained_model_path': None,
'regularizer_type': 'l2',
'regularizer_weight': 1.0e-05,
'type': 'FpeNet_base',
'use_upsampling_layer': False,
'visualization_parameters': None,
}
model = FpeNetBaseModel(model_parameters)
model.build(image_face, num_keypoints=80)
keras_model = model._keras_model
assert count_layers_by_class_name(keras_model, ['InputLayer']) == 1
assert count_layers_by_class_name(keras_model, ['Conv2D']) == 17
assert count_layers_by_class_name(keras_model, ['Dense']) == 0
assert count_layers_by_class_name(keras_model, ['Reshape']) == 0
assert count_layers_by_class_name(keras_model, ['Dropout']) == 0
assert count_layers_by_class_name(keras_model, ['MaxPooling2D']) == 4
assert count_layers_by_class_name(keras_model, ['Concatenate']) == 4
assert count_layers_by_class_name(keras_model, ['Softargmax']) == 1
assert keras_model.count_params() == 588944
# Test: 'FpeNet_base_5x5_conv'
model_parameters = {
'beta': 0.01,
'dropout_rate': 0.5,
'freeze_Convlayer': None,
'pretrained_model_path': None,
'regularizer_type': 'l2',
'regularizer_weight': 1.0e-05,
'type': 'FpeNet_base_5x5_conv',
'use_upsampling_layer': False,
'visualization_parameters': None,
}
model = FpeNetBaseModel(model_parameters)
model.build(image_face, num_keypoints=80)
keras_model = model._keras_model
assert count_layers_by_class_name(keras_model, ['InputLayer']) == 1
assert count_layers_by_class_name(keras_model, ['Conv2D']) == 16
assert count_layers_by_class_name(keras_model, ['Dense']) == 0
assert count_layers_by_class_name(keras_model, ['Reshape']) == 0
assert count_layers_by_class_name(keras_model, ['Dropout']) == 0
assert count_layers_by_class_name(keras_model, ['MaxPooling2D']) == 4
assert count_layers_by_class_name(keras_model, ['Concatenate']) == 4
assert count_layers_by_class_name(keras_model, ['Softargmax']) == 1
assert keras_model.count_params() == 1109072
# Test: 68 points model
model_parameters = {
'beta': 0.01,
'dropout_rate': 0.5,
'freeze_Convlayer': None,
'pretrained_model_path': None,
'regularizer_type': 'l2',
'regularizer_weight': 1.0e-05,
'type': 'FpeNet_base',
'use_upsampling_layer': False,
'visualization_parameters': None,
}
model = FpeNetBaseModel(model_parameters)
model.build(image_face, num_keypoints=68)
keras_model = model._keras_model
assert count_layers_by_class_name(keras_model, ['InputLayer']) == 1
assert count_layers_by_class_name(keras_model, ['Conv2D']) == 17
assert count_layers_by_class_name(keras_model, ['Dense']) == 0
assert count_layers_by_class_name(keras_model, ['Reshape']) == 0
assert count_layers_by_class_name(keras_model, ['Dropout']) == 0
assert count_layers_by_class_name(keras_model, ['MaxPooling2D']) == 4
assert count_layers_by_class_name(keras_model, ['Concatenate']) == 4
assert count_layers_by_class_name(keras_model, ['Softargmax']) == 1
assert keras_model.count_params() == 588164
# Test: Upsampling layer model
model_parameters = {
'beta': 0.01,
'dropout_rate': 0.5,
'freeze_Convlayer': None,
'pretrained_model_path': None,
'regularizer_type': 'l2',
'regularizer_weight': 1.0e-05,
'type': 'FpeNet_base',
'use_upsampling_layer': True,
'visualization_parameters': None,
}
model = FpeNetBaseModel(model_parameters)
model.build(image_face, num_keypoints=80)
keras_model = model._keras_model
assert count_layers_by_class_name(keras_model, ['InputLayer']) == 1
assert count_layers_by_class_name(keras_model, ['Conv2D']) == 17
assert count_layers_by_class_name(keras_model, ['Dense']) == 0
assert count_layers_by_class_name(keras_model, ['Reshape']) == 0
assert count_layers_by_class_name(keras_model, ['Dropout']) == 0
assert count_layers_by_class_name(keras_model, ['MaxPooling2D']) == 4
assert count_layers_by_class_name(keras_model, ['Concatenate']) == 4
assert count_layers_by_class_name(keras_model, ['Softargmax']) == 1
assert keras_model.count_params() == 523408
# Test: Varying input image dimension
image_face = build_sample_images(name='input_face', height=112, width=112)
model_parameters = {
'beta': 0.01,
'dropout_rate': 0.5,
'freeze_Convlayer': None,
'pretrained_model_path': None,
'regularizer_type': 'l2',
'regularizer_weight': 1.0e-05,
'type': 'FpeNet_base',
'use_upsampling_layer': False,
'visualization_parameters': None,
}
model = FpeNetBaseModel(model_parameters)
model.build(image_face, num_keypoints=80)
keras_model = model._keras_model
assert count_layers_by_class_name(keras_model, ['InputLayer']) == 1
assert count_layers_by_class_name(keras_model, ['Conv2D']) == 17
assert count_layers_by_class_name(keras_model, ['Dense']) == 0
assert count_layers_by_class_name(keras_model, ['Reshape']) == 0
assert count_layers_by_class_name(keras_model, ['Dropout']) == 0
assert count_layers_by_class_name(keras_model, ['MaxPooling2D']) == 4
assert count_layers_by_class_name(keras_model, ['Concatenate']) == 4
assert count_layers_by_class_name(keras_model, ['Softargmax']) == 1
assert keras_model.count_params() == 601232
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/models/test_fpenet_basemodel.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model template for Recombinator Networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import keras
import keras.backend as K
from keras.layers import concatenate
from keras.layers import Conv2DTranspose
from keras.layers import MaxPooling2D
from keras.layers import UpSampling2D
from keras.models import Model
from nvidia_tao_tf1.core.models.templates.utils import CNNBlock
concat_axis_map = {'channels_last': 3, 'channels_first': 1}
def RecombinatorNet(inputs,
pooling=True,
use_batch_norm=False,
data_format='channels_first',
kernel_regularizer=None,
bias_regularizer=None,
activation_type='relu',
activation_kwargs=None,
blocks_encoder=None,
block_trunk=None,
blocks_decoder=None,
use_upsampling_layer=True):
"""
Construct a Recombinator Network template.
Described in the paper [1].
Args:
pooling (bool): whether max-pooling with a stride of 2 should be used.
If `False`, this stride will be added to the next convolution instead.
use_batch_norm (bool): whether batchnorm should be added after each convolution.
data_format (str): either 'channels_last' (NHWC) or 'channels_first' (NCHW).
kernel_regularizer (float): regularizer to apply to kernels.
bias_regularizer (float): regularizer to apply to biases.
activation_type (str): Type of activation.
activation_kwargs (dict): Additional activation keyword arguments to be fed to
the add_activation function.
blocks_encoder (list of convolution blocks): A convolution block is a list of (K, C) tuples
of arbitrary length, where C is the number of output channels and K the kernel size of
a convolution layer. The encoder reduces the spatial resolution horizontally
and vertically by a factor of two (stride of 2) per convolution block, respectively.
block_trunk (one convolution block): This block preserves the spatial resolution of the
final encoder output and 'refines' it.
blocks_decoder (list of convolution blocks): The decoder increases the spatial resolution
horizontally and vertically by a factor of two (upsample factor of 2) per convolution
block, respectively. blocks_encoder and blocks_decoder must have the same length of
convolution blocks, while the number of convolution layers per block may vary.
        use_upsampling_layer (bool): whether to use an upsampling layer instead
            of a deconvolution (transposed convolution) layer.
Returns:
Model: The output model after applying the Recombinator Network on top of input `x`.
[1] Recombinator Networks: Learning Coarse-to-Fine Feature Aggregation
(https://arxiv.org/abs/1511.07356)
"""
assert len(blocks_encoder) == len(
blocks_decoder
), 'Need an equal list length for blocks_encoder and blocks_decoder (number of RCN branches).'
nbranches = len(blocks_encoder)
if data_format is None:
data_format = K.image_data_format()
activation_kwargs = activation_kwargs or {}
blocks_encoder = blocks_encoder or [[(3, 64)]] * 4
block_trunk = block_trunk or [(3, 64), (3, 64), (1, 64)]
blocks_decoder = blocks_decoder or [[(3, 64), (1, 64)]] * 4
# Adjust the convolution stride of the encoder depending on the pooling setting:
if pooling:
filter_stride_encoder = 1
else:
filter_stride_encoder = 2
concat_axis = concat_axis_map[data_format]
deconv_kernel_initializer = keras.initializers.he_uniform()
encoder_outputs = list()
x = inputs
# Create the encoder blocks (strided):
for block in blocks_encoder:
if not pooling:
encoder_outputs.append(x)
x = CNNBlock(
repeat=1,
stride=filter_stride_encoder,
subblocks=block,
use_batch_norm=use_batch_norm,
use_shortcuts=False,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activation_type=activation_type,
activation_kwargs=activation_kwargs,
use_bias=not (use_batch_norm))(x)
if pooling:
encoder_outputs.append(x)
x = MaxPooling2D(
pool_size=(2, 2),
strides=(2, 2),
padding='same',
data_format=data_format)(x)
# Create the trunk block (unstrided):
x = CNNBlock(repeat=1,
stride=1,
subblocks=block_trunk,
use_batch_norm=use_batch_norm,
use_shortcuts=False,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activation_type=activation_type,
activation_kwargs=activation_kwargs,
use_bias=not (use_batch_norm))(x)
# Create the decoder blocks (up-sampling):
for block in blocks_decoder:
# upsampling or deconv layer
if use_upsampling_layer:
x = UpSampling2D(size=(2, 2),
data_format=data_format,
trainable=False)(x)
else:
            # `block` is a list of (kernel_size, filters) subblocks, e.g.
            # [(3, 64), (1, 64)]; the second subblock sets the deconv filters.
            kernel_size, filters = block
            # Fix the kernel size to 2x2 for the transposed convolution.
            kernel_size = (2, 2)
x = Conv2DTranspose(filters[1],
kernel_size=kernel_size,
strides=2,
output_padding=(0, 0),
use_bias=False,
kernel_initializer=deconv_kernel_initializer,
data_format=data_format)(x)
concat_input_encoder = encoder_outputs.pop()
x = concatenate(axis=concat_axis, inputs=[x, concat_input_encoder])
x = CNNBlock(repeat=1,
stride=1,
subblocks=block,
use_batch_norm=use_batch_norm,
use_shortcuts=False,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activation_type=activation_type,
activation_kwargs=activation_kwargs,
use_bias=not (use_batch_norm))(x)
model_name = 'rcn_%dbranches' % nbranches
if not pooling:
model_name += '_nopool'
if use_batch_norm:
model_name += '_bn'
model = Model(inputs=inputs, outputs=x, name=model_name)
return model
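

# Illustrative sketch (not part of the original module): a four-branch RCN
# with the same block configuration the FpeNet tests use.
def _example_recombinator_net():
    from keras.layers import Input
    inputs = Input(shape=(1, 80, 80), name='input_face')
    return RecombinatorNet(inputs=inputs,
                           pooling=True,
                           data_format='channels_first',
                           blocks_encoder=[[(3, 64)]] * 4,
                           block_trunk=[(3, 64), (3, 64), (1, 64)],
                           blocks_decoder=[[(3, 64), (1, 64)]] * 4,
                           use_upsampling_layer=False)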
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/models/rcnet.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Softargmax operator implementation in Keras."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras import backend as K
from keras.initializers import Constant
from keras.layers import Layer
import numpy as np
class Softargmax(Layer):
"""Class for a custom Softargmax operator in Keras."""
def __init__(self,
input_shape,
beta,
data_format='channels_first',
**kwargs):
"""Initialize the Softargmax operator.
Args:
input_shape (4-element list or tuple): Input shape to the Softargmax operator.
beta (float): Coefficient used for multiplying the key-point maps after
subtracting the channel-wise maximum.
Optional args:
data_format (str): Expected tensor format, either 'channels_first' or 'channels_last'.
Default value is 'channels_first'.
"""
assert isinstance(input_shape, (list, tuple))
self._input_shape = tuple(input_shape)
assert len(self._input_shape) == 4
if data_format == 'channels_first':
_, self._nkeypoints, self._height, self._width = self._input_shape
elif data_format == 'channels_last':
_, self._height, self._width, self._nkeypoints = self._input_shape
else:
raise ValueError(
"Provide either 'channels_first' or 'channels_last' for `data_format`."
)
self._beta = beta
self._data_format = data_format
kwargs['trainable'] = False
super(Softargmax, self).__init__(
input_shape=self._input_shape, **kwargs)
row_initializer, column_initializer = Softargmax._index_initializers(
self._height, self._width, K.floatx())
self._row_indexes = self.add_weight(
name='row_indexes',
shape=(1, 1, self._height, self._width),
initializer=row_initializer,
trainable=False)
self._column_indexes = self.add_weight(
name='column_indexes',
shape=(1, 1, self._height, self._width),
initializer=column_initializer,
trainable=False)
def build(self, input_shape):
"""Create the Softargmax operator at build time.
Args:
input_shape (4-element list or tuple): Input shape to the Softargmax operator.
"""
assert isinstance(input_shape, (list, tuple))
input_shape = tuple(input_shape)
assert len(input_shape) == 4
assert tuple(self._input_shape[1:]) == tuple(input_shape[1:])
super(Softargmax, self).build(input_shape)
def call(self, inputs):
"""Call the Softargmax operator at run time.
Args:
inputs (Tensor, 4D): Input tensor with the data to process.
"""
if self._data_format == 'channels_last':
inputs = K.permute_dimensions(inputs, (0, 3, 1, 2))
elif self._data_format != 'channels_first':
raise ValueError(
'Provide either `channels_first` or `channels_last` for `data_format`.'
)
        # Shape: (N, C, 1, 1) - Find the maximum pixel value in each channel
        # (each channel corresponds to one key point).
max_per_channel = self._reduce_channel(inputs, K.max)
# Shape: (N, C, H, W) - Shift the original input values down by the maximum value per
# channel. Results in a 4D tensor with non-positive values.
normalized = inputs - max_per_channel
# normalized = inputs
# Shape: (N, C, H, W) - Multiply all values by the pre-defined beta value and exponentiate
# them.
prod_beta = self._beta * normalized
exp_maps = K.exp(prod_beta)
# Shape: (N, C, 1, 1) - Sum-reduce all channels to a single value.
sum_per_channel = self._reduce_channel(exp_maps)
# Shape: (N, C, 1, 1) - Find the average value per channel through division
# by the number of pixels per channel.
# Output value, representing the confidence of each key point.
# confidence_output = sum_per_channel / (self._height * self._width)
# Shape: (N, C, H, W) - Softmax operation per channel: Divide all exp_maps by their channel
# sum. Results in probability values of the key-point location in every pixel (values in
# [0,1] interval). Each channel sums up to 1.
prob_maps = exp_maps / (sum_per_channel)
# confidence_output = tf.math.reduce_max(prob_maps, axis=[2, 3], keepdims=True)
confidence_output = K.max(prob_maps, axis=[2, 3], keepdims=True)
# Shape: (N, C, 1, 1) - Multiply the column and row indexes with prob_maps (in batched_dot
# fashion), respectively. Then sum-reduce them to a single coordinate, corresponding to the
# weighted location of a key point. Both are output values.
x_coord_output = Softargmax._reduce_channel(
self._column_indexes * prob_maps, K.sum)
y_coord_output = Softargmax._reduce_channel(
self._row_indexes * prob_maps, K.sum)
# Shape: (N, C, 3, 1) - Concatenate all output values.
outputs = K.concatenate(
[x_coord_output, y_coord_output, confidence_output], axis=2)
# Shape: (N, C, 3) - Eliminate the redundant dimension.
outputs = K.squeeze(outputs, axis=3)
# case when we would like to reshape the outputs at the layer stage
# outputs_1 = K.reshape(outputs[:,:,:2], (outputs.shape[0].value,-1, 1, 1))
# outputs_2 = K.reshape(outputs[:,:,2], (outputs.shape[0].value,-1, 1, 1))
# separating the keypoints and confidence predictions
outputs_1 = outputs[:, :, :2]
outputs_2 = outputs[:, :, 2]
        return [outputs_1, outputs_2]
def compute_output_shape(self, input_shape):
"""Compute the shape of the output tensor produced by the Softargmax operator.
Args:
input_shape (4-element list or tuple): Input shape to the Softargmax operator.
"""
assert isinstance(input_shape, (list, tuple))
assert len(input_shape) == 4
if self._data_format == 'channels_first':
batch_size, nkeypoints, _, _ = input_shape
elif self._data_format == 'channels_last':
batch_size, _, _, nkeypoints = input_shape
else:
raise ValueError(
'Provide either `channels_first` or `channels_last` for `data_format`.'
)
output_shape_1 = (batch_size, nkeypoints*2, 1, 1)
output_shape_2 = (batch_size, nkeypoints, 1, 1)
return [output_shape_1, output_shape_2]
def get_config(self):
"""Create the config, enabling (de)serialization."""
config = super(Softargmax, self).get_config()
config['input_shape'] = self._input_shape
config['beta'] = self._beta
config['data_format'] = self._data_format
return config
@classmethod
def _index_initializers(cls, height, width, dtype):
"""Create constant initializers for the x and y locations, respectively.
Args:
height (int): Input height to the Softargmax operator.
width (int): Input width to the Softargmax operator.
dtype (type): Data type for the initializers.
"""
        row_indexes = np.arange(0, height, dtype=dtype)
        col_indexes = np.arange(0, width, dtype=dtype)
# col_grid gives a column measurement matrix to be used for getting
# 'x'. It is a matrix where each row has the sequential values starting
# from 0 up to n_col-1:
# 0,1,2, ..., n_col-1
# 0,1,2, ..., n_col-1
# 0,1,2, ..., n_col-1
# row_grid gives a row measurement matrix to be used for getting 'y'.
# It is a matrix where each column has the sequential values starting
# from 0 up to n_row-1:
# 0,0,0, ..., 0
# 1,1,1, ..., 1
# 2,2,2, ..., 2
# ...
# n_row-1, ..., n_row-1
        col_grid, row_grid = np.meshgrid(col_indexes, row_indexes)
row_index_init = Constant(value=row_grid)
col_index_init = Constant(value=col_grid)
return row_index_init, col_index_init
@classmethod
def _reduce_channel(cls, inputs, operation=K.sum, keepdims=True):
"""Reduce all channels with the specified operation to a single value.
Args:
inputs (Tensor, 4D): Input tensor with the data to reduced.
Optional args:
operation (function): Reduce operation to be performed (default: K.sum).
keepdims (bool): Toggles if 1-dimensions should be kept (default: True).
"""
reduced_per_row = operation(inputs, axis=2, keepdims=True)
reduced_per_channel = operation(
reduced_per_row, axis=3, keepdims=keepdims)
return reduced_per_channel
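

# Illustrative sketch (not part of the original module): applying Softargmax
# to an 80-channel heat-map tensor. Shapes are examples only.
def _example_softargmax():
    from keras.layers import Input
    # Batchless shape (C, H, W); the full shape list includes the batch dim.
    heat_maps = Input(shape=(80, 80, 80), name='heat_maps')
    shape = heat_maps.get_shape().as_list()
    coords, confidence = Softargmax(shape, beta=0.1,
                                    data_format='channels_first')(heat_maps)
    # coords: (N, 80, 2) x/y estimates; confidence: (N, 80) peak probabilities.
    return coords, confidence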
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/models/custom/softargmax.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FpeNet Models utililty definitions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/models/custom/__init__.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
""" Test for the Softargmax operator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Input
from keras.models import Model
import numpy as np
import pytest
from nvidia_tao_tf1.cv.fpenet.models.custom.softargmax import Softargmax
def softargmax_numpy(input_vals,
beta,
epsilon=1e-6,
data_format='channels_first'):
"""A NumPy implementation of Softargmax.
Args:
input_vals (numpy.ndarray, 4d): Input values to be processed.
beta (float): Coefficient used for multiplying the key-point maps after
subtracting the channel-wise maximum.
Optional args:
epsilon (float): Epsilon added to the denominator of the Softmax-like operation
per channel (default value taken from original implementation in Theano).
data_format (str): Expected tensor format, either 'channels_first' or 'channels_last'.
Default value is 'channels_first' because 'channels_last' is not implemented.
"""
if data_format == 'channels_last':
input_vals = input_vals.transpose(0, 3, 1, 2)
elif data_format != 'channels_first':
raise ValueError(
'Provide either `channels_first` or `channels_last` for `data_format`.'
)
# Shape: (batch_size, channels, height, width)
batch_size, channels, height, width = input_vals.shape
n_row, n_col = height, width
n_kpts = channels
# Shape: (batch_size, channels, height*width)
input_3d = input_vals.reshape(batch_size, channels, height * width)
    # Shape: (batch_size, channels) - Find the maximum pixel value in each
    # channel (each channel corresponds to one key-point).
map_max = input_3d.max(axis=2)
# Shape: (batch_size, channels, 1) - Achieve the same number of dimensions as input_3d so that
# we can calculate the difference.
map_max_3d = np.expand_dims(map_max, axis=-1)
# Shape: (batch_size, channels, height*width) - Shift the original input values down by the
# maximum value, achieving a 3d tensor with non-positive values.
input_3d = input_3d - map_max_3d
# Everything in this section can be implemented with a standard Softmax call from cuDNN or
# TensorRT:
###############################################################################################
# Shape: (batch_size, channels, height*width) - Multiply each (non-positive) value from input_3d
# with beta. For hand-pose network, beta = 0.1.
product_beta = np.multiply(input_3d, beta)
# Shape: (batch_size, channels, height*width) - See activation value range here:
# https://www.wolframalpha.com/input/?i=exp(0.1x).
    # Maximum value of exp_maps is 1 (at the location of the maximum from
    # input_3d) because inputs are non-positive.
# Runtime on Intel(R) Core(TM) i7-6850K CPU @ 3.60GHz: 1.25 ms.
exp_maps = np.exp(product_beta)
    # Shape: (batch_size, channels) - Mean of the exponentiated maps per
    # channel, used as the per-key-point confidence value (third output).
    probs = exp_maps.mean(axis=2)
# Shape: (batch_size, channels) - Sum of all values along positional dimension.
exp_maps_sum = np.sum(exp_maps, axis=2)
# Shape: (batch_size, channels), output matrix, third output index of the layer.
# z_vals = exp_maps_sum
# Shape after loop: (batch_size, channels, 1, 1) - Achieve 4d representation for element-wise
# division.
input_3d_sum_4d = exp_maps_sum
for _ in range(2):
input_3d_sum_4d = np.expand_dims(input_3d_sum_4d, axis=-1)
# Shape: (batch_size, channels, height, width) - Achieve 4d representation for element-wise
# division.
exp_maps_reshaped = exp_maps.reshape([-1, n_kpts, n_row, n_col])
# Shape: (batch_size, channels, 1, 1) - Add epsilon to prevent division by zero.
input_3d_sum_4d_epsilon = np.add(input_3d_sum_4d, epsilon)
# Shape: (batch_size, channels, height, width) - Divide each element by the sum.
# Similar to classical Softmax. Resulting in float values between 0 and 1 that can be
# interpreted as probabilities.
# Runtime on Intel(R) Core(TM) i7-6850K CPU @ 3.60GHz: 0.4 ms
normalized_maps_4d = np.divide(exp_maps_reshaped, input_3d_sum_4d_epsilon)
###############################################################################################
# Shape: (batch_size, channels, height, width), output tensor, fourth and last output index of
# the layer
# z_maps = normalized_maps_4d
col_vals = np.arange(n_col, dtype=input_vals.dtype)
col_repeat = np.tile(col_vals, n_row)
# Shape: (1, 1, height, width)
col_idx = col_repeat.reshape(1, 1, n_row, n_col)
    # col_idx gives a column measurement matrix to be used for getting
# 'x'. It is a matrix where each row has the sequential values starting
# from 0 up to n_col-1:
# 0,1,2, ..., n_col-1
# 0,1,2, ..., n_col-1
# 0,1,2, ..., n_col-1
row_vals = np.arange(n_row, dtype=input_vals.dtype)
row_repeat = np.repeat(row_vals, n_col)
# Shape: (1, 1, height, width)
row_idx = row_repeat.reshape(1, 1, n_row, n_col)
    # row_idx gives a row measurement matrix to be used for getting 'y'.
# It is a matrix where each column has the sequential values starting
# from 0 up to n_row-1:
# 0,0,0, ..., 0
# 1,1,1, ..., 1
# 2,2,2, ..., 2
# ...
# n_row-1, ..., n_row-1
# Shape: (batch_size, channels, height, width)
# Get a probability-weighted column index.
# Runtime on Intel(R) Core(TM) i7-6850K CPU @ 3.60GHz: 0.5 ms
weighted_x = np.multiply(normalized_maps_4d, col_idx)
# Shape: (batch_size, channels, height*width)
# Reshape for sum operation
weighted_x_3d = weighted_x.reshape(batch_size, channels, height * width)
# Shape: (batch_size, channels)
# Calculate weighted sum of X coordinates for each key-point.
# Output matrix, first output index of the layer
x_vals = np.sum(weighted_x_3d, axis=2)
# Shape: (batch_size, channels, height, width)
# Get a probability-weighted row index.
# Runtime on Intel(R) Core(TM) i7-6850K CPU @ 3.60GHz: 0.5 ms
weighted_y = np.multiply(normalized_maps_4d, row_idx)
# Shape: (batch_size, channels, height*width)
# Reshape for sum operation
weighted_y_3d = weighted_y.reshape(batch_size, channels, height * width)
# Shape: (batch_size, channels), output matrix, second output index of the layer
# Calculate weighted sum of Y coordinates for each key-point.
y_vals = np.sum(weighted_y_3d, axis=2)
outputs = np.stack((x_vals, y_vals, probs), axis=1)
outputs = np.transpose(outputs, (0, 2, 1))
# keypoints values
outputs1 = outputs[:, :, :2]
# confidence values
outputs2 = outputs[:, :, 2]
return outputs1, outputs2
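
# Example call (illustrative): for random maps of shape (2, 68, 80, 80),
#   kpts, conf = softargmax_numpy(np.random.rand(2, 68, 80, 80), beta=0.1)
# returns `kpts` of shape (2, 68, 2) and `conf` of shape (2, 68).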
def calculate_absdiff(tensor_a, tensor_b):
"""Calculate the absolute difference between two tensors.
Args:
tensor_a (numpy.ndarray): The first tensor.
tensor_b (numpy.ndarray): The second tensor.
"""
assert hasattr(tensor_a, 'shape') and hasattr(tensor_b, 'shape')
assert tensor_a.shape == tensor_b.shape
diff = tensor_a - tensor_b
absdiff = np.abs(diff)
return absdiff
def create_softargmax_model(input_shape, beta, data_format):
"""Create a Keras model consisting of a single Softargmax layer.
Args:
input_shape (4-element list or tuple): Input shape in the specified data format.
beta (float): Coefficient used for multiplying the key-point maps after
subtracting the channel-wise maximum.
data_format (str): Expected tensor format, either 'channels_first' or 'channels_last'.
"""
input_shape_without_batch = input_shape[1:]
inputs = Input(name='input', shape=input_shape_without_batch)
softargmax = Softargmax(
input_shape, beta=beta, data_format=data_format)(inputs)
model = Model(inputs=inputs, outputs=softargmax)
return model
@pytest.mark.parametrize(
'batch_size, nkeypoints, height, width, beta, data_format',
[(1, 68, 80, 80, 0.1, 'channels_first'),
(128, 68, 80, 80, 0.1, 'channels_first'),
(3, 21, 99, 40, 0.5, 'channels_last')])
def test_softargmax(batch_size,
nkeypoints,
height,
width,
beta,
data_format,
acceptable_diff=1e-4):
""" Test the Softargmax implementation in Keras against a NumPy implementation.
Args:
batch_size, nkeypoints, height, width: Input dimensions to be processed.
beta (float): Coefficient used for multiplying the key-point maps after
subtracting the channel-wise maximum.
Optional args:
data_format (str): Expected tensor format, either 'channels_first' or 'channels_last'.
acceptable_diff (float): Indicates the maximum acceptable difference value between the
Keras prediction and the NumPy prediction.
"""
if data_format == 'channels_first':
input_shape = (batch_size, nkeypoints, height, width)
elif data_format == 'channels_last':
input_shape = (batch_size, height, width, nkeypoints)
else:
raise ValueError(
'Provide either `channels_first` or `channels_last` for `data_format`.'
)
model = create_softargmax_model(input_shape, beta, data_format)
model.compile(optimizer='rmsprop', loss='mse')
input_vals = np.random.rand(batch_size, nkeypoints, height, width)
input_vals = input_vals.astype('float32')
if data_format == 'channels_last':
input_vals = input_vals.transpose(0, 2, 3, 1)
elif data_format != 'channels_first':
raise ValueError(
'Provide either `channels_first` or `channels_last` for `data_format`.'
)
prediction_keypoints, prediction_confidence = model.predict(input_vals)
assert hasattr(prediction_confidence, 'shape')
epsilon_numpy = 1e-6
prediction_numpy_keypoints, prediction_numpy_confidence = softargmax_numpy(
input_vals, beta=beta, epsilon=epsilon_numpy, data_format=data_format)
assert hasattr(prediction_numpy_confidence, 'shape')
absdiff = calculate_absdiff(prediction_keypoints, prediction_numpy_keypoints)
max_absdiff = np.max(absdiff)
    assert max_absdiff < acceptable_diff, \
        'The maximum absolute difference between the NumPy and Keras ' \
        'predictions exceeds the acceptable threshold.'
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/models/custom/test_softargmax.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FpeNet Trainers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nvidia_tao_tf1.cv.fpenet.trainers.fpenet_trainer import FpeNetTrainer
__all__ = (
'FpeNetTrainer',
)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/trainers/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FpeNet Trainer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import logging
import os
from keras import backend as K
import tensorflow as tf
import nvidia_tao_tf1.core as tao_core
from nvidia_tao_tf1.blocks.trainer import Trainer
from nvidia_tao_tf1.core import distribution
from nvidia_tao_tf1.core.hooks.sample_counter_hook import SampleCounterHook
from nvidia_tao_tf1.core.utils import set_random_seed
from nvidia_tao_tf1.cv.common.utilities.serialization_listener import (
EpochModelSerializationListener
)
from nvidia_tao_tf1.cv.common.utilities.tlt_utils import get_tf_ckpt
from nvidia_tao_tf1.cv.detectnet_v2.tfhooks.task_progress_monitor_hook import (
TaskProgressMonitorHook
)
from nvidia_tao_tf1.cv.detectnet_v2.tfhooks.utils import get_common_training_hooks
from nvidia_tao_tf1.cv.fpenet.evaluation.fpenet_evaluator import FpeNetEvaluator
from nvidia_tao_tf1.cv.fpenet.losses.fpenet_loss import FpeNetEltLoss
from nvidia_tao_tf1.cv.fpenet.visualization import FpeNetVisualizer
logger = logging.getLogger(__name__)
MODEL_EXTENSION = ".hdf5"
class FpeNetTrainer(Trainer):
"""FpeNet Trainer object that builds the training graph and execution."""
@tao_core.coreobject.save_args
def __init__(self,
checkpoint_dir=None,
random_seed=42,
log_every_n_secs=60,
checkpoint_n_epoch=1,
num_epoch=40,
infrequent_summary_every_n_steps=0,
enable_visualization=True,
visualize_num_images=3,
num_keypoints=80,
key=None,
log_resources=False,
**kwargs):
"""__init__ method.
Args:
checkpoint_dir (str): Path to directory containing checkpoints.
random_seed (int): Set random seed.
log_every_n_secs (int): Log every n secs.
checkpoint_n_epoch (int): Checkpoint every n epochs.
num_epoch (int): Number of training epochs.
            infrequent_summary_every_n_steps (int): Infrequent summary every n steps.
enable_visualization (bool): Toggle to enable visualization.
visualize_num_images (int): Number of data images to show on Tensorboard.
num_keypoints (int): Number of facial keypoints.
key (str): Key to decode the model.
log_resources (bool): Toggle to log GPU usage resources in tensorboard.
"""
super(FpeNetTrainer, self).__init__(**kwargs)
self._random_seed = random_seed
self._checkpoint_dir = checkpoint_dir
self._log_every_n_secs = log_every_n_secs
self._checkpoint_n_epoch = checkpoint_n_epoch
self._num_epoch = num_epoch
self._infrequent_summary_every_n_steps = infrequent_summary_every_n_steps
self._summary_every_n_steps = self._steps_per_epoch = \
self._validation_every_n_steps = self._train_op = self._evaluator =\
self._eyelids_loss = self._mouth_loss = None
self._total_loss = 0.0
self._enable_visualization = enable_visualization
self._visualize_num_images = visualize_num_images
self._visualizer = FpeNetVisualizer(
self._checkpoint_dir, num_images=self._visualize_num_images)
self._num_keypoints = num_keypoints
self._key = key
self._log_resources = log_resources
self._worker_count = distribution.get_distributor().size()
self._worker_index = distribution.get_distributor().rank()
def build(
self,
eval_mode='validation',
eval_model_path=None):
"""
Build the training and validation graph.
Args:
            eval_mode (str): Evaluation mode, either 'validation' or 'kpi_testing'.
                'validation'- Validation step during training.
                'kpi_testing'- KPI data testing.
eval_model_path (str): Path to the model file to be evaluated.
"""
# Set random seeds.
seed = distribution.get_distributor().distributed_seed(
self._random_seed)
set_random_seed(seed)
# Set learning phase to 1 for building the train graph.
K.set_learning_phase(1)
# Prepare data for training.
images, ground_truth_labels, num_samples, occ_masking_info = \
self._dataloader(phase='training')
# Compute num_samples per gpu.
num_samples = num_samples // self._worker_count
self._batch_size = self._dataloader.batch_size
self._steps_per_epoch = num_samples // self._batch_size
self._last_step = self._steps_per_epoch * self._num_epoch
# Visualization of images and data distribution.
if self._enable_visualization:
# Add images to Tensorboard.
self._visualizer.visualize_images(
images, ground_truth_labels[0], viz_phase='training')
# Summary and validate at the end of every epoch.
self._summary_every_n_steps = self._steps_per_epoch / 10
# Build model.
predictions = self._model.build(images,
num_keypoints=self._num_keypoints,
enc_key=self._key)
predictions_coord = K.reshape(predictions['landmarks'],
(self._dataloader.batch_size,
self._num_keypoints, 2))
# Add images to Tensorboard.
if self._enable_visualization:
self._visualizer.visualize_images(images,
predictions_coord,
viz_phase='training_predictions')
# For freezing parts of the model.
trainable_weights = self._model.keras_model.trainable_weights
# Build optimizer.
if hasattr(self._optimizer, '_learning_rate_schedule') and \
hasattr(self._optimizer._learning_rate_schedule, '_last_step'):
self._optimizer._learning_rate_schedule._last_step = self._last_step
self._optimizer.build()
# Compute loss.
self._landmarks_loss, self._mouth_loss, self._eyelids_loss = \
self._loss(y_true=ground_truth_labels[0],
y_pred=predictions_coord,
occ_true=ground_truth_labels[1],
occ_masking_info=occ_masking_info,
num_keypoints=self._num_keypoints)
self._total_loss += self._landmarks_loss
# Compute ELT loss.
        if eval_mode != 'kpi_testing':
elt_loss = FpeNetEltLoss(self._loss.elt_loss_info,
image_height=self._dataloader.image_height,
image_width=self._dataloader.image_width,
num_keypoints=self._num_keypoints)
if elt_loss.enable_elt_loss:
# apply random transform to images and also retrieve transformation matrix
images_tm, mapMatrix = elt_loss.transform_images(images)
# make predictions on the transformed images using current model
predictions_tm = self._model.keras_model(images_tm)
predictions_tm_coord = K.reshape(predictions_tm[0],
(self._dataloader.batch_size,
self._num_keypoints, 2))
# apply same transformation to predicted/ground truth labels
ground_truth_labels_tm = elt_loss.transform_points(ground_truth_labels[0],
mapMatrix)
# compute elt loss
self._elt_loss, _, _ = self._loss(y_true=ground_truth_labels_tm,
y_pred=predictions_tm_coord,
occ_true=ground_truth_labels[1],
occ_masking_info=occ_masking_info,
num_keypoints=self._num_keypoints,
loss_name='elt')
# scale the elt loss term
self._total_loss += elt_loss.elt_alpha * self._elt_loss
# Add images to Tensorboard.
if self._enable_visualization:
self._visualizer.visualize_images(images_tm,
ground_truth_labels_tm,
viz_phase='training_elt')
# Create optimizer.
self._train_op = self._optimizer.minimize(loss=self._total_loss,
var_list=trainable_weights)
if eval_model_path is None:
logger.info(
"Evaluation model file path wasn't provided. "
"Getting the latest checkpoint in {checkpoint_dir}".format(
checkpoint_dir=self._checkpoint_dir
)
)
eval_model_path = self.get_latest_checkpoint(
self._checkpoint_dir,
self._key,
extension=MODEL_EXTENSION)
logger.info("Evaluating using the model at {eval_model_path}".format(
eval_model_path=eval_model_path
))
# Build evaluator.
self._evaluator = FpeNetEvaluator(
self._model, self._dataloader, self._checkpoint_dir, eval_mode,
self._visualizer, self._enable_visualization, self._num_keypoints,
self._loss, key=self._key, model_path=eval_model_path,
steps_per_epoch=self._steps_per_epoch
)
self._evaluator.build()
self._validation_every_n_steps = self._steps_per_epoch * self._checkpoint_n_epoch
@property
def train_op(self):
"""Return train optimizer of Trainer."""
return self._train_op
def get_latest_checkpoint(
self,
results_dir,
key,
extension=".ckzip"):
"""Get the latest checkpoint path from a given results directory.
Parses through the directory to look for the latest checkpoint file
and returns the path to this file.
Args:
results_dir (str): Path to the results directory.
key (str): Key to load .tlt model
extension (str): Extension of the file to be filtered.
Returns:
ckpt_path (str): Path to the latest checkpoint.
"""
print(f"Checkpoint results dir {results_dir}")
checkpoint_glob_string = os.path.join(
results_dir, f"model.epoch-*{extension}"
)
trainable_ckpts = [
int(os.path.basename(item).split('.')[1].split('-')[1])
for item in glob.glob(checkpoint_glob_string)
]
num_ckpts = len(trainable_ckpts)
if num_ckpts == 0:
return None
latest_step = sorted(trainable_ckpts, reverse=True)[0]
latest_checkpoint = os.path.join(
results_dir,
f"model.epoch-{latest_step}{extension}"
)
if extension in [".tlt", ".hdf5"]:
return latest_checkpoint
return get_tf_ckpt(latest_checkpoint, key, latest_step)
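
    # Example (illustrative): with files model.epoch-3.hdf5 and
    # model.epoch-12.hdf5 under `results_dir` and extension='.hdf5', the glob
    # above yields the epoch numbers [3, 12], so the method returns the path
    # to model.epoch-12.hdf5.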
def train(self):
"""Run the training."""
checkpoint_dir = self._checkpoint_dir \
if distribution.get_distributor().is_master() else None
log_tensors = {
'step': tf.train.get_global_step(),
'loss': self._total_loss,
'epoch': tf.train.get_global_step() / self._steps_per_epoch,
'landmarks_loss': self._landmarks_loss
}
if self._loss.elt_loss_info['enable_elt_loss']:
log_tensors['elt_loss'] = self._elt_loss
serialization_listener = EpochModelSerializationListener(
checkpoint_dir=checkpoint_dir,
model=self._model,
key=self._key,
steps_per_epoch=self._steps_per_epoch,
max_to_keep=None
)
listeners = [serialization_listener]
common_hooks = get_common_training_hooks(
log_tensors=log_tensors,
log_every_n_secs=self._log_every_n_secs,
checkpoint_n_steps=self._checkpoint_n_epoch * self._steps_per_epoch,
model=None,
last_step=self._last_step,
checkpoint_dir=checkpoint_dir,
scaffold=self.scaffold,
steps_per_epoch=self._steps_per_epoch,
summary_every_n_steps=self._summary_every_n_steps,
infrequent_summary_every_n_steps=self._infrequent_summary_every_n_steps,
validation_every_n_steps=self._validation_every_n_steps,
evaluator=self._evaluator,
listeners=listeners,
key=self._key
)
if self._worker_index == 0:
self._hooks = [SampleCounterHook(batch_size=self._worker_count * self._batch_size,
name="Train")]
else:
self._hooks = []
# if self._log_resources:
# self._hooks = self._hooks + [ResourceHook(checkpoint_dir, write_interval=1)]
if self._worker_index == 0:
self._hooks.append(TaskProgressMonitorHook(log_tensors,
checkpoint_dir,
self._num_epoch,
self._steps_per_epoch))
hooks = self._hooks + common_hooks
checkpoint_filename = self.get_latest_checkpoint(self._checkpoint_dir, self._key)
self.run_training_loop(
train_op=self._train_op,
hooks=hooks,
checkpoint_dir=checkpoint_filename)
def run_testing(self):
"""Run testing on test and KPI data after training is done."""
self._evaluator.evaluate()
def run_training_loop(self, train_op, hooks, checkpoint_dir=None):
"""Run the training loop in a tensorflow session.
Args:
train_op (tensor): Tensorflow op to be evaluated to take a training step.
hooks (list of Hooks): List of Tensorflow Hooks to be used as callbacks while running
training.
checkpoint_dir (str): for resuming from a checkpoint. If this value is `None` it will
not restore variables. If it points to a directory, it will find the latest variable
snapshot and resume from there.
"""
# Save checkpoints only on worker 0 to prevent other workers from corrupting them.
# The SingularMonitoredSession takes care of session initialization,
# restoring from a checkpoint, saving to a checkpoint, and closing when done
# or an error occurs.
# Notice we are not using the `MonitoredTrainingSession` variant because that automatically
        # adds unwanted hooks if a `checkpoint_dir` is provided; and if we do not provide it,
        # we cannot resume our checkpoint.
config = distribution.get_distributor().get_config()
ignore_keras_values = checkpoint_dir is not None
if hooks is None:
hooks = []
if self._model.keras_model is not None:
# KerasModelHook takes care of initializing model variables.
hooks.insert(0, tao_core.hooks.KerasModelHook(
self._model.keras_model,
ignore_keras_values)
)
with tf.compat.v1.train.SingularMonitoredSession(
hooks=hooks,
scaffold=self.scaffold,
config=config,
checkpoint_filename_with_path=checkpoint_dir
) as sess:
try:
while not sess.should_stop():
# Run training ops with the wrapped session.
sess.run(train_op)
except (KeyboardInterrupt, SystemExit):
logger.info("Training interrupted.")
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/trainers/fpenet_trainer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for FPENet Trainer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import keras.backend as K
from nvidia_tao_tf1.blocks import learning_rate_schedules
from nvidia_tao_tf1.blocks import optimizers
import tensorflow as tf
from nvidia_tao_tf1.core.utils import get_all_simple_values_from_event_file
from nvidia_tao_tf1.cv.fpenet.losses.fpenet_loss import FpeLoss
from nvidia_tao_tf1.cv.fpenet.models.fpenet_basemodel import FpeNetBaseModel
from nvidia_tao_tf1.cv.fpenet.trainers.fpenet_trainer import FpeNetTrainer
class _synthetic_dataloader():
"""Create synthetic dataloader for test."""
def __init__(self, phase='training'):
self.phase = phase
self.batch_size = 4
self.image_width = 80
self.image_height = 80
self.images = tf.fill((4, 1, 80, 80), 255.0)
def __call__(self, repeat=True, phase='validation'):
images = self.images
label = (tf.zeros([4, 80, 2], dtype='float32'), tf.zeros([4, 80], dtype='float32'))
masking_occ_info = tf.zeros([4], dtype='float32')
num_samples = 4
return images, label, num_samples, masking_occ_info
def _create_trainer(phase, checkpoint_dir):
"""
Create trainer object.
Args:
phase (str): phase for dataloader- 'training' or 'validation'
checkpoint_dir (str): folder path for model.
"""
dataloader = _synthetic_dataloader(phase=phase)
model_parameters = {
'beta': 0.01,
'dropout_rate': 0.5,
'freeze_Convlayer': None,
'pretrained_model_path': None,
'regularizer_type': 'l2',
'regularizer_weight': 1.0e-05,
'type': 'FpeNet_base',
'use_upsampling_layer': False,
'visualization_parameters': None,
}
model = FpeNetBaseModel(model_parameters)
learning_rate_schedule = learning_rate_schedules.SoftstartAnnealingLearningRateSchedule(
annealing=0.5,
base_learning_rate=0.0005,
min_learning_rate=1.0e-07,
soft_start=0.3
)
optimizer = optimizers.AdamOptimizer(
learning_rate_schedule=learning_rate_schedule,
)
elt_loss_info = {
'elt_alpha': 0.5,
'enable_elt_loss': True,
'modulus_spatial_augmentation': {}}
loss = FpeLoss('l1', elt_loss_info=elt_loss_info)
trainer = FpeNetTrainer(
dataloader=dataloader,
model=model,
optimizer=optimizer,
loss=loss,
checkpoint_dir=checkpoint_dir,
random_seed=42,
log_every_n_secs=5,
checkpoint_n_epoch=1,
num_epoch=10,
infrequent_summary_every_n_steps=0,
enable_visualization=False,
visualize_num_images=3,
num_keypoints=80,
key="0"
)
return trainer
def test_trainer_train(tmpdir):
"""Test whether trainer trains correctly."""
K.clear_session()
trainer = _create_trainer('training', str(tmpdir))
trainer.build()
trainer.train()
# Test on trainable weights. Need to update if freezing a part of model.
assert len(trainer._model.keras_model.trainable_weights) == 38
tensorboard_log_dir = os.path.join(str(tmpdir), "events")
assert os.path.isdir(tensorboard_log_dir), (
f"Tensorboard log directory not found at {tensorboard_log_dir}"
)
values_dict = get_all_simple_values_from_event_file(tensorboard_log_dir)
loss_key = 'l1_net_loss'
assert loss_key in values_dict.keys()
# Get loss values as a list for all steps.
loss_values = [loss_tuple[1] for loss_tuple in values_dict[loss_key].items()]
# Form a list to determine whether loss has decreased across each step.
is_loss_reduced = [loss_values[i] >= loss_values[i+1]
for i in range(len(loss_values)-1)]
loss_reduced_percentage = sum(is_loss_reduced) / len(is_loss_reduced)
assert loss_reduced_percentage >= 0.5
def test_trainer_evaluator(tmpdir):
"""Test whether trainer passes variables to evaluator correctly."""
K.clear_session()
    trainer = _create_trainer('validation', str(tmpdir))
trainer.build()
evaluator = trainer._evaluator
# Assert that instance variables of evaluator match with values in trainer spec.
assert evaluator.save_dir == tmpdir
assert evaluator.mode == "validation"
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/trainers/test_fpenet_trainer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for FpeNet train script."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from mock import MagicMock
import pytest
from nvidia_tao_tf1.cv.fpenet.scripts.train import main
@pytest.mark.parametrize("results_dir", ['results_dir', None])
@pytest.mark.parametrize("is_master", [True, False])
@pytest.mark.parametrize("log_level", ['INFO', 'ERROR'])
def test_train_script_main(mocker, log_level, is_master, results_dir):
"""
Test FpeNet train script main function.
Args:
mocker (Mocker obj): Mocker instance for replaying of expectations on mock objects.
log_level (str): Log level. Options 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'.
is_master (bool): distribution to check current process is the master process.
results_dir (str): Result checkpoint path.
Returns:
None
"""
test_spec = expected_spec = {
'checkpoint_dir': 'fake_dir',
}
scripts_module = "nvidia_tao_tf1.cv.fpenet.scripts"
mocked_mkdir_p = mocker.patch(f'{scripts_module}.train.mkdir_p')
mocked_trainer = MagicMock()
mocked_deserialize = mocker.patch(
f'{scripts_module}.train.nvidia_tao_tf1.core.coreobject.deserialize_tao_object',
return_value=mocked_trainer
)
mocker.patch(
f'{scripts_module}.train.nvidia_tao_tf1.core.distribution.'
'distribution.Distributor.is_master',
return_value=is_master
)
mocker.patch(
f'{scripts_module}.train.yaml.load',
return_value=test_spec
)
args = ['-l', log_level]
if results_dir:
args += ['-r', results_dir]
expected_spec['checkpoint_dir'] = 'results_dir'
args += ['-k', '0']
main(args)
mocked_deserialize.assert_called_once_with(expected_spec)
mocked_trainer.build.assert_called_once()
mocked_trainer.train.assert_called_once()
if is_master:
mocked_mkdir_p.assert_called_once()
mocked_trainer.to_yaml.assert_called_once()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/scripts/test_train.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FpeNet Scripts."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/scripts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Export trained FPENet Keras model to etlt (UFF or ONNX) format.'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import os
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.common.utilities.path_processing import mkdir_p
from nvidia_tao_tf1.cv.fpenet.exporter.fpenet_exporter import FpeNetExporter
DEFAULT_MAX_WORKSPACE_SIZE = 1 * (1 << 30)
DEFAULT_MAX_BATCH_SIZE = 1
def build_command_line_parser(parser=None):
'''Build the command line parser using argparse.
Args:
parser (subparser): Provided from the wrapper script to build a chained
parser mechanism.
Returns:
parser
'''
if parser is None:
parser = argparse.ArgumentParser(prog='export', description='Encrypted UFF exporter.')
parser.add_argument(
'-m',
'--model_filename',
type=str,
required=True,
help='Absolute path to the model file to export.'
)
parser.add_argument(
'-k',
'--key',
required=False,
type=str,
default="",
help='Key to save or load a model.')
parser.add_argument(
'-o',
'--out_file',
required=False,
type=str,
default=None,
help='Path to the output .etlt file.')
parser.add_argument(
'-t',
'--target_opset',
required=False,
type=int,
default=10,
help='Target opset version to use for onnx conversion.')
parser.add_argument(
'--cal_data_file',
default='',
type=str,
help='Tensorfile to run calibration for int8 optimization.')
parser.add_argument(
'--cal_image_dir',
default='',
type=str,
        help='Directory of images to run int8 calibration if data file is unavailable.')
parser.add_argument(
'--data_type',
type=str,
default='fp32',
help='Data type for the TensorRT export.',
choices=['fp32', 'fp16', 'int8'])
parser.add_argument(
'-s',
'--strict_type_constraints',
action='store_true',
default=False,
help='Apply TensorRT strict_type_constraints or not for INT8 mode.')
parser.add_argument(
'--cal_cache_file',
default='./cal.bin',
type=str,
help='Calibration cache file to write to.')
parser.add_argument(
'--batches',
type=int,
default=10,
help='Number of batches to calibrate over.')
parser.add_argument(
'--max_workspace_size',
type=int,
default=DEFAULT_MAX_WORKSPACE_SIZE,
help='Max size of workspace to be set for TensorRT engine builder.')
parser.add_argument(
'--max_batch_size',
type=int,
default=DEFAULT_MAX_BATCH_SIZE,
help='Max batch size for TensorRT engine builder.')
parser.add_argument(
'--batch_size',
type=int,
default=1,
help='Number of images per batch.')
parser.add_argument(
'--engine_file',
type=str,
default=None,
help='Path to the exported TRT engine.')
parser.add_argument(
'--static_batch_size',
type=int,
default=-1,
        help='Set a static batch size for the exported etlt model. \
            Default is -1 (dynamic batch size).')
parser.add_argument(
'--opt_batch_size',
type=int,
default=1,
help="Optimium batch size to use for int8 calibration.")
parser.add_argument(
'-d',
'--input_dims',
type=str,
default='1,80,80',
        help='Input dims: channels_first (CHW) or channels_last (HWC).')
parser.add_argument(
'-b',
'--backend',
choices=['uff', 'tfonnx', 'onnx'],
type=str,
default='tfonnx',
help='Model type to export to.')
parser.add_argument(
'-ll',
'--log_level',
type=str,
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
default='INFO',
help='Set logging level.'
)
parser.add_argument(
'-r',
'--results_dir',
type=str,
default=None,
help='Path to a folder where experiment outputs will be created, or specify in spec file.')
return parser
def parse_command_line(args=None):
'''Simple function to parse command line arguments.
Args:
args (list): List of strings used as command line arguments.
If None, sys.argv is used.
Returns:
args_parsed: Parsed arguments.
'''
parser = build_command_line_parser()
args_parsed = parser.parse_args(args)
return args_parsed
def run_export(args=None):
'''Wrapper to run export of tlt models.
Args:
args (dict): Dictionary of parsed arguments to run export.
Returns:
None.
'''
results_dir = args.results_dir
if results_dir:
mkdir_p(results_dir)
# Writing out status file for TAO.
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=True,
verbosity=1,
append=True
)
)
status_logging.get_status_logger().write(
data=None,
status_level=status_logging.Status.STARTED,
message="Starting export."
)
# Parsing command line arguments.
model_name = args.model_filename
key = args.key
output_filename = args.out_file
backend = args.backend
input_dims = [int(i) for i in args.input_dims.split(',')]
assert len(input_dims) == 3, "Input dims need to have three values."
target_opset = args.target_opset
log_level = args.log_level
# Calibrator configuration.
cal_cache_file = args.cal_cache_file
cal_image_dir = args.cal_image_dir
cal_data_file = args.cal_data_file
batch_size = args.batch_size
n_batches = args.batches
data_type = args.data_type
strict_type = args.strict_type_constraints
engine_file_name = args.engine_file
max_workspace_size = args.max_workspace_size
max_batch_size = args.max_batch_size
static_batch_size = args.static_batch_size
opt_batch_size = args.opt_batch_size
save_engine = False
if engine_file_name is not None:
save_engine = True
# Build logger file.
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s')
logger = logging.getLogger(__name__)
logger.setLevel(log_level)
logger.warning('Please verify the input dimension and input name before using this code!')
# Set default output filename if the filename
# isn't provided over the command line.
output_extension = backend
if backend in ["onnx", "tfonnx"]:
output_extension = "onnx"
if output_filename is None:
split_name = os.path.splitext(model_name)[0]
output_filename = f"{split_name}.{output_extension}"
if not output_filename.endswith(output_extension):
output_filename = f"{output_filename}.{output_extension}"
logger.info("Saving exported model to {}".format(output_filename))
# Make an output directory if necessary.
output_root = os.path.dirname(os.path.realpath(output_filename))
if not os.path.exists(output_root):
os.makedirs(output_root)
# Build exporter instance
exporter = FpeNetExporter(model_name,
key,
backend=backend,
data_type=data_type,
strict_type=strict_type)
# Export the model to etlt file and build the TRT engine.
exporter.export(input_dims,
output_filename,
backend,
data_file_name=cal_data_file,
calibration_cache=os.path.realpath(cal_cache_file),
n_batches=n_batches,
batch_size=batch_size,
target_opset=target_opset,
save_engine=save_engine,
engine_file_name=engine_file_name,
calibration_images_dir=cal_image_dir,
max_batch_size=max_batch_size,
static_batch_size=static_batch_size,
opt_batch_size=opt_batch_size,
max_workspace_size=max_workspace_size)
    logger.info('Model exported at: %s', output_filename)
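
# Illustrative invocation (all paths and the key below are placeholders):
#   python export.py -m /workspace/model.tlt -k $KEY \
#       -o /workspace/model.onnx -b tfonnx --data_type fp16 \
#       --engine_file /workspace/model.engine -r /workspace/results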
def main(cl_args=None):
"""Run exporting."""
try:
args = parse_command_line(cl_args)
run_export(args)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Export finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Export was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == '__main__':
main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/scripts/export.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for FpeNet evaluate_model script."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from mock import MagicMock
import pytest
from nvidia_tao_tf1.cv.fpenet.scripts.evaluate import main
EVALUATOR_SCRIPT = "nvidia_tao_tf1.cv.fpenet.scripts.evaluate"
@pytest.mark.parametrize("exp_spec", ['default.yaml', None])
@pytest.mark.parametrize("eval_type", ['kpi_testing'])
@pytest.mark.parametrize("log_level", ['INFO', 'ERROR'])
def test_evaluate_script_main(mocker, tmpdir, log_level, eval_type, exp_spec):
"""Test GazeNet evaluate script main function."""
test_spec = expected_spec = {
'config': {
'checkpoint_dir': 'original_path',
}
}
mocked_trainer = MagicMock()
mocked_deserialize = mocker.patch(
f'{EVALUATOR_SCRIPT}.nvidia_tao_tf1.core.coreobject.deserialize_tao_object',
return_value=mocked_trainer
)
mocker.patch(
f'{EVALUATOR_SCRIPT}.yaml.load',
return_value=test_spec
)
mocked_config_path = mocker.patch(
f'{EVALUATOR_SCRIPT}.os.path.isfile',
return_value=True
)
# Patching the os.path.exists call
mocker.patch(
f'{EVALUATOR_SCRIPT}.os.path.exists',
return_value=True
)
mocker.patch(
f'{EVALUATOR_SCRIPT}.open',
return_value=open('nvidia_tao_tf1/cv/fpenet/experiment_specs/default.yaml', 'r')
)
model_path = os.path.join(str(tmpdir), 'fake_path')
results_dir = os.path.join(str(tmpdir), 'results')
if exp_spec is not None:
yaml_path = os.path.join(model_path, exp_spec)
args = ['-l', log_level, '-m', model_path, '-type', eval_type, '-e', exp_spec, '-k', '0',
'-r', results_dir]
else:
# Default case looks for 'experiment_spec.yaml'.
yaml_path = os.path.join(model_path, 'experiment_spec.yaml')
args = ['-l', log_level, '-m', model_path, '-type', eval_type, '-k', '0',
'-r', results_dir]
main(args)
assert mocked_config_path.call_count == 2
mocked_deserialize.assert_called_once_with(expected_spec)
mocked_trainer.build.assert_called_once_with(
eval_mode=eval_type,
eval_model_path=model_path
)
mocked_trainer.run_testing.assert_called_once()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/scripts/test_evaluate.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FPE DataIO pipeline script which generates tfrecords."""
import argparse
import os
from yaml import load
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.common.utilities.path_processing import mkdir_p
from nvidia_tao_tf1.cv.fpenet.dataio.generate_dataset import tfrecord_manager
def build_command_line_parser(parser=None):
"""Build the command line parser using argparse.
Args:
parser (subparser): Provided from the wrapper script to build a chained
parser mechanism
Returns:
parser
"""
if parser is None:
parser = argparse.ArgumentParser(
prog='convert_datasets',
description='Convert FpeNet ground truth jsons to tfrecords.'
)
parser.add_argument('-e', '--experiment_spec_file',
type=str,
required=True,
help='Config file with dataio inputs.')
parser.add_argument(
'-r',
'--results_dir',
type=str,
default=None,
help='Path to a folder where experiment outputs will be created, or specify in spec file.')
return parser
def parse_command_line(args=None):
"""Simple function to parse command line arguments.
Args:
args (list): List of strings used as command line arguments.
Returns:
args_parsed: Parsed arguments.
"""
parser = build_command_line_parser()
args_parsed = parser.parse_args(args)
return args_parsed
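
# Illustrative invocation (paths are placeholders):
#   python dataset_convert.py -e /workspace/dataio_spec.yaml -r /workspace/results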
def main(cl_args=None):
'''Main function to parse use arguments and call tfrecord manager.'''
try:
args = parse_command_line(cl_args)
results_dir = args.results_dir
if results_dir:
mkdir_p(results_dir)
# Writing out status file for TAO.
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=True,
verbosity=1,
append=True
)
)
status_logging.get_status_logger().write(
data=None,
status_level=status_logging.Status.STARTED,
message="Starting dataset convert."
)
config_path = args.experiment_spec_file
with open(config_path, 'r') as f:
args = load(f)
tfrecord_manager(args)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Dataset convert finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Dataset convert was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == '__main__':
main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/scripts/dataset_convert.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FpeNet training script."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import os
formatter = logging.Formatter(
"%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s") # noqa
handler = logging.StreamHandler() # noqa
handler.setFormatter(formatter) # noqa
logging.basicConfig(
level='INFO'
) # noqa
# Replace existing handlers with ours to avoid duplicate messages.
logging.getLogger().handlers = [] # noqa
logging.getLogger().addHandler(handler) # noqa
import tensorflow as tf
import yaml
import nvidia_tao_tf1.core as tao_core
from nvidia_tao_tf1.core import distribution
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.common.utilities.path_processing import mkdir_p
import nvidia_tao_tf1.cv.fpenet # noqa # pylint: disable=unused-import
logger = logging.getLogger(__name__)
def build_command_line_parser(parser=None):
"""Build the command line parser using argparse.
Args:
parser (subparser): Provided from the wrapper script to build a chained
parser mechanism.
Returns:
parser
"""
if parser is None:
parser = argparse.ArgumentParser(prog='train', description='Run FpeNet training.')
parser.add_argument(
'-e',
'--experiment_spec_file',
type=str,
default='nvidia_tao_tf1/cv/fpenet/experiment_specs/default.yaml',
help='Path to a single file containing a complete experiment spec.')
parser.add_argument(
'-r',
'--results_dir',
type=str,
default=None,
help='Path to a folder where experiment outputs will be created, or specify in spec file.')
parser.add_argument(
'-ll',
'--log_level',
type=str,
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
default='INFO',
help='Set logging level.')
parser.add_argument(
'-k',
'--key',
default="",
type=str,
required=False,
        help='The key to load pretrained weights and save intermediate snapshots and the final model.'
)
return parser
def parse_command_line(args=None):
"""Simple function to parse command line arguments.
Args:
args (list): List of strings used as command line arguments.
If None, sys.argv is used.
Returns:
args_parsed: Parsed arguments.
"""
parser = build_command_line_parser()
args_parsed = parser.parse_args(args)
return args_parsed
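
# Illustrative invocation (the results path and key are placeholders):
#   python train.py -e nvidia_tao_tf1/cv/fpenet/experiment_specs/default.yaml \
#       -r /workspace/results -k $KEY -ll INFO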
def main(cl_args=None):
"""Launch the training process."""
tf.logging.set_verbosity(tf.logging.INFO)
args = parse_command_line(cl_args)
config_path = args.experiment_spec_file
results_dir = args.results_dir
key = args.key
# Load experiment spec.
if not os.path.isfile(config_path):
raise ValueError("Experiment spec file cannot be found.")
with open(config_path, 'r') as yaml_file:
spec = yaml.load(yaml_file.read())
# Build the model saving directory.
if results_dir is not None:
spec['checkpoint_dir'] = results_dir
elif spec['checkpoint_dir']:
results_dir = spec['checkpoint_dir']
else:
        raise ValueError('Checkpoint directory not specified, please specify it through -r or '
                         'through the checkpoint_dir field in your model config.')
mkdir_p(results_dir)
# Add key
if key is not None:
spec['key'] = key
# Use Horovod distributor for multi-gpu training.
distribution.set_distributor(distribution.HorovodDistributor())
is_master = distribution.get_distributor().is_master()
# Set logger level
if is_master:
logger.setLevel(args.log_level)
# Writing out status file for TAO.
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=is_master,
verbosity=1,
append=True
)
)
# Build trainer from spec.
trainer = tao_core.coreobject.deserialize_tao_object(spec)
trainer.build()
logger.info('Build trainer finished. Starting training...')
status_logging.get_status_logger().write(
data=None,
status_level=status_logging.Status.STARTED,
message="Starting fpenet training."
)
# Save the training spec in the results directory.
if distribution.get_distributor().is_master():
trainer.to_yaml(os.path.join(results_dir, 'experiment_spec.yaml'))
trainer.train()
status_logging.get_status_logger().write(
data=None,
status_level=status_logging.Status.SUCCESS,
message="Fpenet training finished successfully."
)
if __name__ == "__main__":
try:
main()
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Training finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Training was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/scripts/train.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Export trained FPENet Keras model to UFF format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import graphsurgeon as gs
import keras
import uff
from nvidia_tao_tf1.core.export._uff import keras_to_pb
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.common.utilities.path_processing import mkdir_p
from nvidia_tao_tf1.cv.fpenet.models.custom.softargmax import Softargmax
def parse_command_line(arg_list):
"""
Parse command-line flags passed to the prune script.
Args:
arg_list: List of strings used as command-line arguments. If None,
sys.argv is used by default.
Returns:
Namespace with members for all parsed arguments.
"""
parser = argparse.ArgumentParser(prog='export', description='Export a FPENet model.')
parser.add_argument(
'-m',
'--model_filename',
type=str,
required=True,
help="Absolute path to the model file to export."
)
parser.add_argument(
'-o',
'--output_filename',
type=str,
required=True,
help='Name of the exported model')
parser.add_argument(
'-r',
'--results_dir',
type=str,
default=None,
help='Path to a folder where experiment outputs will be created, or specify in spec file.')
args_parsed = parser.parse_args(arg_list)
return args_parsed
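
# Illustrative invocation (paths are placeholders):
#   python export_keras.py -m /workspace/model.hdf5 -o model.uff -r /workspace/results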
def main(cl_args=None):
"""Run exporting."""
args_parsed = parse_command_line(cl_args)
results_dir = args_parsed.results_dir
if results_dir:
mkdir_p(results_dir)
# Writing out status file for TAO.
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=True,
verbosity=1,
append=True
)
)
status_logging.get_status_logger().write(
data=None,
status_level=status_logging.Status.STARTED,
message="Starting export."
)
model_name = args_parsed.model_filename
output_model_name = args_parsed.output_filename
# Load Keras model from file.
model = keras.models.load_model(model_name,
custom_objects={'Softargmax': Softargmax},
compile=False)
model.load_weights(model_name)
model.summary()
output_node_names = ['softargmax/strided_slice', 'softargmax/strided_slice_1']
    # Create frozen graph as a .pb file.
output_pb_filename = model_name + '.pb'
_, out_tensor_names, __ = keras_to_pb(model,
output_pb_filename,
output_node_names=output_node_names,
custom_objects={'Softargmax': Softargmax})
# Add custom plugin nodes.
SoftargmaxLayer = gs.create_plugin_node(name='softargmax', op='Softargmax')
UpSamplingLayer_1 = gs.create_plugin_node(name='up_sampling2d_1', op='UpsamplingNearest')
UpSamplingLayer_2 = gs.create_plugin_node(name='up_sampling2d_2', op='UpsamplingNearest')
UpSamplingLayer_3 = gs.create_plugin_node(name='up_sampling2d_3', op='UpsamplingNearest')
UpSamplingLayer_4 = gs.create_plugin_node(name='up_sampling2d_4', op='UpsamplingNearest')
namespace_plugin_map = {'softargmax' : SoftargmaxLayer,
'up_sampling2d_1' : UpSamplingLayer_1,
'up_sampling2d_2' : UpSamplingLayer_2,
'up_sampling2d_3' : UpSamplingLayer_3,
'up_sampling2d_4' : UpSamplingLayer_4}
dynamic_graph = gs.DynamicGraph(output_pb_filename)
dynamic_graph.collapse_namespaces(namespace_plugin_map)
out_tensor_names = ['softargmax']
# Convert to UFF from dynamic graph.
uff.from_tensorflow(dynamic_graph.as_graph_def(),
out_tensor_names,
output_filename=os.path.join(results_dir, output_model_name),
list_nodes=False,
debug_mode=False,
return_graph_info=False)
if __name__ == "__main__":
try:
main()
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Export finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Export was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/scripts/export_keras.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FpeNet inference script."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import os
import yaml
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.common.utilities.path_processing import mkdir_p
from nvidia_tao_tf1.cv.fpenet.inferencer.fpenet_inferencer import FpeNetInferencer
logger = logging.getLogger(__name__)
def build_command_line_parser(parser=None):
"""Build the command line parser using argparse.
Args:
parser (subparser): Provided from the wrapper script to build a chained
parser mechanism.
Returns:
parser
"""
if parser is None:
parser = argparse.ArgumentParser(prog='infer', description='Run FpeNet inference.')
parser.add_argument("-e",
"--experiment_spec",
default=None,
type=str,
help="Path to inferencer spec file.",
required=True)
parser.add_argument('-i',
'--input_data_json_path',
help='The json file with paths to input images and ground truth face box.',
type=str,
default=None,
required=True)
parser.add_argument('-m',
'--model_path',
help='The trained model path to infer images with.',
type=str,
default=None,
required=True)
parser.add_argument("-k",
"--key",
default="",
help="Key to load the model.",
type=str,
required=False)
parser.add_argument('-o',
'--output_folder',
help='The directory to the output images and predictions.',
type=str,
required=True,
default=None)
parser.add_argument('-r',
'--image_root_path',
                        help='Parent directory (if any) for the image paths in the json file.',
type=str,
required=False,
default='')
return parser
def parse_command_line_args(cl_args=None):
"""Parser command line arguments to the inferencer.
Args:
cl_args(sys.argv[1:]): Arg from the command line.
Returns:
args: Parsed arguments using argparse.
"""
parser = build_command_line_parser(parser=None)
args = parser.parse_args(cl_args)
return args
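
# Illustrative invocation (all paths and the key are placeholders):
#   python inference.py -e /workspace/infer_spec.yaml \
#       -i /workspace/test_data.json -m /workspace/model.tlt \
#       -k $KEY -o /workspace/inference_output -r /workspace/images/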
def main(args=None):
"""Wrapper function for running inference on a single image or collection of images.
Args:
args (list): List of command line argument strings (sys.argv[1:]).
"""
arguments = parse_command_line_args(args)
if arguments.output_folder:
mkdir_p(arguments.output_folder)
# Writing out status file for TAO.
status_file = os.path.join(arguments.output_folder, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=True,
verbosity=1,
append=True
)
)
status_logging.get_status_logger().write(
data=None,
status_level=status_logging.Status.STARTED,
message="Starting inference."
)
# Load experiment spec.
config_path = arguments.experiment_spec
if not os.path.isfile(config_path):
raise ValueError("Experiment spec file cannot be found.")
with open(config_path, 'r') as yaml_file:
spec = yaml.load(yaml_file.read(), Loader=yaml.FullLoader)
inferencer = FpeNetInferencer(experiment_spec=spec,
data_path=arguments.input_data_json_path,
output_folder=arguments.output_folder,
model_path=arguments.model_path,
image_root_path=arguments.image_root_path,
key=arguments.key)
# load pre-trained model
inferencer.load_model()
# load test data and run inference
inferencer.infer_model()
# save inference results
inferencer.save_results()
if __name__ == "__main__":
try:
main()
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Inference finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Inference was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/scripts/inference.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FpeNet model evaluation script."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import os
import tensorflow as tf
import yaml
import nvidia_tao_tf1.core as tao_core
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.common.utilities.path_processing import mkdir_p
import nvidia_tao_tf1.cv.fpenet # noqa # pylint: disable=unused-import
def build_command_line_parser(parser=None):
"""Build the command line parser using argparse.
Args:
parser (subparser): Provided from the wrapper script to build a chained
parser mechanism
Returns:
parser
"""
if parser is None:
parser = argparse.ArgumentParser(
prog='evaluate', description='Run FpeNet evaluation.')
parser.add_argument(
'-type',
'--eval_type',
type=str,
choices=['kpi_testing'],
default='kpi_testing',
help='Type of evaluation to run.')
parser.add_argument(
'-m',
'--model_folder_path',
type=str,
required=True,
help='Path to the folder where the model to be evaluated '
'is in, or the model file itself.')
parser.add_argument(
'-e',
'--experiment_spec_filename',
type=str,
default='experiment_spec.yaml',
help='Filename of yaml experiment spec to be used for evaluation.')
parser.add_argument(
'-ll',
'--log_level',
type=str,
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
default='INFO',
help='Set logging level.')
parser.add_argument(
'-k',
'--key',
default="",
type=str,
required=False,
help='The key to load pretrained weights and save intermediate snapshots and final model.'
)
parser.add_argument(
'-r',
'--results_dir',
type=str,
default=None,
help='Path to a folder where experiment outputs will be created, or specify in spec file.')
return parser
def parse_command_line(args=None):
"""Simple function to parse command line arguments.
Args:
args (list): List of strings used as command line arguments.
If None, sys.argv is used.
Returns:
args_parsed: Parsed arguments.
"""
parser = build_command_line_parser()
args_parsed = parser.parse_args(args)
return args_parsed
def main(args=None):
"""Launch the model evaluation process."""
tf.logging.set_verbosity(tf.logging.INFO)
args = parse_command_line(args)
results_dir = args.results_dir
if results_dir:
mkdir_p(results_dir)
# Writing out status file for TAO.
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=True,
verbosity=1,
append=True
)
)
status_logging.get_status_logger().write(
data=None,
status_level=status_logging.Status.STARTED,
message="Starting evaluation."
)
model_folder_path = args.model_folder_path
experiment_spec = args.experiment_spec_filename
eval_type = args.eval_type
key = args.key
# Build logger file.
logging.basicConfig(
format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s')
logger = logging.getLogger(__name__)
logger_tf = logging.getLogger('tensorflow')
logger.setLevel(args.log_level)
logger_tf.setLevel(args.log_level)
# Load experiment spec.
config_path = os.path.join(model_folder_path, experiment_spec)
print('config_path: ', config_path)
if not os.path.isfile(config_path):
raise ValueError("Experiment spec file cannot be found.")
with open(config_path, 'r') as yaml_file:
spec = yaml.load(yaml_file.read(), Loader=yaml.FullLoader)
# Build the model saving directory.
if not os.path.exists(model_folder_path):
raise FileNotFoundError(f"Model path doesn't exist at {model_folder_path}")
spec['checkpoint_dir'] = model_folder_path
trainer_build_kwargs = {}
if os.path.isfile(model_folder_path):
spec['checkpoint_dir'] = os.path.dirname(model_folder_path)
trainer_build_kwargs["eval_model_path"] = model_folder_path
# Add key
if key is not None:
spec['key'] = key
# Build trainer in evaluation mode.
trainer = tao_core.coreobject.deserialize_tao_object(spec)
trainer.build(eval_mode=eval_type, **trainer_build_kwargs)
logger.info("Trainer built.")
logger.info("Starting evaluation")
trainer.run_testing()
if __name__ == "__main__":
try:
main()
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Evaluation finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Evaluation was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/scripts/evaluate.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FpeNet Entrypoint Definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/entrypoint/__init__.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""TLT command line wrapper to invoke CLI scripts."""
import sys
from nvidia_tao_tf1.cv.common.entrypoint.entrypoint import launch_job
import nvidia_tao_tf1.cv.fpenet.scripts
def main():
"""Function to launch the job."""
launch_job(nvidia_tao_tf1.cv.fpenet.scripts, "fpenet", sys.argv[1:])
if __name__ == "__main__":
main()
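# Note (added; not part of the original file): `launch_job` dispatches the
# first positional argument to the matching script under
# nvidia_tao_tf1.cv.fpenet.scripts, so a typical (hypothetical) invocation is:
#   fpenet evaluate -m /workspace/models -e experiment_spec.yaml -k $KEY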
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/entrypoint/fpenet.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for FpeNet Evaluator Class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import keras.backend as K
import tensorflow as tf
from nvidia_tao_tf1.cv.fpenet.evaluation.fpenet_evaluator import FpeNetEvaluator
from nvidia_tao_tf1.cv.fpenet.losses.fpenet_loss import FpeLoss
from nvidia_tao_tf1.cv.fpenet.models.fpenet_basemodel import FpeNetBaseModel
class _synthetic_dataloader():
"""Create synthetic dataloader for test."""
def __init__(self, phase='validation'):
self.phase = phase
self.batch_size = 4
self.image_width = 80
self.images = tf.fill((4, 1, 80, 80), 255.0)
def __call__(self, repeat=True, phase='validation'):
'''
Synthetic dataloader call.
Args:
repeat (bool): Whether the dataset can be looped over multiple times or only once.
phase (str): Evaluation phase. Options- 'validation' or 'kpi_testing'.
Returns:
images (Tensor): Image tensors.
label (Tensor): Ground truth keypoints tensor.
num_samples (int): Number of samples.
masking_occ_info (Tensor): Keypoints masking info.
'''
images = self.images
label = (tf.zeros([4, 80, 2], dtype='float32'), tf.zeros([4, 80], dtype='float32'))
masking_occ_info = tf.zeros([4], dtype='float32')
face_bbox = tf.stack([tf.zeros([4, 4], dtype='float32')])
image_names = tf.stack(['tmp'])
num_samples = 4
if phase == 'kpi_testing':
return images, label, num_samples, masking_occ_info, face_bbox, image_names
return images, label, num_samples, masking_occ_info
def _create_evaluator(phase, checkpoint_dir):
"""
Create evaluator object.
Args:
phase (str): Evaluation phase. Options- 'validation' or 'kpi_testing'.
checkpoint_dir (str): Checkpoint directory path with model.
Returns:
evaluator (FpeNetEvaluator): Instance of FpeNetEvaluator to evaluate with.
"""
dataloader = _synthetic_dataloader(phase=phase)
model_parameters = {
'beta': 0.01,
'dropout_rate': 0.5,
'freeze_Convlayer': None,
'pretrained_model_path': None,
'regularizer_type': 'l2',
'regularizer_weight': 1.0e-05,
'type': 'FpeNet_base',
'use_upsampling_layer': False,
'visualization_parameters': None,
}
model = FpeNetBaseModel(model_parameters)
model.build(input_images=dataloader.images)
# save temporary files for test case purpose since doing only eval
model.save_model(os.path.join(checkpoint_dir, 'model.epoch-1.hdf5'), 'test')
open(os.path.join(checkpoint_dir, 'checkpoint'), 'a').close()
loss = FpeLoss('l1')
evaluator = FpeNetEvaluator(model=model,
dataloader=dataloader,
save_dir=checkpoint_dir,
mode='validation',
visualizer=None,
enable_viz=False,
num_keypoints=80,
loss=loss,
steps_per_epoch=1,
model_path=checkpoint_dir)
return evaluator
def test_evaluator_validation(tmpdir):
"""
Test whether the evaluator instantiates and runs correctly in the 'validation' phase.
Args:
tmpdir (str): Temporary path for checkpoint directory.
Returns:
None
"""
# Test: 'validation' phase
K.clear_session()
evaluator = _create_evaluator('validation', str(tmpdir))
evaluator.build()
evaluation_cost = evaluator.evaluate(global_step=1)
assert isinstance(evaluation_cost, float)
def test_evaluator_kpi(tmpdir):
"""
Test whether the evaluator instantiates and runs correctly in the 'kpi_testing' phase.
Args:
tmpdir (str): Temporary path for checkpoint directory.
Returns:
None
"""
# Test: 'kpi_testing' phase
K.clear_session()
evaluator = _create_evaluator('kpi_testing', str(tmpdir))
evaluator.build()
evaluation_cost = evaluator.evaluate(global_step=1)
assert isinstance(evaluation_cost, float)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/evaluation/test_fpenet_evaluator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License."""
"""Defines evaluation functions and classes for Fpenet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nvidia_tao_tf1.cv.fpenet.evaluation.fpenet_evaluator import FpeNetEvaluator
__all__ = ('FpeNetEvaluator',)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/evaluation/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A class to evaluate an FpeNet model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import json
import logging
import os
from time import gmtime, strftime
import keras
from keras import backend as K
import numpy as np
import tensorflow as tf
from nvidia_tao_tf1.core.coreobject import TAOObject
from nvidia_tao_tf1.core.coreobject import save_args
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.common.utilities.model_file_processing import save_best_model
from nvidia_tao_tf1.cv.common.utilities.tlt_utils import model_io
from nvidia_tao_tf1.cv.fpenet.models.custom.softargmax import Softargmax
logger = logging.getLogger(__name__)
EXTENSIONS = ["hdf5", "tlt"]
class FpeNetEvaluator(TAOObject):
"""Class for running evaluation on a trained FpeNet model."""
@save_args
def __init__(self,
model,
dataloader,
save_dir,
mode='validation',
visualizer=None,
enable_viz=False,
num_keypoints=80,
loss=None,
model_path=None,
key=None,
steps_per_epoch=None,
**kwargs):
"""Initialization for evaluator.
Args:
model (nvidia_tao_tf1.blocks.Model): A trained FpeNet model for evaluation.
dataloader (nvidia_tao_tf1.blocks.Dataloader): Instance of the dataloader used to load
evaluation images and masks.
save_dir (str): The full path where all the model files are saved.
mode (str): 'validation' or 'kpi_testing'.
visualizer (driveix.fpenet.FpeNetVisualizer): Instance of Fpenet visualizer.
enable_viz (bool): Flag to enable evaluation visualization.
num_keypoints (int): Number of facial keypoints.
loss (driveix.fpenet.FpeLoss): Instance of Fpenet Loss.
model_path (str): The model path to be evaluated.
key (str): Key to load tlt file.
"""
super(FpeNetEvaluator, self).__init__(**kwargs)
self.model = model
self.dataloader = dataloader
self.save_dir = save_dir
self.model_path = model_path
self.visualizer = visualizer
self.enable_viz = enable_viz
self.loss = loss
self.mode = mode
self.num_keypoints = num_keypoints
self.batch_size = self._steps = self.evaluation_tensors = None
self.crop_size = self.dataloader.image_width
self._key = key
self._steps_per_epoch = steps_per_epoch
def build(self):
"""Load a trained FpeNet model and data for evaluation."""
self.batch_size = self.dataloader.batch_size
if self.mode == 'kpi_testing':
images, ground_truth_labels, num_samples, occ_masking_info, \
face_bbox, image_names = \
self.dataloader(repeat=True,
phase=self.mode)
else:
images, ground_truth_labels, num_samples, occ_masking_info = \
self.dataloader(repeat=True,
phase=self.mode)
self._steps = num_samples // self.batch_size
if not self.model_path:
    # Pick the first saved model file with a known extension. Validation
    # mode keeps model_path as None and uses the in-memory graph instead.
    for extension in EXTENSIONS:
        model_file = os.path.join(self.save_dir, f'model.{extension}')
        if self.mode != 'validation' and os.path.exists(model_file):
            self.model_path = model_file
            break
# Get input tensors.
input_tensor = keras.layers.Input(
tensor=images, name='input_face_images')
# Set testing phase.
keras.backend.set_learning_phase(0)
if self.mode == 'validation':
if not hasattr(self.model, 'keras_model'):
raise ValueError(
"The model graph should be built before calling build() \
in validation mode.")
predictions = self.model.keras_model([input_tensor])
elif self.mode == 'kpi_testing':
logger.info('model_path: {}'.format(self.model_path))
if self.model_path is None or not os.path.isfile(self.model_path):
raise ValueError(
"Please provide a valid fpenet file path for evaluation.")
self.model.keras_model = model_io(self.model_path,
enc_key=self._key,
custom_objects={"Softargmax": Softargmax})
predictions = self.model.keras_model([input_tensor])
else:
raise ValueError("Evaluation mode not supported.")
# extract keypoints and confidence from model output
predictions_coord = K.reshape(predictions[0], (self.batch_size, self.num_keypoints, 2))
predictions_conf = K.reshape(predictions[1], (self.batch_size, self.num_keypoints))
# visualization of images with predicted keypoints overlay
if (self.enable_viz):
self.visualizer.visualize_images(
images, predictions_coord, viz_phase=self.mode)
# compute loss for validation/testing data
evaluation_cost, mouth_cost, eyelid_cost = self.loss(ground_truth_labels[0],
predictions_coord,
ground_truth_labels[1],
occ_masking_info,
num_keypoints=self.num_keypoints)
if self.mode == 'kpi_testing':
self.evaluation_tensors = [predictions_coord,
predictions_conf,
ground_truth_labels[0],
evaluation_cost,
mouth_cost,
eyelid_cost,
ground_truth_labels[1],
face_bbox,
image_names]
else:
self.evaluation_tensors = [predictions_coord,
predictions_conf,
ground_truth_labels[0],
evaluation_cost,
mouth_cost,
eyelid_cost]
# Restores learning phase.
keras.backend.set_learning_phase(1)
def evaluate(self, sess=None, global_step=None):
"""Evaluate a loaded FpeNet model.
Args:
sess (tf.Session): User defined session if exists.
global_step (int): Global step in the graph if doing validation.
Returns:
evaluation_cost (float): An average loss on the evaluation dataset.
"""
if self.evaluation_tensors is None:
raise ValueError("Evaluator must be built before evaluation!")
if sess is None:
sess = keras.backend.get_session()
sess.run(
tf.group(tf.local_variables_initializer(),
tf.tables_initializer(),
*tf.get_collection('iterator_init')))
evaluation_cost = 0.
evaluation_mouth_cost = 0.
evaluation_eyelids_cost = 0.
results, occlusion_gt, image_names, face_bbox, pred_conf = [], [], [], [], []
progress_list = range(self._steps)
for _ in progress_list:
if self.mode == 'kpi_testing':
batch_prediction, batch_prediction_conf, batch_gt, batch_evaluation_cost, \
batch_mouth_cost, batch_eyelids_cost, \
batch_gt_occ, batch_face_bbox, \
batch_image_names = sess.run(self.evaluation_tensors)
else:
batch_prediction, batch_prediction_conf, batch_gt, batch_evaluation_cost, \
batch_mouth_cost, batch_eyelids_cost = \
sess.run(self.evaluation_tensors)
evaluation_cost += batch_evaluation_cost / self._steps
evaluation_mouth_cost += batch_mouth_cost / self._steps
evaluation_eyelids_cost += batch_eyelids_cost / self._steps
# Write ground truth and prediction together for each data point.
for i in range(self.batch_size):
result = np.stack([batch_gt[i], batch_prediction[i]], axis=-1)
results.append(result)
pred_conf.append(batch_prediction_conf[i])
if self.mode == 'kpi_testing':
occlusion_gt.append(batch_gt_occ[i])
face_bbox.append(batch_face_bbox[i])
image_names.append(batch_image_names[i])
if self.mode != 'validation' and results == []:
raise ValueError(
"Need valid 'test_file_name' in experiment spec for evaluation!"
)
if self.mode == 'validation' and self.save_dir is not None:
final_errors, _ = compute_error_keypoints(results)
epoch = int(global_step / self._steps_per_epoch)
save_best_model(
self.save_dir, epoch, evaluation_cost, epoch_based_checkpoint=True,
extension="hdf5"
)
[(mean_err_x, std_err_x), (mean_err_y, std_err_y),
(mean_err_xy, std_err_xy)] = final_errors
with open(os.path.join(self.save_dir, 'validation.log'),
'a+') as f:
cur_time = strftime("%Y-%m-%d %H:%M:%S UTC", gmtime())
f.write(
'{} - global_step {} : {} (total), {} (mouth), {} (eyelids) '
'| Mean errors in px: '.format(
cur_time, global_step, evaluation_cost,
evaluation_mouth_cost, evaluation_eyelids_cost))
f.write(
'err_x: {:.2f} (+/-{:.2f}), err_y: {:.2f} (+/-{:.2f}), err_xy: {:.2f} '
'(+/-{:.2f})\n'.format(mean_err_x, std_err_x, mean_err_y,
std_err_y, mean_err_xy, std_err_xy))
elif self.save_dir is not None:
# Write predictions and gt to json files only in kpi_testing phase
output_filename = os.path.join(self.save_dir, self.mode + '_all_data.json')
print('writing predictions to {}'.format(output_filename))
# Write predictions by masking occluded points for KPI data
write_errors_per_region(results,
pred_conf,
occlusion_gt,
face_bbox,
image_names,
self.save_dir,
self.crop_size,
self.mode)
else:
raise ValueError("Evaluation mode not supported or checkpoint_dir missing.")
logger.info('Validation #{}: {}'.format(global_step, evaluation_cost))
kpi_data = {
"evaluation_cost ": evaluation_cost
}
s_logger = status_logging.get_status_logger()
if isinstance(s_logger, status_logging.StatusLogger):
s_logger.kpi = kpi_data
s_logger.write(
status_level=status_logging.Status.RUNNING,
message="Evaluation metrics generated."
)
return evaluation_cost
def compute_error_keypoints(results):
"""Compute the final keypoints error using ground truth and prediction results.
Args:
result (list): List of ground truth and prediction for each data point.
Returns:
final_errors (list): mean and std error for x, y, xy.
num_samples (int): Number of samples in results.
"""
num_samples = len(results)
results = np.asarray(results)
# Get ground truth and prediction lists.
gt_x = results[:, :, 0, 0]
gt_y = results[:, :, 1, 0]
pred_x = results[:, :, 0, 1]
pred_y = results[:, :, 1, 1]
# Calculate the error.
error_x = np.absolute([a - b for a, b in zip(gt_x, pred_x)])
error_y = np.absolute([a - b for a, b in zip(gt_y, pred_y)])
mean_err_x = np.mean(error_x)
mean_err_y = np.mean(error_y)
std_err_x = np.std(error_x)
std_err_y = np.std(error_y)
error_xy = np.sqrt(np.power(error_x, 2) + np.power(error_y, 2))
mean_err_xy = np.mean(error_xy)
std_err_xy = np.std(error_xy)
final_errors = [(mean_err_x, std_err_x), (mean_err_y, std_err_y)]
final_errors.append((mean_err_xy, std_err_xy))
return final_errors, num_samples
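# Illustrative sketch (added; not part of the original module): the
# `results` convention is a list of (num_keypoints, 2, 2) arrays whose last
# axis stacks [ground truth, prediction]. With a constant 3 px offset in x
# and y, the mean xy error is 3 * sqrt(2).
def _example_compute_error_keypoints():
    gt = np.zeros((80, 2))
    pred = np.full((80, 2), 3.0)  # prediction off by 3 px in x and y
    sample = np.stack([gt, pred], axis=-1)  # shape (80, 2, 2)
    final_errors, num_samples = compute_error_keypoints([sample, sample])
    (mean_x, _), (mean_y, _), (mean_xy, _) = final_errors
    assert (mean_x, mean_y, num_samples) == (3.0, 3.0, 2)
    assert abs(mean_xy - 3.0 * np.sqrt(2.0)) < 1e-6
    return final_errors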
def dump_json(save_dir, mode, image_names, results, pred_conf, occlusion_gt, face_bbox):
"""
Utility function to dump all data into a json.
Args:
save_dir (str): Path to save results.
mode (str): run mode used as post script for file name.
image_names (list): List of image names with paths.
results (list): List of ground truth and prediction for each data point.
pred_conf (list): List of predicted softargmax confidence values.
occlusion_gt (list): List of ground truth occlusion flags for each data point.
face_bbox (list): Ground truth face bounding box in format (x, y, h, w).
Returns:
None
"""
num_samples = len(image_names)
data = []
for i in range(num_samples):
sample = {}
sample['image_path'] = str(image_names[i], 'utf-8')
sample['face_box'] = face_bbox[i].tolist()
results = np.asarray(results)
sample['gt_keypoints'] = results[i, :, :, 0].tolist()
sample['pred_keypoints'] = results[i, :, :, 1].tolist()
sample['gt_occlusions'] = occlusion_gt[i].tolist()
sample['pred_conf'] = pred_conf[i].tolist()
data.append(sample)
with open(os.path.join(save_dir, mode + '_all_data.json'), 'w', encoding='utf-8') as outfile:
json.dump(data, outfile, indent=4)
print('KPI results saved to: {}'.format(os.path.join(save_dir, mode + '_all_data.json')))
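# Illustrative sketch (added; not part of the original module): reading the
# KPI json written above back into numpy arrays. The path argument is a
# hypothetical placeholder.
def _example_load_kpi_json(json_path):
    with open(json_path, 'r', encoding='utf-8') as infile:
        data = json.load(infile)
    # Each entry carries image_path, face_box, gt/pred keypoints,
    # gt_occlusions and pred_conf, as written by dump_json above.
    return [(entry['image_path'], np.asarray(entry['pred_keypoints']))
            for entry in data]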
def write_errors_per_region(results,
pred_conf,
occlusion_gt,
face_bbox,
image_names,
save_dir,
crop_size,
mode):
"""Compute the errors per facial region and write them to a file.
Args:
results (list): List of ground truth and prediction for each data point.
pred_conf (list): List of predicted softargmax confidence values.
occlusion_gt (list): List of ground truth occlusion flags for each data point.
face_bbox (list): Ground truth face bounding box in format (x, y, h, w).
image_names (list): List of image names with paths.
save_dir (string): Path to save results.
crop_size (int): Face crop size
mode (string): Run mode used as post script for file name.
"""
# dump results as json for offline processing/analysis
dump_json(save_dir, mode, image_names, results, pred_conf, occlusion_gt, face_bbox)
num_images = len(results)
results = np.asarray(results)
occlusion_gt = np.asarray(occlusion_gt)
face_bbox = np.asarray(face_bbox)
nkeypoints = results.shape[1]
# face crop size for error normalization
face_bbox_crop_size = float(crop_size)
# Various face region keypoints for region based error.
# ordering of points listed here-
# https://docs.google.com/document/d/13q8NciZtGyx5TgIgELkCbXGfE7PstKZpI3cENBGWkVw/edit#
if nkeypoints in [68, 80]:
region_idx = {'All': range(0, nkeypoints),
'Eyes': range(36, 48),
'Nose': range(27, 36),
'Mouth': range(48, 68),
'Eyebrows': range(17, 27),
'Chin': range(0, 17),
'HP': [8, 17, 21, 22, 26, 31, 35, 36, 39, 42, 45, 48, 54, 57],
'Pupil': range(68, 76),
'Ears': range(76, 80)}
regions = ['All', 'Eyes', 'Nose', 'Mouth', 'Eyebrows', 'Chin', 'HP', 'Pupil', 'Ears']
if nkeypoints == 68:
regions = regions[:-2]
else:
region_idx = {'All': range(0, nkeypoints)}
regions = ['All']
# normalize GT and predictions with face bbox size
results = results*(face_bbox[:, 2].reshape(-1, 1, 1, 1)/face_bbox_crop_size)
# mask all GT occluded points
results_occ_masked = np.multiply(results, occlusion_gt.reshape([num_images, nkeypoints, 1, 1]))
# compute error per point
points_error = []
points_error_all = []
for point in range(nkeypoints):
# only get the samples for a point which are GT not occluded
results_non_occ = [results_occ_masked[x, point:point+1, :, :] for x in range(num_images)
if occlusion_gt[x, point] != 0.0]
results_all = [results[x, point:point+1, :, :] for x in range(num_images)]
# get the point error non occluded
if len(results_non_occ) > 0:
point_error = compute_error_keypoints(results_non_occ)
else:
point_error = [(0.0, 0.0), (0.0, 0.0), (0.0, 0.0)], -1
points_error.append(point_error)
# get the point error all points
point_error_all = compute_error_keypoints(results_all)
points_error_all.append(point_error_all)
# save error for all points
output_filename = os.path.join(save_dir, mode + '_error_per_point.csv')
with open(output_filename, 'w', newline='') as f:
writer = csv.writer(f)
writer.writerow(['Error per point',
'mean_pxl_err_xy_all',
'mean_pxl_err_xy_non_occ',
'num_samples_non_occluded'])
for point in range(nkeypoints):
# get error per point
results_point = points_error[point][0][-1][0]
results_point_all = points_error_all[point][0][-1][0]
writer.writerow([str(point),
str(results_point_all),
str(results_point),
str(points_error[point][-1])])
# save points for all regions
output_filename = os.path.join(save_dir, mode + '_error_per_region.csv')
with open(output_filename, 'w', newline="") as f:
writer = csv.writer(f)
writer.writerow(['Error per region',
'mean_region_err_xy_all',
'mean_region_err_xy_non_occ'])
for region in regions:
# get error per region
results_region = [points_error[x][0][-1][0] for x in region_idx[region]]
region_error = sum(results_region)/len(results_region)
results_region_all = [points_error_all[x][0][-1][0] for x in region_idx[region]]
region_error_all = sum(results_region_all)/len(results_region_all)
writer.writerow([region,
str(region_error_all),
str(region_error)])
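# Illustrative sketch (added; not part of the original module) of the
# occlusion convention used above: occlusion_gt is nonzero for visible
# points and 0.0 for occluded ones, so the multiply zeroes out occluded
# rows before the "non_occ" statistics are taken.
def _example_occlusion_masking():
    occlusion_gt = np.array([[1.0, 0.0]])    # one image, point 1 occluded
    results = np.ones((1, 2, 2, 2))          # (images, points, coords, gt/pred)
    masked = np.multiply(results, occlusion_gt.reshape([1, 2, 1, 1]))
    assert masked[0, 1].sum() == 0.0         # occluded point fully zeroed
    return masked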
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/evaluation/fpenet_evaluator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License."""
"""FpeNet Export definitions."""
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/exporter/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FpeNet exporter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import logging
import os
from shutil import copyfile
import keras
try:
import tensorrt # noqa pylint: disable=W0611 pylint: disable=W0611
except Exception as e:
logger = logging.getLogger(__name__)
logger.warning(
"Failed to import TensorRT package, exporting TLT to a TensorRT engine "
"will not be available."
)
import numpy as np
import tensorflow as tf
try:
from nvidia_tao_tf1.core.export._tensorrt import Engine, ONNXEngineBuilder, UFFEngineBuilder
except Exception as e:
logger = logging.getLogger(__name__)
logger.warning(
"Failed to import TensorRT package, exporting TLT to a TensorRT engine "
"will not be available."
)
from nvidia_tao_tf1.cv.common.utilities.tlt_utils import change_model_batch_size, load_model
from nvidia_tao_tf1.cv.core.export.base_exporter import BaseExporter
from nvidia_tao_tf1.cv.fpenet.models.custom.softargmax import Softargmax
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level='INFO')
class FpeNetExporter(BaseExporter):
"""Exporter class to export a trained FpeNet model."""
def __init__(self,
model_path=None,
key=None,
data_type="fp32",
strict_type=False,
backend="tfonnx",
data_format="channels_first"):
"""Instantiate the FpeNet exporter to export etlt model.
Args:
model_path(str): Path to the FpeNet model file.
key (str): Key to decode the model.
data_type (str): Backend data-type for the optimized TensorRT engine.
strict_type (bool): Apply TensorRT strict_type_constraints or not for INT8 mode.
backend (str): Type of intermediate backend parser to be instantiated.
"""
super(FpeNetExporter, self).__init__(model_path=model_path,
key=key,
data_type=data_type,
strict_type=strict_type,
backend=backend,
data_format=data_format)
# Set keras backend format
keras.backend.set_image_data_format(data_format)
# Reformat preprocessing params to use with preprocessing block in
# BaseExporter
self.preprocessing_params = self._reformat_data_preprocessing_parameters()
def _reformat_data_preprocessing_parameters(self, normalization_params=None):
"""Reformat normalization params to be consumed by pre-processing block.
Args:
normalization_params (dict): Normalization params used for training.
Returns:
preprocessing_params (dict): Preprocessing parameters including mean, scale
and flip_channel keys.
"""
if normalization_params is None:
logger.warning("Using default normalization params!")
means = [0.0]
scale = [1.0]
else:
means = normalization_params['image_offset']
scale = normalization_params['image_scale']
# Reformat as expected by preprocessing function.
means = np.array(means) * np.array(scale)
scale = 1.0 / np.array(scale)
# Network is trained in RGB format
flip_channel = False
preprocessing_params = {"scale": scale,
"means": means,
"flip_channel": flip_channel}
return preprocessing_params
def generate_exported_model(self, output_filename, target_opset=None):
"""Function to export model to etlt.
Args:
output_filename (str): Output .etlt filename
target_opset (int): Target opset version to use for onnx conversion.
Returns:
output_onnx_filename (str): Temporary unencrypted file
in_tensor_names (list): List of input tensor names
out_tensor_names (list): List of output tensor names
"""
custom_objects = {'Softargmax': Softargmax}
# Load Keras model from file.
keras.backend.set_learning_phase(0)
model = load_model(self.model_path,
custom_objects=custom_objects,
key=self.key)
# model.summary() # Disable for TLT release
# convert the model batch to 1
input_dims = {'input_face_images': (None, 1, 80, 80)}
new_model = change_model_batch_size(model,
input_dims,
logger,
custom_objects=custom_objects)
output_node_names = ['softargmax/strided_slice', 'softargmax/strided_slice_1']
_, in_tensor_names, out_tensor_names = self.save_exported_file(
new_model,
output_filename,
output_node_names=output_node_names,
custom_objects=custom_objects,
target_opset=target_opset,
delete_tmp_file=False
)
# Trigger garbage collector to clear memory of the deleted loaded model
del model
tf.reset_default_graph()
gc.collect()
return output_filename, in_tensor_names, out_tensor_names
def export(self,
input_dims,
output_filename,
backend,
calibration_cache="",
data_file_name="",
n_batches=1,
batch_size=1,
verbose=True,
target_opset=None,
calibration_images_dir="",
save_engine=False,
engine_file_name="",
max_workspace_size=1 << 30,
min_batch_size=1,
max_batch_size=1,
opt_batch_size=1,
static_batch_size=-1,
save_unencrypted_model=False,
validate_trt_engine=True):
"""Export.
Args:
ETLT export
input_dims (list): Input dims with channels_first(CHW) or channels_last (HWC)
output_filename (str): Output .etlt filename
backend (str): Model type to export to
Calibration and TRT export
calibration_cache (str): Calibration cache file to write to or read from.
data_file_name (str): Tensorfile to run calibration for int8 optimization
n_batches (int): Number of batches to calibrate over
batch_size (int): Number of images per batch
verbose (bool): Verbosity of the logger
target_opset (int): Target opset version to use for onnx conversion.
calibration_images_dir (str): Directory of images to run int8 calibration if
data file is unavailable.
save_engine (bool): If True, saves trt engine file to `engine_file_name`
engine_file_name (str): Output trt engine file
max_workspace_size (int): Max size of workspace to be set for trt engine builder.
max_batch_size (int): Max batch size for trt engine builder
opt_batch_size (int): Optimum batch size to use for model conversion.
Default is 1.
static_batch_size (int): Set a static batch size for the exported etlt model.
Default is -1 (dynamic batch size).
Debugging
save_unencrypted_model (bool): Flag to save unencrypted model (debug purpose)
validate_trt_engine (bool): Flag to enable trt engine execution for validation.
"""
# set dynamic_batch flag
dynamic_batch = bool(static_batch_size <= 0)
_, in_tensor_name, out_tensor_names = self.generate_exported_model(
output_filename, target_opset=target_opset
)
# Get int8 calibrator
calibrator = None
max_batch_size = max(batch_size, max_batch_size)
data_format = self.data_format
preprocessing_params = self.preprocessing_params
input_dims = tuple(input_dims)
logger.debug("Input dims: {}".format(input_dims))
if self.backend == "tfonnx":
backend = "onnx"
keras.backend.clear_session()
if self.data_type == "int8":
# no tensor scale, take traditional INT8 calibration approach
# use calibrator to generate calibration cache
calibrator = self.get_calibrator(calibration_cache=calibration_cache,
data_file_name=data_file_name,
n_batches=n_batches,
batch_size=batch_size,
input_dims=input_dims,
calibration_images_dir=calibration_images_dir,
preprocessing_params=preprocessing_params)
logger.info("Calibration takes time especially if number of batches is large.")
# Assuming single input node graph for uff engine creation.
if not isinstance(input_dims, dict):
input_dims_dict = {in_tensor_name: input_dims}
# Verify with engine generation / run calibration.
if backend == "uff":
engine_builder = UFFEngineBuilder(output_filename,
in_tensor_name,
input_dims_dict,
out_tensor_names,
max_batch_size=max_batch_size,
max_workspace_size=max_workspace_size,
dtype=self.data_type,
strict_type=self.strict_type,
verbose=verbose,
calibrator=calibrator,
tensor_scale_dict=self.tensor_scale_dict,
data_format=data_format)
elif backend == "onnx":
engine_builder = ONNXEngineBuilder(output_filename,
max_batch_size=max_batch_size,
min_batch_size=min_batch_size,
max_workspace_size=max_workspace_size,
opt_batch_size=opt_batch_size,
dtype=self.data_type,
strict_type=self.strict_type,
verbose=verbose,
calibrator=calibrator,
tensor_scale_dict=self.tensor_scale_dict,
dynamic_batch=dynamic_batch,
input_dims=input_dims_dict)
else:
raise NotImplementedError("Invalid backend.")
trt_engine = engine_builder.get_engine()
if save_engine:
with open(engine_file_name, "wb") as outf:
outf.write(trt_engine.serialize())
if validate_trt_engine:
try:
engine = Engine(trt_engine)
dummy_input = np.ones((1,) + input_dims)
trt_output = engine.infer(dummy_input)
logger.info("TRT engine outputs: {}".format(trt_output.keys()))
for output_name in trt_output.keys():
out = trt_output[output_name]
logger.info("{}: {}".format(output_name, out.shape))
except Exception as error:
logger.error("TRT engine validation error!")
logger.error(error)
if trt_engine:
del trt_engine
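# Illustrative sketch (added; not part of the original module) of the
# algebra in `_reformat_data_preprocessing_parameters` above, assuming the
# downstream preprocessing block computes y = x / scale' - means'. That is
# algebraically identical to the training-time y = (x - offset) * scale
# once means' = offset * scale and scale' = 1 / scale.
def _example_preprocessing_identity():
    offset, scale = np.array([127.5]), np.array([1.0 / 127.5])
    means = offset * scale                 # reformatted mean: [1.0]
    inv_scale = 1.0 / scale                # reformatted scale: [127.5]
    x = np.array([255.0])
    assert np.allclose((x - offset) * scale, x / inv_scale - means)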
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/exporter/fpenet_exporter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for FpeNet Inferencer Class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import keras.backend as K
import numpy as np
import tensorflow as tf
from nvidia_tao_tf1.cv.fpenet.exporter.fpenet_exporter import FpeNetExporter
from nvidia_tao_tf1.cv.fpenet.models.fpenet_basemodel import FpeNetBaseModel
def test_exporter(tmpdir):
"""
Test the exporter.
Args:
tmpdir (str): Temporary path for checkpoint directory.
Returns:
None
"""
K.clear_session()
key = 'test'
model_parameters = {
'beta': 0.01,
'dropout_rate': 0.5,
'freeze_Convlayer': None,
'pretrained_model_path': None,
'regularizer_type': 'l2',
'regularizer_weight': 1.0e-05,
'type': 'FpeNet_public',
'visualization_parameters': None,
}
model = FpeNetBaseModel(model_parameters)
model.build(input_images=tf.convert_to_tensor(np.ones((1, 1, 80, 80)).astype(np.float32)))
# save temporary files for test case purpose
model.save_model(str(tmpdir)+'/temp_model.hdf5', key)
# Build exporter instance
exporter = FpeNetExporter(str(tmpdir)+'/temp_model.hdf5',
key,
backend='tfonnx',
data_type='fp32')
output_filename = str(tmpdir)+'/temp_model.onnx'
engine_file_name = str(tmpdir)+'/temp_model.engine'
# Export the model to etlt file and build the TRT engine.
exporter.export([1, 80, 80],
output_filename,
backend='tfonnx',
static_batch_size=-1,
save_engine=True,
engine_file_name=engine_file_name,
target_opset=10,
validate_trt_engine=True)
assert os.path.exists(output_filename), (
f"Output model file doesn't exist at {output_filename}"
)
assert os.path.exists(engine_file_name), (
f"Output engine file doesn't exist at {engine_file_name}"
)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/exporter/test_fpenet_exporter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FpeNet inference modules to handle standalone inference."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/inferencer/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FpeNet Inference Class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import logging
import os
import cv2
import keras
import keras.backend as K
import numpy as np
import tensorflow as tf
import tqdm
from nvidia_tao_tf1.cv.common.utilities.path_processing import mkdir_p
from nvidia_tao_tf1.cv.common.utilities.tlt_utils import load_model
from nvidia_tao_tf1.cv.fpenet.models.custom.softargmax import Softargmax
from nvidia_tao_tf1.cv.fpenet.models.fpenet_basemodel import FpeNetBaseModel
logger = logging.getLogger(__name__)
# Color definition for stdout logs.
CRED = '\033[91m'
CEND = '\033[0m'
class FpeNetInferencer(object):
"""FpeNet Inference Class."""
def __init__(self,
experiment_spec,
data_path,
output_folder,
model_path,
image_root_path,
key):
"""__init__ method.
Args:
experiment_spec (object): config file for the experiments
data_path (str): path to json with image paths and ground truth face box
output_folder (str): folder for the output files
model_path (str): path to pre-trained model
image_root_path (str): parent directory for the image paths in json.
key (str): model encryption key
"""
self._spec = experiment_spec
self._data_path = data_path
self._output_folder = output_folder
if not os.path.exists(self._output_folder):
mkdir_p(self._output_folder)
self._model_path = model_path
self._image_root_path = image_root_path
self._key = key
# Set up model
self._model = FpeNetBaseModel(self._spec['model']['model_parameters'])
# Set test phase.
keras.backend.set_learning_phase(0)
def load_model(self):
"""Load model from path and args."""
if self._model_path is None or not os.path.isfile(self._model_path):
raise ValueError("Please provide a valid fpenet file path for evaluation.")
if self._model_path.endswith('.engine'):
# Use TensorRT for inference
# import TRTInferencer only if it's a TRT Engine.
from nvidia_tao_tf1.cv.core.inferencer.trt_inferencer import TRTInferencer
self.model = TRTInferencer(self._model_path)
self.is_trt_model = True
else:
self._model.keras_model = load_model(self._model_path,
key=self._key,
custom_objects={'Softargmax': Softargmax})
self.is_trt_model = False
def infer_model(self):
"""Prepare data for inferencing and run inference."""
# Get data from json
json_data = json.loads(open(self._data_path, 'r').read())
self.results = []
for img in tqdm.tqdm(json_data):
try:
fname = str(img['filename'])
if not os.path.exists(os.path.join(self._image_root_path, fname)):
print(CRED + 'Image does not exist: {}'.format(fname) + CEND)
continue
for chunk in img['annotations']:
if 'facebbox' not in chunk['class'].lower():
continue
bbox_data = (entry for entry in chunk if ('class' not in entry and
'version' not in entry))
for entry in bbox_data:
if 'face_tight_bboxheight' in str(entry).lower():
height = int(float(chunk[entry]))
if 'face_tight_bboxwidth' in str(entry).lower():
width = int(float(chunk[entry]))
if 'face_tight_bboxx' in str(entry).lower():
x = int(float(chunk[entry]))
if 'face_tight_bboxy' in str(entry).lower():
y = int(float(chunk[entry]))
sample = dict()
sample['image_path'] = fname
image = cv2.imread(os.path.join(self._image_root_path, fname))
if image is None:
print(CRED + 'Bad image:{}'.format(fname) + CEND)
continue
image_shape = image.shape
image_height = image_shape[0]
image_width = image_shape[1]
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = np.float32(image)
# transform it into a square bbox wrt the longer side
longer_side = max(width, height)
new_width = longer_side
new_height = longer_side
x = int(x - (new_width - width) / 2)
y = int(y - (new_height - height) / 2)
x = min(max(x, 0), image_width)
y = min(max(y, 0), image_height)
new_width = min(new_width, image_width - x)
new_height = min(new_height, image_height - y)
new_width = min(new_width, new_height)
new_height = new_width # make it a square bbox
sample['facebox'] = [x, y, new_width, new_height]
# crop the face bounding box
img_crop = image[y:y + new_height, x:x + new_width, :] # pylint:disable=E1136
target_height = self._spec['dataloader']['image_info']['image']['height']
target_width = self._spec['dataloader']['image_info']['image']['width']
image_resized = cv2.resize(img_crop,
(target_height, target_width),
interpolation=cv2.INTER_CUBIC)
if self._spec['dataloader']['image_info']['image']['channel'] == 1:
image_resized = cv2.cvtColor(image_resized, cv2.COLOR_BGR2GRAY)
image_resized = np.expand_dims(image_resized, 2)
# make it channel first (channel, height, width)
image_resized = np.transpose(image_resized, (2, 0, 1))
image_resized = np.expand_dims(image_resized, 0) # add batch dimension
sample['img'] = image_resized
except Exception as e:
print(CRED + str(e) + CEND)
# Run inference on current sample
if self.is_trt_model:
# Run model prediction using trt engine
try:
output_blobs = self.model.predict(sample['img'])
except Exception as error:
logger.error("TRT execution failed. Please ensure that the `input_shape` "
"matches the model input dims")
logger.error(error)
raise error
# engine generates 3 outputs including penultimate layer at [0]
predictions_coord_results = list(output_blobs.values())[1:]
assert len(predictions_coord_results) == 2,\
"Number of outputs more than 2. Please verify."
else:
# Get input tensors.
input_face_tensor = keras.layers.Input(tensor=tf.convert_to_tensor(sample['img']),
name='input_face_images')
predictions = self._model.keras_model(input_face_tensor)
# extract keypoints and confidence from model output
predictions_coord = K.reshape(predictions[0], (1, self._spec['num_keypoints'], 2))
self.evaluation_tensors = [predictions_coord]
sess = keras.backend.get_session()
sess.run(tf.group(tf.local_variables_initializer(),
tf.tables_initializer(),
*tf.get_collection('iterator_init')))
predictions_coord_results = sess.run(self.evaluation_tensors)
# rescale predictions back to original image coordinates
scale = float(sample['facebox'][2]) / \
self._spec['dataloader']['image_info']['image']['height']
shift = np.tile(np.array((sample['facebox'][0], sample['facebox'][1])),
(self._spec['num_keypoints'], 1))
result = (predictions_coord_results[0][0, :, :] * scale) + shift
self.results.append((sample['image_path'], result.flatten()))
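# Note (added; not in the original source): the rescaling above maps
# crop-space predictions back to image space. With a square face box
# (x, y, w, w) and a crop of side `crop_height`, each keypoint becomes
# original_xy = pred_xy * (w / crop_height) + (x, y).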
def save_results(self):
"""Save inference results to output folder."""
# Write predictions and txt files
output_filename = os.path.join(self._output_folder, 'result.txt')
if len(self.results) > 0:
print('Results file created: ' + output_filename)
file_contents = []
for frame_id, result in self.results:
line = ' '.join([str(x) for x in result.tolist()]) + '\n'
line = '{} {}'.format(frame_id, line)
file_contents.append(line)
with open(output_filename, 'w') as f:
for line in file_contents:
f.write(line)
else:
print(CRED + 'No results to write.' + CEND)
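# Illustrative sketch (added; not part of the original module): parsing
# `result.txt` back into (image_path, (num_keypoints, 2)) pairs, assuming
# image paths contain no spaces (the writer above is space-delimited).
def _example_read_results(result_path):
    parsed = []
    with open(result_path, 'r') as result_file:
        for line in result_file:
            tokens = line.strip().split(' ')
            coords = np.asarray([float(t) for t in tokens[1:]]).reshape(-1, 2)
            parsed.append((tokens[0], coords))
    return parsed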
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/inferencer/fpenet_inferencer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for FpeNet Inferencer Class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import keras.backend as K
import numpy as np
import pytest
import tensorflow as tf
import yaml
from nvidia_tao_tf1.cv.fpenet.inferencer.fpenet_inferencer import FpeNetInferencer
@pytest.mark.skipif(
os.getenv("RUN_ON_CI", "0") == "1",
reason="Temporarily skipping on the CI."
)
def test_inferencer(tmpdir):
"""
Test the inferencer.
Args:
tmpdir (str): Temporary path for checkpoint directory.
Returns:
None
"""
K.clear_session()
key = '0'
with open('nvidia_tao_tf1/cv/fpenet/experiment_specs/default.yaml', 'r') as yaml_file:
spec = yaml.load(yaml_file.read(), Loader=yaml.FullLoader)
inferencer = FpeNetInferencer(
experiment_spec=spec,
data_path='nvidia_tao_tf1/cv/fpenet/dataloader/testdata/datafactory.json',
output_folder=str(tmpdir),
model_path=str(tmpdir)+'/temp_model.tlt',
image_root_path='',
key=key)
inferencer.is_trt_model = False
# save temporary model for testing purpose
inferencer._model.build(
input_images=tf.convert_to_tensor(np.ones((1, 1, 80, 80)).astype(np.float32)))
inferencer._model.save_model(inferencer._model_path, key)
# run inference
inferencer.infer_model()
# save inference results
inferencer.save_results()
assert os.path.exists(str(tmpdir)+'/result.txt')
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/inferencer/test_fpenet_inferencer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for FpeNet experiment specs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
from mock import patch
import yaml
import nvidia_tao_tf1.core as tao_core
def test_spec_deserialization():
"""Test whether all experiment specs can deserialize correctly into MagLev objects."""
spec_paths = glob.glob("nvidia_tao_tf1/cv/fpenet/experiment_specs/*.yaml")
for spec_path in spec_paths:
with open(spec_path, 'r') as f:
spec = yaml.load(f.read(), Loader=yaml.FullLoader)
print(spec)
trainer = tao_core.coreobject.deserialize_tao_object(spec)
# Type of trainer object should be included in the '__class_name__' field of the spec.
assert type(trainer).__name__ in spec['__class_name__']
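# Note (added; not part of the original test): the contract checked above is
# that each spec carries a '__class_name__' key naming the TAOObject
# subclass to instantiate, e.g. (hypothetical spec fragment):
#   __class_name__: FpeNetTrainer
#   checkpoint_dir: /workspace/results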
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/experiment_specs/spec_linter_test.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License."""
"""Defines test functions and classes for FpeNet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nvidia_tao_tf1.cv.fpenet.dataloader.fpenet_dataloader \
import FpeNetDataloader
__all__ = (
'FpeNetDataloader',
)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/dataloader/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test FpeNet default dataloader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from parameterized import parameterized
import pytest
import tensorflow as tf
from nvidia_tao_tf1.cv.fpenet.dataloader.fpenet_dataloader import FpeNetDataloader
def get_data_source():
"""
Returns a data source dict for FpeNetDataloader constructor.
"""
image_info_dict = {}
# Set input information for eyes and face input.
image_info_dict['image'] = {
'height': 80,
'width': 80,
'channel': 1
}
# Setup dataset info.
dataset_info_dict = {
'use_extra_dataset': False,
'root_path': '',
'tfrecords_directory_path': 'nvidia_tao_tf1/cv/fpenet/dataloader',
'tfrecords_set_id_train': 'testdata',
'no_occlusion_masking_sets': '',
'tfrecords_set_id_val': 'testdata',
'image_extension': 'png',
'ground_truth_folder_name': '',
'tfrecord_folder_name': '',
'tfrecord_file_name': 'data.tfrecords',
}
# Setup KPI set info.
kpiset_info_dict = {
'tfrecords_set_id_kpi': 'testdata',
}
# Setup augmentation info
augmentation_info_dict = {
'augmentation_resize_probability': 0.5,
'augmentation_resize_scale': 1.6,
'enable_occlusion_augmentation': True,
'enable_online_augmentation': True,
'enable_resize_augmentation': True,
'patch_probability': 0.5,
'size_to_image_ratio': 0.5,
'mask_augmentation_patch': True
}
# Assemble all information needed together
data_source = {
'batch_size': 2,
'num_keypoints': 80,
'image_info': image_info_dict,
'dataset_info': dataset_info_dict,
'kpiset_info': kpiset_info_dict,
'augmentation_config': augmentation_info_dict
}
return data_source
class FpeNetDataloaderTest(tf.test.TestCase):
"""
Test FpeNet dataloader returning correct instance image and keypoint pairs.
"""
@parameterized.expand([
("training", 2),
("validation", 2),
("kpi_testing", 2)])
def test_tfrecords_loading(self, phase, expected_num_samples):
"""
Test dataloader on loading multiple instances and augmentation enabled.
"""
data_source = get_data_source()
img_shape = (1, 80, 80)
repeat = True
with self.test_session() as session:
dataloader = FpeNetDataloader(batch_size=data_source['batch_size'],
image_info=data_source['image_info'],
dataset_info=data_source['dataset_info'],
kpiset_info=data_source['kpiset_info'],
augmentation_info=data_source['augmentation_config'],
num_keypoints=data_source['num_keypoints'])
# Get the tensor object from test data
results = dataloader(repeat, phase)
images = results[0]
ground_truth_landmarks = results[1]
num_samples = results[2]
iterator_init = tf.get_collection(
dataloader.ITERATOR_INIT_OP_NAME)
session.run(iterator_init)
assert num_samples == expected_num_samples
assert tf.shape(images).eval().tolist() == \
[dataloader.batch_size, img_shape[0], img_shape[1], img_shape[2]]
assert tf.shape(ground_truth_landmarks[0]).eval().tolist() == \
[dataloader.batch_size, data_source['num_keypoints'], 2]
assert tf.shape(ground_truth_landmarks[1]).eval().tolist() == \
[dataloader.batch_size, data_source['num_keypoints']]
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/fpenet/dataloader/test_fpenet_dataloader.py |