python_code | repo_name | file_path
---|---|---|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IVA YOLO base architecture."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Concatenate, Conv2D, Permute, Reshape, UpSampling2D
from keras.models import Model
from nvidia_tao_tf1.core.models.quantize_keras_model import create_quantized_keras_model
from nvidia_tao_tf1.core.templates.utils import _leaky_conv, arg_scope
from nvidia_tao_tf1.cv.yolo_v3.layers.yolo_anchor_box_layer import YOLOAnchorBox
from nvidia_tao_tf1.cv.yolo_v3.models.base_model import get_base_model
def YOLO_FCN(feature_layers, # pylint: disable=W0102
data_format='channels_first',
use_batch_norm=True,
kernel_regularizer=None,
bias_regularizer=None,
alpha=0.1,
use_bias=False,
num_anchors=[3, 3, 3],
num_classes=80,
arch_conv_blocks=2,
force_relu=False):
'''
Build FCN (fully convolutional net) part of YOLO.
Args:
feature_layers: a list of two elements. The first element is a tuple of size 3, containing
three keras tensors as the three feature maps. The second element is a tuple of size 2,
containing the number of channels the upsampled layers need to have (this should be half
of the number of channels of the 2x larger feature map).
data_format: currently only 'channels_first' is tested and supported
use_batch_norm: whether to use batch norm in FCN build. Note this should be consistent with
feature extractor.
kernel_regularizer, bias_regularizer: keras regularizer object or None
alpha: Alpha for LeakyReLU in the FCN build. Note this value does not apply to the feature
extractor. If x is negative, LeakyReLU(x) = alpha * x.
use_bias: whether to use bias for conv layers. If use_batch_norm is true, this should be
false.
num_anchors: Number of anchors of different sizes in each feature map. The first element is
for the smallest feature map (i.e. to detect large objects). The last element is for the
largest feature map (i.e. to detect small objects).
num_classes: Number of all possible classes. E.g. if you have `person, bag, face`, the value
should be 3.
arch_conv_blocks: How many leaky conv blocks to attach before the detection layer.
force_relu: whether to use ReLU instead of LeakyReLU
Returns:
[det_bgobj, det_mdobj, det_smobj]: Three keras tensors for big/mid/small objects detection.
Those tensors can be processed to get detection boxes.
'''
concat_axis = 1 if data_format == 'channels_first' else -1
concat_num_filters = feature_layers[1]
last_layer = feature_layers[0][0]
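# Each anchor predicts 4 box offsets + 1 objectness score + num_classes class scores,
# hence the (num_classes + 5) filters per anchor computed below.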
last_conv_filters = [i * (num_classes + 5) for i in num_anchors]
assert arch_conv_blocks < 3, "arch_conv_blocks can only be 0, 1 or 2."
with arg_scope([_leaky_conv],
use_batch_norm=use_batch_norm,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
alpha=alpha,
padding='same',
freeze_bn=False,
use_bias=use_bias,
force_relu=force_relu):
x = _leaky_conv(last_layer, filters=concat_num_filters[0] * 2,
kernel=1, strides=1, name='yolo_conv1_1')
if arch_conv_blocks > 0:
x = _leaky_conv(x, filters=concat_num_filters[0] * 4,
kernel=3, strides=1, name='yolo_conv1_2')
x = _leaky_conv(x, filters=concat_num_filters[0] * 2,
kernel=1, strides=1, name='yolo_conv1_3')
if arch_conv_blocks > 1:
x = _leaky_conv(x, filters=concat_num_filters[0] * 4,
kernel=3, strides=1, name='yolo_conv1_4')
x = _leaky_conv(x, filters=concat_num_filters[0] * 2,
kernel=1, strides=1, name='yolo_conv1_5')
bg_leaky = _leaky_conv(x, filters=concat_num_filters[0] * 4,
kernel=3, strides=1, name='yolo_conv1_6')
x = _leaky_conv(x, filters=concat_num_filters[0], kernel=1, strides=1, name='yolo_conv2')
x = UpSampling2D(2, data_format=data_format, name='upsample0')(x)
x = Concatenate(axis=concat_axis)([x, feature_layers[0][1]])
x = _leaky_conv(x, filters=concat_num_filters[0],
kernel=1, strides=1, name='yolo_conv3_1')
if arch_conv_blocks > 0:
x = _leaky_conv(x, filters=concat_num_filters[0] * 2,
kernel=3, strides=1, name='yolo_conv3_2')
x = _leaky_conv(x, filters=concat_num_filters[0],
kernel=1, strides=1, name='yolo_conv3_3')
if arch_conv_blocks > 1:
x = _leaky_conv(x, filters=concat_num_filters[0] * 2,
kernel=3, strides=1, name='yolo_conv3_4')
x = _leaky_conv(x, filters=concat_num_filters[0],
kernel=1, strides=1, name='yolo_conv3_5')
md_leaky = _leaky_conv(x, filters=concat_num_filters[0] * 2,
kernel=3, strides=1, name='yolo_conv3_6')
x = _leaky_conv(x, filters=concat_num_filters[1], kernel=1, strides=1, name='yolo_conv4')
x = UpSampling2D(2, data_format=data_format, name='upsample1')(x)
x = Concatenate(axis=concat_axis)([x, feature_layers[0][2]])
x = _leaky_conv(x, filters=concat_num_filters[1],
kernel=1, strides=1, name='yolo_conv5_1')
if arch_conv_blocks > 0:
x = _leaky_conv(x, filters=concat_num_filters[1] * 2,
kernel=3, strides=1, name='yolo_conv5_2')
x = _leaky_conv(x, filters=concat_num_filters[1],
kernel=1, strides=1, name='yolo_conv5_3')
if arch_conv_blocks > 1:
x = _leaky_conv(x, filters=concat_num_filters[1] * 2,
kernel=3, strides=1, name='yolo_conv5_4')
x = _leaky_conv(x, filters=concat_num_filters[1],
kernel=1, strides=1, name='yolo_conv5_5')
sm_leaky = _leaky_conv(x, filters=concat_num_filters[1] * 2,
kernel=3, strides=1, name='yolo_conv5_6')
det_smobj = Conv2D(filters=last_conv_filters[2],
kernel_size=1,
strides=1,
padding='same',
data_format=data_format,
activation=None,
bias_regularizer=bias_regularizer,
kernel_regularizer=kernel_regularizer,
use_bias=True,
name='conv_sm_object')(sm_leaky)
det_bgobj = Conv2D(filters=last_conv_filters[0],
kernel_size=1,
strides=1,
padding='same',
data_format=data_format,
activation=None,
bias_regularizer=bias_regularizer,
kernel_regularizer=kernel_regularizer,
use_bias=True,
name='conv_big_object')(bg_leaky)
det_mdobj = Conv2D(filters=last_conv_filters[1],
kernel_size=1,
strides=1,
padding='same',
data_format=data_format,
activation=None,
bias_regularizer=bias_regularizer,
kernel_regularizer=kernel_regularizer,
use_bias=True,
name='conv_mid_object')(md_leaky)
return [det_bgobj, det_mdobj, det_smobj]
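# A minimal usage sketch of YOLO_FCN, kept as comments. The input shape and the
# 'resnet'/18 backbone follow tests/test_arch.py; the variable names are illustrative
# assumptions, and get_base_model() is expected to return the two-element structure
# described in the docstring above.
#
#   from keras.layers import Input
#   inp = Input(shape=(3, 64, 32), name="Input")
#   fmaps = get_base_model(inp, 'resnet', 18, None, None, None, None, False)
#   # fmaps == [(map_stride32, map_stride16, map_stride8), (ch_upsample0, ch_upsample1)]
#   det_big, det_mid, det_small = YOLO_FCN(fmaps, num_anchors=[3, 3, 3], num_classes=80)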
def YOLO(input_tensor, # pylint: disable=W0102
arch,
nlayers,
kernel_regularizer=None,
bias_regularizer=None,
freeze_blocks=None,
freeze_bn=None,
anchors=[[(0.279, 0.216), (0.375, 0.476), (0.897, 0.784)],
[(0.072, 0.147), (0.149, 0.108), (0.142, 0.286)],
[(0.024, 0.031), (0.038, 0.072), (0.079, 0.055)]],
num_classes=80,
arch_conv_blocks=2,
qat=True,
force_relu=False):
'''
Build YOLO v3 Network.
Args:
input_tensor: Keras tensor created by Input layer
arch: architecture of the feature extractor, e.g. 'resnet' or 'darknet'
nlayers: number of layers of the feature extractor, e.g. 18 for resnet18 or 53 for darknet53
kernel_regularizer, bias_regularizer: keras regularizer object or None
freeze_blocks: blocks to freeze during training. The meaning of `block` is arch-specific
freeze_bn: whether to freeze batch norm layer **for feature extractors**
anchors: List of 3 elements indicating the anchor box shapes on the feature maps. The first
element is for the smallest feature map (i.e. to detect large objects). The last element
is for the largest feature map (i.e. to detect small objects). Each element is a list of
tuples of size 2, in the format of (w, h). The length of each list can be any integer
larger than 0.
num_classes: Number of all possible classes. E.g. if you have `person, bag, face`, the value
should be 3.
arch_conv_blocks: Number of optional conv blocks to attach after each feature map.
qat (bool): If `True`, build a quantization-aware model.
force_relu (bool): If `True`, replace all LeakyReLU activations with ReLU.
Returns:
model: A keras YOLO v3 model with encoded box detections as output.
'''
assert len(anchors) == 3
num_anchors = [len(i) for i in anchors]
feature_layers = get_base_model(input_tensor, arch, nlayers, kernel_regularizer,
bias_regularizer, freeze_blocks, freeze_bn, force_relu)
yolo_fcn = YOLO_FCN(feature_layers,
data_format='channels_first',
use_batch_norm=True,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
alpha=0.1,
use_bias=False,
num_anchors=num_anchors,
num_classes=num_classes,
arch_conv_blocks=arch_conv_blocks,
force_relu=force_relu)
if qat:
raw_model = Model(inputs=input_tensor, outputs=yolo_fcn)
qat_model = create_quantized_keras_model(raw_model)
yolo_fcn = [qat_model.get_layer('conv_big_object').output,
qat_model.get_layer('conv_mid_object').output,
qat_model.get_layer('conv_sm_object').output]
bgdet = Reshape((-1, num_classes + 5),
name="bg_reshape")(Permute((2, 3, 1), name="bg_permute")(yolo_fcn[0]))
mddet = Reshape((-1, num_classes + 5),
name="md_reshape")(Permute((2, 3, 1), name="md_permute")(yolo_fcn[1]))
smdet = Reshape((-1, num_classes + 5),
name="sm_reshape")(Permute((2, 3, 1), name="sm_permute")(yolo_fcn[2]))
# build YOLO v3 anchor layers for corresponding feature maps. Anchor shapes are defined in args.
bg_anchor = YOLOAnchorBox(anchors[0], name="bg_anchor")(yolo_fcn[0])
md_anchor = YOLOAnchorBox(anchors[1], name="md_anchor")(yolo_fcn[1])
sm_anchor = YOLOAnchorBox(anchors[2], name="sm_anchor")(yolo_fcn[2])
bgdet = Concatenate(axis=-1, name="encoded_bg")([bg_anchor, bgdet])
mddet = Concatenate(axis=-1, name="encoded_md")([md_anchor, mddet])
smdet = Concatenate(axis=-1, name="encoded_sm")([sm_anchor, smdet])
results = Concatenate(axis=-2, name="encoded_detections")([bgdet, mddet, smdet])
return Model(inputs=input_tensor, outputs=results, name="YOLOv3")
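# A minimal usage sketch of YOLO(), kept as comments. It mirrors tests/test_arch.py; the
# 64x32 resolution is only an example, and the default anchors/num_classes above apply.
#
#   from keras.layers import Input
#   input_tensor = Input(shape=(3, 64, 32), name="Input")
#   model = YOLO(input_tensor, 'resnet', 18, arch_conv_blocks=2, qat=False)
#   model.summary()
#   # model.output ("encoded_detections") has one row per anchor: the YOLOAnchorBox
#   # values followed by the (num_classes + 5) raw box/objectness/class predictions.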
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/architecture/yolo_arch.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test yolo arch builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Input, LeakyReLU
from nvidia_tao_tf1.cv.yolo_v3.architecture.yolo_arch import YOLO
def test_arch():
it = Input(shape=(3, 64, 32), name="Input")
model = YOLO(it,
'resnet', 18,
kernel_regularizer=None,
bias_regularizer=None,
freeze_blocks=[0],
freeze_bn=None,
arch_conv_blocks=2,
qat=True)
assert model.get_layer('conv1').trainable is False
assert model.get_layer('encoded_detections').output_shape[-2:] == (126, 91)
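# Shape check, assuming the standard YOLOv3 strides of 32/16/8 for the backbone: a
# (3, 64, 32) channels_first input gives 2x1, 4x2 and 8x4 grids, so
# (2 + 8 + 32) cells * 3 anchors = 126 rows; of the 91 columns, (80 + 5) = 85 are the
# raw predictions and the remaining 6 come from YOLOAnchorBox.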
model = YOLO(it,
'resnet', 18,
kernel_regularizer=None,
bias_regularizer=None,
freeze_blocks=[0],
freeze_bn=None,
arch_conv_blocks=2,
qat=True)
assert model.get_layer('conv1').trainable is False
assert model.get_layer('encoded_detections').output_shape[-2:] == (126, 91)
for layer in model.layers:
assert type(layer) != LeakyReLU
model = YOLO(it,
'darknet', 19,
kernel_regularizer=None,
bias_regularizer=None,
freeze_blocks=None,
freeze_bn=None,
arch_conv_blocks=2,
qat=False,
force_relu=True)
assert model.get_layer('conv1').trainable is True
for layer in model.layers:
assert type(layer) != LeakyReLU
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/architecture/tests/test_arch.py |
tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/scripts/__init__.py |
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Export a yolo_v3 model."""
# import build_command_line_parser as this is needed by entrypoint
from nvidia_tao_tf1.cv.common.export.app import build_command_line_parser # noqa pylint: disable=W0611
from nvidia_tao_tf1.cv.common.export.app import launch_export
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.yolo_v3.export.yolov3_exporter import YOLOv3Exporter as Exporter
if __name__ == "__main__":
try:
launch_export(Exporter, None, "onnx")
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Export finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Export was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/scripts/export.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Kmeans algorithm to select Anchor shape. @Jeffery <[email protected]>."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import numpy as np
from PIL import Image
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
def build_command_line_parser(parser=None):
'''build parser.'''
if parser is None:
parser = argparse.ArgumentParser(prog='kmeans', description='Kmeans to select anchors.')
parser.add_argument(
'-l',
'--label_folders',
type=str,
required=True,
nargs='+',
help='Paths to label files')
parser.add_argument(
'-i',
'--image_folders',
type=str,
required=True,
nargs='+',
help='Paths to image files, must match order of label_folders')
parser.add_argument(
'-x',
'--size_x',
type=int,
required=True,
help='Network input width'
)
parser.add_argument(
'-y',
'--size_y',
type=int,
required=True,
help='Network input height'
)
parser.add_argument(
'-n',
'--num_clusters',
type=int,
default=9,
help='Number of clusters needed.'
)
parser.add_argument(
'--max_steps',
type=int,
default=10000,
help='maximum kmeans steps. Kmeans will stop even if not converged at max_steps'
)
parser.add_argument(
'--min_x',
type=int,
default=0,
help='ignore boxes whose width (in the network input-size image) is not larger than this value.'
)
parser.add_argument(
'--min_y',
type=int,
default=0,
help='ignore boxes whose height (in the network input-size image) is not larger than this value.'
)
parser.add_argument(
"--results_dir",
type=str,
default=None,
help="Path to the files where the logs are stored."
)
return parser
def parse_command_line(args):
"""Simple function to parse command line arguments."""
parser = build_command_line_parser()
return parser.parse_args(args)
def read_boxes(folders, img_folders, min_x, min_y):
'''
Read all boxes as two numpy arrays.
Args:
folders (list of strings): paths to kitti label txts.
img_folders (list of strings): paths to kitti images.
min_x (float): minimum box width, as a fraction of image width; narrower boxes are ignored
min_y (float): minimum box height, as a fraction of image height; shorter boxes are ignored
Returns:
w (1-d array): widths of all boxes, 0-1 range
h (1-d array): heights of all boxes, 0-1 range
'''
supported_img_format = ['.jpg', '.jpeg', '.JPG', '.JPEG', '.png', '.PNG']
w = []
h = []
assert len(folders) == len(img_folders), "Labels and images folder must be 1-1 match"
for idx, img_folder in enumerate(img_folders):
for img_file in os.listdir(img_folder):
fname, ext = os.path.splitext(img_file)
if ext not in supported_img_format:
continue
label_file = os.path.join(folders[idx], fname+'.txt')
if not os.path.isfile(label_file):
print("Cannot find:", label_file)
continue
img_file = os.path.join(img_folder, img_file)
img = Image.open(img_file)
orig_w, orig_h = img.size
lines = open(label_file, 'r').read().split('\n')
for l in lines:
l_sp = l.strip().split()
if len(l_sp) < 15:
continue
left = float(l_sp[4]) / orig_w
top = float(l_sp[5]) / orig_h
right = float(l_sp[6]) / orig_w
bottom = float(l_sp[7]) / orig_h
l_w = right - left
l_h = bottom - top
if l_w > min_x and l_h > min_y:
w.append(l_w)
h.append(l_h)
return np.array(w), np.array(h)
def iou(w0, h0, w1, h1):
'''
Pairwise IOU.
Args:
w0, h0: Boxes group 0
w1, h1: Boxes group 1
Returns:
iou (len(w0) rows and len(w1) cols): pairwise iou scores
'''
len0 = len(w0)
len1 = len(w1)
w0_m = w0.repeat(len1).reshape(len0, len1)
h0_m = h0.repeat(len1).reshape(len0, len1)
w1_m = np.tile(w1, len0).reshape(len0, len1)
h1_m = np.tile(h1, len0).reshape(len0, len1)
area0_m = w0_m * h0_m
area1_m = w1_m * h1_m
area_int_m = np.minimum(w0_m, w1_m) * np.minimum(h0_m, h1_m)
return area_int_m / (area0_m + area1_m - area_int_m)
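# A worked example of this IoU, kept as comments: every box is treated as if anchored at a
# common corner, so only (w, h) matter -- the usual convention for anchor clustering.
# For w0 = h0 = 0.2 and w1 = h1 = 0.1: intersection = 0.1 * 0.1 = 0.01,
# union = 0.04 + 0.01 - 0.01 = 0.04, so IoU = 0.25.
#
#   iou(np.array([0.2]), np.array([0.2]), np.array([0.1]), np.array([0.1]))
#   # -> array([[0.25]])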
def kmeans(w, h, num_clusters, max_steps=1000):
'''
Calculate cluster centers.
Args:
w (1-d numpy array): 0-1 widths
h (1-d numpy array): 0-1 heights
num_clusters (int): num clusters needed
max_steps (int): maximum number of iterations; stops early once cluster assignments converge
Returns:
cluster_centers (list of tuples): [(c_w, c_h)] sorted by area
'''
assert len(w) == len(h), "w and h should have same shape"
assert num_clusters < len(w), "Must have more boxes than clusters"
n_box = len(w)
rand_id = np.random.choice(n_box, num_clusters, replace=False)
clusters_w = w[rand_id]
clusters_h = h[rand_id]
# EM-algorithm
cluster_assign = np.zeros((n_box,), int)
for i in range(max_steps):
# shape (n_box, num_cluster)
if i % 10 == 0:
print("Start optimization iteration:", i + 1)
box_cluster_iou = iou(w, h, clusters_w, clusters_h)
re_assign = np.argmax(box_cluster_iou, axis=1)
if all(re_assign == cluster_assign):
# converge
break
cluster_assign = re_assign
for j in range(num_clusters):
clusters_w[j] = np.median(w[cluster_assign == j])
clusters_h[j] = np.median(h[cluster_assign == j])
return sorted(zip(clusters_w, clusters_h), key=lambda x: x[0] * x[1])
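# A minimal usage sketch of kmeans(), kept as comments. The synthetic (w, h) data and the
# 416x416 input size are assumptions for illustration; a real run reads boxes from KITTI
# labels via read_boxes().
#
#   np.random.seed(42)
#   w = np.random.uniform(0.02, 0.9, size=2000)
#   h = np.random.uniform(0.02, 0.9, size=2000)
#   for cw, ch in kmeans(w, h, num_clusters=9):
#       print("(%0.2f, %0.2f)" % (cw * 416, ch * 416))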
def main(args=None):
'''Main function.'''
args = parse_command_line(args)
# Set up status logging
if args.results_dir:
if not os.path.exists(args.results_dir):
os.makedirs(args.results_dir)
status_file = os.path.join(args.results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=True,
verbosity=1,
append=True
)
)
s_logger = status_logging.get_status_logger()
s_logger.write(
status_level=status_logging.Status.STARTED,
message="Starting k-means."
)
w, h = read_boxes(args.label_folders, args.image_folders,
float(args.min_x) / args.size_x, float(args.min_y) / args.size_y)
results = kmeans(w, h, args.num_clusters, args.max_steps)
print('Please use following anchor sizes in YOLO config:')
anchors = []
for x in results:
print("(%0.2f, %0.2f)" % (x[0] * args.size_x, x[1] * args.size_y))
anchors.append("(%0.2f, %0.2f)" % (x[0] * args.size_x, x[1] * args.size_y))
if args.results_dir:
s_logger.kpi.update({'k-means generated anchors': str(anchors)})
s_logger.write(
status_level=status_logging.Status.SUCCESS,
message="K-means finished successfully."
)
if __name__ == "__main__":
try:
main()
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="KMEANS was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/scripts/kmeans.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert KITTI dataset to TFRecords for YOLOv3 TLT model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.detectnet_v2.scripts.dataset_convert import ( # noqa pylint: disable=unused-import
build_command_line_parser,
main,
)
if __name__ == "__main__":
try:
main(sys.argv[1:])
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Dataset convert finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Dataset convert was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/scripts/dataset_convert.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perform continuous YOLO training on a tfrecords dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import os
from keras import backend as K
import tensorflow as tf
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.common.utils import check_tf_oom, hvd_keras, initialize
from nvidia_tao_tf1.cv.yolo_v3.models.utils import build_training_pipeline
from nvidia_tao_tf1.cv.yolo_v3.utils.spec_loader import load_experiment_spec
from nvidia_tao_tf1.cv.yolo_v3.utils.tensor_utils import get_init_ops
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level='INFO')
logger = logging.getLogger(__name__)
verbose = 0
def run_experiment(config_path, results_dir, key):
"""
Launch experiment that trains the model.
NOTE: Do not change the argument names without verifying that cluster submission works.
Args:
config_path (str): Path to a text file containing a complete experiment configuration.
results_dir (str): Path to a folder where various training outputs will be written.
If the folder does not already exist, it will be created.
key (str): Key to save or load a .tlt model.
"""
hvd = hvd_keras()
hvd.init()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = str(hvd.local_rank())
sess = tf.Session(config=config)
K.set_session(sess)
K.set_image_data_format('channels_first')
K.set_learning_phase(1)
verbose = 1 if hvd.rank() == 0 else 0
is_master = hvd.rank() == 0
if is_master and not os.path.exists(results_dir):
os.makedirs(results_dir)
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=is_master,
verbosity=1,
append=True
)
)
status_logging.get_status_logger().write(
data=None,
status_level=status_logging.Status.STARTED,
message="Starting Yolo_V3 Training job"
)
# Load experiment spec.
spec = load_experiment_spec(config_path)
initialize(spec.random_seed, hvd)
# build training model and dataset
model = build_training_pipeline(
spec,
results_dir,
key,
hvd,
sess,
verbose
)
if hvd.rank() == 0:
model.summary()
sess.run(get_init_ops())
model.train(verbose)
status_logging.get_status_logger().write(
data=None,
status_level=status_logging.Status.SUCCESS,
message="YOLO_V3 training finished successfully."
)
def build_command_line_parser(parser=None):
'''build parser.'''
if parser is None:
parser = argparse.ArgumentParser(prog='train', description='Train an YOLOv3 model.')
parser.add_argument(
'-e',
'--experiment_spec_file',
type=str,
required=True,
help='Path to spec file. Absolute path or relative to working directory. \
If not specified, default spec from spec_loader.py is used.')
parser.add_argument(
'-r',
'--results_dir',
type=str,
required=True,
help='Path to a folder where experiment outputs should be written.'
)
parser.add_argument(
'-k',
'--key',
default="",
type=str,
required=False,
help='Key to save or load a .tlt model.'
)
return parser
def parse_command_line(args):
"""Simple function to parse command line arguments."""
parser = build_command_line_parser()
return parser.parse_args(args)
@check_tf_oom
def main(args=None):
"""Run the training process."""
args = parse_command_line(args)
try:
run_experiment(
config_path=args.experiment_spec_file,
results_dir=args.results_dir,
key=args.key
)
logger.info("Training finished successfully.")
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Training was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
logger.info("Training was interrupted.")
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/scripts/train.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MagNet pruning wrapper for classification/detection models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from datetime import datetime as dt
import logging
import os
from nvidia_tao_tf1.core.pruning.pruning import prune
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.common.utils import (
get_model_file_size,
get_num_params
)
from nvidia_tao_tf1.cv.yolo_v3.utils.model_io import load_model, save_model
from nvidia_tao_tf1.cv.yolo_v3.utils.spec_loader import load_experiment_spec
logger = logging.getLogger(__name__)
def build_command_line_parser(parser=None):
'''build parser.'''
if parser is None:
parser = argparse.ArgumentParser(description="TLT pruning script")
parser.add_argument("-m",
"--model",
type=str,
help="Path to the target model for pruning",
required=True,
default=None)
parser.add_argument("-o",
"--output_file",
type=str,
help="Output file path for pruned model",
required=True,
default=None)
parser.add_argument("-e",
"--experiment_spec_path",
type=str,
help="Path to experiment spec file",
required=True)
parser.add_argument('-k',
'--key',
required=False,
default="",
type=str,
help='Key to load a .tlt model')
parser.add_argument('-n',
'--normalizer',
type=str,
default='max',
help="`max` to normalize by dividing each norm by the \
maximum norm within a layer; `L2` to normalize by \
dividing by the L2 norm of the vector comprising all \
kernel norms. (default: `max`)")
parser.add_argument('-eq',
'--equalization_criterion',
type=str,
default='union',
help="Criteria to equalize the stats of inputs to an \
element wise op layer. Options are \
[arithmetic_mean, geometric_mean, union, \
intersection]. (default: `union`)")
parser.add_argument("-pg",
"--pruning_granularity",
type=int,
help="Pruning granularity: number of filters to remove \
at a time. (default:8)",
default=8)
parser.add_argument("-pth",
"--pruning_threshold",
type=float,
help="Threshold to compare normalized norm against \
(default:0.1)", default=0.1)
parser.add_argument("-nf",
"--min_num_filters",
type=int,
help="Minimum number of filters to keep per layer. \
(default:16)", default=16)
parser.add_argument("-el",
"--excluded_layers", action='store',
type=str, nargs='*',
help="List of excluded_layers. Examples: -i item1 \
item2", default=[])
parser.add_argument("--results_dir",
type=str,
default=None,
help="Path to the files where the logs are stored.")
parser.add_argument("-v",
"--verbose",
action='store_true',
help="Include this flag in command line invocation for \
verbose logs.")
return parser
def parse_command_line(args):
"""Simple function to parse command line arguments."""
parser = build_command_line_parser()
return parser.parse_args(args)
def run_pruning(args=None):
"""Prune an encrypted Keras model."""
results_dir = args.results_dir
if results_dir is not None:
if not os.path.exists(results_dir):
os.makedirs(results_dir)
timestamp = int(dt.timestamp(dt.now()))
filename = "status.json"
if results_dir == "/workspace/logs":
filename = f"status_prune_{timestamp}.json"
status_file = os.path.join(results_dir, filename)
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=True
)
)
status_logging.get_status_logger().write(
status_level=status_logging.Status.STARTED,
message="Starting YOLO pruning"
)
# Set up logger verbosity.
verbosity = 'INFO'
if args.verbose:
verbosity = 'DEBUG'
# Configure the logger.
logging.basicConfig(
format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level=verbosity)
assert args.equalization_criterion in \
['arithmetic_mean', 'geometric_mean', 'union', 'intersection'], \
"Equalization criterion are [arithmetic_mean, geometric_mean, union, \
intersection]."
assert args.normalizer in ['L2', 'max'], \
"normalizer options are [L2, max]."
experiment_spec = load_experiment_spec(args.experiment_spec_path)
img_height = experiment_spec.augmentation_config.output_height
img_width = experiment_spec.augmentation_config.output_width
n_channels = experiment_spec.augmentation_config.output_channel
final_model = load_model(
args.model,
experiment_spec,
(n_channels, img_height, img_width),
key=args.key
)
if verbosity == 'DEBUG':
# Printing out the loaded model summary
logger.debug("Model summary of the unpruned model:")
logger.debug(final_model.summary())
# Excluded layers for YOLOv3 / v4
force_excluded_layers = [
'conv_big_object',
'conv_mid_object',
'conv_sm_object'
]
force_excluded_layers += final_model.output_names
# Pruning trained model
pruned_model = prune(
model=final_model,
method='min_weight',
normalizer=args.normalizer,
criterion='L2',
granularity=args.pruning_granularity,
min_num_filters=args.min_num_filters,
threshold=args.pruning_threshold,
equalization_criterion=args.equalization_criterion,
excluded_layers=args.excluded_layers + force_excluded_layers)
if verbosity == 'DEBUG':
# Printing out pruned model summary
logger.debug("Model summary of the pruned model:")
logger.debug(pruned_model.summary())
pruning_ratio = pruned_model.count_params() / final_model.count_params()
logger.info("Pruning ratio (pruned model / original model): {}".format(
pruning_ratio
)
)
# Save the encrypted pruned model
save_model(pruned_model, args.output_file, args.key, save_format='.hdf5')
if results_dir is not None:
s_logger = status_logging.get_status_logger()
s_logger.kpi = {
"pruning_ratio": pruning_ratio,
"size": get_model_file_size(args.output_file),
"param_count": get_num_params(pruned_model)
}
s_logger.write(
message="Pruning ratio (pruned model / original model): {}".format(
pruning_ratio
)
)
def main(args=None):
"""Wrapper function for pruning."""
# Apply patch to correct keras 2.2.4 bug
try:
# parse command line
args = parse_command_line(args)
run_pruning(args)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Pruning finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Pruning was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/scripts/prune.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple Stand-alone inference script for YOLO models trained using modulus."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import os
import keras.backend as K
import numpy as np
from nvidia_tao_tf1.cv.common.inferencer.inferencer import Inferencer
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.common.utils import check_tf_oom
from nvidia_tao_tf1.cv.yolo_v3.builders import eval_builder
from nvidia_tao_tf1.cv.yolo_v3.utils.model_io import load_model
from nvidia_tao_tf1.cv.yolo_v3.utils.spec_loader import load_experiment_spec
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level='INFO')
image_extensions = ['.jpg', '.jpeg', '.JPG', '.JPEG', '.png', '.PNG']
def build_command_line_parser(parser=None):
'''build parser.'''
if parser is None:
parser = argparse.ArgumentParser(description='TLT YOLOv3 Inference Tool')
parser.add_argument('-m',
'--model_path',
type=str,
required=True,
help='Path to a TLT model or TensorRT engine.')
parser.add_argument('-i',
'--image_dir',
required=True,
type=str,
help='The path to input image or directory.')
parser.add_argument('-k',
'--key',
type=str,
default="",
help='Key to save or load a .tlt model. Must be present if -m is a TLT model')
parser.add_argument('-e',
'--experiment_spec',
required=True,
type=str,
help='Path to an experiment spec file for training.')
parser.add_argument('-t',
'--threshold',
type=float,
default=0.3,
help='Confidence threshold for inference.')
parser.add_argument("-r",
'--results_dir',
type=str,
default=None,
help='Path to the files where the logs are stored.')
parser.add_argument('-b',
'--batch_size',
type=int,
required=False,
default=1,
help=argparse.SUPPRESS)
return parser
def parse_command_line(args):
"""Simple function to parse command line arguments."""
parser = build_command_line_parser()
return parser.parse_args(args)
def keras_output_process_fn(inferencer, y_encoded):
"function to process keras model output."
# xmin
y_encoded[..., -4] = y_encoded[..., -4] * inferencer.model_input_width
# ymin
y_encoded[..., -3] = y_encoded[..., -3] * inferencer.model_input_height
# xmax
y_encoded[..., -2] = y_encoded[..., -2] * inferencer.model_input_width
# ymax
y_encoded[..., -1] = y_encoded[..., -1] * inferencer.model_input_height
return y_encoded
def trt_output_process_fn(inferencer, y_encoded):
"function to process TRT model output."
keep_k, boxes, scores, cls_id = y_encoded
result = []
for idx, k in enumerate(keep_k.reshape(-1)):
mul = np.array([[inferencer.model_input_width,
inferencer.model_input_height,
inferencer.model_input_width,
inferencer.model_input_height]])
loc = boxes[idx].reshape(-1, 4)[:k] * mul
cid = cls_id[idx].reshape(-1, 1)[:k]
conf = scores[idx].reshape(-1, 1)[:k]
result.append(np.concatenate((cid, conf, loc), axis=-1))
return result
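# Layout note, inferred from the code above: each element of `result` is an (N, 6) array
# for one image with columns [class_id, confidence, xmin, ymin, xmax, ymax], where the box
# coordinates are in pixels of the network input resolution.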
def inference(arguments):
'''make inference on a folder of images.'''
# Set up status logging
if arguments.results_dir:
if not os.path.exists(arguments.results_dir):
os.makedirs(arguments.results_dir)
status_file = os.path.join(arguments.results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=True,
verbosity=1,
append=True
)
)
s_logger = status_logging.get_status_logger()
s_logger.write(
status_level=status_logging.Status.STARTED,
message="Starting YOLOv3 inference."
)
config_path = arguments.experiment_spec
experiment_spec = load_experiment_spec(config_path)
K.clear_session() # Clear previous models from memory.
K.set_learning_phase(0)
classes = sorted({str(x).lower() for x in
experiment_spec.dataset_config.target_class_mapping.values()})
class_mapping = dict(zip(range(len(classes)), classes))
img_mean = experiment_spec.augmentation_config.image_mean
if experiment_spec.augmentation_config.output_channel == 3:
if img_mean:
img_mean = [img_mean['b'], img_mean['g'], img_mean['r']]
else:
img_mean = [103.939, 116.779, 123.68]
else:
if img_mean:
img_mean = [img_mean['l']]
else:
img_mean = [117.3786]
if os.path.splitext(arguments.model_path)[1] in ['.tlt', '.hdf5']:
img_height = experiment_spec.augmentation_config.output_height
img_width = experiment_spec.augmentation_config.output_width
n_channels = experiment_spec.augmentation_config.output_channel
model = load_model(arguments.model_path, experiment_spec,
(n_channels, img_height, img_width),
key=arguments.key)
# Load evaluation parameters
conf_th = experiment_spec.nms_config.confidence_threshold
iou_th = experiment_spec.nms_config.clustering_iou_threshold
top_k = experiment_spec.nms_config.top_k
nms_on_cpu = True
# Build evaluation model
model = eval_builder.build(
model, conf_th, iou_th, top_k, nms_on_cpu=nms_on_cpu
)
inferencer = Inferencer(keras_model=model,
batch_size=experiment_spec.eval_config.batch_size,
infer_process_fn=keras_output_process_fn,
class_mapping=class_mapping,
img_mean=img_mean,
threshold=arguments.threshold)
print("Using TLT model for inference, setting batch size to the one in eval_config:",
experiment_spec.eval_config.batch_size)
else:
inferencer = Inferencer(trt_engine_path=arguments.model_path,
infer_process_fn=trt_output_process_fn,
class_mapping=class_mapping,
img_mean=img_mean,
threshold=arguments.threshold,
batch_size=experiment_spec.eval_config.batch_size)
print("Using TensorRT engine for inference, setting batch size to engine's one:",
inferencer.batch_size)
out_image_path = os.path.join(arguments.results_dir, "images_annotated")
out_label_path = os.path.join(arguments.results_dir, "labels")
os.makedirs(out_image_path, exist_ok=True)
os.makedirs(out_label_path, exist_ok=True)
inferencer.infer(arguments.image_dir, out_image_path, out_label_path)
if arguments.results_dir:
s_logger.write(
status_level=status_logging.Status.SUCCESS,
message="Inference finished successfully."
)
@check_tf_oom
def main(args=None):
"""Run the inference process."""
try:
args = parse_command_line(args)
inference(args)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Inference was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/scripts/inference.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple Stand-alone evaluate script for YOLO models trained using modulus."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import os
import sys
import keras.backend as K
from keras.utils.data_utils import OrderedEnqueuer
import numpy as np
import tensorflow as tf
from tqdm import trange
from nvidia_tao_tf1.cv.common.evaluator.ap_evaluator import APEvaluator
from nvidia_tao_tf1.cv.common.inferencer.inferencer import Inferencer
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.common.utils import check_tf_oom
from nvidia_tao_tf1.cv.yolo_v3.builders import eval_builder
from nvidia_tao_tf1.cv.yolo_v3.data_loader.data_loader import YOLOv3DataPipe
from nvidia_tao_tf1.cv.yolo_v3.dataio.data_sequence import YOLOv3DataSequence
from nvidia_tao_tf1.cv.yolo_v3.utils.model_io import load_model
from nvidia_tao_tf1.cv.yolo_v3.utils.spec_loader import (
load_experiment_spec,
validation_labels_format
)
from nvidia_tao_tf1.cv.yolo_v3.utils.tensor_utils import get_init_ops
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level='INFO')
def build_command_line_parser(parser=None):
'''build parser.'''
if parser is None:
parser = argparse.ArgumentParser(description='TLT YOLOv3 Evaluation Tool')
parser.add_argument('-m',
'--model_path',
help='Path to an YOLOv3 TLT model or TensorRT engine.',
required=True,
type=str)
parser.add_argument('-k',
'--key',
type=str,
default="",
help='Key to load a .tlt model.')
parser.add_argument('-e',
'--experiment_spec',
required=False,
type=str,
help='Experiment spec file for training and evaluation.')
parser.add_argument("-r",
'--results_dir',
type=str,
default=None,
help='Path to the files where the logs are stored.')
parser.add_argument('-i',
'--image_dir',
type=str,
required=False,
default=None,
help=argparse.SUPPRESS)
parser.add_argument('-l',
'--label_dir',
type=str,
required=False,
help=argparse.SUPPRESS)
parser.add_argument('-b',
'--batch_size',
type=int,
required=False,
default=1,
help=argparse.SUPPRESS)
return parser
def parse_command_line(args):
"""Simple function to parse command line arguments."""
parser = build_command_line_parser()
return parser.parse_args(args)
def keras_output_process_fn(inferencer, y_encoded):
"function to process keras model output."
return y_encoded
def trt_output_process_fn(inferencer, y_encoded):
"function to process TRT model output."
keep_k, boxes, scores, cls_id = y_encoded
result = []
for idx, k in enumerate(keep_k.reshape(-1)):
loc = boxes[idx].reshape(-1, 4)[:k]
cid = cls_id[idx].reshape(-1, 1)[:k]
conf = scores[idx].reshape(-1, 1)[:k]
result.append(np.concatenate((cid, conf, loc), axis=-1))
return result
def evaluate(arguments):
'''make evaluation.'''
# Set up status logging
if arguments.results_dir:
if not os.path.exists(arguments.results_dir):
os.makedirs(arguments.results_dir)
status_file = os.path.join(arguments.results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=True,
verbosity=1,
append=True
)
)
s_logger = status_logging.get_status_logger()
s_logger.write(
status_level=status_logging.Status.STARTED,
message="Starting YOLOv3 evaluation."
)
config_path = arguments.experiment_spec
experiment_spec = load_experiment_spec(config_path)
val_labels_format = validation_labels_format(experiment_spec)
classes = sorted({str(x).lower() for x in
experiment_spec.dataset_config.target_class_mapping.values()})
ap_mode = experiment_spec.eval_config.average_precision_mode
matching_iou = experiment_spec.eval_config.matching_iou_threshold
matching_iou = matching_iou if matching_iou > 0 else 0.5
ap_mode_dict = {0: "sample", 1: "integrate"}
average_precision_mode = ap_mode_dict[ap_mode]
K.clear_session() # Clear previous models from memory.
evaluator = APEvaluator(len(classes),
conf_thres=experiment_spec.nms_config.confidence_threshold,
matching_iou_threshold=matching_iou,
average_precision_mode=average_precision_mode)
if os.path.splitext(arguments.model_path)[1] in ['.tlt', '.hdf5']:
K.set_learning_phase(0)
img_height = experiment_spec.augmentation_config.output_height
img_width = experiment_spec.augmentation_config.output_width
n_channels = experiment_spec.augmentation_config.output_channel
model = load_model(
arguments.model_path,
experiment_spec,
(n_channels, img_height, img_width),
key=arguments.key
)
# Load evaluation parameters
conf_th = experiment_spec.nms_config.confidence_threshold
iou_th = experiment_spec.nms_config.clustering_iou_threshold
top_k = experiment_spec.nms_config.top_k
nms_on_cpu = False
if val_labels_format == "tfrecords":
nms_on_cpu = True
# Build evaluation model
model = eval_builder.build(
model, conf_th, iou_th, top_k, nms_on_cpu=nms_on_cpu
)
model.summary()
inferencer = Inferencer(keras_model=model,
batch_size=experiment_spec.eval_config.batch_size,
infer_process_fn=keras_output_process_fn,
class_mapping=None,
threshold=experiment_spec.nms_config.confidence_threshold)
print("Using TLT model for inference, setting batch size to the one in eval_config:",
experiment_spec.eval_config.batch_size)
else:
# Works in python 3.6
cpu_cnt = os.cpu_count()
if cpu_cnt is None:
cpu_cnt = 1
session_config = tf.compat.v1.ConfigProto(
device_count={'GPU' : 0, 'CPU': cpu_cnt}
)
session = tf.Session(config=session_config)
# Pin TF to CPU to avoid TF & TRT CUDA context conflict
K.set_session(session)
inferencer = Inferencer(trt_engine_path=arguments.model_path,
infer_process_fn=trt_output_process_fn,
batch_size=experiment_spec.eval_config.batch_size,
class_mapping=None,
threshold=experiment_spec.nms_config.confidence_threshold)
print("Using TensorRT engine for inference, setting batch size to engine's one:",
inferencer.batch_size)
# Prepare labels
sess = K.get_session()
if val_labels_format == "tfrecords":
h_tensor = tf.constant(
experiment_spec.augmentation_config.output_height,
dtype=tf.int32
)
w_tensor = tf.constant(
experiment_spec.augmentation_config.output_width,
dtype=tf.int32
)
val_dataset = YOLOv3DataPipe(
experiment_spec,
label_encoder=None,
training=False,
h_tensor=h_tensor,
w_tensor=w_tensor,
sess=sess
)
num_samples = val_dataset.num_samples
num_steps = num_samples // experiment_spec.eval_config.batch_size
tr = trange(num_steps, file=sys.stdout)
sess.run(get_init_ops())
else:
eval_sequence = YOLOv3DataSequence(
experiment_spec.dataset_config,
experiment_spec.augmentation_config,
experiment_spec.eval_config.batch_size,
is_training=False,
encode_fn=None
)
enqueuer = OrderedEnqueuer(eval_sequence, use_multiprocessing=False)
enqueuer.start(workers=max(os.cpu_count() - 1, 1), max_queue_size=20)
output_generator = enqueuer.get()
tr = trange(len(eval_sequence), file=sys.stdout)
tr.set_description('Producing predictions')
gt_labels = []
pred_labels = []
# Loop over all batches.
for _ in tr:
# Generate batch.
if val_labels_format == "tfrecords":
batch_X, batch_labs = val_dataset.get_array()
else:
batch_X, batch_labs = next(output_generator)
y_pred = inferencer._predict_batch(batch_X)
gt_labels.extend(batch_labs)
conf_thres = experiment_spec.nms_config.confidence_threshold
for i in range(len(y_pred)):
y_pred_valid = y_pred[i][y_pred[i][:, 1] > conf_thres]
pred_labels.append(y_pred_valid)
results = evaluator(gt_labels, pred_labels, verbose=True)
mean_average_precision, average_precisions = results
print("*******************************")
for i in range(len(average_precisions)):
print("{:<14}{:<6}{}".format(classes[i], 'AP', round(average_precisions[i], 5)))
print("{:<14}{:<6}{}".format('', 'mAP', round(mean_average_precision, 5)))
print("*******************************")
if arguments.results_dir:
s_logger.kpi.update({'mAP': float(mean_average_precision)})
s_logger.write(
status_level=status_logging.Status.SUCCESS,
message="Evaluation finished successfully."
)
@check_tf_oom
def main(args=None):
"""Run the evaluation process."""
try:
args = parse_command_line(args)
evaluate(args)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Evaluation finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Evaluation was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/scripts/evaluate.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""TLT command line wrapper to invoke CLI scripts."""
import sys
from nvidia_tao_tf1.cv.common.entrypoint.entrypoint import launch_job
import nvidia_tao_tf1.cv.yolo_v3.scripts
def main():
"""Function to launch the job."""
launch_job(nvidia_tao_tf1.cv.yolo_v3.scripts, "yolo_v3", sys.argv[1:])
if __name__ == "__main__":
main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/entrypoint/yolo_v3.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""TLT YOLOv3 entrypoint."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/entrypoint/__init__.py |
tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/experiment_specs/__init__.py |
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class to export trained .tlt models to etlt file."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from keras import backend as K
from keras.models import Model
import numpy as np
import onnx
import onnx_graphsurgeon as onnx_gs
import tensorflow as tf
from nvidia_tao_tf1.core.export._onnx import keras_to_onnx
# Import quantization layer processing.
from nvidia_tao_tf1.core.export._quantized import (
check_for_quantized_layers,
process_quantized_layers,
)
from nvidia_tao_tf1.core.export._uff import _reload_model_for_inference
from nvidia_tao_tf1.cv.common.export.keras_exporter import KerasExporter as Exporter
from nvidia_tao_tf1.cv.yolo_v3.layers.export_layers import BoxLayer, ClsLayer
from nvidia_tao_tf1.cv.yolo_v3.layers.yolo_anchor_box_layer import YOLOAnchorBox
from nvidia_tao_tf1.cv.yolo_v3.utils.model_io import load_model
from nvidia_tao_tf1.cv.yolo_v3.utils.spec_loader import load_experiment_spec
CUSTOM_OBJ = {'YOLOAnchorBox': YOLOAnchorBox,
'BoxLayer': BoxLayer,
'ClsLayer': ClsLayer}
class YOLOv3Exporter(Exporter):
"""Exporter class to export a trained yolo model."""
def __init__(self, model_path=None,
key=None,
data_type="fp32",
strict_type=False,
experiment_spec_path="",
backend="onnx",
**kwargs):
"""Instantiate the yolo exporter to export a trained yolo .tlt model.
Args:
model_path(str): Path to the yolo model file.
key (str): Key to decode the model.
data_type (str): Backend data-type for the optimized TensorRT engine.
strict_type(bool): Apply TensorRT strict_type_constraints or not for INT8 mode.
experiment_spec_path (str): Path to yolo experiment spec file.
backend (str): Type of intermediate backend parser to be instantiated.
"""
super(YOLOv3Exporter, self).__init__(model_path=model_path,
key=key,
data_type=data_type,
strict_type=strict_type,
backend=backend,
**kwargs)
self.experiment_spec_path = experiment_spec_path
assert os.path.isfile(self.experiment_spec_path), \
"Experiment spec file not found at {}.".format(self.experiment_spec_path)
self.experiment_spec = None
def load_model(self, backend="onnx"):
"""Simple function to load the yolo Keras model."""
experiment_spec = load_experiment_spec(self.experiment_spec_path)
K.clear_session()
K.set_learning_phase(0)
img_channel = experiment_spec.augmentation_config.output_channel
img_height = experiment_spec.augmentation_config.output_height
img_width = experiment_spec.augmentation_config.output_width
model = load_model(
self.model_path,
experiment_spec,
(img_channel, img_height, img_width),
key=self.key
)
last_layer_out = model.layers[-1].output
r_boxes = BoxLayer(name="box")(last_layer_out)
r_cls = ClsLayer(name="cls")(last_layer_out)
model = Model(inputs=model.inputs, outputs=[r_boxes, r_cls])
if check_for_quantized_layers(model):
model, self.tensor_scale_dict = process_quantized_layers(
model, backend,
calib_cache=None,
calib_json=None)
# plugin nodes will have different names in TRT
nodes = list(self.tensor_scale_dict.keys())
for k in nodes:
if k.find('Input') != -1:
self.tensor_scale_dict['Input'] = self.tensor_scale_dict.pop(k)
# ZeroPadding is fused with its following conv2d/depthwiseconv2d, collapse them.
padding_nodes = []
for k in self.tensor_scale_dict:
if '/Pad' in k:
# this is a ZeroPadding node
padding_nodes.append(k)
for n in padding_nodes:
self.tensor_scale_dict.pop(n)
self.experiment_spec = experiment_spec
img_mean = experiment_spec.augmentation_config.image_mean
self.image_mean = [103.939, 116.779, 123.68] \
if experiment_spec.augmentation_config.output_channel == 3 else [117.3786]
if img_mean:
if experiment_spec.augmentation_config.output_channel == 3:
self.image_mean = [img_mean['b'], img_mean['g'], img_mean['r']]
else:
self.image_mean = [img_mean['l']]
# @zeyuz: must reload so tensor names won't have the _1 suffix
model = _reload_model_for_inference(model, custom_objects=CUSTOM_OBJ)
return model
def save_exported_file(self, model, output_file_name):
"""Save the exported model file.
This routine converts a keras model to onnx/uff model
based on the backend the exporter was initialized with.
Args:
model (keras.model.Model): Decoded keras model to be exported.
output_file_name (str): Path to the output file.
Returns:
output_file_name (str): Path to the onnx file.
"""
if self.backend == "onnx":
keras_to_onnx(model,
output_file_name,
custom_objects=CUSTOM_OBJ,
target_opset=self.target_opset)
tf.reset_default_graph()
onnx_model = onnx.load(output_file_name)
onnx_model = self.node_process(onnx_model)
os.remove(output_file_name)
onnx.save(onnx_model, output_file_name)
return output_file_name
# @zeyuz: UFF export not supported in YOLOv3 due to NvBug200697725
raise NotImplementedError("Invalid backend provided. {}".format(self.backend))
def set_input_output_node_names(self):
"""Set input output node names."""
self.output_node_names = ["BatchedNMS"]
self.input_node_names = ["Input"]
def process_nms_node(self, onnx_graph):
"""Process the NMS ONNX node."""
spec = self.experiment_spec
box_data = self._get_onnx_node_by_name(onnx_graph, 'box/concat_concat').outputs[0]
cls_data = self._get_onnx_node_by_name(onnx_graph, 'cls/mul').outputs[0]
nms_out_0 = onnx_gs.Variable(
"BatchedNMS",
dtype=np.int32
)
nms_out_1 = onnx_gs.Variable(
"BatchedNMS_1",
dtype=np.float32
)
nms_out_2 = onnx_gs.Variable(
"BatchedNMS_2",
dtype=np.float32
)
nms_out_3 = onnx_gs.Variable(
"BatchedNMS_3",
dtype=np.float32
)
nms_attrs = dict()
nms_attrs["shareLocation"] = 1
nms_attrs["backgroundLabelId"] = -1
nms_attrs["scoreThreshold"] = spec.nms_config.confidence_threshold
nms_attrs["iouThreshold"] = spec.nms_config.clustering_iou_threshold
nms_attrs["topK"] = 2*spec.nms_config.top_k
nms_attrs["keepTopK"] = spec.nms_config.top_k
nms_attrs["numClasses"] = len(
{str(x) for x in spec.dataset_config.target_class_mapping.values()}
)
nms_attrs["clipBoxes"] = 1
nms_attrs["isNormalized"] = 1
nms_attrs["scoreBits"] = spec.nms_config.infer_nms_score_bits
nms_plugin = onnx_gs.Node(
op="BatchedNMSDynamic_TRT",
name="BatchedNMS_N",
inputs=[box_data, cls_data],
outputs=[nms_out_0, nms_out_1, nms_out_2, nms_out_3],
attrs=nms_attrs
)
onnx_graph.nodes.append(nms_plugin)
onnx_graph.outputs = nms_plugin.outputs
onnx_graph.cleanup().toposort()
def node_process(self, yolo_graph):
"""Manipulating the yolo dynamic graph to make it compatible with TRT.
Args:
yolo_graph (onnx_gs.DynamicGraph): Dynamic graph of the yolo model from the TF Proto
file.
Returns:
yolo_graph (onnx_gs.DynamicGraph): Post processed dynamic graph which is ready to be
serialized as an ONNX file.
"""
graph = onnx_gs.import_onnx(yolo_graph)
self.process_nms_node(graph)
self._fix_onnx_paddings(graph)
return onnx_gs.export_onnx(graph)
def get_class_labels(self):
"""Get list of class labels to serialize to a labels.txt file."""
classes = sorted({str(x) for x in
self.experiment_spec.dataset_config.target_class_mapping.values()})
return classes
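# Illustrative usage sketch (not part of the exporter itself): how this class is typically
# driven end to end. The model/spec/output paths are hypothetical placeholders, and the call
# assumes the remaining exporter constructor arguments (data_type, strict_type, etc.) keep
# their defaults.
def _example_export_yolov3(model_path, key, experiment_spec_path, output_file_name):
    """Hypothetical export flow built on the exporter defined above."""
    exporter = YOLOv3Exporter(
        model_path=model_path,
        key=key,
        experiment_spec_path=experiment_spec_path,
        backend="onnx",
    )
    keras_model = exporter.load_model(backend="onnx")
    exporter.set_input_output_node_names()
    # Writes the ONNX file with the BatchedNMS plugin node appended.
    return exporter.save_exported_file(keras_model, output_file_name)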
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/export/yolov3_exporter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to export a trained YOLO model to an ETLT file for deployment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/export/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""YOLO v3 data loader."""
import tensorflow as tf
from nvidia_tao_tf1.blocks.multi_source_loader.data_loader import DataLoaderYOLOv3
from nvidia_tao_tf1.blocks.multi_source_loader.processors import (
TemporalBatcher,
)
from nvidia_tao_tf1.blocks.multi_source_loader.sources import (
TFRecordsDataSource,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types import (
Bbox2DLabel,
Coordinates2D,
FEATURE_CAMERA,
filter_bbox_label_based_on_minimum_dims,
LabelledImages2DReference,
LabelledImages2DReferenceVal,
SequenceExample,
set_augmentations,
set_augmentations_val,
set_h_tensor,
set_h_tensor_val,
set_image_channels,
set_image_depth,
set_max_side,
set_min_side,
set_w_tensor,
set_w_tensor_val,
sparsify_dense_coordinates,
vector_and_counts_to_sparse_tensor,
)
import nvidia_tao_tf1.core as tao_core
from nvidia_tao_tf1.core import distribution
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.build_dataloader import _pattern_to_files
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.data_source_config import DataSourceConfig
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.default_dataloader import DefaultDataloader
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.default_dataloader import FRAME_ID_KEY
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.default_dataloader import HEIGHT_KEY
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.default_dataloader import UNKNOWN_CLASS
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.default_dataloader import WIDTH_KEY
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.utilities import (
extract_tfrecords_features,
get_absolute_data_path,
)
from nvidia_tao_tf1.cv.detectnet_v2.label_filter.base_label_filter import filter_labels
from nvidia_tao_tf1.cv.yolo_v3.data_loader.augmentation import (
apply_letterbox_resize,
inner_augmentations
)
Canvas2D = tao_core.types.Canvas2D
BW_POLY_COEFF1_60FC = 0.000545421498827636
FOLD_STRING = "fold-{:03d}-of-"
class YOLOv3TFRecordsParser(object):
"""Parse tf.train.Example protos into YOLO v3 Examples."""
def __init__(
self, tfrecord_path, image_dir, extension,
channels, depth, source_weight, h_tensor, w_tensor,
augmentations, target_class_mapping,
class_to_idx_mapping, training
):
"""Construct a parser for YOLO v3 labels.
Args:
tfrecord_path (list): List of paths to tfrecords file.
image_dir (str): Path to the directory where images are contained.
extension (str): Extension for images that get loaded (
".fp16", ".png", ".jpg" or ".jpeg").
channels (int): Number of channels in each image.
depth(int): Depth of image(8 or 16).
h_tensor(Tensor): Image height tensor.
w_tensor(Tensor): Image width tensor.
augmentations(List): List of augmentations.
Raises:
ValueError: If the number of input channels is unsupported (it must be 1 or 3).
"""
if channels not in [1, 3]:
raise ValueError("YOLOv3TFRecordsParser: unsupported number of channels %d." %
channels)
self._image_file_extension = extension
# These will be set once all data sources have been instantiated and the common
# maximum image size is known.
self._output_height = None
self._output_width = None
self._output_min = None
self._output_max = None
self._num_input_channels = channels
set_image_channels(self._num_input_channels)
if depth == 16 and extension in ["JPG", "JPEG", "jpg", "jpeg"]:
raise ValueError(
f"Only PNG(png) images can support 16-bit depth, got {extension}"
)
self._image_depth = depth
set_image_depth(self._image_depth)
self.training = training
self._h_tensor = h_tensor
if self.training:
set_h_tensor(h_tensor)
else:
set_h_tensor_val(h_tensor)
self._w_tensor = w_tensor
if self.training:
set_w_tensor(w_tensor)
else:
set_w_tensor_val(w_tensor)
self._augmentations = augmentations
if self.training:
set_augmentations(augmentations)
else:
set_augmentations_val(augmentations)
self.target_class_mapping = target_class_mapping
self.class_to_idx_mapping = class_to_idx_mapping
self._image_dir = image_dir
if not self._image_dir.endswith('/'):
self._image_dir += '/'
self._tfrecord_path = tfrecord_path
# Delay the actual definition to call time.
self._parse_example = None
# Set the source_weight.
self.source_weight = source_weight
def _get_parse_example(self):
if self._parse_example is None:
extracted_features = extract_tfrecords_features(self._tfrecord_path[0])
self._parse_example = \
tao_core.processors.ParseExampleProto(features=extracted_features,
single=True)
return self._parse_example
def set_target_size(self, height, width, min_side=None, max_side=None):
"""Set size for target image.
Args:
height (int): Target image height.
width (int): Target image width.
min_side(int): Target minimal side of the image (either width or height).
max_side(int): The larger side of the image (the one other than min_side).
"""
self._output_height = height
self._output_width = width
self._output_min = min_side
self._output_max = max_side
set_min_side(min_side)
set_max_side(max_side)
def __call__(self, tfrecord):
"""Parse a tfrecord.
Args:
tfrecord (tensor): a serialized example proto.
Returns:
(Example) Example compatible with Processors.
"""
example = self._get_parse_example()(tfrecord)
example = DefaultDataloader.translate_bbox_to_polygon(example)
# Reshape to have rank 0.
height = tf.cast(tf.reshape(example[HEIGHT_KEY], []), dtype=tf.int32)
width = tf.cast(tf.reshape(example[WIDTH_KEY], []), dtype=tf.int32)
example[HEIGHT_KEY] = height
example[WIDTH_KEY] = width
# Reshape image_path to have rank 0 as expected by TensorFlow's ReadFile.
image_path = tf.strings.join([self._image_dir, example[FRAME_ID_KEY]])
image_path = tf.reshape(image_path, [])
extension = tf.convert_to_tensor(value=self._image_file_extension)
image_path = tf.strings.join([image_path, extension])
labels = self._extract_bbox_labels(example)
# @vpraveen: This is the point where the image datastructure is populated. The loading
# and decoding functions are defined as member variables in Images2DReference.
if self.training:
x = SequenceExample(
instances={
FEATURE_CAMERA: LabelledImages2DReference(
path=image_path,
extension=extension,
canvas_shape=Canvas2D(
height=tf.ones([self._output_height]),
width=tf.ones([self._output_width])),
input_height=height,
input_width=width,
labels=labels
),
},
labels=[]
)
else:
x = SequenceExample(
instances={
FEATURE_CAMERA: LabelledImages2DReferenceVal(
path=image_path,
extension=extension,
canvas_shape=Canvas2D(
height=tf.ones([self._output_height]),
width=tf.ones([self._output_width])),
input_height=height,
input_width=width,
labels=labels
),
},
labels=[]
)
return x
def _extract_depth(self, example):
"""Extract depth label.
Args:
example (dict): Maps from feature name (str) to tf.Tensor.
Returns:
depth (tf.Tensor): depth values with possible scale adjustments.
"""
depth = example['target/world_bbox_z']
# Use the ratio of the first order backward polynomial coefficients as the scaling factor.
# Default camera is 60 degree camera, and this is the first order bw-poly coeff of it.
if 'frame/bw_poly_coeff1' in example:
scale_factor = example['frame/bw_poly_coeff1'] / \
BW_POLY_COEFF1_60FC
else:
scale_factor = 1.0
depth *= scale_factor
return depth
def _extract_bbox_labels(self, example):
"""Extract relevant features from labels.
Args:
example (dict): Maps from feature name (str) to tf.Tensor.
Returns:
bbox_label (Bbox2DLabel): Named tuple containing all the feature in tf.SparseTensor
form.
"""
# Cast polygons to rectangles. For polygon support, use SQLite.
coord_x = example['target/coordinates/x']
coord_y = example['target/coordinates/y']
coord_idx = example['target/coordinates/index']
xmin = tf.math.segment_min(coord_x, coord_idx)
xmax = tf.math.segment_max(coord_x, coord_idx)
ymin = tf.math.segment_min(coord_y, coord_idx)
ymax = tf.math.segment_max(coord_y, coord_idx)
# Massage the above to get a [N, 2] tensor. N refers to the number of vertices, so 2
# per bounding box, and always in (x, y) order.
dense_coordinates = tf.reshape(
tf.stack([xmin, ymin, xmax, ymax], axis=1),
(-1, 2))
counts = tf.ones_like(example['target/object_class'], dtype=tf.int64)
# 2 vertices per bounding box (since we can infer the other 2 using just these).
vertex_counts_per_polygon = 2 * counts
sparse_coordinates = \
sparsify_dense_coordinates(dense_coordinates, vertex_counts_per_polygon)
# This will be used to instantiate the namedtuple Bbox2DLabel.
bbox_2d_label_kwargs = dict()
bbox_2d_label_kwargs['vertices'] = Coordinates2D(
coordinates=sparse_coordinates,
canvas_shape=Canvas2D(
height=tf.ones([self._output_height]),
width=tf.ones([self._output_width]))
)
bbox_2d_label_kwargs['frame_id'] = tf.reshape(example[FRAME_ID_KEY], [])
# Take care of all other possible target features.
for feature_name in Bbox2DLabel._fields:
if feature_name in {'vertices', 'frame_id'}:
continue
if 'target/' + feature_name in example:
if feature_name == 'world_bbox_z':
sparse_feature_tensor = vector_and_counts_to_sparse_tensor(
vector=self._extract_depth(example),
counts=counts)
else:
sparse_feature_tensor = vector_and_counts_to_sparse_tensor(
vector=example['target/' + feature_name],
counts=counts)
else:
# TODO(@williamz): Is there a better way to handle optional labels?
sparse_feature_tensor = []
bbox_2d_label_kwargs[feature_name] = sparse_feature_tensor
# Assign source_weight.
bbox_2d_label_kwargs['source_weight'] = [tf.constant(self.source_weight, tf.float32)]
bbox_label = Bbox2DLabel(**bbox_2d_label_kwargs)
# Filter out labels whose dimensions are too small. NOTE: this is mostly for historical
# reasons (the DefaultDataloader has such a mechanism by default), and due to the fact
# that labels are actually not enforced to have x2 > x1 and y2 > y1.
bbox_label = filter_bbox_label_based_on_minimum_dims(
bbox_2d_label=bbox_label, min_height=1.0, min_width=1.0)
return bbox_label
def _resize_coordinates(
self,
dense_coordinates,
height,
width,
target_height,
target_width
):
"""Resize coordinates to target size."""
scale_x = tf.cast(target_width / width, tf.float32)
scale_y = tf.cast(target_height / height, tf.float32)
scale_xy = tf.reshape(tf.stack([scale_x, scale_y]), (-1, 2))
return dense_coordinates * scale_xy
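# Worked example of the coordinate scaling above (illustrative sketch, plain TF, no class state).
# A 100 x 200 (height x width) frame resized to 50 x 100 halves both axes, so interleaved
# (x, y) vertex rows are simply multiplied by [scale_x, scale_y] = [0.5, 0.5].
def _example_resize_coordinates():
    dense_coordinates = tf.constant([[10., 20.], [30., 40.]])
    scale_x = tf.cast(100 / 200, tf.float32)
    scale_y = tf.cast(50 / 100, tf.float32)
    scale_xy = tf.reshape(tf.stack([scale_x, scale_y]), (-1, 2))
    # Result: [[5., 10.], [15., 20.]]
    return dense_coordinates * scale_xy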
class YOLOv3TFRecordsDataSource(TFRecordsDataSource):
"""DataSource for reading examples from TFRecords files."""
def __init__(self, tfrecord_path, image_dir, extension,
height, width, channels, depth, subset_size,
preprocessing, sample_ratio=1.0,
source_weight=1.0, min_side=None,
max_side=None, h_tensor=None,
w_tensor=None, augmentations=None,
target_class_mapping=None,
class_to_idx_mapping=None,
training=True):
"""Construct a YOLOv3TFRecordsDataSource.
Args:
tfrecord_path (str): Path, or a list of paths to tfrecords file(s).
image_dir (str): Path to directory where images referenced by examples are stored.
extension (str): Extension of image files.
height (int): Output image height.
width (int): Output image width.
channels (int): Number of channels for images stored in this dataset.
depth(int): Image depth in bits per pixel per channel.
subset_size (int): Number of images from tfrecord_path to use.
preprocessing (Pipeline): Preprocessing processors specific to this dataset.
sample_ratio (float): probability at which a sample from this data source is picked
for inclusion in a batch.
source_weight (float): Value by which to weight the loss for samples
coming from this DataSource.
min_side(int): Minimal side of the image.
max_side(int): Maximal side of the image.
h_tensor(Tensor): Image height tensor.
w_tensor(Tensor): Image width tensor.
"""
super(YOLOv3TFRecordsDataSource, self).__init__(
tfrecord_path=tfrecord_path,
image_dir=image_dir,
extension=extension,
height=height,
width=width,
channels=channels,
subset_size=subset_size,
preprocessing=preprocessing,
sample_ratio=sample_ratio
)
self._parser = None
if self.tfrecord_path:
assert depth in [8, 16], (
f"Image depth can only support 8 or 16 bits, got {depth}"
)
self._parser = YOLOv3TFRecordsParser(
tfrecord_path=self.tfrecord_path,
image_dir=image_dir,
extension=extension,
channels=channels,
depth=depth,
source_weight=source_weight,
h_tensor=h_tensor,
w_tensor=w_tensor,
augmentations=augmentations,
target_class_mapping=target_class_mapping,
class_to_idx_mapping=class_to_idx_mapping,
training=training)
self.num_samples = sum([sum(1 for _ in tf.compat.v1.python_io.tf_record_iterator(filename))
for filename in self.tfrecord_path])
self.min_side = min_side
self.max_side = max_side
self.max_image_width, self.max_image_height = self._get_max_image_size()
# Set the target size for the parser.
self.set_target_size(height=self.max_image_height,
width=self.max_image_width,
min_side=self.min_side,
max_side=self.max_side)
@property
def parse_example(self):
"""Parser for labels in TFRecords used by YOLOv3."""
return lambda dataset: dataset.map(self._parser)
def set_target_size(self, height, width, min_side=None, max_side=None):
"""Set size for target image .
Args:
height (int): Target image height.
width (int): Target image width.
min_side(int): Minimal side of the image.
max_side(int): Maximal side of the image.
"""
if self._parser:
self._parser.set_target_size(
height=height,
width=width,
min_side=min_side,
max_side=max_side
)
def _get_max_image_size(self):
"""Scan for the maximum image size of this data source.
Returns:
(int) max_image_width, max_image_height.
"""
max_image_height = 0
max_image_width = 0
for path in self.tfrecord_path:
for record in tf.compat.v1.python_io.tf_record_iterator(path):
example = tf.train.Example()
example.ParseFromString(record)
height = int(str(example.features.feature[HEIGHT_KEY].int64_list.value[0]))
width = int(str(example.features.feature[WIDTH_KEY].int64_list.value[0]))
max_image_height = max(max_image_height, height)
max_image_width = max(max_image_width, width)
return max_image_width, max_image_height
class YOLOv3DataLoader:
"""YOLOv3DataLoader for object detection datasets such as KITTI and Cyclops.
Implements a data loader that reads labels and frame id from datasets and compiles
image and ground truth tensors used in training and validation.
"""
def __init__(self,
training_data_source_list,
image_file_encoding,
augmentation_config,
validation_data_source_list=None,
data_sequence_length_in_frames=None,
target_class_mapping=None,
h_tensor=None,
w_tensor=None,
training=False
):
"""Instantiate the dataloader.
Args:
training_data_source_list (list): List of DataSourceConfigs specifying training set.
image_file_encoding (str): How the images to be produced by the dataset are encoded.
Can be e.g. "jpg", "fp16", "png".
augmentation_config (dlav.drivenet.common.dataloader.augmentation_config.
AugmentationConfig): Holds the parameters for augmentation and preprocessing.
validation_data_source_list (list): List of DataSourceConfigs specifying validation
set. Can be None.
data_sequence_length_in_frames (int): Number of frames in each sequence. If not None,
the output images will be 5D tensors with additional temporal dimension.
target_class_mapping (dict): source to target class mapper from the ModelConfig proto.
h_tensor(Tensor): Image height tensor.
w_tensor(Tensor): Image width tensor.
"""
self.image_file_encoding = image_file_encoding
self.augmentation_config = augmentation_config
self.target_class_mapping = target_class_mapping
# Get training data sources.
self.training_data_sources = training_data_source_list
# Now, potentially, get the validation data sources.
self.validation_data_sources = validation_data_source_list
self._h_tensor = h_tensor
self._w_tensor = w_tensor
self._sequence_length_in_frames = data_sequence_length_in_frames
self._training = training
self._augmentations = self._build_yolov3_augmentations_pipeline(
augmentation_config
)
self.num_input_channels = augmentation_config.output_channel
self.image_depth = int(augmentation_config.output_depth) or 8
# Set up a look up table for class mapping.
self._target_class_lookup = None
if self.target_class_mapping is not None:
self._target_class_lookup = tao_core.processors.LookupTable(
keys=list(self.target_class_mapping.keys()),
values=list(self.target_class_mapping.values()),
default_value=tf.constant(UNKNOWN_CLASS)
)
self._class_idx_lookup = tao_core.processors.LookupTable(
keys=sorted(list(self.target_class_mapping.values())),
values=list(range(len(list(self.target_class_mapping.values())))),
default_value=-1
)
self.training_sources, self.num_training_samples =\
self._construct_data_sources(self.training_data_sources)
if validation_data_source_list is not None:
self.validation_sources, self.num_validation_samples =\
self._construct_data_sources(self.validation_data_sources)
else:
self.validation_sources = None
self.num_validation_samples = 0
def _construct_data_sources(self, data_source_list):
"""Instantiate data sources.
Args:
data_source_list (list): List of DataSourceConfigs.
Returns:
data_sources (list): A list of DataSource instances.
num_samples (int): Sum of the number of samples in the above data sources.
Raises:
ValueError: If an unknown dataset type was encountered.
"""
data_sources = []
for data_source_config in data_source_list:
if data_source_config.dataset_type == 'tfrecord':
data_source =\
YOLOv3TFRecordsDataSource(
tfrecord_path=data_source_config.dataset_files,
image_dir=data_source_config.images_path,
extension='.' + self.image_file_encoding,
height=0,
width=0,
channels=self.num_input_channels,
depth=self.image_depth,
subset_size=0, # TODO(jrasanen) use this.
sample_ratio=1.0, # TODO(jrasanen) use this.
preprocessing=[],
source_weight=data_source_config.source_weight,
min_side=0,
max_side=0,
h_tensor=self._h_tensor,
w_tensor=self._w_tensor,
augmentations=self._augmentations,
target_class_mapping=self._target_class_lookup,
class_to_idx_mapping=self._class_idx_lookup,
training=self._training
)
else:
raise ValueError("Unknown dataset type \'%s\'" % data_source_config.dataset_type)
data_sources.append(data_source)
# Scan through all data sources and compute the maximum image size. Needed so that we
# can pad all images to the same size for minibatching.
max_image_height = 0
max_image_width = 0
for data_source in data_sources:
max_image_height = max(data_source.max_image_height, max_image_height)
max_image_width = max(data_source.max_image_width, max_image_width)
self._max_image_height = max_image_height
self._max_image_width = max_image_width
num_samples = 0
for data_source in data_sources:
data_source.set_target_size(
height=self._max_image_height,
width=self._max_image_width,
min_side=0,
max_side=0
)
source_samples = len(data_source)
num_samples += source_samples
# This is to be consistent with the DefaultDataloader's concatenation behavior.
# Note that it doesn't functionally reproduce concatenating multiple sources into one,
# but statistically should lead to the samples being seen the same amount of times.
data_source.sample_ratio = source_samples
return data_sources, num_samples
def get_num_samples(self, training):
"""Get number of dataset samples.
Args:
training (bool): Get number of samples in the training (true) or
validation (false) set.
Returns:
Number of samples in the chosen set.
"""
if training:
return self.num_training_samples
return self.num_validation_samples
def get_dataset_tensors(self, batch_size, repeat=True):
"""Get input images and ground truth labels as tensors for training and validation.
Args:
batch_size (int): Minibatch size.
repeat (bool): Whether the dataset can be looped over multiple times or only once.
Returns:
images (Tensor of shape (batch, channels, height, width)): Input images with values
in the [0, 1] range.
labels (Bbox2DLabel): Contains labels corresponding to ``images``.
shapes (Tensor of shape (batch, 2)): Original (height, width) of each image.
num_samples (int): Total number of samples found in the dataset.
"""
# TODO(jrasanen) Need to support repeat in dlav/common data loader? Looks like we
# currently have repeat=True everywhere, so could actually remove the arg.
assert repeat is True
data_sources = self.training_sources if self._training else self.validation_sources
if self._sequence_length_in_frames is not None:
preprocessors = [TemporalBatcher(size=self._sequence_length_in_frames)]
else:
preprocessors = []
if self._training:
num_gpus = distribution.get_distributor().size()
local_gpu = distribution.get_distributor().rank()
else:
# We want data to be unsharded during evaluation because currently only single-GPU
# evaluation is enabled.
num_gpus = 1
local_gpu = 0
data_loader = DataLoaderYOLOv3(
data_sources=data_sources,
augmentation_pipeline=[],
batch_size=batch_size * num_gpus,
shuffle=self._training,
preprocessing=preprocessors,
# This doesn't matter as we forced it to float32 in modulus
pipeline_dtype=tf.uint8
)
data_loader.set_shard(shard_count=num_gpus, shard_index=local_gpu)
# Instantiate the data loader pipeline.
sequence_example = data_loader()
images = sequence_example.instances[FEATURE_CAMERA].images
labels = sequence_example.instances[FEATURE_CAMERA].labels
if self._sequence_length_in_frames is None:
images = images[:, 0, ...]
if self.target_class_mapping is not None:
labels = self._map_to_model_target_classes(labels)
shapes = sequence_example.instances[FEATURE_CAMERA].shapes
shapes = tf.reshape(shapes, (-1, 2))
return images, labels, shapes, len(data_loader)
def _map_to_model_target_classes(self, labels):
"""Map object classes in the data source to the target classes in the dataset_config.
Args:
labels(BBox2DLabel): Input data label.
Returns:
filtered_labels (Bbox2DLabel): Output labels with mapped class names.
"""
source_classes = labels.object_class
mapped_classes = tf.SparseTensor(
values=self._target_class_lookup(source_classes.values),
indices=source_classes.indices,
dense_shape=source_classes.dense_shape)
mapped_labels = labels._replace(object_class=mapped_classes)
valid_indices = tf.not_equal(mapped_classes.values, UNKNOWN_CLASS)
return filter_labels(mapped_labels, valid_indices)
def _build_yolov3_augmentations_pipeline(self, yolov3_augmentation_config):
def _augmentations_list(image, labels, ratio, xmax):
return inner_augmentations(image, labels, ratio, xmax, yolov3_augmentation_config)
if self._training:
return _augmentations_list
return apply_letterbox_resize
def build_data_source_lists(dataset_proto):
"""Build training and validation data source lists from proto.
Args:
dataset_proto (nvidia_tao_tf1.cv.detectnet_v2.proto.dataset_config proto message)
Returns:
training_data_source_list (list): List of DataSourceConfig objects to use for training.
validation_data_source_list (list): List of DataSourceConfig objects to use for
validation. Can be empty.
validation_fold (int): Validation fold number (0-based). Indicates which fold from the
training data to use as validation. Can be None.
"""
# Determine how we are getting validation data sources.
if len(dataset_proto.validation_data_sources) > 0:
dataset_split_type = "validation_data_sources"
else:
dataset_split_type = "validation_fold"
training_data_source_list = []
validation_data_source_list = []
validation_fold = None
if dataset_proto.data_sources[0].WhichOneof("labels_format") == "tfrecords_path":
for data_source_proto in dataset_proto.data_sources:
source_weight = data_source_proto.source_weight
images_path = get_absolute_data_path(
str(data_source_proto.image_directory_path)
)
tfrecords_path = str(data_source_proto.tfrecords_path)
tfrecords_files = _pattern_to_files(tfrecords_path)
# Filter out files based on validation fold only if validation fold specified.
if dataset_split_type == "validation_fold":
# Defining the fold number for the glob pattern.
fold_identifier = FOLD_STRING.format(dataset_proto.validation_fold)
validation_fold = dataset_proto.validation_fold
# Take all .tfrecords files except the one matching the validation fold.
training_tfrecords_files = [filename for filename in tfrecords_files
if fold_identifier not in filename]
# Take only the file matching the validation fold.
validation_tfrecords_files = [filename for filename in tfrecords_files
if fold_identifier in filename]
validation_data_source_list.append(DataSourceConfig(
dataset_type='tfrecord',
dataset_files=validation_tfrecords_files,
images_path=images_path,
export_format=None,
split_db_path=None,
split_tags=None,
source_weight=source_weight))
else:
training_tfrecords_files = tfrecords_files
training_data_source_list.append(DataSourceConfig(
dataset_type='tfrecord',
dataset_files=training_tfrecords_files,
images_path=images_path,
export_format=None,
split_db_path=None,
split_tags=None,
source_weight=source_weight))
# Get validation data sources, if available.
if dataset_split_type == "validation_data_sources":
for data_source_proto in dataset_proto.validation_data_sources:
source_weight = data_source_proto.source_weight
images_path = get_absolute_data_path(
str(data_source_proto.image_directory_path)
)
tfrecords_path = str(data_source_proto.tfrecords_path)
tfrecords_files = _pattern_to_files(tfrecords_path)
validation_data_source_list.append(DataSourceConfig(
dataset_type='tfrecord',
dataset_files=tfrecords_files,
images_path=images_path,
export_format=None,
split_db_path=None,
split_tags=None,
source_weight=source_weight)
)
return training_data_source_list, validation_data_source_list, validation_fold
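# Illustrative sketch of the validation-fold selection used above (hypothetical file names).
# With validation_fold = 0, FOLD_STRING.format(0) yields "fold-000-of-", so that fold becomes
# the validation set and every other fold stays in training.
def _example_fold_split():
    tfrecords_files = [
        "kitti-fold-000-of-002",
        "kitti-fold-001-of-002",
    ]
    fold_identifier = FOLD_STRING.format(0)  # "fold-000-of-"
    training_files = [f for f in tfrecords_files if fold_identifier not in f]
    validation_files = [f for f in tfrecords_files if fold_identifier in f]
    # -> (["kitti-fold-001-of-002"], ["kitti-fold-000-of-002"])
    return training_files, validation_files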
def build_dataloader(
dataset_proto,
augmentation_proto,
h_tensor,
w_tensor,
training
):
"""Build a Dataloader from a proto.
Args:
dataset_proto (nvidia_tao_tf1.cv.detectnet_v2.proto.dataset_config.DatasetConfig)
augmentation_proto (nvidia_tao_tf1.cv.detectnet_v2.proto.augmentation_config.
AugmentationConfig)
Returns:
dataloader (YOLOv3DataLoader): The YOLO v3 dataloader built from the given protos.
"""
# Now, get the class mapping.
dataset_config = dataset_proto
source_to_target_class_mapping = dict(dataset_config.target_class_mapping)
# Image file encoding.
image_file_encoding = dataset_config.image_extension
# Get the data source lists.
training_data_source_list, validation_data_source_list, _ = \
build_data_source_lists(dataset_config)
if training:
validation_data_source_list = []
else:
training_data_source_list = []
dataloader_kwargs = dict(
training_data_source_list=training_data_source_list,
image_file_encoding=image_file_encoding,
augmentation_config=augmentation_proto,
validation_data_source_list=validation_data_source_list,
target_class_mapping=source_to_target_class_mapping,
h_tensor=h_tensor,
w_tensor=w_tensor,
training=training
)
return YOLOv3DataLoader(**dataloader_kwargs)
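# Illustrative usage sketch (hypothetical spec object): building the loader and fetching the
# dataset tensors. `experiment_spec` is assumed to be an already-loaded experiment spec proto;
# the constant height/width tensors stand in for the (possibly multi-scale) shape tensors that
# the training script normally provides.
def _example_build_dataloader(experiment_spec):
    h_tensor = tf.constant(384, dtype=tf.int32)
    w_tensor = tf.constant(1248, dtype=tf.int32)
    dataloader = build_dataloader(
        dataset_proto=experiment_spec.dataset_config,
        augmentation_proto=experiment_spec.augmentation_config,
        h_tensor=h_tensor,
        w_tensor=w_tensor,
        training=True
    )
    images, labels, shapes, num_samples = dataloader.get_dataset_tensors(batch_size=4)
    return images, labels, shapes, num_samples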
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/data_loader/yolo_v3_data_loader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IVA YOLO v3 data loader builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import nvidia_tao_tf1.core as tao_core
from nvidia_tao_tf1.cv.ssd.utils.tensor_utils import get_non_empty_rows_2d_sparse
from nvidia_tao_tf1.cv.yolo_v3.data_loader.augmentation import (
outer_augmentations
)
from nvidia_tao_tf1.cv.yolo_v3.data_loader.yolo_v3_data_loader import build_dataloader
def unstack_images(images, shapes, bs):
"""unstack images into a list of images."""
images_list = []
for b_idx in range(bs):
image = images[b_idx, ...]
shape = shapes[b_idx, ...]
image = image[0:shape[0], 0:shape[1], ...]
if image.dtype != tf.float32:
image = tf.cast(image, tf.float32)
images_list.append(image)
return images_list
class YOLOv3DataPipe:
"""
Data loader class.
DataLoader can be used in two ways:
1. build groundtruth image and label TF tensors. Those two tensors can be
directly used for training.
2. build a generator that yields image and label numpy arrays. In this case,
a TF session needs to be passed into the class initializer.
"""
def __init__(self,
experiment_spec,
label_encoder=None,
training=True,
sess=None,
h_tensor=None,
w_tensor=None,
visualizer=None,
rank=0):
"""
Data loader init function.
Arguments:
experiment_spec: The loaded config pb2.
label_encoder (function, optional): If passed in, groundtruth label will be encoded.
training (bool): Return training set or validation set.
sess (TF Session): Required if generator() function needs to be called. Otherwise, just
pass None.
visualizer(object): The Visualizer object.
rank(int): Horovod rank.
"""
dataset_proto = experiment_spec.dataset_config
self._exclude_difficult = not dataset_proto.include_difficult_in_training
dataloader = build_dataloader(
dataset_proto=dataset_proto,
augmentation_proto=experiment_spec.augmentation_config,
h_tensor=h_tensor,
w_tensor=w_tensor,
training=training
)
self.dataloader = dataloader
if training:
batch_size = experiment_spec.training_config.batch_size_per_gpu
else:
batch_size = experiment_spec.eval_config.batch_size
self.batch_size = batch_size
self.images, self.ground_truth_labels, self.shapes, self.num_samples = \
dataloader.get_dataset_tensors(
batch_size
)
# original images for debugging
self._images = self.images
if self.num_samples == 0:
return
self.n_batches = (self.num_samples + self.batch_size - 1) // self.batch_size
cls_mapping_dict = experiment_spec.dataset_config.target_class_mapping
self.classes = sorted({str(x) for x in cls_mapping_dict.values()})
cls_map = tao_core.processors.LookupTable(
keys=self.classes,
values=list(range(len(self.classes))),
default_value=-1)
cls_map.build()
self.label_encoder = label_encoder
gt_labels = []
source_classes = self.ground_truth_labels.object_class
mapped_classes = tf.SparseTensor(
values=cls_map(source_classes.values),
indices=source_classes.indices,
dense_shape=source_classes.dense_shape
)
mapped_labels = self.ground_truth_labels._replace(object_class=mapped_classes)
valid_indices = tf.not_equal(mapped_classes.values, -1)
filtered_labels = mapped_labels.filter(valid_indices)
filtered_obj_ids = tf.sparse.reshape(filtered_labels.object_class, [batch_size, -1, 1])
filtered_coords = tf.sparse.reshape(filtered_labels.vertices.coordinates,
[batch_size, -1, 4])
filtered_occlusion = tf.sparse.reshape(
filtered_labels.occlusion,
[batch_size, -1, 1]
)
filtered_obj_ids = tf.sparse.SparseTensor(
values=tf.cast(tf.round(filtered_obj_ids.values), tf.float32),
indices=filtered_obj_ids.indices,
dense_shape=filtered_obj_ids.dense_shape
)
filtered_coords = tf.sparse.SparseTensor(
values=tf.cast(filtered_coords.values, tf.float32),
indices=filtered_coords.indices,
dense_shape=filtered_coords.dense_shape
)
filtered_occlusion = tf.sparse.SparseTensor(
values=tf.cast(filtered_occlusion.values, tf.float32),
indices=filtered_occlusion.indices,
dense_shape=filtered_occlusion.dense_shape,
)
labels_all = tf.sparse.concat(
axis=-1,
sp_inputs=[filtered_obj_ids, filtered_occlusion, filtered_coords]
)
labels_split = tf.sparse.split(sp_input=labels_all, num_split=batch_size, axis=0)
labels_split = [tf.sparse.reshape(x, [-1, 6]) for x in labels_split]
labels = [tf.sparse.to_dense(get_non_empty_rows_2d_sparse(x)) for x in labels_split]
for l_idx, l in enumerate(labels):
obj_id = tf.cast(l[:, 0], tf.float32)
is_difficult = tf.cast(l[:, 1], tf.float32)
x1 = l[:, 2] / tf.cast(self.shapes[l_idx, 1], tf.float32)
x2 = l[:, 4] / tf.cast(self.shapes[l_idx, 1], tf.float32)
y1 = l[:, 3] / tf.cast(self.shapes[l_idx, 0], tf.float32)
y2 = l[:, 5] / tf.cast(self.shapes[l_idx, 0], tf.float32)
# only select valid labels
select = tf.logical_and(
tf.not_equal(obj_id, -1),
tf.logical_and(tf.less(x1, x2), tf.less(y1, y2))
)
label = tf.stack([obj_id, is_difficult, x1, y1, x2, y2], axis=1)
label = tf.boolean_mask(label, select)
# exclude difficult boxes if configured to do so
if self._exclude_difficult:
label = tf.boolean_mask(
label,
tf.math.logical_not(tf.cast(label[:, 1], tf.bool))
)
gt_labels.append(label)
self.gt_labels = gt_labels
self.frame_ids = self.ground_truth_labels.frame_id
# images
images_list = unstack_images(
self.images,
self.shapes,
self.batch_size
)
ratio = tf.cast(w_tensor, tf.float32) / tf.cast(h_tensor, tf.float32)
augmented_images = []
augmented_labels = []
for idx, img in enumerate(images_list):
if training:
aug_img, aug_label = outer_augmentations(
img,
self.gt_labels[idx][:, 2:6],
ratio,
experiment_spec.augmentation_config
)
aug_img = tf.image.resize_images(aug_img, [h_tensor, w_tensor])
else:
aug_img, aug_label = img, self.gt_labels[idx][:, 2:6]
aug_img.set_shape([None, None, 3])
aug_img = tf.transpose(aug_img, (2, 0, 1))
aug_label = tf.concat([self.gt_labels[idx][:, 0:2], aug_label], axis=-1)
# filter out bad boxes after augmentation
aug_x1 = aug_label[:, 2]
aug_x2 = aug_label[:, 4]
aug_y1 = aug_label[:, 3]
aug_y2 = aug_label[:, 5]
# only select valid labels
select = tf.logical_and(
aug_x2 - aug_x1 > 1e-3,
aug_y2 - aug_y1 > 1e-3
)
aug_label = tf.boolean_mask(aug_label, select)
augmented_images.append(aug_img)
augmented_labels.append(aug_label)
self.images = tf.stack(augmented_images, axis=0)
num_channels = experiment_spec.augmentation_config.output_channel
# See conversion: https://pillow.readthedocs.io/en/3.2.x/reference/Image.html
bgr_ = tf.reshape(
tf.constant([0.1140, 0.5870, 0.2990], dtype=tf.float32),
(1, 3, 1, 1)
)
# Vis the augmented images in TensorBoard, only rank 0
if rank == 0 and visualizer is not None:
if visualizer.enabled:
if num_channels == 3:
aug_images = self.images
else:
# Project RGB to grayscale
aug_images = tf.reduce_sum(
self.images * bgr_,
axis=1,
keepdims=True
)
aug_images = tf.transpose(aug_images, (0, 2, 3, 1))
_max_box_num = tf.shape(augmented_labels[0])[0]
for _aug_label in augmented_labels[1:]:
_max_box_num = tf.reduce_max(
tf.stack([_max_box_num, tf.shape(_aug_label)[0]], axis=0)
)
_aug_label_list = []
for _aug_label in augmented_labels:
_num_pad = _max_box_num - tf.shape(_aug_label)[0]
_aug_label_list.append(tf.pad(_aug_label, [(0, _num_pad), (0, 0)]))
_aug_label_concat = tf.stack(_aug_label_list, axis=0)[:, :, 2:]
# (xmin, ymin, xmax, ymax) to (ymin, xmin, ymax, xmax)
_aug_label_concat = tf.gather(_aug_label_concat, [1, 0, 3, 2], axis=2)
aug_images = tf.image.draw_bounding_boxes(
aug_images,
_aug_label_concat
)
aug_images = tf.cast(aug_images, tf.uint8)
visualizer.image(
"augmented_images",
aug_images,
data_format="channels_last"
)
self._images = self.images
self.gt_labels = augmented_labels
img_mean = experiment_spec.augmentation_config.image_mean
bb, gg, rr = 103.939, 116.779, 123.68
if img_mean:
if num_channels == 3:
bb, gg, rr = img_mean['b'], img_mean['g'], img_mean['r']
else:
bb, gg, rr = img_mean['l'], img_mean['l'], img_mean['l']
perm = tf.constant([2, 1, 0])
self.images = tf.gather(self.images, perm, axis=1)
self.images -= tf.constant([[[[bb]], [[gg]], [[rr]]]])
if num_channels == 1:
self.images = tf.reduce_sum(self.images * bgr_, axis=1, keepdims=True)
self.encoded_labels = self.gt_labels
if self.label_encoder is not None:
self.encoded_labels = self.label_encoder(self.gt_labels)
self.sess = sess
def set_encoder(self, label_encoder):
"""Set a new label encoder for output labels."""
self.encoded_labels = label_encoder(self.gt_labels)
def generator(self):
"""Yields img and label numpy arrays."""
if self.sess is None:
raise ValueError('TF session can not be found. Pass a session to the initializer!')
while True:
img, enc_label, label = self.sess.run(
[self.images, self.encoded_labels, self.gt_labels]
)
yield img, enc_label, label
def get_array(self):
'''get the array for a batch.'''
return self.sess.run([self.images, self.gt_labels])
def get_array_and_frame_ids(self):
'''get the array and frame IDs for a batch.'''
return self.sess.run([self.frame_ids, self.images, self.gt_labels])
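# Illustrative usage sketch (hypothetical spec object and session handling): constructing the
# pipe with a TF session so that get_array()/generator() can return numpy batches. The shape
# constants are placeholders, and depending on the loader internals additional initializers or
# hooks may be required beyond the two shown here.
def _example_data_pipe(experiment_spec):
    h_tensor = tf.constant(384, dtype=tf.int32)
    w_tensor = tf.constant(1248, dtype=tf.int32)
    sess = tf.Session()
    pipe = YOLOv3DataPipe(
        experiment_spec,
        label_encoder=None,
        training=True,
        sess=sess,
        h_tensor=h_tensor,
        w_tensor=w_tensor
    )
    sess.run(tf.global_variables_initializer())
    sess.run(tf.tables_initializer())
    images, labels = pipe.get_array()
    return images, labels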
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/data_loader/data_loader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""YOLO v3 data augmentations."""
import cv2
import numpy as np
import tensorflow as tf
def aug_hsv_api(img, h=0.1, s=1.5, v=1.5):
"""Apply HSV augmentation using tf.image.
Args:
img: HWC RGB image
h (float): Change hue at most h * 180
s, v (float): change sv at most s, v, 1/s, 1/v times
Returns:
aug_img: HWC RGB img after augmentation
"""
img = tf.image.random_hue(img, h/2)
img = tf.image.random_saturation(img, 1.0/s, s)
img = tf.image.random_brightness(img, v)
return img
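# Minimal usage sketch of the HSV augmentation above (illustrative only; the random image below
# is a stand-in for a decoded frame, and the h/s/v values mirror the defaults).
def _example_aug_hsv_api():
    img = tf.random.uniform([416, 416, 3], minval=0.0, maxval=1.0)
    return aug_hsv_api(img, h=0.1, s=1.5, v=1.5)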
def aug_hsv(img, h=0.1, s=1.5, v=1.5, depth=8):
"""Apply HSV augmentation.
Args:
img: HWC RGB image
h (float): Change hue at most h * 180
s, v (float): change sv at most s, v, 1/s, 1/v times
depth(int): Number of bits per pixel per channel of the image
Returns:
aug_img: HWC RGB img after augmentation
"""
def rand_inv(x):
return tf.cond(
tf.random.uniform([]) < 0.5,
true_fn=lambda: x,
false_fn=lambda: 1.0 / x
)
max_limit = (2 ** depth - 1)
sv_mul = tf.random.uniform([2]) * tf.constant([s - 1.0, v - 1.0]) + 1.0
sv_mul = tf.reshape(tf.map_fn(rand_inv, sv_mul), (1, 1, 2))
hsv = tf.image.rgb_to_hsv(img / max_limit) * tf.constant([180., max_limit, max_limit])
hsv = tf.concat(
[
hsv[..., 0:1] + (tf.random.uniform([]) * 2. - 1.) * h * 180.,
hsv[..., 1:] * sv_mul
],
axis=-1
)
hsv = tf.cast(tf.math.round(hsv), tf.int32)
hsv = tf.concat(
[tf.math.floormod(hsv[..., 0:1], 180),
tf.clip_by_value(hsv[..., 1:], 0, max_limit)],
axis=-1
)
hsv = tf.cast(hsv, tf.float32)
return tf.image.hsv_to_rgb(hsv * tf.constant([1/180., 1/max_limit, 1/max_limit])) * max_limit
def random_hflip(image, prob, seed):
"""Random horizontal flip.
Args:
image(Tensor): The input image in (H, W, C).
prob(float): The probability for horizontal flip.
seed(int): The random seed.
Returns:
out_images(Tensor): The output image.
flipped(boolean Tensor): A boolean scalar tensor to indicate whether flip is
applied or not. This can be used to manipulate the labels accordingly.
"""
val = tf.random.uniform([], maxval=1.0, seed=seed)
is_flipped = tf.less_equal(val, prob)
# flip and to CHW
flipped_image = tf.image.flip_left_right(image)
out_images = tf.cond(
is_flipped,
true_fn=lambda: flipped_image,
false_fn=lambda: image
)
return out_images, is_flipped
def hflip_bboxes(boxes, flipped, xmax=1.0):
"""Flip the bboxes horizontally.
Args:
boxes(Tensor): (N, 4) shaped bboxes in [x1, y1, x2, y2] normalized coordinates.
Returns:
out_boxes(Tensor): horizontally flipped boxes.
"""
# x1 becomes new x2, while x2 becomes new x1
# (N,)
x1_new = xmax - boxes[:, 2]
x2_new = xmax - boxes[:, 0]
# (N, 4)
flipped_boxes = tf.stack([x1_new, boxes[:, 1], x2_new, boxes[:, 3]], axis=1)
out_boxes = tf.cond(
flipped,
true_fn=lambda: flipped_boxes,
false_fn=lambda: boxes
)
return out_boxes
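# Worked example of the flip above (illustrative only): with normalized boxes and xmax=1.0,
# a box (x1=0.1, y1=0.2, x2=0.4, y2=0.6) maps to (0.6, 0.2, 0.9, 0.6) when flipped is True.
def _example_hflip_bboxes():
    boxes = tf.constant([[0.1, 0.2, 0.4, 0.6]])
    flipped = tf.constant(True)
    return hflip_bboxes(boxes, flipped, xmax=1.0)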
def aug_hflip(img, gt_labels, prob=0.5, xmax=1.0):
"""random horizontal flip of image and bboxes."""
img, flipped = random_hflip(img, prob, 42)
# x1, y1, x2, y2
flipped_boxes = hflip_bboxes(gt_labels, flipped, xmax=xmax)
return img, flipped_boxes
def _aug_flip_np(img, boxes, ftype=1):
"""Apply flip.
Args:
img: RGB image in numpy array
boxes: (N, 4) numpy arrays (xmin, ymin, xmax, ymax) containing bboxes. {x,y}{min,max} is
in [0, 1] range.
ftype (0 or 1): 0: vertical flip. 1: horizontal flip
Returns:
aug_img: img after flip
aug_boxes: boxes after flip
"""
if ftype == 0:
ymin = 1.0 - boxes[:, 3]
ymax = 1.0 - boxes[:, 1]
xmin = boxes[:, 0]
xmax = boxes[:, 2]
elif ftype == 1:
ymin = boxes[:, 1]
ymax = boxes[:, 3]
xmin = 1.0 - boxes[:, 2]
xmax = 1.0 - boxes[:, 0]
else:
raise ValueError("Use ftype 0 for vertical flip and 1 for horizontal flip.")
return cv2.flip(img, ftype), np.stack([xmin, ymin, xmax, ymax], axis=-1)
def aug_flip_np(img, boxes):
"""aug flip np."""
if np.random.rand() < 0.5:
img = np.clip(img, 0., 255.).astype(np.uint8)
img, boxes = _aug_flip_np(img, boxes)
img = img.astype(np.float32)
return img, boxes
def _update_dx_wide(w, h, ratio, dl, dr, dt, db):
# first try to decrease new_width
ar_w = h * ratio
dw = w - ar_w
# narrow from two sides
l_shift = -tf.minimum(dl, 0.)
r_shift = -tf.minimum(dr, 0.)
lr_shift = tf.minimum(tf.minimum(l_shift, r_shift), dw / 2.0)
dl += lr_shift
dr += lr_shift
dw -= 2. * lr_shift
l_shift = tf.cond(
tf.logical_and(dl < 0., 0. < dw),
true_fn=lambda: tf.minimum(dw, -dl),
false_fn=lambda: tf.constant(0.)
)
dl += l_shift
dw -= l_shift
r_shift = tf.cond(
tf.logical_and(dr < 0., 0. < dw),
true_fn=lambda: tf.minimum(dw, -dr),
false_fn=lambda: tf.constant(0.)
)
dr += r_shift
dw -= r_shift
# if doesn't work, increase new_height
dh = tf.cond(
dw > 0.,
true_fn=lambda: dw / ratio,
false_fn=lambda: tf.constant(0., dtype=tf.float32)
)
dt = tf.cond(
dw > 0.,
true_fn=lambda: dt - dh / 2.,
false_fn=lambda: dt
)
db = tf.cond(
dw > 0.,
true_fn=lambda: db - dh / 2.,
false_fn=lambda: db
)
return dl, dr, dt, db
def _update_dx_tall(w, h, ratio, dl, dr, dt, db):
# first try to decrease new_height
ar_h = w / ratio
dh = h - ar_h
# narrow from two sides
t_shift = -tf.minimum(dt, 0.)
b_shift = -tf.minimum(db, 0.)
tb_shift = tf.minimum(tf.minimum(t_shift, b_shift), dh / 2.0)
dt += tb_shift
db += tb_shift
dh -= 2 * tb_shift
t_shift = tf.cond(
tf.logical_and(dt < 0., 0. < dh),
true_fn=lambda: tf.minimum(dh, -dt),
false_fn=lambda: tf.constant(0.)
)
dt += t_shift
dh -= t_shift
b_shift = tf.cond(
tf.logical_and(db < 0., 0. < dh),
true_fn=lambda: tf.minimum(dh, -db),
false_fn=lambda: tf.constant(0.)
)
db += b_shift
dh -= b_shift
# If doesn't work, increase new_width
dw = tf.cond(
dh > 0.,
true_fn=lambda: dh * ratio,
false_fn=lambda: tf.constant(0., dtype=tf.float32)
)
dl = tf.cond(
dh > 0.,
true_fn=lambda: dl - dw / 2.,
false_fn=lambda: dl
)
dr = tf.cond(
dh > 0.,
true_fn=lambda: dr - dw / 2.,
false_fn=lambda: dr
)
return dl, dr, dt, db
def _update_dx_combined(w, h, ratio, dl, dr, dt, db):
dl, dr, dt, db = tf.cond(
w / h > ratio,
true_fn=lambda: _update_dx_wide(w, h, ratio, dl, dr, dt, db),
false_fn=lambda: _update_dx_tall(w, h, ratio, dl, dr, dt, db)
)
return dl, dr, dt, db
def aug_jitter_single_image(img, boxes, jitter=0.3, resize_ar=None):
"""Apply YOLO style jitter.
See https://stackoverflow.com/questions/55038726
Args:
img: HWC RGB image, 0-255
boxes: (N, 4) numpy arrays (xmin, ymin, xmax, ymax) containing bboxes. {x,y}{min,max} is
in [0, 1] range.
jitter (0, 1): jitter value
resize_ar (float): network input width / height. Jitter will try to mimic this
Returns:
aug_img: img after jitter
aug_boxes: boxes after jitter
"""
# -jitter ~ jitter rand
jt = tf.minimum((tf.random.uniform([4]) - 0.5) * 2 * jitter, 0.8)
dl, dt, dr, db = tf.unstack(jt, axis=0)
# make sure the result image is not too small
cond1 = dl + dr > 0.8
dr = tf.cond(
cond1,
true_fn=lambda: tf.minimum(dr, 0.4),
false_fn=lambda: dr
)
dl = tf.cond(
cond1,
true_fn=lambda: tf.minimum(dl, 0.4),
false_fn=lambda: dl
)
cond2 = dt + db > 0.8
dt = tf.cond(
cond2,
true_fn=lambda: tf.minimum(dt, 0.4),
false_fn=lambda: dt
)
db = tf.cond(
cond2,
true_fn=lambda: tf.minimum(db, 0.4),
false_fn=lambda: db
)
h = tf.cast(tf.shape(img)[0], tf.float32)
w = tf.cast(tf.shape(img)[1], tf.float32)
dl *= w
dr *= w
dt *= h
db *= h
new_width = w - dl - dr
new_height = h - dt - db
dl, dr, dt, db = _update_dx_combined(
w, h, resize_ar, dl, dr, dt, db
)
new_width = w - dl - dr
new_height = h - dt - db
# new image left top corner [dl, dt], height / width [new_height, new_width]
# old image left top corner [0, 0], height/width [h, w]
dl = tf.cast(tf.math.round(dl), tf.int32)
dt = tf.cast(tf.math.round(dt), tf.int32)
new_height = tf.cast(tf.math.round(new_height), tf.int32)
new_width = tf.cast(tf.math.round(new_width), tf.int32)
joint_l_on_img = tf.maximum(dl, 0)
joint_t_on_img = tf.maximum(dt, 0)
joint_r_on_img = tf.minimum(new_width + dl, tf.cast(w, tf.int32))
joint_b_on_img = tf.minimum(new_height + dt, tf.cast(h, tf.int32))
h_idx = tf.range(joint_t_on_img - dt, joint_b_on_img - dt, delta=1)
w_idx = tf.range(joint_l_on_img - dl, joint_r_on_img - dl, delta=1)
h_idx, w_idx = tf.meshgrid(h_idx, w_idx)
h_idx = tf.reshape(tf.transpose(h_idx), (-1,))
w_idx = tf.reshape(tf.transpose(w_idx), (-1,))
# (k, 2)
indices = tf.stack([h_idx, w_idx], axis=1)
# (k, 3)
updates = tf.reshape(
img[joint_t_on_img:joint_b_on_img, joint_l_on_img:joint_r_on_img, :],
(-1, 3)
)
# (H, W, 3)
shape = tf.stack([new_height, new_width, 3], axis=0)
new_img = tf.scatter_nd(
indices,
updates,
shape
)
# replace all other pixels with mean pixels
mean_img = tf.reduce_mean(img, axis=(0, 1), keepdims=True)
new_img += mean_img
# (k, 3)
neg_mean = -1 * tf.broadcast_to(
tf.reshape(mean_img, (1, 3)),
tf.stack([tf.shape(indices)[0], 3])
)
new_img_delta = tf.scatter_nd(
indices,
neg_mean,
tf.shape(new_img)
)
new_img = new_img + new_img_delta
xmin = (boxes[:, 0] * w - tf.cast(dl, tf.float32)) / tf.cast(new_width, tf.float32)
xmax = (boxes[:, 2] * w - tf.cast(dl, tf.float32)) / tf.cast(new_width, tf.float32)
ymin = (boxes[:, 1] * h - tf.cast(dt, tf.float32)) / tf.cast(new_height, tf.float32)
ymax = (boxes[:, 3] * h - tf.cast(dt, tf.float32)) / tf.cast(new_height, tf.float32)
augmented_boxes = tf.stack([xmin, ymin, xmax, ymax], axis=-1)
augmented_boxes = tf.clip_by_value(augmented_boxes, 0., 1.)
return new_img, augmented_boxes
def _aug_jitter(img, boxes, resize_ar, jitter):
"""Apply YOLO style jitter.
See https://stackoverflow.com/questions/55038726
Args:
img: RGB image in numpy array
boxes: (N, 4) numpy arrays (xmin, ymin, xmax, ymax) containing bboxes. {x,y}{min,max} is
in [0, 1] range.
jitter (0, 1): jitter value
resize_ar (float): network input width / height. Jitter will try to mimic this
Returns:
aug_img: img after jitter
aug_boxes: boxes after jitter
"""
# -jitter ~ jitter rand
img = np.array(img)
boxes = np.array(boxes)
dl, dt, dr, db = np.minimum((np.random.rand(4) - 0.5) * 2 * jitter, 0.8)
# make sure the result image is not too small
if dl + dr > 0.8:
dr = min(dr, 0.4)
dl = min(dl, 0.4)
if dt + db > 0.8:
dt = min(dt, 0.4)
db = min(db, 0.4)
h, w, _ = img.shape
dl *= w
dr *= w
dt *= h
db *= h
new_width = w - dl - dr
new_height = h - dt - db
if resize_ar is not None:
if w / float(h) > resize_ar:
# first try to decrease new_width
ar_w = h * resize_ar
dw = w - ar_w
# narrow from two sides
l_shift = -min(dl, 0)
r_shift = -min(dr, 0)
lr_shift = min(l_shift, r_shift, dw / 2.0)
dl += lr_shift
dr += lr_shift
dw -= 2 * lr_shift
if dl < 0 < dw:
l_shift = min(dw, -dl)
dl += l_shift
dw -= l_shift
if dr < 0 < dw:
r_shift = min(dw, -dr)
dr += r_shift
dw -= r_shift
# if doesn't work, increase new_height
if dw > 0:
dh = dw / resize_ar
dt -= dh / 2.0
db -= dh / 2.0
else:
# first try to decrease new_height
ar_h = w / resize_ar
dh = h - ar_h
# narrow from two sides
t_shift = -min(dt, 0)
b_shift = -min(db, 0)
tb_shift = min(t_shift, b_shift, dh / 2.0)
dt += tb_shift
db += tb_shift
dh -= 2 * tb_shift
if dt < 0 < dh:
t_shift = min(dh, -dt)
dt += t_shift
dh -= t_shift
if db < 0 < dh:
b_shift = min(dh, -db)
db += b_shift
dh -= b_shift
# If doesn't work, increase new_width
if dh > 0:
dw = dh * resize_ar
dl -= dw / 2.0
dr -= dw / 2.0
new_width = w - dl - dr
new_height = h - dt - db
# new image left top corner [dl, dt], height / width [new_height, new_width]
# old image left top corner [0, 0], height/width [h, w]
dl = np.round(dl).astype(np.int32)
dt = np.round(dt).astype(np.int32)
new_height = np.round(new_height).astype(np.int32)
new_width = np.round(new_width).astype(np.int32)
joint_l_on_img = np.maximum(dl, 0)
joint_t_on_img = np.maximum(dt, 0)
joint_r_on_img = np.minimum(new_width + dl, w)
joint_b_on_img = np.minimum(new_height + dt, h)
new_img = np.zeros((new_height, new_width, 3), dtype=np.float)
new_img += np.mean(img, axis=(0, 1), keepdims=True)
new_img[joint_t_on_img - dt:joint_b_on_img - dt,
joint_l_on_img - dl:joint_r_on_img - dl, :] = \
img[joint_t_on_img:joint_b_on_img, joint_l_on_img:joint_r_on_img, :].astype(np.float)
xmin = (boxes[:, 0] * w - dl) / new_width
xmax = (boxes[:, 2] * w - dl) / new_width
ymin = (boxes[:, 1] * h - dt) / new_height
ymax = (boxes[:, 3] * h - dt) / new_height
return new_img, np.stack([xmin, ymin, xmax, ymax], axis=-1)
def aug_jitter(img, boxes, resize_ar):
"""aug jitter in numpy."""
img, boxes = _aug_jitter(img, boxes, resize_ar, 0.3)
boxes = np.clip(boxes, 0., 1.)
return img, boxes
def aug_letterbox_resize(img, boxes, resize_shape):
"""Apply letter box. resize image to resize_shape, not changing aspect ratio.
Args:
img: RGB image in numpy array
boxes: (N, 4) numpy arrays (xmin, ymin, xmax, ymax) containing bboxes. {x,y}{min,max} is
in [0, 1] range.
resize_shape (int, int): (w, h) of new image
Returns:
aug_img: img after resize
aug_boxes: boxes after resize
"""
resize_shape_f = tf.cast(resize_shape, tf.float32)
new_img = tf.zeros((resize_shape[1], resize_shape[0], 3), dtype=tf.float32)
mean_img = tf.reduce_mean(img, axis=(0, 1), keepdims=True)
new_img += mean_img
h = tf.cast(tf.shape(img)[0], tf.float32)
w = tf.cast(tf.shape(img)[1], tf.float32)
ratio = tf.reduce_min([resize_shape_f[1] / h, resize_shape_f[0] / w])
new_h = tf.cast(tf.math.round(ratio * h), tf.int32)
new_w = tf.cast(tf.math.round(ratio * w), tf.int32)
l_shift = (resize_shape[0] - new_w) // 2
t_shift = (resize_shape[1] - new_h) // 2
img = tf.image.resize_images(img, [new_h, new_w])
# copy-paste img to new_img
h_idx = tf.range(t_shift, t_shift+new_h, delta=1)
w_idx = tf.range(l_shift, l_shift+new_w, delta=1)
h_idx, w_idx = tf.meshgrid(h_idx, w_idx)
h_idx = tf.reshape(tf.transpose(h_idx), (-1,))
w_idx = tf.reshape(tf.transpose(w_idx), (-1,))
# (k, 2)
indices = tf.stack([h_idx, w_idx], axis=1)
# (k, 3)
updates = tf.reshape(img, (-1, 3))
new_img_scattered = tf.scatter_nd(
indices,
updates,
tf.shape(new_img)
)
new_img += new_img_scattered
neg_mean = -1 * tf.broadcast_to(
tf.reshape(mean_img, (1, 3)),
tf.stack([tf.shape(indices)[0], 3])
)
new_img_delta = tf.scatter_nd(
indices,
neg_mean,
tf.shape(new_img)
)
new_img += new_img_delta
new_w_f = tf.cast(new_w, tf.float32)
new_h_f = tf.cast(new_h, tf.float32)
l_shift_f = tf.cast(l_shift, tf.float32)
t_shift_f = tf.cast(t_shift, tf.float32)
xmin = (boxes[:, 0] * new_w_f + l_shift_f) / resize_shape_f[0]
xmax = (boxes[:, 2] * new_w_f + l_shift_f) / resize_shape_f[0]
ymin = (boxes[:, 1] * new_h_f + t_shift_f) / resize_shape_f[1]
ymax = (boxes[:, 3] * new_h_f + t_shift_f) / resize_shape_f[1]
return new_img, tf.stack([xmin, ymin, xmax, ymax], axis=-1)
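# Worked example of the letterbox math above (illustrative only): a 480 x 640 (H x W) image
# resized into a (416, 416) canvas uses ratio = min(416/480, 416/640) = 0.65, giving a
# 312 x 416 resized image pasted with t_shift = 52 and l_shift = 0; the remaining rows are
# filled with the per-channel mean of the source image. The input values are placeholders.
def _example_letterbox():
    img = tf.random.uniform([480, 640, 3], minval=0.0, maxval=255.0)
    boxes = tf.constant([[0.25, 0.25, 0.75, 0.75]])
    return aug_letterbox_resize(img, boxes, resize_shape=(416, 416))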
def apply_letterbox_resize(image, gt_labels, target_shape):
"""apply letterbox resize."""
return aug_letterbox_resize(image, gt_labels, target_shape)
def inner_augmentations(image, gt_labels, ratio, xmax, augmentation_config):
"""yolo v3 augmentations inside tf.data.
Args:
image: HWC RGB image.
gt_labels: groundtruth boxes for the image in (xmin, ymin, xmax, ymax) format.
ratio: network input width / height ratio (not used by these inner augmentations).
xmax: maximum x coordinate used to mirror boxes during the horizontal flip.
augmentation_config: YOLO v3 augmentation config.
Returns:
augmented images and gt_labels.
"""
# augmentation pipelines, applied on HWC images
image_depth = int(augmentation_config.output_depth) or 8
if image_depth == 8:
image = aug_hsv_api(
image,
augmentation_config.hue,
augmentation_config.saturation,
augmentation_config.exposure,
)
else:
image = aug_hsv(
image,
augmentation_config.hue,
augmentation_config.saturation,
augmentation_config.exposure,
image_depth
)
image, gt_labels = aug_hflip(
image,
gt_labels,
prob=augmentation_config.horizontal_flip,
xmax=xmax
)
return image, gt_labels
def outer_augmentations(image, gt_labels, ratio, augmentation_config):
"""yolo v3 augmentations outside of tf.data.
Args:
image: HWC RGB image.
gt_labels: (#gt, 4) normalized boxes in (xmin, ymin, xmax, ymax) format.
ratio: network input width / height, passed to the jitter augmentation as resize_ar.
augmentation_config: YOLO v3 augmentation config.
Returns:
augmented images and gt_labels.
"""
# augmentation pipelines, applied on HWC images
image, gt_labels = aug_jitter_single_image(
image,
gt_labels,
jitter=augmentation_config.jitter,
resize_ar=ratio
)
return image, gt_labels
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/data_loader/augmentation.py |
tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/data_loader/__init__.py |
|
"""Generate image shape tensors for multi-scale training."""
import numpy as np
import tensorflow as tf
def global_var_with_init(init_value):
"""global variable with initialization."""
with tf.variable_scope("global_step", reuse=tf.AUTO_REUSE):
v = tf.get_variable(
"global_step_var",
trainable=False,
dtype=tf.int32,
initializer=init_value
)
return v
def gen_random_shape_tensors(
T,
h_min,
h_max,
w_min,
w_max
):
"""Generate random tensors for multi-scale training."""
# make sure the output shape is a multiple of 32 to
# align feature map shapes for UpSampling2D and Concatenate
divider = 32
h_min = h_min / divider
h_max = h_max / divider
w_min = w_min / divider
w_max = w_max / divider
# random size: uniform distribution in [size_min, size_max]
rand_h = tf.cast(
h_min + tf.random.uniform([]) * (h_max - h_min),
tf.int32
)
rand_w = tf.cast(
w_min + tf.random.uniform([]) * (w_max - w_min),
tf.int32
)
# moving sum to repeat the size T times
h_buffer = tf.Variable(
np.zeros((T,), dtype=np.int32),
trainable=False,
dtype=tf.int32
)
w_buffer = tf.Variable(
np.zeros((T,), dtype=np.int32),
trainable=False,
dtype=tf.int32
)
# global step
global_step = global_var_with_init(-1)
assign_gstep = tf.assign(global_step, global_step + 1)
with tf.control_dependencies([assign_gstep]):
# upsampled random size
rand_h = tf.cond(
tf.equal(tf.math.floormod(global_step, T), 0),
true_fn=lambda: rand_h,
false_fn=lambda: tf.zeros([], dtype=tf.int32)
)
rand_w = tf.cond(
tf.equal(tf.math.floormod(global_step, T), 0),
true_fn=lambda: rand_w,
false_fn=lambda: tf.zeros([], dtype=tf.int32)
)
h_buffer_updated = tf.concat(
[h_buffer[1:], [rand_h]],
axis=-1
)
w_buffer_updated = tf.concat(
[w_buffer[1:], [rand_w]],
axis=-1
)
assign_h_buffer = tf.assign(h_buffer, h_buffer_updated)
assign_w_buffer = tf.assign(w_buffer, w_buffer_updated)
with tf.control_dependencies([assign_h_buffer, assign_w_buffer]):
repeated_rand_hsize = tf.reduce_sum(h_buffer, axis=-1)
repeated_rand_wsize = tf.reduce_sum(w_buffer, axis=-1)
rh = repeated_rand_hsize * divider
rw = repeated_rand_wsize * divider
return tf.reshape(rh, []), tf.reshape(rw, [])
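if __name__ == "__main__":
    # Illustrative demo only -- not part of the original module. A minimal sketch of
    # how the generated shape tensors behave: the same random multiple-of-32 size is
    # repeated for T consecutive steps before a new size is drawn. Assumes a TF1
    # session environment and no other graph already in use.
    rh, rw = gen_random_shape_tensors(T=3, h_min=320, h_max=608, w_min=320, w_max=608)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for step in range(6):
            h, w = sess.run([rh, rw])
            print("step %d: target shape = (%d, %d)" % (step, h, w))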
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/data_loader/generate_shape_tensors.py |
tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/metric/__init__.py |
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unified eval and mAP callback."""
import sys
from keras import backend as K
import numpy as np
from tqdm import trange
from nvidia_tao_tf1.cv.common.callbacks.base_metric_callback import BaseMetricCallback
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
class YOLOv3MetricCallback(BaseMetricCallback):
'''
Callback function to calculate model mAP / validation loss per k epoch.
Args:
ap_evaluator: object of class APEvaluator.
built_eval_model: eval model built with additional layers for encoded output AND bbox
output (model requires two outputs!!!)
eval_sequence: Eval data sequence (based on keras sequence) that gives images, labels.
labels is list (batch_size) of tuples (encoded_label, raw_label)
loss_ops: three element tuple or list. [gt_placeholder, pred_placeholder, loss]
eval_model: the training graph part of built_eval_model. Note, this model must share
TF nodes with built_eval_model
metric_interval: calculate model mAP per k epoch
        verbose: True if you want to print the AP message.
'''
def __init__(
self,
ap_evaluator,
built_eval_model,
generator,
classes,
n_batches,
loss_ops,
*args,
**kwargs
):
"""Init function."""
super().__init__(*args, **kwargs)
self.ap_evaluator = ap_evaluator
self.built_eval_model = built_eval_model
self.generator = generator
self.classes = classes
self.n_batches = n_batches
self.loss_ops = loss_ops
def _skip_metric(self, logs):
for i in self.classes:
logs['AP_' + i] = np.nan
logs['mAP'] = np.nan
logs['validation_loss'] = np.nan
def _calc_metric(self, logs):
total_loss = 0.0
gt_labels = []
pred_labels = []
if self.verbose:
tr = trange(self.n_batches, file=sys.stdout)
tr.set_description('Producing predictions')
else:
tr = range(self.n_batches)
# Loop over all batches.
for _ in tr:
# Generate batch.
batch_X, encoded_lab, gt_lab = next(self.generator)
# Predict.
y_pred_encoded, y_pred = self.built_eval_model.predict(batch_X)
batch_loss = K.get_session().run(self.loss_ops[2],
feed_dict={self.loss_ops[0]: np.array(encoded_lab),
self.loss_ops[1]: y_pred_encoded})
total_loss += np.sum(batch_loss) * len(gt_lab)
gt_labels.extend(gt_lab)
for i in range(len(y_pred)):
y_pred_valid = y_pred[i][y_pred[i][:, 1] > self.ap_evaluator.conf_thres]
pred_labels.append(y_pred_valid)
logs['validation_loss'] = total_loss / len(gt_labels)
m_ap, ap = self.ap_evaluator(gt_labels, pred_labels, verbose=self.verbose)
if self.verbose:
print("*******************************")
for i in range(len(ap)):
logs['AP_' + self.classes[i]] = ap[i]
if self.verbose:
print("{:<14}{:<6}{}".format(self.classes[i], 'AP', round(ap[i], 5)))
if self.verbose:
print("{:<14}{:<6}{}".format('', 'mAP', round(m_ap, 5)))
print("*******************************")
print("Validation loss:", logs['validation_loss'])
logs['mAP'] = m_ap
graphical_data = {
"validation loss": round(logs['validation_loss'], 8),
"mean average precision": round(logs['mAP'], 5)
}
s_logger = status_logging.get_status_logger()
if isinstance(s_logger, status_logging.StatusLogger):
s_logger.graphical = graphical_data
s_logger.write(
status_level=status_logging.Status.RUNNING,
message="Evaluation metrics generated."
)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/metric/yolov3_metric_callback.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple script to test routines written in utils.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import keras
from keras import backend as K
import numpy as np
import pytest
import tensorflow as tf
from nvidia_tao_tf1.core.templates.googlenet import GoogLeNet
from nvidia_tao_tf1.core.templates.resnet import ResNet
from nvidia_tao_tf1.core.templates.vgg import VggNet
from nvidia_tao_tf1.cv.common.utils import decode_to_keras, encode_from_keras
class TestUtils(object):
"""Class to test utils.py."""
def _set_tf_session(self):
        # Restricting the number of GPUs to be used.
gpu_id = str(0)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = gpu_id
K.set_session(tf.Session(config=config))
def _get_googlenet(self, nlayers, input_shape, all_projections=False,
use_batch_norm=False, use_pooling=False, data_format='channels_first'):
inputs = keras.layers.Input(shape=input_shape)
return GoogLeNet(inputs=inputs,
use_batch_norm=use_batch_norm,
data_format=data_format)
def _get_resnet(self, nlayers, input_shape, all_projections=True,
use_batch_norm=False, use_pooling=False, data_format='channels_first'):
inputs = keras.layers.Input(shape=input_shape)
return ResNet(nlayers, inputs, all_projections=all_projections,
use_batch_norm=use_batch_norm,
use_pooling=use_pooling,
data_format=data_format)
def _get_vgg(self, nlayers, input_shape, all_projections=False,
use_batch_norm=False, use_pooling=False, data_format='channels_first'):
inputs = keras.layers.Input(shape=input_shape)
return VggNet(nlayers, inputs,
use_batch_norm=use_batch_norm,
use_pooling=use_pooling,
data_format=data_format)
def _create_model(self, model_template, nlayers=None, use_pooling=False, use_batch_norm=False,
input_shape=(3, 224, 224), data_format="channels_first",
instantiation_mode='detector'):
self._set_tf_session()
# Constructing a dictionary for feature extractor templates.
self.model_choose = {"ResNet": self._get_resnet,
"VggNet": self._get_vgg,
"GoogLeNet": self._get_googlenet}
# Choosing the feature extractor template.
if model_template in self.model_choose.keys():
keras_model = self.model_choose[model_template](nlayers, input_shape,
use_batch_norm=use_batch_norm,
use_pooling=use_pooling,
data_format=data_format)
else:
raise NotImplementedError('Unsupported model template: {}'.format(model_template))
# Hooking the model to the respective outputs.
x = keras_model.outputs[0]
# Appending the outputs based on the instantiation mode.
if instantiation_mode == "classifier":
x = keras.layers.Flatten()(x)
x = keras.layers.Dense(10, activation='softmax',
name='output')(x)
outputs = [x]
elif instantiation_mode == 'detector':
x1 = keras.layers.Conv2D(filters=1,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
name='conv2d_cov')(x)
x2 = keras.layers.Conv2D(filters=4,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
name='conv2d_bbox')(x)
outputs = [x1, x2]
else:
raise NotImplementedError("Unknown instantiation mode: {}".format(instantiation_mode))
# Generate final keras model.
keras_model = keras.models.Model(inputs=keras_model.inputs, outputs=outputs)
return keras_model
@pytest.mark.skipif(os.getenv("RUN_ON_CI", "0") == "1", reason="Cannot be run on CI")
@pytest.mark.parametrize("model, nlayers, data_format, use_batch_norm, input_shape,"
"use_pooling, instantiation_mode, enc_key",
[
("ResNet", 10, 'channels_first', False, (3, 128, 64), True,
'classifier', 'th0@!#$(@*#$)is'),
("ResNet", 18, 'channels_first', True, (3, 128, 64), False,
'detector', '0@!#$(@*#$)'),
("VggNet", 16, 'channels_last', False, (3, 128, 64), False,
'classifier', '0@!#$(@*#$)')
])
def test_encode_decode(self, model, nlayers, data_format, use_batch_norm,
input_shape, use_pooling, instantiation_mode, enc_key):
"""Simple function to test encode and decode wrappers.
Args:
model (str): Name of the model template.
nlayers (int): Number of layers for Resnet-xx and Vgg-xx models. This parameter
may be set to None for fixed templates.
data_format (str): Setting keras backend data format to 'channels_first' and
'channels_last'.
use_batch_norm (bool): Flag to enable or disable batch norm.
input_shape (tuple): Shape of the keras model input.
use_pooling (bool): Flag to enable or disable pooling.
instantiation_mode (str): The mode to define the models. Two types of instantiations
are allowed.
1. classification: Here the output is a single dense layer of nclasses = 10
2. detection: Here there are two outputs.
a. output_cov: number of filters = 1
b. output_bbox: number of filters = 4
            enc_key (str): Key for encryption and decryption.
"""
enc_key = str.encode(enc_key)
os_handle, output_file_name = tempfile.mkstemp()
os.close(os_handle)
# Defining the keras model for testing.
keras_model = self._create_model(model, nlayers,
use_pooling=use_pooling,
input_shape=input_shape,
use_batch_norm=use_batch_norm,
data_format=data_format,
instantiation_mode=instantiation_mode)
# Encrypting the model.
encode_from_keras(keras_model, output_file_name, enc_key)
# Decrypting the above encrypted model.
decoded_model = decode_to_keras(output_file_name, enc_key)
# Extracting the data format parameter to detect input shape.
data_format = decoded_model.layers[1].data_format
# Computing shape of input tensor.
image_shape = decoded_model.layers[0].input_shape[1:4]
# Create an input image.
test_image = np.random.randn(image_shape[0], image_shape[1], image_shape[2])
test_image.shape = (1, ) + test_image.shape
# Infer on both original and decrypted model.
decoded_model_output = decoded_model.predict(test_image, batch_size=1)
original_model_output = keras_model.predict(test_image, batch_size=1)
# Check for equality of inferences.
assert len(decoded_model_output) == len(original_model_output)
for output in range(len(decoded_model_output)):
assert np.array_equal(
decoded_model_output[output],
original_model_output[output]
)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/test_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MagNet pruning wrapper for classification/detection models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from datetime import datetime as dt
import logging
import os
# we have to import keras here although we do not use it at all
# to avoid the circular import error in the two patches below.
# circular import issue: keras -> third_party.keras.mixed_precision -> keras
# TODO(@zhimengf): Ideally, we have to patch the keras patches in the keras __init__.py
# instead of calling third_party.keras.mixed_precision in the iva code base,
# as it is the way in dazel.
import keras # noqa pylint: disable=F401, W0611
from nvidia_tao_tf1.core.pruning.pruning import prune
from nvidia_tao_tf1.core.utils.path_utils import expand_path
import nvidia_tao_tf1.cv.common.no_warning # noqa pylint: disable=W0611
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.common.utils import (
encode_from_keras,
get_model_file_size,
get_num_params,
model_io,
restore_eff
)
from nvidia_tao_tf1.cv.yolo_v4.layers.split import Split
logger = logging.getLogger(__name__)
def build_command_line_parser(parser=None):
"""Build a command line parser for pruning."""
if parser is None:
parser = argparse.ArgumentParser(description="TLT pruning script")
parser.add_argument("-m",
"--model",
type=str,
help="Path to the target model for pruning",
required=True,
default=None)
parser.add_argument("-o",
"--output_file",
type=str,
help="Output file path for pruned model",
required=True,
default=None)
parser.add_argument("--results_dir",
type=str,
default=None,
help="Path to where the status log is generated.")
parser.add_argument('-k',
'--key',
required=False,
type=str,
default="",
help='Key to load a .tlt model')
parser.add_argument('-n',
'--normalizer',
type=str,
default='max',
help="`max` to normalize by dividing each norm by the \
maximum norm within a layer; `L2` to normalize by \
dividing by the L2 norm of the vector comprising all \
kernel norms. (default: `max`)")
parser.add_argument('-eq',
'--equalization_criterion',
type=str,
default='union',
help="Criteria to equalize the stats of inputs to an \
element wise op layer. Options are \
[arithmetic_mean, geometric_mean, union, \
intersection]. (default: `union`)")
parser.add_argument("-pg",
"--pruning_granularity",
type=int,
help="Pruning granularity: number of filters to remove \
at a time. (default:8)",
default=8)
parser.add_argument("-pth",
"--pruning_threshold",
type=float,
help="Threshold to compare normalized norm against \
(default:0.1)", default=0.1)
parser.add_argument("-nf",
"--min_num_filters",
type=int,
help="Minimum number of filters to keep per layer. \
(default:16)", default=16)
parser.add_argument("-el",
"--excluded_layers", action='store',
type=str, nargs='*',
help="List of excluded_layers. Examples: -i item1 \
item2", default=[])
parser.add_argument("-v",
"--verbose",
action='store_true',
help="Include this flag in command line invocation for \
verbose logs.")
return parser
def parse_command_line_arguments(args=None):
"""Parse command line arguments for pruning."""
parser = build_command_line_parser()
return parser.parse_args(args)
def run_pruning(args=None):
"""Prune an encrypted Keras model."""
# Set up logger verbosity.
verbosity = 'INFO'
if args.verbose:
verbosity = 'DEBUG'
# Configure the logger.
logging.basicConfig(
format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level=verbosity
)
results_dir = args.results_dir
if results_dir is not None:
if not os.path.exists(expand_path(results_dir)):
os.makedirs(expand_path(results_dir))
timestamp = int(dt.timestamp(dt.now()))
filename = "status.json"
if results_dir == "/workspace/logs":
filename = f"status_prune_{timestamp}.json"
status_file = os.path.join(expand_path(results_dir), filename)
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=True
)
)
    assert args.equalization_criterion in \
        ['arithmetic_mean', 'geometric_mean', 'union', 'intersection'], \
        "Equalization criterion must be one of [arithmetic_mean, geometric_mean, " \
        "union, intersection]."
assert args.normalizer in ['L2', 'max'], \
"normalizer options are [L2, max]."
custom_objs = {}
# Decrypt and load the pretrained model
final_model = model_io(
expand_path(args.model),
args.key,
custom_objects=custom_objs,
compile=False
)
if verbosity == 'DEBUG':
# Printing out the loaded model summary
logger.debug("Model summary of the unpruned model:")
logger.debug(final_model.summary())
# Excluded layers for FRCNN
force_excluded_layers = ['rpn_out_class',
'rpn_out_regress',
'dense_class_td',
'dense_regress_td']
# Excluded layers for SSD
force_excluded_layers += ['ssd_conf_0', 'ssd_conf_1', 'ssd_conf_2',
'ssd_conf_3', 'ssd_conf_4', 'ssd_conf_5',
'ssd_loc_0', 'ssd_loc_1', 'ssd_loc_2',
'ssd_loc_3', 'ssd_loc_4', 'ssd_loc_5',
'ssd_predictions']
    # Excluded layers for YOLOv3 / v4
force_excluded_layers += ['conv_big_object', 'conv_mid_object',
'conv_sm_object']
# Excluded layers for RetinaNet
force_excluded_layers += ['retinanet_predictions',
'retinanet_loc_regressor',
'retinanet_conf_regressor']
# For CSPDarkNetTiny backbone
# Cannot prune input layers of Split layer
for layer in final_model.layers:
if type(layer) == Split:
basename = layer.name[:-8]
name = basename + "_conv_0"
force_excluded_layers.append(name)
force_excluded_layers += final_model.output_names
# Pruning trained model
pruned_model = prune(
model=final_model,
method='min_weight',
normalizer=args.normalizer,
criterion='L2',
granularity=args.pruning_granularity,
min_num_filters=args.min_num_filters,
threshold=args.pruning_threshold,
equalization_criterion=args.equalization_criterion,
excluded_layers=args.excluded_layers + force_excluded_layers)
if verbosity == 'DEBUG':
# Printing out pruned model summary
logger.debug("Model summary of the pruned model:")
logger.debug(pruned_model.summary())
pruning_ratio = pruned_model.count_params() / final_model.count_params()
logger.info(
"Pruning ratio (pruned model / original model): {}".format(
pruning_ratio
)
)
# Save the pruned model.
output_file = args.output_file
if not output_file.endswith(".hdf5"):
output_file = f"{output_file}.hdf5"
encode_from_keras(
pruned_model,
output_file,
args.key,
custom_objects=custom_objs
)
s_logger = status_logging.get_status_logger()
s_logger.kpi = {
"pruning_ratio": pruning_ratio,
"size": get_model_file_size(args.output_file),
"param_count": get_num_params(pruned_model)
}
s_logger.write(
message="Pruning ratio (pruned model / original model): {}".format(
pruning_ratio
)
)
def main(args=None):
"""Wrapper function for pruning."""
# parse command line
args = parse_command_line_arguments(args)
run_pruning(args)
if __name__ == "__main__":
main()
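# Illustrative invocation only -- not part of the original module. The exact entry
# point depends on how the TAO/TLT task wrappers expose this script; the flags below
# correspond to the parser defined above:
#
#     python magnet_prune.py -m unpruned_model.hdf5 -o pruned_model.hdf5 \
#         -pth 0.1 -pg 8 -k $KEY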
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/magnet_prune.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IVA Common module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IVA common utils used across all apps."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from functools import lru_cache
import importlib
import io
import logging
import math
from math import exp, log
import os
import sys
import tempfile
from eff.core import Archive
import keras
from keras import backend as K
from keras.optimizers import Adam, RMSprop, SGD
from keras.regularizers import l1, l2
from keras.utils.generic_utils import CustomObjectScope
import numpy as np
from PIL import Image, ImageDraw
import tensorflow as tf
from nvidia_tao_tf1.core.utils import set_random_seed
from nvidia_tao_tf1.core.utils.path_utils import expand_path
from nvidia_tao_tf1.core.templates.utils import mish, swish
from nvidia_tao_tf1.cv.faster_rcnn.layers.custom_layers import (
CropAndResize, NmsInputs,
OutputParser, Proposal,
ProposalTarget, TFReshape
)
from nvidia_tao_tf1.cv.retinanet.initializers.prior_prob import PriorProbability
from nvidia_tao_tf1.cv.retinanet.layers.anchor_box_layer import RetinaAnchorBoxes
from nvidia_tao_tf1.cv.ssd.layers.anchor_box_layer import AnchorBoxes
from nvidia_tao_tf1.cv.yolo_v3.layers.yolo_anchor_box_layer import YOLOAnchorBox
from nvidia_tao_tf1.cv.yolo_v4.layers.bbox_postprocessing_layer import BBoxPostProcessingLayer
from nvidia_tao_tf1.cv.yolo_v4.layers.split import Split
from nvidia_tao_tf1.encoding import encoding
ENCRYPTION_OFF = False
reg_dict = {0: None, 1: l1, 2: l2}
ap_mode_dict = {0: "sample", 1: "integrate"}
CUSTOM_OBJS = {'CropAndResize': CropAndResize,
"NmsInputs": NmsInputs,
'OutputParser': OutputParser,
'Proposal': Proposal,
'ProposalTarget': ProposalTarget,
'TFReshape': TFReshape,
'PriorProbability': PriorProbability,
'RetinaAnchorBoxes': RetinaAnchorBoxes,
'AnchorBoxes': AnchorBoxes,
'YOLOAnchorBox': YOLOAnchorBox,
'BBoxPostProcessingLayer': BBoxPostProcessingLayer,
'swish': swish,
'mish': mish,
# loss is not needed if loaded from utils.
# But the loss output must have gradient in TF1.15
'compute_loss': lambda x, y: K.max(x) - K.max(y),
'Split': Split}
# Define 1MB for filesize calculation.
MB = 1 << 20
@lru_cache()
def hvd_keras():
"""Lazily load and return the (cached) horovod module."""
import horovod.keras as hvd
return hvd
def raise_deprecation_warning(task, subtask, args):
"""Raise a deprecation warning based on the module.
Args:
task (str): The TLT task to be deprecated.
subtask (str): The subtask supported by that task.
args (list): List of arguments to be appended.
Raises:
DeprecationWarning: With the actual command to be run.
"""
if not isinstance(args, list):
raise TypeError("There should a list of arguments.")
args_string = " ".join(args)
new_command = "{} {} {}".format(
task, subtask, args_string
)
raise DeprecationWarning(
"This command has been deprecated in this version of TLT. "
"Please run \n{}".format(new_command)
)
def parse_arguments(cl_args, supported_tasks=None):
"""Parse command line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument('module',
default='classification',
choices=supported_tasks)
args, unknown_args = parser.parse_known_args(cl_args)
args = vars(args)
return args, unknown_args
def initialize(random_seed, hvd, training_precision='float32'):
"""Initialization.
Args:
random_seed: Random_seed in experiment spec.
training_precision: (TrainingPrecision or None) Proto object with FP16/FP32 parameters or
None. None leaves K.floatx() in its previous setting.
"""
setup_keras_backend(training_precision, is_training=True)
# Set Maglev random seed. Take care to give different seed to each process.
seed = random_seed + hvd.rank()
set_random_seed(seed)
def get_num_params(model):
"""Get the number of parameters in a model.
Args:
model(keras.model.Model): Model object to run count params.
Returns:
num_params(int): Number of parameters in a model. Represented
in units per million.
"""
return model.count_params()/1e6
def get_model_file_size(model_path):
"""Get the size of the model.
Args:
model_path (str): UNIX path to the model.
Returns:
file_size (float): File size in MB.
"""
if not os.path.exists(expand_path(model_path)):
raise FileNotFoundError(f"Model file wasn't found at {model_path}")
file_size = os.path.getsize(model_path) / MB
return file_size
def setup_keras_backend(training_precision, is_training):
"""Setup Keras-specific backend settings for training or inference.
Args:
training_precision: (TrainingPrecision or None) Proto object with FP16/FP32 parameters or
None. None leaves K.floatx() in its previous setting.
is_training: (bool) If enabled, Keras is set in training mode.
"""
# Learning phase of '1' indicates training mode -- important for operations
# that behave differently at training/test times (e.g. batch normalization)
if is_training:
K.set_learning_phase(1)
else:
K.set_learning_phase(0)
# Set training precision, if given. Otherwise leave K.floatx() in its previous setting.
# K.floatx() determines how Keras creates weights and casts them (Keras default: 'float32').
if training_precision is not None:
if training_precision == 'float32':
K.set_floatx('float32')
elif training_precision == 'float16':
K.set_floatx('float16')
else:
raise RuntimeError('Invalid training precision selected')
def summary_from_value(tag, value, scope=None):
"""Generate a manual simple summary object with a tag and a value."""
summary = tf.Summary()
summary_value = summary.value.add()
summary_value.simple_value = value
if scope:
summary_value.tag = '{}/{}'.format(scope, tag)
else:
summary_value.tag = tag
return summary
def summary_from_image(
summary,
tag,
value,
box,
scope=None,
img_means=(103.939, 116.779, 123.68),
channels_first=True,
reverse_channels=True,
idx=0
):
"""Summary from single image."""
summary_value = summary.value.add()
summary_value.image.height = value.shape[0]
summary_value.image.width = value.shape[1]
# de-preprocessing to get the INT8 image
img_means = np.array(img_means)
lambda_gray = np.array([0.1140, 0.5870, 0.2990])
if channels_first:
n_channels = value.shape[0]
summary_value.image.colorspace = n_channels
if n_channels == 3:
img_means = img_means.reshape(3, 1, 1)
value = value + img_means
value = value.transpose(1, 2, 0)
if reverse_channels:
value = value[..., [2, 1, 0]]
else:
delta = np.dot(img_means.reshape(1, 3), lambda_gray.reshape(3, 1))
value = value + delta
value = value.transpose(1, 2, 0)
else:
n_channels = value.shape[-1]
summary_value.image.colorspace = n_channels
if n_channels == 3:
img_means = img_means.reshape(1, 1, 3)
value = value + img_means
if reverse_channels:
value = value[..., [2, 1, 0]]
else:
delta = np.dot(img_means.reshape(1, 3), lambda_gray.reshape(3, 1))
value = value + delta
value = value.astype(np.uint8)
image = Image.fromarray(np.squeeze(value))
draw = ImageDraw.Draw(image)
h, w = value.shape[:2]
box = box * np.array([w, h, w, h])
box = box.astype(np.int32)
for b in box:
draw.rectangle(
((b[0], b[1]), (b[2], b[3])),
outline="Black"
)
img_byte_arr = io.BytesIO()
image.save(img_byte_arr, format='PNG')
img_byte_arr = img_byte_arr.getvalue()
summary_value.image.encoded_image_string = img_byte_arr
if scope:
summary_value.tag = '{}/{}/image/{}'.format(scope, tag, idx)
else:
summary_value.tag = '{}/image/{}'.format(tag, idx)
def summary_from_images(
tag,
value,
boxes,
scope=None,
img_means=(103.939, 116.779, 123.68),
channels_first=True,
reverse_channels=True,
max_num=3
):
"""Generate a manual image summary object with a tag and a value."""
summary = tf.Summary()
for idx, img in enumerate(value):
if idx < max_num:
summary_from_image(
summary,
tag,
img,
boxes[idx],
scope,
img_means,
channels_first,
reverse_channels,
idx
)
return summary
def tensorboard_images(
tag,
value,
boxes,
writer,
step,
scope=None,
img_means=(103.939, 116.779, 123.68),
channels_first=True,
reverse_channels=True,
max_num=3
):
"""Vis images in TensorBoard."""
summary = summary_from_images(
tag,
value,
boxes,
scope,
img_means,
channels_first,
reverse_channels,
max_num
)
writer.add_summary(summary, step)
writer.flush()
def encode_from_keras(keras_model, output_filename, enc_key, only_weights=False,
custom_objects=None):
"""A simple function to encode a keras model into magnet export format.
Args:
keras_model (keras.models.Model object): The input keras model to be encoded.
        output_filename (str): The name of the encoded output file.
enc_key (bytes): Byte text to encode the model.
custom_objects(dict): Custom objects for serialization and deserialization.
Returns:
None
"""
# TODO(madil): Ensure switched off for production.
custom_objs = dict()
custom_objs.update(CUSTOM_OBJS)
if custom_objects is not None:
custom_objs.update(custom_objects)
if output_filename.endswith(".hdf5"):
with CustomObjectScope(custom_objs):
if only_weights:
keras_model.save_weights(output_filename)
else:
keras_model.save(output_filename)
return
# Make sure that input model is a keras model object.
if not isinstance(keras_model, keras.models.Model):
raise TypeError("The model should be a keras.models.Model object")
os_handle, temp_file_name = tempfile.mkstemp()
os.close(os_handle)
# Create a temporary model file for the keras model.
with CustomObjectScope(custom_objs):
if only_weights:
keras_model.save_weights(temp_file_name)
else:
keras_model.save(temp_file_name)
# Encode the keras model file.
with open(expand_path(output_filename), 'wb') as outfile, open(temp_file_name, 'rb') as infile:
encoding.encode(infile, outfile, enc_key)
infile.closed
outfile.closed
# Remove the temporary keras file.
os.remove(temp_file_name)
def get_decoded_filename(input_file_name, enc_key, custom_objects=None):
"""Extract keras model file and get model dtype.
Args:
input_file_name (str): Path to input model file.
enc_key (bytes): Byte text to decode model.
custom_objects(dict): Custom objects for serialization and deserialization.
Returns:
        temp_file_name (str): Path to the decoded keras model file.
"""
if input_file_name.endswith(".hdf5"):
return input_file_name
custom_objs = dict()
custom_objs.update(CUSTOM_OBJS)
if custom_objects is not None:
custom_objs.update(custom_objects)
if ENCRYPTION_OFF:
return input_file_name
# Check if input file exists.
if not os.path.isfile(input_file_name):
raise ValueError("Cannot find input file name.")
os_handle, temp_file_name = tempfile.mkstemp()
os.close(os_handle)
with open(temp_file_name, 'wb') as temp_file, open(input_file_name, 'rb') as encoded_file:
encoding.decode(encoded_file, temp_file, enc_key)
encoded_file.closed
temp_file.closed
# Check if the model is valid hdf5
try:
with CustomObjectScope(custom_objs):
keras.models.load_model(temp_file_name, compile=False)
except (OSError, IOError):
sys.exit("Invalid decryption. {}. The key used to load the model "
"is incorrect.".format(sys.exc_info()[1]))
except ValueError:
raise ValueError("Invalid decryption. {}. The key used to load the model "
"is incorrect.".format(sys.exc_info()[1]))
return temp_file_name
def decode_to_keras(input_file_name, enc_key,
input_model=None, compile_model=True, by_name=True,
custom_objects=None):
"""A simple function to decode an encrypted file to a keras model.
Args:
input_file_name (str): Path to encoded input file.
enc_key (bytes): Byte text to decode the model.
custom_objects(dict): Custom objects for serialization and deserialization.
Returns:
decrypted_model (keras.models.Model): Returns a decrypted keras model.
"""
custom_objs = dict()
custom_objs.update(CUSTOM_OBJS)
if custom_objects is not None:
custom_objs.update(custom_objects)
if input_file_name.endswith(".hdf5"):
with CustomObjectScope(custom_objs):
if input_model is None:
return keras.models.load_model(input_file_name, compile=compile_model)
assert isinstance(input_model, keras.models.Model), (
"Input model not a valid Keras model."
)
input_model.load_weights(input_file_name, by_name=by_name)
return input_model
# Check if input file exists.
if not os.path.isfile(expand_path(input_file_name)):
raise ValueError("Cannot find input file name.")
os_handle, temp_file_name = tempfile.mkstemp()
os.close(os_handle)
with open(temp_file_name, 'wb') as temp_file, open(expand_path(input_file_name), 'rb') as encoded_file:
encoding.decode(encoded_file, temp_file, enc_key)
encoded_file.closed
temp_file.closed
if input_model is None:
try:
# Patch for custom layers.
with CustomObjectScope(custom_objs):
decrypted_model = keras.models.load_model(temp_file_name, compile=compile_model)
except (OSError, IOError):
sys.exit("Invalid decryption. {}. The key used to load the model "
"is incorrect.".format(sys.exc_info()[1]))
except ValueError:
raise ValueError("Invalid decryption. {}".format(sys.exc_info()[1]))
os.remove(temp_file_name)
return decrypted_model
    assert isinstance(input_model, keras.models.Model), 'Input model not a valid Keras model.'
try:
# Patch for custom layers.
with CustomObjectScope(custom_objs):
input_model.load_weights(temp_file_name, by_name=by_name)
except (OSError, IOError):
sys.exit("Invalid decryption. {}. The key used to load the model "
"is incorrect.".format(sys.exc_info()[1]))
except ValueError:
raise ValueError("Invalid decryption. {}. The key used to load the model "
"is incorrect.".format(sys.exc_info()[1]))
os.remove(temp_file_name)
return input_model
def model_io(model_path, enc_key=None, custom_objects=None, compile=False):
"""Simple utility to handle model file based on file extensions.
Args:
        model_path (str): Path to the model file.
enc_key (str): Key to load tlt file.
Returns:
model (keras.models.Model): Loaded keras model.
"""
custom_objs = dict()
custom_objs.update(CUSTOM_OBJS)
if custom_objects is not None:
custom_objs.update(custom_objects)
assert os.path.exists(
model_path), "Model not found at {}".format(model_path)
if model_path.endswith('.tlt'):
assert enc_key is not None, "Key must be provided to load the model."
return decode_to_keras(str(model_path),
enc_key,
custom_objects=custom_objs)
elif model_path.endswith('.hdf5'):
with CustomObjectScope(custom_objs):
return keras.models.load_model(str(model_path),
compile=compile)
else:
raise NotImplementedError(
"Invalid model file extension. {}".format(model_path))
def deserialize_custom_layers(art):
"""Deserialize the code for custom layer from EFF.
Args:
art (eff.core.artifact.Artifact): Artifact restored from EFF Archive.
Returns:
final_dict (dict): Dictionary representing CUSTOM_OBJS used in the EFF stored Keras model.
"""
# Get class.
source_code = art.get_content()
spec = importlib.util.spec_from_loader('helper', loader=None)
helper = importlib.util.module_from_spec(spec)
exec(source_code, helper.__dict__) # noqa pylint: disable=W0122
final_dict = {}
# Get class name from attributes.
class_names = art["class_names"]
for cn in class_names:
final_dict[cn] = getattr(helper, cn)
return final_dict
def restore_eff(eff_path, passphrase=None):
"""Restore Keras Model from EFF Archive.
Args:
eff_path (str): Path to the eff file.
passphrase (str): Key to load EFF file.
Returns:
model (keras.models.Model): Loaded keras model.
EFF_CUSTOM_OBJS (dict): Dictionary of custom layers from the eff file.
"""
model_name = os.path.basename(eff_path).split(".")[0]
with Archive.restore_from(restore_path=eff_path, passphrase=passphrase) as restored_effa:
EFF_CUSTOM_OBJS = deserialize_custom_layers(restored_effa.artifacts['custom_layers.py'])
art = restored_effa.artifacts['{}.hdf5'.format(model_name)]
weights, m = art.get_content()
with CustomObjectScope(EFF_CUSTOM_OBJS):
model = keras.models.model_from_json(m, custom_objects=EFF_CUSTOM_OBJS)
model.set_weights(weights)
return model, EFF_CUSTOM_OBJS
def load_keras_model(
filepath, custom_objects=None, compile=True): # pylint: disable=redefined-builtin
"""Wrap keras load model to catch incorrect keywords error."""
if not os.path.exists(expand_path(filepath)):
raise FileNotFoundError(f"Model not found: {filepath}")
try:
return keras.models.load_model(filepath, custom_objects, compile=compile)
except (OSError, IOError):
raise ValueError(
f"Invalid model: {filepath}, please check the key used to load the model"
)
def load_tf_keras_model(
filepath, custom_objects=None, compile=True): # pylint: disable=redefined-builtin
"""Wrap tf keras load model to catch incorrect keywords error."""
try:
return tf.keras.models.load_model(filepath, custom_objects, compile=compile)
except (OSError, IOError):
sys.exit("Invalid decryption. {}. The key used to load the model "
"is incorrect.".format(sys.exc_info()[1]))
def build_regularizer_from_config(reg_config):
'''Build Keras regularizer based on config protobuf.'''
reg_type = reg_config.type
reg_weight = reg_config.weight
kr = None
if reg_type and reg_type > 0:
assert 0 < reg_weight < 1, "Weight decay should be no less than 0 and less than 1"
kr = reg_dict[reg_type](reg_weight)
return kr
def build_optimizer_from_config(optim_config, **kwargs):
'''Build Keras optimizer based on config protobuf.'''
optim_type = optim_config.WhichOneof('optimizer')
assert optim_type, "Optimizer must be specified in config file!"
cfg = getattr(optim_config, optim_type)
if optim_type == 'adam':
assert 1 > cfg.beta1 > 0, "beta1 must be within (0, 1)."
assert 1 > cfg.beta2 > 0, "beta2 must be within (0, 1)."
assert cfg.epsilon > 0, "epsilon must be greater than 0."
optim = Adam(beta_1=cfg.beta1, beta_2=cfg.beta2, epsilon=cfg.epsilon,
amsgrad=cfg.amsgrad, **kwargs)
elif optim_type == 'sgd':
assert cfg.momentum >= 0, "momentum must be >=0."
optim = SGD(momentum=cfg.momentum, nesterov=cfg.nesterov, **kwargs)
elif optim_type == 'rmsprop':
assert 1 > cfg.beta2 > 0, "rho must be within (0, 1)."
assert cfg.momentum >= 0, "momentum must be >=0."
assert cfg.epsilon > 0, "epsilon must be greater than 0."
optim = RMSprop(rho=cfg.rho, momentum=cfg.momentum, epsilon=cfg.epsilon,
centered=cfg.centered, **kwargs)
else:
raise NotImplementedError("The optimizer specified is not implemented!")
return optim
def build_lrs_from_config(lrs_config, max_iterations, lr_multiplier):
'''
Build Keras learning schedule based on config protobuf.
Args:
lrs_config: LearningRateConfig
max_iterations: max iterations of training
lr_multiplier: lr = config.lr * lr_multiplier
Returns:
lr_schedule as keras.callback
'''
lrs_type = lrs_config.WhichOneof('learning_rate')
assert lrs_type, "learning rate schedule must be specified in config file!"
cfg = getattr(lrs_config, lrs_type)
assert cfg.min_learning_rate > 0.0, "min_learning_rate should be positive"
assert cfg.max_learning_rate > cfg.min_learning_rate, \
"max learning rate should be larger than min_learning_rate"
if lrs_type == 'soft_start_annealing_schedule':
lrs = SoftStartAnnealingLearningRateScheduler(
max_iterations=max_iterations,
base_lr=cfg.max_learning_rate * lr_multiplier,
min_lr_ratio=cfg.min_learning_rate / cfg.max_learning_rate,
soft_start=cfg.soft_start,
annealing_start=cfg.annealing)
elif lrs_type == 'soft_start_cosine_annealing_schedule':
lrs = SoftStartCosineAnnealingScheduler(
base_lr=cfg.max_learning_rate * lr_multiplier,
min_lr_ratio=cfg.min_learning_rate / cfg.max_learning_rate,
soft_start=cfg.soft_start,
max_iterations=max_iterations)
else:
raise NotImplementedError("The Learning schedule specified is not implemented!")
return lrs
def parse_model_load_from_config(train_config):
'''Parse model loading config from protobuf.
Input:
the protobuf config at training_config level.
Output
model_path (string): the path of model to be loaded. None if not given
load_graph (bool): Whether to load whole graph. If False, will need to recompile the model
reset_optim (bool): Whether to reset optim. This field must be true if load_graph is false.
initial_epoch (int): the starting epoch number. 0 - based
'''
load_type = train_config.WhichOneof('load_model')
if load_type is None:
return None, False, True, 0
if load_type == 'resume_model_path':
try:
epoch = int(train_config.resume_model_path.split('.')[-2].split('_')[-1])
except Exception:
raise ValueError("Cannot parse the checkpoint path. Did you rename it?")
return train_config.resume_model_path, True, False, epoch
if load_type == 'pretrain_model_path':
return train_config.pretrain_model_path, False, True, 0
if load_type == 'pruned_model_path':
return train_config.pruned_model_path, True, True, 0
raise ValueError("training configuration contains invalid load_model type.")
def check_tf_oom(func):
'''A decorator function to check OOM and raise informative errors.'''
def return_func(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
if type(e) == tf.errors.ResourceExhaustedError:
logger = logging.getLogger(__name__)
logger.error(
"Ran out of GPU memory, please lower the batch size, use a smaller input "
"resolution, use a smaller backbone, or enable model parallelism for "
"supported TLT architectures (see TLT documentation)."
)
sys.exit(1)
else:
# throw out the error as-is if they are not OOM error
raise e
return return_func
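# Illustrative usage only -- not part of the original module; run_training below is a
# hypothetical example function, not an API of this module. Wrapping a function with
# check_tf_oom turns a ResourceExhaustedError into an actionable exit message:
#
#     @check_tf_oom
#     def run_training(model, data, labels):
#         return model.fit(data, labels)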
class StepLRScheduler(keras.callbacks.Callback):
"""Step learning rate annealing schedule.
    This callback implements the step learning rate annealing schedule according to
the progress of the current experiment. The training progress is defined as the
ratio of the current iteration to the maximum iterations. The scheduler adjusts the
learning rate of the experiment in steps at regular intervals.
Args:
        base_lr: Learning rate at the start of the experiment.
        gamma: Ratio by which the learning rate reduces at every step.
        step_size: Step size as a percentage of maximum iterations.
        max_iterations: Total number of iterations in the current experiment
            phase.
"""
def __init__(self, base_lr=1e-2, gamma=0.1, step_size=33, max_iterations=12345):
"""__init__ method."""
super(StepLRScheduler, self).__init__()
if not 0.0 <= step_size <= 100.0:
raise ValueError('StepLRScheduler ' 'does not support a step size < 0.0 or > 100.0')
if not 0.0 <= gamma <= 1.0:
raise ValueError('StepLRScheduler ' 'does not support gamma < 0.0 or > 1.0')
self.base_lr = base_lr
self.gamma = gamma
self.step_size = step_size
self.max_iterations = max_iterations
self.global_step = 0
def reset(self, initial_step):
"""Reset global_step."""
self.global_step = initial_step
def update_global_step(self):
"""Increment global_step by 1."""
self.global_step += 1
def on_train_begin(self, logs=None):
"""Start of training method."""
self.reset(self.global_step)
lr = self.get_learning_rate(self.global_step / float(self.max_iterations))
K.set_value(self.model.optimizer.lr, lr)
def on_batch_end(self, batch, logs=None):
"""on_batch_end method."""
self.update_global_step()
progress = self.global_step / float(self.max_iterations)
lr = self.get_learning_rate(progress)
K.set_value(self.model.optimizer.lr, lr)
def on_epoch_end(self, epoch, logs):
"""on_epoch_end method."""
logs = logs or {}
logs['lr'] = K.get_value(self.model.optimizer.lr)
def get_learning_rate(self, progress):
"""Compute learning rate according to progress to reach max iterations."""
if not 0. <= progress <= 1.:
raise ValueError('StepLRScheduler '
'does not support a progress value < 0.0 or > 1.0 '
'received (%f)' % progress)
numsteps = self.max_iterations * self.step_size // 100
exp_factor = self.global_step / numsteps
lr = self.base_lr * pow(self.gamma, exp_factor)
return lr
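# Worked example (illustrative, not part of the original module): with base_lr=1e-2,
# gamma=0.1, step_size=33 and max_iterations=300, numsteps = 300 * 33 // 100 = 99, so
# get_learning_rate() yields lr = 1e-2 * 0.1 ** (global_step / 99) -- as written, a
# smooth exponential decay that shrinks the learning rate by 10x every 99 iterations.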
class MultiGPULearningRateScheduler(keras.callbacks.Callback):
"""Learning rate scheduler implementation.
Implements https://arxiv.org/pdf/1706.02677.pdf (Accurate, Large Minibatch SGD:
Training ImageNet in 1 Hour) style learning rate schedule.
Learning rate scheduler modulates learning rate according to the progress in the
training experiment. Specifically the training progress is defined as the ratio of
the current iteration to the maximum iterations. Learning rate scheduler adjusts
learning rate in the following phases:
Phase 1: 0.0 <= progress < soft_start:
Starting from start_lr linearly increase the learning rate to base_lr.
Phase 2: at every annealing point, divide learning rate by annealing divider.
Example:
```python
lrscheduler = MultiGPULearningRateScheduler(
max_iterations=max_iterations)
model.fit(X_train, Y_train, callbacks=[lrscheduler])
```
Args:
max_iterations: Total number of iterations in the experiment.
start_lr: Learning rate at the beginning. In the paper this is the learning rate used
with single GPU training.
base_lr: Maximum learning rate. In the paper base_lr is set as start_lr * number of
GPUs.
soft_start: The progress at which learning rate achieves base_lr when starting from
start_lr. Default value set as in the paper.
annealing_points: A list of progress values at which learning rate is divided by
annealing_divider. Default values set as in the paper.
annealing_divider: A divider for learning rate applied at each annealing point.
Default value set as in the paper.
"""
def __init__( # pylint: disable=W0102
self,
max_iterations,
start_lr=3e-4,
base_lr=5e-4,
soft_start=0.056,
annealing_points=[0.33, 0.66, 0.88],
annealing_divider=10.0):
"""__init__ method."""
super(MultiGPULearningRateScheduler, self).__init__()
if not 0.0 <= soft_start <= 1.0:
            raise ValueError('The soft_start variable should be >= 0.0 or <= 1.0.')
prev = 0.
for p in annealing_points:
if not 0.0 <= p <= 1.0:
raise ValueError('annealing_point should be >= 0.0 or <= 1.0.')
if p < prev:
raise ValueError('annealing_points should be in increasing order.')
if not soft_start < p:
raise ValueError('soft_start should be less than the first annealing point.')
prev = p
self.start_lr = start_lr
self.base_lr = base_lr
self.soft_start = soft_start # Increase to lr from start_lr until this point.
self.annealing_points = annealing_points # Divide lr by annealing_divider at these points.
self.annealing_divider = annealing_divider
self.max_iterations = max_iterations
self.global_step = 0
def reset(self, initial_step):
"""Reset global_step."""
self.global_step = initial_step
def update_global_step(self):
"""Increment global_step by 1."""
self.global_step += 1
def on_train_begin(self, logs=None):
"""on_train_begin method."""
self.reset(self.global_step)
lr = self.get_learning_rate(self.global_step / float(self.max_iterations))
K.set_value(self.model.optimizer.lr, lr)
def on_batch_end(self, batch, logs=None):
"""on_batch_end method."""
self.update_global_step()
progress = self.global_step / float(self.max_iterations)
lr = self.get_learning_rate(progress)
K.set_value(self.model.optimizer.lr, lr)
def on_epoch_end(self, epoch, logs):
"""on_epoch_end method."""
logs = logs or {}
logs['lr'] = K.get_value(self.model.optimizer.lr)
def get_learning_rate(self, progress):
"""Compute learning rate according to progress to reach max_iterations."""
if not 0. <= progress <= 1.:
raise ValueError('MultiGPULearningRateScheduler '
'does not support a progress value < 0.0 or > 1.0 '
'received (%f)' % progress)
if not self.base_lr:
return self.base_lr
lr = self.base_lr
if progress < self.soft_start:
soft_start = progress / self.soft_start
lr = soft_start * self.base_lr + (1. - soft_start) * self.start_lr
else:
for p in self.annealing_points:
if progress > p:
lr /= self.annealing_divider
return lr
class SoftStartAnnealingLearningRateScheduler(keras.callbacks.Callback):
"""Learning rate scheduler implementation.
Learning rate scheduler modulates learning rate according to the progress in the
training experiment. Specifically the training progress is defined as the ratio of
the current iteration to the maximum iterations. Learning rate scheduler adjusts
learning rate in the following 3 phases:
Phase 1: 0.0 <= progress < soft_start:
Starting from min_lr exponentially increase the learning rate to base_lr
Phase 2: soft_start <= progress < annealing_start:
Maintain the learning rate at base_lr
Phase 3: annealing_start <= progress <= 1.0:
Starting from base_lr exponentially decay the learning rate to min_lr
Example:
```python
lrscheduler = SoftStartAnnealingLearningRateScheduler(
max_iterations=max_iterations)
model.fit(X_train, Y_train, callbacks=[lrscheduler])
```
Args:
base_lr: Maximum learning rate
min_lr_ratio: The ratio between minimum learning rate (min_lr) and base_lr
soft_start: The progress at which learning rate achieves base_lr when starting from min_lr
annealing_start: The progress at which learning rate starts to drop from base_lr to min_lr
max_iterations: Total number of iterations in the experiment
"""
def __init__(self, max_iterations, base_lr=5e-4, min_lr_ratio=0.01, soft_start=0.1,
annealing_start=0.7):
"""__init__ method."""
super(SoftStartAnnealingLearningRateScheduler, self).__init__()
if not 0.0 <= soft_start <= 1.0:
raise ValueError('The soft_start variable should be >= 0.0 or <= 1.0.')
if not 0.0 <= annealing_start <= 1.0:
raise ValueError('The annealing_start variable should be >= 0.0 or <= 1.0.')
if not soft_start < annealing_start:
raise ValueError('Variable soft_start should be less than annealing_start.')
self.base_lr = base_lr
self.min_lr_ratio = min_lr_ratio
self.soft_start = soft_start # Increase to lr from min_lr until this point.
self.annealing_start = annealing_start # Start annealing to min_lr at this point.
self.max_iterations = max_iterations
self.min_lr = min_lr_ratio * base_lr
self.global_step = 0
def reset(self, initial_step):
"""Reset global_step."""
self.global_step = initial_step
def update_global_step(self):
"""Increment global_step by 1."""
self.global_step += 1
def on_train_begin(self, logs=None):
"""on_train_begin method."""
self.reset(self.global_step)
lr = self.get_learning_rate(self.global_step / float(self.max_iterations))
K.set_value(self.model.optimizer.lr, lr)
def on_batch_end(self, batch, logs=None):
"""on_batch_end method."""
self.update_global_step()
progress = self.global_step / float(self.max_iterations)
lr = self.get_learning_rate(progress)
K.set_value(self.model.optimizer.lr, lr)
def on_epoch_end(self, epoch, logs):
"""on_epoch_end method."""
logs = logs or {}
logs['lr'] = K.get_value(self.model.optimizer.lr)
def get_learning_rate(self, progress):
"""Compute learning rate according to progress to reach max_iterations."""
if not 0. <= progress <= 1.:
raise ValueError('SoftStartAnnealingLearningRateScheduler '
'does not support a progress value < 0.0 or > 1.0 '
'received (%f)' % progress)
if not self.base_lr:
return self.base_lr
if self.soft_start > 0.0:
soft_start = progress / self.soft_start
else: # learning rate starts from base_lr
soft_start = 1.0
if self.annealing_start < 1.0:
annealing = (1.0 - progress) / (1.0 - self.annealing_start)
else: # learning rate is never annealed
annealing = 1.0
t = soft_start if progress < self.soft_start else 1.0
t = annealing if progress > self.annealing_start else t
lr = exp(log(self.min_lr) + t * (log(self.base_lr) - log(self.min_lr)))
return lr
class OneIndexedCSVLogger(keras.callbacks.CSVLogger):
"""CSV Logger with epoch number started from 1."""
def on_epoch_end(self, epoch, logs=None):
"""On epoch end."""
super(OneIndexedCSVLogger, self).on_epoch_end(epoch+1, logs)
class SoftStartCosineAnnealingScheduler(keras.callbacks.Callback):
"""Soft Start Cosine annealing scheduler.
    The scheduler adjusts the learning rate in the following 2 phases:
Phase 1: 0.0 <= progress < soft_start:
Starting from min_lr linearly increase the learning rate to base_lr
Phase 2: soft_start <= progress <= 1.0:
Starting from base_lr cosine decay the learning rate to min_lr
Args:
base_lr: Maximum learning rate
min_lr_ratio: The ratio between minimum learning rate (min_lr) and base_lr
soft_start: The progress at which learning rate achieves base_lr when starting from min_lr
max_iterations: Total number of iterations in the experiment
(https://arxiv.org/pdf/1608.03983.pdf)
"""
def __init__(self, base_lr, min_lr_ratio, soft_start, max_iterations):
"""Initalize global parameters."""
super(SoftStartCosineAnnealingScheduler, self).__init__()
if not 0.0 <= soft_start <= 1.0:
            raise ValueError('The soft_start variable should be >= 0.0 or <= 1.0.')
self.max_iterations = max_iterations
self.soft_start = soft_start
self.base_lr = base_lr
self.min_lr = self.base_lr * min_lr_ratio
self.global_step = 0
def reset(self, initial_step):
"""Reset global step."""
self.global_step = initial_step
def update_global_step(self):
"""Increment global_step by 1."""
self.global_step += 1
def on_train_begin(self, logs=None):
"""on_train_begin method."""
self.reset(self.global_step)
lr = self.get_learning_rate(self.global_step / float(self.max_iterations))
K.set_value(self.model.optimizer.lr, lr)
def on_batch_end(self, batch, logs=None):
"""on_batch_end method."""
self.update_global_step()
progress = self.global_step / float(self.max_iterations)
lr = self.get_learning_rate(progress)
K.set_value(self.model.optimizer.lr, lr)
def on_epoch_end(self, epoch, logs):
"""on_epoch_end method."""
logs = logs or {}
logs['lr'] = K.get_value(self.model.optimizer.lr)
def get_learning_rate(self, progress):
"""Compute learning rate according to progress to reach max_iterations."""
if not hasattr(self.model.optimizer, 'lr'):
raise ValueError('Optimizer must have a "lr" attribute.')
if not 0. <= progress <= 1.:
raise ValueError('SoftStartCosineAnnealingScheduler '
'does not support a progress value < 0.0 or > 1.0 '
'received (%f)' % progress)
if not self.base_lr:
return self.base_lr
if self.soft_start > 0.0:
soft_start = progress / self.soft_start
else: # learning rate starts from base_lr
soft_start = 1.0
if soft_start < 1:
lr = (self.base_lr - self.min_lr) * soft_start + self.min_lr
else:
lr = self.min_lr + (self.base_lr - self.min_lr) * \
(1 + math.cos(math.pi * (progress - self.soft_start))) / 2
return lr
def build_class_weights(spec):
"""Build the class weights list."""
mapping_dict = spec.dataset_config.target_class_mapping
classes = sorted({str(x).lower() for x in mapping_dict.values()})
class_weights_dict = spec.class_weighting_config.class_weighting
class_weights_list = []
for cls_name in classes:
if cls_name in class_weights_dict:
class_weights_list.append(class_weights_dict[cls_name])
else:
class_weights_list.append(1.0)
return class_weights_list
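# Worked example (illustrative, not part of the original module): if
# target_class_mapping maps raw labels onto {"car", "cyclist", "person"} and
# class_weighting only contains {"person": 4.0}, the class names are sorted
# alphabetically and any class without an explicit weight defaults to 1.0, so the
# returned list is [1.0, 1.0, 4.0].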
class TensorBoard(keras.callbacks.Callback):
"""Callback to log some things to TensorBoard. Quite minimal, and just here as an example."""
def __init__(self, log_dir='./logs', write_graph=True, weight_hist=False):
"""__init__ method.
Args:
log_dir: the path of the directory where to save the log
files to be parsed by TensorBoard.
write_graph: whether to visualize the graph in TensorBoard.
The log file can become quite large when
write_graph is set to True.
weight_hist: whether plot histogram of weights.
"""
super(TensorBoard, self).__init__()
self.log_dir = log_dir
self.write_graph = write_graph
self._merged = None
self._step = 0
self._weight_hist = weight_hist
self.writer = tf.summary.FileWriter(self.log_dir)
def on_epoch_begin(self, epoch, logs=None):
"""on_epoch_begin method."""
# Run user defined summaries
if self._merged is not None:
summary_str = self.sess.run(self._merged)
self.writer.add_summary(summary_str, epoch)
self.writer.flush()
def on_epoch_end(self, epoch, logs=None):
"""on_epoch_end method."""
for name, value in logs.items():
if ("AP" in name) or ("loss" in name) or ("acc" in name):
if isinstance(value, np.ndarray):
if not np.isnan(value.item()):
summary = summary_from_value(name, value.item())
self.writer.add_summary(summary, epoch)
self.writer.flush()
else:
if not np.isnan(value):
summary = summary_from_value(name, value)
self.writer.add_summary(summary, epoch)
self.writer.flush()
def on_batch_end(self, batch, logs=None):
"""on_batch_end method."""
for name, value in logs.items():
if name in ['batch', 'size']:
continue
summary = summary_from_value(name, value.item())
self.writer.add_summary(summary, self._step)
summary = summary_from_value('lr', K.get_value(self.model.optimizer.lr))
self.writer.add_summary(summary, self._step)
self._step += 1
self.writer.flush()
def set_model(self, model):
"""set_model method."""
self.model = model
self.sess = K.get_session()
if self._weight_hist:
for layer in self.model.layers:
for weight in layer.weights:
mapped_weight_name = weight.name.replace(':', '_')
tf.summary.histogram(mapped_weight_name, weight)
self._merged = tf.summary.merge_all()
if self.write_graph:
self.writer.add_graph(self.sess.graph)
def on_train_end(self, *args, **kwargs):
"""on_train_end method."""
self.writer.close()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Spec validator to validate experiment spec."""
import operator
import sys
from google.protobuf.pyext._message import MessageMapContainer, ScalarMapContainer
import six
def eval_str(s):
"""If s is a string, return the eval results. Else return itself."""
if isinstance(s, six.string_types):
if len(s) > 0:
return eval(s)
return None
return s
def length(s):
"""same as len(eval(s))."""
return len(eval(s))
class ValueChecker:
"""Class to wrap the op and print info for value check."""
def __init__(self, comp_op, limit, func=None, func_info=""):
"""Init."""
self.comp_op = comp_op
self.limit = limit
self.func = func
self.func_info = func_info
def __call__(self):
"""Call."""
return self.comp_op, self.limit, self.func, self.func_info
def a_in_b(a, b):
"""Same as a in b."""
return (a in b)
def a_mod_b(a, b):
"""Check if a is divisible by b."""
return operator.mod(a, b) == 0
operator_dict = {">": operator.gt,
"=": operator.eq,
"<": operator.lt,
"!=": operator.ne,
">=": operator.ge,
"<=": operator.le,
"in": a_in_b,
"%": a_mod_b}
def check_has(value_name, input_value, checker):
"""Function to check if a value is set."""
comp_op_name, limit, _, _ = checker()
comp_op = operator_dict[comp_op_name]
return comp_op(input_value, limit)
def check_value(value_name, input_value, checker_list):
"""Function to check if a value is within the limitation."""
for checker in checker_list:
comp_op_name, limit, func, func_info = checker()
comp_op = operator_dict[comp_op_name]
if func:
try:
value = func(input_value)
except SyntaxError:
print("Experiment Spec Setting Error: " +
"{} can not be parsed correct. ".format(value_name) +
"Wrong value: {}".format(input_value))
sys.exit(1)
else:
value = input_value
if isinstance(value, list):
for item in value:
if isinstance(item, list):
new_vc = ValueChecker(comp_op_name, limit)
check_value(value_name, item, [new_vc])
else:
if limit == "":
error_info = (
"Experiment Spec Setting Error: " + func_info +
"{} should be set. ".format(value_name))
else:
error_info = (
"Experiment Spec Setting Error: " + func_info +
"{} should be {} {}. Wrong value: {}".format(value_name,
comp_op_name,
limit,
item))
assert comp_op(item, limit), error_info
else:
if limit == "":
error_info = \
("Experiment Spec Setting Error: " + func_info +
"{} should be set.".format(value_name))
else:
error_info = \
("Experiment Spec Setting Error: " + func_info +
"{} should {} {}. Wrong value: {}".format(value_name,
comp_op_name,
limit,
value))
assert comp_op(value, limit), error_info
class SpecValidator:
"""Validator for spec check."""
def __init__(self, required_msg_dict, value_checker_dict, option_checker_dict=None):
"""Init."""
self.required_msg_dict = required_msg_dict
self.value_checker_dict = value_checker_dict
if option_checker_dict is None:
self.option_checker_dict = {}
else:
self.option_checker_dict = option_checker_dict
def validate(self, spec, required_msg):
"""Recursively validate experiment spec protobuf."""
def spec_validator(spec, required_msg=None):
"""
Spec validate function.
spec: protobuf spec.
required_msg: The names of the required messages in the spec.
"""
if required_msg is None:
required_msg = []
try:
for desc in spec.DESCRIPTOR.fields:
value = getattr(spec, desc.name)
if desc.type == desc.TYPE_MESSAGE:
if desc.name in required_msg:
if desc.label == desc.LABEL_REPEATED:
assert len(value) > 0, \
"{} should be set in experiment spec file.".format(desc.name)
# @TODO(tylerz): to skip ScalarMapContainer check
# because it is handled by protobuf.
if isinstance(value, ScalarMapContainer):
continue
else:
assert spec.HasField(desc.name), \
"{} should be set in experiment spec file.".format(desc.name)
if desc.name in self.required_msg_dict:
required_msg_next = self.required_msg_dict[desc.name]
else:
required_msg_next = []
if desc.label == desc.LABEL_REPEATED:
# @vpraveen: skipping scalar map containers because
# this is handled by protobuf internally.
if isinstance(value, ScalarMapContainer):
continue
if isinstance(value, MessageMapContainer):
for item in value:
spec_validator(value[item], required_msg=required_msg_next)
else:
for item in value:
spec_validator(spec=item, required_msg=required_msg_next)
else:
# Check if the message exists.
if spec.HasField(desc.name):
spec_validator(spec=value, required_msg=required_msg_next)
else:
# If the parameter is optional and it is not set,
# then we skip the check_value.
if desc.name in self.option_checker_dict:
if not check_has(desc.name, value,
self.option_checker_dict[desc.name]):
continue
if desc.name in self.value_checker_dict:
value_checker = self.value_checker_dict[desc.name]
else:
continue
if desc.label == desc.LABEL_REPEATED:
for item in value:
check_value(desc.name, item, value_checker)
else:
check_value(desc.name, value, value_checker)
except AttributeError:
print("failed for spec: {}, type(spec): {}".format(spec, type(spec)))
sys.exit(-1)
spec_validator(spec, required_msg)
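if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): the leaf-level checkers can
    # also be exercised directly. The parameter names below are illustrative only.
    positive_checker = [ValueChecker(">", 0)]
    check_value("batch_size_per_gpu", 8, positive_checker)
    multiple_of_32_checker = [ValueChecker("%", 32, func_info="width must be divisible by 32. ")]
    check_value("output_image_width", 960, multiple_of_32_checker)
    print("example spec value checks passed")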
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/spec_validator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Include this in wrapper to suppress all warnings."""
# Code below to suppress as many warnings as possible
import os
if str(os.getenv('SUPPRES_VERBOSE_LOGGING', '0')) == '1':
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import warnings
warnings.filterwarnings("ignore")
import logging
logging.getLogger('tensorflow').disabled = True
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.FATAL)
from tensorflow.python.util import deprecation
deprecation._PRINT_DEPRECATION_WARNINGS = False
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/no_warning.py |
tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/losses/__init__.py |
|
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
'''Base loss for IVA models.'''
from abc import ABC, abstractmethod
import tensorflow as tf
class BaseLoss(ABC):
'''
IVA Base losses.
All model losses (if needs customization) should be inherited from this class.
    Child class must implement: compute_loss(self, y_true, y_pred), and this should
be passed into model.compile() as loss.
'''
def bce_loss(self, y_true, y_pred, smoothing=0.0):
'''
Compute the bce loss.
Arguments:
y_true (nD tensor): A TensorFlow tensor of any shape containing the ground truth data.
In this context, the expected tensor has shape (batch_size, #boxes, #classes)
and contains the ground truth bounding box categories.
y_pred (nD tensor): A TensorFlow tensor of identical structure to `y_true` containing
the predicted data, in this context the predicted bounding box categories.
smoothing (float): y_true = y_true * (1-smoothing) + smoothing / 2.0. Smoothing=0 is
                equivalent to the unsmoothed BCE loss.
Returns:
The bce loss
'''
# Compute the log loss
y_true = y_true * (1.0 - smoothing) + smoothing / 2.0
y_pred = tf.sigmoid(y_pred)
bce_loss = -(y_true * tf.log(tf.maximum(y_pred, 1e-18)) +
(1.0-y_true) * tf.log(tf.maximum(1.0-y_pred, 1e-18)))
return tf.reduce_sum(bce_loss, axis=-1)
def bce_focal_loss(self, y_true, y_pred, alpha=0.25, gamma=2.0, smoothing=0.0):
'''
Compute the bce focal loss.
Arguments:
y_true (nD tensor): A TensorFlow tensor of any shape containing the ground truth data.
In this context, the expected tensor has shape (batch_size, #boxes, #classes)
and contains the ground truth bounding box categories.
y_pred (nD tensor): A TensorFlow tensor of identical structure to `y_true` containing
the predicted data, in this context the predicted bounding box categories.
alpha: alpha of focal loss
gamma: gamma of focal loss
smoothing (float): y_true = y_true * (1-smoothing) + smoothing / 2.0.
Returns:
            The focal loss, an (nD-1) TensorFlow tensor. In this context, a 2D tensor
                of shape (batch, n_boxes_total).
'''
y_true = y_true * (1.0 - smoothing) + smoothing / 2.0
y_pred = tf.sigmoid(y_pred)
# Compute the log loss
bce_loss = -(y_true * tf.log(tf.maximum(y_pred, 1e-18)) +
(1.0-y_true) * tf.log(tf.maximum(1.0-y_pred, 1e-18)))
p_ = (y_true * y_pred) + (1.0-y_true) * (1.0-y_pred)
modulating_factor = tf.pow(1.0 - p_, gamma)
weight_factor = (y_true * alpha + (1.0 - y_true) * (1.0-alpha))
focal_loss = modulating_factor * weight_factor * bce_loss
return tf.reduce_sum(focal_loss, axis=-1)
def L2_loss(self, y_true, y_pred):
'''Compute L2 loss.'''
square_loss = 0.5 * (y_true - y_pred)**2
return tf.reduce_sum(square_loss, axis=-1)
@abstractmethod
def compute_loss(self, y_true, y_pred):
'''compute_loss to be implemented in child class.'''
raise NotImplementedError("compute_loss not implemented!")
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/losses/base_loss.py |
tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/evaluator/__init__.py |
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""mAP calculation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import partial
import logging
import os
from matplotlib import pyplot as plt
import numpy as np
logging.basicConfig(
format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="DEBUG"
)
logger = logging.getLogger(__name__)
def batch_iou(box, box_list):
'''
    Element-wise IoU between a target box and a batch of boxes (box_list).
Args:
box: np array of shape (4,): the target box
box_list: np array of shape (N, 4): a batch of boxes to match the box.
Returns:
np array of shape (N,). The IOU between target box and each single box in box_list
'''
if box.ndim == 1:
box = np.expand_dims(box, axis=0)
if box_list.ndim == 1:
box_list = np.expand_dims(box_list, axis=0)
# Compute the IoU.
min_xy = np.maximum(box[:, :2], box_list[:, :2])
max_xy = np.minimum(box[:, 2:], box_list[:, 2:])
interx = np.maximum(0, max_xy - min_xy)
interx = interx[:, 0] * interx[:, 1]
box_area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1])
box_list_areas = (box_list[:, 2] - box_list[:, 0]) * (box_list[:, 3] - box_list[:, 1])
union_areas = box_area + box_list_areas - interx
return interx / union_areas
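def _batch_iou_example():
    """Hedged, illustrative sketch (not part of the original module).
    Shows the expected [xmin, ymin, xmax, ymax] layout for batch_iou: one target box
    matched against a small batch of candidate boxes.
    """
    target = np.array([0.0, 0.0, 10.0, 10.0])
    candidates = np.array([[0.0, 0.0, 10.0, 10.0],     # identical box -> IoU 1.0
                           [5.0, 5.0, 15.0, 15.0],     # partial overlap
                           [20.0, 20.0, 30.0, 30.0]])  # disjoint box -> IoU 0.0
    return batch_iou(target, candidates)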
def _per_img_match(x, n_classes, sorting_algorithm, matching_iou_threshold):
"""
Helper function for multithreading matching.
Do not call this function from outside. It's outside the class definition purely due to python
pickle issue.
Arguments:
x (tuple): (gt_box, pred_box)
n_classes (int): number of classes
sorting_algorithm (str): Which sorting algorithm the matching algorithm should
use. This argument accepts any valid sorting algorithm for Numpy's `argsort()`
function. You will usually want to choose between 'quicksort' (fastest and most
            memory efficient, but not stable) and 'mergesort' (slightly slower and less memory
efficient, but stable). The official Matlab evaluation algorithm uses a stable
sorting algorithm, so this algorithm is only guaranteed to behave identically if you
choose 'mergesort' as the sorting algorithm, but it will almost always behave
identically even if you choose 'quicksort' (but no guarantees).
matching_iou_threshold (float): A prediction will be considered a true
positive if it has a Jaccard overlap of at least `matching_iou_threshold` with any
ground truth bounding box of the same class.
"""
gt = x[0]
pred = x[1]
T = [[] for _ in range(n_classes)]
P = [[] for _ in range(n_classes)]
gt_cls = [gt[gt[:, 0].astype(np.int) == i, 1:] for i in range(n_classes)]
gt_cls_valid = [np.ones((len(i), )) for i in gt_cls]
gt_hard_count = [i[:, 0].sum() for i in gt_cls]
desc_inds = np.argsort(-pred[:, 1], kind=sorting_algorithm)
pred = pred[desc_inds]
for pred_box in pred:
pred_cls = int(pred_box[0])
# if no GT in this class, simply recognize as FP
if len(gt_cls[pred_cls]) == 0:
T[pred_cls].append(0)
P[pred_cls].append(pred_box[1])
continue
overlaps = batch_iou(box_list=gt_cls[pred_cls][:, -4:], box=pred_box[-4:])
overlaps_unmatched = overlaps * gt_cls_valid[pred_cls]
if np.max(overlaps_unmatched) >= matching_iou_threshold:
# invalidate the matched gt
matched_gt_idx = np.argmax(overlaps_unmatched)
gt_cls_valid[pred_cls][matched_gt_idx] = 0.0
if gt_cls[pred_cls][matched_gt_idx, 0] < 0.5:
# this is not a hard box. We should append GT
T[pred_cls].append(1)
P[pred_cls].append(pred_box[1])
else:
logger.warning("Got label marked as difficult(occlusion > 0), "
"please set occlusion field in KITTI label to 0, "
"if you want to include it in mAP calculation "
"during validation/evaluation.")
# this hard box is already processed. Deduct from gt_hard_cnt
gt_hard_count[pred_cls] = gt_hard_count[pred_cls] - 1
else:
T[pred_cls].append(0)
P[pred_cls].append(pred_box[1])
for idx, cls_valid in enumerate(gt_cls_valid):
non_match_count = int(round(cls_valid.sum() - gt_hard_count[idx]))
T[idx].extend([1]*non_match_count)
P[idx].extend([0.0]*non_match_count)
return (T, P)
class APEvaluator:
'''Computes the mean average precision of the given lists of pred and GT.'''
def __init__(self,
n_classes,
conf_thres=0.01,
matching_iou_threshold=0.5,
average_precision_mode='sample',
num_recall_points=11):
"""Initializes Keras / TensorRT objects needed for model inference.
Args:
n_classes (integer): Number of classes
conf_thres (float): confidence threshold to consider a bbox.
matching_iou_threshold (float, optional): A prediction will be considered a true
positive if it has a Jaccard overlap of at least `matching_iou_threshold` with any
ground truth bounding box of the same class.
average_precision_mode (str, optional): Can be either 'sample' or 'integrate'. In the
case of 'sample', the average precision will be computed according to the Pascal VOC
formula that was used up until VOC 2009, where the precision will be sampled for
`num_recall_points` recall values. In the case of 'integrate', the average precision
will be computed according to the Pascal VOC formula that was used from VOC 2010
onward, where the average precision will be computed by numerically integrating
                over the whole precision-recall curve instead of sampling individual points from
it. 'integrate' mode is basically just the limit case of 'sample' mode as the number
of sample points increases.
num_recall_points (int, optional): The number of points to sample from the
precision-recall-curve to compute the average precisions. In other words, this is
the number of equidistant recall values for which the resulting precision will be
computed. 11 points is the value used in the official Pascal VOC 2007 detection
evaluation algorithm.
"""
self.n_classes = n_classes
self.conf_thres = conf_thres
self.matching_iou_threshold = matching_iou_threshold
self.average_precision_mode = average_precision_mode
self.num_recall_points = num_recall_points
self.gt_labels = None
self.pred_labels = None
self.T = None
self.P = None
self.ap = None
def __call__(self, gt, pred, verbose=True, class_names=None, vis_path=None):
'''
Compute AP of each classes and mAP.
Arguments:
gt (list of numpy arrays): A list of length n_eval_images. Each element is a numpy
array of shape (n_bbox, 6). n_bbox is the number of boxes inside the image and
6 elements for the bbox is [class_id, is_difficult, xmin, ymin, xmax, ymax].
Note: is_difficult is 0 if the bbox is not difficult. 1 otherwise. Always set
is_difficult to 0 if you don't have this field in your GT label.
pred (list of numpy arrays): A list of length n_eval_images. Each element is a numpy
array of shape (n_bbox, 6). n_bbox is the number of boxes inside the image and
6 elements for the bbox is [class_id, confidence, xmin, ymin, xmax, ymax]
verbose (bool, optional): If `True`, will print out the progress during runtime.
            class_names (list): Names of the object classes, used for visualization.
            vis_path (str): Path to save the visualization images.
        Note: the class itself supports both normalized and un-normalized coords. As long as
            the coordinates of gt and pred are normalized consistently, the class gives
            correct results.
        Returns:
            A float, the mean average precision, and a list of length n_classes containing
            the AP for each class.
'''
self.gt_labels = gt
self.pred_labels = pred
self.matching(sorting_algorithm='quicksort',
matching_iou_threshold=self.matching_iou_threshold,
verbose=verbose)
if verbose:
print('Start to calculate AP for each class')
# Calc AP and plot PR curves
self._calc_ap(sorting_algorithm='quicksort',
average_precision_mode=self.average_precision_mode,
num_recall_points=self.num_recall_points,
class_names=class_names,
vis_path=vis_path)
# Save plots to image
if vis_path is not None:
plt.legend()
plt.title("Precision-Recall curve")
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.grid()
save_path = os.path.join(vis_path, "PR_curve.png")
plt.savefig(save_path)
print(f"PR-curve image saved to {save_path}")
plt.clf()
# release memory
self.gt_labels = None
self.pred_labels = None
return np.mean(self.ap), self.ap
def matching(self, sorting_algorithm, matching_iou_threshold, verbose):
'''
Generate T, P list for AP calculation.
T: 0 - negative match, 1 - positive match
P: confidence of this prediction
'''
if (self.gt_labels is None) or (self.pred_labels is None):
raise ValueError("Matching cannot be called before the completion of prediction!")
if len(self.gt_labels) != len(self.pred_labels):
raise ValueError("Image count mismatch between ground truth and prediction!")
T = [[] for _ in range(self.n_classes)]
P = [[] for _ in range(self.n_classes)]
per_img_match = partial(_per_img_match, n_classes=self.n_classes,
sorting_algorithm=sorting_algorithm,
matching_iou_threshold=matching_iou_threshold)
results = []
for x in zip(self.gt_labels, self.pred_labels):
results.append(per_img_match(x))
for t, p in results:
for i in range(self.n_classes):
T[i] += t[i]
P[i] += p[i]
self.T = T
self.P = P
def __voc_ap(
self,
rec,
prec,
average_precision_mode,
num_recall_points,
class_name=None,
vis_path=None
):
if average_precision_mode == 'sample':
ap = 0.
for t in np.linspace(0., 1.0, num_recall_points):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / float(num_recall_points)
if class_name and vis_path:
rec_arr = np.array(rec)
prec_arr = np.array(prec)
plt.plot(rec_arr, prec_arr, label=class_name)
elif average_precision_mode == 'integrate':
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
if class_name and vis_path:
if class_name != "bg":
plt.plot(mrec, mpre, label=class_name)
else:
raise ValueError("average_precision_mode should be either sample or integrate")
return ap
def _calc_ap(
self,
sorting_algorithm,
average_precision_mode,
num_recall_points,
class_names=None,
vis_path=None
):
"""compute the AP for classes."""
if (self.T is None) or (self.P is None):
raise ValueError("Matching must be done first!")
self.ap = []
class_idx = 0
for T, P in zip(self.T, self.P):
if class_names is not None:
class_name = class_names[class_idx]
else:
class_name = None
prec = []
rec = []
TP = 0.
FP = 0.
FN = 0.
# sort according to prob.
Ta = np.array(T)
Pa = np.array(P)
s_idx = np.argsort(-Pa, kind=sorting_algorithm)
P = Pa[s_idx].tolist()
T = Ta[s_idx].tolist()
npos = np.sum(Ta)
for t, p in zip(T, P):
if t == 1 and p >= self.conf_thres:
TP += 1
elif t == 1 and p < self.conf_thres:
FN += 1
elif t == 0 and p >= self.conf_thres:
FP += 1
if TP + FP == 0.:
precision = 0.
else:
precision = float(TP) / (TP+FP)
if npos > 0:
recall = float(TP) / float(npos)
else:
recall = 0.0
prec.append(precision)
rec.append(recall)
ap = self.__voc_ap(
np.array(rec),
np.array(prec),
average_precision_mode,
num_recall_points,
class_name,
vis_path
)
self.ap.append(ap)
class_idx += 1
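if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): evaluate a single image with a
    # single class. Box rows follow the __call__ docstring:
    #   gt:   [class_id, is_difficult, xmin, ymin, xmax, ymax]
    #   pred: [class_id, confidence,   xmin, ymin, xmax, ymax]
    # All numbers below are illustrative only.
    example_gt = [np.array([[0., 0., 10., 10., 50., 50.]])]
    example_pred = [np.array([[0., 0.9, 12., 11., 48., 49.]])]
    evaluator = APEvaluator(n_classes=1, matching_iou_threshold=0.5)
    mean_ap, per_class_ap = evaluator(example_gt, example_pred, verbose=False)
    print("mAP: {}, per-class AP: {}".format(mean_ap, per_class_ap))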
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/evaluator/ap_evaluator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module with custom data structures."""
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/types/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration element for a DeepStream graph."""
VALID_COLOR_FORMATS = ["rgb", "bgr", "l"]
VALID_CHANNEL_ORDERS = ["channels_first", "channels_last"]
VALID_BACKENDS = ["uff", "onnx"]
VALID_NETWORK_TYPES = [0, 1, 2, 3, 100]
class BaseDSConfig(object):
"""Configuration element for an nvinfer ds plugin."""
def __init__(self, scale, offsets, infer_dims,
color_format, key, network_type=0,
input_names=None, num_classes=None,
output_names=None, data_format="channels_first",
backend="uff", maintain_aspect_ratio=False,
output_tensor_meta=False):
"""Generate a Deepstream config element.
Args:
scale (float): Scale value to normalize the input.
offsets (tuple): Tuple of floats for channels wise mean subtraction.
infer_dims (tuple): Input dimensions of the model.
color_format (str): Format of the color to be running inference on.
key (str): Key to load the model.
network_type (int): Type of model.
input_names (list): List of input names.
num_classes (int): Number of classes.
output_names (list): List of output names.
data_format (str): Format of the input data.
            backend (str): Backend format of the model.
            maintain_aspect_ratio (bool): Whether the DeepStream preprocessor should keep
                the input aspect ratio while scaling.
            output_tensor_meta (bool): Whether to attach raw inference tensors to the
                DeepStream metadata.
Returns:
BaseDSConfig: Instance of BaseDSConfig element.
"""
self.scale = scale
self.offsets = offsets
self.input_names = input_names
self.output_names = output_names
self.backend = backend
self.infer_dims = infer_dims
self.key = key
self.network_type = network_type
self.maintain_aspect_ratio = maintain_aspect_ratio
self.output_tensor_meta = output_tensor_meta
assert self.network_type in VALID_NETWORK_TYPES, (
"Invalid Network type {} requested. Supported network types: {}".format(
self.network_type, VALID_NETWORK_TYPES
)
)
self.color_format = color_format.lower()
if self.color_format not in VALID_COLOR_FORMATS:
raise NotImplementedError(
"Color format specified is not valid: {}. "
"Valid color formats include {}".format(
color_format.lower(),
VALID_COLOR_FORMATS
)
)
self.data_format = data_format
if self.data_format not in VALID_CHANNEL_ORDERS:
raise NotImplementedError("Invalid data format {} encountered.".format(
data_format, VALID_CHANNEL_ORDERS
))
self.channel_index = 0
if data_format == "channels_last":
self.channel_index = -1
if self.color_format == "l":
assert self.infer_dims[self.channel_index] == 1, (
"Channel count mismatched with color_format. "
"Provided\ndata_format: {}\n color_format: {}".format(
self.infer_dims[self.channel_index], self.color_format
)
)
self.num_classes = num_classes
self.initialized = True
def get_config(self):
"""Generate config elements."""
config_dict = {
"net-scale-factor": self.scale,
"offsets": ";".join([str(offset) for offset in self.offsets]),
"infer-dims": ";".join([str(int(dims)) for dims in self.infer_dims]),
"tlt-model-key": self.key,
"network-type": self.network_type,
}
# Number of classes.
if self.num_classes is not None:
config_dict["num-detected-classes"] = self.num_classes
if self.backend == "uff":
assert self.input_names is not None, (
"Input blob names cannot be None for a UFF model."
)
assert self.output_names is not None, (
"Output blob names cannot be None for a UFF model."
)
config_dict.update(
{
"uff-input-order": "0" if self.channel_index == 0 else "1",
"output-blob-names": ";".join([blob for blob in self.output_names]),
"uff-input-blob-name": ";".join([blob for blob in self.input_names])
}
)
if self.infer_dims[self.channel_index] == 3:
config_dict["model-color-format"] = 0
if self.color_format == "bgr":
config_dict["model-color-format"] = 1
else:
config_dict["model-color-format"] = 2
if self.maintain_aspect_ratio:
config_dict["maintain-aspect-ratio"] = 1
else:
config_dict["maintain-aspect-ratio"] = 0
if self.output_tensor_meta:
config_dict["output-tensor-meta"] = 1
else:
config_dict["output-tensor-meta"] = 0
return config_dict
def __str__(self):
"""Return the string data."""
if not self.initialized:
raise RuntimeError("Class wasn't initialized.")
config_dict = self.get_config()
config_string = ""
for key, val in config_dict.items():
config_string += "{}={}\n".format(key, str(val))
return config_string
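if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): build an nvinfer config element
    # for a hypothetical 3-class UFF detector. The key, blob names and dimensions below are
    # illustrative only.
    example_config = BaseDSConfig(
        scale=1.0 / 255.0,
        offsets=(103.939, 116.779, 123.68),
        infer_dims=(3, 384, 1248),
        color_format="bgr",
        key="nvidia_tlt",
        network_type=0,
        input_names=["input_1"],
        output_names=["output_bbox", "output_cov"],
        num_classes=3,
        data_format="channels_first",
        backend="uff")
    print(example_config)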
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/types/base_ds_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Allow client to use common methodss regardless of set type."""
import abc
from collections import namedtuple
import os
class SetStrategy(object):
"""Abstract class for common methods between types of sets."""
__metaclass__ = abc.ABCMeta
PathStruct = namedtuple('PathStruct',
'''error_path,
data_path,
config_path,
tfrecord_path,
gt_path,
info_source_path,
filtered_path,
landmarks_path,
regions''')
fpe_expr_folder = 'Ground_Truth_Fpegaze_'
def __init__(
self,
set_id,
experiment_folder_suffix,
tfrecord_folder_name,
gt_folder_name,
landmarks_folder_name,
set_label_sorter
):
"""Initialize parameters.
Args:
set_id (str): Set for which to generate tfrecords.
experiment_folder_suffix (str): Suffix of experiment folder containing tfrecords.
tfrecord_folder_name (str): Folder name of folder containing tfrecords.
gt_folder_name (str): Folder name of folder containing ground truth txt files.
landmarks_folder_name (str): Folder name of predicted landmarks, or None to disable.
set_label_sorter (SetLabelSorter object): Object to sort set as DataFactory / Nvhelnet.
"""
self._set_id = set_id
self._experiment_folder_suffix = experiment_folder_suffix
self._tfrecord_folder_name = tfrecord_folder_name
self._gt_folder_name = gt_folder_name
self._landmarks_folder_name = landmarks_folder_name
self._set_label_sorter = set_label_sorter
self.experiment_folder_name = None
@staticmethod
def _check_paths(parent_path, possible_paths):
for path in possible_paths:
full_path = os.path.join(parent_path, path)
if os.path.exists(full_path):
return full_path
return None
def _get_landmarks_path(self, parent):
"""Get the path to obtain landmarks from, using self._landmarks_folder_name.
Args:
parent (path-like): Path where _landmarks_folder_name should be living.
Returns:
lm_path (path-like): Path to the landmark folder.
"""
if self._landmarks_folder_name is None:
return None
lm_path = os.path.join(parent, self._landmarks_folder_name, 'facelandmark')
# The check to see if path is valid happens here, not in main.
if not os.path.isdir(lm_path):
raise IOError("Could not find landmarks-folder: {} is not a valid directory"
.format(lm_path))
return lm_path
@abc.abstractmethod
def _set_source_paths(self):
pass
def get_source_paths(self):
"""Get needed input file paths."""
return self._strategy_type, self._paths
@abc.abstractmethod
def _set_camera_parameters(self, config_path):
pass
def get_camera_parameters(self):
"""Get camera parameters."""
return self._cam_intrinsics, self._cam_extrinsics, self._screen_params
@abc.abstractmethod
def _get_json_path(self, set_id_path):
pass
@abc.abstractmethod
def extract_gaze_info(self, frame_data_dict, frame_name):
"""Return strategy's extracted info about gaze."""
pass
@abc.abstractmethod
def get_pts(self, pts, frame_width, frame_height):
"""Return strategy's landmark points (can be undistorted / distorted)."""
pass
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataio/set_strategy.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Write debug ground truth txt files."""
import os
import subprocess
import numpy as np
from nvidia_tao_tf1.core.utils.path_utils import expand_path
from nvidia_tao_tf1.cv.common.dataio.utils import mkdir
class GtConverter(object):
"""Converts a dataset to GT txt files."""
gt_features = [
'train/facebbx_x',
'train/facebbx_y',
'train/facebbx_w',
'train/facebbx_h',
'train/lefteyebbx_x',
'train/lefteyebbx_y',
'train/lefteyebbx_w',
'train/lefteyebbx_h',
'train/righteyebbx_x',
'train/righteyebbx_y',
'train/righteyebbx_w',
'train/righteyebbx_h',
'label/gaze_cam_x',
'label/gaze_cam_y',
'label/gaze_cam_z',
'label/gaze_screen_x',
'label/gaze_screen_y',
'label/hp_pitch',
'label/hp_yaw',
'label/hp_roll',
'label/theta',
'label/phi',
'label/mid_cam_x',
'label/mid_cam_y',
'label/mid_cam_z',
'label/lpc_cam_x',
'label/lpc_cam_y',
'label/lpc_cam_z',
'label/rpc_cam_x',
'label/rpc_cam_y',
'label/rpc_cam_z',
'label/head_pose_theta',
'label/head_pose_phi',
'label/theta_mid',
'label/phi_mid',
'label/theta_le',
'label/phi_le',
'label/theta_re',
'label/phi_re',
'train/valid_theta_phi',
'train/num_keypoints',
'train/source',
'train/norm_frame_path',
'label/norm_face_gaze_theta',
'label/norm_face_gaze_phi',
'label/norm_face_hp_theta',
'label/norm_face_hp_phi',
'label/norm_leye_gaze_theta',
'label/norm_leye_gaze_phi',
'label/norm_leye_hp_theta',
'label/norm_leye_hp_phi',
'label/norm_reye_gaze_theta',
'label/norm_reye_gaze_phi',
'label/norm_reye_hp_theta',
'label/norm_reye_hp_phi',
'train/norm_facebb_x',
'train/norm_facebb_y',
'train/norm_facebb_w',
'train/norm_facebb_h',
'train/norm_leyebb_x',
'train/norm_leyebb_y',
'train/norm_leyebb_w',
'train/norm_leyebb_h',
'train/norm_reyebb_x',
'train/norm_reyebb_y',
'train/norm_reyebb_w',
'train/norm_reyebb_h',
'train/norm_per_oof',
'train/norm_face_cnv_mat',
'train/norm_leye_cnv_mat',
'train/norm_reye_cnv_mat',
'label/face_cam_x',
'label/face_cam_y',
'label/face_cam_z'
]
lm_pred_gt_features = [feature for feature in gt_features
if (
'lefteyebbx' not in feature and
'righteyebbx' not in feature and
'facebbx' not in feature and
'num_eyes_detected' not in feature
)]
def __init__(self, gt_files_path, use_lm_pred):
"""Initialize file paths and features.
Args:
gt_files_path (path): Path to dump ground truth files.
use_lm_pred (bool): True if using predicted landmarks.
"""
self._gt_files_path = gt_files_path
# Overwrite the gt txt files, cleanup
if os.path.exists(expand_path(gt_files_path)):
            # Pass an argument list; a single command string is not executable with shell=False.
            subprocess.run(['rm', '-r', gt_files_path], shell=False)
mkdir(gt_files_path)
# Get different set of features if using predicted landmarks
self._gt_features = self.gt_features
if use_lm_pred:
self._gt_features = self.lm_pred_gt_features
@staticmethod
def _get_frame_gt(gt_features, frame_dict):
split_frame = frame_dict['train/image_frame_name'].rsplit('/', 1)
path_to_frame = split_frame[0]
frame_name = split_frame[1]
features_list = [
path_to_frame,
frame_name
]
for feature in gt_features:
val = ''
if feature == 'train/lefteyebbx_w':
val = str(frame_dict[feature] + frame_dict['train/lefteyebbx_x'])
elif feature == 'train/lefteyebbx_h':
val = str(frame_dict[feature] + frame_dict['train/lefteyebbx_y'])
elif feature == 'train/righteyebbx_w':
val = str(frame_dict[feature] + frame_dict['train/righteyebbx_x'])
elif feature == 'train/righteyebbx_h':
val = str(frame_dict[feature] + frame_dict['train/righteyebbx_y'])
elif isinstance(frame_dict[feature], np.ndarray):
val = ' '.join(map(str, frame_dict[feature].flatten()))
else:
val = str(frame_dict[feature])
features_list.append(val)
return ' '.join(features_list) + '\n'
def write_landmarks(self, users_dict):
"""Write landmarks GT files to separate it from other features."""
for user in users_dict.keys():
user_files_path = expand_path(f"{self._gt_files_path}/{user}_landmarks.txt")
user_writer = open(user_files_path, 'a')
for region in users_dict[user].keys():
for frame in users_dict[user][region].keys():
frame_data_dict = users_dict[user][region][frame]
user_writer.write(self._get_frame_gt(
['train/landmarks', 'train/landmarks_occ'],
frame_data_dict))
user_writer.close()
def write_gt_files(self, users_dict):
"""Write GT files."""
for user in users_dict.keys():
user_files_path = expand_path(f"{self._gt_files_path}/{user}.txt")
user_writer = open(user_files_path, 'a')
for region in users_dict[user].keys():
for frame in users_dict[user][region].keys():
frame_data_dict = users_dict[user][region][frame]
user_writer.write(self._get_frame_gt(
self._gt_features,
frame_data_dict))
user_writer.close()
def write_combined_landmarks(self, combined_dict, test_users, validation_users, train_users):
"""Write combined landmarks GT files after splitting into test, train, validation."""
def _write_category_files(combined_dict, category_users, gt_file_path):
gt_writer = open(gt_file_path, 'a')
for user in category_users:
for region in combined_dict[user].keys():
for frame in combined_dict[user][region].keys():
frame_data_dict = combined_dict[user][region][frame]
gt_writer.write(self._get_frame_gt(
['train/landmarks', 'train/landmarks_occ'],
frame_data_dict))
gt_writer.close()
test_path = os.path.join(self._gt_files_path, 'test_landmarks.txt')
validation_path = os.path.join(self._gt_files_path, 'validate_landmarks.txt')
train_path = os.path.join(self._gt_files_path, 'train_landmarks.txt')
_write_category_files(combined_dict, test_users, test_path)
_write_category_files(combined_dict, validation_users, validation_path)
_write_category_files(combined_dict, train_users, train_path)
def write_combined_gt_files(self, combined_dict, test_users, validation_users, train_users):
"""Write combined GT files after splitting into test, train, validation."""
def _write_category_files(combined_dict, category_users, gt_file_path):
gt_writer = open(gt_file_path, 'a')
for user in category_users:
for region in combined_dict[user].keys():
for frame in combined_dict[user][region].keys():
frame_data_dict = combined_dict[user][region][frame]
gt_writer.write(self._get_frame_gt(
self._gt_features,
frame_data_dict))
gt_writer.close()
test_path = os.path.join(self._gt_files_path, 'test.txt')
validation_path = os.path.join(self._gt_files_path, 'validate.txt')
train_path = os.path.join(self._gt_files_path, 'train.txt')
_write_category_files(combined_dict, test_users, test_path)
_write_category_files(combined_dict, validation_users, validation_path)
_write_category_files(combined_dict, train_users, train_path)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataio/gt_converter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TLT base data sequence."""
from abc import ABC, abstractmethod
from keras.utils import Sequence
import numpy as np
from PIL import Image
class BaseDataSequence(ABC, Sequence):
"""Abstract class for TLT network data sequence.
There should be another level of abstraction for specific tasks like detection etc.
To use dataloader:
1. call __init__(configs)
2. call add_source(image_folder, label_folder) to add sources
3. Use data generator in keras model.fit_generator()
Functions below must be implemented in derived classes:
1. __init__
2. _add_source
3. _preprocessing
4. _load_gt_label
5. __getitem__
6. __len__
"""
@abstractmethod
def __init__(self, dataset_config, augmentation_config=None, batch_size=10, is_training=True):
"""init function."""
self.n_samples = 0
pass
@abstractmethod
def _add_source(self, image_folder, label_folder):
"""add_source."""
pass
@abstractmethod
def _preprocessing(self, image, label, output_img_size):
"""Perform augmentation."""
pass
@abstractmethod
def _load_gt_label(self, label_path):
"""Load GT label from file."""
pass
def _load_gt_image(self, image_path):
"""Load GT image from file."""
img = Image.open(image_path)
if img.mode != "I":
img = img.convert('RGB')
return np.array(img).astype(np.float32)
# If it is a 16-bit image
# PIL convert to RGB will clip to 8-bit
# Hence we achieve this in numpy
img = np.array(img).astype(np.float32)
target_shape = img.shape + (3,)
img = img.reshape(img.shape + (1,))
img = np.broadcast_to(img, target_shape)
return img
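if __name__ == "__main__":
    # Hedged sketch (not part of the original module): the same reshape + broadcast trick
    # used above for 16-bit single-channel images, demonstrated on a toy array.
    gray = np.arange(6, dtype=np.float32).reshape(2, 3)           # (H, W) single channel
    rgb_like = np.broadcast_to(gray.reshape(2, 3, 1), (2, 3, 3))  # (H, W, 3) replicated
    print(rgb_like.shape)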
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataio/base_data_sequence.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Constants related to eye status."""
class EyeStatus():
"""Class which bounds together eye status constants."""
missing_eye_status = 'missing'
open_eye_status = 'open'
closed_eye_status = 'closed'
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataio/eye_status.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Write tfrecord files."""
from enum import Enum
import json
import os
import subprocess
import sys
import numpy as np
import six
import tensorflow as tf
from nvidia_tao_tf1.core.utils.path_utils import expand_path
from nvidia_tao_tf1.cv.common.dataio.utils import mkdir
def _convert_unicode_to_str(item):
if sys.version_info >= (3, 0):
# unicode() no longer exists in Python 3
if isinstance(item, str):
return item.encode()
return item
if isinstance(item, unicode): # noqa: disable=F821 # pylint: disable=E0602
return item.encode()
return item
def _bytes_feature(*values):
"""Convert unicode data to string for saving to TFRecords."""
values = [_convert_unicode_to_str(value) for value in values]
return tf.train.Feature(bytes_list=tf.train.BytesList(value=values))
def _float_feature(*values):
return tf.train.Feature(float_list=tf.train.FloatList(value=values))
def _int64_feature(*values):
return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
def _dtype_feature(ndarray):
assert isinstance(ndarray, np.ndarray)
dtype = ndarray.dtype
if dtype in (np.float32, np.longdouble):
return tf.train.Feature(float_list=tf.train.FloatList(value=ndarray))
if dtype == np.int64:
return tf.train.Feature(int64_list=tf.train.Int64List(value=ndarray))
return None
class TfRecordType(Enum):
"""Class which bounds Enum values to indicate tfrecord type."""
BYTES = 1
FLOAT = 2
INT64 = 3
DTYPE_FLOAT = 4
DTYPE_INT64 = 5
class DataConverter(object):
"""Converts a dataset to TFRecords."""
feature_to_type = {
'train/image_frame_name' : TfRecordType.BYTES,
'train/image_frame_width' : TfRecordType.INT64,
'train/image_frame_height' : TfRecordType.INT64,
'train/facebbx_x' : TfRecordType.INT64,
'train/facebbx_y' : TfRecordType.INT64,
'train/facebbx_w' : TfRecordType.INT64,
'train/facebbx_h' : TfRecordType.INT64,
'train/lefteyebbx_x' : TfRecordType.INT64,
'train/lefteyebbx_y' : TfRecordType.INT64,
'train/lefteyebbx_w' : TfRecordType.INT64,
'train/lefteyebbx_h' : TfRecordType.INT64,
'train/righteyebbx_x' : TfRecordType.INT64,
'train/righteyebbx_y' : TfRecordType.INT64,
'train/righteyebbx_w' : TfRecordType.INT64,
'train/righteyebbx_h' : TfRecordType.INT64,
'label/gaze_cam_x' : TfRecordType.FLOAT,
'label/gaze_cam_y' : TfRecordType.FLOAT,
'label/gaze_cam_z' : TfRecordType.FLOAT,
'label/gaze_screen_x' : TfRecordType.FLOAT,
'label/gaze_screen_y' : TfRecordType.FLOAT,
'train/landmarks' : TfRecordType.DTYPE_FLOAT,
'train/landmarks_occ' : TfRecordType.DTYPE_INT64,
'label/left_eye_status' : TfRecordType.BYTES,
'label/right_eye_status' : TfRecordType.BYTES,
'train/num_keypoints' : TfRecordType.INT64,
'train/tight_facebbx_x1' : TfRecordType.INT64,
'train/tight_facebbx_y1' : TfRecordType.INT64,
'train/tight_facebbx_x2' : TfRecordType.INT64,
'train/tight_facebbx_y2' : TfRecordType.INT64,
'label/hp_pitch': TfRecordType.FLOAT, # Degrees
'label/hp_yaw': TfRecordType.FLOAT, # Degrees
'label/hp_roll': TfRecordType.FLOAT, # Degrees
'label/theta': TfRecordType.FLOAT, # Radians
'label/phi': TfRecordType.FLOAT, # Radians
'label/mid_cam_x': TfRecordType.FLOAT, # Mid eye center - x
'label/mid_cam_y': TfRecordType.FLOAT, # Mid eye center - y
'label/mid_cam_z': TfRecordType.FLOAT, # Mid eye center - z
'label/lpc_cam_x': TfRecordType.FLOAT, # Left eye center - x
'label/lpc_cam_y': TfRecordType.FLOAT, # Left eye center - y
'label/lpc_cam_z': TfRecordType.FLOAT, # Left eye center - z
'label/rpc_cam_x': TfRecordType.FLOAT, # Right eye center - x
'label/rpc_cam_y': TfRecordType.FLOAT, # Right eye center - y
'label/rpc_cam_z': TfRecordType.FLOAT, # Right eye center - z
'train/valid_theta_phi' : TfRecordType.INT64, # 1 if valid, 0 otherwise
'label/theta_le' : TfRecordType.FLOAT, # In radians
'label/phi_le' : TfRecordType.FLOAT, # In radians
'label/theta_re' : TfRecordType.FLOAT, # In radians
'label/phi_re' : TfRecordType.FLOAT, # In radians
'label/theta_mid' : TfRecordType.FLOAT, # In radians
'label/phi_mid' : TfRecordType.FLOAT, # In radians
'label/head_pose_theta' : TfRecordType.FLOAT, # In radians
'label/head_pose_phi' : TfRecordType.FLOAT, # In radians
'train/eye_features' : TfRecordType.DTYPE_FLOAT,
'train/source' : TfRecordType.BYTES,
'train/num_eyes_detected': TfRecordType.INT64,
'train/norm_frame_path': TfRecordType.BYTES,
'label/norm_face_gaze_theta': TfRecordType.FLOAT, # In radians
'label/norm_face_gaze_phi': TfRecordType.FLOAT, # In radians
'label/norm_face_hp_theta': TfRecordType.FLOAT, # In radians
'label/norm_face_hp_phi': TfRecordType.FLOAT, # In radians
'label/norm_leye_gaze_theta': TfRecordType.FLOAT, # In radians
'label/norm_leye_gaze_phi': TfRecordType.FLOAT, # In radians
'label/norm_leye_hp_theta': TfRecordType.FLOAT, # In radians
'label/norm_leye_hp_phi': TfRecordType.FLOAT, # In radians
'label/norm_reye_gaze_theta': TfRecordType.FLOAT, # In radians
'label/norm_reye_gaze_phi': TfRecordType.FLOAT, # In radians
'label/norm_reye_hp_theta': TfRecordType.FLOAT, # In radians
'label/norm_reye_hp_phi': TfRecordType.FLOAT, # In radians
'train/norm_facebb_x': TfRecordType.INT64,
'train/norm_facebb_y': TfRecordType.INT64,
'train/norm_facebb_w': TfRecordType.INT64,
'train/norm_facebb_h': TfRecordType.INT64,
'train/norm_leyebb_x': TfRecordType.INT64,
'train/norm_leyebb_y': TfRecordType.INT64,
'train/norm_leyebb_w': TfRecordType.INT64,
'train/norm_leyebb_h': TfRecordType.INT64,
'train/norm_reyebb_x': TfRecordType.INT64,
'train/norm_reyebb_y': TfRecordType.INT64,
'train/norm_reyebb_w': TfRecordType.INT64,
'train/norm_reyebb_h': TfRecordType.INT64,
'train/norm_landmarks': TfRecordType.DTYPE_FLOAT,
'train/norm_per_oof': TfRecordType.FLOAT,
'train/landmarks_3D': TfRecordType.DTYPE_FLOAT
}
lm_pred_feature_to_type = {k : v for k, v in six.iteritems(feature_to_type)
if 'lefteyebbx' not in k and
'righteyebbx' not in k and
'facebbx' not in k and
'num_eyes_detected' not in k}
# Convert from enum type to read tfrecord type
enum_to_read_dict = {
TfRecordType.BYTES : tf.FixedLenFeature([], dtype=tf.string),
TfRecordType.FLOAT : tf.FixedLenFeature([], dtype=tf.float32),
TfRecordType.INT64 : tf.FixedLenFeature([], dtype=tf.int64),
TfRecordType.DTYPE_FLOAT : tf.VarLenFeature(tf.float32),
TfRecordType.DTYPE_INT64 : tf.VarLenFeature(tf.int64)
}
read_features_dict = {}
def __init__(self, tfrecord_files_path, use_lm_pred):
"""Initialize file paths and features.
Args:
tfrecord_files_path (path): Path to dump tfrecord files.
use_lm_pred (bool): True if using predicted landmarks.
"""
self._tfrecord_files_path = tfrecord_files_path
self._feature_to_type_dict = self.feature_to_type
if use_lm_pred:
self._feature_to_type_dict = self.lm_pred_feature_to_type
# Overwrite the tfrecords, cleanup
if os.path.exists(expand_path(tfrecord_files_path)):
            # Pass an argument list; a single command string is not executable with shell=False.
            subprocess.run(['rm', '-r', tfrecord_files_path], shell=False)
mkdir(tfrecord_files_path)
@classmethod
def enum_to_read_type(cls, enum_type):
"""Return tfrecord type based on Enum."""
if enum_type not in cls.enum_to_read_dict:
raise TypeError('Must be an instance of TfRecordType Enum')
return cls.enum_to_read_dict[enum_type]
@classmethod
def get_read_features(cls, use_lm_pred):
"""Return dict of features to values."""
feature_to_type_dict = cls.feature_to_type
if use_lm_pred:
feature_to_type_dict = cls.lm_pred_feature_to_type
if not cls.read_features_dict:
for feature in feature_to_type_dict.keys():
feature_type = feature_to_type_dict[feature]
cls.read_features_dict[feature] = cls.enum_to_read_type(feature_type)
return cls.read_features_dict
@classmethod
def read_tfrecords(cls, tfrecord_files, use_lm_pred, print_json=True):
"""Read tfrecord files as JSON."""
records = tf.data.TFRecordDataset(tfrecord_files)
parsed = records.map(
lambda x: tf.parse_single_example(x, cls.get_read_features(use_lm_pred)))
iterator = parsed.make_one_shot_iterator()
next_elem = iterator.get_next()
combined_data = []
feature_to_type_dict = cls.feature_to_type
if use_lm_pred:
feature_to_type_dict = cls.lm_pred_feature_to_type
with tf.train.MonitoredTrainingSession() as sess:
while not sess.should_stop():
data = dict(sess.run(next_elem))
for key, val in six.iteritems(data):
if feature_to_type_dict[key] in (
TfRecordType.DTYPE_FLOAT,
TfRecordType.DTYPE_INT64
):
data[key] = (data[key].values).tolist()
elif isinstance(val, (np.float32, np.longdouble)):
# Keeps around 7 digits of precision of float32
py_val = str(np.asarray(val, dtype=np.longdouble))
data[key] = float(py_val)
elif isinstance(val, np.generic):
data[key] = np.asscalar(val)
combined_data.append(data)
if print_json:
print(json.dumps(combined_data, indent=4, sort_keys=True))
return combined_data
def write_feature(self, feature, val, feature_dict):
"""Write dict feature and value into tfrecord format."""
feature_type = self._feature_to_type_dict[feature]
if feature_type == TfRecordType.BYTES:
feature_dict[feature] = _bytes_feature(val)
elif feature_type == TfRecordType.FLOAT:
feature_dict[feature] = _float_feature(val)
elif feature_type == TfRecordType.INT64:
feature_dict[feature] = _int64_feature(val)
elif feature_type in (TfRecordType.DTYPE_FLOAT, TfRecordType.DTYPE_INT64):
feature_dict[feature] = _dtype_feature(val)
def write_user_tfrecords(self, users_dict):
"""Write collected data dict into tfrecords."""
for user in users_dict.keys():
tfrecord_file_path = expand_path(f"{self._tfrecord_files_path}/{user}.tfrecords")
user_writer = tf.python_io.TFRecordWriter(tfrecord_file_path)
for region in users_dict[user].keys():
for frame in users_dict[user][region].keys():
frame_features = {}
frame_data_dict = users_dict[user][region][frame]
for feature in self._feature_to_type_dict.keys():
self.write_feature(feature, frame_data_dict[feature], frame_features)
example = tf.train.Example(features=tf.train.Features(feature=frame_features))
user_writer.write(example.SerializeToString())
user_writer.close()
def write_combined_tfrecords(self, combined_dict, test_users, validation_users, train_users):
"""Write collected data after splitting into test, train, validation into tfrecords."""
def _write_category_tfrecords(combined_dict, category_users, tfrecord_file_path):
category_writer = tf.python_io.TFRecordWriter(tfrecord_file_path)
for user in category_users:
for region in combined_dict[user].keys():
for frame in combined_dict[user][region].keys():
frame_features = {}
frame_data_dict = combined_dict[user][region][frame]
for feature in self._feature_to_type_dict.keys():
self.write_feature(feature, frame_data_dict[feature], frame_features)
example = tf.train.Example(
features=tf.train.Features(feature=frame_features))
category_writer.write(example.SerializeToString())
category_writer.close()
test_path = os.path.join(self._tfrecord_files_path, 'test.tfrecords')
validation_path = os.path.join(self._tfrecord_files_path, 'validate.tfrecords')
train_path = os.path.join(self._tfrecord_files_path, 'train.tfrecords')
_write_category_tfrecords(combined_dict, test_users, test_path)
_write_category_tfrecords(combined_dict, validation_users, validation_path)
_write_category_tfrecords(combined_dict, train_users, train_path)
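if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): serialize a single toy frame
    # into a tf.train.Example with write_feature. The output directory, frame path and
    # feature values are illustrative only.
    converter = DataConverter('/tmp/example_tfrecords', use_lm_pred=False)
    frame_features = {}
    converter.write_feature('train/image_frame_name', b'/tmp/frames/frame_0001.png',
                            frame_features)
    converter.write_feature('label/gaze_cam_x', 12.3, frame_features)
    converter.write_feature('train/landmarks', np.zeros(8, dtype=np.float32), frame_features)
    example = tf.train.Example(features=tf.train.Features(feature=frame_features))
    print("serialized example size: {} bytes".format(len(example.SerializeToString())))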
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataio/data_converter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manage generation of normalized data and prepare data for gazenet inference."""
from nvidia_tao_tf1.cv.common.dataio.custom_data_converter import CustomDataConverter
from nvidia_tao_tf1.cv.common.dataio.custom_jsonlabels_strategy import CustomJsonLabelsStrategy
from nvidia_tao_tf1.cv.common.dataio.gaze_custom_set_strategy import GazeCustomSetStrategy
from nvidia_tao_tf1.cv.common.dataio.utils import PipelineReporter
class CustomerDataManager(object):
"""Manage the flow of data source files to tfrecord generation."""
def __init__(self,
data_root_path,
norm_data_folder_name='norm_data',
data_strategy_type='json',
landmarks_folder_name=None,
sdklabels_folder_name=None,
save_images=True):
"""Initialize parameters.
Args:
data_root_path (str): root path of data
norm_data_folder_name (str): Normalized data folder name.
data_strategy_type (str): specify the data labeling resources.
landmarks_folder_name (str): Folder name to obtain predicted fpe landmarks from.
sdklabels_folder_name (str): Folder name to obtain predicted nvhelnet sdk labels from.
save_images (bool): Flag to determine if want to save normalized faces and images.
"""
self._data_root_path = data_root_path
self._data_strategy_type = data_strategy_type
self._landmarks_folder_name = landmarks_folder_name
self._sdklabels_folder_name = sdklabels_folder_name
self._norm_data_folder_name = norm_data_folder_name
self._save_images = save_images
self._frame_dict = None
self._set_strategy = GazeCustomSetStrategy(
data_root_path=self._data_root_path,
norm_data_folder_name=self._norm_data_folder_name,
data_strategy_type=self._data_strategy_type,
landmarks_folder_name=self._landmarks_folder_name)
self._strategy_type, self._paths = self._set_strategy.get_source_paths()
self._logger = PipelineReporter(log_path=self._paths.error_path,
script_name='custom_data_manager',
set_id="")
strategy = None
if self._strategy_type == 'json':
strategy = CustomJsonLabelsStrategy(
data_root_path=self._data_root_path,
norm_data_folder_name=self._norm_data_folder_name,
set_strategy=self._set_strategy,
save_images=self._save_images,
logger=self._logger
)
self._frame_dict = strategy.get_data()
else:
self._logger.add_error('No such strategy {}'.format(self._strategy_type))
raise ValueError('Invalid strategy.')
converter = CustomDataConverter(use_lm_pred=self._landmarks_folder_name is not None)
self._tfrecords_data = converter.generate_frame_tfrecords(self._frame_dict)
def get_data(self):
"""get frame dictionary data."""
return self._frame_dict
def get_tfrecords_data(self):
"""get tfrecords data."""
return self._tfrecords_data
def get_data_number(self):
"""get data number."""
return len(self._frame_dict)
def get_frame_dict(self):
"""get frame dictionary."""
return self._frame_dict
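if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): the root path below is
    # hypothetical and must point at a customer gaze set laid out as expected by
    # GazeCustomSetStrategy with 'json' labels.
    manager = CustomerDataManager(
        data_root_path='/workspace/gaze-custom-set',
        data_strategy_type='json',
        save_images=True)
    print('number of frames: {}'.format(manager.get_data_number()))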
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataio/custom_data_manager.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TLT augmentation library."""
import cv2
import numpy as np
def aug_hsv(img, h=0.1, s=1.5, v=1.5, depth=8):
"""Apply HSV augmentation.
Args:
img: RGB image in numpy array
h (float): Change hue at most h * 180
s, v (float): change sv at most s, v, 1/s, 1/v times
depth(int): Number of bits per pixel per channel of the image.
Returns:
aug_img: img after augmentation
"""
def rand_inv(x):
return x if np.random.rand() < 0.5 else 1.0 / x
sv_mul = np.random.rand(2) * np.array([s - 1.0, v - 1.0]) + 1.0
sv_mul = np.array(list(map(rand_inv, sv_mul))).reshape(1, 1, 2)
if depth not in [8, 16]:
raise ValueError(
f"Unsupported image depth: {depth}, should be 8 or 16."
)
hsv = cv2.cvtColor(
np.clip(img, 0, 2 ** depth - 1).astype(np.float32),
cv2.COLOR_RGB2HSV
)
hsv[..., 1:] *= sv_mul
hsv[..., 0] += (np.random.rand() * 2.0 - 1.0) * h * 180
    hsv = np.round(hsv).astype(int)
hsv[..., 0] %= 180
hsv[..., 1:] = np.clip(hsv[..., 1:], 0, (2.**depth - 1))
return cv2.cvtColor(hsv.astype(np.float32), cv2.COLOR_HSV2RGB)
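# A minimal usage sketch, assuming a synthetic random 8-bit RGB frame; the helper name
# `_example_aug_hsv` is illustrative only and not part of the TAO augmentation API.
def _example_aug_hsv():
    """Sketch showing how aug_hsv might be exercised on dummy data."""
    np.random.seed(0)
    img = np.random.randint(0, 256, size=(64, 64, 3)).astype(np.float32)
    aug_img = aug_hsv(img, h=0.1, s=1.5, v=1.5, depth=8)
    # The augmented frame keeps the input resolution and channel count.
    assert aug_img.shape == img.shape
    return aug_img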
def aug_flip(img, boxes, ftype=0):
"""Apply flip.
Args:
img: RGB image in numpy array
boxes: (N, 4) numpy arrays (xmin, ymin, xmax, ymax) containing bboxes. {x,y}{min,max} is
in [0, 1] range.
ftype (0 or 1): 0: vertical flip. 1: horizontal flip
Returns:
aug_img: img after flip
aug_boxes: boxes after flip
"""
if ftype == 0:
ymin = 1.0 - boxes[:, 3]
ymax = 1.0 - boxes[:, 1]
xmin = boxes[:, 0]
xmax = boxes[:, 2]
elif ftype == 1:
ymin = boxes[:, 1]
ymax = boxes[:, 3]
xmin = 1.0 - boxes[:, 2]
xmax = 1.0 - boxes[:, 0]
else:
raise ValueError("Use ftype 0 for vertical flip and 1 for horizontal flip.")
return cv2.flip(img, ftype), np.stack([xmin, ymin, xmax, ymax], axis=-1)
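# A minimal worked sketch for aug_flip, assuming one normalized box on a dummy frame;
# `_example_aug_flip` is illustrative only.
def _example_aug_flip():
    """Sketch showing that a horizontal flip mirrors box x-coordinates."""
    img = np.zeros((100, 200, 3), dtype=np.uint8)
    boxes = np.array([[0.1, 0.2, 0.3, 0.4]])  # (xmin, ymin, xmax, ymax) in [0, 1]
    flipped_img, flipped_boxes = aug_flip(img, boxes, ftype=1)
    # Horizontal flip: new xmin = 1 - xmax, new xmax = 1 - xmin; y stays unchanged.
    assert np.allclose(flipped_boxes, [[0.7, 0.2, 0.9, 0.4]])
    return flipped_img, flipped_boxes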
def aug_jitter(img, boxes, jitter=0.3, resize_ar=None):
"""Apply YOLO style jitter.
See https://stackoverflow.com/questions/55038726
Args:
img: RGB image in numpy array
boxes: (N, 4) numpy arrays (xmin, ymin, xmax, ymax) containing bboxes. {x,y}{min,max} is
in [0, 1] range.
jitter (0, 1): jitter value
resize_ar (float): network input width / height. Jitter will try to mimic this
Returns:
aug_img: img after jitter
aug_boxes: boxes after jitter
"""
# -jitter ~ jitter rand
dl, dt, dr, db = np.minimum((np.random.rand(4) - 0.5) * 2 * jitter, 0.8)
# make sure the result image is not too small
if dl + dr > 0.8:
dr = min(dr, 0.4)
dl = min(dl, 0.4)
if dt + db > 0.8:
dt = min(dt, 0.4)
db = min(db, 0.4)
h, w, _ = img.shape
dl *= w
dr *= w
dt *= h
db *= h
new_width = w - dl - dr
new_height = h - dt - db
if resize_ar is not None:
if w / float(h) > resize_ar:
# first try to decrease new_width
ar_w = h * resize_ar
dw = w - ar_w
# narrow from two sides
l_shift = -min(dl, 0)
r_shift = -min(dr, 0)
lr_shift = min(l_shift, r_shift, dw / 2.0)
dl += lr_shift
dr += lr_shift
dw -= 2 * lr_shift
if dl < 0 < dw:
l_shift = min(dw, -dl)
dl += l_shift
dw -= l_shift
if dr < 0 < dw:
r_shift = min(dw, -dr)
dr += r_shift
dw -= r_shift
# if doesn't work, increase new_height
if dw > 0:
dh = dw / resize_ar
dt -= dh / 2.0
db -= dh / 2.0
else:
# first try to decrease new_height
ar_h = w / resize_ar
dh = h - ar_h
# narrow from two sides
t_shift = -min(dt, 0)
b_shift = -min(db, 0)
tb_shift = min(t_shift, b_shift, dh / 2.0)
dt += tb_shift
db += tb_shift
dh -= 2 * tb_shift
if dt < 0 < dh:
t_shift = min(dh, -dt)
dt += t_shift
dh -= t_shift
if db < 0 < dh:
                b_shift = min(dh, -db)
db += b_shift
dh -= b_shift
# If doesn't work, increase new_width
if dh > 0:
dw = dh * resize_ar
dl -= dw / 2.0
dr -= dw / 2.0
new_width = w - dl - dr
new_height = h - dt - db
# new image left top corner [dl, dt], height / width [new_height, new_width]
# old image left top corner [0, 0], height/width [h, w]
dl = int(round(dl))
dt = int(round(dt))
new_height = int(round(new_height))
new_width = int(round(new_width))
joint_l_on_img = max(dl, 0)
joint_t_on_img = max(dt, 0)
joint_r_on_img = min(new_width + dl, w)
joint_b_on_img = min(new_height + dt, h)
    new_img = np.zeros((new_height, new_width, 3), dtype=float)
new_img += np.mean(img, axis=(0, 1), keepdims=True)
new_img[joint_t_on_img - dt:joint_b_on_img - dt,
joint_l_on_img - dl:joint_r_on_img - dl, :] = \
        img[joint_t_on_img:joint_b_on_img, joint_l_on_img:joint_r_on_img, :].astype(float)
xmin = (boxes[:, 0] * w - dl) / new_width
xmax = (boxes[:, 2] * w - dl) / new_width
ymin = (boxes[:, 1] * h - dt) / new_height
ymax = (boxes[:, 3] * h - dt) / new_height
return new_img, np.stack([xmin, ymin, xmax, ymax], axis=-1)
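# A minimal usage sketch for aug_jitter, assuming a dummy gray frame, a single centered
# box and a square target aspect ratio; `_example_aug_jitter` is illustrative only.
def _example_aug_jitter():
    """Sketch showing aug_jitter with an aspect-ratio target."""
    np.random.seed(0)
    img = np.full((120, 160, 3), 128, dtype=np.uint8)
    boxes = np.array([[0.25, 0.25, 0.75, 0.75]])
    aug_img, aug_boxes = aug_jitter(img, boxes, jitter=0.3, resize_ar=1.0)
    # Boxes stay normalized, now relative to the jittered (padded/cropped) canvas.
    assert aug_img.ndim == 3 and aug_boxes.shape == (1, 4)
    return aug_img, aug_boxes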
def aug_letterbox_resize(img, boxes, resize_shape=(512, 512)):
"""Apply letter box. resize image to resize_shape, not changing aspect ratio.
Args:
img: RGB image in numpy array
boxes: (N, 4) numpy arrays (xmin, ymin, xmax, ymax) containing bboxes. {x,y}{min,max} is
in [0, 1] range.
resize_shape (int, int): (w, h) of new image
Returns:
aug_img: img after resize
aug_boxes: boxes after resize
"""
    new_img = np.zeros((resize_shape[1], resize_shape[0], 3), dtype=float)
new_img += np.mean(img, axis=(0, 1), keepdims=True)
h, w, _ = img.shape
ratio = min(float(resize_shape[1]) / h, float(resize_shape[0]) / w)
new_h = int(round(ratio * h))
new_w = int(round(ratio * w))
l_shift = (resize_shape[0] - new_w) // 2
t_shift = (resize_shape[1] - new_h) // 2
    img = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
    new_img[t_shift: t_shift+new_h, l_shift: l_shift+new_w] = img.astype(float)
xmin = (boxes[:, 0] * new_w + l_shift) / float(resize_shape[0])
xmax = (boxes[:, 2] * new_w + l_shift) / float(resize_shape[0])
ymin = (boxes[:, 1] * new_h + t_shift) / float(resize_shape[1])
ymax = (boxes[:, 3] * new_h + t_shift) / float(resize_shape[1])
return new_img, np.stack([xmin, ymin, xmax, ymax], axis=-1)
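# A minimal worked sketch for aug_letterbox_resize, assuming a 200x100 dummy frame padded
# into a 128x128 canvas; `_example_aug_letterbox_resize` is illustrative only.
def _example_aug_letterbox_resize():
    """Sketch showing letterbox resize and the resulting box re-normalization."""
    img = np.full((100, 200, 3), 64, dtype=np.uint8)
    boxes = np.array([[0.0, 0.0, 1.0, 1.0]])
    aug_img, aug_boxes = aug_letterbox_resize(img, boxes, resize_shape=(128, 128))
    # The frame scales by 0.64 to 128x64 and is padded top/bottom by 32 px each.
    assert aug_img.shape == (128, 128, 3)
    assert np.allclose(aug_boxes, [[0.0, 0.25, 1.0, 0.75]])
    return aug_img, aug_boxes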
def aug_random_crop(img, boxes, crop_ar, min_box_ratio):
"""Apply random crop according to crop_ar.
Args:
img: RGB image in numpy array
boxes: (N, 4) numpy arrays (xmin, ymin, xmax, ymax) containing bboxes. {x,y}{min,max} is
in [0, 1] range.
crop_ar: output aspect ratio
        min_box_ratio: minimum ratio of the crop size relative to the original image size
Returns:
        aug_img: img after crop
        aug_boxes: boxes after crop
"""
h, w, _ = img.shape
# let's decide crop box size first
crop_ratio = np.random.rand() * (1.0 - min_box_ratio) + min_box_ratio
if w / float(h) > crop_ar:
crop_h = int(round(h * crop_ratio))
crop_w = int(round(crop_h * crop_ar))
else:
crop_w = int(round(w * crop_ratio))
crop_h = int(round(crop_w / crop_ar))
# get crop box location
t_shift = np.random.randint(h - crop_h + 1)
l_shift = np.random.randint(w - crop_w + 1)
    new_img = img[t_shift: t_shift+crop_h, l_shift: l_shift+crop_w].astype(float)
xmin = (boxes[:, 0] * w - l_shift) / float(crop_w)
xmax = (boxes[:, 2] * w - l_shift) / float(crop_w)
ymin = (boxes[:, 1] * h - t_shift) / float(crop_h)
ymax = (boxes[:, 3] * h - t_shift) / float(crop_h)
return new_img, np.stack([xmin, ymin, xmax, ymax], axis=-1)
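# A minimal usage sketch for aug_random_crop, assuming a dummy 640x480 frame and a square
# crop covering at least half of the image; `_example_aug_random_crop` is illustrative only.
def _example_aug_random_crop():
    """Sketch showing a random square crop."""
    np.random.seed(0)
    img = np.full((480, 640, 3), 32, dtype=np.uint8)
    boxes = np.array([[0.4, 0.4, 0.6, 0.6]])
    aug_img, aug_boxes = aug_random_crop(img, boxes, crop_ar=1.0, min_box_ratio=0.5)
    # With crop_ar=1.0 the crop is square and, with min_box_ratio=0.5, at least 240 px tall.
    assert aug_img.shape[0] == aug_img.shape[1] >= 240
    return aug_img, aug_boxes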
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataio/augmentation_lib.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper for retrieving gaze vector and theta phi from existing results."""
import cv2
import numpy as np
'''
Terminologies:
WCS: World (object) coordinate system
CCS: Camera coordinate system
ICS: Image coordinate system
'''
def populate_theta_phi(info, is_valid_theta_phi):
"""Populate 1D array with scalar."""
if not is_valid_theta_phi or info is None:
return -1
assert info.shape == (1,)
    return info.item()
def populate_gaze_info(np_2D_arr, is_valid_theta_phi):
"""Populate array with scalar np values."""
if not is_valid_theta_phi or np_2D_arr is None:
return [-1] * 3
assert isinstance(np_2D_arr, np.ndarray)
json_list = []
for arr in np_2D_arr:
assert isinstance(arr, np.ndarray) and arr.shape[0] == 1
        json_list.append(arr.item())
return json_list
def populate_head_norm_bbinfo(info, ind, is_valid_theta_phi):
"""Populate with scalar value."""
if not is_valid_theta_phi or info is None:
return -1
return info[ind]
def populate_head_norm_listinfo(info, typ, is_valid_theta_phi):
"""Populate array with scalar np values."""
if not is_valid_theta_phi or info is None:
if typ == '3D':
return -1.0 * np.ones((114,), dtype=np.float32)
if typ == '2D':
return -1.0 * np.ones((208,), dtype=np.float32)
if typ == 'cnv_mat':
return -1.0 * np.ones((9,), dtype=np.float32)
return np.float32(info)
def populate_head_norm_path(info, is_valid_theta_phi):
"""Populate string."""
if not is_valid_theta_phi or info is None:
return ''
return info
def populate_head_norm_float(info, is_valid_theta_phi):
"""Populate np float."""
if not is_valid_theta_phi or info is None:
return -1.0
return np.float32(info)
def calculate_reprojection_error(ref, reproj):
"""Get reprojection error.
Args:
ref (np.ndarray): Original 3D points.
reproj (np.ndarray): Projected 3D points onto image plane.
Returns:
reprojection error (float):
            The mean Euclidean distance between the projected and the original 2D points.
"""
if isinstance(ref, list):
ref = np.asarray(ref)
if isinstance(reproj, list):
reproj = np.asarray(reproj)
diff = ref[:2, :] - reproj[:2, :]
return np.mean(np.sqrt(diff[0, :] ** 2 + diff[1, :] ** 2))
def compute_gaze_vector_from_theta_phi(theta, phi):
"""Get forward facing gaze vector [x, y, z] in CCS from theta/beta and alpha/phi angles.
Args:
theta (float): Gaze pitch in radians.
phi (float): Gaze yaw in radians.
Returns:
gaze_vec (np.ndarray): Forward facing gaze vector [x, y, z] in CCS.
"""
gaze_vec = np.zeros(3)
gaze_vec[0] = -np.cos(theta) * np.sin(phi)
gaze_vec[1] = -np.sin(theta)
gaze_vec[2] = -np.cos(theta) * np.cos(phi)
return gaze_vec
def compute_theta_phi_from_gaze_vector(x, y, z):
"""Get theta phi angles from forward facing gaze vector in CCS.
Using appearance-based gaze estimation method in the Wild (MPIIGaze).
Args:
x (float): CCS x-coord of forward facing gaze vector.
y (float): CCS y-coord of forward facing gaze vector.
z (float): CCS z-coord of forward facing gaze vector.
Returns:
theta (float): Gaze pitch in radians.
phi (float): Gaze yaw in radians.
"""
theta = np.arcsin(-y) # Range [-pi/2, pi/2]
phi = np.arctan2(-x, -z) # Range [-pi, pi], not the range [-pi/2, pi/2] of arctan
return np.reshape(theta, (1,)), np.reshape(phi, (1,))
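# A minimal worked check, assuming arbitrary illustrative angles: converting (theta, phi)
# to a gaze vector and back should round-trip; `_example_theta_phi_round_trip` is not part
# of the production code path.
def _example_theta_phi_round_trip():
    """Sketch verifying gaze vector <-> (theta, phi) consistency."""
    theta, phi = 0.2, -0.4
    gaze_vec = compute_gaze_vector_from_theta_phi(theta, phi)
    theta_rt, phi_rt = compute_theta_phi_from_gaze_vector(*gaze_vec)
    assert np.allclose([theta_rt[0], phi_rt[0]], [theta, phi])
    return gaze_vec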
def compute_PoR_from_theta_phi(theta, phi, pc_cam_mm, R, T):
"""Get the intersection of gaze vector (generated using theta & phi) with the monitor plane.
TODO: consider normalized camera space (scaling, divide by Z).
Args:
theta (float): Gaze pitch in radians.
phi (float): Gaze yaw in radians.
pc_cam_mm (np.ndarray): 3D pupil center coordinates in camera space in mm.
R (np.ndarray): Rotation matrix computed from mirror calibration.
T (np.ndarray): Translation vector computed from mirror calibration.
Returns:
PoR_x (np.ndarray): x-coord of intersection point in camera space in mm.
PoR_y (np.ndarray): y-coord of intersection point in camera space in mm.
PoR_z (np.ndarray): z-coord of intersection point in camera space in mm.
"""
screenNormal = R
screenNormal = screenNormal[:, 2]
screenOrigin = T
gaze_vec = compute_gaze_vector_from_theta_phi(theta, phi)
dNormalGazeVec = np.dot(screenNormal, gaze_vec)
d = np.dot(screenNormal, screenOrigin)
dNormalEyeCenter = np.dot(screenNormal, pc_cam_mm)
t = (d - dNormalEyeCenter) / dNormalGazeVec
gaze_vec = np.expand_dims(gaze_vec, axis=1)
intersectPoint = pc_cam_mm + t * gaze_vec
PoR_x = intersectPoint[0]
PoR_y = intersectPoint[1]
PoR_z = intersectPoint[2]
return PoR_x, PoR_y, PoR_z
def compute_PoR_from_gaze_vector(gaze_vec, pc_cam_mm, R, T):
"""Get the intersection of gaze vector (generated using theta & phi) with the monitor plane.
TODO: consider normalized camera space (scaling, divide by Z).
Args:
gaze vec (np.ndarray): 3D gaze vector.
pc_cam_mm (np.ndarray): 3D pupil center coordinates in camera space in mm.
R (np.ndarray): Rotation matrix computed from mirror calibration.
T (np.ndarray): Translation vector computed from mirror calibration.
Returns:
PoR_x (np.ndarray): x-coord of intersection point in camera space in mm.
PoR_y (np.ndarray): y-coord of intersection point in camera space in mm.
PoR_z (np.ndarray): z-coord of intersection point in camera space in mm.
"""
screenNormal = R
screenNormal = screenNormal[:, 2]
screenOrigin = T
dNormalGazeVec = np.dot(screenNormal, gaze_vec)
d = np.dot(screenNormal, screenOrigin)
dNormalEyeCenter = np.dot(screenNormal, pc_cam_mm)
t = (d - dNormalEyeCenter) / dNormalGazeVec
intersectPoint = pc_cam_mm + t * gaze_vec
PoR_x = intersectPoint[0]
PoR_y = intersectPoint[1]
PoR_z = intersectPoint[2]
return PoR_x, PoR_y, PoR_z
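# A minimal worked sketch of the ray-plane intersection, assuming an identity mirror
# rotation, a screen plane through the camera origin and a pupil 600 mm in front of the
# camera looking straight back at it; all values are illustrative, not calibration data.
def _example_por_intersection():
    """Sketch verifying that the gaze ray hits the assumed screen plane at the origin."""
    R = np.eye(3)
    T = np.zeros((3, 1))
    pupil_cam_mm = np.array([[0.0], [0.0], [600.0]])
    gaze_vec = np.array([[0.0], [0.0], [-1.0]])
    por_x, por_y, por_z = compute_PoR_from_gaze_vector(gaze_vec, pupil_cam_mm, R, T)
    assert np.allclose([por_x, por_y, por_z], 0.0)
    return por_x, por_y, por_z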
def normalizeFullFrame(img, face_cam, ec_pxs, rot_mat, face_gaze_vec, landmarks, cam, dist,
method='modified', scale_factor=2.0):
"""Perform face normalization with full frame warping.
Args:
img (np.ndarray): input frame.
face_cam (np.ndarray): face center in camera space in mm.
ec_pxs (np.ndarray): left/right eye centers in pixel coordinates.
rot_mat (np.ndarray): head pose rotation matrix obtained from PnP.
face_gaze_vec (np.ndarray): 3D gaze vector for face.
landmarks (np.ndarray): input 2D landmarks.
cam (np.ndarray): camera calibration matrix.
dist (np.ndarray): lens distortion coefficients.
method (str): normalization method.
scale_factor (float): face size scaling factor.
Returns:
img_warped (np.ndarray): output warped frame.
gaze_theta_new (np.float): face normalized gaze pitch.
gaze_phi_new (np.float): face normalized gaze yaw.
hp_theta_new (np.float): face normalized hp pitch.
hp_phi_new (np.float): face normalized hp yaw.
face_bb (np.array): face rectangle on normalized frame.
leye_bb (np.array): left eye rectangle on normalized frame.
reye_bb (np.array): right eye rectangle on normalized frame.
landmarks_norm (np.ndarray): normalized 2D landmarks.
cnvMat (np.ndarray): warping conversion matrix.
"""
focal_new = 960 * scale_factor
distance_new = 600.0
imageWidth = img.shape[1]
imageHeight = img.shape[0]
roiSize = (imageWidth, imageHeight)
img_u = cv2.undistort(img, cam, dist)
distance = np.linalg.norm(face_cam)
z_scale = distance_new / distance
cam_new = np.array([
[focal_new, 0, roiSize[0] / 2],
[0, focal_new, roiSize[1] / 2],
[0, 0, 1.0], ])
scaleMat = np.array([
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, z_scale], ])
hRx = rot_mat[:, 0]
forward = (face_cam / distance).reshape(3)
down = np.cross(forward, hRx)
down /= np.linalg.norm(down)
right = np.cross(down, forward)
right /= np.linalg.norm(right)
rotMat = np.c_[right, down, forward].T
# Conversion matrix M.
cnvMat = np.dot(scaleMat, rotMat)
cnvMat = np.float64(cnvMat)
# Warping matrix, W.
warpMat = np.dot(np.dot(cam_new, cnvMat), np.linalg.inv(cam))
img_warped = cv2.warpPerspective(img_u, warpMat, roiSize)
# Face bounding box (upper left corner (x,y), width, height).
face_bb = [int(imageWidth/2 - 112*scale_factor), int(imageHeight/2 - 112*scale_factor),
int(224*scale_factor), int(224*scale_factor)]
# Warp 2D landmarks.
landmarks = landmarks.astype(float)
landmarks = landmarks.reshape(landmarks.shape[0], 1, 2)
undist_landmarks = cv2.undistortPoints(landmarks, cam, dist, P=cam)
landmarks_norm = cv2.perspectiveTransform(undist_landmarks, warpMat)
landmarks_norm = np.reshape(landmarks_norm, (landmarks_norm.shape[0], landmarks_norm.shape[2]))
# Warp 2D eye centers.
ec_pxs = ec_pxs.astype(float)
ec_pxs = ec_pxs.reshape(ec_pxs.shape[0], 1, 2)
undist_ec_pxs = cv2.undistortPoints(ec_pxs, cam, dist, P=cam)
ec_pxs_norm = cv2.perspectiveTransform(undist_ec_pxs, warpMat)
ec_pxs_norm = np.reshape(ec_pxs_norm, (ec_pxs_norm.shape[0], ec_pxs_norm.shape[2]))
leye_center = ec_pxs_norm[0]
reye_center = ec_pxs_norm[1]
leye_bb = [int(leye_center[0] - 40*scale_factor), int(leye_center[1] - 40*scale_factor),
int(80*scale_factor), int(80*scale_factor)]
reye_bb = [int(reye_center[0] - 40*scale_factor), int(reye_center[1] - 40*scale_factor),
int(80*scale_factor), int(80*scale_factor)]
if leye_bb[0] < 0 or leye_bb[1] < 0 or scale_factor <= 0 or \
(leye_bb[0] + leye_bb[2]) > imageWidth or \
(leye_bb[1] + leye_bb[3]) > imageHeight:
leye_bb = None
if reye_bb[0] < 0 or reye_bb[1] < 0 or scale_factor <= 0 or \
(reye_bb[0] + reye_bb[2]) > imageWidth or \
(reye_bb[1] + reye_bb[3]) > imageHeight:
reye_bb = None
# Validate norm image orientation (ie, chin is lower than eye center).
mideyes_center = (leye_center + reye_center) / 2.0
chin = landmarks_norm[8]
if chin[1] <= (mideyes_center[1] + 100 * scale_factor):
img_warped = None
# Calculate head orientation.
dY = chin[1] - mideyes_center[1]
dX = abs(chin[0] - mideyes_center[0])
angle = np.degrees(np.arctan2(dX, dY))
THR = 30
# Validate head orientation.
# Normal < THR, horizontal flip < (90-THR), vertical flip < (180-THR).
if angle > (90-THR):
print('Invalid head orientation: ', angle)
if angle > (180 - THR):
print('Invalid head orientation: ', angle, '. Flipped vertically.')
else:
print('Invalid head orientation: ', angle, '. Flipped horizontally.')
img_warped = None
if method == 'original':
cnvMat = np.dot(scaleMat, rotMat)
else:
cnvMat = rotMat
# Calculate new HP vector.
hR_new = np.dot(cnvMat, rot_mat)
Zv = hR_new[:, 2]
# Reverse direction as we want head pose z pointing positive Z direction.
# (towards the back of the head)! Also, convention in all research papers.
hp_theta_new, hp_phi_new = compute_theta_phi_from_gaze_vector(-Zv[0], -Zv[1], -Zv[2])
# Calculate new gaze GT vector: [g_n = M * g_r] where M is only rotation.
face_gaze_vec_new = np.dot(cnvMat, face_gaze_vec)
gaze_theta_new, gaze_phi_new = compute_theta_phi_from_gaze_vector(face_gaze_vec_new[0],
face_gaze_vec_new[1],
face_gaze_vec_new[2])
return img_warped, gaze_theta_new, gaze_phi_new, hp_theta_new, hp_phi_new, face_bb, leye_bb, \
reye_bb, landmarks_norm, cnvMat
def normalizeFace(img, face_cam, rot_mat, face_gaze_vec, cam, dist,
method='modified', imageWidth=224, imageHeight=224):
"""Perform face normalization.
Args:
img (np.ndarray): input frame.
face_cam (np.ndarray): face center in camera space in mm.
rot_mat (np.ndarray): head pose rotation matrix obtained from PnP.
face_gaze_vec (np.ndarray): 3D gaze vector for face.
cam (np.ndarray): camera calibration matrix.
dist (np.ndarray): lens distortion coefficients.
method (str): normalization method.
imageWidth (int): output image width.
imageHeight (int): output image height.
Returns:
img_warped (np.ndarray): output warped frame.
gaze_theta_new (np.float): face normalized gaze pitch.
gaze_phi_new (np.float): face normalized gaze yaw.
hp_theta_new (np.float): face normalized hp pitch.
hp_phi_new (np.float): face normalized hp yaw.
        per_oof (float): fraction of the normalized face crop filled with out-of-frame pixels.
cnvMat (np.ndarray): warping conversion matrix.
"""
focal_new = 960 * imageWidth / 224
distance_new = 600.0
roiSize = (imageWidth, imageHeight)
img_u = cv2.undistort(img, cam, dist)
distance = np.linalg.norm(face_cam)
z_scale = distance_new / distance
cam_new = np.array([
[focal_new, 0, roiSize[0] / 2],
[0, focal_new, roiSize[1] / 2],
[0, 0, 1.0], ])
scaleMat = np.array([
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, z_scale], ])
hRx = rot_mat[:, 0]
forward = (face_cam / distance).reshape(3)
down = np.cross(forward, hRx)
down /= np.linalg.norm(down)
right = np.cross(down, forward)
right /= np.linalg.norm(right)
rotMat = np.c_[right, down, forward].T
# Conversion matrix M.
cnvMat = np.dot(scaleMat, rotMat)
cnvMat = np.float64(cnvMat)
# Warping matrix W.
warpMat = np.dot(np.dot(cam_new, cnvMat), np.linalg.inv(cam))
img_warped = cv2.warpPerspective(img_u, warpMat, roiSize)
# Calculate percentage of face which is out of frame.
per_oof = round(np.count_nonzero(img_warped == 0) / (1.0 * roiSize[0] * roiSize[1]), 2)
if method == 'original':
cnvMat = np.dot(scaleMat, rotMat)
else:
cnvMat = rotMat
# Calculate new HP vector.
hR_new = np.dot(cnvMat, rot_mat)
Zv = hR_new[:, 2]
# Reverse direction as we want head pose z pointing positive Z direction.
# (towards the back of the head)! Also, convention in all research papers.
hp_theta_new, hp_phi_new = compute_theta_phi_from_gaze_vector(-Zv[0], -Zv[1], -Zv[2])
# Calculate new gaze GT vector: [g_n = M * g_r] where M is only rotation.
face_gaze_vec_new = np.dot(cnvMat, face_gaze_vec)
gaze_theta_new, gaze_phi_new = compute_theta_phi_from_gaze_vector(face_gaze_vec_new[0],
face_gaze_vec_new[1],
face_gaze_vec_new[2])
return img_warped, gaze_theta_new, gaze_phi_new, hp_theta_new, hp_phi_new, per_oof, cnvMat
def normalizeEye(img, eye_cam, rot_mat, gaze_vec, cam, dist,
method='modified', imageWidth=60, imageHeight=36):
"""Perform eye normalization.
Args:
img (np.ndarray): input frame.
eye_cam (np.ndarray): eye center in camera space in mm.
rot_mat (np.ndarray): head pose rotation matrix obtained from PnP.
        gaze_vec (np.ndarray): 3D gaze vector for the eye.
cam (np.ndarray): camera calibration matrix.
dist (np.ndarray): lens distortion coefficients.
method (str): normalization method.
imageWidth (int): output image width.
imageHeight (int): output image height.
Returns:
img_warped (np.ndarray): output warped frame.
        gaze_theta_new (np.float): eye normalized gaze pitch.
        gaze_phi_new (np.float): eye normalized gaze yaw.
        hp_theta_new (np.float): eye normalized hp pitch.
        hp_phi_new (np.float): eye normalized hp yaw.
cnvMat (np.ndarray): warping conversion matrix.
"""
focal_new = 960 * imageWidth / 60
distance_new = 600.0
roiSize = (imageWidth, imageHeight)
img_u = cv2.undistort(img, cam, dist)
distance = np.linalg.norm(eye_cam)
z_scale = distance_new / distance
cam_new = np.array([
[focal_new, 0, roiSize[0] / 2],
[0, focal_new, roiSize[1] / 2],
[0, 0, 1.0], ])
scaleMat = np.array([
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, z_scale], ])
hRx = rot_mat[:, 0]
forward = (eye_cam / distance).reshape(3)
down = np.cross(forward, hRx)
down /= np.linalg.norm(down)
right = np.cross(down, forward)
right /= np.linalg.norm(right)
rotMat = np.c_[right, down, forward].T
# Conversion matrix M.
cnvMat = np.dot(scaleMat, rotMat)
cnvMat = np.float64(cnvMat)
# Warping matrix W.
warpMat = np.dot(np.dot(cam_new, cnvMat), np.linalg.inv(cam))
img_warped = cv2.warpPerspective(img_u, warpMat, roiSize)
if method == 'original':
cnvMat = np.dot(scaleMat, rotMat)
else:
cnvMat = rotMat
# Calculate new HP vector.
hR_new = np.dot(cnvMat, rot_mat)
Zv = hR_new[:, 2]
# Reverse direction as we want head pose z pointing positive Z direction.
# (towards the back of the head)! Also, convention in all research papers.
hp_theta_new, hp_phi_new = compute_theta_phi_from_gaze_vector(-Zv[0], -Zv[1], -Zv[2])
# Calculate new gaze GT vector: [g_n = M * g_r] where M is only rotation.
gaze_vec_new = np.dot(cnvMat, gaze_vec)
gaze_theta_new, gaze_phi_new = compute_theta_phi_from_gaze_vector(gaze_vec_new[0],
gaze_vec_new[1],
gaze_vec_new[2])
return img_warped, gaze_theta_new, gaze_phi_new, hp_theta_new, hp_phi_new, cnvMat
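# A minimal usage sketch for normalizeFace, assuming a blank 640x480 frame, an identity
# head pose, a face 600 mm in front of a 960 px focal-length pinhole camera and a gaze
# vector pointing back at the camera; every value here is an illustrative assumption.
def _example_normalize_face():
    """Sketch running normalizeFace on synthetic inputs."""
    frame = np.zeros((480, 640), dtype=np.uint8)
    cam = np.array([[960.0, 0.0, 320.0],
                    [0.0, 960.0, 240.0],
                    [0.0, 0.0, 1.0]])
    dist = np.zeros((5, 1))
    face_cam_mm = np.array([0.0, 0.0, 600.0])
    rot_mat = np.eye(3)
    gaze_vec = np.array([0.0, 0.0, -1.0])
    outputs = normalizeFace(frame, face_cam_mm, rot_mat, gaze_vec, cam, dist)
    warped = outputs[0]
    # With these assumptions the warp reduces to a pure shift and the crop is 224x224.
    assert warped.shape == (224, 224)
    return outputs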
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataio/theta_phi_angle_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manage generation of tfrecords and related output files."""
from collections import defaultdict
import json
import os
import random
import numpy as np
from nvidia_tao_tf1.core.utils.path_utils import expand_path
from nvidia_tao_tf1.cv.common.dataio.data_converter import DataConverter
# from nvidia_tao_tf1.cv.common.dataio.desq_converter import write_desq
from nvidia_tao_tf1.cv.common.dataio.gt_converter import GtConverter
from nvidia_tao_tf1.cv.common.dataio.jsonlabels_strategy import JsonLabelsStrategy
from nvidia_tao_tf1.cv.common.dataio.sdklabels_strategy import SdkLabelsStrategy
from nvidia_tao_tf1.cv.common.dataio.set_strategy_generator import SetStrategyGenerator
from nvidia_tao_tf1.cv.common.dataio.tfrecord_generator import TfRecordGenerator
from nvidia_tao_tf1.cv.common.dataio.utils import is_kpi_set, PipelineReporter
class TfRecordManager(object):
"""Manage the flow of data source files to tfrecord generation."""
def __init__(
self,
set_id,
experiment_folder_suffix,
use_unique,
use_filtered,
use_undistort,
landmarks_folder_name,
sdklabels_folder_name,
norm_folder_name,
set_root_path,
save_images=True
):
"""Initialize parameters.
Args:
set_id (str): Set for which to generate tfrecords.
experiment_folder_suffix (str): Suffix of experiment folder containing tfrecords.
use_unique (bool): Only create records for first frame in a series if true.
use_filtered (bool): Filter frames if true.
use_undistort (bool): Use undistorted/not calibrated pts and frames if true.
landmarks_folder_name (str): Folder name to obtain predicted fpe landmarks from.
sdklabels_folder_name (str): Folder name to obtain predicted nvhelnet sdk labels from.
norm_folder_name (str): Folder name to save normalized face, eyes and frame images.
            save_images (bool): Whether to save normalized face and frame images.
"""
self._set_id = set_id
self._experiment_folder_suffix = experiment_folder_suffix
self._use_unique = use_unique
self._use_filtered = use_filtered
self._use_undistort = use_undistort
self._landmarks_folder_name = landmarks_folder_name
self._sdklabels_folder_name = sdklabels_folder_name
self._norm_folder_name = norm_folder_name
self._save_images = save_images
self._set_root_path = set_root_path
self._extract_cosmos_paths()
strategy = None
if self._strategy_type == 'sdk':
strategy = SdkLabelsStrategy(
set_id,
self._use_unique,
self._logger,
self._set_strategy,
self._norm_folder_name,
self._save_images)
elif self._strategy_type == 'json':
strategy = JsonLabelsStrategy(
set_id,
self._use_unique,
self._logger,
self._set_strategy,
self._norm_folder_name,
self._save_images)
else:
self._logger.add_error('No such strategy {}'.format(self._strategy_type))
raise ValueError('Invalid strategy.')
self._generator = TfRecordGenerator(strategy)
self._generate_data()
def _extract_cosmos_paths(self):
tfrecord_folder_name = 'TfRecords'
gt_folder_name = 'GT'
if self._use_unique:
tfrecord_folder_name += '_unique'
gt_folder_name += '_unique'
self._set_strategy = SetStrategyGenerator(
self._set_id,
self._experiment_folder_suffix,
tfrecord_folder_name,
gt_folder_name,
self._use_filtered,
self._use_undistort,
self._landmarks_folder_name,
self._sdklabels_folder_name,
self._set_root_path)
self._strategy_type, self._paths = self._set_strategy.get_source_paths()
self._logger = PipelineReporter(self._paths.error_path, 'tfrecord_generation', self._set_id)
def _generate_data(self):
self._tfrecord_data = self._generator.get_tfrecord_data()
# Generate joint records if not kpi set
if not is_kpi_set(self._set_id):
self._joint_data = defaultdict(lambda: defaultdict(
lambda: defaultdict(lambda: defaultdict())))
for user in self._tfrecord_data.keys():
for region in self._tfrecord_data[user].keys():
for frame in self._tfrecord_data[user][region].keys():
frame_data_dict = self._tfrecord_data[user][region][frame]
if not frame_data_dict['train/valid_theta_phi']:
continue
self._joint_data[user][region][frame] = frame_data_dict
def _do_write_joint(self):
if is_kpi_set(self._set_id) or 'eoc' in self._set_id.lower():
return False
return True
def generate_tfrecords(self):
"""Generate tfrecords."""
converter = DataConverter(self._paths.tfrecord_path,
self._landmarks_folder_name is not None)
converter.write_user_tfrecords(self._tfrecord_data)
if self._do_write_joint():
converter = DataConverter(self._paths.tfrecord_path + '_joint',
self._landmarks_folder_name is not None)
converter.write_user_tfrecords(self._joint_data)
def generate_gt(self):
"""Generate ground truth txt files for debugging."""
converter = GtConverter(self._paths.gt_path,
self._landmarks_folder_name is not None)
converter.write_gt_files(self._tfrecord_data)
if is_kpi_set(self._set_id):
converter.write_landmarks(self._tfrecord_data)
if self._do_write_joint():
converter = GtConverter(self._paths.gt_path + '_joint',
self._landmarks_folder_name is not None)
converter.write_gt_files(self._joint_data)
if is_kpi_set(self._set_id):
converter.write_landmarks(self._joint_data)
# def write_desq(self, update=False):
# """Write data to desq database."""
# write_desq(self._tfrecord_data, self._strategy_type, self._landmarks_folder_name,
# self._sdklabels_folder_name, update=update)
@staticmethod
def _summarize(data, test_users, validation_users, train_users, summarize_file_path):
"""Summarize the mean and std of points for a set."""
def _summarize_helper(users, category, json_dict):
landmarks_arr = []
eye_feat_arr = []
landmarks_arr_norm = []
landmarks_arr_3D = []
n_samples = 0
for user in users:
for region in data[user].keys():
for frame in data[user][region].keys():
n_samples += 1
landmarks_arr.append(data[user][region][frame]['train/landmarks']
.reshape((104, 2)))
eye_feat_arr.append(data[user][region][frame]['train/eye_features']
.reshape((56, 1)))
landmarks_arr_norm.append(data[user][region][frame]['train/norm_landmarks']
.reshape((104, 2)))
landmarks_arr_3D.append(data[user][region][frame]['train/landmarks_3D']
.reshape((38, 3)))
if landmarks_arr:
np_landmarks_arr = np.stack(landmarks_arr, axis=0)
json_dict[category + '_mean_lm'] = np.mean(
np_landmarks_arr, dtype=np.float, axis=0).tolist()
json_dict[category + '_std_lm'] = np.std(
np_landmarks_arr, dtype=np.float, axis=0).tolist()
json_dict[category + '_var_lm'] = np.var(
np_landmarks_arr, dtype=np.float, axis=0).tolist()
else:
default_lm = np.empty([104, 2], dtype=np.float)
default_lm.fill(-1)
json_dict[category + '_mean_lm'] = default_lm.tolist()
json_dict[category + '_std_lm'] = default_lm.tolist()
json_dict[category + '_var_lm'] = default_lm.tolist()
if eye_feat_arr:
np_eye_features_arr = np.stack(eye_feat_arr, axis=0)
json_dict[category + '_mean_eye_feat'] = np.mean(
np_eye_features_arr, dtype=np.float, axis=0).tolist()
json_dict[category + '_std_eye_feat'] = np.std(
np_eye_features_arr, dtype=np.float, axis=0).tolist()
json_dict[category + '_var_eye_feat'] = np.var(
np_eye_features_arr, dtype=np.float, axis=0).tolist()
else:
default_eye_feat = np.empty([56, 1], dtype=np.float)
default_eye_feat.fill(-1)
json_dict[category + '_mean_eye_feat'] = default_eye_feat.tolist()
json_dict[category + '_std_eye_feat'] = default_eye_feat.tolist()
json_dict[category + '_var_eye_feat'] = default_eye_feat.tolist()
if landmarks_arr_norm:
np_landmarks_arr_norm = np.stack(landmarks_arr_norm, axis=0)
json_dict[category + '_mean_norm_lm'] = np.mean(
np_landmarks_arr_norm, dtype=np.float, axis=0).tolist()
json_dict[category + '_std_norm_lm'] = np.std(
np_landmarks_arr_norm, dtype=np.float, axis=0).tolist()
json_dict[category + '_var_norm_lm'] = np.var(
np_landmarks_arr_norm, dtype=np.float, axis=0).tolist()
else:
default_lm = np.empty([104, 2], dtype=np.float)
default_lm.fill(-1)
json_dict[category + '_mean_norm_lm'] = default_lm.tolist()
json_dict[category + '_std_norm_lm'] = default_lm.tolist()
json_dict[category + '_var_norm_lm'] = default_lm.tolist()
if landmarks_arr_3D:
np_landmarks_arr_3D = np.stack(landmarks_arr_3D, axis=0)
json_dict[category + '_mean_3D_lm'] = np.mean(
np_landmarks_arr_3D, dtype=np.float, axis=0).tolist()
json_dict[category + '_std_3D_lm'] = np.std(
np_landmarks_arr_3D, dtype=np.float, axis=0).tolist()
json_dict[category + '_var_3D_lm'] = np.var(
np_landmarks_arr_3D, dtype=np.float, axis=0).tolist()
else:
default_lm = np.empty([38, 3], dtype=np.float)
default_lm.fill(-1)
json_dict[category + '_mean_3D_lm'] = default_lm.tolist()
json_dict[category + '_std_3D_lm'] = default_lm.tolist()
json_dict[category + '_var_3D_lm'] = default_lm.tolist()
json_dict[category + '_num_samples'] = n_samples
json_dict = {}
_summarize_helper(test_users, 'test', json_dict)
_summarize_helper(validation_users, 'validate', json_dict)
_summarize_helper(train_users, 'train', json_dict)
data_json_path = expand_path(f"{summarize_file_path}/data.json")
with open(data_json_path, 'w') as data_json_file:
json.dump(json_dict, data_json_file, indent=4, sort_keys=True)
def split_tfrecords(self):
"""Local split of users in a set."""
def _split_users(data):
users = data.keys()
n_users = len(users)
size = int(n_users / 8)
random.seed(1)
users = sorted(users)
reordered_users = random.sample(users, n_users)
if size == 0:
size = 1
if is_kpi_set(self._set_id):
size = n_users
test_users = reordered_users[:size]
validation_users = reordered_users[size:size * 2]
train_users = reordered_users[size * 2:]
print('Test', test_users)
print('Validation', validation_users)
print('Train', train_users)
return data, test_users, validation_users, train_users
tfrecords_combined_path = self._paths.tfrecord_path + '_combined'
data_converter = DataConverter(tfrecords_combined_path,
self._landmarks_folder_name is not None)
data, test_users, validation_users, train_users = _split_users(self._tfrecord_data)
data_converter.write_combined_tfrecords(data, test_users, validation_users, train_users)
self._summarize(data, test_users, validation_users, train_users, tfrecords_combined_path)
gt_combined_path = self._paths.gt_path + '_combined'
gt_converter = GtConverter(gt_combined_path, self._landmarks_folder_name is not None)
gt_converter.write_combined_gt_files(data, test_users, validation_users, train_users)
if is_kpi_set(self._set_id):
gt_converter.write_combined_landmarks(data, test_users, validation_users, train_users)
if self._do_write_joint():
tfrecords_joint_combined_path = self._paths.tfrecord_path + '_joint_combined'
data_joint_converter = DataConverter(tfrecords_joint_combined_path,
self._landmarks_folder_name is not None)
data, test_users, validation_users, train_users = _split_users(self._joint_data)
data_joint_converter.write_combined_tfrecords(
data,
test_users,
validation_users,
train_users)
self._summarize(
data,
test_users,
validation_users,
train_users,
tfrecords_joint_combined_path)
gt_combined_path = self._paths.gt_path + '_joint_combined'
gt_converter = GtConverter(gt_combined_path, self._landmarks_folder_name is not None)
gt_converter.write_combined_gt_files(data, test_users, validation_users, train_users)
if is_kpi_set(self._set_id):
gt_converter.write_combined_landmarks(
data,
test_users,
validation_users,
train_users)
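# A hedged sketch of the hold-out rule used by split_tfrecords above, shown on plain user
# ids; `_example_split_user_ids` and its `is_kpi` flag are illustrative stand-ins for the
# class internals (is_kpi_set) and are not part of the pipeline.
def _example_split_user_ids(user_ids, is_kpi=False):
    """Return (test, validation, train) id lists using the same 1/8 split rule."""
    n_users = len(user_ids)
    size = max(1, int(n_users / 8))
    if is_kpi:
        size = n_users
    random.seed(1)
    reordered = random.sample(sorted(user_ids), n_users)
    return reordered[:size], reordered[size:size * 2], reordered[size * 2:]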
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataio/tfrecord_manager.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Calculate gaze vectors and their theta phi angles."""
import abc
import errno
import os
import cv2
import numpy as np
from recordclass import recordclass
from nvidia_tao_tf1.cv.common.dataio.theta_phi_angle_utils import (
calculate_reprojection_error,
compute_PoR_from_gaze_vector,
compute_PoR_from_theta_phi,
compute_theta_phi_from_gaze_vector,
normalizeEye,
normalizeFace,
normalizeFullFrame)
from nvidia_tao_tf1.cv.common.dataio.theta_phi_lm_utils import (
AnthropometicPtsGenerator,
PnPPtsGenerator,
projectCamera2Image,
projectObject2Camera)
'''
Terminologies:
WCS: World (object) coordinate system
CCS: Camera coordinate system
ICS: Image coordinate system
'''
def mkdir_p(new_path):
"""Makedir, making also non-existing parent dirs."""
try:
os.makedirs(new_path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(new_path):
pass
else:
raise
class ThetaPhiLandmarksGenerator(object):
"""Generate landmarks for theta phi calculations."""
def __init__(
self,
landmarks_2D,
occlusions,
frame_width,
frame_height,
face_bbox
):
"""Initialize landmarks, face bounding box, landmarks, and frame info."""
if landmarks_2D is None:
raise ValueError('Non-existent landmarks, cannot be used to compute gaze vector')
self._landmarks_2D = landmarks_2D
self._occlusions = occlusions
self._face_bbox = face_bbox
if face_bbox:
self._x1, self._y1, self._x2, self._y2 = face_bbox
self._frame_width = frame_width
self._frame_height = frame_height
def get_landmarks_in_frame(self):
""""Get landmarks in frame."""
valid_landmarks = []
n_landmarks = len(self._landmarks_2D)
for coord_index in range(n_landmarks):
coord = self._landmarks_2D[coord_index]
x = coord[0]
y = coord[1]
if x < 0 or y < 0 or x > self._frame_width or y > self._frame_height:
valid_landmarks.append([-1, -1])
elif (
self._face_bbox and
self._occlusions[coord_index] == 1 and
not (self._x1 <= x <= self._x2 and self._y1 <= y <= self._y2)
):
valid_landmarks.append([-1, -1])
else:
valid_landmarks.append([x, y])
return np.asarray(valid_landmarks, dtype=np.longdouble)
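# A minimal usage sketch, assuming three landmarks on a 640x480 frame and a face box of
# (50, 50, 200, 200): one point lies outside the frame and one occluded point lies outside
# the face box, so both are mapped to [-1, -1]; the helper is illustrative only.
def _example_landmarks_in_frame():
    """Sketch of ThetaPhiLandmarksGenerator.get_landmarks_in_frame filtering."""
    landmarks_2D = np.array([[10.0, 20.0], [700.0, 30.0], [300.0, 300.0]])
    occlusions = [0, 0, 1]
    generator = ThetaPhiLandmarksGenerator(
        landmarks_2D, occlusions, frame_width=640, frame_height=480,
        face_bbox=(50, 50, 200, 200))
    valid = generator.get_landmarks_in_frame()
    assert valid[0].tolist() == [10.0, 20.0]
    assert valid[1].tolist() == [-1.0, -1.0] and valid[2].tolist() == [-1.0, -1.0]
    return valid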
class ThetaPhiCalcStrategy(object):
"""Class for calculating gaze vectors and their theta phi angles."""
__metaclass__ = abc.ABCMeta
validation_threshold = 50 # in mm
min_landmark_percentile_pnp = 50.0
max_user_depth = 2500.0
min_user_depth = 350.0
reproj_err_threshold = 30.0
hp_tp_diff_threshold_theta = 90 * np.pi/180.0
hp_tp_diff_threshold_phi = 50 * np.pi/180.0
AngleStruct = recordclass('AngleStruct', '''
le_pc_cam_mm, re_pc_cam_mm,
rot_mat, tvec,
le_gaze_vec, re_gaze_vec,
theta_ovr, phi_ovr,
theta_le, phi_le,
theta_re, phi_re,
euler_angles, err_reproj_x2,
head_pose_theta, head_pose_phi,
mid_eyes_cam_mm, mid_gaze_vec,
theta_mid, phi_mid,
norm_face_gaze_theta, norm_face_gaze_phi,
norm_face_hp_theta, norm_face_hp_phi,
norm_leye_gaze_theta, norm_leye_gaze_phi,
norm_leye_hp_theta, norm_leye_hp_phi,
norm_reye_gaze_theta, norm_reye_gaze_phi,
norm_reye_hp_theta, norm_reye_hp_phi,
norm_face_bb, norm_leye_bb, norm_reye_bb,
norm_landmarks, norm_per_oof,
norm_frame_path, landmarks_3D,
norm_face_cnv_mat, norm_leye_cnv_mat,
norm_reye_cnv_mat, face_cam_mm''')
empty_angle_struct = AngleStruct(
None, None,
None, None,
None, None,
None, None,
None, None,
None, None,
None, None,
None, None,
None, None,
None, None,
None, None,
None, None,
None, None,
None, None,
None, None,
None, None,
None, None,
None, None,
None, None,
None, None,
None, None,
None)
def __init__(self, logger, gt_cam_mm, camera_matrix, distortion_coeffs, R, T, landmarks_2D):
"""Initialize camera parameters and landmarks."""
self._logger = logger
self._gt_cam_mm = gt_cam_mm
self._camera_matrix = camera_matrix
self._distortion_coeffs = distortion_coeffs
self._R = R
self._T = T
self._landmarks_2D = landmarks_2D
self._max_theta_phi_val_err = None
self._max_gaze_vec_val_err = None
@abc.abstractmethod
def _compute_gaze_angles_theta_phi(self):
pass
def _validate_overall_gaze(
self,
gaze_vec,
phi,
theta,
err_reproj_x2,
head_pose_phi,
head_pose_theta
):
is_valid_gaze = True
is_invalid_angle_params = (
gaze_vec is None or
phi is None or theta is None or
err_reproj_x2 is None or
head_pose_phi is None or head_pose_theta is None)
if is_invalid_angle_params:
self._logger.add_warning('Landmark problems. Discard sample.')
is_valid_gaze = False
else:
if phi >= 1.8 or phi <= -1.8:
self._logger.add_warning(
'Unexpected gaze yaw or pitch: {}, {}. Discard sample.'.format(phi, theta))
is_valid_gaze = False
if err_reproj_x2 > self.reproj_err_threshold:
self._logger.add_warning(
'High PnP reprojection error: {}. Discard sample'.format(err_reproj_x2))
is_valid_gaze = False
if (phi > 0 and head_pose_phi < -1 * phi) or (phi < 0 and head_pose_phi > -1 * phi):
if abs(phi + head_pose_phi) > 0.1:
self._logger.add_warning(
'Opposite hp and gaze yaw: {}, {}'.format(phi, head_pose_phi))
if abs(phi - head_pose_phi) > self.hp_tp_diff_threshold_phi:
self._logger.add_warning(
'''High yaw difference and opposite directions: {}, {}, {}, {}.
Discard sample.'''.format(phi, head_pose_phi, theta, head_pose_theta))
is_valid_gaze = False
if abs(phi - head_pose_phi) > 2 * self.hp_tp_diff_threshold_phi:
self._logger.add_warning(
'''High yaw difference between head pose-based gaze and real gaze: {}, {}, {}, {}.
Discard sample.'''.format(phi, head_pose_phi, theta, head_pose_theta))
is_valid_gaze = False
return is_valid_gaze
if abs(theta - head_pose_theta) > self.hp_tp_diff_threshold_theta:
self._logger.add_warning(
'''High pitch difference between head pose-based gaze and real gaze: {}, {}, {}.
Discard sample.'''.format(abs(theta - head_pose_theta), theta, head_pose_theta))
is_valid_gaze = False
return is_valid_gaze
@abc.abstractmethod
def _extract_error(self, info):
pass
def _is_valid_sample(self):
if self._max_gaze_vec_val_err <= self.validation_threshold \
and self._max_theta_phi_val_err <= self.validation_threshold \
and abs(self._max_theta_phi_val_err - self._max_gaze_vec_val_err) <= 3.0:
return True
self._logger.add_warning('Sample discarded due to high validation error')
self._logger.add_warning('Errors theta-phi: {} gaze_vec: {}'.format(
self._max_theta_phi_val_err,
self._max_gaze_vec_val_err))
return False
@abc.abstractmethod
def get_gaze_angles_theta_phi(self):
"""Abstract method for getting gaze angles info."""
pass
class NoPupilThetaPhiStrategy(ThetaPhiCalcStrategy):
"""Class for generating gaze angles without pupil landmarks."""
def __init__(self, logger, gt_cam_mm, camera_matrix, distortion_coeffs, R, T, landmarks_2D,
frame_path, labels_source_path, norm_folder_name, save_images):
"""Initialize camera parameters and landmarks."""
super(NoPupilThetaPhiStrategy, self).__init__(
logger,
gt_cam_mm,
camera_matrix,
distortion_coeffs,
R,
T,
landmarks_2D)
self._labels_source_path = labels_source_path
self._orig_frame_path = frame_path
self._anthro_pts = AnthropometicPtsGenerator()
self._pnp_pts = PnPPtsGenerator(camera_matrix, distortion_coeffs)
self._norm_folder_name = norm_folder_name
self._save_images = save_images
def get_gaze_angles_theta_phi(self):
"""Get gaze angles and validity of sample."""
info = self._compute_gaze_angles_theta_phi()
if not self._validate_overall_gaze(
info.mid_gaze_vec,
info.phi_mid,
info.theta_mid,
info.err_reproj_x2,
info.head_pose_phi,
info.head_pose_theta
):
is_valid_sample = False
else:
self._extract_error(info)
is_valid_sample = self._is_valid_sample()
return is_valid_sample, info
def _extract_error(self, info):
PoR_x, PoR_y, PoR_z = compute_PoR_from_theta_phi(
info.theta_mid,
info.phi_mid,
info.mid_eyes_cam_mm,
self._R,
self._T)
por_arr = np.asarray([PoR_x, PoR_y, PoR_z])
self._max_theta_phi_val_err = np.sqrt(np.sum(np.power(por_arr - self._gt_cam_mm, 2)))
PoR_x, PoR_y, PoR_z = compute_PoR_from_gaze_vector(
info.mid_gaze_vec,
info.mid_eyes_cam_mm,
self._R,
self._T)
por_arr = np.asarray([PoR_x, PoR_y, PoR_z])
self._max_gaze_vec_val_err = np.sqrt(np.sum(np.power(por_arr - self._gt_cam_mm, 2)))
def _compute_gaze_angles_theta_phi(self):
landmarks_2D_final, landmarks_3D_final, perc_occ_landmarks = \
self._anthro_pts.get_selected_landmarks(
self._landmarks_2D,
self._anthro_pts.landmarks_2D_indices_selected,
self._anthro_pts.get_pnp_landmarks())
# Compute PnP between the generic 3D face model landmarks (WCS) and 2D landmarks (ICS)
if perc_occ_landmarks < self.min_landmark_percentile_pnp:
# Rotation and translation vectors for 3D-to-2D transformation
_, rvec, tvec = self._pnp_pts.compute_EPnP(
landmarks_3D_final, landmarks_2D_final)
else:
self._logger.add_warning(
'Too many occluded landmarks (%' + str(perc_occ_landmarks) +
'). SolvePnP might not be reliable. Discard sample.')
return self.empty_angle_struct
landmarks_2D_final, landmarks_3D_final, perc_occ_landmarks = \
self._anthro_pts.get_selected_landmarks(
self._landmarks_2D,
self._anthro_pts.landmarks_2D_indices_expr_inv,
self._anthro_pts.get_robust_expr_var_landmarks())
if perc_occ_landmarks < self.min_landmark_percentile_pnp:
_, rvec, tvec = self._pnp_pts.compute_PnP_Iterative(
landmarks_3D_final, landmarks_2D_final, rvec, tvec)
else:
self._logger.add_warning(
'Too many occluded landmarks (%' + str(perc_occ_landmarks) +
'). SolvePnP might not be reliable. Discard sample.')
return self.empty_angle_struct
# Convert rotation vector into a rotation matrix
rot_mat = cv2.Rodrigues(rvec)[0]
# Concatenate translation vector with rotation matrix
proj_mat = np.hstack((rot_mat, tvec))
# use cv with distortion coeffs
landmarks_2D_reproj_2, _ = cv2.projectPoints(
PnPPtsGenerator.get_cv_array(landmarks_3D_final),
rvec,
tvec,
self._camera_matrix,
self._distortion_coeffs)
err_reproj_x2 = calculate_reprojection_error(
np.reshape(landmarks_2D_final, (len(landmarks_2D_final), 2)),
np.reshape(landmarks_2D_reproj_2, (len(landmarks_2D_reproj_2), 2)))
# Head pose-based direction vector
Zv = rot_mat[:, 2]
head_pose_theta, head_pose_phi = \
compute_theta_phi_from_gaze_vector(-Zv[0], -Zv[1], -Zv[2])
# Head pose euler angles in degrees
euler_angles = cv2.decomposeProjectionMatrix(proj_mat)[6]
if euler_angles[0] < -90:
euler_angles[0] = -(euler_angles[0] + 180) # correct pitch
elif euler_angles[0] > 90:
euler_angles[0] = -(euler_angles[0] - 180) # correct pitch
euler_angles[1] = -euler_angles[1] # correct the sign of yaw
if euler_angles[2] < -90:
euler_angles[2] = -(euler_angles[2] + 180) # correct roll
elif euler_angles[2] > 90:
euler_angles[2] = -(euler_angles[2] - 180) # correct roll
face_center_obj = self._anthro_pts.get_face_center()
face_cam_mm = projectObject2Camera(face_center_obj, rot_mat, tvec)
face_gaze_vec = self._gt_cam_mm - face_cam_mm
face_gv_mag = np.sqrt(face_gaze_vec[0] ** 2 + face_gaze_vec[1] ** 2 + face_gaze_vec[2] ** 2)
face_gaze_vec = face_gaze_vec / face_gv_mag
le_center, re_center = self._anthro_pts.get_eye_centers()
mid_eyes_obj = (re_center + le_center) / 2.0
mid_eyes_cam_mm = projectObject2Camera(mid_eyes_obj, rot_mat, tvec)
if mid_eyes_cam_mm[2] > self.max_user_depth or mid_eyes_cam_mm[2] < self.min_user_depth:
self._logger.add_warning('Mid eye cam coords incorrect: {}. Discard sample'.format(
mid_eyes_cam_mm))
elif np.any(euler_angles > 90) or np.any(euler_angles < -90):
self._logger.add_warning('''Head pose angle range incorrect. Discard sample.
Euler angles - Pitch: {}, Yaw: {}, Roll: {}'''.format(
euler_angles[0], euler_angles[1], euler_angles[2]))
return self.empty_angle_struct
mid_gaze_vec = self._gt_cam_mm - mid_eyes_cam_mm
mid_gaze_vec = mid_gaze_vec / np.sqrt(np.sum(np.power(mid_gaze_vec, 2)))
theta_mid, phi_mid = compute_theta_phi_from_gaze_vector(mid_gaze_vec[0],
mid_gaze_vec[1],
mid_gaze_vec[2])
leye_cam_mm = projectObject2Camera(le_center, rot_mat, tvec)
leye_gaze_vec = self._gt_cam_mm - leye_cam_mm
leye_gaze_vec /= np.sqrt(np.sum(np.power(leye_gaze_vec, 2)))
leye_theta, leye_phi = compute_theta_phi_from_gaze_vector(leye_gaze_vec[0],
leye_gaze_vec[1],
leye_gaze_vec[2])
reye_cam_mm = projectObject2Camera(re_center, rot_mat, tvec)
reye_gaze_vec = self._gt_cam_mm - reye_cam_mm
reye_gaze_vec /= np.sqrt(np.sum(np.power(reye_gaze_vec, 2)))
reye_theta, reye_phi = compute_theta_phi_from_gaze_vector(reye_gaze_vec[0],
reye_gaze_vec[1],
reye_gaze_vec[2])
theta_ovr = (leye_theta + reye_theta)/2.0
phi_ovr = (leye_phi + reye_phi)/2.0
# Get 3D landmarks according to current 58-pt landmark model
# 38 of 58 3D landmarks matches with 2D landmarks, therefore, considered
no_of_3D_landmarks = 38
landmarks_3D = -1.0 * np.ones((no_of_3D_landmarks, 3), dtype=np.float32)
landmarks_obj_3D = self._anthro_pts.get_landmarks_export_3D()
for ind in range(no_of_3D_landmarks):
obj_mm = np.reshape(landmarks_obj_3D[ind, :], (1, 3))
cam_mm = projectObject2Camera(obj_mm, rot_mat, tvec)
landmarks_3D[ind] = cam_mm.transpose()
landmarks_3D = landmarks_3D.reshape(-1)
# Full Frame Normalization
frame_gray = cv2.imread(self._orig_frame_path, 0)
lec_px = projectCamera2Image(leye_cam_mm, self._camera_matrix)
rec_px = projectCamera2Image(reye_cam_mm, self._camera_matrix)
ec_pxs = np.zeros((2, 2), dtype=np.float32)
ec_pxs[0][0] = lec_px[0]
ec_pxs[0][1] = lec_px[1]
ec_pxs[1][0] = rec_px[0]
ec_pxs[1][1] = rec_px[1]
method = 'modified'
# For 448 x 448, scale_factor is 2
scale_factor = 2.0
norm_frame_warped, norm_face_gaze_theta, norm_face_gaze_phi, norm_face_hp_theta, \
norm_face_hp_phi, norm_face_bb, norm_leye_bb, norm_reye_bb, normlandmarks, \
norm_face_cnv_mat = normalizeFullFrame(frame_gray, face_cam_mm, ec_pxs, rot_mat,
face_gaze_vec, self._landmarks_2D,
self._camera_matrix, self._distortion_coeffs,
method, scale_factor)
if norm_frame_warped is None:
print('Bad normalization, warped image is flipped. Discard sample:',
self._orig_frame_path)
return self.AngleStruct(
leye_cam_mm, reye_cam_mm,
rot_mat, tvec,
leye_gaze_vec, reye_gaze_vec,
theta_ovr, phi_ovr,
leye_theta, leye_phi,
reye_theta, reye_phi,
euler_angles, err_reproj_x2,
head_pose_theta, head_pose_phi,
mid_eyes_cam_mm, mid_gaze_vec,
theta_mid, phi_mid,
None, None,
None, None,
None, None,
None, None,
None, None,
None, None,
None, None, None,
None, None,
None, None,
None, None,
None, None)
invalid_landmark_ind = np.argwhere(self._landmarks_2D.reshape(-1) == -1)
no_of_2D_landmarks = 104
norm_landmarks = -1.0 * np.ones((no_of_2D_landmarks, 2), dtype=np.float32)
norm_landmarks[:normlandmarks.shape[0]] = normlandmarks
norm_landmarks = norm_landmarks.reshape(-1)
norm_landmarks[invalid_landmark_ind] = -1
# Face/Eyes Normalization
imageWidth = 224
imageHeight = 224
norm_face_warped, _, _, _, _, norm_per_oof, _ = normalizeFace(frame_gray, face_cam_mm,
rot_mat, face_gaze_vec,
self._camera_matrix,
self._distortion_coeffs,
method, imageWidth,
imageHeight)
norm_face_cnv_mat = norm_face_cnv_mat.reshape(-1)
imageWidth = 120
# For rectangular eye, use imageHeight = int(imageWidth * 36.0 / 60.0)
imageHeight = imageWidth
norm_leye_warped, norm_leye_gaze_theta, norm_leye_gaze_phi, norm_leye_hp_theta, \
norm_leye_hp_phi, norm_leye_cnv_mat = normalizeEye(frame_gray, leye_cam_mm, rot_mat,
leye_gaze_vec, self._camera_matrix,
self._distortion_coeffs, method,
imageWidth, imageHeight)
norm_leye_cnv_mat = norm_leye_cnv_mat.reshape(-1)
norm_reye_warped, norm_reye_gaze_theta, norm_reye_gaze_phi, norm_reye_hp_theta, \
norm_reye_hp_phi, norm_reye_cnv_mat = normalizeEye(frame_gray, reye_cam_mm, rot_mat,
reye_gaze_vec, self._camera_matrix,
self._distortion_coeffs, method,
imageWidth, imageHeight)
norm_reye_cnv_mat = norm_reye_cnv_mat.reshape(-1)
folder_path = ''
for tmp in self._orig_frame_path.split('/')[:-1]:
folder_path += (tmp + '/')
frame_name = self._orig_frame_path.split('/')[-1]
norm_data_name = self._norm_folder_name + '_' + \
self._labels_source_path.split("/")[-1] + '/'
if 'pngData' in folder_path:
data_folder = 'pngData'
elif 'Data' in folder_path:
data_folder = 'Data'
else:
print('Cosmos path name and data folder is not recognized:', folder_path,
self._orig_frame_path, head_pose_phi, phi_mid, norm_face_gaze_phi)
return self.AngleStruct(
leye_cam_mm, reye_cam_mm,
rot_mat, tvec,
leye_gaze_vec, reye_gaze_vec,
theta_ovr, phi_ovr,
leye_theta, leye_phi,
reye_theta, reye_phi,
euler_angles, err_reproj_x2,
head_pose_theta, head_pose_phi,
mid_eyes_cam_mm, mid_gaze_vec,
theta_mid, phi_mid,
None, None,
None, None,
None, None,
None, None,
None, None,
None, None,
None, None, None,
None, None,
None, None,
None, None,
None, None)
new_suffix = norm_data_name + 'frame'
        # Replace only last occurrence (reverse, change first occurrence, reverse back)
norm_frame_path = (self._orig_frame_path[::-1].
replace(data_folder[::-1], new_suffix[::-1], 1))[::-1]
if self._save_images:
norm_frame_folder = folder_path[::-1]. \
replace(data_folder[::-1], new_suffix[::-1], 1)[::-1]
mkdir_p(norm_frame_folder)
new_suffix = norm_data_name + 'face'
norm_face_folder = folder_path[::-1]. \
replace(data_folder[::-1], new_suffix[::-1], 1)[::-1]
mkdir_p(norm_face_folder)
new_suffix = norm_data_name + 'leye'
norm_leye_folder = folder_path[::-1]. \
replace(data_folder[::-1], new_suffix[::-1], 1)[::-1]
mkdir_p(norm_leye_folder)
new_suffix = norm_data_name + 'reye'
norm_reye_folder = folder_path[::-1]. \
replace(data_folder[::-1], new_suffix[::-1], 1)[::-1]
mkdir_p(norm_reye_folder)
# Write-out normalized/warped images
new_image_path = norm_frame_folder + frame_name
cv2.imwrite(new_image_path, norm_frame_warped)
new_image_path = norm_face_folder + frame_name
cv2.imwrite(new_image_path, norm_face_warped)
new_image_path = norm_leye_folder + frame_name
cv2.imwrite(new_image_path, norm_leye_warped)
new_image_path = norm_reye_folder + frame_name
cv2.imwrite(new_image_path, norm_reye_warped)
return self.AngleStruct(
leye_cam_mm, reye_cam_mm,
rot_mat, tvec,
leye_gaze_vec, reye_gaze_vec,
theta_ovr, phi_ovr,
leye_theta, leye_phi,
reye_theta, reye_phi,
euler_angles, err_reproj_x2,
head_pose_theta, head_pose_phi,
mid_eyes_cam_mm, mid_gaze_vec,
theta_mid, phi_mid,
norm_face_gaze_theta, norm_face_gaze_phi,
norm_face_hp_theta, norm_face_hp_phi,
norm_leye_gaze_theta, norm_leye_gaze_phi,
norm_leye_hp_theta, norm_leye_hp_phi,
norm_reye_gaze_theta, norm_reye_gaze_phi,
norm_reye_hp_theta, norm_reye_hp_phi,
norm_face_bb, norm_leye_bb, norm_reye_bb,
norm_landmarks, norm_per_oof,
norm_frame_path, landmarks_3D,
norm_face_cnv_mat, norm_leye_cnv_mat,
norm_reye_cnv_mat, face_cam_mm)
class CustomNormalizeData(ThetaPhiCalcStrategy):
"""Class for generating gaze angles without pupil landmarks."""
def __init__(self, logger, camera_matrix, distortion_coeffs, R, T, landmarks_2D,
frame_path, norm_folder_name, save_images, data_root_path):
"""Initialize camera parameters and landmarks."""
super(CustomNormalizeData, self).__init__(
logger=logger,
gt_cam_mm=np.zeros((3, 1), dtype=np.longdouble),
camera_matrix=camera_matrix,
distortion_coeffs=distortion_coeffs,
R=R,
T=T,
landmarks_2D=landmarks_2D)
self._orig_frame_path = frame_path
self._anthro_pts = AnthropometicPtsGenerator()
self._pnp_pts = PnPPtsGenerator(camera_matrix, distortion_coeffs)
self._norm_folder_name = norm_folder_name
self._save_images = save_images
self._data_root_path = data_root_path
def get_normalized_data(self):
"""Get normalized data for the given sample."""
info = self._compute_gaze_angles_theta_phi()
return info
def _extract_error(self, info):
PoR_x, PoR_y, PoR_z = compute_PoR_from_theta_phi(
info.theta_mid,
info.phi_mid,
info.mid_eyes_cam_mm,
self._R,
self._T)
por_arr = np.asarray([PoR_x, PoR_y, PoR_z])
self._max_theta_phi_val_err = np.sqrt(np.sum(np.power(por_arr - self._gt_cam_mm, 2)))
PoR_x, PoR_y, PoR_z = compute_PoR_from_gaze_vector(
info.mid_gaze_vec,
info.mid_eyes_cam_mm,
self._R,
self._T)
por_arr = np.asarray([PoR_x, PoR_y, PoR_z])
self._max_gaze_vec_val_err = np.sqrt(np.sum(np.power(por_arr - self._gt_cam_mm, 2)))
def _compute_gaze_angles_theta_phi(self):
landmarks_2D_final, landmarks_3D_final, perc_occ_landmarks = \
self._anthro_pts.get_selected_landmarks(
self._landmarks_2D,
self._anthro_pts.landmarks_2D_indices_selected,
self._anthro_pts.get_pnp_landmarks())
# Compute PnP between the generic 3D face model landmarks (WCS) and 2D landmarks (ICS)
if perc_occ_landmarks < self.min_landmark_percentile_pnp:
# Rotation and translation vectors for 3D-to-2D transformation
_, rvec, tvec = self._pnp_pts.compute_EPnP(
landmarks_3D_final, landmarks_2D_final)
else:
self._logger.add_warning(
'Too many occluded landmarks (%' + str(perc_occ_landmarks) +
'). SolvePnP might not be reliable. Discard sample.')
return self.empty_angle_struct
landmarks_2D_final, landmarks_3D_final, perc_occ_landmarks = \
self._anthro_pts.get_selected_landmarks(
self._landmarks_2D,
self._anthro_pts.landmarks_2D_indices_expr_inv,
self._anthro_pts.get_robust_expr_var_landmarks())
if perc_occ_landmarks < self.min_landmark_percentile_pnp:
_, rvec, tvec = self._pnp_pts.compute_PnP_Iterative(
landmarks_3D_final, landmarks_2D_final, rvec, tvec)
else:
self._logger.add_warning(
'Too many occluded landmarks (%' + str(perc_occ_landmarks) +
'). SolvePnP might not be reliable. Discard sample.')
return self.empty_angle_struct
# Convert rotation vector into a rotation matrix
rot_mat = cv2.Rodrigues(rvec)[0]
# Concatenate translation vector with rotation matrix
proj_mat = np.hstack((rot_mat, tvec))
# use cv with distortion coeffs
landmarks_2D_reproj_2, _ = cv2.projectPoints(
PnPPtsGenerator.get_cv_array(landmarks_3D_final),
rvec,
tvec,
self._camera_matrix,
self._distortion_coeffs)
err_reproj_x2 = calculate_reprojection_error(
np.reshape(landmarks_2D_final, (len(landmarks_2D_final), 2)),
np.reshape(landmarks_2D_reproj_2, (len(landmarks_2D_reproj_2), 2)))
# Head pose-based direction vector
Zv = rot_mat[:, 2]
head_pose_theta, head_pose_phi = \
compute_theta_phi_from_gaze_vector(-Zv[0], -Zv[1], -Zv[2])
# Head pose euler angles in degrees
euler_angles = cv2.decomposeProjectionMatrix(proj_mat)[6]
if euler_angles[0] < -90:
euler_angles[0] = -(euler_angles[0] + 180) # correct pitch
elif euler_angles[0] > 90:
euler_angles[0] = -(euler_angles[0] - 180) # correct pitch
euler_angles[1] = -euler_angles[1] # correct the sign of yaw
if euler_angles[2] < -90:
euler_angles[2] = -(euler_angles[2] + 180) # correct roll
elif euler_angles[2] > 90:
euler_angles[2] = -(euler_angles[2] - 180) # correct roll
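        # Worked example of the wrap-around above (illustrative values, not taken from data):
        # a decomposed pitch of -170 deg maps to -(-170 + 180) = -10 deg, and a roll of
        # 175 deg maps to -(175 - 180) = 5 deg, keeping the corrected angles within (-90, 90).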
face_center_obj = self._anthro_pts.get_face_center()
face_cam_mm = projectObject2Camera(face_center_obj, rot_mat, tvec)
face_gaze_vec = self._gt_cam_mm - face_cam_mm
face_gv_mag = np.sqrt(face_gaze_vec[0] ** 2 + face_gaze_vec[1] ** 2 + face_gaze_vec[2] ** 2)
face_gaze_vec = face_gaze_vec / face_gv_mag
le_center, re_center = self._anthro_pts.get_eye_centers()
mid_eyes_obj = (re_center + le_center) / 2.0
mid_eyes_cam_mm = projectObject2Camera(mid_eyes_obj, rot_mat, tvec)
if mid_eyes_cam_mm[2] > self.max_user_depth or mid_eyes_cam_mm[2] < self.min_user_depth:
self._logger.add_warning('Mid eye cam coords incorrect: {}. Discard sample'.format(
mid_eyes_cam_mm))
elif np.any(euler_angles > 90) or np.any(euler_angles < -90):
self._logger.add_warning('''Head pose angle range incorrect. Discard sample.
Euler angles - Pitch: {}, Yaw: {}, Roll: {}'''.format(
euler_angles[0], euler_angles[1], euler_angles[2]))
return self.empty_angle_struct
mid_gaze_vec = self._gt_cam_mm - mid_eyes_cam_mm
mid_gaze_vec = mid_gaze_vec / np.sqrt(np.sum(np.power(mid_gaze_vec, 2)))
theta_mid, phi_mid = compute_theta_phi_from_gaze_vector(mid_gaze_vec[0],
mid_gaze_vec[1],
mid_gaze_vec[2])
leye_cam_mm = projectObject2Camera(le_center, rot_mat, tvec)
leye_gaze_vec = self._gt_cam_mm - leye_cam_mm
leye_gaze_vec /= np.sqrt(np.sum(np.power(leye_gaze_vec, 2)))
leye_theta, leye_phi = compute_theta_phi_from_gaze_vector(leye_gaze_vec[0],
leye_gaze_vec[1],
leye_gaze_vec[2])
reye_cam_mm = projectObject2Camera(re_center, rot_mat, tvec)
reye_gaze_vec = self._gt_cam_mm - reye_cam_mm
reye_gaze_vec /= np.sqrt(np.sum(np.power(reye_gaze_vec, 2)))
reye_theta, reye_phi = compute_theta_phi_from_gaze_vector(reye_gaze_vec[0],
reye_gaze_vec[1],
reye_gaze_vec[2])
theta_ovr = (leye_theta + reye_theta)/2.0
phi_ovr = (leye_phi + reye_phi)/2.0
# Get 3D landmarks according to current 58-pt landmark model
        # 38 of the 58 3D landmarks match the 2D landmarks and are therefore considered
no_of_3D_landmarks = 38
landmarks_3D = -1.0 * np.ones((no_of_3D_landmarks, 3), dtype=np.float32)
landmarks_obj_3D = self._anthro_pts.get_landmarks_export_3D()
for ind in range(no_of_3D_landmarks):
obj_mm = np.reshape(landmarks_obj_3D[ind, :], (1, 3))
cam_mm = projectObject2Camera(obj_mm, rot_mat, tvec)
landmarks_3D[ind] = cam_mm.transpose()
landmarks_3D = landmarks_3D.reshape(-1)
# Full Frame Normalization
frame_gray = cv2.imread(self._orig_frame_path, 0)
lec_px = projectCamera2Image(leye_cam_mm, self._camera_matrix)
rec_px = projectCamera2Image(reye_cam_mm, self._camera_matrix)
ec_pxs = np.zeros((2, 2), dtype=np.float32)
ec_pxs[0][0] = lec_px[0]
ec_pxs[0][1] = lec_px[1]
ec_pxs[1][0] = rec_px[0]
ec_pxs[1][1] = rec_px[1]
method = 'modified'
# For 448 x 448, scale_factor is 2
scale_factor = 2.0
norm_frame_warped, norm_face_gaze_theta, norm_face_gaze_phi, norm_face_hp_theta, \
norm_face_hp_phi, norm_face_bb, norm_leye_bb, norm_reye_bb, normlandmarks, \
norm_face_cnv_mat = normalizeFullFrame(frame_gray, face_cam_mm, ec_pxs, rot_mat,
face_gaze_vec, self._landmarks_2D,
self._camera_matrix, self._distortion_coeffs,
method, scale_factor)
if norm_frame_warped is None:
print('Bad normalization, warped image is flipped. Discard sample:',
self._orig_frame_path)
return self.AngleStruct(
leye_cam_mm, reye_cam_mm,
rot_mat, tvec,
leye_gaze_vec, reye_gaze_vec,
theta_ovr, phi_ovr,
leye_theta, leye_phi,
reye_theta, reye_phi,
euler_angles, err_reproj_x2,
head_pose_theta, head_pose_phi,
mid_eyes_cam_mm, mid_gaze_vec,
theta_mid, phi_mid,
None, None,
None, None,
None, None,
None, None,
None, None,
None, None,
None, None, None,
None, None,
None, None,
None, None,
None, None)
invalid_landmark_ind = np.argwhere(self._landmarks_2D.reshape(-1) == -1)
no_of_2D_landmarks = 104
norm_landmarks = -1.0 * np.ones((no_of_2D_landmarks, 2), dtype=np.float32)
norm_landmarks[:normlandmarks.shape[0]] = normlandmarks
norm_landmarks = norm_landmarks.reshape(-1)
norm_landmarks[invalid_landmark_ind] = -1
# Face/Eyes Normalization
imageWidth = 224
imageHeight = 224
norm_face_warped, _, _, _, _, norm_per_oof, _ = normalizeFace(frame_gray, face_cam_mm,
rot_mat, face_gaze_vec,
self._camera_matrix,
self._distortion_coeffs,
method, imageWidth,
imageHeight)
norm_face_cnv_mat = norm_face_cnv_mat.reshape(-1)
imageWidth = 120
# For rectangular eye, use imageHeight = int(imageWidth * 36.0 / 60.0)
imageHeight = imageWidth
norm_leye_warped, norm_leye_gaze_theta, norm_leye_gaze_phi, norm_leye_hp_theta, \
norm_leye_hp_phi, norm_leye_cnv_mat = normalizeEye(frame_gray, leye_cam_mm, rot_mat,
leye_gaze_vec, self._camera_matrix,
self._distortion_coeffs, method,
imageWidth, imageHeight)
norm_leye_cnv_mat = norm_leye_cnv_mat.reshape(-1)
norm_reye_warped, norm_reye_gaze_theta, norm_reye_gaze_phi, norm_reye_hp_theta, \
norm_reye_hp_phi, norm_reye_cnv_mat = normalizeEye(frame_gray, reye_cam_mm, rot_mat,
reye_gaze_vec, self._camera_matrix,
self._distortion_coeffs, method,
imageWidth, imageHeight)
norm_reye_cnv_mat = norm_reye_cnv_mat.reshape(-1)
folder_path = ''
for tmp in self._orig_frame_path.split('/')[:-1]:
folder_path += (tmp + '/')
frame_name = self._orig_frame_path.split('/')[-1]
        # Replace only last occurrence (reverse, change first occurrence, reverse back)
norm_frame_path = os.path.join(self._data_root_path,
self._norm_folder_name,
'frame',
frame_name)
if self._save_images:
norm_frame_folder = os.path.join(self._data_root_path,
self._norm_folder_name,
'frame')
mkdir_p(norm_frame_folder)
# new_suffix = norm_data_name + 'face'
norm_face_folder = os.path.join(self._data_root_path,
self._norm_folder_name,
'face')
mkdir_p(norm_face_folder)
# new_suffix = norm_data_name + 'leye'
norm_leye_folder = os.path.join(self._data_root_path,
self._norm_folder_name,
'leye')
mkdir_p(norm_leye_folder)
# new_suffix = norm_data_name + 'reye'
norm_reye_folder = os.path.join(self._data_root_path,
self._norm_folder_name,
'reye')
mkdir_p(norm_reye_folder)
# Write-out normalized/warped images
new_image_path = os.path.join(norm_frame_folder, frame_name)
cv2.imwrite(new_image_path, norm_frame_warped)
new_image_path = os.path.join(norm_face_folder, frame_name)
cv2.imwrite(new_image_path, norm_face_warped)
new_image_path = os.path.join(norm_leye_folder, frame_name)
cv2.imwrite(new_image_path, norm_leye_warped)
new_image_path = os.path.join(norm_reye_folder, frame_name)
cv2.imwrite(new_image_path, norm_reye_warped)
return self.AngleStruct(
leye_cam_mm, reye_cam_mm,
rot_mat, tvec,
leye_gaze_vec, reye_gaze_vec,
theta_ovr, phi_ovr,
leye_theta, leye_phi,
reye_theta, reye_phi,
euler_angles, err_reproj_x2,
head_pose_theta, head_pose_phi,
mid_eyes_cam_mm, mid_gaze_vec,
theta_mid, phi_mid,
norm_face_gaze_theta, norm_face_gaze_phi,
norm_face_hp_theta, norm_face_hp_phi,
norm_leye_gaze_theta, norm_leye_gaze_phi,
norm_leye_hp_theta, norm_leye_hp_phi,
norm_reye_gaze_theta, norm_reye_gaze_phi,
norm_reye_hp_theta, norm_reye_hp_phi,
norm_face_bb, norm_leye_bb, norm_reye_bb,
norm_landmarks, norm_per_oof,
norm_frame_path, landmarks_3D,
norm_face_cnv_mat, norm_leye_cnv_mat,
norm_reye_cnv_mat, face_cam_mm)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataio/theta_phi_calc_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract strategy for generating tfrecords."""
import abc
from collections import defaultdict
import copy
import os
import cv2
import numpy as np
from nvidia_tao_tf1.cv.common.dataio.data_converter import DataConverter
from nvidia_tao_tf1.cv.common.dataio.eye_features_generator import (
EyeFeaturesGenerator)
from nvidia_tao_tf1.cv.common.dataio.eye_status import EyeStatus
from nvidia_tao_tf1.cv.common.dataio.theta_phi_angle_utils import (
populate_gaze_info,
populate_head_norm_bbinfo,
populate_head_norm_float,
populate_head_norm_listinfo,
populate_head_norm_path,
populate_theta_phi)
from nvidia_tao_tf1.cv.common.dataio.theta_phi_calc_utils import (
NoPupilThetaPhiStrategy, ThetaPhiLandmarksGenerator)
from nvidia_tao_tf1.cv.common.dataio.utils import (
get_file_ext, get_file_name_noext, is_kpi_set)
class TfRecordLabelsStrategy(object):
"""Abstract class for generating tfrecords."""
__metaclass__ = abc.ABCMeta
# Users in KPI sets which are also in training sets are no longer considered as KPI users.
_non_kpi_users = (
'ZTy4vjHcYs57_4Gq',
'TNg6PdPzFMmNKuZgr7BbSSxkNRj0ibZzbH8n8kMAnuY=',
'VBjKmYLtjNhrjC_A')
class Pipeline_Constants():
"""Constants of pipeline script."""
num_fid_points = 104
@staticmethod
def _populate_frame_dict(frame_dict, features, vals):
assert len(features) == len(vals)
n_items = len(features)
for i in range(n_items):
frame_dict[features[i]] = vals[i]
def __init__(
self,
set_id,
use_unique,
logger,
set_strategy,
norm_folder_name,
save_images
):
"""Initialize parameters.
Args:
set_id (str): Set for which to generate tfrecords.
use_unique (bool): Only create records for first frame in a series if true.
logger (Logger object): Report failures and number of tfrecords lost for tracking.
            set_strategy (SetStrategy object): Strategy for set type (gaze / eoc).
            norm_folder_name (str): Folder name for saving normalized frame, face and eye images.
            save_images (bool): Whether to generate new folders and images for face crop, eyes, etc.
"""
self._set_id = set_id
self._use_unique = use_unique
self._logger = logger
self._set_strategy = set_strategy
self._norm_folder_name = norm_folder_name
self._save_images = save_images
_, self._paths = set_strategy.get_source_paths()
self._cam_intrinsics, self._cam_extrinsics, self._screen_params = \
self._set_strategy.get_camera_parameters()
# 4 level dict (user_name->region_name->frame_name->frame_data)
self._users = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: defaultdict())))
self._frame_width = None
self._frame_height = None
self._landmarks_path = None
def _read_landmarks_from_path(self):
assert self._landmarks_path is not None
for user_file in os.listdir(self._landmarks_path):
user_name = get_file_name_noext(user_file)
user_lm_path = os.path.join(self._landmarks_path, user_file)
with open(user_lm_path, 'r') as user_landmarks:
for line in user_landmarks:
line_split = line.rstrip().split(' ')
if len(self._paths.regions) == 1 and self._paths.regions[0] == '':
# On bench data collection has no regions.
region_name = ''
else:
region_name = line_split[0].split('/')[-2]
frame_name = get_file_name_noext(line_split[0].split('/')[-1])
frame_dict = self._users[user_name][region_name][frame_name]
if (
'train/image_frame_width' not in frame_dict or
'train/image_frame_height' not in frame_dict
):
self._logger.add_error(
'''Could not find frame width or height.
User {} frame {} may not exist'''.format(user_name, frame_name))
continue
frame_w = frame_dict['train/image_frame_width']
frame_h = frame_dict['train/image_frame_height']
landmarks_arr = np.empty([208, ], dtype=np.longdouble)
landmarks_arr.fill(-1)
# No occlusion information
landmarks_occ_arr = np.empty([104, ], dtype=np.longdouble)
landmarks_occ_arr.fill(-1)
frame_dict['train/landmarks_occ'] = np.copy(landmarks_occ_arr).astype(int)
num_min_sdk_landmarks = 68
if len(line_split) >= (1 + num_min_sdk_landmarks * 2):
read_landmarks = np.array(line_split[1:]) # ignore frame at beginning
read_landmarks = read_landmarks.astype(np.longdouble)
landmarks_2D = read_landmarks.reshape(-1, 2)
frame_dict['internal/landmarks_2D_distort'] = np.copy(landmarks_2D)
num_landmarks = landmarks_2D.shape[0]
frame_dict['train/num_keypoints'] = num_landmarks
landmarks_2D[:num_landmarks] = np.asarray(
self._set_strategy.get_pts(
landmarks_2D[:num_landmarks],
frame_w,
frame_h)).reshape(-1, 2)
frame_dict['internal/landmarks_2D'] = landmarks_2D
landmarks_arr[:num_landmarks * 2] = landmarks_2D.flatten().tolist()
frame_dict['train/landmarks'] = landmarks_arr
# Eye_features only dependent on landmarks
frame_dict['train/eye_features'] = EyeFeaturesGenerator(
landmarks_2D,
num_landmarks).get_eye_features()
@abc.abstractmethod
def extract_landmarks(self):
"""Abstract method for populating landmarks."""
pass
@abc.abstractmethod
def extract_bbox(self):
"""Abstract method for populating bounding boxes."""
pass
@abc.abstractmethod
def extract_eye_status(self):
"""Abstract method for populating eye status."""
pass
def get_tfrecord_data(self):
"""Factory method which returns populated data for tfrecord generation."""
self._extract_frame()
self.extract_landmarks()
self.extract_bbox()
self.extract_eye_status()
self._extract_gaze_vec_info()
self._label_source()
self._prune_data()
self._logger.write_to_log()
return self._users
def _extract_frame(self):
for user_name in os.listdir(self._paths.data_path):
if user_name.startswith('.'):
# Not a valid folder
continue
user_dir_path = os.path.join(self._paths.data_path, user_name)
if not os.path.isdir(user_dir_path):
continue
for region_name in self._paths.regions:
region_dir_path = os.path.join(user_dir_path, region_name)
if not os.path.isdir(region_dir_path):
self._logger.add_warning('Could not find region data dir {}, '
'skipping this region'.format(region_dir_path))
continue
for img_file in os.listdir(region_dir_path):
frame_name = get_file_name_noext(img_file)
frame_path = os.path.join(region_dir_path, img_file)
if get_file_ext(img_file) != '.png':
self._logger.add_warning('{} is not an image'.format(frame_path))
continue
if not os.path.exists(frame_path):
self._logger.add_error('Unable to find frame {}'.format(frame_path))
continue
self._users[user_name][region_name][frame_name][
'train/image_frame_name'] = frame_path
# All images in a set have the same frame size
if self._frame_width is None or self._frame_height is None:
image_frame = cv2.imread(frame_path)
self._frame_width = image_frame.shape[1]
self._frame_height = image_frame.shape[0]
self._users[user_name][region_name][frame_name][
'train/image_frame_width'] = self._frame_width
self._users[user_name][region_name][frame_name][
'train/image_frame_height'] = self._frame_height
self._set_strategy.extract_gaze_info(
self._users[user_name][region_name][frame_name], frame_name, region_name)
def _extract_gaze_vec_info(self):
should_calculate = False
if self._cam_intrinsics is not None:
camera_matrix, _, theta_phi_distortion_coeffs = self._cam_intrinsics
should_calculate = True
for user_name in list(self._users.keys()):
for region_name in list(self._users[user_name].keys()):
# Each region has its own extrinsic parameters.
if self._cam_extrinsics is not None:
R, T = self._cam_extrinsics[region_name]
should_calculate = True
for frame_name in list(self._users[user_name][region_name].keys()):
frame_data_dict = self._users[user_name][region_name][frame_name]
face_bbox = None
try:
x1 = frame_data_dict['internal/facebbx_x_distort']
y1 = frame_data_dict['internal/facebbx_y_distort']
x2 = x1 + frame_data_dict['internal/facebbx_w_distort']
y2 = y1 + frame_data_dict['internal/facebbx_h_distort']
face_bbox = [x1, y1, x2, y2]
except KeyError:
# Using Shagan's landmarks will result in no face bounding boxes
if self._landmarks_path is None:
continue
if 'internal/landmarks_2D_distort' not in frame_data_dict:
continue
landmarks_2D_valid = ThetaPhiLandmarksGenerator(
frame_data_dict['internal/landmarks_2D_distort'],
frame_data_dict['train/landmarks_occ'],
frame_data_dict['train/image_frame_width'],
frame_data_dict['train/image_frame_height'],
face_bbox).get_landmarks_in_frame()
if not should_calculate:
is_valid_theta_phi = False
frame_data_dict['train/valid_theta_phi'] = is_valid_theta_phi
angle_struct_ins = NoPupilThetaPhiStrategy.empty_angle_struct
else:
gt_cam = np.zeros((3, 1), dtype=np.longdouble)
gt_cam[0] = frame_data_dict['label/gaze_cam_x']
gt_cam[1] = frame_data_dict['label/gaze_cam_y']
gt_cam[2] = frame_data_dict['label/gaze_cam_z']
# No pupils to bridge gap between json and sdk info
is_valid_theta_phi, angle_struct_ins = NoPupilThetaPhiStrategy(
self._logger,
gt_cam,
camera_matrix,
theta_phi_distortion_coeffs,
R,
T,
landmarks_2D_valid,
self._users[user_name][region_name]
[frame_name]['train/image_frame_name'],
self._paths.info_source_path,
self._norm_folder_name,
self._save_images).get_gaze_angles_theta_phi()
frame_data_dict['train/valid_theta_phi'] = is_valid_theta_phi
frame_data_dict['label/hp_pitch'], \
frame_data_dict['label/hp_yaw'], \
frame_data_dict['label/hp_roll'] = populate_gaze_info(
angle_struct_ins.euler_angles,
is_valid_theta_phi)
frame_data_dict['label/theta'] = populate_theta_phi(
angle_struct_ins.theta_ovr,
is_valid_theta_phi)
frame_data_dict['label/theta_le'] = populate_theta_phi(
angle_struct_ins.theta_le,
is_valid_theta_phi)
frame_data_dict['label/theta_re'] = populate_theta_phi(
angle_struct_ins.theta_re,
is_valid_theta_phi)
frame_data_dict['label/theta_mid'] = populate_theta_phi(
angle_struct_ins.theta_mid,
is_valid_theta_phi)
frame_data_dict['label/head_pose_theta'] = populate_theta_phi(
angle_struct_ins.head_pose_theta,
is_valid_theta_phi)
frame_data_dict['label/phi'] = populate_theta_phi(
angle_struct_ins.phi_ovr,
is_valid_theta_phi)
frame_data_dict['label/phi_le'] = populate_theta_phi(
angle_struct_ins.phi_le,
is_valid_theta_phi)
frame_data_dict['label/phi_re'] = populate_theta_phi(
angle_struct_ins.phi_re,
is_valid_theta_phi)
frame_data_dict['label/phi_mid'] = populate_theta_phi(
angle_struct_ins.phi_mid,
is_valid_theta_phi)
frame_data_dict['label/head_pose_phi'] = populate_theta_phi(
angle_struct_ins.head_pose_phi,
is_valid_theta_phi)
frame_data_dict['label/lpc_cam_x'], \
frame_data_dict['label/lpc_cam_y'], \
frame_data_dict['label/lpc_cam_z'] = populate_gaze_info(
angle_struct_ins.le_pc_cam_mm,
is_valid_theta_phi)
frame_data_dict['label/rpc_cam_x'], \
frame_data_dict['label/rpc_cam_y'], \
frame_data_dict['label/rpc_cam_z'] = populate_gaze_info(
angle_struct_ins.re_pc_cam_mm,
is_valid_theta_phi)
frame_data_dict['label/mid_cam_x'], \
frame_data_dict['label/mid_cam_y'], \
frame_data_dict['label/mid_cam_z'] = populate_gaze_info(
angle_struct_ins.mid_eyes_cam_mm,
is_valid_theta_phi)
frame_data_dict['label/norm_face_hp_theta'] = populate_theta_phi(
angle_struct_ins.norm_face_hp_theta,
is_valid_theta_phi)
frame_data_dict['label/norm_face_hp_phi'] = populate_theta_phi(
angle_struct_ins.norm_face_hp_phi,
is_valid_theta_phi)
frame_data_dict['label/norm_face_gaze_theta'] = populate_theta_phi(
angle_struct_ins.norm_face_gaze_theta,
is_valid_theta_phi)
frame_data_dict['label/norm_face_gaze_phi'] = populate_theta_phi(
angle_struct_ins.norm_face_gaze_phi,
is_valid_theta_phi)
frame_data_dict['label/norm_leye_hp_theta'] = populate_theta_phi(
angle_struct_ins.norm_leye_hp_theta,
is_valid_theta_phi)
frame_data_dict['label/norm_leye_hp_phi'] = populate_theta_phi(
angle_struct_ins.norm_leye_hp_phi,
is_valid_theta_phi)
frame_data_dict['label/norm_leye_gaze_theta'] = populate_theta_phi(
angle_struct_ins.norm_leye_gaze_theta,
is_valid_theta_phi)
frame_data_dict['label/norm_leye_gaze_phi'] = populate_theta_phi(
angle_struct_ins.norm_leye_gaze_phi,
is_valid_theta_phi)
frame_data_dict['label/norm_reye_hp_theta'] = populate_theta_phi(
angle_struct_ins.norm_reye_hp_theta,
is_valid_theta_phi)
frame_data_dict['label/norm_reye_hp_phi'] = populate_theta_phi(
angle_struct_ins.norm_reye_hp_phi,
is_valid_theta_phi)
frame_data_dict['label/norm_reye_gaze_theta'] = populate_theta_phi(
angle_struct_ins.norm_reye_gaze_theta,
is_valid_theta_phi)
frame_data_dict['label/norm_reye_gaze_phi'] = populate_theta_phi(
angle_struct_ins.norm_reye_gaze_phi,
is_valid_theta_phi)
frame_data_dict['train/norm_per_oof'] = populate_head_norm_float(
angle_struct_ins.norm_per_oof, is_valid_theta_phi)
frame_data_dict['train/norm_facebb_x'] = populate_head_norm_bbinfo(
angle_struct_ins.norm_face_bb, 0, is_valid_theta_phi)
frame_data_dict['train/norm_facebb_y'] = populate_head_norm_bbinfo(
angle_struct_ins.norm_face_bb, 1, is_valid_theta_phi)
frame_data_dict['train/norm_facebb_w'] = populate_head_norm_bbinfo(
angle_struct_ins.norm_face_bb, 2, is_valid_theta_phi)
frame_data_dict['train/norm_facebb_h'] = populate_head_norm_bbinfo(
angle_struct_ins.norm_face_bb, 3, is_valid_theta_phi)
frame_data_dict['train/norm_leyebb_x'] = populate_head_norm_bbinfo(
angle_struct_ins.norm_leye_bb, 0, is_valid_theta_phi)
frame_data_dict['train/norm_leyebb_y'] = populate_head_norm_bbinfo(
angle_struct_ins.norm_leye_bb, 1, is_valid_theta_phi)
frame_data_dict['train/norm_leyebb_w'] = populate_head_norm_bbinfo(
angle_struct_ins.norm_leye_bb, 2, is_valid_theta_phi)
frame_data_dict['train/norm_leyebb_h'] = populate_head_norm_bbinfo(
angle_struct_ins.norm_leye_bb, 3, is_valid_theta_phi)
frame_data_dict['train/norm_reyebb_x'] = populate_head_norm_bbinfo(
angle_struct_ins.norm_reye_bb, 0, is_valid_theta_phi)
frame_data_dict['train/norm_reyebb_y'] = populate_head_norm_bbinfo(
angle_struct_ins.norm_reye_bb, 1, is_valid_theta_phi)
frame_data_dict['train/norm_reyebb_w'] = populate_head_norm_bbinfo(
angle_struct_ins.norm_reye_bb, 2, is_valid_theta_phi)
frame_data_dict['train/norm_reyebb_h'] = populate_head_norm_bbinfo(
angle_struct_ins.norm_reye_bb, 3, is_valid_theta_phi)
frame_data_dict['train/norm_landmarks'] = populate_head_norm_listinfo(
angle_struct_ins.norm_landmarks, '2D', is_valid_theta_phi)
frame_data_dict['train/norm_frame_path'] = populate_head_norm_path(
angle_struct_ins.norm_frame_path, is_valid_theta_phi)
frame_data_dict['train/landmarks_3D'] = populate_head_norm_listinfo(
angle_struct_ins.landmarks_3D, '3D', is_valid_theta_phi)
frame_data_dict['train/norm_face_cnv_mat'] = populate_head_norm_listinfo(
angle_struct_ins.norm_face_cnv_mat, 'cnv_mat', is_valid_theta_phi)
frame_data_dict['train/norm_leye_cnv_mat'] = populate_head_norm_listinfo(
angle_struct_ins.norm_leye_cnv_mat, 'cnv_mat', is_valid_theta_phi)
frame_data_dict['train/norm_reye_cnv_mat'] = populate_head_norm_listinfo(
angle_struct_ins.norm_reye_cnv_mat, 'cnv_mat', is_valid_theta_phi)
frame_data_dict['label/face_cam_x'], frame_data_dict['label/face_cam_y'], \
frame_data_dict['label/face_cam_z'] = populate_gaze_info(
angle_struct_ins.face_cam_mm,
is_valid_theta_phi)
def _prune_data(self):
def _get_frame_num(frame):
return int(frame.rsplit('_')[-1])
prune_users = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: defaultdict())))
set_is_kpi = is_kpi_set(self._set_id)
num_available_frames = 0
num_tfrecord_frames = 0
num_insufficient_features_frames = 0
if self._landmarks_path is not None:
num_hp_frames = 0
num_wink_frames = 0
num_missing_from_fpe = 0
for user in self._users.keys():
if set_is_kpi and user in self._non_kpi_users:
continue
for region in self._users[user].keys():
for frame in self._users[user][region].keys():
frame_data_dict = self._users[user][region][frame]
num_available_frames += 1
# Fill missing fields
if 'label/left_eye_status' not in frame_data_dict:
frame_data_dict['label/left_eye_status'] = EyeStatus.missing_eye_status
if 'label/right_eye_status' not in frame_data_dict:
frame_data_dict['label/right_eye_status'] = EyeStatus.missing_eye_status
set_frame_data_dict = set(frame_data_dict)
if self._landmarks_path is not None:
if not set(DataConverter.lm_pred_feature_to_type).issubset(
set_frame_data_dict):
if 'hp' in user:
num_hp_frames += 1
elif 'wink' in user:
num_wink_frames += 1
elif 'train/landmarks' not in set_frame_data_dict:
num_missing_from_fpe += 1
continue
else:
if not set(DataConverter.feature_to_type).issubset(
set(frame_data_dict)):
self._logger.add_warning(
                                'User {} frame {} does not have sufficient features'
                                .format(user, frame))
num_insufficient_features_frames += 1
continue
prune_users[user][region][frame] = frame_data_dict
num_tfrecord_frames += 1
self._users = copy.deepcopy(prune_users)
self._logger.add_info('Total num of available frames: {}'.format(num_available_frames))
if self._landmarks_path is not None:
self._logger.add_info('Total num of frames with head pose user: {}'.format(
num_hp_frames))
self._logger.add_info('Total num of frames with wink user: {}'.format(
num_wink_frames))
self._logger.add_info('Total num of frames without fpe predictions: {}'.format(
num_missing_from_fpe))
else:
self._logger.add_info(
'Total num of frames with insufficient features: {}'.format(
num_insufficient_features_frames))
if self._paths.filtered_path:
set_filtered_frames_file = os.path.join(
self._paths.filtered_path,
self._set_id + '.txt')
num_filtered_frames = 0
prune_filtered = defaultdict(lambda: defaultdict(
lambda: defaultdict(lambda: defaultdict())))
if os.path.exists(set_filtered_frames_file):
with open(set_filtered_frames_file, 'r') as set_filtered_info:
for line in set_filtered_info:
line_split = line.split('/')
if len(self._paths.regions) == 1 and self._paths.regions[0] == '':
# There are no regions (on bench).
user_name = line_split[-2]
region_name = ''
else:
user_name = line_split[-3]
region_name = line_split[-2]
frame_name = get_file_name_noext(line_split[-1])
num_filtered_frames += 1
if (user_name in self._users.keys()
and frame_name in self._users[user_name][region_name].keys()):
prune_filtered[user_name][region_name][frame_name] = \
self._users[user_name][region_name][frame_name]
num_tfrecord_frames += 1
else:
self._logger.add_error('Could not find filtered frames file {}'
.format(set_filtered_frames_file))
self._users = copy.deepcopy(prune_filtered)
self._logger.add_info('Total num of filtered frames: {}'.format(num_filtered_frames))
if self._use_unique:
prune_unique = defaultdict(lambda: defaultdict(
lambda: defaultdict(lambda: defaultdict())))
num_tfrecord_frames = 0
for user in self._users.keys():
for region in self._users[user].keys():
img_frame_dict = defaultdict(list)
for frame in self._users[user][region].keys():
base_img = frame.rsplit('_', 1)[0]
img_frame_dict[base_img].append(frame)
for _, val in img_frame_dict.items():
val.sort(key=_get_frame_num)
frames = [val[0] for _, val in img_frame_dict.items()]
for frame in frames:
prune_unique[user][region][frame] = self._users[user][region][frame]
num_tfrecord_frames += 1
self._users = copy.deepcopy(prune_unique)
self._logger.add_info('Total num of frames in tfrecord: {}'.format(num_tfrecord_frames))
def _label_source(self):
for user in self._users.keys():
for region in self._users[user].keys():
for frame in self._users[user][region].keys():
self._users[user][region][frame]['train/source'] = os.path.basename(
os.path.normpath(self._paths.info_source_path))
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataio/tfrecordlabels_strategy.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstracted retrieving data generated by tfrecord strategy."""
class TfRecordGenerator(object):
"""Generate tfrecords using a specified strategy."""
def __init__(self, strategy):
"""Initialize input strategy."""
self._strategy = strategy
def get_tfrecord_data(self):
"""Return the data generated by the input strategy."""
return self._strategy.get_tfrecord_data()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataio/tfrecord_generator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate square bounding boxes for face and eyes."""
import abc
from six import add_metaclass
@add_metaclass(abc.ABCMeta)
class BoundingBoxStrategy(object):
"""Abstract class for creating square bounding boxes."""
@abc.abstractmethod
def get_square_bbox(self):
"""Abstract method for retrieving square bounding box."""
pass
class FaceBboxStrategy(BoundingBoxStrategy):
"""Generate face bounding box."""
def __init__(self, frame_width, frame_height, x1, y1, w, h, scale_factor=1.3):
"""Initialize frame_width, frame_height, w, h, top left coordinate, and scale factor."""
self._frame_width = frame_width
self._frame_height = frame_height
self._x1 = x1
self._y1 = y1
self._w = w
self._h = h
self._scale_factor = scale_factor
def get_square_bbox(self):
"""Get square face bounding box."""
center = [self._x1 + self._w/2, self._y1 + self._h/2]
w = self._w * self._scale_factor
h = self._h * self._scale_factor
side_len = max(w, h)
upper = int(center[1] - side_len/2)
lower = int(center[1] + side_len/2)
left = int(center[0] - side_len/2)
right = int(center[0] + side_len/2)
if left < 0:
dx = 0 - left
left = 0
right += dx
if right > self._frame_width:
dx = right - self._frame_width
right = self._frame_width
left = left - dx
if upper < 0:
dx = 0 - upper
upper = 0
lower += dx
if lower > self._frame_height:
dx = lower - self._frame_height
lower = self._frame_height
upper = upper - dx
upper = max(upper, 0)
lower = min(lower, self._frame_height)
left = max(left, 0)
right = min(right, self._frame_width)
side_len = min(right - left, lower - upper)
lower = upper + side_len
right = left + side_len
return list(map(int, [left, upper, side_len]))
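# Hedged usage sketch for FaceBboxStrategy (numbers are illustrative, not from the original tests):
#   FaceBboxStrategy(frame_width=1280, frame_height=720, x1=50, y1=60, w=100, h=80).get_square_bbox()
#   scales the box to 130 x 104, takes the larger side (130 px) around the centre (100, 100),
#   and returns [left, upper, side_len] == [35, 35, 130] after clamping to the frame.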
class EyeBboxStrategy(BoundingBoxStrategy):
"""Generate eye bounding box."""
def __init__(self, frame_width, frame_height, coords, scale_factor=1.1):
"""Initialize frame_width, frame_height, coordinates and scale factor."""
self._frame_width = frame_width
self._frame_height = frame_height
self._coords = coords
self._scale_factor = scale_factor
def get_square_bbox(self):
"""Get square eye bounding box."""
x1, y1, x2, y2 = self._coords
w = self._scale_factor * (x2 - x1)
h = self._scale_factor * (y2 - y1)
side_len = max(w, h)
center = [0.5*(x1 + x2), 0.5*(y1 + y2)]
upper = center[1] - side_len/2
lower = center[1] + side_len/2
left = center[0] - side_len/2
right = center[0] + side_len/2
upper = max(upper, 0)
lower = min(lower, self._frame_height)
left = max(left, 0)
right = min(right, self._frame_width)
side_len = int(min(right - left, lower - upper))
lower = int(upper) + side_len
right = int(left) + side_len
if lower - upper <= 0 and right - left <= 0:
return [-1, -1, -1, -1]
return list(map(int, [left, upper, right - left, lower - upper]))
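# Hedged usage sketch for EyeBboxStrategy (numbers are illustrative):
#   EyeBboxStrategy(1280, 720, coords=[100, 200, 160, 230]).get_square_bbox()
#   scales the 60 x 30 eye box by 1.1, so the square side is 66 px around the centre (130, 215),
#   giving [x, y, w, h] == [97, 182, 66, 66]; degenerate boxes return [-1, -1, -1, -1].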
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataio/bbox_strategy.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DriveIX common module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataio/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate landmarks for use in theta phi calculations."""
import cv2
import numpy as np
'''
Terminologies:
WCS: World (object) coordinate system
CCS: Camera coordinate system
ICS: Image coordinate system
'''
# FACE_MODEL can be 'OLD-58', 'IX-68' or 'EOS-50'
FACE_MODEL = 'OLD-58'
class AnthropometicPtsGenerator(object):
"""Class to provide anthropometic points of face."""
if FACE_MODEL == 'OLD-58':
# Generic 3D face model http://aifi.isr.uc.pt/Downloads.html
anthropometic_3D_landmarks = np.asarray([
[-7.308957, 0.913869, 0.000000],
[-6.775290, -0.730814, -0.012799],
[-5.665918, -3.286078, 1.022951],
[-5.011779, -4.876396, 1.047961],
[-4.056931, -5.947019, 1.636229],
[-1.833492, -7.056977, 4.061275],
[0.000000, -7.415691, 4.070434],
[1.833492, -7.056977, 4.061275],
[4.056931, -5.947019, 1.636229],
[5.011779, -4.876396, 1.047961],
[5.665918, -3.286078, 1.022951],
[6.775290, -0.730814, -0.012799],
[7.308957, 0.913869, 0.000000],
[5.311432, 5.485328, 3.987654],
[4.461908, 6.189018, 5.594410],
[3.550622, 6.185143, 5.712299],
[2.542231, 5.862829, 4.687939],
[1.789930, 5.393625, 4.413414],
[2.693583, 5.018237, 5.072837],
[3.530191, 4.981603, 4.937805],
[4.490323, 5.186498, 4.694397],
[-5.311432, 5.485328, 3.987654],
[-4.461908, 6.189018, 5.594410],
[-3.550622, 6.185143, 5.712299],
[-2.542231, 5.862829, 4.687939],
[-1.789930, 5.393625, 4.413414],
[-2.693583, 5.018237, 5.072837],
[-3.530191, 4.981603, 4.937805],
[-4.490323, 5.186498, 4.694397],
[1.330353, 7.122144, 6.903745],
[2.533424, 7.878085, 7.451034],
[4.861131, 7.878672, 6.601275],
[6.137002, 7.271266, 5.200823],
[6.825897, 6.760612, 4.402142],
[-1.330353, 7.122144, 6.903745],
[-2.533424, 7.878085, 7.451034],
[-4.861131, 7.878672, 6.601275],
[-6.137002, 7.271266, 5.200823],
[-6.825897, 6.760612, 4.402142],
[-2.774015, -2.080775, 5.048531],
[-0.509714, -1.571179, 6.566167],
[0.000000, -1.646444, 6.704956],
[0.509714, -1.571179, 6.566167],
[2.774015, -2.080775, 5.048531],
[0.589441, -2.958597, 6.109526],
[0.000000, -3.116408, 6.097667],
[-0.589441, -2.958597, 6.109526],
[-0.981972, 4.554081, 6.301271],
[-0.973987, 1.916389, 7.654050],
[-2.005628, 1.409845, 6.165652],
[-1.930245, 0.424351, 5.914376],
[-0.746313, 0.348381, 6.263227],
[0.000000, 0.000000, 6.763430],
[0.746313, 0.348381, 6.263227],
[1.930245, 0.424351, 5.914376],
[2.005628, 1.409845, 6.165652],
[0.973987, 1.916389, 7.654050],
[0.981972, 4.554081, 6.301271]
], dtype=np.longdouble)
elif FACE_MODEL == 'IX-68':
# New generic 3D face model (mean of our 10 face scans)
anthropometic_3D_landmarks = np.asarray([
[0.463302314, 0.499617226, 2.824620485],
[0.433904979, 0.505937393, 2.644347876],
[0.39794359, 0.54824712, 2.468309015],
[0.347156364, 0.608686736, 2.301015556],
[0.261349984, 0.708693571, 2.164755151],
[0.149679065, 0.846413877, 2.038914531],
[0.020857666, 1.000756979, 1.96136412],
[-0.124583332, 1.132211104, 1.890638679],
[-0.332052324, 1.199630469, 1.870016173],
[-0.521015424, 1.142547488, 1.891351938],
[-0.659920681, 1.03973259, 1.961001828],
[-0.797577906, 0.86913183, 2.057442099],
[-0.912351593, 0.80159378, 2.188996568],
[-0.994247332, 0.707895722, 2.321974874],
[-1.045988813, 0.646902599, 2.488486946],
[-1.07838694, 0.581114346, 2.655235291],
[-1.096934579, 0.572226902, 2.832175642],
[0.306385975, 0.988582142, 3.263724804],
[0.238308419, 1.087680236, 3.319453031],
[0.126437213, 1.167731345, 3.357794225],
[-0.003806859, 1.229740798, 3.335844517],
[-0.126166103, 1.249807343, 3.300820023],
[-0.483642399, 1.261558414, 3.320731789],
[-0.594755229, 1.244249567, 3.356189996],
[-0.709202692, 1.193373024, 3.370144337],
[-0.830934606, 1.118067637, 3.342299908],
[-0.911886856, 1.022390895, 3.286355436],
[-0.31427322, 1.28056182, 3.116396815],
[-0.312322683, 1.355140246, 3.0163863],
[-0.310799612, 1.452512272, 2.899074256],
[-0.315011633, 1.537534878, 2.777368128],
[-0.125134574, 1.256734014, 2.648497283],
[-0.216964348, 1.329175174, 2.637426972],
[-0.310138743, 1.389713913, 2.611324817],
[-0.414820289, 1.334226191, 2.642694384],
[-0.513519868, 1.265409455, 2.656487644],
[0.196186462, 1.035192601, 3.090169013],
[0.126957612, 1.119997166, 3.156619817],
[-0.027273278, 1.136058375, 3.157634437],
[-0.100839235, 1.102722079, 3.088872135],
[-0.021972392, 1.132983871, 3.03742063],
[0.127623449, 1.10177733, 3.034567326],
[-0.520080116, 1.100469962, 3.095452815],
[-0.586792942, 1.133374192, 3.17071414],
[-0.745613977, 1.125613876, 3.170327187],
[-0.819571108, 1.0455795, 3.105413705],
[-0.744035766, 1.112881519, 3.048785478],
[-0.589515403, 1.131952509, 3.048771381],
[0.02306129, 1.158300541, 2.368660092],
[-0.080868714, 1.272260003, 2.42186287],
[-0.181587959, 1.345463172, 2.448015809],
[-0.312187512, 1.385880813, 2.454812676],
[-0.452711696, 1.3551175, 2.454890877],
[-0.558453415, 1.285798028, 2.426469952],
[-0.664228875, 1.164380819, 2.386185408],
[-0.571288593, 1.24077671, 2.312964618],
[-0.466966776, 1.311935268, 2.253052473],
[-0.318221454, 1.336186148, 2.228322476],
[-0.170658994, 1.297508962, 2.247573286],
[-0.071347391, 1.225932129, 2.294786155],
[-0.053068627, 1.196410157, 2.366681814],
[-0.16933859, 1.303018831, 2.379662782],
[-0.31283252, 1.344644643, 2.37743327],
[-0.453441203, 1.322521709, 2.388329715],
[-0.585467342, 1.213929333, 2.378763407],
[-0.473488985, 1.302666686, 2.342419624],
[-0.311414156, 1.341170423, 2.319458067],
[-0.16669072, 1.297568522, 2.336282581]
], dtype=np.longdouble)
elif FACE_MODEL == 'EOS-50':
# https://github.com/patrikhuber/eos/blob/master/share/ibug_to_sfm.txt
sfm_points_50 = np.load('nvidia_tao_tf1/cv/common/dataio/eos_mean_68.npy')
list_points = []
list_points.append(9)
list_points += range(18, 61)
list_points += range(62, 65)
list_points += range(66, 69)
anthropometic_3D_landmarks = np.zeros((68, 3), dtype=np.float32)
i = 0
for ind in list_points:
anthropometic_3D_landmarks[ind-1, :] = sfm_points_50[i, :]
i += 1
anthropometic_3D_landmarks = np.asarray(anthropometic_3D_landmarks, dtype=np.longdouble)
if FACE_MODEL == 'OLD-58':
le_outer = anthropometic_3D_landmarks[13]
le_inner = anthropometic_3D_landmarks[17]
re_outer = anthropometic_3D_landmarks[21]
re_inner = anthropometic_3D_landmarks[25]
else:
le_outer = anthropometic_3D_landmarks[45]
le_inner = anthropometic_3D_landmarks[42]
re_outer = anthropometic_3D_landmarks[36]
re_inner = anthropometic_3D_landmarks[39]
le_center = (le_inner + le_outer) / 2.0
le_center = np.reshape(le_center, (1, 3))
re_center = (re_inner + re_outer) / 2.0
re_center = np.reshape(re_center, (1, 3))
# Use conventional cord system [+x right to left eye] [+y nose to mouth] [nose to back of head].
if FACE_MODEL == 'OLD-58':
anthropometic_3D_landmarks[:, 1] *= -1 # Inverse Y axis.
anthropometic_3D_landmarks[:, 2] *= -1 # Inverse Z axis.
elif FACE_MODEL == 'IX-68':
anthropometic_3D_landmarks[:, [1, 2]] = anthropometic_3D_landmarks[:, [2, 1]]
anthropometic_3D_landmarks *= -1 # Inverse X, Y, Z axis.
elif FACE_MODEL == 'EOS-50':
anthropometic_3D_landmarks[:, 1] *= -1 # Inverse Y axis.
anthropometic_3D_landmarks[:, 2] *= -1 # Inverse Z axis.
if FACE_MODEL == 'OLD-58':
'''
OLD-58 face model has:
- interpupillary distance of 70 mm
- intercanthal distance of 35.8 mm
=> larger than average human values of 64-65 mm and 30-31 mm respectively
=> need a normalization (e.g. downscaling to 6.5/7 ratio)
'''
face_model_scaling = 65.0 / 7.0
else:
face_model_scaling = 65.0 / np.linalg.norm(le_center - re_center)
anthropometic_3D_landmarks *= face_model_scaling
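    # Illustrative check of the scaling above (assumed interpretation): for OLD-58 the model's
    # interpupillary distance of 7.0 units becomes 7.0 * (65.0 / 7.0) = 65 mm, while for the
    # other models the measured eye-centre distance is rescaled to 65 mm directly.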
if FACE_MODEL == 'OLD-58':
'''
Landmark correspondences (index-1)
3D-58, 2D-68
34, 27 - left eye, eyebrow, outer point
30, 23 - left eye, eyebrow, inner point
35, 22 - right eye, eyebrow, inner point
39, 18 - right eye, eyebrow, outer point
14, 46 - left eye, eye corner, outer point
18, 43 - left eye, eye corner, inner point
26, 40 - right eye, eye corner, inner point
22, 37 - right eye, eye corner, outer point
56, 36 - nose, right side, middle point
50, 32 - nose, left side, middle point
44, 55 - mouth, left corner point
40, 49 - mouth, right corner point
46, 58 - mouth, bottom center point
7, 9 - chin, center point
'''
pnp_indices = [33, 29, 34, 38, 13, 17, 25, 21, 55, 49, 43, 39, 45, 6]
'''
Landmarks to be used for eye center estimation (index-1)
left eye outer corner (3D:#14, Dlib:#46)
left eye inner corner (3D:#18, Dlib:#43)
right eye outer corner (3D:#22, Dlib:#37)
right eye inner corner (3D:#26, Dlib:#40)
chin center (3D:#7, Dlib:#9)
nose leftmost (3D:#55, GT/Dlib:#36)
nose left (3D:#54, GT/Dlib:#35)
nose rightmost (3D:#51, GT/Dlib:#32)
nose right (3D:#52, GT/Dlib:#33)
'''
robust_expr_var_indices = [13, 17, 21, 25, 6, 54, 53, 50, 51]
landmarks_2D_indices_selected = [26, 22, 21, 17, 45, 42, 39, 36, 35, 31, 54, 48, 57, 8]
landmarks_2D_indices_expr_inv = [45, 42, 36, 39, 8, 35, 34, 31, 32]
else: # Landmark correspondence is one to one for new model.
landmarks_2D_indices_selected = [36, 39, 42, 45, 48, 54, 27, 28, 29, 30, 31, 32, 33, 34,
35, 8, 17, 21, 22, 26]
if FACE_MODEL == 'IX-68':
landmarks_2D_indices_expr_inv = [45, 42, 36, 39, 8, 33, 30]
elif FACE_MODEL == 'EOS-50':
landmarks_2D_indices_expr_inv = [45, 42, 36, 39, 8, 35, 34, 32, 31]
# Shalini v1
# landmarks_2D_set_expr_inv = [45, 42, 36, 39, 33, 30, 27]
# Shalini v2
# landmarks_2D_set_expr_inv = [45, 42, 36, 39, 31, 32, 33, 34, 35, 30, 29, 28, 27]
pnp_indices = landmarks_2D_indices_selected
robust_expr_var_indices = landmarks_2D_indices_expr_inv
def __init__(self):
"""Initialize landmarks to be None so they can be lazily set."""
self._pnp_lm = None
self._robust_expr_var_lm = None
def get_all_landmarks(self):
"""Get all landmarks."""
return self.anthropometic_3D_landmarks
def get_landmarks_export_3D(self):
"""Get selected 3D landmarks to write out into tfrecords."""
landmarks_3D = np.zeros((38, 3), dtype=np.float32)
list_points = []
if FACE_MODEL == 'OLD-58':
list_points.append(6)
list_points += range(13, 15)
list_points += range(16, 19)
list_points += range(20, 23)
list_points += range(24, 27)
list_points += range(29, 47)
list_points += range(50, 55)
i = 0
for ind in list_points:
landmarks_3D[i, :] = self.anthropometic_3D_landmarks[ind, :]
i += 1
landmarks_3D[36, :] = (self.anthropometic_3D_landmarks[48, :] +
self.anthropometic_3D_landmarks[56, :]) / 2.0
landmarks_3D[37, :] = (self.anthropometic_3D_landmarks[49, :] +
self.anthropometic_3D_landmarks[55, :]) / 2.0
elif FACE_MODEL == 'IX-68':
list_points.append(8)
list_points += range(17, 27)
list_points += range(29, 48)
list_points.append(48)
list_points += range(50, 53)
list_points.append(54)
list_points += range(56, 59)
i = 0
for ind in list_points:
landmarks_3D[i, :] = self.anthropometic_3D_landmarks[ind, :]
i += 1
return landmarks_3D
def get_pnp_landmarks(self):
"""Get landmarks to be used in perspective-n-point algorithm."""
if self._pnp_lm is None:
self._pnp_lm = self.anthropometic_3D_landmarks[self.pnp_indices]
return self._pnp_lm
def get_robust_expr_var_landmarks(self):
"""Get landmarks that are robust against variations in expressions."""
if self._robust_expr_var_lm is None:
self._robust_expr_var_lm = self.anthropometic_3D_landmarks[self.robust_expr_var_indices]
return self._robust_expr_var_lm
def get_selected_landmarks(self, landmarks_2D, indices_2D, lm_3D):
"""Get selected landmarks from 2D and 3D landmarks and invalid landmarks percentage."""
landmarks_selected_2D = []
landmarks_selected_3D = []
cnt_invalid = 0
n_selected_landmarks = len(indices_2D)
for i in range(n_selected_landmarks):
coord = landmarks_2D[indices_2D[i]]
if coord[0] > 0 and coord[1] > 0:
landmarks_selected_2D.append(coord)
landmarks_selected_3D.append(lm_3D[i])
else:
cnt_invalid += 1
return np.asarray(landmarks_selected_2D),\
np.asarray(landmarks_selected_3D),\
cnt_invalid * 100.0 / n_selected_landmarks
def get_eye_centers(self):
"""Get centers of eyes based off eye pts in landmarks."""
eye_lm = self.get_robust_expr_var_landmarks()
left_eye_pts = eye_lm[:2]
right_eye_pts = eye_lm[2:4]
return np.reshape(np.mean(left_eye_pts, axis=0), (1, 3)),\
np.reshape(np.mean(right_eye_pts, axis=0), (1, 3))
def get_face_center(self):
"""Get face center as mean of 6 landmarks (4 eye points + 2 mouth points)."""
lm = self.get_pnp_landmarks()
if FACE_MODEL == 'OLD-58':
face_pts = lm[[4, 5, 6, 7, 10, 11]]
elif FACE_MODEL == 'IX-68':
face_pts = lm[[4, 5, 6, 7, 12, 13]]
return np.reshape(np.mean(face_pts, axis=0), (1, 3))
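# Hedged usage sketch for AnthropometicPtsGenerator (the 3D face model is a class-level
# constant selected by FACE_MODEL, so no arguments are needed):
#   anthro = AnthropometicPtsGenerator()
#   le_center, re_center = anthro.get_eye_centers()   # (1, 3) model-space points, scaled to mm
#   face_center = anthro.get_face_center()            # mean of 4 eye + 2 mouth landmarks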
class PnPPtsGenerator(object):
"""
Class to provide Perspective-n-Point(PnP) rvec and tvec.
PnP rvec, tvec describe how to project from pts coordinate system to image plane.
"""
def __init__(self, camera_mat, distortion_coeffs):
"""Initialize camera matrix and distortion coefficients."""
self._camera_mat = camera_mat
self._distortion_coeffs = distortion_coeffs
@staticmethod
def get_cv_array(precise_arr):
"""Cast more precise array to float for cv."""
return np.asarray(precise_arr, dtype=np.float)
def compute_EPnP(self, points_3D, points_2D):
""""Find object pose efficiently."""
points_2D = np.expand_dims(points_2D, axis=1)
points_3D = np.expand_dims(points_3D, axis=1)
retval, rvec, tvec = cv2.solvePnP(
self.get_cv_array(points_3D),
self.get_cv_array(points_2D),
self._camera_mat,
self._distortion_coeffs,
None,
None,
False,
cv2.SOLVEPNP_EPNP)
return retval, rvec, tvec
def compute_PnP_Iterative(self, points_3D, points_2D, rvec, tvec):
""""Find object pose which minimizes reporjection error."""
retval, rvec, tvec = cv2.solvePnP(
self.get_cv_array(points_3D),
self.get_cv_array(points_2D),
self._camera_mat,
self._distortion_coeffs,
rvec,
tvec,
True,
cv2.SOLVEPNP_ITERATIVE)
return retval, rvec, tvec
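# Hedged usage sketch for PnPPtsGenerator (camera parameters and landmark arrays are assumed inputs):
#   pnp = PnPPtsGenerator(camera_matrix, distortion_coeffs)
#   _, rvec, tvec = pnp.compute_EPnP(landmarks_3D, landmarks_2D)                       # coarse pose
#   _, rvec, tvec = pnp.compute_PnP_Iterative(landmarks_3D, landmarks_2D, rvec, tvec)  # refined pose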
def projectObject2Camera(object_coords, rot_mat, tvec):
"""Project object coordinates (WCS) to camera coordinates (CCS).
Args:
object_coords (np.ndarray): (x, y, z) coordinates in WCS.
rot_mat (np.ndarray): 3D WCS to 2D ICS rotational transformation matrix.
tvec (np.ndarray): 3D WCS to 2D ICS translation transformation matrix.
Returns:
cam_coords (np.ndarray): output cam coordinates.
"""
RPw = rot_mat.dot(object_coords.transpose())
cam_coords = RPw + tvec
return cam_coords
def projectCamera2Image(cam_coords, cam_mat):
"""Project camera coordinates (CCS) to image coordinates (ICS).
P_i = cam_mat*cam_coords
Args:
cam_coords (np.ndarray): (x, y, z) coordinates in CCS.
cam_mat (np.ndarray): camera calibration matrix.
Returns:
image_coords (np.ndarray): output image coordinates.
"""
image_coords = np.matmul(cam_mat, cam_coords)
image_coords /= image_coords[2]
return image_coords
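# Minimal sketch (not part of the original pipeline) chaining the two helpers above to map a
# WCS point into pixel coordinates; the camera matrix and extrinsics below are assumed values.
def _example_project_wcs_to_ics():
    """Project one object-space point to image coordinates with illustrative parameters."""
    cam_mat = np.asarray([[960.0, 0.0, 640.0],
                          [0.0, 960.0, 360.0],
                          [0.0, 0.0, 1.0]])
    rot_mat = np.eye(3)  # no rotation between WCS and CCS in this toy setup
    tvec = np.asarray([[0.0], [0.0], [600.0]])  # object placed 600 mm in front of the camera
    object_coords = np.asarray([[10.0, -5.0, 0.0]])
    cam_coords = projectObject2Camera(object_coords, rot_mat, tvec)
    return projectCamera2Image(cam_coords, cam_mat)  # homogeneous pixel coordinates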
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataio/theta_phi_lm_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encapsulate implementation for gaze sets."""
import os
import cv2
import numpy as np
from nvidia_tao_tf1.core.utils.path_utils import expand_path
from nvidia_tao_tf1.cv.common.dataio.set_strategy import SetStrategy
from nvidia_tao_tf1.cv.common.dataio.utils import is_int, mkdir
class GazeStrategy(SetStrategy):
"""Class encapsulates implementation specific to gaze sets."""
gaze_parent_set_path = '/home/copilot.cosmos10/RealTimePipeline/set'
gaze_error_path = '/home/copilot.cosmos10/RealTimePipeline/errors'
gaze_cosmos639_parent_path = '/home/driveix.cosmos639/GazeData'
gaze_cosmos639_filter_path = '/home/driveix.cosmos639/GazeData/filteredFrames'
def __init__(
self,
set_id,
experiment_folder_suffix,
tfrecord_folder_name,
gt_folder_name,
use_filtered,
use_undistort,
landmarks_folder_name,
set_label_sorter,
set_root_path,
):
"""Initialize parameters.
Args:
set_id (str): Set for which to generate tfrecords.
experiment_folder_suffix (str): Suffix of experiment folder containing tfrecords.
tfrecord_folder_name (str): Folder name of folder containing tfrecords.
gt_folder_name (str): Folder name of folder containing ground truth txt files.
            use_filtered (bool): Whether to restrict frames to the set's filtered-frames list.
            use_undistort (bool): Whether to use undistorted images and undistort landmark points.
            landmarks_folder_name (str): Folder name of predicted landmarks, or None to disable.
            set_label_sorter (SetLabelSorter object): Object to sort set as DataFactory / Nvhelnet.
            set_root_path (str): Root path of the gaze sets; the default cosmos path is used if empty.
"""
super(GazeStrategy, self).__init__(
set_id,
experiment_folder_suffix,
tfrecord_folder_name,
gt_folder_name,
landmarks_folder_name,
set_label_sorter)
self._use_filtered = use_filtered
self._use_undistort = use_undistort
if len(set_root_path) == 0:
self._gaze_parent_set_path = self.gaze_parent_set_path
else:
self._gaze_parent_set_path = set_root_path
self._set_source_paths()
self._set_camera_parameters()
def _set_camera_parameters(self):
def _load_cam_intrinsics(self):
file_path = os.path.join(self._paths.config_path, 'camera_parameters.txt')
camera_matrix = np.loadtxt(file_path, delimiter=',', max_rows=3)
# Distortion coeffs has a single value line by line, below camera_matrix.
distortion_coeffs = np.loadtxt(file_path, skiprows=4).transpose()
# Only the first 5 distortion coefficients are correct.
distortion_coeffs = distortion_coeffs[:5]
theta_phi_distortion_coeffs = distortion_coeffs
return [camera_matrix, distortion_coeffs, theta_phi_distortion_coeffs]
def _load_cam_extrinsics(self):
extrinsics = {}
for region_name in self._paths.regions:
R_file_path = os.path.join(self._paths.config_path, region_name, 'R.txt')
T_file_path = os.path.join(self._paths.config_path, region_name, 'T.txt')
R = np.loadtxt(R_file_path, delimiter=',')
T = np.loadtxt(T_file_path)
extrinsics[region_name] = (R, T)
return extrinsics
def _load_screen_parameters(self):
screens = {}
for region_name in self._paths.regions:
scrpW, scrpH = 1920.0, 1080.0 # Default vals.
# Try to find a config file for the resolution.
resolution = os.path.join(self._paths.config_path, region_name, 'resolution.txt')
if os.path.isfile(resolution):
with open(resolution) as f:
scrpW = float(f.readline())
scrpH = float(f.readline())
# Check which of board_size or TV_size is available.
board_size = os.path.join(self._paths.config_path, region_name, 'board_size.txt')
tv_size = os.path.join(self._paths.config_path, region_name, 'TV_size')
if os.path.isfile(board_size):
if os.path.isfile(tv_size):
raise IOError("Both board_size.txt and TV_size exist in {}"
.format(os.path.join(self._paths.config_path, region_name)))
size_file = board_size
elif os.path.isfile(tv_size):
size_file = tv_size
else:
raise IOError("Neither board_size.txt nor TV_size exists in {}"
.format(os.path.join(self._paths.config_path, region_name)))
with open(size_file) as f:
scrmW = float(f.readline())
scrmH = float(f.readline())
screens[region_name] = (scrmW, scrmH, scrpW, scrpH)
return screens
self._cam_intrinsics = _load_cam_intrinsics(self)
self._cam_extrinsics = _load_cam_extrinsics(self)
self._screen_params = _load_screen_parameters(self)
def _get_json_path(self, set_id_path):
return self._check_paths(set_id_path, [
'json_datafactory_v2',
'json_datafactory'
])
def _set_source_paths(self):
def _get_region_names(config_path):
"""Read the region names from the folders in config."""
if not os.path.exists(config_path):
                raise IOError('{} does not exist'.format(config_path))
configs = os.listdir(config_path)
regions = [
config
for config in configs
if os.path.isdir(os.path.join(config_path, config))
and config.startswith('region_')
]
if not regions:
# On bench collection has no regions.
if 'incar' in self._set_id:
raise IOError('In car data set should have region_ folders in {}'
.format(config_path))
return ['']
if 'incar' not in self._set_id:
raise IOError('On bench data set should not have region_ folders in {}'
.format(config_path))
return regions
old_cosmos_set_path = os.path.join(
self._gaze_parent_set_path, self._set_id)
if os.path.exists(old_cosmos_set_path):
strategy_type, info_source_path, self.experiment_folder_name = \
self._set_label_sorter.get_info_source_path(
self._get_json_path,
old_cosmos_set_path,
old_cosmos_set_path)
lm_path = self._get_landmarks_path(old_cosmos_set_path)
if lm_path is not None:
self.experiment_folder_name = self.fpe_expr_folder \
+ self._experiment_folder_suffix
experiment_folder_path = os.path.join(
old_cosmos_set_path, self.experiment_folder_name)
mkdir(experiment_folder_path)
data_path = os.path.join(old_cosmos_set_path, 'Data')
if self._use_undistort:
data_path = os.path.join(old_cosmos_set_path, 'undistortedData')
config_path = os.path.join(old_cosmos_set_path, 'Config')
regions = _get_region_names(config_path)
self._strategy_type, self._paths = strategy_type, self.PathStruct(
error_path=os.path.join(self.gaze_error_path),
data_path=data_path,
config_path=config_path,
tfrecord_path=os.path.join(
old_cosmos_set_path,
self.experiment_folder_name,
self._tfrecord_folder_name),
gt_path=os.path.join(
old_cosmos_set_path,
self.experiment_folder_name,
self._gt_folder_name),
info_source_path=info_source_path,
filtered_path=None,
landmarks_path=lm_path,
regions=regions)
else:
new_cosmos_base_path = self.gaze_cosmos639_parent_path
new_cosmos_raw_path = os.path.join(
new_cosmos_base_path, 'orgData', self._set_id)
new_cosmos_generated_path = os.path.join(
new_cosmos_base_path, 'postData', self._set_id)
if (
not os.path.exists(new_cosmos_raw_path) or
not os.path.exists(new_cosmos_generated_path)
):
raise IOError('Unable to find a data source')
strategy_type, info_source_path, self.experiment_folder_name = \
self._set_label_sorter.get_info_source_path(
self._get_json_path,
new_cosmos_raw_path,
new_cosmos_generated_path)
data_path = os.path.join(new_cosmos_raw_path, 'pngData')
if not os.path.exists(data_path):
data_path = os.path.join(new_cosmos_raw_path, 'Data')
if self._use_undistort:
data_path = os.path.join(new_cosmos_raw_path, 'undistortedData')
config_path = os.path.join(new_cosmos_raw_path, 'Config')
regions = _get_region_names(config_path)
filtered_path = None
if self._use_filtered:
filtered_path = os.path.join(
self.gaze_cosmos639_filter_path,
self._set_id.rsplit('-', 1)[0])
lm_path = self._get_landmarks_path(new_cosmos_generated_path)
if lm_path is not None:
self.experiment_folder_name = self.fpe_expr_folder + self._experiment_folder_suffix
experiment_folder_path = os.path.join(
new_cosmos_generated_path,
self.experiment_folder_name)
mkdir(experiment_folder_path)
self._strategy_type, self._paths = strategy_type, self.PathStruct(
error_path=os.path.join(new_cosmos_base_path, 'errors'),
data_path=data_path,
config_path=config_path,
tfrecord_path=os.path.join(
new_cosmos_generated_path,
self.experiment_folder_name,
self._tfrecord_folder_name),
gt_path=os.path.join(
new_cosmos_generated_path,
self.experiment_folder_name,
self._gt_folder_name),
info_source_path=info_source_path,
filtered_path=filtered_path,
landmarks_path=lm_path,
regions=regions)
@staticmethod
def _get_screen(frame_name):
frame_split = frame_name.split('_')
x_index = 0
y_index = 1
if not is_int(frame_split[0]):
x_index += 1
y_index += 1
return float(frame_split[x_index]), float(frame_split[y_index])
@staticmethod
def _get_gaze_cam(gaze_screen, screen_params, cam_extrinsics):
scrmW, scrmH, scrpW, scrpH = screen_params
gaze_camera_spc = np.zeros(2)
gaze_camera_spc[0] = gaze_screen[0] * scrmW / scrpW
gaze_camera_spc[1] = gaze_screen[1] * scrmH / scrpH
gaze_in_homogeneous = np.zeros(3)
gaze_in_homogeneous[:2] = gaze_camera_spc
gaze_in_homogeneous[2] = 1.0
# Output gaze in mm
R, T = cam_extrinsics
return R.dot(gaze_in_homogeneous) + T.transpose()
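    # Hedged worked example for _get_gaze_cam (numbers are illustrative): a gaze point at screen
    # pixel (960, 540) on a 1920 x 1080 px screen measuring 600 x 340 mm maps to (300, 170) mm in
    # screen space; the homogeneous vector [300, 170, 1] is then taken into camera space (mm)
    # through the region's extrinsics as R.dot(gaze) + T.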
def extract_gaze_info(self, frame_data_dict, frame_name, region_name):
"""Extract gaze information from screen and camera parameters."""
gaze_screen = self._get_screen(frame_name)
frame_data_dict['label/gaze_screen_x'] = gaze_screen[0]
frame_data_dict['label/gaze_screen_y'] = gaze_screen[1]
gaze_cam = self._get_gaze_cam(gaze_screen, self._screen_params[region_name],
self._cam_extrinsics[region_name])
frame_data_dict['label/gaze_cam_x'] = gaze_cam[0]
frame_data_dict['label/gaze_cam_y'] = gaze_cam[1]
frame_data_dict['label/gaze_cam_z'] = gaze_cam[2]
def get_pts(self, pts, frame_width, frame_height):
"""Return undistorted points if required, otherwise return distored points."""
if not self._use_undistort:
return pts
reshaped_pts = np.asarray(pts, dtype=np.float64)
reshaped_pts.shape = (-1, 1, 2)
cam_matrix, distortion_coeffs, _ = self._cam_intrinsics
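        # With P set to the original camera matrix, cv2.undistortPoints returns the undistorted
        # points in pixel coordinates rather than normalized image coordinates.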
pts_undistorted = cv2.undistortPoints(
reshaped_pts,
cam_matrix,
distortion_coeffs,
R=None,
P=cam_matrix)
pts_undistorted[:, :, 0] = np.clip(pts_undistorted[:, :, 0], 0, frame_width)
pts_undistorted[:, :, 1] = np.clip(pts_undistorted[:, :, 1], 0, frame_height)
pts_undistorted.shape = (-1, 1)
return pts_undistorted.flatten().tolist()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataio/gaze_strategy.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Stand-alone script that can read the generated tfrecords."""
import argparse
import os
import sys
from nvidia_tao_tf1.core.utils.path_utils import expand_path
from nvidia_tao_tf1.cv.common.dataio.data_converter import DataConverter
def main(args=None):
"""Generate tfrecords based on user arguments."""
if args is None:
args = sys.argv[1:]
parser = argparse.ArgumentParser(description="Read TfRecords")
parser.add_argument('-folder-name', '--ground_truth_experiment_folder_name',
type=str, required=True,
help='Name of ground truth experiment folder containing tfrecords')
parser.add_argument('-nfs', '--nfs_storage', type=str,
required=True, help='NFS cosmos storage name')
    # This script uses -pred instead of -landmarks-folder, since the folder location is irrelevant here.
parser.add_argument('-pred', '--use_landmark_predictions',
type=lambda x: (str(x).lower() == 'true'),
default=False, help='Read landmarks predictions')
parser.add_argument('-set', '--set_id', type=str,
required=True, help='Setid (ex. s427-gaze-2)')
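    # Example invocation (folder and set names below are illustrative only):
    #   python tfrecord_reader.py -folder-name Ground_Truth_DataFactory_v2 \
    #       -nfs driveix.cosmos639 -pred False -set s427-gaze-2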
args = parser.parse_args(args)
ground_truth_factory = args.ground_truth_experiment_folder_name
nfs_name = args.nfs_storage
use_lm_pred = args.use_landmark_predictions
set_id = args.set_id
tfrecord_folder_name = 'TfRecords_combined'
if nfs_name == 'driveix.cosmos639':
spec_set_path = expand_path(f"/home/{nfs_name}/GazeData/postData/{set_id}")
set_gt_factory = expand_path(f"{spec_set_path}/{ground_truth_factory}")
elif nfs_name in ('copilot.cosmos10', 'projects1_copilot'):
set_gt_factory = expand_path(f"/home/{nfs_name}/RealTimePipeline/set/{set_id}/{ground_truth_factory}")
else:
raise IOError('No such NFS cosmos storage')
tfrecords_path = os.path.join(set_gt_factory, tfrecord_folder_name)
if os.path.isdir(tfrecords_path):
for filename in os.listdir(tfrecords_path):
if filename.endswith('.tfrecords'):
DataConverter.read_tfrecords([os.path.join(tfrecords_path, filename)], use_lm_pred)
if __name__ == "__main__":
main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataio/tfrecord_reader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commonly used methods and classes across dataio."""
from collections import defaultdict
import datetime
import errno
import logging
import os
from nvidia_tao_tf1.core.utils.path_utils import expand_path
def is_kpi_set(set_id):
"""Return if set id is a KPI set."""
lowercase_set_id = set_id.lower()
return ('kpi' in lowercase_set_id or
's532-gaze-' in lowercase_set_id or
's534-gaze-' in lowercase_set_id or
's593-gaze-' in lowercase_set_id or
's594-gaze-' in lowercase_set_id or
's595-gaze-' in lowercase_set_id or
's596-gaze-' in lowercase_set_id or
's597-gaze-' in lowercase_set_id)
def is_int(num_str):
"""Return if python string is a number."""
try:
int(num_str)
return True
except Exception:
return False
def get_file_name_noext(filepath):
"""Return file name witout extension."""
return os.path.splitext(os.path.basename(filepath))[0]
def get_file_ext(filepath):
"""Return file extension."""
return os.path.splitext(os.path.basename(filepath))[1]
def mkdir(path):
"""Make a directory if one does not exist already."""
try:
os.makedirs(expand_path(path))
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
class SysOutLogger(object):
"""Class which logs messages to output when FileHandler cannot establish cosmos connection."""
def __init__(self):
"""Constructor for SysOutLogger."""
pass
@staticmethod
def _format_output(msg_type, msg):
return '{} {}: {}'.format(
msg_type,
datetime.datetime.now().strftime("%Y-%m-%d %H:%M"),
msg)
def info(self, msg):
"""Output info msg."""
print(self._format_output('INFO', msg))
def error(self, msg):
"""Output error msg."""
print(self._format_output('ERROR', msg))
def warning(self, msg):
"""Output warning msg."""
print(self._format_output('WARN', msg))
class Logger(object):
"""Class which formats logging file."""
formatter = logging.Formatter('%(asctime)s %(levelname)s %(name)s %(message)s')
def __init__(self, loggername, filename, level):
"""Initialize logger."""
try:
handler = logging.FileHandler(filename)
handler.setFormatter(Logger.formatter)
self._logger = logging.getLogger(loggername)
self._logger.setLevel(level)
self._logger.addHandler(handler)
except IOError:
print('Check NFS cosmos connection.')
self._logger = SysOutLogger()
def get_logger(self):
"""Return formatted logger."""
return self._logger
class PipelineReporter(object):
"""Class which logs information about runs."""
def __init__(self, log_path, script_name, set_id):
"""Initialize parameters, create log folders, and format logging."""
self.set_id = set_id
self.set_to_info = defaultdict(list)
self.set_to_errors = defaultdict(list)
self.set_to_warnings = defaultdict(list)
self._create_log_folders(
log_path,
script_name)
self._create_set_loggers()
def _create_log_folders(self, log_path, script_name):
run_folder_name = script_name + '_' + datetime.datetime.now().strftime('%Y-%m-%d')
log_set_path = os.path.join(
log_path,
run_folder_name)
self.error_set_path = os.path.join(log_set_path, 'Error')
self.warning_set_path = os.path.join(log_set_path, 'Warning')
mkdir(self.error_set_path)
mkdir(self.warning_set_path)
def _create_set_loggers(self):
self.err_logger = Logger(
self.set_id + '_error',
os.path.join(
self.error_set_path,
self.set_id + '.log'),
logging.INFO).get_logger()
self.warning_logger = Logger(
self.set_id + '_warning',
os.path.join(
self.warning_set_path,
self.set_id + '.log'),
logging.WARNING).get_logger()
def add_info(self, info_msg):
"""Log info under set_id."""
self.set_to_info[self.set_id].append(info_msg)
def add_error(self, err_msg):
"""Log error under set_id."""
self.set_to_errors[self.set_id].append(err_msg)
def add_warning(self, warning_msg):
"""Log warning under set_id."""
self.set_to_warnings[self.set_id].append(warning_msg)
def write_to_log(self):
"""Write stored dicts of msgs to logs."""
        # Info messages are currently written to the error log for convenience when checking the number of tfrecords.
for info_msg in self.set_to_info[self.set_id]:
self.err_logger.info(info_msg)
for err_msg in self.set_to_errors[self.set_id]:
self.err_logger.error(err_msg)
for warning_msg in self.set_to_warnings[self.set_id]:
self.warning_logger.warning(warning_msg)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataio/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sort set type based on input labels and generate either DataFactory or Nvhelnet folder."""
import os
class SetLabelSorter(object):
"""Class to sort experiment folder created for set."""
json_expr_folder_prefix = 'Ground_Truth_DataFactory_'
sdk_expr_folder_prefix = 'Ground_Truth_Nvhelnet_'
def __init__(
self,
experiment_folder_suffix,
sdklabels_folder_name,
):
"""Initialize parameters."""
self._experiment_folder_suffix = experiment_folder_suffix
self._sdklabels_folder_name = sdklabels_folder_name
def get_info_source_path(
self,
get_json_path,
parent_json_path,
parent_sdklabels_path,
):
"""Get required input file paths for tfrecord generation.
Args:
get_json_path (function): Function which returns json files path.
parent_json_path (path): Parent path of json folder.
parent_sdklabels_path (path): Parent path of nvhelnet sdk labels folder.
Returns:
strategy_type (str): Type of strategy (sdk / json) determined by input files.
info_source_path (path): Label source path.
experiment_folder_name (str): Name of experiment folder which contains tfrecords.
"""
def _get_sdklabels_folder(parent):
sdklabels_path = os.path.join(parent, self._sdklabels_folder_name)
# The check to see if path is valid happens here, not in main.
            if not os.path.isdir(sdklabels_path):
                print("Could not find sdklabels-folder: {} is not a valid directory"
                      .format(sdklabels_path))
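                # Fall back from the generated (postData) path to the raw (orgData) path and retry.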
sdklabels_path = sdklabels_path.replace('postData', 'orgData')
if not os.path.isdir(sdklabels_path):
raise IOError("Could not find sdklabels-folder: {} is not a valid directory"
.format(sdklabels_path))
return sdklabels_path
info_source_path = None
if self._sdklabels_folder_name is None:
            # Consider json files only when an sdklabels folder is not given.
            # If no json is found, fall back to a known good nvhelnet sdk folder.
strategy_type = 'json'
info_source_path = get_json_path(parent_json_path)
experiment_folder_name = self.json_expr_folder_prefix + self._experiment_folder_suffix
if info_source_path is None:
if self._sdklabels_folder_name is None:
self._sdklabels_folder_name = 'Nvhelnet_v11.2'
strategy_type = 'sdk'
info_source_path = _get_sdklabels_folder(parent_sdklabels_path)
experiment_folder_name = self.sdk_expr_folder_prefix + self._experiment_folder_suffix
return strategy_type, info_source_path, experiment_folder_name
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataio/set_label_sorter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TLT detection data sequence."""
import logging
import os
import numpy as np
from nvidia_tao_tf1.cv.common.dataio.base_data_sequence import BaseDataSequence
logging.basicConfig(
format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="DEBUG"
)
logger = logging.getLogger(__name__)
class DetectionDataSequence(BaseDataSequence):
"""Abstract class for TLT detection network.
To use dataloader:
    1. call __init__(configs); data sources listed in dataset_config are added internally via _add_source
    2. use the sequence as a data generator in keras model.fit_generator()
Functions below must be implemented in derived classes:
1. _preprocessing
"""
def __init__(self,
dataset_config,
augmentation_config,
batch_size=10,
is_training=True,
encode_fn=None,
root_path=None,
output_raw_label=False):
"""Class initialization."""
self.image_paths = []
self.label_paths = []
        # map class names to contiguous integer indices
mapping_dict = dataset_config.target_class_mapping
self.classes = sorted({str(x).lower() for x in mapping_dict.values()})
val_class_mapping = dict(zip(self.classes, range(len(self.classes))))
self.class_mapping = {key.lower(): val_class_mapping[str(val.lower())]
for key, val in mapping_dict.items()}
# load data sources
if is_training:
data_sources = dataset_config.data_sources
else:
data_sources = dataset_config.validation_data_sources
for data_source in data_sources:
self._add_source(
os.path.join(root_path, data_source.image_directory_path)
if root_path else data_source.image_directory_path,
os.path.join(root_path, data_source.label_directory_path)
if root_path else data_source.label_directory_path)
# use numpy array to accelerate
self.image_paths = np.array(self.image_paths)
self.label_paths = np.array(self.label_paths)
self.data_inds = np.arange(len(self.image_paths))
if is_training:
np.random.shuffle(self.data_inds)
self.is_training = is_training
self.batch_size = batch_size
self.output_img_size = (augmentation_config.output_width, augmentation_config.output_height)
self.augmentation_config = augmentation_config
self.exclude_difficult = is_training and (not dataset_config.include_difficult_in_training)
self.encode_fn = encode_fn
self.n_samples = len(self.data_inds)
self.output_raw_label = output_raw_label
self.image_depth = 8
def _add_source(self, image_folder, label_folder):
"""Add Kitti sources."""
img_paths = os.listdir(image_folder)
label_paths = set(os.listdir(label_folder))
supported_img_format = ['.jpg', '.jpeg', '.png', '.bmp', '.gif']
for img_path in img_paths:
# Only add valid items to paths
filename, img_ext = os.path.splitext(img_path)
if img_ext in supported_img_format and filename + '.txt' in label_paths:
self.image_paths.append(os.path.join(image_folder, img_path))
self.label_paths.append(os.path.join(label_folder, filename + '.txt'))
def __len__(self):
"""Get length of Sequence."""
return int(np.ceil(len(self.image_paths) / self.batch_size))
def _load_gt_label(self, label_path):
"""Load Kitti labels.
Returns:
[class_idx, is_difficult, x_min, y_min, x_max, y_max]
"""
        with open(label_path, 'r') as label_file:
            entries = label_file.read().strip().split('\n')
results = []
for entry in entries:
items = entry.strip().split()
if len(items) < 9:
continue
items[0] = items[0].lower()
if items[0] not in self.class_mapping:
continue
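            # KITTI columns: 0 = class name, 2 = occlusion level, 4-7 = bbox (x_min, y_min, x_max, y_max).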
label = [self.class_mapping[items[0]], 1 if int(
items[2]) != 0 else 0, *items[4:8]]
results.append([float(x) for x in label])
return np.array(results).reshape(-1, 6)
def _filter_invalid_labels(self, labels):
"""filter out invalid labels.
Arg:
labels: size (N, 6), where bboxes is normalized to 0~1.
Returns:
labels: size (M, 6), filtered bboxes with clipped boxes.
"""
labels[:, -4:] = np.clip(labels[:, -4:], 0, 1)
# exclude invalid boxes
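        # A box is kept when it is not marked difficult, or when difficult boxes are allowed in training.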
difficult_cond = (labels[:, 1] < 0.5) | (not self.exclude_difficult)
if np.any(difficult_cond == 0):
logger.warning(
"Got label marked as difficult(occlusion > 0), "
"please set occlusion field in KITTI label to 0 "
"or set `dataset_config.include_difficult_in_training` to True "
"in spec file, if you want to include it in training."
)
x_cond = labels[:, 4] - labels[:, 2] > 1e-3
y_cond = labels[:, 5] - labels[:, 3] > 1e-3
return labels[difficult_cond & x_cond & y_cond]
def _get_single_item_raw(self, idx):
"""Load single image and its label."""
image = self._load_gt_image(self.image_paths[self.data_inds[idx]])
label = self._load_gt_label(self.label_paths[self.data_inds[idx]])
# change bbox to 0~1
h, w, _ = image.shape
label[:, 2] /= w
label[:, 3] /= h
label[:, 4] /= w
label[:, 5] /= h
return image, label
def _get_single_item(self, idx, output_img_size):
"""Load and process single image and its label."""
image, label = self._get_single_item_raw(idx)
return self._preprocessing(image, label, output_img_size)
def _batch_post_processing(self, images, labels):
"""Post processing for a batch."""
images = np.array(images)
# RGB -> BGR, channels_last -> channels_first
images = images[..., [2, 1, 0]].transpose(0, 3, 1, 2)
img_mean = self.augmentation_config.image_mean
if self.augmentation_config.output_channel == 3:
assert self.image_depth == 8, (
f"RGB images only support 8-bit depth, got {self.image_depth}, "
"please check `augmentation_config.output_depth` in spec file"
)
if img_mean:
bb, gg, rr = img_mean['b'], img_mean['g'], img_mean['r']
else:
bb, gg, rr = 103.939, 116.779, 123.68
else:
if img_mean:
bb, gg, rr = img_mean['l'], img_mean['l'], img_mean['l']
elif self.image_depth == 8:
bb, gg, rr = 117.3786, 117.3786, 117.3786
elif self.image_depth == 16:
# 117.3786 * 256
bb, gg, rr = 30048.9216, 30048.9216, 30048.9216
else:
raise ValueError(
f"Unsupported image depth: {self.image_depth}, should be 8 or 16, "
"please check `augmentation_config.output_depth` in spec file"
)
# subtract imagenet mean
images -= np.array([[[[bb]], [[gg]], [[rr]]]])
if self.augmentation_config.output_channel == 1:
# See conversion: https://pillow.readthedocs.io/en/3.2.x/reference/Image.html
bgr_ = np.array([0.1140, 0.5870, 0.2990]).reshape(1, 3, 1, 1)
images = np.sum(images * bgr_, axis=1, keepdims=True)
# try to make labels a numpy array
is_make_array = True
x_shape = None
for x in labels:
if not isinstance(x, np.ndarray):
is_make_array = False
break
if x_shape is None:
x_shape = x.shape
elif x_shape != x.shape:
is_make_array = False
break
if is_make_array:
labels = np.array(labels)
return images, labels
def __getitem__(self, batch_idx):
"""Load a full batch."""
images = []
labels = []
for idx in range(batch_idx * self.batch_size,
min(self.n_samples, (batch_idx + 1) * self.batch_size)):
image, label = self._get_single_item(idx, self.output_img_size)
images.append(image)
labels.append(label)
return self._batch_post_processing(images, labels)
def on_epoch_end(self):
"""shuffle data at end."""
if self.is_training:
np.random.shuffle(self.data_inds)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataio/detection_data_sequence.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encapsulate implementation for gaze custom data set."""
from collections import namedtuple
import os
import numpy as np
from nvidia_tao_tf1.cv.common.dataio.set_strategy import SetStrategy
from nvidia_tao_tf1.cv.common.dataio.utils import mkdir
class GazeCustomSetStrategy(SetStrategy):
"""Class encapsulates implementation specific to gaze sets."""
CustomerPathStruct = namedtuple('CustomerPathStruct',
'''error_path,
data_path,
norm_data_path,
config_path,
label_path,
landmarks_path''')
def __init__(
self,
data_root_path,
norm_data_folder_name,
data_strategy_type,
landmarks_folder_name
):
"""Initialize parameters.
Args:
            data_root_path (str): Root path of the custom data set.
            norm_data_folder_name (str): Folder name for normalized data under the root path.
            data_strategy_type (str): Label strategy type (sdk / json) for this data.
            landmarks_folder_name (str): Folder name of predicted landmarks, or None to disable.
"""
super(GazeCustomSetStrategy, self).__init__(
landmarks_folder_name=landmarks_folder_name,
set_id="",
experiment_folder_suffix="",
tfrecord_folder_name="",
gt_folder_name="",
set_label_sorter="")
self._use_filtered = False
self._use_undistort = False
self._data_root_path = data_root_path
self._norm_data_folder_name = norm_data_folder_name
self._strategy_type = data_strategy_type
self._landmarks_folder_name = landmarks_folder_name
self._set_source_paths()
self._set_camera_parameters()
def _set_camera_parameters(self):
def _load_cam_intrinsics(self):
file_path = os.path.join(self._paths.config_path, 'camera_parameters.txt')
camera_matrix = np.loadtxt(file_path, delimiter=',', max_rows=3)
            # Distortion coeffs are listed one value per line, below camera_matrix.
distortion_coeffs = np.loadtxt(file_path, skiprows=4).transpose()
# Only the first 5 distortion coefficients are correct.
distortion_coeffs = distortion_coeffs[:5]
theta_phi_distortion_coeffs = distortion_coeffs
return [camera_matrix, distortion_coeffs, theta_phi_distortion_coeffs]
def _load_cam_extrinsics(self):
R_file_path = os.path.join(self._paths.config_path, 'R.txt')
T_file_path = os.path.join(self._paths.config_path, 'T.txt')
R = np.loadtxt(R_file_path, delimiter=',')
T = np.loadtxt(T_file_path)
extrinsics = (R, T)
return extrinsics
def _load_screen_parameters(self):
scrpW, scrpH = 1920.0, 1080.0 # Default vals.
# Try to find a config file for the resolution.
resolution = os.path.join(self._paths.config_path, 'resolution.txt')
if os.path.isfile(resolution):
with open(resolution) as f:
scrpW = float(f.readline())
scrpH = float(f.readline())
# Check which of board_size or TV_size is available.
board_size = os.path.join(self._paths.config_path, 'board_size.txt')
tv_size = os.path.join(self._paths.config_path, 'TV_size')
if os.path.isfile(board_size):
if os.path.isfile(tv_size):
raise IOError("Both board_size.txt and TV_size exist in {}"
.format(os.path.join(self._paths.config_path)))
size_file = board_size
elif os.path.isfile(tv_size):
size_file = tv_size
else:
raise IOError("Neither board_size.txt nor TV_size exists in {}"
.format(os.path.join(self._paths.config_path)))
with open(size_file) as f:
scrmW = float(f.readline())
scrmH = float(f.readline())
screens = (scrmW, scrmH, scrpW, scrpH)
return screens
self._cam_intrinsics = _load_cam_intrinsics(self)
self._cam_extrinsics = _load_cam_extrinsics(self)
self._screen_params = _load_screen_parameters(self)
def _get_json_path(self, set_id_path):
return self._check_paths(set_id_path, [
'json_datafactory_v2',
'json_datafactory'
])
def _set_source_paths(self):
root_path = self._data_root_path
if os.path.exists(root_path):
data_path = os.path.join(root_path, 'Data')
config_path = os.path.join(root_path, 'Config')
label_path = self._get_json_path(root_path)
norm_data_path = os.path.join(root_path, self._norm_data_folder_name)
error_path = os.path.join(root_path, 'errors')
mkdir(error_path)
landmarks_path = None
if self._landmarks_folder_name is not None:
landmarks_path = os.path.join(root_path, self._landmarks_folder_name)
self._paths = self.CustomerPathStruct(
error_path=error_path,
data_path=data_path,
norm_data_path=norm_data_path,
config_path=config_path,
label_path=label_path,
landmarks_path=landmarks_path)
def get_pts(self, pts, frame_width, frame_height):
"""Return undistorted points if required, otherwise return distored points."""
return pts
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataio/gaze_custom_set_strategy.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstracted methods of set strategy (gaze / eoc) and select strategy for set."""
from nvidia_tao_tf1.cv.common.dataio.eoc_strategy import EocStrategy
from nvidia_tao_tf1.cv.common.dataio.gaze_strategy import GazeStrategy
from nvidia_tao_tf1.cv.common.dataio.set_label_sorter import SetLabelSorter
class SetStrategyGenerator(object):
"""Class for generating set strategy and retrieving strategy output."""
def __init__(
self,
set_id,
experiment_folder_suffix,
tfrecord_folder_name,
gt_folder_name,
use_filtered,
use_undistort,
landmarks_folder_name,
sdklabels_folder_name,
set_root_path
):
"""Sort set into sdk or json and generate gaze / eoc strategy."""
self._landmarks_folder_name = landmarks_folder_name
self._use_undistort = use_undistort
set_label_sorter = SetLabelSorter(
experiment_folder_suffix,
sdklabels_folder_name)
if 'eoc' in set_id.lower():
self._strategy = EocStrategy(
set_id,
experiment_folder_suffix,
tfrecord_folder_name,
gt_folder_name,
landmarks_folder_name,
set_label_sorter)
else:
self._strategy = GazeStrategy(
set_id,
experiment_folder_suffix,
tfrecord_folder_name,
gt_folder_name,
use_filtered,
use_undistort,
landmarks_folder_name,
set_label_sorter,
set_root_path)
def get_camera_parameters(self):
"""Return strategy's camera parameters."""
return self._strategy.get_camera_parameters()
def get_source_paths(self):
"""Return strategy's needed input file paths."""
return self._strategy.get_source_paths()
def extract_gaze_info(self, frame_data_dict, frame_name, region_name):
"""Return strategy's extracted info about gaze."""
self._strategy.extract_gaze_info(frame_data_dict, frame_name, region_name)
def get_pts(self, pts, frame_width, frame_height):
"""Return strategy's landmark points (can be undistorted / distorted)."""
return self._strategy.get_pts(pts, frame_width, frame_height)
def use_undistort(self):
"""Return whether user wants to use undistorted frames and landmarks."""
return self._use_undistort
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataio/set_strategy_generator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main file of pipeline script which pushes to desq."""
import argparse
import sys
from nvidia_tao_tf1.cv.common.dataio.tfrecord_manager import TfRecordManager
from nvidia_tao_tf1.cv.common.dataio.utils import is_kpi_set
def main(args=None):
"""Generate tfrecords based on user arguments."""
if args is None:
args = sys.argv[1:]
parser = argparse.ArgumentParser(
description="Push data to desq from json post-labels or nvhelnet sdk labels")
parser.add_argument('-unique', '--use_unique', type=lambda x: (str(x).lower() == 'true'),
                        default=False, help='Only take the first frame of every series')
parser.add_argument('-filter', '--use_filtered', type=lambda x: (str(x).lower() == 'true'),
default=False, help='Use filtered frames on nvhelnet sets')
parser.add_argument('-undistort', '--undistort', type=lambda x: (str(x).lower() == 'true'),
default=False, help='Read undistort frames')
parser.add_argument('-update', '--update', type=lambda x: (str(x).lower() == 'true'),
default=False, help='Update already present creations in desq')
parser.add_argument('-landmarks-folder', '--landmarks_folder_name', type=str, default="",
help='Source folder to obtain predicted fpe landmarks from, '
'or "" for default landmarks from JSON')
parser.add_argument('-sdklabels-folder', '--sdklabels_folder_name', type=str, default="",
help='Source folder to obtain nvhelnet sdk labels from, '
'or "" for sdk labels from JSON (with fallback to Nvhelnet_v11.2)')
parser.add_argument('-save_norm_images', '--save_norm_images',
type=lambda x: (str(x).lower() == 'true'),
default=False,
help='Flag on whether to save normalized data')
parser.add_argument('-norm_folder_name', '--norm_folder_name', type=str, required=True,
help='Folder to save normalized data')
parser.add_argument('-sets', '--set_ids', type=str,
required=True, help='Setid (ex. s427-gaze-2)')
args = parser.parse_args(args)
set_ids = args.set_ids
is_unique = args.use_unique
if is_unique and is_kpi_set(set_ids):
raise ValueError('Do not use unique for kpi sets.')
if args.landmarks_folder_name == "":
args.landmarks_folder_name = None
elif not args.landmarks_folder_name.startswith('fpenet_results'):
raise ValueError('Landmarks folder has to start with "fpenet_results".')
if args.sdklabels_folder_name == "":
args.sdklabels_folder_name = None
# The check to see if these folders actually exists depends on the cosmos choice
# and is therefore done in set_strategy.py's _get_landmarks_path(),
# and set_label_sorter.py's _get_sdklabels_folder(), respectively.
tfrecord_manager = TfRecordManager(
set_ids,
"tmp",
is_unique,
args.use_filtered,
args.undistort,
args.landmarks_folder_name,
args.sdklabels_folder_name,
args.norm_folder_name,
args.save_norm_images)
tfrecord_manager.write_desq(update=args.update)
if __name__ == '__main__':
main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataio/desq_push.py |
"""Push data to desq database."""
# As ai-infra does not allow git submodules or wheel installs, get desq from cosmos distribution.
import sys
sys.path.append('/home/driveix.cosmos639/desq/dist/v1.0.0')
import desq # noqa: E402
from desq.schema import EyecropComputation, EyecropCreation, EyecropLabelling, EyestateCreation, \
EyestateLabelling, EyestateModel, FacecropComputation, FacecropCreation, \
FacecropLabelling, GazeComputation, GazeCreation, GazeoriginComputation, \
GazeoriginCreation, GazeoriginCreator, HeadposeComputation, \
    HeadposeCreation, LandmarksCreation, LandmarksLabelling, LandmarksModel  # noqa: E402
def get_creator(session, creator_class, creator_name, constructor_kwargs=None):
"""Return a creator instance from the database (adding to database if needed)."""
creator = session.query(creator_class).filter_by(name=creator_name).one_or_none()
if creator is None:
        if constructor_kwargs is None:  # Avoid sharing a mutable dict as the default parameter.
constructor_kwargs = {}
if issubclass(creator_class, desq.schema.ModelMixin): # Model class (driveIX SDK).
creator = creator_class(name=creator_name,
uri_path="{}/{}".format(creator_name, creator_class.__name__),
storage_id=3,
**constructor_kwargs)
else: # Computation or Labelling class.
creator = creator_class(name=creator_name, **constructor_kwargs)
session.add(creator)
return creator
def get_creators(session, strategy_type, landmarks_folder_name, sdklabels_folder_name):
"""Return a list of tuple(creation_class, creator, kwargs) to create prediction for.
Each of the creators returned is already added to the session (thus has an id).
"""
if landmarks_folder_name is not None:
raise NotImplementedError("Writing to desq not yet supported with -landmarks-folder")
if sdklabels_folder_name is not None:
raise NotImplementedError("Writing to desq not yet supported with -sdklabels-folder")
if strategy_type == 'sdk':
eyestate_creator = get_creator(session, EyestateModel, u"SDK_v1")
landmarks_creator = get_creator(session, LandmarksModel, u"SDK_v1",
{"keypoint_count": 80})
eyecrop_creator = get_creator(session, EyecropComputation, u"SDK_v1",
{"landmarkscreator": landmarks_creator})
facecrop_creator = get_creator(session, FacecropComputation, u"SDK_v1",
{"landmarkscreator": landmarks_creator})
else:
eyestate_creator = get_creator(session, EyestateLabelling, u"DataFactory_v1")
landmarks_creator = get_creator(session, LandmarksLabelling, u"DataFactory_v1",
{"keypoint_count": 104})
eyecrop_creator = get_creator(session, EyecropLabelling, u"DataFactory_v1")
facecrop_creator = get_creator(session, FacecropLabelling, u"DataFactory_v1")
gazeorigin_mid = get_creator(session, GazeoriginComputation, u"PnP_center_of_eyes_v1",
{"origin_type": GazeoriginCreator.OriginType.CENTER_OF_EYES,
"landmarkscreator": landmarks_creator})
gazeorigin_lpc = get_creator(session, GazeoriginComputation, u"PnP_left_eye_v1",
{"origin_type": GazeoriginCreator.OriginType.LEFT_EYE,
"landmarkscreator": landmarks_creator})
gazeorigin_rpc = get_creator(session, GazeoriginComputation, u"PnP_right_eye_v1",
{"origin_type": GazeoriginCreator.OriginType.RIGHT_EYE,
"landmarkscreator": landmarks_creator})
return [
(EyestateCreation, eyestate_creator, {}),
(EyecropCreation, eyecrop_creator, {}),
(FacecropCreation, facecrop_creator, {}),
(GazeoriginCreation, gazeorigin_mid, {"origin_prefix": "mid_"}),
(GazeoriginCreation, gazeorigin_lpc, {"origin_prefix": "lpc_"}),
(GazeoriginCreation, gazeorigin_rpc, {"origin_prefix": "rpc_"}),
(GazeCreation, get_creator(session, GazeComputation, u"PnP_center_of_eyes_v1",
constructor_kwargs={"origincreator": gazeorigin_mid}),
{"origin_postfix": ""}),
(GazeCreation, get_creator(session, GazeComputation, u"PnP_left_eye_v1",
constructor_kwargs={"origincreator": gazeorigin_lpc}),
{"origin_postfix": "_le"}),
(GazeCreation, get_creator(session, GazeComputation, u"PnP_right_eye_v1",
constructor_kwargs={"origincreator": gazeorigin_rpc}),
{"origin_postfix": "_re"}),
(HeadposeCreation, get_creator(session, HeadposeComputation, u"PnP_v1"), {}),
(LandmarksCreation, landmarks_creator, {}),
]
def build_creation(creation_class, frame_data_dict, origin_prefix="mid_", origin_postfix=""):
"""Return a new creation_class for the given frame_data_dict."""
if creation_class == EyestateCreation:
creation = EyestateCreation(
left_eye=EyestateCreation.EyeState.from_str(frame_data_dict["label/left_eye_status"]),
right_eye=EyestateCreation.EyeState.from_str(frame_data_dict["label/right_eye_status"])
)
elif creation_class == FacecropCreation:
creation = FacecropCreation(
x1=frame_data_dict["train/tight_facebbx_x1"],
y1=frame_data_dict["train/tight_facebbx_y1"],
width=frame_data_dict["train/tight_facebbx_x2"]
- frame_data_dict["train/tight_facebbx_x1"],
height=frame_data_dict["train/tight_facebbx_y2"]
- frame_data_dict["train/tight_facebbx_y1"],
)
elif creation_class == EyecropCreation:
creation = EyecropCreation(
left_x1=frame_data_dict["train/lefteyebbx_x"],
left_y1=frame_data_dict["train/lefteyebbx_y"],
left_width=frame_data_dict["train/lefteyebbx_w"],
left_height=frame_data_dict["train/lefteyebbx_h"],
right_x1=frame_data_dict["train/righteyebbx_x"],
right_y1=frame_data_dict["train/righteyebbx_y"],
right_width=frame_data_dict["train/righteyebbx_w"],
right_height=frame_data_dict["train/righteyebbx_h"],
)
elif creation_class == GazeoriginCreation:
creation = GazeoriginCreation(
origin_mm=(frame_data_dict["label/{}cam_x".format(origin_prefix)],
frame_data_dict["label/{}cam_y".format(origin_prefix)],
frame_data_dict["label/{}cam_z".format(origin_prefix)]),
)
elif creation_class == GazeCreation:
creation = GazeCreation(
gaze_cam_mm=(frame_data_dict["label/gaze_cam_x"],
frame_data_dict["label/gaze_cam_y"],
frame_data_dict["label/gaze_cam_z"]),
)
if frame_data_dict["train/valid_theta_phi"]: # Otherwise theta/phi == None.
creation.theta = frame_data_dict["label/theta{}".format(origin_postfix)]
creation.phi = frame_data_dict["label/phi{}".format(origin_postfix)]
elif creation_class == HeadposeCreation:
creation = HeadposeCreation(
headpose_degrees=(frame_data_dict["label/hp_pitch"],
frame_data_dict["label/hp_yaw"],
frame_data_dict["label/hp_roll"]),
)
elif creation_class == LandmarksCreation:
creation = LandmarksCreation.from_flat_list(
frame_data_dict["train/landmarks"],
occluded=frame_data_dict["train/landmarks_occ"]
)
else:
raise NotImplementedError("Unknown creation_class {}".format(creation_class))
return creation
def write_desq(users_dict, strategy_type, landmarks_folder_name, sdklabels_folder_name,
update=False):
"""Add the given users_dict to desq.
If update is True, replaces any existing creations (of the same creators) by the new ones.
"""
with desq.session_scope() as session:
creators = get_creators(session, strategy_type, landmarks_folder_name,
sdklabels_folder_name)
for user in users_dict.keys():
print("Pushing images from user {}".format(user))
for region in users_dict[user].keys():
for frame in users_dict[user][region].keys():
frame_data_dict = users_dict[user][region][frame]
# Remove /home/copilot.cosmos10/ or /home/driveix.cosmos639/ from file path.
uri_path = frame_data_dict["train/image_frame_name"].split('/', 3)[3]
# Get the corresponding image from desq.
image = (
session.query(desq.schema.OriginalImage)
.filter_by(uri_path=uri_path).one()
)
# Add all predictions.
for creation_class, creator, kwargs in creators:
creation = build_creation(creation_class, frame_data_dict, **kwargs)
creation.creator_id = creator.id
creation.image_id = image.id
if update:
creation = session.merge(creation)
session.add(creation)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataio/desq_converter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main file of pipeline script which generates tfrecords and related debug files."""
import argparse
import sys
from nvidia_tao_tf1.cv.common.dataio.tfrecord_manager import TfRecordManager
from nvidia_tao_tf1.cv.common.dataio.utils import is_kpi_set
def main(args=None):
"""Generate tfrecords based on user arguments."""
if args is None:
args = sys.argv[1:]
parser = argparse.ArgumentParser(
description="Generate TFRecords from json post-labels or nvhelnet sdk labels")
parser.add_argument('-folder-suffix', '--ground_truth_experiment_folder_suffix',
type=str, required=True,
help='Suffix of folder which will include generated tfrecords')
parser.add_argument('-unique', '--use_unique', type=lambda x: (str(x).lower() == 'true'),
                        default=False, help='Only take the first frame of every series')
parser.add_argument('-filter', '--use_filtered', type=lambda x: (str(x).lower() == 'true'),
default=False, help='Use filtered frames on nvhelnet sets')
parser.add_argument('-undistort', '--undistort', type=lambda x: (str(x).lower() == 'true'),
default=False, help='Read undistort frames')
parser.add_argument('-landmarks-folder', '--landmarks_folder_name', type=str, default="",
help='Source folder to obtain predicted fpe landmarks from, '
'or "" for default landmarks from JSON')
parser.add_argument('-sdklabels-folder', '--sdklabels_folder_name', type=str, default="",
help='Source folder to obtain nvhelnet sdk labels from, '
'or "" for sdk labels from JSON (with fallback to Nvhelnet_v11.2)')
parser.add_argument('-norm_folder_name', '--norm_folder_name', type=str, required=True,
help='Folder to generate normalized data')
parser.add_argument('-data_root_path', '--data_root_path', type=str, default="",
help='root path of the data')
parser.add_argument('-sets', '--set_ids', type=str,
required=True, help='Setid (ex. s427-gaze-2)')
args = parser.parse_args(args)
set_ids = args.set_ids
is_unique = args.use_unique
if is_unique and is_kpi_set(set_ids):
raise ValueError('Do not use unique for kpi sets.')
is_filtered = args.use_filtered
folder_suffix = args.ground_truth_experiment_folder_suffix
if is_filtered:
folder_suffix = 'filtered_' + folder_suffix
if args.landmarks_folder_name == "":
args.landmarks_folder_name = None
elif not args.landmarks_folder_name.startswith('fpenet_results'):
raise ValueError('Landmarks folder has to start with "fpenet_results".')
if args.sdklabels_folder_name == "":
args.sdklabels_folder_name = None
# The check to see if these folders actually exists depends on the cosmos choice
# and is therefore done in set_strategy.py's _get_landmarks_path(),
# and set_label_sorter.py's _get_sdklabels_folder(), respectively.
tfrecord_manager = TfRecordManager(
set_ids,
folder_suffix,
is_unique,
is_filtered,
args.undistort,
args.landmarks_folder_name,
args.sdklabels_folder_name,
args.norm_folder_name,
args.data_root_path)
tfrecord_manager.generate_tfrecords()
tfrecord_manager.generate_gt()
tfrecord_manager.split_tfrecords()
if __name__ == '__main__':
main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataio/tfrecord_gen_main.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Strategy to extract JSON labels for custom data."""
from collections import defaultdict
import json
import os
import cv2
import numpy as np
from nvidia_tao_tf1.cv.common.dataio.bbox_strategy import EyeBboxStrategy, FaceBboxStrategy
from nvidia_tao_tf1.cv.common.dataio.eye_features_generator import (
eye_end_index_diff, eye_index, EyeFeaturesGenerator, num_pts_eye_outline)
from nvidia_tao_tf1.cv.common.dataio.eye_status import EyeStatus
from nvidia_tao_tf1.cv.common.dataio.tfrecordlabels_strategy import TfRecordLabelsStrategy
from nvidia_tao_tf1.cv.common.dataio.theta_phi_angle_utils import (
populate_gaze_info,
populate_head_norm_bbinfo,
populate_head_norm_float,
populate_head_norm_listinfo,
populate_head_norm_path,
populate_theta_phi)
from nvidia_tao_tf1.cv.common.dataio.theta_phi_calc_utils import (
CustomNormalizeData,
ThetaPhiLandmarksGenerator
)
from nvidia_tao_tf1.cv.common.dataio.utils import get_file_ext, get_file_name_noext
class CustomJsonLabelsStrategy(TfRecordLabelsStrategy):
"""Use JSON post labels to generate tfrecords."""
def __init__(
self,
data_root_path,
norm_data_folder_name,
set_strategy,
save_images,
logger
):
"""
Initialize parameters.
Args:
            data_root_path (str): Root path of the custom data set.
            norm_data_folder_name (str): Folder name to save normalized face, eyes and frame images.
            set_strategy (SetStrategy object): Strategy for set type (gaze / eoc).
            save_images (bool): Whether to generate new folders and images for face crop, eyes, etc.
            logger (Logger object): Report failures and number of tfrecords lost for tracking.
"""
super(CustomJsonLabelsStrategy, self).__init__(
norm_folder_name=norm_data_folder_name,
set_strategy=set_strategy,
save_images=save_images,
logger=logger,
set_id="",
use_unique=False)
self._data_root_path = data_root_path
self._landmarks_path = self._paths.landmarks_path
self._file_json = defaultdict(lambda: defaultdict())
self._users = defaultdict(lambda: defaultdict())
self._extract_json()
self._generate_data()
def _extract_json(self):
for json_file in os.listdir(self._paths.label_path):
file_path = os.path.join(self._paths.label_path, json_file)
if get_file_ext(file_path) != '.json':
continue
try:
with open(file_path, 'r') as file_json:
read_json = json.load(file_json)
except Exception:
self._logger.add_error(
'Json file improperly formatted for user {}'.format(file_path))
for frame_json in read_json:
if 'annotations' not in frame_json:
continue
frame_name = get_file_name_noext(frame_json['filename'].split('/')[-1])
self._file_json[frame_name] = frame_json['annotations']
def get_data(self):
"""get frame data."""
return self._users
def _generate_data(self):
"""Factory method which returns populated data for tfrecord generation."""
self._extract_frame()
self.extract_landmarks()
self.extract_bbox()
self.extract_eye_status()
self._extract_gaze_vec_info()
self._logger.write_to_log()
# remove empty elements from dict
self._users = {k: v for k, v in self._users.items() if len(v) > 0}
return self._users
def _extract_frame(self):
for img_file in os.listdir(self._paths.data_path):
frame_name = get_file_name_noext(img_file)
frame_path = os.path.join(self._paths.data_path, img_file)
if get_file_ext(img_file) != '.png':
self._logger.add_warning('{} is not an image'.format(frame_path))
continue
if not os.path.exists(frame_path):
self._logger.add_error('Unable to find frame {}'.format(frame_path))
continue
self._users[frame_name]['train/image_frame_name'] = frame_path
# All images in a set have the same frame size
if self._frame_width is None or self._frame_height is None:
image_frame = cv2.imread(frame_path)
self._frame_width = image_frame.shape[1]
self._frame_height = image_frame.shape[0]
self._users[frame_name]['train/image_frame_width'] = self._frame_width
self._users[frame_name]['train/image_frame_height'] = self._frame_height
def _extract_fiducial_points(self, chunk):
x = [-1] * self.Pipeline_Constants.num_fid_points
y = [-1] * self.Pipeline_Constants.num_fid_points
occlusions = [-1] * self.Pipeline_Constants.num_fid_points
num_landmarks = None
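        # Each chunk key encodes the coordinate type ('x', 'y' or an occlusion flag) together with
        # a 1-based landmark index, which is parsed out below.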
for point in (
point for point in chunk if (
'class' not in point and 'version' not in point)):
try:
number = int(''.join(c for c in str(point) if c.isdigit()))
if num_landmarks is None or number > num_landmarks:
num_landmarks = number
if 'x' in str(point).lower() and number <= self.Pipeline_Constants.num_fid_points:
x[number - 1] = str(np.longdouble(chunk[point]))
if 'y' in str(point).lower() and number <= self.Pipeline_Constants.num_fid_points:
y[number - 1] = str(np.longdouble(chunk[point]))
if (
'occ' in str(point).lower() and
number <= self.Pipeline_Constants.num_fid_points and
chunk[point]
):
occlusions[number - 1] = 1
for index in range(num_landmarks):
if occlusions[index] == -1:
occlusions[index] = 0
except Exception as e:
                print('Exception occurred during parsing')
print(str(e))
print(str(point))
return x, y, occlusions, num_landmarks
def _extract_landmarks_from_json(self):
for frame in self._file_json.keys():
json_frame_dict = self._file_json[frame]
frame_dict = self._users[frame]
for chunk in json_frame_dict:
if 'class' not in chunk:
continue
chunk_class = str(chunk['class']).lower()
if chunk_class == 'fiducialpoints':
x, y, occlusions, num_landmarks = self._extract_fiducial_points(chunk)
landmarks_2D = np.asarray([x, y], dtype=np.longdouble).T
try:
frame_dict['internal/landmarks_2D_distort'] = np.copy(landmarks_2D)
landmarks_2D[:num_landmarks] = np.asarray(
self._set_strategy.get_pts(
landmarks_2D[:num_landmarks],
frame_dict['train/image_frame_width'],
frame_dict['train/image_frame_height'])).reshape(-1, 2)
frame_dict['internal/landmarks_2D'] = landmarks_2D
frame_dict['train/num_keypoints'] = num_landmarks
frame_dict['train/landmarks'] = landmarks_2D.reshape(-1)
frame_dict['train/landmarks_occ'] = np.asarray(occlusions).T
# Note eye_features only dependent on landmarks
frame_dict['train/eye_features'] = EyeFeaturesGenerator(
landmarks_2D,
num_landmarks).get_eye_features()
except Exception:
continue
def extract_landmarks(self):
"""JSON tfrecord generation read landmarks from json when there is no given path."""
if self._landmarks_path is None:
self._extract_landmarks_from_json()
return
self._read_landmarks_from_path()
@staticmethod
def _get_scaled_facebbx(facex1, facey1, facex2, facey2, frame_w, frame_h):
def _get_facebbx_legacy(x1, y1, x2, y2):
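            # Legacy behaviour: grow the tight face box upward by 20% of its height before squaring it.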
h = y2 - y1
y1 = max(0, y1 - 0.2 * h)
return x1, y1, x2, y2
distort_face_coords = [facex1, facey1, facex2, facey2]
legacy_face_coords = _get_facebbx_legacy(*distort_face_coords)
x1, y1, x2, y2 = legacy_face_coords
x1, y1, side_len = FaceBboxStrategy(
frame_w,
frame_h,
x1,
y1,
x2 - x1,
y2 - y1).get_square_bbox()
scaled_facebbx = x1, y1, side_len, side_len
return distort_face_coords, legacy_face_coords, scaled_facebbx
@staticmethod
def _safeints(x):
x = int(x)
x = max(x, 0)
return int(x)
@classmethod
def _extract_from_facebbox(cls, chunk, facex1, facey1, facex2, facey2):
if (
'face_tight_bboxx' not in chunk or
'face_tight_bboxy' not in chunk or
'face_tight_bboxwidth' not in chunk or
'face_tight_bboxheight' not in chunk
):
return facex1, facey1, facex2, facey2
facex1 = cls._safeints(chunk['face_tight_bboxx'])
facey1 = cls._safeints(chunk['face_tight_bboxy'])
facex2 = cls._safeints(chunk['face_tight_bboxwidth']) + facex1
facey2 = cls._safeints(chunk['face_tight_bboxheight']) + facey1
return facex1, facey1, facex2, facey2
@classmethod
def _extract_from_rect(cls, chunk, prevArea, facex1, facey1, facex2, facey2):
height = chunk['height']
width = chunk['width']
if prevArea == 0:
facex1 = cls._safeints(chunk['x'])
facey1 = cls._safeints(chunk['y'])
facex2 = cls._safeints(chunk['width']) + facex1
facey2 = cls._safeints(chunk['height']) + facey1
prevArea = height * width
else:
if (height * width) < prevArea:
facex1 = cls._safeints(chunk['x'])
facey1 = cls._safeints(chunk['y'])
facex2 = cls._safeints(chunk['width']) + facex1
facey2 = cls._safeints(chunk['height']) + facey1
return prevArea, facex1, facey1, facex2, facey2
def _extract_face_bbox(self):
for frame in self._file_json.keys():
frame_dict = self._users[frame]
if (
'train/image_frame_width' not in frame_dict or
'train/image_frame_height' not in frame_dict
):
self._logger.add_error(
'''Could not find frame width and height.
frame {} may not exist'''.format(frame))
continue
prevArea = 0
facex1 = -1
facey1 = -1
facex2 = -1
facey2 = -1
json_frame_dict = self._file_json[frame]
for chunk in json_frame_dict:
if 'class' not in chunk:
continue
chunk_class = str(chunk['class']).lower()
if chunk_class == 'rect':
prevArea, facex1, facey1, facex2, facey2 = self._extract_from_rect(
chunk, prevArea, facex1, facey1, facex2, facey2)
elif chunk_class == 'facebbox':
facex1, facey1, facex2, facey2 = self._extract_from_facebbox(
chunk, facex1, facey1, facex2, facey2)
if -1 in (facex1, facey1, facex2, facey2):
self._logger.add_error(
'Unable to get face bounding box from json. frame {}'.format(frame))
continue # skip img
frame_w = frame_dict['train/image_frame_width']
frame_h = frame_dict['train/image_frame_height']
face_coords, legacy_face_coords, scaled_facebbx = \
self._get_scaled_facebbx(
facex1,
facey1,
facex2,
facey2,
frame_w,
frame_h)
self._populate_frame_dict(
frame_dict,
[
'internal/facebbx_x_distort',
'internal/facebbx_y_distort',
'internal/facebbx_w_distort',
'internal/facebbx_h_distort',
],
scaled_facebbx)
self._populate_frame_dict(
frame_dict,
[
'train/tight_facebbx_x1',
'train/tight_facebbx_y1',
'train/tight_facebbx_x2',
'train/tight_facebbx_y2'
],
list(map(int, face_coords)))
self._populate_frame_dict(
frame_dict,
[
'internal/facebbx_x1',
'internal/facebbx_y1',
'internal/facebbx_x2',
'internal/facebbx_y2'
],
legacy_face_coords)
self._populate_frame_dict(
frame_dict,
[
'train/facebbx_x',
'train/facebbx_y',
'train/facebbx_w',
'train/facebbx_h'
],
scaled_facebbx)
def _extract_eye_bbox(self):
def _format_eye_bbox(x1, y1, x2, y2, frame_dict):
face_x1, face_y1 = frame_dict['internal/facebbx_x1'], frame_dict['internal/facebbx_y1']
# Relative to face bbx
left = np.asarray([x1 - face_x1, y1 - face_y1], dtype=np.longdouble)
right = np.asarray([x2 - face_x1, y2 - face_y1], dtype=np.longdouble)
width = np.power(np.sum(np.square(left - right)), 0.5)
eye_pupil = np.true_divide(np.add(left, right), 2)
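            # The crop extends one full corner-distance up/left of the eye centre and two thirds of it down/right.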
upper_left = np.subtract(eye_pupil, width)
lower_right = np.add(eye_pupil, np.true_divide(width, 1.5))
coords = np.asarray([upper_left, lower_right], dtype=np.longdouble)
# Back to frame coord
back_global_coord = np.add(coords, np.asarray([face_x1, face_y1]))
back_global_coord[:, 0] = np.clip(
back_global_coord[:, 0],
face_x1,
frame_dict['internal/facebbx_x2'])
back_global_coord[:, 1] = np.clip(
back_global_coord[:, 1],
face_y1,
frame_dict['internal/facebbx_y2'])
[eye_x1, eye_y1], [eye_x2, eye_y2] = back_global_coord.tolist()
return eye_x1, eye_y1, eye_x2, eye_y2
for frame in self._users.keys():
frame_dict = self._users[frame]
# Landmarks and facebbox should be extracted already
if (
'internal/landmarks_2D' not in frame_dict or
'internal/facebbx_x1' not in frame_dict or
'internal/facebbx_y1' not in frame_dict or
'internal/facebbx_x2' not in frame_dict or
'internal/facebbx_y2' not in frame_dict
):
continue
landmarks_2D = frame_dict['internal/landmarks_2D']
right_eye_begin = eye_index
right_eye_end = right_eye_begin + eye_end_index_diff
r_x1, r_y1 = landmarks_2D[right_eye_begin].tolist()
r_x2, r_y2 = landmarks_2D[right_eye_end].tolist()
left_eye_begin = right_eye_begin + num_pts_eye_outline
left_eye_end = left_eye_begin + eye_end_index_diff
l_x1, l_y1 = landmarks_2D[left_eye_begin].tolist()
l_x2, l_y2 = landmarks_2D[left_eye_end].tolist()
right_eye_bbx = _format_eye_bbox(r_x1, r_y1, r_x2, r_y2, frame_dict)
left_eye_bbx = _format_eye_bbox(l_x1, l_y1, l_x2, l_y2, frame_dict)
try:
num_eyes_detected = 0
right_eye_bbx_processed = self._set_strategy.get_pts(
right_eye_bbx,
frame_dict['train/image_frame_width'],
frame_dict['train/image_frame_height'])
right_eye_bbx = EyeBboxStrategy(
frame_dict['train/image_frame_width'],
frame_dict['train/image_frame_height'],
right_eye_bbx_processed).get_square_bbox()
frame_dict['train/righteyebbx_x'] = right_eye_bbx[0]
frame_dict['train/righteyebbx_y'] = right_eye_bbx[1]
frame_dict['train/righteyebbx_w'] = right_eye_bbx[2]
frame_dict['train/righteyebbx_h'] = right_eye_bbx[3]
if -1 not in right_eye_bbx:
num_eyes_detected += 1
left_eye_bbx_processed = self._set_strategy.get_pts(
left_eye_bbx,
frame_dict['train/image_frame_width'],
frame_dict['train/image_frame_height'])
left_eye_bbx = EyeBboxStrategy(
frame_dict['train/image_frame_width'],
frame_dict['train/image_frame_height'],
left_eye_bbx_processed).get_square_bbox()
frame_dict['train/lefteyebbx_x'] = left_eye_bbx[0]
frame_dict['train/lefteyebbx_y'] = left_eye_bbx[1]
frame_dict['train/lefteyebbx_w'] = left_eye_bbx[2]
frame_dict['train/lefteyebbx_h'] = left_eye_bbx[3]
if -1 not in left_eye_bbx:
num_eyes_detected += 1
frame_dict['train/num_eyes_detected'] = num_eyes_detected
except Exception as ex:
self._logger.add_warning(
'frame {} could not draw eye bounding boxes because {}'.format(
frame,
repr(ex)))
continue
def extract_bbox(self):
"""JSON tfrecord generation extract bounding boxes.
Face bounding box extracted from json.
Eye bounding boxes extracted from read landmarks.
"""
if self._landmarks_path is None:
self._extract_face_bbox()
self._extract_eye_bbox()
def extract_eye_status(self):
"""Fill eye status with value in JSON."""
for frame in self._file_json.keys():
frame_dict = self._file_json[frame]
for chunk in frame_dict:
if 'class' not in chunk:
continue
chunk_class = str(chunk['class']).lower()
frame_dict = self._users[frame]
if chunk_class == 'eyes':
# JSON labels from the labeller's perspective,
# flip for user's perspective
if 'r_status' in chunk:
frame_dict['label/left_eye_status'] = chunk['r_status']
if 'l_status' in chunk:
frame_dict['label/right_eye_status'] = chunk['l_status']
elif chunk_class == 'eyeopen':
frame_dict['label/left_eye_status'] = EyeStatus.open_eye_status
frame_dict['label/right_eye_status'] = EyeStatus.open_eye_status
elif chunk_class == 'eyeclose':
frame_dict['label/left_eye_status'] = EyeStatus.closed_eye_status
frame_dict['label/right_eye_status'] = EyeStatus.closed_eye_status
def _extract_gaze_vec_info(self):
if self._cam_intrinsics is not None:
camera_matrix, _, theta_phi_distortion_coeffs = self._cam_intrinsics
if self._cam_extrinsics is not None:
R, T = self._cam_extrinsics
for frame_name in list(self._users.keys()):
frame_data_dict = self._users[frame_name]
face_bbox = None
try:
x1 = frame_data_dict['internal/facebbx_x_distort']
y1 = frame_data_dict['internal/facebbx_y_distort']
x2 = x1 + frame_data_dict['internal/facebbx_w_distort']
y2 = y1 + frame_data_dict['internal/facebbx_h_distort']
face_bbox = [x1, y1, x2, y2]
except KeyError:
# Using Shagan's landmarks will result in no face bounding boxes
if self._landmarks_path is None:
continue
if 'internal/landmarks_2D_distort' not in frame_data_dict:
continue
landmarks_2D_valid = ThetaPhiLandmarksGenerator(
frame_data_dict['internal/landmarks_2D_distort'],
frame_data_dict['train/landmarks_occ'],
frame_data_dict['train/image_frame_width'],
frame_data_dict['train/image_frame_height'],
face_bbox).get_landmarks_in_frame()
angle_struct_ins = CustomNormalizeData(
logger=self._logger,
camera_matrix=camera_matrix,
distortion_coeffs=theta_phi_distortion_coeffs,
R=R,
T=T,
landmarks_2D=landmarks_2D_valid,
frame_path=self._users[frame_name]['train/image_frame_name'],
norm_folder_name=self._norm_folder_name,
save_images=self._save_images,
data_root_path=self._data_root_path).get_normalized_data()
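# `angle_struct_ins` bundles the normalized-gaze outputs consumed below:
# Euler head-pose angles, overall / per-eye / mid-point theta and phi, eye and
# face centers in camera space, normalized face and eye bounding boxes,
# normalized landmarks, 3D landmarks, conversion matrices and the normalized
# frame path.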
is_valid_theta_phi = True
frame_data_dict['train/source'] = 'json'
frame_data_dict['train/valid_theta_phi'] = is_valid_theta_phi
frame_data_dict['label/hp_pitch'], \
frame_data_dict['label/hp_yaw'], \
frame_data_dict['label/hp_roll'] = populate_gaze_info(
angle_struct_ins.euler_angles,
is_valid_theta_phi)
frame_data_dict['label/theta'] = populate_theta_phi(
angle_struct_ins.theta_ovr,
is_valid_theta_phi)
frame_data_dict['label/theta_le'] = populate_theta_phi(
angle_struct_ins.theta_le,
is_valid_theta_phi)
frame_data_dict['label/theta_re'] = populate_theta_phi(
angle_struct_ins.theta_re,
is_valid_theta_phi)
frame_data_dict['label/theta_mid'] = populate_theta_phi(
angle_struct_ins.theta_mid,
is_valid_theta_phi)
frame_data_dict['label/head_pose_theta'] = populate_theta_phi(
angle_struct_ins.head_pose_theta,
is_valid_theta_phi)
frame_data_dict['label/phi'] = populate_theta_phi(
angle_struct_ins.phi_ovr,
is_valid_theta_phi)
frame_data_dict['label/phi_le'] = populate_theta_phi(
angle_struct_ins.phi_le,
is_valid_theta_phi)
frame_data_dict['label/phi_re'] = populate_theta_phi(
angle_struct_ins.phi_re,
is_valid_theta_phi)
frame_data_dict['label/phi_mid'] = populate_theta_phi(
angle_struct_ins.phi_mid,
is_valid_theta_phi)
frame_data_dict['label/head_pose_phi'] = populate_theta_phi(
angle_struct_ins.head_pose_phi,
is_valid_theta_phi)
frame_data_dict['label/lpc_cam_x'], \
frame_data_dict['label/lpc_cam_y'], \
frame_data_dict['label/lpc_cam_z'] = populate_gaze_info(
angle_struct_ins.le_pc_cam_mm,
is_valid_theta_phi)
frame_data_dict['label/rpc_cam_x'], \
frame_data_dict['label/rpc_cam_y'], \
frame_data_dict['label/rpc_cam_z'] = populate_gaze_info(
angle_struct_ins.re_pc_cam_mm,
is_valid_theta_phi)
frame_data_dict['label/mid_cam_x'], \
frame_data_dict['label/mid_cam_y'], \
frame_data_dict['label/mid_cam_z'] = populate_gaze_info(
angle_struct_ins.mid_eyes_cam_mm,
is_valid_theta_phi)
frame_data_dict['label/norm_face_hp_theta'] = populate_theta_phi(
angle_struct_ins.norm_face_hp_theta,
is_valid_theta_phi)
frame_data_dict['label/norm_face_hp_phi'] = populate_theta_phi(
angle_struct_ins.norm_face_hp_phi,
is_valid_theta_phi)
frame_data_dict['label/norm_face_gaze_theta'] = populate_theta_phi(
angle_struct_ins.norm_face_gaze_theta,
is_valid_theta_phi)
frame_data_dict['label/norm_face_gaze_phi'] = populate_theta_phi(
angle_struct_ins.norm_face_gaze_phi,
is_valid_theta_phi)
frame_data_dict['label/norm_leye_hp_theta'] = populate_theta_phi(
angle_struct_ins.norm_leye_hp_theta,
is_valid_theta_phi)
frame_data_dict['label/norm_leye_hp_phi'] = populate_theta_phi(
angle_struct_ins.norm_leye_hp_phi,
is_valid_theta_phi)
frame_data_dict['label/norm_leye_gaze_theta'] = populate_theta_phi(
angle_struct_ins.norm_leye_gaze_theta,
is_valid_theta_phi)
frame_data_dict['label/norm_leye_gaze_phi'] = populate_theta_phi(
angle_struct_ins.norm_leye_gaze_phi,
is_valid_theta_phi)
frame_data_dict['label/norm_reye_hp_theta'] = populate_theta_phi(
angle_struct_ins.norm_reye_hp_theta,
is_valid_theta_phi)
frame_data_dict['label/norm_reye_hp_phi'] = populate_theta_phi(
angle_struct_ins.norm_reye_hp_phi,
is_valid_theta_phi)
frame_data_dict['label/norm_reye_gaze_theta'] = populate_theta_phi(
angle_struct_ins.norm_reye_gaze_theta,
is_valid_theta_phi)
frame_data_dict['label/norm_reye_gaze_phi'] = populate_theta_phi(
angle_struct_ins.norm_reye_gaze_phi,
is_valid_theta_phi)
frame_data_dict['train/norm_per_oof'] = populate_head_norm_float(
angle_struct_ins.norm_per_oof, is_valid_theta_phi)
frame_data_dict['train/norm_facebb_x'] = populate_head_norm_bbinfo(
angle_struct_ins.norm_face_bb, 0, is_valid_theta_phi)
frame_data_dict['train/norm_facebb_y'] = populate_head_norm_bbinfo(
angle_struct_ins.norm_face_bb, 1, is_valid_theta_phi)
frame_data_dict['train/norm_facebb_w'] = populate_head_norm_bbinfo(
angle_struct_ins.norm_face_bb, 2, is_valid_theta_phi)
frame_data_dict['train/norm_facebb_h'] = populate_head_norm_bbinfo(
angle_struct_ins.norm_face_bb, 3, is_valid_theta_phi)
frame_data_dict['train/norm_leyebb_x'] = populate_head_norm_bbinfo(
angle_struct_ins.norm_leye_bb, 0, is_valid_theta_phi)
frame_data_dict['train/norm_leyebb_y'] = populate_head_norm_bbinfo(
angle_struct_ins.norm_leye_bb, 1, is_valid_theta_phi)
frame_data_dict['train/norm_leyebb_w'] = populate_head_norm_bbinfo(
angle_struct_ins.norm_leye_bb, 2, is_valid_theta_phi)
frame_data_dict['train/norm_leyebb_h'] = populate_head_norm_bbinfo(
angle_struct_ins.norm_leye_bb, 3, is_valid_theta_phi)
frame_data_dict['train/norm_reyebb_x'] = populate_head_norm_bbinfo(
angle_struct_ins.norm_reye_bb, 0, is_valid_theta_phi)
frame_data_dict['train/norm_reyebb_y'] = populate_head_norm_bbinfo(
angle_struct_ins.norm_reye_bb, 1, is_valid_theta_phi)
frame_data_dict['train/norm_reyebb_w'] = populate_head_norm_bbinfo(
angle_struct_ins.norm_reye_bb, 2, is_valid_theta_phi)
frame_data_dict['train/norm_reyebb_h'] = populate_head_norm_bbinfo(
angle_struct_ins.norm_reye_bb, 3, is_valid_theta_phi)
frame_data_dict['train/norm_landmarks'] = populate_head_norm_listinfo(
angle_struct_ins.norm_landmarks, '2D', is_valid_theta_phi)
frame_data_dict['train/norm_frame_path'] = populate_head_norm_path(
angle_struct_ins.norm_frame_path, is_valid_theta_phi)
frame_data_dict['train/landmarks_3D'] = populate_head_norm_listinfo(
angle_struct_ins.landmarks_3D, '3D', is_valid_theta_phi)
frame_data_dict['train/norm_face_cnv_mat'] = populate_head_norm_listinfo(
angle_struct_ins.norm_face_cnv_mat, 'cnv_mat', is_valid_theta_phi)
frame_data_dict['train/norm_leye_cnv_mat'] = populate_head_norm_listinfo(
angle_struct_ins.norm_leye_cnv_mat, 'cnv_mat', is_valid_theta_phi)
frame_data_dict['train/norm_reye_cnv_mat'] = populate_head_norm_listinfo(
angle_struct_ins.norm_reye_cnv_mat, 'cnv_mat', is_valid_theta_phi)
frame_data_dict['label/face_cam_x'], frame_data_dict['label/face_cam_y'], \
frame_data_dict['label/face_cam_z'] = populate_gaze_info(
angle_struct_ins.face_cam_mm,
is_valid_theta_phi)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataio/custom_jsonlabels_strategy.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TLT detection data sequence."""
import os
import cv2
import numpy as np
from PIL import Image
from pycocotools.coco import COCO
from nvidia_tao_tf1.cv.common.dataio.base_data_sequence import BaseDataSequence
class CocoDataSequence(BaseDataSequence):
"""Abstract class for TLT detection network.
To use dataloader:
1. call __init__(configs)
2. call add_source(image_folder, label_folder) to add sources
3. Use data generator in keras model.fit_generator()
Functions below must be implemented in derived classes:
1. _preprocessing
"""
def __init__(self,
dataset_config,
augmentation_config=None,
batch_size=10,
is_training=True,
encode_fn=None,
enable_mask=False,
root_path=None):
"""Class initialization."""
self.coco = []
# load data sources
if is_training:
data_sources = dataset_config.data_sources
set_name = 'train2017'
else:
data_sources = dataset_config.validation_data_sources
set_name = 'val2017'
for data_source in data_sources:
self._add_source(
os.path.join(root_path, data_source.image_directory_path)
if root_path else data_source.image_directory_path,
os.path.join(root_path, data_source.label_directory_path)
if root_path else data_source.label_directory_path,
set_name)
self.load_classes()
# use numpy array to accelerate
self.image_ids = self.coco.getImgIds()
if is_training:
np.random.shuffle(self.image_ids)
self.is_training = is_training
self.enable_mask = enable_mask
self.batch_size = batch_size
self.augmentation_config = augmentation_config
self.output_height = self.augmentation_config.output_height
self.output_width = self.augmentation_config.output_width
self.output_img_size = (self.output_width, self.output_height)
self.encode_fn = encode_fn
self.n_samples = len(self.image_ids)
print("Number of images: {}".format(self.n_samples))
def set_encoder(self, encode_fn):
'''Set label encoder.'''
self.encode_fn = encode_fn
def _add_source(self, image_folder, label_folder, set_name='train2017'):
"""Add COCO sources."""
self.raw_image_dir = image_folder
self.coco = COCO(os.path.join(label_folder, 'instances_' + set_name + '.json'))
def __len__(self):
"""Get length of Sequence."""
return int(np.ceil(self.n_samples / self.batch_size))
def read_image_rgb(self, path):
"""Read an image in BGR format.
Args
path: Path to the image.
"""
image = np.ascontiguousarray(Image.open(path).convert('RGB'))
return image.astype(np.float32)
def load_classes(self):
"""create class mapping."""
# load class names (name -> label)
categories = self.coco.loadCats(self.coco.getCatIds())
categories.sort(key=lambda x: x['id'])
self.classes = {}
self.coco_labels = {}
self.coco_labels_inverse = {}
for c in categories:
self.coco_labels[len(self.classes)] = c['id']
self.coco_labels_inverse[c['id']] = len(self.classes) + 1
self.classes[c['name']] = len(self.classes) + 1
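# Worked example of the mapping above (hypothetical category ids / names):
# if loadCats returns ids [1, 2, 4] named ['a', 'b', 'c'], the loop produces
#   coco_labels         = {0: 1, 1: 2, 2: 4}   # contiguous index -> COCO id
#   coco_labels_inverse = {1: 1, 2: 2, 4: 3}   # COCO id -> 1-based class label
#   classes             = {'a': 1, 'b': 2, 'c': 3}
# which leaves label 0 free (commonly used for background).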
def coco_label_to_label(self, coco_label):
"""coco label to label mapping."""
return self.coco_labels_inverse[coco_label]
def _load_gt_image(self, image_index):
"""Load image."""
image_info = self.coco.loadImgs(self.image_ids[image_index])[0]
path = os.path.join(self.raw_image_dir, image_info['file_name'])
return self.read_image_rgb(path)
def _load_gt_label(self, image_index):
"""Load COCO labels.
Returns:
[class_idx, is_difficult, x_min, y_min, x_max, y_max]
where is_difficult is hardcoded to 0 in the current COCO GT labels.
"""
# get image info
image_info = self.coco.loadImgs(self.image_ids[image_index])[0]
# get ground truth annotations
annotations_ids = self.coco.getAnnIds(imgIds=self.image_ids[image_index], iscrowd=False)
annotations = {
'labels': np.empty((0,)),
'bboxes': np.empty((0, 4)),
'masks': [],
}
# some images appear to miss annotations (like image with id 257034)
if len(annotations_ids) == 0:
# return empty annotations
return np.empty((0, 6))
# parse annotations
coco_annotations = self.coco.loadAnns(annotations_ids)
for _, a in enumerate(coco_annotations):
if 'segmentation' not in a:
raise ValueError('Expected \'segmentation\' key in annotation, got: {}'.format(a))
# some annotations have basically no width / height, skip them
if a['bbox'][2] < 1 or a['bbox'][3] < 1:
continue
annotations['labels'] = np.concatenate(
[annotations['labels'], [self.coco_label_to_label(a['category_id'])]], axis=0)
annotations['bboxes'] = np.concatenate([annotations['bboxes'], [[
a['bbox'][0],
a['bbox'][1],
a['bbox'][0] + a['bbox'][2],
a['bbox'][1] + a['bbox'][3],
]]], axis=0)
if self.enable_mask:
mask = np.zeros((image_info['height'], image_info['width'], 1), dtype=np.uint8)
for seg in a['segmentation']:
points = np.array(seg).reshape((len(seg) // 2, 2)).astype(int)
# draw mask
cv2.fillPoly(mask, [points.astype(int)], (1,))
annotations['masks'].append(mask.astype(float))
labels = np.expand_dims(annotations['labels'], axis=-1)
return np.concatenate((labels, np.full_like(labels, 0), annotations['bboxes']), axis=1)
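# Illustrative shape of the returned label array for two annotations
# (values are made up):
#   array([[ 1., 0.,  12.,  40., 200., 180.],
#          [ 3., 0.,  55.,  60.,  90., 120.]])
# i.e. (num_annotations, 6) with columns
# [class_idx, is_difficult, x_min, y_min, x_max, y_max].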
def _get_single_item(self, idx):
"""Load and process single image and its label."""
image = self._load_gt_image(idx)
label = self._load_gt_label(idx)
image, label = self._preprocessing(image, label, self.enable_mask)
return image, label
def __getitem__(self, batch_idx):
"""Load a full batch."""
images = []
labels = []
for idx in range(batch_idx * self.batch_size,
min(self.n_samples, (batch_idx + 1) * self.batch_size)):
image, label = self._get_single_item(idx)
images.append(image)
labels.append(label)
return self._batch_post_processing(images, labels)
def on_epoch_end(self):
"""shuffle data at end."""
if self.is_training:
np.random.shuffle(self.image_ids)
def _batch_post_processing(self, images, labels):
"""Post processing for a batch."""
return images, labels
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataio/coco_data_sequence.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate tfrecords data for customer input data."""
import six
import tensorflow as tf
from nvidia_tao_tf1.cv.common.dataio.data_converter import DataConverter, TfRecordType
class CustomDataConverter(DataConverter):
"""Converts a dataset to TFRecords."""
feature_to_type = {
'train/image_frame_name' : TfRecordType.BYTES,
'train/image_frame_width' : TfRecordType.INT64,
'train/image_frame_height' : TfRecordType.INT64,
'train/facebbx_x' : TfRecordType.INT64,
'train/facebbx_y' : TfRecordType.INT64,
'train/facebbx_w' : TfRecordType.INT64,
'train/facebbx_h' : TfRecordType.INT64,
'train/lefteyebbx_x' : TfRecordType.INT64,
'train/lefteyebbx_y' : TfRecordType.INT64,
'train/lefteyebbx_w' : TfRecordType.INT64,
'train/lefteyebbx_h' : TfRecordType.INT64,
'train/righteyebbx_x' : TfRecordType.INT64,
'train/righteyebbx_y' : TfRecordType.INT64,
'train/righteyebbx_w' : TfRecordType.INT64,
'train/righteyebbx_h' : TfRecordType.INT64,
'train/landmarks' : TfRecordType.DTYPE_FLOAT,
'train/landmarks_occ' : TfRecordType.DTYPE_INT64,
'label/left_eye_status' : TfRecordType.BYTES,
'label/right_eye_status' : TfRecordType.BYTES,
'train/num_keypoints' : TfRecordType.INT64,
'train/tight_facebbx_x1' : TfRecordType.INT64,
'train/tight_facebbx_y1' : TfRecordType.INT64,
'train/tight_facebbx_x2' : TfRecordType.INT64,
'train/tight_facebbx_y2' : TfRecordType.INT64,
'label/hp_pitch': TfRecordType.FLOAT, # Degrees
'label/hp_yaw': TfRecordType.FLOAT, # Degrees
'label/hp_roll': TfRecordType.FLOAT, # Degrees
'label/theta': TfRecordType.FLOAT, # Radians
'label/phi': TfRecordType.FLOAT, # Radians
'label/mid_cam_x': TfRecordType.FLOAT, # Mid eye center - x
'label/mid_cam_y': TfRecordType.FLOAT, # Mid eye center - y
'label/mid_cam_z': TfRecordType.FLOAT, # Mid eye center - z
'label/lpc_cam_x': TfRecordType.FLOAT, # Left eye center - x
'label/lpc_cam_y': TfRecordType.FLOAT, # Left eye center - y
'label/lpc_cam_z': TfRecordType.FLOAT, # Left eye center - z
'label/rpc_cam_x': TfRecordType.FLOAT, # Right eye center - x
'label/rpc_cam_y': TfRecordType.FLOAT, # Right eye center - y
'label/rpc_cam_z': TfRecordType.FLOAT, # Right eye center - z
'train/valid_theta_phi' : TfRecordType.INT64, # 1 if valid, 0 otherwise
'label/theta_le' : TfRecordType.FLOAT, # In radians
'label/phi_le' : TfRecordType.FLOAT, # In radians
'label/theta_re' : TfRecordType.FLOAT, # In radians
'label/phi_re' : TfRecordType.FLOAT, # In radians
'label/theta_mid' : TfRecordType.FLOAT, # In radians
'label/phi_mid' : TfRecordType.FLOAT, # In radians
'label/head_pose_theta' : TfRecordType.FLOAT, # In radians
'label/head_pose_phi' : TfRecordType.FLOAT, # In radians
'train/eye_features' : TfRecordType.DTYPE_FLOAT,
'train/source' : TfRecordType.BYTES,
'train/num_eyes_detected': TfRecordType.INT64,
'train/norm_frame_path': TfRecordType.BYTES,
'label/norm_face_gaze_theta': TfRecordType.FLOAT, # In radians
'label/norm_face_gaze_phi': TfRecordType.FLOAT, # In radians
'label/norm_face_hp_theta': TfRecordType.FLOAT, # In radians
'label/norm_face_hp_phi': TfRecordType.FLOAT, # In radians
'label/norm_leye_gaze_theta': TfRecordType.FLOAT, # In radians
'label/norm_leye_gaze_phi': TfRecordType.FLOAT, # In radians
'label/norm_leye_hp_theta': TfRecordType.FLOAT, # In radians
'label/norm_leye_hp_phi': TfRecordType.FLOAT, # In radians
'label/norm_reye_gaze_theta': TfRecordType.FLOAT, # In radians
'label/norm_reye_gaze_phi': TfRecordType.FLOAT, # In radians
'label/norm_reye_hp_theta': TfRecordType.FLOAT, # In radians
'label/norm_reye_hp_phi': TfRecordType.FLOAT, # In radians
'train/norm_facebb_x': TfRecordType.INT64,
'train/norm_facebb_y': TfRecordType.INT64,
'train/norm_facebb_w': TfRecordType.INT64,
'train/norm_facebb_h': TfRecordType.INT64,
'train/norm_leyebb_x': TfRecordType.INT64,
'train/norm_leyebb_y': TfRecordType.INT64,
'train/norm_leyebb_w': TfRecordType.INT64,
'train/norm_leyebb_h': TfRecordType.INT64,
'train/norm_reyebb_x': TfRecordType.INT64,
'train/norm_reyebb_y': TfRecordType.INT64,
'train/norm_reyebb_w': TfRecordType.INT64,
'train/norm_reyebb_h': TfRecordType.INT64,
'train/norm_landmarks': TfRecordType.DTYPE_FLOAT,
'train/norm_per_oof': TfRecordType.FLOAT,
'train/landmarks_3D': TfRecordType.DTYPE_FLOAT
}
lm_pred_feature_to_type = {k : v for k, v in six.iteritems(feature_to_type)
if 'lefteyebbx' not in k and
'righteyebbx' not in k and
'facebbx' not in k and
'num_eyes_detected' not in k}
# Convert from enum type to read tfrecord type
enum_to_read_dict = {
TfRecordType.BYTES : tf.FixedLenFeature([], dtype=tf.string),
TfRecordType.FLOAT : tf.FixedLenFeature([], dtype=tf.float32),
TfRecordType.INT64 : tf.FixedLenFeature([], dtype=tf.int64),
TfRecordType.DTYPE_FLOAT : tf.VarLenFeature(tf.float32),
TfRecordType.DTYPE_INT64 : tf.VarLenFeature(tf.int64)
}
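# Sketch of how the two mappings above are typically combined on the reading
# side (illustrative only; `serialized_example` is a hypothetical input tensor
# and the actual reader lives elsewhere in the pipeline):
#
#   features = {name: CustomDataConverter.enum_to_read_dict[rec_type]
#               for name, rec_type in CustomDataConverter.feature_to_type.items()}
#   parsed = tf.parse_single_example(serialized_example, features)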
def __init__(self, use_lm_pred):
"""Initialize file paths and features.
Args:
tfrecord_files_path (path): Path to dump tfrecord files.
use_lm_pred (bool): True if using predicted landmarks.
"""
self._feature_to_type_dict = self.feature_to_type
if use_lm_pred:
self._feature_to_type_dict = self.lm_pred_feature_to_type
def generate_frame_tfrecords(self, frame_dict):
"""Write collected data dict into tfrecords.
Args:
frame_dict (dict): dictionary for frame information
"""
example_array = []
for frame in frame_dict.keys():
frame_features = {}
frame_data_dict = frame_dict[frame]
for feature in self._feature_to_type_dict.keys():
self.write_feature(feature, frame_data_dict[feature], frame_features)
example = tf.train.Example(features=tf.train.Features(feature=frame_features))
example_array.append(example)
return example_array
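# Illustrative caller-side sketch (assumed code, not part of this module;
# the output path is hypothetical):
#
#   converter = CustomDataConverter(use_lm_pred=False)
#   examples = converter.generate_frame_tfrecords(frame_dict)
#   with tf.python_io.TFRecordWriter('/tmp/custom.tfrecords') as writer:
#       for example in examples:
#           writer.write(example.SerializeToString())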
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataio/custom_data_converter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate manually engineered eye features."""
import abc
import numpy as np
num_pts_eye_outline = 6
num_pts_pupil_outline = 4
eye_index = 36
eye_end_index_diff = 3
pupil_index = 68
class EyeFeaturesStrategy(object):
"""Abstract class with common methods for eye features generation."""
__metaclass__ = abc.ABCMeta
@staticmethod
def normalize_gaze_coord(coord_row, origin, distance):
"""Normalize coordinate to reduce the range."""
return (coord_row - origin) / distance
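# Worked example (illustrative numbers): a coordinate of 120 with origin 100
# and distance 40 normalizes to (120 - 100) / 40 = 0.5.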
@staticmethod
def _get_flattened_list(np_arr):
assert isinstance(np_arr, np.ndarray)
return np_arr.reshape(-1).tolist()
def __init__(self, landmarks_2D):
"""Initialize landmarks."""
self._landmarks_2D = landmarks_2D
@abc.abstractmethod
def get_eye_features(self):
"""Return generated eye features."""
pass
class PupilStrategy(EyeFeaturesStrategy):
"""Eye features generation with pupils."""
@staticmethod
def _get_pupil_center(pupil_coord):
pupil_remaining = pupil_coord
separate_by_zero_indices = np.where(pupil_coord == 0)
pupil_zero_indices = separate_by_zero_indices[0]
if 0 in pupil_zero_indices or 2 in pupil_zero_indices:
pupil_remaining = np.delete(pupil_coord, [0, 2], axis=0)
elif 1 in pupil_zero_indices or 3 in pupil_zero_indices:
pupil_remaining = np.delete(pupil_coord, [1, 3], axis=0)
return np.mean(pupil_remaining, axis=0)
@staticmethod
def _get_eye_pupil_ratio(eye_coord, pupil_center):
max_eye_x = np.amax(eye_coord[:, 0])
max_eye_y = np.amax(eye_coord[:, 1])
min_eye_x = np.amin(eye_coord[:, 0])
min_eye_y = np.amin(eye_coord[:, 1])
max_min_x_to_pupil_ratio = np.abs(
max_eye_x - pupil_center[0]) / (np.abs(pupil_center[0] - min_eye_x) + 1)
max_min_y_to_pupil_ratio = np.abs(
max_eye_y - pupil_center[1]) / (np.abs(pupil_center[1] - min_eye_y) + 1)
return max_min_x_to_pupil_ratio, max_min_y_to_pupil_ratio
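# Interpretation note: a ratio close to 1 means the pupil center lies roughly
# midway between the eye-corner extremes along that axis; the +1 in the
# denominators guards against division by zero.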
def _extract_side_eye_features(self, eye_coord, pupil_coord):
pupil_center = self._get_pupil_center(pupil_coord)
dist_eye_pupil = pupil_center - eye_coord
max_min_x_to_pupil_ratio, max_min_y_to_pupil_ratio = \
self._get_eye_pupil_ratio(eye_coord, pupil_center)
eye_origin = eye_coord[0]
eye_dist = np.linalg.norm(eye_origin - eye_coord[3])
if eye_dist == 0:
eye_dist += np.finfo(float).eps
norm_pupil_center = np.apply_along_axis(
self.normalize_gaze_coord,
0,
pupil_center,
origin=eye_origin,
distance=eye_dist)
norm_eye_coord = np.apply_along_axis(
self.normalize_gaze_coord,
1,
eye_coord,
origin=eye_origin,
distance=eye_dist)
eye_features = self._get_flattened_list(norm_eye_coord)
eye_features.extend(self._get_flattened_list(norm_pupil_center))
eye_features.extend(self._get_flattened_list(dist_eye_pupil))
eye_features.append(max_min_x_to_pupil_ratio)
eye_features.append(max_min_y_to_pupil_ratio)
return eye_features
def get_eye_features(self, landmarks_2D):
"""Generate eye features with pupils."""
# 6 landmark points per eye outline -> 12 points for both eyes
eye_pts_index = eye_index
n_points_per_eye = num_pts_eye_outline
right_eye_pts_index = eye_pts_index + n_points_per_eye
left_eye_pts = landmarks_2D[eye_pts_index:right_eye_pts_index]
right_eye_pts = \
landmarks_2D[right_eye_pts_index:right_eye_pts_index + n_points_per_eye]
# Flip to user's perspective
left_eye_pts, right_eye_pts = right_eye_pts, left_eye_pts
# 4 landmark points per pupil outline -> 8 points for both pupils
pupil_pts_index = pupil_index
n_points_per_pupil = num_pts_pupil_outline
right_pupil_pts_index = pupil_pts_index + n_points_per_pupil
left_pupil_pts = landmarks_2D[pupil_pts_index:right_pupil_pts_index]
right_pupil_pts = \
landmarks_2D[right_pupil_pts_index:right_pupil_pts_index + n_points_per_pupil]
# Flip to user's perspective
left_pupil_pts, right_pupil_pts = right_pupil_pts, left_pupil_pts
eye_features = []
eye_features.extend(self._extract_side_eye_features(left_eye_pts, left_pupil_pts))
eye_features.extend(self._extract_side_eye_features(right_eye_pts, right_pupil_pts))
return np.asarray(eye_features, dtype=np.longdouble)
class NoPupilStrategy(EyeFeaturesStrategy):
"""Eye features generation without pupils."""
def _extract_side_eye_features(self, eye_coord):
eye_origin = eye_coord[0]
eye_dist = np.linalg.norm(eye_origin - eye_coord[3])
return np.apply_along_axis(
self.normalize_gaze_coord,
1,
eye_coord,
origin=eye_origin,
distance=eye_dist)
def get_eye_features(self, landmarks_2D):
"""Generate eye features without pupils."""
# 6 landmark points per eye outline -> 12 points for both eyes
eye_pts_index = eye_index
n_points_per_eye = num_pts_eye_outline
right_eye_pts_index = eye_pts_index + n_points_per_eye
left_eye_pts = self._landmarks_2D[eye_pts_index:right_eye_pts_index]
right_eye_pts = \
self._landmarks_2D[right_eye_pts_index:right_eye_pts_index + n_points_per_eye]
# Flip to user's perspective
left_eye_pts, right_eye_pts = right_eye_pts, left_eye_pts
eye_features = np.empty([56, ], dtype=np.longdouble)
eye_features.fill(-1)
n_flattened_points_per_eye = n_points_per_eye * 2
eye_features[:n_flattened_points_per_eye] = \
self._extract_side_eye_features(left_eye_pts).reshape(-1)
eye_features[28 : 28 + n_flattened_points_per_eye] = \
self._extract_side_eye_features(right_eye_pts).reshape(-1)
return eye_features
class EyeFeaturesGenerator(object):
"""Return generated eye features of an eye features strategy."""
def __init__(self, landmarks_2D, n_landmarks):
"""Initialize landmarks and strategy."""
self._landmarks_2D = landmarks_2D
end_pupil_index = pupil_index + \
2 * num_pts_eye_outline
if n_landmarks < end_pupil_index:
self._strategy = NoPupilStrategy(landmarks_2D)
else:
self._strategy = PupilStrategy(landmarks_2D)
def get_eye_features(self):
"""Return eye features generated by strategy."""
return self._strategy.get_eye_features(self._landmarks_2D)
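# Illustrative usage sketch (assumed input): `landmarks` is an (N, 2) numpy
# array of 2D facial landmarks. With N >= 80 the pupil points at indices 68-75
# are available and PupilStrategy is selected; otherwise NoPupilStrategy is used.
#
#   import numpy as np
#   landmarks = np.zeros((80, 2), dtype=np.longdouble)  # placeholder values
#   features = EyeFeaturesGenerator(landmarks, landmarks.shape[0]).get_eye_features()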
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataio/eye_features_generator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Strategy for tfrecord generation using SDK labels."""
import os
from nvidia_tao_tf1.cv.common.dataio.bbox_strategy import EyeBboxStrategy, FaceBboxStrategy
from nvidia_tao_tf1.cv.common.dataio.eye_status import EyeStatus
from nvidia_tao_tf1.cv.common.dataio.tfrecordlabels_strategy import TfRecordLabelsStrategy
from nvidia_tao_tf1.cv.common.dataio.utils import get_file_name_noext
class SdkLabelsStrategy(TfRecordLabelsStrategy):
"""Use SDK results from pre-labelled data to generate tfrecords."""
def __init__(
self,
set_id,
use_unique,
logger,
set_strategy,
norm_folder_name,
save_images
):
"""Initialize parameters.
Args:
set_id (str): Set for which to generate tfrecords.
use_unique (bool): Only create records for first frame in a series if true.
logger (Logger object): Report failures and number of tfrecords lost for tracking.
set_strategy (SetStrategy object): Strategy for set type (gaze / eoc).
norm_folder_name (str): Folder name to save normalized face, eyes and frame images.
save_images (bool): Whether to generate new folders and images for face crop, eyes, etc.
"""
super(SdkLabelsStrategy, self).__init__(
set_id,
use_unique,
logger,
set_strategy,
norm_folder_name,
save_images)
self._eye_status_path = os.path.join(self._paths.info_source_path, 'results')
self._bounding_box_paths = [
os.path.join(self._paths.info_source_path, 'two_eyes'),
os.path.join(self._paths.info_source_path, 'one_eye'),
os.path.join(self._paths.info_source_path, 'no_eye')
]
self._landmarks_path = os.path.join(self._paths.info_source_path, 'facelandmark')
# Use given landmarks path if possible
if self._paths.landmarks_path:
self._landmarks_path = self._paths.landmarks_path
def extract_landmarks(self):
"""SDK Nvhelnet tfrecord generation read landmarks from files in landmarks path."""
self._read_landmarks_from_path()
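# The bounding-box files parsed below contain one whitespace-separated line per
# frame. Layout inferred from the slicing in this file (an assumption, not a
# documented format):
#   line_split[0]    -> frame path
#   line_split[1:5]  -> face bbox as x, y, w, h
#   line_split[5:9]  -> left-eye bbox corners x1, y1, x2, y2 (person's perspective)
#   line_split[9:13] -> right-eye bbox corners x1, y1, x2, y2 (person's perspective)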
def _process_eye_bbox(self, frame_dict, line_split, frame_w, frame_h):
num_eyes_detected = 0
left_eye_bbx = list(map(int, line_split[5:9]))
left_eye_bbx_processed = self._set_strategy.get_pts(
left_eye_bbx, frame_w, frame_h)
left_eye_bbx = EyeBboxStrategy(
frame_w,
frame_h,
left_eye_bbx_processed).get_square_bbox()
frame_dict['train/lefteyebbx_x'] = left_eye_bbx[0]
frame_dict['train/lefteyebbx_y'] = left_eye_bbx[1]
frame_dict['train/lefteyebbx_w'] = left_eye_bbx[2]
frame_dict['train/lefteyebbx_h'] = left_eye_bbx[3]
if -1 not in left_eye_bbx:
num_eyes_detected += 1
right_eye_bbx = list(map(int, line_split[9:13]))
right_eye_bbx_processed = self._set_strategy.get_pts(
right_eye_bbx, frame_w, frame_h)
right_eye_bbx = EyeBboxStrategy(
frame_dict['train/image_frame_width'],
frame_dict['train/image_frame_height'],
right_eye_bbx_processed).get_square_bbox()
frame_dict['train/righteyebbx_x'] = right_eye_bbx[0]
frame_dict['train/righteyebbx_y'] = right_eye_bbx[1]
frame_dict['train/righteyebbx_w'] = right_eye_bbx[2]
frame_dict['train/righteyebbx_h'] = right_eye_bbx[3]
if -1 not in right_eye_bbx:
num_eyes_detected += 1
frame_dict['train/num_eyes_detected'] = num_eyes_detected
@staticmethod
def _get_scaled_facebbx(x1, y1, x2, y2, frame_w, frame_h):
distort_face_coords = [x1, y1, x2, y2]
x1, y1, side_len = FaceBboxStrategy(
frame_w,
frame_h,
x1,
y1,
x2 - x1,
y2 - y1).get_square_bbox()
scaled_facebbx = x1, y1, side_len, side_len
return distort_face_coords, scaled_facebbx
def extract_bbox(self):
"""SDK Nvhelnet tfrecord generation read bounding boxes from paths."""
# No bounding boxes when using predicted landmarks
if self._set_strategy._landmarks_folder_name is not None:
return
for bounding_box_path in self._bounding_box_paths:
for user_file in os.listdir(bounding_box_path):
user_name = get_file_name_noext(user_file)
user_path = os.path.join(bounding_box_path, user_file)
with open(user_path, 'r') as user_bbox:
for line in user_bbox:
line_split = line.rstrip().split(' ')
path_split = line_split[0].split('/')
frame_name = get_file_name_noext(path_split[-1])
if len(self._paths.regions) == 1 and self._paths.regions[0] == '':
# On-bench data collection has no regions.
region_name = ''
else:
region_name = path_split[-2]
frame_dict = self._users[user_name][region_name][frame_name]
frame_dict['train/eye_detect_found'] = os.path.basename(
os.path.normpath(bounding_box_path))
# SDK: person's perspective for left and right eyes
try:
frame_w = frame_dict['train/image_frame_width']
frame_h = frame_dict['train/image_frame_height']
self._process_eye_bbox(frame_dict, line_split, frame_w, frame_h)
unprocessed_face_bbx = list(map(int, line_split[1:5]))
x1 = unprocessed_face_bbx[0]
y1 = unprocessed_face_bbx[1]
w = unprocessed_face_bbx[2]
h = unprocessed_face_bbx[3]
x2 = x1 + w
y2 = y1 + h
face_coords, scaled_facebbx = self._get_scaled_facebbx(
x1, y1, x2, y2, frame_w, frame_h)
self._populate_frame_dict(
frame_dict,
[
'internal/facebbx_x_distort',
'internal/facebbx_y_distort',
'internal/facebbx_w_distort',
'internal/facebbx_h_distort',
],
scaled_facebbx)
if self._set_strategy.use_undistort():
x1, y1, x2, y2 = self._set_strategy.get_pts(
[x1, y1, x2, y2],
frame_w,
frame_h)
w = x2 - x1
h = y2 - y1
face_coords, scaled_facebbx = self._get_scaled_facebbx(
x1, y1, x2, y2, frame_w, frame_h)
self._populate_frame_dict(
frame_dict,
[
'train/tight_facebbx_x1',
'train/tight_facebbx_y1',
'train/tight_facebbx_x2',
'train/tight_facebbx_y2'
],
list(map(int, face_coords)))
self._populate_frame_dict(
frame_dict,
[
'train/facebbx_x',
'train/facebbx_y',
'train/facebbx_w',
'train/facebbx_h'
],
scaled_facebbx)
except Exception:
self._logger.add_warning(
'Cannot draw valid eye bounding box {}'.format(user_path))
continue
def extract_eye_status(self):
"""SDK Nvhelnet tfrecord generation read eye status as open / closed."""
def _map_eye_status(status_val):
if str(status_val) == '0':
return EyeStatus.closed_eye_status
return EyeStatus.open_eye_status
for user_file in os.listdir(self._eye_status_path):
file_name_noext = get_file_name_noext(user_file)
# Nvhelnet wink files store eye open / close status
if not file_name_noext.endswith('_wink'):
continue
user_name = file_name_noext[:-5]
user_path = os.path.join(self._eye_status_path, user_file)
with open(user_path, 'r') as user_eye_status:
for line in user_eye_status:
line_split = line.rstrip().split(' ')
path_split = line_split[0].split('/')
frame_name = get_file_name_noext(path_split[-1])
if len(self._paths.regions) == 1 and self._paths.regions[0] == '':
# On-bench data collection has no regions.
region_name = ''
else:
region_name = path_split[-2]
self._users[user_name][region_name][frame_name][
'label/left_eye_status'] = _map_eye_status(line_split[1])
self._users[user_name][region_name][frame_name][
'label/right_eye_status'] = _map_eye_status(line_split[3])
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataio/sdklabels_strategy.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encapsulate implementation for eye-open-close (EOC) sets."""
import os
from nvidia_tao_tf1.cv.common.dataio.set_strategy import SetStrategy
from nvidia_tao_tf1.cv.common.dataio.utils import mkdir
class EocStrategy(SetStrategy):
"""Class encapsulates implementation specific to EOC sets."""
eoc_parent_set_path = '/home/projects1_copilot/RealTimePipeline/set'
eoc_error_path = '/home/projects1_copilot/RealTimePipeline/errors'
def __init__(
self,
set_id,
experiment_folder_suffix,
tfrecord_folder_name,
gt_folder_name,
landmarks_folder_name,
set_label_sorter
):
"""Initialize parameters.
Args:
set_id (str): Set for which to generate tfrecords.
experiment_folder_suffix (str): Suffix of experiment folder containing tfrecords.
tfrecord_folder_name (str): Folder name of folder containing tfrecords.
gt_folder_name (str): Folder name of folder containing ground truth txt files.
landmarks_folder_name (str): Folder name of predicted landmarks, or None to disable.
set_label_sorter (SetLabelSorter object): Object to sort set as DataFactory / Nvhelnet.
"""
super(EocStrategy, self).__init__(
set_id,
experiment_folder_suffix,
tfrecord_folder_name,
gt_folder_name,
landmarks_folder_name,
set_label_sorter)
self._set_source_paths()
self._set_camera_parameters()
def _set_camera_parameters(self):
self._cam_intrinsics, self._cam_extrinsics, self._screen_params = None, None, None
def _get_json_path(self, set_id_path):
return self._check_paths(set_id_path, [
'json_datafactory_v3',
'json_datafactory_v2',
'json_datafactory'
])
def _set_source_paths(self):
eoc_cosmos_set_path = os.path.join(
self.eoc_parent_set_path,
self._set_id)
strategy_type, info_source_path, self.experiment_folder_name = \
self._set_label_sorter.get_info_source_path(
self._get_json_path,
eoc_cosmos_set_path,
eoc_cosmos_set_path)
lm_path = self._get_landmarks_path(eoc_cosmos_set_path)
if lm_path is not None:
self.experiment_folder_name = self.fpe_expr_folder + self._experiment_folder_suffix
experiment_folder_path = os.path.join(eoc_cosmos_set_path, self.experiment_folder_name)
mkdir(experiment_folder_path)
self._strategy_type, self._paths = strategy_type, self.PathStruct(
error_path=os.path.join(self.eoc_error_path),
data_path=os.path.join(eoc_cosmos_set_path, 'Data'),
config_path=None,
tfrecord_path=os.path.join(
eoc_cosmos_set_path,
self.experiment_folder_name,
self._tfrecord_folder_name),
gt_path=os.path.join(
eoc_cosmos_set_path,
self.experiment_folder_name,
self._gt_folder_name),
info_source_path=info_source_path,
filtered_path=None,
landmarks_path=lm_path,
regions=[''])
def extract_gaze_info(self, frame_data_dict, frame_name, region_name):
"""No gaze information available."""
frame_data_dict['label/gaze_screen_x'] = -1
frame_data_dict['label/gaze_screen_y'] = -1
frame_data_dict['label/gaze_cam_x'] = -1
frame_data_dict['label/gaze_cam_y'] = -1
frame_data_dict['label/gaze_cam_z'] = -1
def get_pts(self, pts, frame_width, frame_height):
"""EOC sets have no undistort data, so it should always return original pts."""
return pts
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataio/eoc_strategy.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Strategy for tfrecord generation using post JSON labels."""
from collections import defaultdict
import json
import os
import numpy as np
from nvidia_tao_tf1.cv.common.dataio.bbox_strategy import EyeBboxStrategy, FaceBboxStrategy
from nvidia_tao_tf1.cv.common.dataio.eye_features_generator import (
eye_end_index_diff, eye_index, EyeFeaturesGenerator, num_pts_eye_outline)
from nvidia_tao_tf1.cv.common.dataio.eye_status import EyeStatus
from nvidia_tao_tf1.cv.common.dataio.tfrecordlabels_strategy import TfRecordLabelsStrategy
from nvidia_tao_tf1.cv.common.dataio.utils import get_file_ext, get_file_name_noext
class JsonLabelsStrategy(TfRecordLabelsStrategy):
"""Use JSON post labels to generate tfrecords."""
def __init__(
self,
set_id,
use_unique,
logger,
set_strategy,
norm_folder_name,
save_images
):
"""
Initialize parameters.
Args:
set_id (str): Set for which to generate tfrecords.
use_unique (bool): Only create records for first frame in a series if true.
logger (Logger object): Report failures and number of tfrecords lost for tracking.
set_strategy (SetStrategy object): Strategy for set type (gaze / eoc).
norm_folder_name (str): Folder name to save normalized face, eyes and frame images.
save_images (bool): Whether to generate new folders and images for face crop, eyes, etc.
"""
super(JsonLabelsStrategy, self).__init__(
set_id,
use_unique,
logger,
set_strategy,
norm_folder_name,
save_images)
self._landmarks_path = self._paths.landmarks_path
self._users_json = defaultdict(lambda: defaultdict(
lambda: defaultdict(lambda: defaultdict())))
self._extract_json()
def _extract_json(self):
for user_json_file in os.listdir(self._paths.info_source_path):
user_path = os.path.join(self._paths.info_source_path, user_json_file)
if get_file_ext(user_json_file) != '.json':
continue
# On bench data format for user_json_file: <set-id>_<user>.json
user_name = str(get_file_name_noext(user_json_file)[len(self._set_id) + 1:])
# In-car data format for user_json_file: <set-id>_<user>_<region>.json
if len(self._paths.regions) > 1:
user_name = user_name.split('_')[0]
try:
with open(user_path, 'r') as user_json:
user_read_json = json.load(user_json)
except Exception:
self._logger.add_error(
'Json file improperly formatted for user {}'.format(user_name))
for frame_json in user_read_json:
if 'annotations' not in frame_json:
continue
frame_name = get_file_name_noext(frame_json['filename'].split('/')[-1])
if len(self._paths.regions) == 1 and self._paths.regions[0] == '':
# On-bench data collection has no regions.
region_name = ''
else:
region_name = frame_json['filename'].split('/')[-2]
self._users_json[user_name][region_name][frame_name] = frame_json['annotations']
def _extract_fiducial_points(self, chunk):
x = [-1] * self.Pipeline_Constants.num_fid_points
y = [-1] * self.Pipeline_Constants.num_fid_points
occlusions = [-1] * self.Pipeline_Constants.num_fid_points
num_landmarks = None
for point in (
point for point in chunk if (
'class' not in point and 'version' not in point)):
try:
number = int(''.join(c for c in str(point) if c.isdigit()))
if num_landmarks is None or number > num_landmarks:
num_landmarks = number
if 'x' in str(point).lower() and number <= self.Pipeline_Constants.num_fid_points:
x[number - 1] = str(np.longdouble(chunk[point]))
if 'y' in str(point).lower() and number <= self.Pipeline_Constants.num_fid_points:
y[number - 1] = str(np.longdouble(chunk[point]))
if (
'occ' in str(point).lower() and
number <= self.Pipeline_Constants.num_fid_points and
chunk[point]
):
occlusions[number - 1] = 1
for index in range(num_landmarks):
if occlusions[index] == -1:
occlusions[index] = 0
except Exception as e:
print('Exception occurred during parsing')
print(str(e))
print(str(point))
return x, y, occlusions, num_landmarks
def _extract_landmarks_from_json(self):
for user in self._users_json.keys():
for region in self._users_json[user].keys():
for frame in self._users_json[user][region].keys():
json_frame_dict = self._users_json[user][region][frame]
frame_dict = self._users[user][region][frame]
for chunk in json_frame_dict:
if 'class' not in chunk:
continue
chunk_class = str(chunk['class']).lower()
if chunk_class == 'fiducialpoints':
x, y, occlusions, num_landmarks = self._extract_fiducial_points(chunk)
landmarks_2D = np.asarray([x, y], dtype=np.longdouble).T
try:
frame_dict['internal/landmarks_2D_distort'] = np.copy(landmarks_2D)
landmarks_2D[:num_landmarks] = np.asarray(
self._set_strategy.get_pts(
landmarks_2D[:num_landmarks],
frame_dict['train/image_frame_width'],
frame_dict['train/image_frame_height'])).reshape(-1, 2)
frame_dict['internal/landmarks_2D'] = landmarks_2D
frame_dict['train/num_keypoints'] = num_landmarks
frame_dict['train/landmarks'] = landmarks_2D.reshape(-1)
frame_dict['train/landmarks_occ'] = np.asarray(occlusions).T
# Note eye_features only dependent on landmarks
frame_dict['train/eye_features'] = EyeFeaturesGenerator(
landmarks_2D,
num_landmarks).get_eye_features()
except Exception:
continue
def extract_landmarks(self):
"""JSON tfrecord generation read landmarks from json when there is no given path."""
if self._landmarks_path is None:
self._extract_landmarks_from_json()
return
self._read_landmarks_from_path()
@staticmethod
def _get_scaled_facebbx(facex1, facey1, facex2, facey2, frame_w, frame_h):
def _get_facebbx_legacy(x1, y1, x2, y2):
h = y2 - y1
y1 = max(0, y1 - 0.2 * h)
return x1, y1, x2, y2
distort_face_coords = [facex1, facey1, facex2, facey2]
legacy_face_coords = _get_facebbx_legacy(*distort_face_coords)
x1, y1, x2, y2 = legacy_face_coords
x1, y1, side_len = FaceBboxStrategy(
frame_w,
frame_h,
x1,
y1,
x2 - x1,
y2 - y1).get_square_bbox()
scaled_facebbx = x1, y1, side_len, side_len
return distort_face_coords, legacy_face_coords, scaled_facebbx
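# Worked example of the legacy expansion above (illustrative numbers): a tight
# box (x1=100, y1=200, x2=180, y2=300) has h=100, so y1 becomes
# max(0, 200 - 0.2 * 100) = 180 before the square padding is applied.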
@staticmethod
def _safeints(x):
x = int(x)
x = max(x, 0)
return int(x)
@classmethod
def _extract_from_facebbox(cls, chunk, facex1, facey1, facex2, facey2):
if (
'face_tight_bboxx' not in chunk or
'face_tight_bboxy' not in chunk or
'face_tight_bboxwidth' not in chunk or
'face_tight_bboxheight' not in chunk
):
return facex1, facey1, facex2, facey2
facex1 = cls._safeints(chunk['face_tight_bboxx'])
facey1 = cls._safeints(chunk['face_tight_bboxy'])
facex2 = cls._safeints(chunk['face_tight_bboxwidth']) + facex1
facey2 = cls._safeints(chunk['face_tight_bboxheight']) + facey1
return facex1, facey1, facex2, facey2
@classmethod
def _extract_from_rect(cls, chunk, prevArea, facex1, facey1, facex2, facey2):
height = chunk['height']
width = chunk['width']
if prevArea == 0:
facex1 = cls._safeints(chunk['x'])
facey1 = cls._safeints(chunk['y'])
facex2 = cls._safeints(chunk['width']) + facex1
facey2 = cls._safeints(chunk['height']) + facey1
prevArea = height * width
else:
if (height * width) < prevArea:
facex1 = cls._safeints(chunk['x'])
facey1 = cls._safeints(chunk['y'])
facex2 = cls._safeints(chunk['width']) + facex1
facey2 = cls._safeints(chunk['height']) + facey1
return prevArea, facex1, facey1, facex2, facey2
def _extract_face_bbox(self):
for user in self._users_json.keys():
for region in self._users_json[user].keys():
for frame in self._users_json[user][region].keys():
frame_dict = self._users[user][region][frame]
if (
'train/image_frame_width' not in frame_dict or
'train/image_frame_height' not in frame_dict
):
self._logger.add_error(
'''Could not find frame width and height.
User {} frame {} may not exist'''.format(
user, frame))
continue
prevArea = 0
facex1 = -1
facey1 = -1
facex2 = -1
facey2 = -1
json_frame_dict = self._users_json[user][region][frame]
for chunk in json_frame_dict:
if 'class' not in chunk:
continue
chunk_class = str(chunk['class']).lower()
if chunk_class == 'rect':
prevArea, facex1, facey1, facex2, facey2 = self._extract_from_rect(
chunk, prevArea, facex1, facey1, facex2, facey2)
elif chunk_class == 'facebbox':
facex1, facey1, facex2, facey2 = self._extract_from_facebbox(
chunk, facex1, facey1, facex2, facey2)
if -1 in (facex1, facey1, facex2, facey2):
self._logger.add_error(
'Unable to get face bounding box from json. User {}, frame {}'.format(
user, frame))
continue # skip img
frame_w = frame_dict['train/image_frame_width']
frame_h = frame_dict['train/image_frame_height']
face_coords, legacy_face_coords, scaled_facebbx = \
self._get_scaled_facebbx(
facex1,
facey1,
facex2,
facey2,
frame_w,
frame_h)
self._populate_frame_dict(
frame_dict,
[
'internal/facebbx_x_distort',
'internal/facebbx_y_distort',
'internal/facebbx_w_distort',
'internal/facebbx_h_distort',
],
scaled_facebbx)
if self._set_strategy.use_undistort():
# Recalculate face bounding box so it is undistorted
facex1, facey1, facex2, facey2 = self._set_strategy.get_pts(
face_coords,
frame_w,
frame_h)
face_coords, legacy_face_coords, scaled_facebbx = \
self._get_scaled_facebbx(
facex1,
facey1,
facex2,
facey2,
frame_w,
frame_h)
self._populate_frame_dict(
frame_dict,
[
'train/tight_facebbx_x1',
'train/tight_facebbx_y1',
'train/tight_facebbx_x2',
'train/tight_facebbx_y2'
],
list(map(int, face_coords)))
self._populate_frame_dict(
frame_dict,
[
'internal/facebbx_x1',
'internal/facebbx_y1',
'internal/facebbx_x2',
'internal/facebbx_y2'
],
legacy_face_coords)
self._populate_frame_dict(
frame_dict,
[
'train/facebbx_x',
'train/facebbx_y',
'train/facebbx_w',
'train/facebbx_h'
],
scaled_facebbx)
def _extract_eye_bbox(self):
def _format_eye_bbox(x1, y1, x2, y2, frame_dict):
face_x1, face_y1 = frame_dict['internal/facebbx_x1'], frame_dict['internal/facebbx_y1']
# Relative to face bbx
left = np.asarray([x1 - face_x1, y1 - face_y1], dtype=np.longdouble)
right = np.asarray([x2 - face_x1, y2 - face_y1], dtype=np.longdouble)
width = np.power(np.sum(np.square(left - right)), 0.5)
eye_pupil = np.true_divide(np.add(left, right), 2)
upper_left = np.subtract(eye_pupil, width)
lower_right = np.add(eye_pupil, np.true_divide(width, 1.5))
coords = np.asarray([upper_left, lower_right], dtype=np.longdouble)
# Back to frame coord
back_global_coord = np.add(coords, np.asarray([face_x1, face_y1]))
back_global_coord[:, 0] = np.clip(
back_global_coord[:, 0],
face_x1,
frame_dict['internal/facebbx_x2'])
back_global_coord[:, 1] = np.clip(
back_global_coord[:, 1],
face_y1,
frame_dict['internal/facebbx_y2'])
[eye_x1, eye_y1], [eye_x2, eye_y2] = back_global_coord.tolist()
return eye_x1, eye_y1, eye_x2, eye_y2
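# Geometry note for the helper above: the two landmarks are treated as eye
# corners; their midpoint approximates the pupil center, and the crop spans one
# corner-to-corner width toward the upper-left and width / 1.5 toward the
# lower-right of that midpoint (in face-box coordinates), clamped to the face box.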
for user in self._users.keys():
for region in self._users[user].keys():
for frame in self._users[user][region].keys():
frame_dict = self._users[user][region][frame]
# Landmarks and facebbox should be extracted already
if (
'internal/landmarks_2D' not in frame_dict or
'internal/facebbx_x1' not in frame_dict or
'internal/facebbx_y1' not in frame_dict or
'internal/facebbx_x2' not in frame_dict or
'internal/facebbx_y2' not in frame_dict
):
continue
landmarks_2D = frame_dict['internal/landmarks_2D']
right_eye_begin = eye_index
right_eye_end = right_eye_begin + eye_end_index_diff
r_x1, r_y1 = landmarks_2D[right_eye_begin].tolist()
r_x2, r_y2 = landmarks_2D[right_eye_end].tolist()
left_eye_begin = right_eye_begin + num_pts_eye_outline
left_eye_end = left_eye_begin + eye_end_index_diff
l_x1, l_y1 = landmarks_2D[left_eye_begin].tolist()
l_x2, l_y2 = landmarks_2D[left_eye_end].tolist()
right_eye_bbx = _format_eye_bbox(r_x1, r_y1, r_x2, r_y2, frame_dict)
left_eye_bbx = _format_eye_bbox(l_x1, l_y1, l_x2, l_y2, frame_dict)
try:
num_eyes_detected = 0
right_eye_bbx_processed = self._set_strategy.get_pts(
right_eye_bbx,
frame_dict['train/image_frame_width'],
frame_dict['train/image_frame_height'])
right_eye_bbx = EyeBboxStrategy(
frame_dict['train/image_frame_width'],
frame_dict['train/image_frame_height'],
right_eye_bbx_processed).get_square_bbox()
frame_dict['train/righteyebbx_x'] = right_eye_bbx[0]
frame_dict['train/righteyebbx_y'] = right_eye_bbx[1]
frame_dict['train/righteyebbx_w'] = right_eye_bbx[2]
frame_dict['train/righteyebbx_h'] = right_eye_bbx[3]
if -1 not in right_eye_bbx:
num_eyes_detected += 1
left_eye_bbx_processed = self._set_strategy.get_pts(
left_eye_bbx,
frame_dict['train/image_frame_width'],
frame_dict['train/image_frame_height'])
left_eye_bbx = EyeBboxStrategy(
frame_dict['train/image_frame_width'],
frame_dict['train/image_frame_height'],
left_eye_bbx_processed).get_square_bbox()
frame_dict['train/lefteyebbx_x'] = left_eye_bbx[0]
frame_dict['train/lefteyebbx_y'] = left_eye_bbx[1]
frame_dict['train/lefteyebbx_w'] = left_eye_bbx[2]
frame_dict['train/lefteyebbx_h'] = left_eye_bbx[3]
if -1 not in left_eye_bbx:
num_eyes_detected += 1
frame_dict['train/num_eyes_detected'] = num_eyes_detected
except Exception as ex:
self._logger.add_warning(
'User {} frame {} could not draw eye bounding boxes because {}'.format(
user,
frame,
repr(ex)))
continue
def extract_bbox(self):
"""JSON tfrecord generation extract bounding boxes.
Face bounding box extracted from json.
Eye bounding boxes extracted from read landmarks.
"""
if self._landmarks_path is None:
self._extract_face_bbox()
self._extract_eye_bbox()
def extract_eye_status(self):
"""Fill eye status with value in JSON."""
for user in self._users_json.keys():
for region in self._users_json[user].keys():
for frame in self._users_json[user][region].keys():
frame_dict = self._users_json[user][region][frame]
for chunk in frame_dict:
if 'class' not in chunk:
continue
chunk_class = str(chunk['class']).lower()
frame_dict = self._users[user][region][frame]
if chunk_class == 'eyes':
# JSON labels from the labeller's perspective,
# flip for user's perspective
if 'r_status' in chunk:
frame_dict['label/left_eye_status'] = chunk['r_status']
if 'l_status' in chunk:
frame_dict['label/right_eye_status'] = chunk['l_status']
elif chunk_class == 'eyeopen':
frame_dict['label/left_eye_status'] = EyeStatus.open_eye_status
frame_dict['label/right_eye_status'] = EyeStatus.open_eye_status
elif chunk_class == 'eyeclose':
frame_dict['label/left_eye_status'] = EyeStatus.closed_eye_status
frame_dict['label/right_eye_status'] = EyeStatus.closed_eye_status
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataio/jsonlabels_strategy.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test common functions."""
import datetime
import os
import shutil
import tempfile
import unittest
from nvidia_tao_tf1.cv.common.dataio.utils import PipelineReporter
class PipelineReporterTest(unittest.TestCase):
"""Test PipelineReporter."""
script_name = 'test'
set_id = 'test-set'
def setUp(self):
self.error_path = tempfile.mkdtemp()
self.reporter = PipelineReporter(
self.error_path,
self.script_name,
self.set_id)
self.reporter.add_error('Error')
self.reporter.add_info('Info')
self.reporter.add_warning('Warning')
def tearDown(self):
shutil.rmtree(self.error_path)
def test_write_to_log(self):
run_folder_name = self.script_name + '_' + datetime.datetime.now().strftime('%Y-%m-%d')
log_path_today = os.path.join(self.error_path, run_folder_name)
error_path = os.path.join(log_path_today, 'Error')
warning_path = os.path.join(log_path_today, 'Warning')
err_lines = ''
warning_lines = ''
self.reporter.write_to_log()
err_lines = open(os.path.join(error_path, self.set_id + '.log'), 'r').read()
warning_lines = open(os.path.join(warning_path, self.set_id + '.log'), 'r').read()
self.assertIn('Error\n', err_lines)
self.assertIn('Info\n', err_lines)
self.assertNotIn('Warning\n', err_lines)
self.assertIn('Warning\n', warning_lines)
self.assertNotIn('Error\n', warning_lines)
self.assertNotIn('Info\n', warning_lines)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataio/tests/test_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test eoc strategy."""
import unittest
import mock
from nvidia_tao_tf1.cv.common.dataio.eoc_strategy import EocStrategy
from nvidia_tao_tf1.cv.common.dataio.set_label_sorter import SetLabelSorter
class EocStrategyTest(unittest.TestCase):
"""Test EocStrategy."""
set_id = 'test-eoc-set'
experiment_folder_suffix = 'test_suffix'
tfrecord_folder_name = 'TfRecords'
gt_folder_name = 'GT'
@mock.patch('os.makedirs')
def setUp(self, mock_makedirs):
self.label_sorter = SetLabelSorter(
self.experiment_folder_suffix,
False)
self.label_sorter.get_info_source_path = mock.MagicMock(
return_value=[
'json',
'test-input-path',
'Ground_Truth_DataFactory_' + self.experiment_folder_suffix])
self.eoc_strategy = EocStrategy(
self.set_id,
self.experiment_folder_suffix,
self.tfrecord_folder_name,
self.gt_folder_name,
None,
self.label_sorter)
mock_makedirs.assert_called_with(
'/home/projects1_copilot/RealTimePipeline/set/' + self.set_id +
'/Ground_Truth_DataFactory_' + self.experiment_folder_suffix)
def test_get_camera_parameters(self):
cam_params = self.eoc_strategy.get_camera_parameters()
self.assertEqual(
(None, None, None),
cam_params)
def test_extract_gaze_info(self):
frame_data_dict = {}
frame_name = ''
expected_frame_dict = {
'label/gaze_screen_x': -1,
'label/gaze_screen_y': -1,
'label/gaze_cam_x': -1,
'label/gaze_cam_y': -1,
'label/gaze_cam_z': -1,
}
self.eoc_strategy.extract_gaze_info(frame_data_dict, frame_name, '')
self.assertDictEqual(
expected_frame_dict,
frame_data_dict)
@mock.patch('os.makedirs')
@mock.patch('os.listdir', return_value=['v1'])
@mock.patch('os.path.isdir', return_value=True)
def test_using_lm_pred(self, mock_isdir, mock_listdir, mock_makedirs):
EocStrategy(
self.set_id,
self.experiment_folder_suffix,
self.tfrecord_folder_name,
self.gt_folder_name,
'fpenet_results/v2_SS80_v9',
self.label_sorter)
mock_makedirs.assert_called_with('/home/projects1_copilot/RealTimePipeline/set/' +
self.set_id +
'/Ground_Truth_Fpegaze_' + self.experiment_folder_suffix)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataio/tests/test_eoc_strategy.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test BoundingBoxStrategy concrete classes."""
import unittest
from nvidia_tao_tf1.cv.common.dataio.bbox_strategy import EyeBboxStrategy, FaceBboxStrategy
class FaceBboxStrategyTest(unittest.TestCase):
"""Test FaceBboxStrategy."""
def test_get_square_bbox(self):
face_bbox_strategy = FaceBboxStrategy(
1280, 800,
400, 200, 100, 150)
left, upper, side_len = face_bbox_strategy.get_square_bbox()
self.assertEqual(left, 352)
self.assertEqual(upper, 177)
self.assertEqual(side_len, 195)
def test_get_square_bbox_sanity_clamp(self):
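        # A box starting near the right frame edge should be clamped so the square
        # stays inside the 1280x800 frame.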
face_bbox_strategy = FaceBboxStrategy(
1280, 800,
1180, 0, 100, 150)
left, upper, side_len = face_bbox_strategy.get_square_bbox()
self.assertEqual(left, 1085)
self.assertEqual(upper, 0)
self.assertEqual(side_len, 194)
class EyeBboxStrategyTest(unittest.TestCase):
"""Test EyeBboxStrategy."""
def test_get_square_bbox(self):
eye_bbox_strategy = EyeBboxStrategy(
1280, 800,
[400, 200, 410, 210])
left, upper, w, h = eye_bbox_strategy.get_square_bbox()
self.assertEqual(left, 399)
self.assertEqual(upper, 199)
self.assertEqual(w, 10)
self.assertEqual(h, 10)
def test_get_square_bbox_sanity_clamp(self):
eye_bbox_strategy = EyeBboxStrategy(
1280, 800,
[1270, 0, 1280, 10])
left, upper, w, h = eye_bbox_strategy.get_square_bbox()
self.assertEqual(left, 1269)
self.assertEqual(upper, 0)
self.assertEqual(w, 9)
self.assertEqual(h, 10)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataio/tests/test_bbox_strategy.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test set label sorter."""
import os
import shutil
import tempfile
import unittest
from nvidia_tao_tf1.cv.common.dataio.set_label_sorter import SetLabelSorter
class SetLabelSorterTest(unittest.TestCase):
"""Test SetLabelSorter."""
experiment_folder_suffix = 'test'
def setUp(self):
self.json_folder = tempfile.mkdtemp()
self.nvhelnet_parent_folder = tempfile.mkdtemp()
self.nvhelnet_folder = os.path.join(self.nvhelnet_parent_folder, 'Nvhelnet_v11.2')
self.nvhelnet_forced = os.path.join(self.nvhelnet_parent_folder, 'Nvhelnet_forced')
os.makedirs(self.nvhelnet_folder)
os.makedirs(self.nvhelnet_forced)
def tearDown(self):
shutil.rmtree(self.json_folder)
shutil.rmtree(self.nvhelnet_parent_folder)
def test_get_info_source_path_json(self):
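        # get_info_source_path receives a callable that locates json labels (or
        # returns None) plus the parent folder holding Nvhelnet sdk labels; json
        # labels take precedence unless an sdk folder is explicitly forced.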
strategy_type, info_source_path, experiment_folder_name = \
SetLabelSorter(
self.experiment_folder_suffix,
None).get_info_source_path(
lambda _: self.json_folder,
None,
self.nvhelnet_parent_folder)
self.assertEqual(strategy_type, 'json')
self.assertEqual(info_source_path, self.json_folder)
self.assertEqual(
experiment_folder_name,
'Ground_Truth_DataFactory_' + self.experiment_folder_suffix)
def test_get_info_source_path_sdk(self):
strategy_type, info_source_path, experiment_folder_name = \
SetLabelSorter(
self.experiment_folder_suffix,
None).get_info_source_path(
lambda _: None,
None,
self.nvhelnet_parent_folder)
self.assertEqual(strategy_type, 'sdk')
self.assertEqual(info_source_path, self.nvhelnet_folder)
self.assertEqual(
experiment_folder_name,
'Ground_Truth_Nvhelnet_' + self.experiment_folder_suffix)
def test_get_info_source_path_force_sdk(self):
strategy_type, info_source_path, experiment_folder_name = \
SetLabelSorter(
self.experiment_folder_suffix,
'Nvhelnet_forced').get_info_source_path(
lambda _: self.json_folder,
None,
self.nvhelnet_parent_folder)
self.assertEqual(strategy_type, 'sdk')
self.assertEqual(info_source_path, self.nvhelnet_forced)
self.assertEqual(
experiment_folder_name,
'Ground_Truth_Nvhelnet_' + self.experiment_folder_suffix)
def test_get_info_source_path_sdk_nonexisting(self):
with self.assertRaises(IOError):
SetLabelSorter(
self.experiment_folder_suffix,
'Nvhelnet_nonexisting').get_info_source_path(
lambda _: self.json_folder,
None,
self.nvhelnet_parent_folder)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataio/tests/test_set_label_sorter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test eye features generator."""
import unittest
import numpy as np
import pytest
from nvidia_tao_tf1.cv.common.dataio.eye_features_generator import EyeFeaturesGenerator
class EyeFeaturesGeneratorTest(unittest.TestCase):
"""Test EyeFeaturesGenerator."""
# 104 (x, y) coordinates
# Landmarks from set/germany-1-gaze-1/Data/VdChfpnvDBu7MByIYFUCwM video1_15_750_vc00_07.png
landmarks_2D = np.asarray([
240.4823455810547,
551.151123046875,
249.34622192382812,
591.03857421875,
259.3180847167969,
628.1560668945312,
273.7218933105469,
667.489501953125,
293.04754638671875,
704.656982421875,
318.59527587890625,
732.306640625,
346.848876953125,
752.8043212890625,
383.3990783691406,
778.6015625,
421.562255859375,
781.989013671875,
459.86334228515625,
766.6541748046875,
487.89031982421875,
733.9591064453125,
514.6818237304688,
702.6900024414062,
532.0092163085938,
668.2235717773438,
543.92333984375,
634.5968017578125,
550.9884033203125,
603.5624389648438,
553.140869140625,
579.9955444335938,
555.6583862304688,
547.2760009765625,
272.3310241699219,
488.6089172363281,
299.0483703613281,
477.6076354980469,
323.4083251953125,
471.3211975097656,
356.01922607421875,
475.64312744140625,
377.2359619140625,
489.0018005371094,
445.6009826660156,
482.3224792480469,
472.71124267578125,
470.535400390625,
495.1066589355469,
467.3921813964844,
514.7517700195312,
474.8573303222656,
530.0750122070312,
489.78759765625,
413.2937927246094,
548.6087036132812,
416.3590087890625,
571.7369995117188,
419.1455383300781,
594.0293579101562,
422.4565124511719,
615.2787475585938,
376.52923583984375,
629.8828735351562,
398.4658508300781,
637.195068359375,
420.204833984375,
639.1713256835938,
441.1532897949219,
636.404541015625,
457.7870178222656,
625.8230590820312,
315.46246337890625,
529.6859130859375,
325.73907470703125,
524.547607421875,
370.7980651855469,
526.52392578125,
374.3553466796875,
535.4171142578125,
366.64788818359375,
536.010009765625,
325.34381103515625,
533.8361206054688,
454.6097412109375,
531.4393920898438,
463.1083984375,
519.6356811523438,
496.1587219238281,
516.6847534179688,
502.768798828125,
522.1144409179688,
497.8112487792969,
526.7178955078125,
464.0527038574219,
529.07861328125,
370.99566650390625,
690.7520141601562,
389.770263671875,
676.1276245117188,
403.9993896484375,
670.7916870117188,
422.3347473144531,
672.0076293945312,
434.2623291015625,
667.4786376953125,
448.6631164550781,
670.0011596679688,
466.8448181152344,
681.6611328125,
454.66357421875,
697.7672119140625,
440.85565185546875,
706.019287109375,
426.598876953125,
706.5772094726562,
410.03192138671875,
709.9840698242188,
391.35125732421875,
707.155029296875,
389.770263671875,
687.9852294921875,
406.3709411621094,
685.4160766601562,
423.9597473144531,
685.6137084960938,
436.4102478027344,
683.0445556640625,
451.2322692871094,
683.0445556640625,
437.1334228515625,
687.640380859375,
424.5090026855469,
689.529052734375,
407.2795715332031,
689.8966064453125,
348.108154296875,
525.959228515625,
351.7036437988281,
522.293212890625,
355.5106506347656,
525.3952026367188,
352.1381530761719,
528.5972290039062,
481.3041076660156,
521.7062377929688,
483.9189758300781,
518.29296875,
488.6904296875,
521.447998046875,
484.9493408203125,
524.9349975585938,
245.04425048828125,
623.6600341796875,
227.25071716308594,
515.8814697265625,
548.7801513671875,
612.8729248046875,
556.8007202148438,
507.0892639160156,
350.721923828125,
515.5374755859375,
352.60125732421875,
536.3585815429688,
351.1640625,
519.3336181640625,
356.20318603515625,
520.873046875,
359.4877624511719,
524.7459106445312,
357.0066223144531,
530.8558959960938,
352.21978759765625,
532.0278930664062,
347.2783508300781,
530.6721801757812,
344.4820861816406,
526.685302734375,
347.42608642578125,
521.79736328125,
351.86749267578125,
525.564453125,
483.0555725097656,
511.1482849121094,
486.0395202636719,
531.6798706054688,
483.69964599609375,
514.4667358398438,
488.69964599609375,
516.5667114257812,
492.0996398925781,
521.3167114257812,
490.4996643066406,
525.0667114257812,
485.5496520996094,
528.3167114257812,
480.2496643066406,
525.8167114257812,
477.0496520996094,
522.11669921875,
479.5996398925781,
516.8167114257812,
484.44964599609375,
521.4166870117188,
344.7006530761719,
523.4890747070312,
476.96893310546875,
516.9593505859375
]).reshape((-1, 2))
def test_get_eye_features_with_pupils(self):
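        # All 104 landmarks include pupil points, so the pupil-dependent entries of
        # the feature vector are populated alongside the normalized eye-shape features.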
# Eye features from Ground_Truth_DataFactory_v2/GT_combined/eye_feature_combined.txt
expected_eye_features = [
0.00000000000000000,
0.00000000000000000,
0.18428853505018536,
-0.24571804673358047,
0.86001316356753166,
-0.30714755841697561,
0.98287218693432188,
-0.18428853505018536,
0.88048966746199675,
-0.10238251947232520,
0.20476503894465040,
-0.04095300778893008,
0.61429511683395122,
-0.20476503894465040,
30.00000000000000000,
-10.00000000000000000,
21.00000000000000000,
2.00000000000000000,
-12.00000000000000000,
5.00000000000000000,
-18.00000000000000000,
-1.00000000000000000,
-13.00000000000000000,
-5.00000000000000000,
20.00000000000000000,
-8.00000000000000000,
0.58064516129032262,
1.66666666666666674,
0.00000000000000000,
0.00000000000000000,
0.16862183682891940,
-0.08431091841445970,
0.92742010255905660,
-0.05058655104867581,
0.99486883729062436,
0.10117310209735163,
0.85997136782748884,
0.11803528578024357,
0.16862183682891940,
0.06744873473156776,
0.61546970442555582,
-0.06744873473156776,
36.50000000000000000,
-4.00000000000000000,
26.50000000000000000,
1.00000000000000000,
-18.50000000000000000,
-1.00000000000000000,
-22.50000000000000000,
-10.00000000000000000,
-14.50000000000000000,
-11.00000000000000000,
26.50000000000000000,
-8.00000000000000000,
0.59999999999999998,
5.50000000000000000]
pupil_eye_features = EyeFeaturesGenerator(self.landmarks_2D, 104).get_eye_features()
self.assertEqual(len(pupil_eye_features), len(expected_eye_features))
for i in range(len(pupil_eye_features)):
assert pupil_eye_features[i] == pytest.approx(expected_eye_features[i], 1.0)
def test_get_eye_features_without_pupils(self):
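        # With only the first 68 landmarks (no pupil points), the pupil-dependent
        # entries of the feature vector fall back to -1.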
expected_eye_features = [
0.00000000000000000,
0.00000000000000000,
0.18428853505018536,
-0.24571804673358047,
0.86001316356753166,
-0.30714755841697561,
0.98287218693432188,
-0.18428853505018536,
0.88048966746199675,
-0.10238251947232520,
0.20476503894465040,
-0.04095300778893008,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
0.00000000000000000,
0.00000000000000000,
0.16862183682891940,
-0.08431091841445970,
0.92742010255905660,
-0.05058655104867581,
0.99486883729062436,
0.10117310209735163,
0.85997136782748884,
0.11803528578024357,
0.16862183682891940,
0.06744873473156776,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1]
no_pupil_eye_features = EyeFeaturesGenerator(self.landmarks_2D, 68).get_eye_features()
self.assertEqual(len(no_pupil_eye_features), len(expected_eye_features))
for i in range(len(no_pupil_eye_features)):
assert no_pupil_eye_features[i] == pytest.approx(expected_eye_features[i], 1.0)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataio/tests/test_eye_features_generator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test common functions."""
import unittest
import numpy as np
from nvidia_tao_tf1.cv.common.dataio.theta_phi_angle_utils import (
normalizeEye,
normalizeFace,
normalizeFullFrame
)
from nvidia_tao_tf1.cv.common.dataio.theta_phi_lm_utils import (
AnthropometicPtsGenerator,
projectObject2Camera)
class ThetaPhiAngleUtilsTest(unittest.TestCase):
"""Test PipelineReporter."""
def setUp(self):
# Create black blank image
self.frame = np.zeros((800, 1280, 1), np.uint8)
# Params from data collected from germany-1-gaze-1 config
self.distortion_coeffs = np.asarray([
-0.38252,
0.195521,
0.000719038,
0.00196389,
-0.0346336])
self.camera_matrix = np.asarray([
[1329.98, 0, 646.475],
[0, 1329.48, 390.789],
[0, 0, 1]])
self.face_cam_mm = np.asarray([-98.20092574611859924,
13.586749303863609923,
591.0001357050918288])
self.leye_cam_mm = np.asarray([-71.130428199788372425,
-12.089548827437531463,
575.3897685617227298])
self.gt_cam_mm = np.asarray([-1099.5803948625,
31.06042467049997,
                                     -44.927703281250004])
self.rot_mat = np.asarray([
[0.75789226, -0.01591712, 0.65218553],
[-0.09241607, 0.98700119, 0.13148351],
[-0.64580074, -0.15992275, 0.74656957]])
self.tvec = np.asarray([
[-71.4840822],
[45.93883556],
[617.72451667]])
self.ec_pxs = np.asarray([
[482.06115723, 362.85522461],
[385.83392334, 377.89053345]])
self.landmarks = np.ones((68, 1, 2), dtype=np.float32)
self.norm_face_theta = 0.08371164488099574275
self.norm_face_phi = 1.16868653667981276
self.norm_leye_theta = 0.11742628651787750257
self.norm_leye_phi = 1.1475201861509552997
self._anthro_pts = AnthropometicPtsGenerator()
def test_normalize_fullframe(self):
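        # Build the unit gaze vector from the 3D face position to the gaze target
        # (both in camera space, mm) before normalizing the full frame.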
face_gaze_vec = self.gt_cam_mm - self.face_cam_mm
face_gv_mag = np.sqrt(face_gaze_vec[0] ** 2 + face_gaze_vec[1] ** 2 + face_gaze_vec[2] ** 2)
face_gaze_vec = face_gaze_vec / face_gv_mag
_, norm_face_gaze_theta, norm_face_gaze_phi, _, _, _, _, _, _, _ = \
normalizeFullFrame(self.frame, self.face_cam_mm, self.ec_pxs, self.rot_mat,
face_gaze_vec, self.landmarks, self.camera_matrix,
self.distortion_coeffs, 'modified', 2.0)
self.assertAlmostEqual(self.norm_face_theta, norm_face_gaze_theta[0], 6)
self.assertAlmostEqual(self.norm_face_phi, norm_face_gaze_phi[0], 6)
def test_normalize_face(self):
face_gaze_vec = self.gt_cam_mm - self.face_cam_mm
face_gv_mag = np.sqrt(face_gaze_vec[0] ** 2 + face_gaze_vec[1] ** 2 + face_gaze_vec[2] ** 2)
face_gaze_vec = face_gaze_vec / face_gv_mag
imageWidth = 224
imageHeight = 224
_, norm_face_gaze_theta, norm_face_gaze_phi, _, _, _, _ = \
normalizeFace(self.frame, self.face_cam_mm, self.rot_mat, face_gaze_vec,
self.camera_matrix, self.distortion_coeffs, 'modified',
imageWidth, imageHeight)
self.assertAlmostEqual(self.norm_face_theta, norm_face_gaze_theta[0], 6)
self.assertAlmostEqual(self.norm_face_phi, norm_face_gaze_phi[0], 6)
def test_normalize_eye(self):
leye_gaze_vec = self.gt_cam_mm - self.leye_cam_mm
leye_gv_mag = np.sqrt(leye_gaze_vec[0] ** 2 + leye_gaze_vec[1] ** 2 + leye_gaze_vec[2] ** 2)
leye_gaze_vec = leye_gaze_vec / leye_gv_mag
imageWidth = 120
imageHeight = imageWidth
_, norm_leye_gaze_theta, norm_leye_gaze_phi, _, _, _ = \
normalizeEye(self.frame, self.leye_cam_mm, self.rot_mat, leye_gaze_vec,
self.camera_matrix, self.distortion_coeffs, 'modified',
imageWidth, imageHeight)
self.assertAlmostEqual(self.norm_leye_theta, norm_leye_gaze_theta[0], 6)
self.assertAlmostEqual(self.norm_leye_phi, norm_leye_gaze_phi[0], 6)
def test_3D_landmarks(self):
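        # Project the 38 anthropometric 3D landmarks from object space into camera
        # space using the head pose (rot_mat, tvec) and flatten the result.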
no_of_3D_landmarks = 38
landmarks_3D = -1.0 * np.ones((no_of_3D_landmarks, 3), dtype=np.float32)
landmarks_obj_3D = self._anthro_pts.get_landmarks_export_3D()
for ind in range(no_of_3D_landmarks):
obj_mm = np.reshape(landmarks_obj_3D[ind, :], (1, 3))
cam_mm = projectObject2Camera(obj_mm, self.rot_mat, self.tvec)
landmarks_3D[ind] = cam_mm.transpose()
landmarks_3D = landmarks_3D.reshape(-1)
self.assertEqual(no_of_3D_landmarks*3, landmarks_3D.shape[0])
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataio/tests/test_theta_phi_angle_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test gaze strategy."""
import sys
import unittest
import mock
import numpy as np
from nvidia_tao_tf1.cv.common.dataio.gaze_strategy import GazeStrategy
from nvidia_tao_tf1.cv.common.dataio.set_label_sorter import SetLabelSorter
# Data collected from germany-1-gaze-1 config
test_R = 'nvidia_tao_tf1/cv/common/dataio/testdata/R.txt'
test_T = 'nvidia_tao_tf1/cv/common/dataio/testdata/T.txt'
test_cam_params = 'nvidia_tao_tf1/cv/common/dataio/testdata/camera_parameters.txt'
test_TV = 'nvidia_tao_tf1/cv/common/dataio/testdata/TV_size'
test_resolution = 'nvidia_tao_tf1/cv/common/dataio/testdata/resolution.txt'
real_open = open
real_loadtxt = np.loadtxt
if sys.version_info >= (3, 0):
_BUILTIN_OPEN = 'builtins.open'
else:
_BUILTIN_OPEN = '__builtin__.open'
def _get_test_file(fname):
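    # Redirect any requested calibration file to the checked-in fixture under testdata/.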
if 'R.txt' in fname:
fname = test_R
elif 'T.txt' in fname:
fname = test_T
elif 'camera_parameters' in fname:
fname = test_cam_params
elif 'TV_size' in fname:
fname = test_TV
elif 'resolution' in fname:
fname = test_resolution
else:
raise IOError('Could not find file for ' + fname)
return fname
def _open(fname, *args, **kwargs):
return real_open(_get_test_file(fname), *args, **kwargs)
def _loadtxt(fname, *args, **kwargs):
return real_loadtxt(_get_test_file(fname), *args, **kwargs)
def _use_cosmos639(*args, **kwargs):
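    # Stand-in for os.path.exists: report that the cosmos639 mount is available
    # while cosmos10 is not, so the strategy writes to the cosmos639 output paths.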
cosmos_path = args[0]
if 'cosmos' not in cosmos_path:
return True
if 'cosmos10' in cosmos_path:
return False
if 'cosmos639' in cosmos_path:
return True
raise IOError('No such NFS cosmos storage for gaze strategy')
class GazeStrategyTest(unittest.TestCase):
"""Test GazeStrategy."""
set_id = 'test-gaze-set'
experiment_folder_suffix = 'test_suffix'
tfrecord_folder_name = 'TfRecords'
gt_folder_name = 'GT'
@mock.patch(_BUILTIN_OPEN, wraps=_open)
@mock.patch('numpy.loadtxt', wraps=_loadtxt)
@mock.patch('os.path.exists', return_value=True)
@mock.patch('os.path.isfile', wraps=lambda f: 'board_size' not in f)
@mock.patch('os.makedirs')
@mock.patch('os.listdir', return_value=[])
def setUp(self, mock_listdir, mock_makedirs,
mock_isfile, mock_pathexists, mock_loadtxt, mock_open):
self.label_sorter = SetLabelSorter(
self.experiment_folder_suffix,
False)
self.label_sorter.get_info_source_path = mock.MagicMock(
return_value=[
'json',
'test-input-path',
'Ground_Truth_DataFactory_' + self.experiment_folder_suffix])
self.gaze_strategy = GazeStrategy(
self.set_id,
self.experiment_folder_suffix,
self.tfrecord_folder_name,
self.gt_folder_name,
False,
False,
None,
self.label_sorter,
"")
def test_get_camera_parameters(self):
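        # Intrinsics come back as (camera matrix, distortion coefficients, first five
        # distortion coefficients); extrinsics and screen parameters are keyed here by
        # an empty string (presumably the region id).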
expected_distortion_coeffs = np.asarray([
-0.38252,
0.195521,
0.000719038,
0.00196389,
-0.0346336])
cam_intrinsics, cam_extrinsics, screen_params = self.gaze_strategy.get_camera_parameters()
assert(len(cam_intrinsics) == 3)
np.testing.assert_array_equal(cam_intrinsics[0], np.asarray([
[1329.98, 0, 646.475],
[0, 1329.48, 390.789],
[0, 0, 1]]))
np.testing.assert_array_equal(cam_intrinsics[1], expected_distortion_coeffs)
np.testing.assert_array_equal(cam_intrinsics[2], expected_distortion_coeffs[:5])
assert(len(cam_extrinsics['']) == 2)
np.testing.assert_array_equal(cam_extrinsics[''][0], np.asarray([
[-0.999935, 0.00027103, -0.0113785],
[-1.12976e-05, 0.999692, 0.0248051],
[0.0113817, 0.0248036, -0.999628]]))
np.testing.assert_array_equal(cam_extrinsics[''][1], np.asarray([
374.8966, -433.8045, -72.2462]))
assert(len(screen_params['']) == 4)
self.assertTupleEqual(screen_params[''], (1650.0, 930.0, 1920.0, 1080.0))
def test_extract_gaze_info(self):
frame_data_dict = {}
frame_name = 'video1_1716_540_vc00_02.png'
expected_frame_dict = {
'label/gaze_screen_x': 1716.0,
'label/gaze_screen_y': 540.0,
'label/gaze_cam_x': -1099.5804443359375,
'label/gaze_cam_y': 31.0604248046875,
'label/gaze_cam_z': -44.927703857421875
}
self.gaze_strategy.extract_gaze_info(frame_data_dict, frame_name, '')
self.assertEqual(expected_frame_dict.keys(), frame_data_dict.keys())
for key, val in frame_data_dict.items():
self.assertAlmostEqual(val, expected_frame_dict[key], 3)
def test_get_pts_calibrated(self):
original_pts = [35.0, 25.0]
pts = self.gaze_strategy.get_pts(original_pts, 1280, 800)
self.assertEqual(pts, original_pts)
@mock.patch(_BUILTIN_OPEN, wraps=_open)
@mock.patch('numpy.loadtxt', wraps=_loadtxt)
@mock.patch('os.path.exists', return_value=True)
@mock.patch('os.path.isfile', wraps=lambda f: 'board_size' not in f)
@mock.patch('os.makedirs')
@mock.patch('os.listdir', return_value=[])
def test_get_pts_not_calibrated(self, mock_listdir, mock_makedirs, mock_isfile,
mock_pathexists, mock_loadtxt, mock_open):
original_pts = [450.0, 525.0]
undistort_gaze_strategy = GazeStrategy(
self.set_id,
self.experiment_folder_suffix,
self.tfrecord_folder_name,
self.gt_folder_name,
False,
True,
None,
self.label_sorter,
"")
pts = undistort_gaze_strategy.get_pts(original_pts, 1280, 800)
self.assertListEqual(pts, [447.3650540173241, 526.7086949792829])
@mock.patch(_BUILTIN_OPEN, wraps=_open)
@mock.patch('numpy.loadtxt', wraps=_loadtxt)
@mock.patch('os.path.exists', return_value=True)
@mock.patch('os.path.isfile', wraps=lambda f: 'board_size' not in f)
@mock.patch('os.makedirs')
@mock.patch('os.listdir', return_value=['v1'])
@mock.patch('os.path.isdir', return_value=True)
def test_using_lm_pred_cosmos10(self,
mock_isdir,
mock_listdir,
mock_makedirs,
mock_isfile,
mock_pathexists,
mock_loadtxt,
mock_open):
GazeStrategy(
self.set_id,
self.experiment_folder_suffix,
self.tfrecord_folder_name,
self.gt_folder_name,
False,
False,
'fpenet_results/v2_SS80_v9',
self.label_sorter,
"")
mock_makedirs.assert_called_with('/home/copilot.cosmos10/RealTimePipeline/set/' +
self.set_id +
'/Ground_Truth_Fpegaze_' + self.experiment_folder_suffix)
@mock.patch(_BUILTIN_OPEN, wraps=_open)
@mock.patch('numpy.loadtxt', wraps=_loadtxt)
@mock.patch('os.path.exists', side_effect=_use_cosmos639)
@mock.patch('os.path.isfile', wraps=lambda f: 'board_size' not in f)
@mock.patch('os.makedirs')
@mock.patch('os.listdir', return_value=['v1'])
@mock.patch('os.path.isdir', return_value=True)
def test_using_lm_pred_cosmos639(self,
mock_isdir,
mock_listdir,
mock_makedirs,
mock_isfile,
mock_pathexists,
mock_loadtxt,
mock_open):
GazeStrategy(
self.set_id,
self.experiment_folder_suffix,
self.tfrecord_folder_name,
self.gt_folder_name,
False,
False,
'fpenet_results/v2_SS80_v9',
self.label_sorter,
"")
mock_makedirs.assert_called_with('/home/driveix.cosmos639/GazeData/postData/' +
self.set_id +
'/Ground_Truth_Fpegaze_' + self.experiment_folder_suffix)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataio/tests/test_gaze_strategy.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test gt converter."""
import os
import shutil
import tempfile
import unittest
import numpy as np
from nvidia_tao_tf1.cv.common.dataio.data_converter import DataConverter
from nvidia_tao_tf1.cv.common.dataio.gt_converter import GtConverter
class GtConverterTest(unittest.TestCase):
"""Test GtConverter."""
def setUp(self):
self.user_gt_path = tempfile.mkdtemp()
self.combined_gt_path = tempfile.mkdtemp()
self.json_combined_all = DataConverter.read_tfrecords(
'nvidia_tao_tf1/cv/common/dataio/testdata/combined.tfrecords',
False,
False)
users = ['TYEMA8OgcTvGl6ct', 'WfsYHoMi_AmWnAIL', 'VSZMWvxcTBNbp_ZW']
self.users_dict = {}
self.json_combined = []
sample = 0
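        # Re-key the three sample records under deterministic user ids, convert list
        # fields to numpy arrays, and fill in placeholder normalization matrices plus
        # face camera coordinates (copied from the mid point).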
for record in self.json_combined_all:
path = record['train/image_frame_name'].split(b'/')
orig_user_name = path[-2]
user_name = users[sample]
frame_name = path[-1][:-4].decode() # get rid of .png in path
# Type casting is necessary
record['train/image_frame_name'] = record['train/image_frame_name'].\
replace(orig_user_name, user_name.encode()).decode()
record['train/eye_features'] = np.asarray(record['train/eye_features'], np.float32)
record['train/landmarks'] = np.asarray(record['train/landmarks'], np.float32)
record['train/landmarks_occ'] = np.asarray(record['train/landmarks_occ'], np.int64)
record['train/norm_landmarks'] = np.asarray(record['train/norm_landmarks'], np.float32)
record['train/landmarks_3D'] = np.asarray(record['train/landmarks_3D'], np.float32)
record['train/norm_face_cnv_mat'] = -1.0 * np.ones((9,), dtype=np.float32)
record['train/norm_leye_cnv_mat'] = -1.0 * np.ones((9,), dtype=np.float32)
record['train/norm_reye_cnv_mat'] = -1.0 * np.ones((9,), dtype=np.float32)
record['label/face_cam_x'] = record['label/mid_cam_x']
record['label/face_cam_y'] = record['label/mid_cam_y']
record['label/face_cam_z'] = record['label/mid_cam_z']
self.users_dict[user_name] = {'': {frame_name: record}}
self.json_combined.append(record)
sample += 1
if sample > 2:
return
def tearDown(self):
shutil.rmtree(self.user_gt_path)
shutil.rmtree(self.combined_gt_path)
def test_write_landmarks(self):
GtConverter(
self.user_gt_path,
False).write_landmarks(self.users_dict)
assert os.path.exists(os.path.join(
self.user_gt_path,
'TYEMA8OgcTvGl6ct_landmarks.txt'))
assert os.path.exists(os.path.join(
self.user_gt_path,
'WfsYHoMi_AmWnAIL_landmarks.txt'))
assert os.path.exists(os.path.join(
self.user_gt_path,
'VSZMWvxcTBNbp_ZW_landmarks.txt'))
def test_write_gt_files(self):
GtConverter(
self.user_gt_path,
False).write_gt_files(self.users_dict)
assert os.path.exists(os.path.join(
self.user_gt_path,
'TYEMA8OgcTvGl6ct.txt'))
assert os.path.exists(os.path.join(
self.user_gt_path,
'WfsYHoMi_AmWnAIL.txt'))
assert os.path.exists(os.path.join(
self.user_gt_path,
'VSZMWvxcTBNbp_ZW.txt'))
def test_write_combined_landmarks(self):
GtConverter(
self.combined_gt_path,
False).write_combined_landmarks(
self.users_dict,
['TYEMA8OgcTvGl6ct'],
['WfsYHoMi_AmWnAIL'],
['VSZMWvxcTBNbp_ZW'])
assert os.path.exists(os.path.join(
self.combined_gt_path,
'test_landmarks.txt'))
assert os.path.exists(os.path.join(
self.combined_gt_path,
'validate_landmarks.txt'))
assert os.path.exists(os.path.join(
self.combined_gt_path,
'train_landmarks.txt'))
test_path = os.path.join(self.combined_gt_path, 'test_landmarks.txt')
assert os.path.exists(test_path)
validate_path = os.path.join(self.combined_gt_path, 'validate_landmarks.txt')
assert os.path.exists(validate_path)
train_path = os.path.join(self.combined_gt_path, 'train_landmarks.txt')
assert os.path.exists(train_path)
test_gt = open(test_path).readlines()
self.assertEqual(len(test_gt), 1)
self.assertIn('TYEMA8OgcTvGl6ct', test_gt[0])
validate_gt = open(validate_path).readlines()
self.assertEqual(len(validate_gt), 1)
self.assertIn('WfsYHoMi_AmWnAIL', validate_gt[0])
train_gt = open(train_path).readlines()
self.assertEqual(len(train_gt), 1)
self.assertIn('VSZMWvxcTBNbp_ZW', train_gt[0])
def test_write_combined_gt_files(self):
GtConverter(
self.combined_gt_path,
False).write_combined_gt_files(
self.users_dict,
['TYEMA8OgcTvGl6ct'],
['WfsYHoMi_AmWnAIL'],
['VSZMWvxcTBNbp_ZW'])
assert os.path.exists(os.path.join(
self.combined_gt_path,
'test.txt'))
assert os.path.exists(os.path.join(
self.combined_gt_path,
'validate.txt'))
assert os.path.exists(os.path.join(
self.combined_gt_path,
'train.txt'))
test_path = os.path.join(self.combined_gt_path, 'test.txt')
assert os.path.exists(test_path)
validate_path = os.path.join(self.combined_gt_path, 'validate.txt')
assert os.path.exists(validate_path)
train_path = os.path.join(self.combined_gt_path, 'train.txt')
assert os.path.exists(train_path)
test_gt = open(test_path).readlines()
self.assertEqual(len(test_gt), 1)
self.assertIn('TYEMA8OgcTvGl6ct', test_gt[0])
validate_gt = open(validate_path).readlines()
self.assertEqual(len(validate_gt), 1)
self.assertIn('WfsYHoMi_AmWnAIL', validate_gt[0])
train_gt = open(train_path).readlines()
self.assertEqual(len(train_gt), 1)
self.assertIn('VSZMWvxcTBNbp_ZW', train_gt[0])
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataio/tests/test_gt_converter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test data converter."""
import os
import shutil
import tempfile
import unittest
import numpy as np
from nvidia_tao_tf1.cv.common.dataio.data_converter import DataConverter
class DataConverterTest(unittest.TestCase):
"""Test DataConverter."""
def setUp(self):
self.user_tfrecord_path = tempfile.mkdtemp()
self.combined_tfrecord_path = tempfile.mkdtemp()
# Using 3 samples each from train, test, validate of s498-gaze-0
self.json_combined_all = DataConverter.read_tfrecords(
'nvidia_tao_tf1/cv/common/dataio/testdata/combined.tfrecords',
False,
False)
users = ['TYEMA8OgcTvGl6ct', 'WfsYHoMi_AmWnAIL', 'VSZMWvxcTBNbp_ZW']
self.users_dict = {}
self.json_combined = []
sample = 0
for record in self.json_combined_all:
path = record['train/image_frame_name'].split(b'/')
orig_user_name = path[-2]
user_name = users[sample]
frame_name = path[-1][:-4].decode() # get rid of .png in path
# Type casting is necessary
record['train/image_frame_name'] = record['train/image_frame_name'].\
replace(orig_user_name, user_name.encode()).decode()
record['train/eye_features'] = np.asarray(record['train/eye_features'], np.float32)
record['train/landmarks'] = np.asarray(record['train/landmarks'], np.float32)
record['train/landmarks_occ'] = np.asarray(record['train/landmarks_occ'], np.int64)
record['train/norm_landmarks'] = np.asarray(record['train/norm_landmarks'], np.float32)
record['train/landmarks_3D'] = np.asarray(record['train/landmarks_3D'], np.float32)
self.users_dict[user_name] = {'': {frame_name: record}}
self.json_combined.append(record)
sample += 1
if sample > 2:
return
def tearDown(self):
shutil.rmtree(self.user_tfrecord_path)
shutil.rmtree(self.combined_tfrecord_path)
def test_read_tfrecords(self):
self.assertEqual(len(self.json_combined), 3)
self.assertSetEqual(
set(self.json_combined[0]),
set(DataConverter.feature_to_type))
def test_write_user_tfrecords(self):
DataConverter(
self.user_tfrecord_path,
False).write_user_tfrecords(self.users_dict)
assert os.path.exists(os.path.join(
self.user_tfrecord_path,
'TYEMA8OgcTvGl6ct.tfrecords'))
assert os.path.exists(os.path.join(
self.user_tfrecord_path,
'WfsYHoMi_AmWnAIL.tfrecords'))
assert os.path.exists(os.path.join(
self.user_tfrecord_path,
'VSZMWvxcTBNbp_ZW.tfrecords'))
def test_write_combined_tfrecords(self):
DataConverter(
self.combined_tfrecord_path,
False).write_combined_tfrecords(
self.users_dict,
['TYEMA8OgcTvGl6ct'],
['WfsYHoMi_AmWnAIL'],
['VSZMWvxcTBNbp_ZW'])
test_path = os.path.join(self.combined_tfrecord_path, 'test.tfrecords')
assert os.path.exists(test_path)
validate_path = os.path.join(self.combined_tfrecord_path, 'validate.tfrecords')
assert os.path.exists(validate_path)
train_path = os.path.join(self.combined_tfrecord_path, 'train.tfrecords')
assert os.path.exists(train_path)
test_json = DataConverter.read_tfrecords(test_path, False, False)
self.assertEqual(len(test_json), 1)
self.assertIn(b'TYEMA8OgcTvGl6ct', test_json[0]['train/image_frame_name'])
validate_json = DataConverter.read_tfrecords(validate_path, False, False)
self.assertEqual(len(validate_json), 1)
self.assertIn(b'WfsYHoMi_AmWnAIL', validate_json[0]['train/image_frame_name'])
train_json = DataConverter.read_tfrecords(train_path, False, False)
self.assertEqual(len(train_json), 1)
self.assertIn(b'VSZMWvxcTBNbp_ZW', train_json[0]['train/image_frame_name'])
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataio/tests/test_data_converter.py |
tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataio/datalake/__init__.py |
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converts a gaze table from the datalake to TFRecord."""
import argparse
import maglev
import numpy as np
import tensorflow as tf
from tqdm import tqdm
def determine_tf_type(value):
"""Convert value to respective TF Feature type."""
if type(value) == str:
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value.encode()]))
if type(value) == float:
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
if type(value) == bool:
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
if type(value) == np.ndarray:
assert value.dtype == np.float64
return tf.train.Feature(float_list=tf.train.FloatList(value=value.reshape(-1)))
return None
def write_from_pandas(df, writer):
"""Writes a TFRecord from a Pandas DF."""
columns = df.columns
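    # Each DataFrame row becomes one tf.train.Example whose feature map mirrors the
    # table columns.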
for _, row in df.iterrows():
features_dict = {}
for col in columns:
features_dict[col] = determine_tf_type(row[col])
example = tf.train.Example(features=tf.train.Features(feature=features_dict))
writer.write(example.SerializeToString())
def write_to_tfrecord(tbl_name, tf_folder):
"Converts a gaze table to a TFRecord."
client = maglev.Client.default_service_client()
tbl = client.get_table(table=tbl_name, database="driveix")
tf_filepath = "{}/{}.tfrecords".format(tf_folder, tbl_name)
writer = tf.python_io.TFRecordWriter(tf_filepath)
for pq in tqdm(tbl._files):
# convert every partition parquet into pandas df and write to TFRecord
df = pq.to_pandas()
        write_from_pandas(df, writer)
    writer.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-p",
"--path",
type=str,
default="/home/driveix.cosmos639/eddiew/tfrecords/",
help="Folder path to save generated TFrecord",
)
parser.add_argument(
"-t",
"--tbl",
type=str,
default="gaze_kpi_1",
help="Table from driveix to convert.",
)
args = parser.parse_args()
tbl_name = args.tbl
tf_folder = args.path
write_to_tfrecord(tbl_name, tf_folder)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataio/datalake/to_tfrecord.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import unittest
import numpy as np
import pandas as pd
import tensorflow as tf
from nvidia_tao_tf1.cv.common.dataio.datalake.to_tfrecord import (
write_from_pandas,
)
class ToTFRecordTest(unittest.TestCase):
def test_pandas_to_tfrecord(self):
"""Test conversion from Pandas DF to TFRecord."""
test_tf_folder = '/tmp/'
test_tf_filepath = "{}/test.tfrecords".format(test_tf_folder)
test_writer = tf.python_io.TFRecordWriter(test_tf_filepath)
test_data = {
'cosmos': 'driveix.cosmos639',
'facebbx_h': 439.0,
'facebbx_w': 439.0,
'facebbx_x': 429.0,
'facebbx_y': 252.0,
'gaze_cam_x': -1043.777333,
'gaze_cam_y': -1.3017040000000009,
'gaze_cam_z': 539.090724,
'gaze_leye_phi': 1.366890967655318,
'gaze_leye_theta': 0.06599873509000821,
'gaze_mid_phi': 1.3407598348898804,
'gaze_mid_theta': 0.07245378463757722,
'gaze_origin_facecenter_x': -51.39234825144855,
'gaze_origin_facecenter_y': 92.04489429673804,
'gaze_origin_facecenter_z': 761.1916839669866,
'gaze_origin_leye_x': -20.84654251367733,
'gaze_origin_leye_y': 67.7389280534117,
'gaze_origin_leye_z': 750.6114456760628,
'gaze_origin_midpoint_x': -44.275693768483954,
'gaze_origin_midpoint_y': 73.20562575359814,
'gaze_origin_midpoint_z': 773.155872135953,
'gaze_origin_reye_x': -67.70484502329059,
'gaze_origin_reye_y': 78.67232345378457,
'gaze_origin_reye_z': 795.7002985958433,
'gaze_phi': 1.340302336944776,
'gaze_reye_phi': 1.3137137062342341,
'gaze_reye_theta': 0.0790765716648666,
'gaze_screen_x': 0.0,
'gaze_theta': 0.07253765337743741,
'hp_phi': 0.857525957552,
'hp_pitch': -37.9900557691,
'hp_roll': -13.1337805057,
'hp_theta': 0.49720801543,
'hp_yaw': -43.1392202242,
'image_frame_name': '2_0_vc00_52.png',
'image_frame_path': '/home/driveix.cosmos639/GazeData/orgData/\
s589-gaze-incar-lincoln-car22-0/pngData/RttjJvFIa5uMqEpo/region_9',
'is_valid_tp': True,
'landmarks_source': 'Nvhelnet_v12',
'leyebbx_h': 443.0,
'leyebbx_w': 614.0,
'leyebbx_x': 574.0,
'leyebbx_y': 403.0,
'norm_face_gaze_phi': 1.4057945156534788,
'norm_face_gaze_theta': 0.1869903503541677,
'norm_face_hp_phi': 0.8400734880627186,
'norm_face_hp_theta': 0.5104548952594155,
'norm_face_warping_matrix': np.array([
0.9901051,
-0.11478256,
0.08072733,
0.12336678,
0.986143,
-0.11091729,
-0.06687732,
0.11977884,
0.9905455,
]),
'norm_facebbx_h': 448.0,
'norm_facebbx_w': 448.0,
'norm_facebbx_x': 416.0,
'norm_facebbx_y': 176.0,
'norm_image_frame_path': '/home/driveix.cosmos639/GazeData/orgData/\
s589-gaze-incar-lincoln-car22-0/Norm_Data_OldFM_Nvhelnet_v12/\
frame/RttjJvFIa5uMqEpo/region_9/2_0_vc00_52.png',
'norm_leye_gaze_phi': 1.3862417985239928,
'norm_leye_gaze_theta': 0.1939742569441609,
'norm_leye_hp_phi': 0.7970397456237868,
'norm_leye_hp_theta': 0.5465758298139601,
'norm_leye_warping_matrix': np.array([
0.9885556,
-0.14530005,
0.04056751,
0.14830144,
0.9852998,
-0.08479964,
-0.02764977,
0.08984538,
0.99557185,
]),
'norm_leyebbx_h': 160.0,
'norm_leyebbx_w': 160.0,
'norm_leyebbx_x': 664.0,
'norm_leyebbx_y': 258.0,
'norm_reye_gaze_phi': 1.3884122795611544,
'norm_reye_gaze_theta': 0.200662527144421,
'norm_reye_hp_phi': 0.8544979998120636,
'norm_reye_hp_theta': 0.5465758298139601,
'norm_reyebbx_h': 160.0,
'norm_reyebbx_w': 160.0,
'norm_reyebbx_x': 522.0,
'norm_reyebbx_y': 263.0,
'norm_reyee_warping_matrix': np.array([
0.98533636,
-0.13990074,
0.09767291,
0.14830144,
0.9852998,
-0.08479964,
-0.08437356,
0.09804121,
0.9915992,
]),
'num_landmarks': 80.0,
'percentile_of_out_of_frame': 0.0,
'region_id': 'region_9',
'reyebbx_h': 449.0,
'reyebbx_w': 534.0,
'reyebbx_x': 511.0,
'reyebbx_y': 426.0,
'sample_type': 'filtered',
'set_name': 's589-gaze-incar-lincoln-car22-0',
'setup_type': 'incar',
'user_id': 'RttjJvFIa5uMqEpo',
'version': '1586812763',
}
test_df = pd.DataFrame([test_data])
        write_from_pandas(test_df, test_writer)
        test_writer.close()
assert os.path.exists(test_tf_filepath)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataio/datalake/test_to_tfrecord.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Splits driveix.gaze into train, validation and kpi tables in the datalake."""
import maglev
from maglev_sdk.spark.catalog import CatalogTable, SparkCatalog
from sklearn.model_selection import train_test_split
import yaml
def split_dataset(id_, train_size, kpi_users):
"""Splits driveix.gaze into train, validation, and kpi tables."""
client = maglev.Client.default_service_client()
spark = SparkCatalog.spark()
gaze_tbl = CatalogTable(table="gaze", database="driveix")
train_tbl = CatalogTable(table="gaze_train_{}".format(id_), database="driveix")
validation_tbl = CatalogTable(table="gaze_validation_{}".format(id_), database="driveix")
kpi_tbl = CatalogTable(table="gaze_kpi_{}".format(id_), database="driveix")
catalog = SparkCatalog(client, spark)
catalog.register(
read_tables=[gaze_tbl], write_tables=[train_tbl, validation_tbl, kpi_tbl]
)
gaze_df = catalog.read(gaze_tbl)
users = set(gaze_df.select("user_id").rdd.flatMap(lambda x: x).collect())
kpi_users = set(kpi_users)
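    # KPI users are held out entirely and never appear in the train or validation splits.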
# with remaining users, split into train and validation
leftover_users = users - kpi_users
train_users, validation_users = train_test_split(
list(leftover_users), train_size=train_size, random_state=42
)
# get dfs for respective splits
kpi_df = gaze_df[gaze_df.user_id.isin(kpi_users)]
train_df = gaze_df[gaze_df.user_id.isin(train_users)]
validation_df = gaze_df[gaze_df.user_id.isin(validation_users)]
# Write tables
catalog.write(kpi_tbl, kpi_df)
catalog.write(train_tbl, train_df)
catalog.write(validation_tbl, validation_df)
if __name__ == "__main__":
yaml_path = "nvidia_tao_tf1/cv/common/dataio/datalake/config.yaml"
with open(yaml_path, "r") as f:
        data = yaml.safe_load(f)
id_ = data["id_"]
train_size = data["train_size"]
kpi_users = data["kpi_users"]
split_dataset(id_, train_size, kpi_users)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataio/datalake/split_gaze_dataset.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/common/proto/optimizer_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from nvidia_tao_tf1.cv.common.proto import sgd_optimizer_config_pb2 as nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_sgd__optimizer__config__pb2
from nvidia_tao_tf1.cv.common.proto import adam_optimizer_config_pb2 as nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_adam__optimizer__config__pb2
from nvidia_tao_tf1.cv.common.proto import rmsprop_optimizer_config_pb2 as nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_rmsprop__optimizer__config__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/common/proto/optimizer_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n5nvidia_tao_tf1/cv/common/proto/optimizer_config.proto\x1a\x39nvidia_tao_tf1/cv/common/proto/sgd_optimizer_config.proto\x1a:nvidia_tao_tf1/cv/common/proto/adam_optimizer_config.proto\x1a=nvidia_tao_tf1/cv/common/proto/rmsprop_optimizer_config.proto\"\x94\x01\n\x0fOptimizerConfig\x12$\n\x04\x61\x64\x61m\x18\x01 \x01(\x0b\x32\x14.AdamOptimizerConfigH\x00\x12\"\n\x03sgd\x18\x02 \x01(\x0b\x32\x13.SGDOptimizerConfigH\x00\x12*\n\x07rmsprop\x18\x03 \x01(\x0b\x32\x17.RMSpropOptimizerConfigH\x00\x42\x0b\n\toptimizerb\x06proto3')
,
dependencies=[nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_sgd__optimizer__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_adam__optimizer__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_rmsprop__optimizer__config__pb2.DESCRIPTOR,])
_OPTIMIZERCONFIG = _descriptor.Descriptor(
name='OptimizerConfig',
full_name='OptimizerConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='adam', full_name='OptimizerConfig.adam', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sgd', full_name='OptimizerConfig.sgd', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rmsprop', full_name='OptimizerConfig.rmsprop', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='optimizer', full_name='OptimizerConfig.optimizer',
index=0, containing_type=None, fields=[]),
],
serialized_start=240,
serialized_end=388,
)
_OPTIMIZERCONFIG.fields_by_name['adam'].message_type = nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_adam__optimizer__config__pb2._ADAMOPTIMIZERCONFIG
_OPTIMIZERCONFIG.fields_by_name['sgd'].message_type = nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_sgd__optimizer__config__pb2._SGDOPTIMIZERCONFIG
_OPTIMIZERCONFIG.fields_by_name['rmsprop'].message_type = nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_rmsprop__optimizer__config__pb2._RMSPROPOPTIMIZERCONFIG
_OPTIMIZERCONFIG.oneofs_by_name['optimizer'].fields.append(
_OPTIMIZERCONFIG.fields_by_name['adam'])
_OPTIMIZERCONFIG.fields_by_name['adam'].containing_oneof = _OPTIMIZERCONFIG.oneofs_by_name['optimizer']
_OPTIMIZERCONFIG.oneofs_by_name['optimizer'].fields.append(
_OPTIMIZERCONFIG.fields_by_name['sgd'])
_OPTIMIZERCONFIG.fields_by_name['sgd'].containing_oneof = _OPTIMIZERCONFIG.oneofs_by_name['optimizer']
_OPTIMIZERCONFIG.oneofs_by_name['optimizer'].fields.append(
_OPTIMIZERCONFIG.fields_by_name['rmsprop'])
_OPTIMIZERCONFIG.fields_by_name['rmsprop'].containing_oneof = _OPTIMIZERCONFIG.oneofs_by_name['optimizer']
DESCRIPTOR.message_types_by_name['OptimizerConfig'] = _OPTIMIZERCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
OptimizerConfig = _reflection.GeneratedProtocolMessageType('OptimizerConfig', (_message.Message,), dict(
DESCRIPTOR = _OPTIMIZERCONFIG,
__module__ = 'nvidia_tao_tf1.cv.common.proto.optimizer_config_pb2'
# @@protoc_insertion_point(class_scope:OptimizerConfig)
))
_sym_db.RegisterMessage(OptimizerConfig)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/proto/optimizer_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/common/proto/sgd_optimizer_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/common/proto/sgd_optimizer_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n9nvidia_tao_tf1/cv/common/proto/sgd_optimizer_config.proto\"8\n\x12SGDOptimizerConfig\x12\x10\n\x08momentum\x18\x01 \x01(\x02\x12\x10\n\x08nesterov\x18\x02 \x01(\x08\x62\x06proto3')
)
_SGDOPTIMIZERCONFIG = _descriptor.Descriptor(
name='SGDOptimizerConfig',
full_name='SGDOptimizerConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='momentum', full_name='SGDOptimizerConfig.momentum', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='nesterov', full_name='SGDOptimizerConfig.nesterov', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=61,
serialized_end=117,
)
DESCRIPTOR.message_types_by_name['SGDOptimizerConfig'] = _SGDOPTIMIZERCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SGDOptimizerConfig = _reflection.GeneratedProtocolMessageType('SGDOptimizerConfig', (_message.Message,), dict(
DESCRIPTOR = _SGDOPTIMIZERCONFIG,
__module__ = 'nvidia_tao_tf1.cv.common.proto.sgd_optimizer_config_pb2'
# @@protoc_insertion_point(class_scope:SGDOptimizerConfig)
))
_sym_db.RegisterMessage(SGDOptimizerConfig)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/proto/sgd_optimizer_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/common/proto/string_int_label_map.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/common/proto/string_int_label_map.proto',
package='',
syntax='proto2',
serialized_options=None,
serialized_pb=_b('\n9nvidia_tao_tf1/cv/common/proto/string_int_label_map.proto\"\x91\x02\n\x15StringIntLabelMapItem\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x01(\x05\x12\x14\n\x0c\x64isplay_name\x18\x03 \x01(\t\x12\x35\n\tkeypoints\x18\x04 \x03(\x0b\x32\".StringIntLabelMapItem.KeypointMap\x12\x14\n\x0c\x61ncestor_ids\x18\x05 \x03(\x05\x12\x16\n\x0e\x64\x65scendant_ids\x18\x06 \x03(\x05\x12!\n\tfrequency\x18\x07 \x01(\x0e\x32\x0e.LVISFrequency\x12\x16\n\x0einstance_count\x18\x08 \x01(\x05\x1a(\n\x0bKeypointMap\x12\n\n\x02id\x18\x01 \x01(\x05\x12\r\n\x05label\x18\x02 \x01(\t\"9\n\x11StringIntLabelMap\x12$\n\x04item\x18\x01 \x03(\x0b\x32\x16.StringIntLabelMapItem*D\n\rLVISFrequency\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0c\n\x08\x46REQUENT\x10\x01\x12\n\n\x06\x43OMMON\x10\x02\x12\x08\n\x04RARE\x10\x03')
)
_LVISFREQUENCY = _descriptor.EnumDescriptor(
name='LVISFrequency',
full_name='LVISFrequency',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FREQUENT', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='COMMON', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RARE', index=3, number=3,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=396,
serialized_end=464,
)
_sym_db.RegisterEnumDescriptor(_LVISFREQUENCY)
LVISFrequency = enum_type_wrapper.EnumTypeWrapper(_LVISFREQUENCY)
UNSPECIFIED = 0
FREQUENT = 1
COMMON = 2
RARE = 3
_STRINGINTLABELMAPITEM_KEYPOINTMAP = _descriptor.Descriptor(
name='KeypointMap',
full_name='StringIntLabelMapItem.KeypointMap',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='StringIntLabelMapItem.KeypointMap.id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='label', full_name='StringIntLabelMapItem.KeypointMap.label', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=295,
serialized_end=335,
)
_STRINGINTLABELMAPITEM = _descriptor.Descriptor(
name='StringIntLabelMapItem',
full_name='StringIntLabelMapItem',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='StringIntLabelMapItem.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id', full_name='StringIntLabelMapItem.id', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='display_name', full_name='StringIntLabelMapItem.display_name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='keypoints', full_name='StringIntLabelMapItem.keypoints', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ancestor_ids', full_name='StringIntLabelMapItem.ancestor_ids', index=4,
number=5, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='descendant_ids', full_name='StringIntLabelMapItem.descendant_ids', index=5,
number=6, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='frequency', full_name='StringIntLabelMapItem.frequency', index=6,
number=7, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='instance_count', full_name='StringIntLabelMapItem.instance_count', index=7,
number=8, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_STRINGINTLABELMAPITEM_KEYPOINTMAP, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=62,
serialized_end=335,
)
_STRINGINTLABELMAP = _descriptor.Descriptor(
name='StringIntLabelMap',
full_name='StringIntLabelMap',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='item', full_name='StringIntLabelMap.item', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=337,
serialized_end=394,
)
_STRINGINTLABELMAPITEM_KEYPOINTMAP.containing_type = _STRINGINTLABELMAPITEM
_STRINGINTLABELMAPITEM.fields_by_name['keypoints'].message_type = _STRINGINTLABELMAPITEM_KEYPOINTMAP
_STRINGINTLABELMAPITEM.fields_by_name['frequency'].enum_type = _LVISFREQUENCY
_STRINGINTLABELMAP.fields_by_name['item'].message_type = _STRINGINTLABELMAPITEM
DESCRIPTOR.message_types_by_name['StringIntLabelMapItem'] = _STRINGINTLABELMAPITEM
DESCRIPTOR.message_types_by_name['StringIntLabelMap'] = _STRINGINTLABELMAP
DESCRIPTOR.enum_types_by_name['LVISFrequency'] = _LVISFREQUENCY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
StringIntLabelMapItem = _reflection.GeneratedProtocolMessageType('StringIntLabelMapItem', (_message.Message,), dict(
KeypointMap = _reflection.GeneratedProtocolMessageType('KeypointMap', (_message.Message,), dict(
DESCRIPTOR = _STRINGINTLABELMAPITEM_KEYPOINTMAP,
__module__ = 'nvidia_tao_tf1.cv.common.proto.string_int_label_map_pb2'
# @@protoc_insertion_point(class_scope:StringIntLabelMapItem.KeypointMap)
))
,
DESCRIPTOR = _STRINGINTLABELMAPITEM,
__module__ = 'nvidia_tao_tf1.cv.common.proto.string_int_label_map_pb2'
# @@protoc_insertion_point(class_scope:StringIntLabelMapItem)
))
_sym_db.RegisterMessage(StringIntLabelMapItem)
_sym_db.RegisterMessage(StringIntLabelMapItem.KeypointMap)
StringIntLabelMap = _reflection.GeneratedProtocolMessageType('StringIntLabelMap', (_message.Message,), dict(
DESCRIPTOR = _STRINGINTLABELMAP,
__module__ = 'nvidia_tao_tf1.cv.common.proto.string_int_label_map_pb2'
# @@protoc_insertion_point(class_scope:StringIntLabelMap)
))
_sym_db.RegisterMessage(StringIntLabelMap)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/proto/string_int_label_map_pb2.py |
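A minimal usage sketch for the generated label-map classes above: the messages are typically populated from a protobuf text file via google.protobuf.text_format. The import path follows the __module__ string in the generated code; the label-map literal below is illustrative, not taken from the repo.

from google.protobuf import text_format
from nvidia_tao_tf1.cv.common.proto import string_int_label_map_pb2

# Illustrative label map in protobuf text format (field names match the descriptor).
label_map_txt = """
item {
  name: "person"
  id: 1
  display_name: "Person"
}
item {
  name: "bag"
  id: 2
}
"""

label_map = string_int_label_map_pb2.StringIntLabelMap()
text_format.Merge(label_map_txt, label_map)

# Build a simple name -> id lookup from the repeated `item` field.
name_to_id = {item.name: item.id for item in label_map.item}
print(name_to_id)  # {'person': 1, 'bag': 2}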
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/common/proto/wandb_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/common/proto/wandb_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n1nvidia_tao_tf1/cv/common/proto/wandb_config.proto\"\xea\x01\n\x0bWandBConfig\x12\x0f\n\x07\x65nabled\x18\x01 \x01(\x08\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0f\n\x07project\x18\x03 \x01(\t\x12\x0e\n\x06\x65ntity\x18\x04 \x01(\t\x12\x0e\n\x06reinit\x18\x05 \x01(\x08\x12\x0c\n\x04name\x18\x06 \x01(\t\x12\x0c\n\x04tags\x18\x07 \x03(\t\x12\x11\n\twandb_dir\x18\x08 \x01(\t\x12\r\n\x05notes\x18\t \x01(\t\x12\x1f\n\x04mode\x18\n \x01(\x0e\x32\x11.WandBConfig.MODE\"-\n\x04MODE\x12\n\n\x06ONLINE\x10\x00\x12\x0b\n\x07OFFLINE\x10\x01\x12\x0c\n\x08\x44ISABLED\x10\x02\x62\x06proto3')
)
_WANDBCONFIG_MODE = _descriptor.EnumDescriptor(
name='MODE',
full_name='WandBConfig.MODE',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='ONLINE', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='OFFLINE', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DISABLED', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=243,
serialized_end=288,
)
_sym_db.RegisterEnumDescriptor(_WANDBCONFIG_MODE)
_WANDBCONFIG = _descriptor.Descriptor(
name='WandBConfig',
full_name='WandBConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='enabled', full_name='WandBConfig.enabled', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='key', full_name='WandBConfig.key', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='project', full_name='WandBConfig.project', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='entity', full_name='WandBConfig.entity', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reinit', full_name='WandBConfig.reinit', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='WandBConfig.name', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tags', full_name='WandBConfig.tags', index=6,
number=7, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='wandb_dir', full_name='WandBConfig.wandb_dir', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='notes', full_name='WandBConfig.notes', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mode', full_name='WandBConfig.mode', index=9,
number=10, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_WANDBCONFIG_MODE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=54,
serialized_end=288,
)
_WANDBCONFIG.fields_by_name['mode'].enum_type = _WANDBCONFIG_MODE
_WANDBCONFIG_MODE.containing_type = _WANDBCONFIG
DESCRIPTOR.message_types_by_name['WandBConfig'] = _WANDBCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
WandBConfig = _reflection.GeneratedProtocolMessageType('WandBConfig', (_message.Message,), dict(
DESCRIPTOR = _WANDBCONFIG,
__module__ = 'nvidia_tao_tf1.cv.common.proto.wandb_config_pb2'
# @@protoc_insertion_point(class_scope:WandBConfig)
))
_sym_db.RegisterMessage(WandBConfig)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/proto/wandb_config_pb2.py |
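A minimal sketch of building the WandBConfig message above in Python; the project name and tags are made-up values, and the nested MODE enum is addressed through the message class.

from nvidia_tao_tf1.cv.common.proto import wandb_config_pb2

cfg = wandb_config_pb2.WandBConfig()
cfg.enabled = True
cfg.project = "demo-project"          # hypothetical project name
cfg.tags.extend(["yolo_v3", "tao"])   # `tags` is a repeated string field
cfg.mode = wandb_config_pb2.WandBConfig.OFFLINE  # nested MODE enum value

# proto3 scalars that are never set keep their defaults, e.g. cfg.reinit is False.
print(wandb_config_pb2.WandBConfig.MODE.Name(cfg.mode))  # OFFLINE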
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/common/proto/adam_optimizer_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/common/proto/adam_optimizer_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n:nvidia_tao_tf1/cv/common/proto/adam_optimizer_config.proto\"U\n\x13\x41\x64\x61mOptimizerConfig\x12\x0f\n\x07\x65psilon\x18\x01 \x01(\x02\x12\r\n\x05\x62\x65ta1\x18\x02 \x01(\x02\x12\r\n\x05\x62\x65ta2\x18\x03 \x01(\x02\x12\x0f\n\x07\x61msgrad\x18\x04 \x01(\x08\x62\x06proto3')
)
_ADAMOPTIMIZERCONFIG = _descriptor.Descriptor(
name='AdamOptimizerConfig',
full_name='AdamOptimizerConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='epsilon', full_name='AdamOptimizerConfig.epsilon', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='beta1', full_name='AdamOptimizerConfig.beta1', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='beta2', full_name='AdamOptimizerConfig.beta2', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='amsgrad', full_name='AdamOptimizerConfig.amsgrad', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=62,
serialized_end=147,
)
DESCRIPTOR.message_types_by_name['AdamOptimizerConfig'] = _ADAMOPTIMIZERCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AdamOptimizerConfig = _reflection.GeneratedProtocolMessageType('AdamOptimizerConfig', (_message.Message,), dict(
DESCRIPTOR = _ADAMOPTIMIZERCONFIG,
__module__ = 'nvidia_tao_tf1.cv.common.proto.adam_optimizer_config_pb2'
# @@protoc_insertion_point(class_scope:AdamOptimizerConfig)
))
_sym_db.RegisterMessage(AdamOptimizerConfig)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/proto/adam_optimizer_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/common/proto/training_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from nvidia_tao_tf1.cv.common.proto import cost_scaling_config_pb2 as nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_cost__scaling__config__pb2
from nvidia_tao_tf1.cv.common.proto import learning_rate_config_pb2 as nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_learning__rate__config__pb2
from nvidia_tao_tf1.cv.common.proto import optimizer_config_pb2 as nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_optimizer__config__pb2
from nvidia_tao_tf1.cv.common.proto import regularizer_config_pb2 as nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_regularizer__config__pb2
from nvidia_tao_tf1.cv.common.proto import visualizer_config_pb2 as nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_visualizer__config__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/common/proto/training_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n4nvidia_tao_tf1/cv/common/proto/training_config.proto\x1a\x38nvidia_tao_tf1/cv/common/proto/cost_scaling_config.proto\x1a\x39nvidia_tao_tf1/cv/common/proto/learning_rate_config.proto\x1a\x35nvidia_tao_tf1/cv/common/proto/optimizer_config.proto\x1a\x37nvidia_tao_tf1/cv/common/proto/regularizer_config.proto\x1a\x36nvidia_tao_tf1/cv/common/proto/visualizer_config.proto\"E\n\rEarlyStopping\x12\x0f\n\x07monitor\x18\x01 \x01(\t\x12\x11\n\tmin_delta\x18\x02 \x01(\x02\x12\x10\n\x08patience\x18\x03 \x01(\r\"\xa6\x04\n\x0eTrainingConfig\x12\x1a\n\x12\x62\x61tch_size_per_gpu\x18\x01 \x01(\r\x12\x12\n\nnum_epochs\x18\x02 \x01(\r\x12*\n\rlearning_rate\x18\x03 \x01(\x0b\x32\x13.LearningRateConfig\x12\'\n\x0bregularizer\x18\x04 \x01(\x0b\x32\x12.RegularizerConfig\x12#\n\toptimizer\x18\x05 \x01(\x0b\x32\x10.OptimizerConfig\x12(\n\x0c\x63ost_scaling\x18\x06 \x01(\x0b\x32\x12.CostScalingConfig\x12\x1b\n\x13\x63heckpoint_interval\x18\x07 \x01(\r\x12\x12\n\nenable_qat\x18\x08 \x01(\x08\x12\x1b\n\x11resume_model_path\x18\t \x01(\tH\x00\x12\x1d\n\x13pretrain_model_path\x18\n \x01(\tH\x00\x12\x1b\n\x11pruned_model_path\x18\x0b \x01(\tH\x00\x12\x16\n\x0emax_queue_size\x18\x0c \x01(\r\x12\x11\n\tn_workers\x18\r \x01(\r\x12\x1b\n\x13use_multiprocessing\x18\x0e \x01(\x08\x12&\n\x0e\x65\x61rly_stopping\x18\x0f \x01(\x0b\x32\x0e.EarlyStopping\x12%\n\nvisualizer\x18\x10 \x01(\x0b\x32\x11.VisualizerConfig\x12\x11\n\tmodel_ema\x18\x11 \x01(\x08\x42\x0c\n\nload_modelb\x06proto3')
,
dependencies=[nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_cost__scaling__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_learning__rate__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_optimizer__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_regularizer__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_visualizer__config__pb2.DESCRIPTOR,])
_EARLYSTOPPING = _descriptor.Descriptor(
name='EarlyStopping',
full_name='EarlyStopping',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='monitor', full_name='EarlyStopping.monitor', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_delta', full_name='EarlyStopping.min_delta', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='patience', full_name='EarlyStopping.patience', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=341,
serialized_end=410,
)
_TRAININGCONFIG = _descriptor.Descriptor(
name='TrainingConfig',
full_name='TrainingConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='batch_size_per_gpu', full_name='TrainingConfig.batch_size_per_gpu', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_epochs', full_name='TrainingConfig.num_epochs', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='learning_rate', full_name='TrainingConfig.learning_rate', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='regularizer', full_name='TrainingConfig.regularizer', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='optimizer', full_name='TrainingConfig.optimizer', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cost_scaling', full_name='TrainingConfig.cost_scaling', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='checkpoint_interval', full_name='TrainingConfig.checkpoint_interval', index=6,
number=7, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='enable_qat', full_name='TrainingConfig.enable_qat', index=7,
number=8, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='resume_model_path', full_name='TrainingConfig.resume_model_path', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pretrain_model_path', full_name='TrainingConfig.pretrain_model_path', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pruned_model_path', full_name='TrainingConfig.pruned_model_path', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_queue_size', full_name='TrainingConfig.max_queue_size', index=11,
number=12, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='n_workers', full_name='TrainingConfig.n_workers', index=12,
number=13, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='use_multiprocessing', full_name='TrainingConfig.use_multiprocessing', index=13,
number=14, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='early_stopping', full_name='TrainingConfig.early_stopping', index=14,
number=15, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='visualizer', full_name='TrainingConfig.visualizer', index=15,
number=16, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='model_ema', full_name='TrainingConfig.model_ema', index=16,
number=17, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='load_model', full_name='TrainingConfig.load_model',
index=0, containing_type=None, fields=[]),
],
serialized_start=413,
serialized_end=963,
)
_TRAININGCONFIG.fields_by_name['learning_rate'].message_type = nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_learning__rate__config__pb2._LEARNINGRATECONFIG
_TRAININGCONFIG.fields_by_name['regularizer'].message_type = nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_regularizer__config__pb2._REGULARIZERCONFIG
_TRAININGCONFIG.fields_by_name['optimizer'].message_type = nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_optimizer__config__pb2._OPTIMIZERCONFIG
_TRAININGCONFIG.fields_by_name['cost_scaling'].message_type = nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_cost__scaling__config__pb2._COSTSCALINGCONFIG
_TRAININGCONFIG.fields_by_name['early_stopping'].message_type = _EARLYSTOPPING
_TRAININGCONFIG.fields_by_name['visualizer'].message_type = nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_visualizer__config__pb2._VISUALIZERCONFIG
_TRAININGCONFIG.oneofs_by_name['load_model'].fields.append(
_TRAININGCONFIG.fields_by_name['resume_model_path'])
_TRAININGCONFIG.fields_by_name['resume_model_path'].containing_oneof = _TRAININGCONFIG.oneofs_by_name['load_model']
_TRAININGCONFIG.oneofs_by_name['load_model'].fields.append(
_TRAININGCONFIG.fields_by_name['pretrain_model_path'])
_TRAININGCONFIG.fields_by_name['pretrain_model_path'].containing_oneof = _TRAININGCONFIG.oneofs_by_name['load_model']
_TRAININGCONFIG.oneofs_by_name['load_model'].fields.append(
_TRAININGCONFIG.fields_by_name['pruned_model_path'])
_TRAININGCONFIG.fields_by_name['pruned_model_path'].containing_oneof = _TRAININGCONFIG.oneofs_by_name['load_model']
DESCRIPTOR.message_types_by_name['EarlyStopping'] = _EARLYSTOPPING
DESCRIPTOR.message_types_by_name['TrainingConfig'] = _TRAININGCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
EarlyStopping = _reflection.GeneratedProtocolMessageType('EarlyStopping', (_message.Message,), dict(
DESCRIPTOR = _EARLYSTOPPING,
__module__ = 'nvidia_tao_tf1.cv.common.proto.training_config_pb2'
# @@protoc_insertion_point(class_scope:EarlyStopping)
))
_sym_db.RegisterMessage(EarlyStopping)
TrainingConfig = _reflection.GeneratedProtocolMessageType('TrainingConfig', (_message.Message,), dict(
DESCRIPTOR = _TRAININGCONFIG,
__module__ = 'nvidia_tao_tf1.cv.common.proto.training_config_pb2'
# @@protoc_insertion_point(class_scope:TrainingConfig)
))
_sym_db.RegisterMessage(TrainingConfig)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/proto/training_config_pb2.py |
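A sketch of how a training spec in protobuf text format maps onto the TrainingConfig message above; the spec snippet and model path are illustrative, while the field names and the load_model oneof come from the descriptor.

from google.protobuf import text_format
from nvidia_tao_tf1.cv.common.proto import training_config_pb2

# Hypothetical spec snippet; only field names defined in the descriptor are used.
spec = """
batch_size_per_gpu: 8
num_epochs: 80
checkpoint_interval: 10
pretrain_model_path: "/workspace/pretrained.hdf5"
early_stopping {
  monitor: "loss"
  min_delta: 0.001
  patience: 5
}
"""

config = training_config_pb2.TrainingConfig()
text_format.Merge(spec, config)

# resume_model_path, pretrain_model_path and pruned_model_path share the
# load_model oneof, so at most one of them can be set at a time.
print(config.WhichOneof("load_model"))   # pretrain_model_path
print(config.early_stopping.patience)    # 5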
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/common/proto/cost_scaling_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/common/proto/cost_scaling_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n8nvidia_tao_tf1/cv/common/proto/cost_scaling_config.proto\"d\n\x11\x43ostScalingConfig\x12\x0f\n\x07\x65nabled\x18\x01 \x01(\x08\x12\x18\n\x10initial_exponent\x18\x02 \x01(\x01\x12\x11\n\tincrement\x18\x03 \x01(\x01\x12\x11\n\tdecrement\x18\x04 \x01(\x01\x62\x06proto3')
)
_COSTSCALINGCONFIG = _descriptor.Descriptor(
name='CostScalingConfig',
full_name='CostScalingConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='enabled', full_name='CostScalingConfig.enabled', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='initial_exponent', full_name='CostScalingConfig.initial_exponent', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='increment', full_name='CostScalingConfig.increment', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='decrement', full_name='CostScalingConfig.decrement', index=3,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=60,
serialized_end=160,
)
DESCRIPTOR.message_types_by_name['CostScalingConfig'] = _COSTSCALINGCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CostScalingConfig = _reflection.GeneratedProtocolMessageType('CostScalingConfig', (_message.Message,), dict(
DESCRIPTOR = _COSTSCALINGCONFIG,
__module__ = 'nvidia_tao_tf1.cv.common.proto.cost_scaling_config_pb2'
# @@protoc_insertion_point(class_scope:CostScalingConfig)
))
_sym_db.RegisterMessage(CostScalingConfig)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/proto/cost_scaling_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/common/proto/regularizer_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/common/proto/regularizer_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n7nvidia_tao_tf1/cv/common/proto/regularizer_config.proto\"\x8a\x01\n\x11RegularizerConfig\x12\x33\n\x04type\x18\x01 \x01(\x0e\x32%.RegularizerConfig.RegularizationType\x12\x0e\n\x06weight\x18\x02 \x01(\x02\"0\n\x12RegularizationType\x12\n\n\x06NO_REG\x10\x00\x12\x06\n\x02L1\x10\x01\x12\x06\n\x02L2\x10\x02\x62\x06proto3')
)
_REGULARIZERCONFIG_REGULARIZATIONTYPE = _descriptor.EnumDescriptor(
name='RegularizationType',
full_name='RegularizerConfig.RegularizationType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='NO_REG', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='L1', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='L2', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=150,
serialized_end=198,
)
_sym_db.RegisterEnumDescriptor(_REGULARIZERCONFIG_REGULARIZATIONTYPE)
_REGULARIZERCONFIG = _descriptor.Descriptor(
name='RegularizerConfig',
full_name='RegularizerConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='RegularizerConfig.type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='weight', full_name='RegularizerConfig.weight', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_REGULARIZERCONFIG_REGULARIZATIONTYPE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=60,
serialized_end=198,
)
_REGULARIZERCONFIG.fields_by_name['type'].enum_type = _REGULARIZERCONFIG_REGULARIZATIONTYPE
_REGULARIZERCONFIG_REGULARIZATIONTYPE.containing_type = _REGULARIZERCONFIG
DESCRIPTOR.message_types_by_name['RegularizerConfig'] = _REGULARIZERCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
RegularizerConfig = _reflection.GeneratedProtocolMessageType('RegularizerConfig', (_message.Message,), dict(
DESCRIPTOR = _REGULARIZERCONFIG,
__module__ = 'nvidia_tao_tf1.cv.common.proto.regularizer_config_pb2'
# @@protoc_insertion_point(class_scope:RegularizerConfig)
))
_sym_db.RegisterMessage(RegularizerConfig)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/proto/regularizer_config_pb2.py |
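A short sketch of the RegularizerConfig message above; the weight value is arbitrary, and the nested RegularizationType enum wrapper provides value-to-name lookup.

from nvidia_tao_tf1.cv.common.proto import regularizer_config_pb2

reg = regularizer_config_pb2.RegularizerConfig()
reg.type = regularizer_config_pb2.RegularizerConfig.L2   # enum value defined above
reg.weight = 3e-5                                        # arbitrary example weight

# The nested enum wrapper maps values back to names for logging.
print(regularizer_config_pb2.RegularizerConfig.RegularizationType.Name(reg.type))  # L2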
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/common/proto/detection_sequence_dataset_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/common/proto/detection_sequence_dataset_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\nFnvidia_tao_tf1/cv/common/proto/detection_sequence_dataset_config.proto\"s\n\nDataSource\x12\x1c\n\x14label_directory_path\x18\x01 \x01(\t\x12\x1c\n\x14image_directory_path\x18\x02 \x01(\t\x12\x11\n\troot_path\x18\x03 \x01(\t\x12\x16\n\x0etfrecords_path\x18\x04 \x01(\t\"\x96\x02\n\rDatasetConfig\x12!\n\x0c\x64\x61ta_sources\x18\x01 \x03(\x0b\x32\x0b.DataSource\x12\x44\n\x14target_class_mapping\x18\x02 \x03(\x0b\x32&.DatasetConfig.TargetClassMappingEntry\x12,\n\x17validation_data_sources\x18\x03 \x03(\x0b\x32\x0b.DataSource\x12%\n\x1dinclude_difficult_in_training\x18\x04 \x01(\x08\x12\x0c\n\x04type\x18\x05 \x01(\t\x1a\x39\n\x17TargetClassMappingEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x62\x06proto3')
)
_DATASOURCE = _descriptor.Descriptor(
name='DataSource',
full_name='DataSource',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='label_directory_path', full_name='DataSource.label_directory_path', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='image_directory_path', full_name='DataSource.image_directory_path', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='root_path', full_name='DataSource.root_path', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tfrecords_path', full_name='DataSource.tfrecords_path', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=74,
serialized_end=189,
)
_DATASETCONFIG_TARGETCLASSMAPPINGENTRY = _descriptor.Descriptor(
name='TargetClassMappingEntry',
full_name='DatasetConfig.TargetClassMappingEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='DatasetConfig.TargetClassMappingEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='DatasetConfig.TargetClassMappingEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=413,
serialized_end=470,
)
_DATASETCONFIG = _descriptor.Descriptor(
name='DatasetConfig',
full_name='DatasetConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='data_sources', full_name='DatasetConfig.data_sources', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='target_class_mapping', full_name='DatasetConfig.target_class_mapping', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='validation_data_sources', full_name='DatasetConfig.validation_data_sources', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='include_difficult_in_training', full_name='DatasetConfig.include_difficult_in_training', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='DatasetConfig.type', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_DATASETCONFIG_TARGETCLASSMAPPINGENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=192,
serialized_end=470,
)
_DATASETCONFIG_TARGETCLASSMAPPINGENTRY.containing_type = _DATASETCONFIG
_DATASETCONFIG.fields_by_name['data_sources'].message_type = _DATASOURCE
_DATASETCONFIG.fields_by_name['target_class_mapping'].message_type = _DATASETCONFIG_TARGETCLASSMAPPINGENTRY
_DATASETCONFIG.fields_by_name['validation_data_sources'].message_type = _DATASOURCE
DESCRIPTOR.message_types_by_name['DataSource'] = _DATASOURCE
DESCRIPTOR.message_types_by_name['DatasetConfig'] = _DATASETCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DataSource = _reflection.GeneratedProtocolMessageType('DataSource', (_message.Message,), dict(
DESCRIPTOR = _DATASOURCE,
__module__ = 'nvidia_tao_tf1.cv.common.proto.detection_sequence_dataset_config_pb2'
# @@protoc_insertion_point(class_scope:DataSource)
))
_sym_db.RegisterMessage(DataSource)
DatasetConfig = _reflection.GeneratedProtocolMessageType('DatasetConfig', (_message.Message,), dict(
TargetClassMappingEntry = _reflection.GeneratedProtocolMessageType('TargetClassMappingEntry', (_message.Message,), dict(
DESCRIPTOR = _DATASETCONFIG_TARGETCLASSMAPPINGENTRY,
__module__ = 'nvidia_tao_tf1.cv.common.proto.detection_sequence_dataset_config_pb2'
# @@protoc_insertion_point(class_scope:DatasetConfig.TargetClassMappingEntry)
))
,
DESCRIPTOR = _DATASETCONFIG,
__module__ = 'nvidia_tao_tf1.cv.common.proto.detection_sequence_dataset_config_pb2'
# @@protoc_insertion_point(class_scope:DatasetConfig)
))
_sym_db.RegisterMessage(DatasetConfig)
_sym_db.RegisterMessage(DatasetConfig.TargetClassMappingEntry)
_DATASETCONFIG_TARGETCLASSMAPPINGENTRY._options = None
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/proto/detection_sequence_dataset_config_pb2.py |
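A sketch of the DatasetConfig message above; the directory paths and class names are illustrative. target_class_mapping is a proto3 map field (the TargetClassMappingEntry nested type), so it behaves like a dict in Python.

from nvidia_tao_tf1.cv.common.proto import detection_sequence_dataset_config_pb2 as dataset_pb2

ds = dataset_pb2.DatasetConfig()

# data_sources is a repeated DataSource message.
source = ds.data_sources.add()
source.image_directory_path = "/data/images"   # illustrative paths
source.label_directory_path = "/data/labels"

# Map field: fold several raw labels into one target class.
ds.target_class_mapping["heavy_truck"] = "car"
ds.target_class_mapping["van"] = "car"

print(dict(ds.target_class_mapping))  # {'heavy_truck': 'car', 'van': 'car'}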
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/proto/__init__.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/common/proto/clearml_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/common/proto/clearml_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n3nvidia_tao_tf1/cv/common/proto/clearml_config.proto\"\x8b\x01\n\rClearMLConfig\x12\x0f\n\x07project\x18\x01 \x01(\t\x12\x0c\n\x04task\x18\x02 \x01(\t\x12\x0c\n\x04tags\x18\x03 \x03(\t\x12\x1a\n\x12reuse_last_task_id\x18\x04 \x01(\x08\x12\x1a\n\x12\x63ontinue_last_task\x18\x05 \x01(\x08\x12\x15\n\rdeferred_init\x18\x06 \x01(\x08\x62\x06proto3')
)
_CLEARMLCONFIG = _descriptor.Descriptor(
name='ClearMLConfig',
full_name='ClearMLConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project', full_name='ClearMLConfig.project', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='task', full_name='ClearMLConfig.task', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tags', full_name='ClearMLConfig.tags', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reuse_last_task_id', full_name='ClearMLConfig.reuse_last_task_id', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='continue_last_task', full_name='ClearMLConfig.continue_last_task', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='deferred_init', full_name='ClearMLConfig.deferred_init', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=56,
serialized_end=195,
)
DESCRIPTOR.message_types_by_name['ClearMLConfig'] = _CLEARMLCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ClearMLConfig = _reflection.GeneratedProtocolMessageType('ClearMLConfig', (_message.Message,), dict(
DESCRIPTOR = _CLEARMLCONFIG,
__module__ = 'nvidia_tao_tf1.cv.common.proto.clearml_config_pb2'
# @@protoc_insertion_point(class_scope:ClearMLConfig)
))
_sym_db.RegisterMessage(ClearMLConfig)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/proto/clearml_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/common/proto/rmsprop_optimizer_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/common/proto/rmsprop_optimizer_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n=nvidia_tao_tf1/cv/common/proto/rmsprop_optimizer_config.proto\"Z\n\x16RMSpropOptimizerConfig\x12\x0b\n\x03rho\x18\x01 \x01(\x02\x12\x10\n\x08momentum\x18\x02 \x01(\x02\x12\x0f\n\x07\x65psilon\x18\x03 \x01(\x02\x12\x10\n\x08\x63\x65ntered\x18\x04 \x01(\x08\x62\x06proto3')
)
_RMSPROPOPTIMIZERCONFIG = _descriptor.Descriptor(
name='RMSpropOptimizerConfig',
full_name='RMSpropOptimizerConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='rho', full_name='RMSpropOptimizerConfig.rho', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='momentum', full_name='RMSpropOptimizerConfig.momentum', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='epsilon', full_name='RMSpropOptimizerConfig.epsilon', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='centered', full_name='RMSpropOptimizerConfig.centered', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=65,
serialized_end=155,
)
DESCRIPTOR.message_types_by_name['RMSpropOptimizerConfig'] = _RMSPROPOPTIMIZERCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
RMSpropOptimizerConfig = _reflection.GeneratedProtocolMessageType('RMSpropOptimizerConfig', (_message.Message,), dict(
DESCRIPTOR = _RMSPROPOPTIMIZERCONFIG,
__module__ = 'nvidia_tao_tf1.cv.common.proto.rmsprop_optimizer_config_pb2'
# @@protoc_insertion_point(class_scope:RMSpropOptimizerConfig)
))
_sym_db.RegisterMessage(RMSpropOptimizerConfig)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/proto/rmsprop_optimizer_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/common/proto/visualizer_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from nvidia_tao_tf1.cv.common.proto import wandb_config_pb2 as nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_wandb__config__pb2
from nvidia_tao_tf1.cv.common.proto import clearml_config_pb2 as nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_clearml__config__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/common/proto/visualizer_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n6nvidia_tao_tf1/cv/common/proto/visualizer_config.proto\x1a\x31nvidia_tao_tf1/cv/common/proto/wandb_config.proto\x1a\x33nvidia_tao_tf1/cv/common/proto/clearml_config.proto\"\x9e\x01\n\x10VisualizerConfig\x12\x0f\n\x07\x65nabled\x18\x01 \x01(\x08\x12\x12\n\nnum_images\x18\x02 \x01(\r\x12\x19\n\x11weight_histograms\x18\x03 \x01(\x08\x12\"\n\x0cwandb_config\x18\x04 \x01(\x0b\x32\x0c.WandBConfig\x12&\n\x0e\x63learml_config\x18\x05 \x01(\x0b\x32\x0e.ClearMLConfigb\x06proto3')
,
dependencies=[nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_wandb__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_clearml__config__pb2.DESCRIPTOR,])
_VISUALIZERCONFIG = _descriptor.Descriptor(
name='VisualizerConfig',
full_name='VisualizerConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='enabled', full_name='VisualizerConfig.enabled', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_images', full_name='VisualizerConfig.num_images', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='weight_histograms', full_name='VisualizerConfig.weight_histograms', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='wandb_config', full_name='VisualizerConfig.wandb_config', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='clearml_config', full_name='VisualizerConfig.clearml_config', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=163,
serialized_end=321,
)
_VISUALIZERCONFIG.fields_by_name['wandb_config'].message_type = nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_wandb__config__pb2._WANDBCONFIG
_VISUALIZERCONFIG.fields_by_name['clearml_config'].message_type = nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_clearml__config__pb2._CLEARMLCONFIG
DESCRIPTOR.message_types_by_name['VisualizerConfig'] = _VISUALIZERCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
VisualizerConfig = _reflection.GeneratedProtocolMessageType('VisualizerConfig', (_message.Message,), dict(
DESCRIPTOR = _VISUALIZERCONFIG,
__module__ = 'nvidia_tao_tf1.cv.common.proto.visualizer_config_pb2'
# @@protoc_insertion_point(class_scope:VisualizerConfig)
))
_sym_db.RegisterMessage(VisualizerConfig)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/proto/visualizer_config_pb2.py |
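A sketch of populating the VisualizerConfig message above together with its nested WandBConfig; the values are illustrative. Sub-message fields are either filled in place or copied with CopyFrom.

from nvidia_tao_tf1.cv.common.proto import visualizer_config_pb2
from nvidia_tao_tf1.cv.common.proto import wandb_config_pb2

vis = visualizer_config_pb2.VisualizerConfig()
vis.enabled = True
vis.num_images = 3

# Assign a whole sub-message via CopyFrom (direct assignment is not allowed).
wandb = wandb_config_pb2.WandBConfig(enabled=True, project="demo")  # illustrative values
vis.wandb_config.CopyFrom(wandb)

print(vis.wandb_config.project)        # demo
print(vis.HasField("clearml_config"))  # False (message field left unset)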
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/common/proto/learning_rate_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from nvidia_tao_tf1.cv.common.proto import soft_start_annealing_schedule_config_pb2 as nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_soft__start__annealing__schedule__config__pb2
from nvidia_tao_tf1.cv.common.proto import soft_start_cosine_annealing_schedule_config_pb2 as nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_soft__start__cosine__annealing__schedule__config__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/common/proto/learning_rate_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n9nvidia_tao_tf1/cv/common/proto/learning_rate_config.proto\x1aInvidia_tao_tf1/cv/common/proto/soft_start_annealing_schedule_config.proto\x1aPnvidia_tao_tf1/cv/common/proto/soft_start_cosine_annealing_schedule_config.proto\"\xca\x01\n\x12LearningRateConfig\x12J\n\x1dsoft_start_annealing_schedule\x18\x01 \x01(\x0b\x32!.SoftStartAnnealingScheduleConfigH\x00\x12W\n$soft_start_cosine_annealing_schedule\x18\x02 \x01(\x0b\x32\'.SoftStartCosineAnnealingScheduleConfigH\x00\x42\x0f\n\rlearning_rateb\x06proto3')
,
dependencies=[nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_soft__start__annealing__schedule__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_soft__start__cosine__annealing__schedule__config__pb2.DESCRIPTOR,])
_LEARNINGRATECONFIG = _descriptor.Descriptor(
name='LearningRateConfig',
full_name='LearningRateConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='soft_start_annealing_schedule', full_name='LearningRateConfig.soft_start_annealing_schedule', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='soft_start_cosine_annealing_schedule', full_name='LearningRateConfig.soft_start_cosine_annealing_schedule', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='learning_rate', full_name='LearningRateConfig.learning_rate',
index=0, containing_type=None, fields=[]),
],
serialized_start=219,
serialized_end=421,
)
_LEARNINGRATECONFIG.fields_by_name['soft_start_annealing_schedule'].message_type = nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_soft__start__annealing__schedule__config__pb2._SOFTSTARTANNEALINGSCHEDULECONFIG
_LEARNINGRATECONFIG.fields_by_name['soft_start_cosine_annealing_schedule'].message_type = nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_soft__start__cosine__annealing__schedule__config__pb2._SOFTSTARTCOSINEANNEALINGSCHEDULECONFIG
_LEARNINGRATECONFIG.oneofs_by_name['learning_rate'].fields.append(
_LEARNINGRATECONFIG.fields_by_name['soft_start_annealing_schedule'])
_LEARNINGRATECONFIG.fields_by_name['soft_start_annealing_schedule'].containing_oneof = _LEARNINGRATECONFIG.oneofs_by_name['learning_rate']
_LEARNINGRATECONFIG.oneofs_by_name['learning_rate'].fields.append(
_LEARNINGRATECONFIG.fields_by_name['soft_start_cosine_annealing_schedule'])
_LEARNINGRATECONFIG.fields_by_name['soft_start_cosine_annealing_schedule'].containing_oneof = _LEARNINGRATECONFIG.oneofs_by_name['learning_rate']
DESCRIPTOR.message_types_by_name['LearningRateConfig'] = _LEARNINGRATECONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
LearningRateConfig = _reflection.GeneratedProtocolMessageType('LearningRateConfig', (_message.Message,), dict(
DESCRIPTOR = _LEARNINGRATECONFIG,
__module__ = 'nvidia_tao_tf1.cv.common.proto.learning_rate_config_pb2'
# @@protoc_insertion_point(class_scope:LearningRateConfig)
))
_sym_db.RegisterMessage(LearningRateConfig)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/proto/learning_rate_config_pb2.py |
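A sketch of the learning_rate oneof on LearningRateConfig above; the rates and fractions are illustrative. Writing to either sub-message selects that branch of the oneof.

from nvidia_tao_tf1.cv.common.proto import learning_rate_config_pb2

lr = learning_rate_config_pb2.LearningRateConfig()

# Touching the sub-message selects the soft_start_annealing_schedule branch.
lr.soft_start_annealing_schedule.min_learning_rate = 1e-6
lr.soft_start_annealing_schedule.max_learning_rate = 1e-4
lr.soft_start_annealing_schedule.soft_start = 0.1
lr.soft_start_annealing_schedule.annealing = 0.7

print(lr.WhichOneof("learning_rate"))  # soft_start_annealing_schedule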
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/common/proto/soft_start_annealing_schedule_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/common/proto/soft_start_annealing_schedule_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\nInvidia_tao_tf1/cv/common/proto/soft_start_annealing_schedule_config.proto\"\x7f\n SoftStartAnnealingScheduleConfig\x12\x19\n\x11min_learning_rate\x18\x01 \x01(\x02\x12\x19\n\x11max_learning_rate\x18\x02 \x01(\x02\x12\x12\n\nsoft_start\x18\x03 \x01(\x02\x12\x11\n\tannealing\x18\x04 \x01(\x02\x62\x06proto3')
)
_SOFTSTARTANNEALINGSCHEDULECONFIG = _descriptor.Descriptor(
name='SoftStartAnnealingScheduleConfig',
full_name='SoftStartAnnealingScheduleConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='min_learning_rate', full_name='SoftStartAnnealingScheduleConfig.min_learning_rate', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_learning_rate', full_name='SoftStartAnnealingScheduleConfig.max_learning_rate', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='soft_start', full_name='SoftStartAnnealingScheduleConfig.soft_start', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='annealing', full_name='SoftStartAnnealingScheduleConfig.annealing', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=77,
serialized_end=204,
)
DESCRIPTOR.message_types_by_name['SoftStartAnnealingScheduleConfig'] = _SOFTSTARTANNEALINGSCHEDULECONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SoftStartAnnealingScheduleConfig = _reflection.GeneratedProtocolMessageType('SoftStartAnnealingScheduleConfig', (_message.Message,), dict(
DESCRIPTOR = _SOFTSTARTANNEALINGSCHEDULECONFIG,
__module__ = 'nvidia_tao_tf1.cv.common.proto.soft_start_annealing_schedule_config_pb2'
# @@protoc_insertion_point(class_scope:SoftStartAnnealingScheduleConfig)
))
_sym_db.RegisterMessage(SoftStartAnnealingScheduleConfig)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/proto/soft_start_annealing_schedule_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/common/proto/soft_start_cosine_annealing_schedule_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/common/proto/soft_start_cosine_annealing_schedule_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\nPnvidia_tao_tf1/cv/common/proto/soft_start_cosine_annealing_schedule_config.proto\"r\n&SoftStartCosineAnnealingScheduleConfig\x12\x19\n\x11max_learning_rate\x18\x01 \x01(\x02\x12\x12\n\nsoft_start\x18\x02 \x01(\x02\x12\x19\n\x11min_learning_rate\x18\x03 \x01(\x02\x62\x06proto3')
)
_SOFTSTARTCOSINEANNEALINGSCHEDULECONFIG = _descriptor.Descriptor(
name='SoftStartCosineAnnealingScheduleConfig',
full_name='SoftStartCosineAnnealingScheduleConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='max_learning_rate', full_name='SoftStartCosineAnnealingScheduleConfig.max_learning_rate', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='soft_start', full_name='SoftStartCosineAnnealingScheduleConfig.soft_start', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_learning_rate', full_name='SoftStartCosineAnnealingScheduleConfig.min_learning_rate', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=84,
serialized_end=198,
)
DESCRIPTOR.message_types_by_name['SoftStartCosineAnnealingScheduleConfig'] = _SOFTSTARTCOSINEANNEALINGSCHEDULECONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SoftStartCosineAnnealingScheduleConfig = _reflection.GeneratedProtocolMessageType('SoftStartCosineAnnealingScheduleConfig', (_message.Message,), dict(
DESCRIPTOR = _SOFTSTARTCOSINEANNEALINGSCHEDULECONFIG,
__module__ = 'nvidia_tao_tf1.cv.common.proto.soft_start_cosine_annealing_schedule_config_pb2'
# @@protoc_insertion_point(class_scope:SoftStartCosineAnnealingScheduleConfig)
))
_sym_db.RegisterMessage(SoftStartCosineAnnealingScheduleConfig)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/proto/soft_start_cosine_annealing_schedule_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/common/proto/nms_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/common/proto/nms_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n/nvidia_tao_tf1/cv/common/proto/nms_config.proto\"\x8e\x01\n\tNMSConfig\x12\x1c\n\x14\x63onfidence_threshold\x18\x01 \x01(\x02\x12 \n\x18\x63lustering_iou_threshold\x18\x02 \x01(\x02\x12\r\n\x05top_k\x18\x03 \x01(\r\x12\x1c\n\x14infer_nms_score_bits\x18\x04 \x01(\r\x12\x14\n\x0c\x66orce_on_cpu\x18\x05 \x01(\x08\x62\x06proto3')
)
_NMSCONFIG = _descriptor.Descriptor(
name='NMSConfig',
full_name='NMSConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='confidence_threshold', full_name='NMSConfig.confidence_threshold', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='clustering_iou_threshold', full_name='NMSConfig.clustering_iou_threshold', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='top_k', full_name='NMSConfig.top_k', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='infer_nms_score_bits', full_name='NMSConfig.infer_nms_score_bits', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='force_on_cpu', full_name='NMSConfig.force_on_cpu', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=52,
serialized_end=194,
)
DESCRIPTOR.message_types_by_name['NMSConfig'] = _NMSCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
NMSConfig = _reflection.GeneratedProtocolMessageType('NMSConfig', (_message.Message,), dict(
DESCRIPTOR = _NMSCONFIG,
__module__ = 'nvidia_tao_tf1.cv.common.proto.nms_config_pb2'
# @@protoc_insertion_point(class_scope:NMSConfig)
))
_sym_db.RegisterMessage(NMSConfig)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/proto/nms_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/common/proto/class_weighting_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/common/proto/class_weighting_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n;nvidia_tao_tf1/cv/common/proto/class_weighting_config.proto\"\xa6\x01\n\x14\x43lassWeightingConfig\x12\x42\n\x0f\x63lass_weighting\x18\x01 \x03(\x0b\x32).ClassWeightingConfig.ClassWeightingEntry\x12\x13\n\x0b\x65nable_auto\x18\x02 \x01(\x08\x1a\x35\n\x13\x43lassWeightingEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x02:\x02\x38\x01\x62\x06proto3')
)
_CLASSWEIGHTINGCONFIG_CLASSWEIGHTINGENTRY = _descriptor.Descriptor(
name='ClassWeightingEntry',
full_name='ClassWeightingConfig.ClassWeightingEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='ClassWeightingConfig.ClassWeightingEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='ClassWeightingConfig.ClassWeightingEntry.value', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=177,
serialized_end=230,
)
_CLASSWEIGHTINGCONFIG = _descriptor.Descriptor(
name='ClassWeightingConfig',
full_name='ClassWeightingConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='class_weighting', full_name='ClassWeightingConfig.class_weighting', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='enable_auto', full_name='ClassWeightingConfig.enable_auto', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_CLASSWEIGHTINGCONFIG_CLASSWEIGHTINGENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=64,
serialized_end=230,
)
_CLASSWEIGHTINGCONFIG_CLASSWEIGHTINGENTRY.containing_type = _CLASSWEIGHTINGCONFIG
_CLASSWEIGHTINGCONFIG.fields_by_name['class_weighting'].message_type = _CLASSWEIGHTINGCONFIG_CLASSWEIGHTINGENTRY
DESCRIPTOR.message_types_by_name['ClassWeightingConfig'] = _CLASSWEIGHTINGCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ClassWeightingConfig = _reflection.GeneratedProtocolMessageType('ClassWeightingConfig', (_message.Message,), dict(
ClassWeightingEntry = _reflection.GeneratedProtocolMessageType('ClassWeightingEntry', (_message.Message,), dict(
DESCRIPTOR = _CLASSWEIGHTINGCONFIG_CLASSWEIGHTINGENTRY,
__module__ = 'nvidia_tao_tf1.cv.common.proto.class_weighting_config_pb2'
# @@protoc_insertion_point(class_scope:ClassWeightingConfig.ClassWeightingEntry)
))
,
DESCRIPTOR = _CLASSWEIGHTINGCONFIG,
__module__ = 'nvidia_tao_tf1.cv.common.proto.class_weighting_config_pb2'
# @@protoc_insertion_point(class_scope:ClassWeightingConfig)
))
_sym_db.RegisterMessage(ClassWeightingConfig)
_sym_db.RegisterMessage(ClassWeightingConfig.ClassWeightingEntry)
_CLASSWEIGHTINGCONFIG_CLASSWEIGHTINGENTRY._options = None
# @@protoc_insertion_point(module_scope)
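# --- Hedged usage sketch (not part of the generated code above) ---
# 'class_weighting' is a proto3 map<string, float>, so it behaves like a dict;
# the class names and weights below are illustrative placeholders only.
if __name__ == '__main__':
    cfg = ClassWeightingConfig()
    cfg.class_weighting['car'] = 1.0
    cfg.class_weighting['person'] = 2.5
    cfg.enable_auto = False
    print(dict(cfg.class_weighting))  # e.g. {'car': 1.0, 'person': 2.5}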
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/proto/class_weighting_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/common/proto/eval_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/common/proto/eval_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n0nvidia_tao_tf1/cv/common/proto/eval_config.proto\"\xb7\x01\n\nEvalConfig\x12\x33\n\x16\x61verage_precision_mode\x18\x01 \x01(\x0e\x32\x13.EvalConfig.AP_MODE\x12\x12\n\nbatch_size\x18\x02 \x01(\r\x12\x1e\n\x16matching_iou_threshold\x18\x03 \x01(\x02\x12\x1a\n\x12visualize_pr_curve\x18\x04 \x01(\x08\"$\n\x07\x41P_MODE\x12\n\n\x06SAMPLE\x10\x00\x12\r\n\tINTEGRATE\x10\x01\x62\x06proto3')
)
_EVALCONFIG_AP_MODE = _descriptor.EnumDescriptor(
name='AP_MODE',
full_name='EvalConfig.AP_MODE',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='SAMPLE', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INTEGRATE', index=1, number=1,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=200,
serialized_end=236,
)
_sym_db.RegisterEnumDescriptor(_EVALCONFIG_AP_MODE)
_EVALCONFIG = _descriptor.Descriptor(
name='EvalConfig',
full_name='EvalConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='average_precision_mode', full_name='EvalConfig.average_precision_mode', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batch_size', full_name='EvalConfig.batch_size', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='matching_iou_threshold', full_name='EvalConfig.matching_iou_threshold', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='visualize_pr_curve', full_name='EvalConfig.visualize_pr_curve', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_EVALCONFIG_AP_MODE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=53,
serialized_end=236,
)
_EVALCONFIG.fields_by_name['average_precision_mode'].enum_type = _EVALCONFIG_AP_MODE
_EVALCONFIG_AP_MODE.containing_type = _EVALCONFIG
DESCRIPTOR.message_types_by_name['EvalConfig'] = _EVALCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
EvalConfig = _reflection.GeneratedProtocolMessageType('EvalConfig', (_message.Message,), dict(
DESCRIPTOR = _EVALCONFIG,
__module__ = 'nvidia_tao_tf1.cv.common.proto.eval_config_pb2'
# @@protoc_insertion_point(class_scope:EvalConfig)
))
_sym_db.RegisterMessage(EvalConfig)
# @@protoc_insertion_point(module_scope)
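# --- Hedged usage sketch (not part of the generated code above) ---
# Nested enum values such as SAMPLE and INTEGRATE are exposed as attributes of
# the message class; the other values below are illustrative placeholders.
if __name__ == '__main__':
    cfg = EvalConfig()
    cfg.average_precision_mode = EvalConfig.INTEGRATE
    cfg.batch_size = 8
    cfg.matching_iou_threshold = 0.5
    print(EvalConfig.AP_MODE.Name(cfg.average_precision_mode))  # -> INTEGRATE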
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/proto/eval_config_pb2.py |
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
"""Saver callback."""
import os
from keras.callbacks import Callback
def _save_model(keras_model, model_path, key, save_format=None):
"""Save a model to either .h5 or .hdf5 format."""
_, ext = os.path.splitext(model_path)
if (save_format is not None) and (save_format != ext):
# recursive call to save a correct model
return _save_model(keras_model, model_path + save_format, key, None)
if ext == '.h5':
keras_model.save_weights(model_path)
elif ext == '.hdf5':
        # include optimizer for training resume
keras_model.save(model_path, overwrite=True, include_optimizer=True)
else:
raise NotImplementedError("{0} file is not supported for saving. Please use .hdf5!".format(ext))
return model_path
class KerasModelSaver(Callback):
"""Save the encrypted model after every epoch.
Attributes:
        filepath: formatted string for saving models. E.g.: 'ssd_resnet18_epoch_{epoch:03d}.tlt'
key: API key to encrypt the model.
        save_period: save the model every k epochs. If save_period = 10, the saver saves the
            10th, 20th, etc. epoch models.
verbose: Whether to print out save message.
"""
def __init__(self,
filepath,
key,
save_period,
last_epoch=None,
verbose=1):
"""Initialization with encryption key."""
self.filepath = filepath
self._ENC_KEY = str.encode(key)
self.verbose = verbose
self.save_period = int(save_period)
self.last_epoch = last_epoch
self.ema = None
assert self.save_period > 0, "save_period must be a positive integer!"
def _save_model(self, save_epoch):
fname = self.filepath.format(epoch=save_epoch)
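        # If EMA (exponential moving average) weights have been set, swap them
        # in for saving and restore the original weights afterwards.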
if self.ema:
orig_weights = self.model.get_weights()
self.model.set_weights(self.ema)
fname = _save_model(self.model, fname, self._ENC_KEY, '.hdf5')
self.model.set_weights(orig_weights)
else:
fname = _save_model(self.model, fname, self._ENC_KEY, '.hdf5')
if self.verbose > 0:
print('\nEpoch %05d: saving model to %s' % (save_epoch, fname))
def on_epoch_end(self, epoch, logs=None):
"""Called at the end of an epoch."""
if (epoch + 1) % self.save_period == 0 or self.last_epoch == (epoch + 1):
self._save_model(epoch + 1)
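# --- Hedged usage sketch (not part of the original module) ---
# Minimal example of attaching KerasModelSaver to a toy Keras training run.
# The model, data, output path and key below are placeholders, not values
# taken from this repository.
if __name__ == '__main__':
    import numpy as np
    from keras.layers import Dense, Input
    from keras.models import Model
    inp = Input(shape=(4,))
    toy_model = Model(inp, Dense(1)(inp))
    toy_model.compile(optimizer='sgd', loss='mse')
    saver = KerasModelSaver(
        filepath='/tmp/toy_epoch_{epoch:03d}.hdf5',  # '{epoch:03d}' is filled per save
        key='placeholder-key',
        save_period=2,    # save after every 2nd epoch ...
        last_epoch=5,     # ... and always after the final epoch
        verbose=1)
    toy_model.fit(np.random.rand(8, 4), np.random.rand(8, 1),
                  epochs=5, callbacks=[saver])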
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/callbacks/enc_model_saver_callback.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Status Logger callback."""
from collections.abc import Iterable
from datetime import timedelta
import os
import time
from keras.callbacks import Callback
import numpy as np
import six
from nvidia_tao_tf1.cv.common.logging.logging import (
get_status_logger,
Status,
StatusLogger,
Verbosity
)
# Get default status logger() if it's been previously defined.
logger = get_status_logger()
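# Map raw Keras log keys to the human-readable names used in the status log.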
KEY_MAP = {
"val_loss": "validation_loss",
"val_acc": "validation_accuracy",
"loss": "loss",
"acc": "training_accuracy",
"lr": "learning_rate",
"mAP": "mean average precision"
}
class TAOStatusLogger(Callback):
"""Callback that streams the data training data to a status.json file.
Supports all values that can be represented as a string,
including 1D iterables such as np.ndarray.
# Example
```python
logger = TAOStatusLogger('/path/to/results_dir')
model.fit(X_train, Y_train, callbacks=[logger])
```
# Arguments
results_dir (str): The directory where the logs will be saved.
num_epochs (int): Number of epochs to run the training
verbosity (status_logger.verbosity.Verbosity()): Verbosity level.
is_master (bool): Boolean value to check if the gpu rank is 0.
        append (bool): True: append if the file exists (useful for continuing
            training). False: overwrite the existing file.
"""
def __init__(self, results_dir, num_epochs=120,
verbosity=Verbosity.INFO,
append=False,
is_master=False):
"""Instantiate the TAOStatusLogger."""
# Make sure that the status logger obtained is always
# an instance of nvidia_tao_tf1.cv.common.logging.logging.StatusLogger.
        # Otherwise, this data gets rendered to stdout.
if isinstance(logger, StatusLogger):
self.logger = logger
else:
self.logger = StatusLogger(
filename=os.path.join(results_dir, "status.json"),
is_master=is_master,
verbosity=verbosity,
append=append
)
self.keys = None
self.max_epochs = num_epochs
self._epoch_start_time = None
super(TAOStatusLogger, self).__init__()
def on_train_begin(self, logs=None):
"""Write data beginning of the training."""
self.logger.write(
status_level=Status.STARTED,
message="Starting Training Loop."
)
@staticmethod
def _handle_value(k):
is_zero_dim_ndarray = isinstance(k, np.ndarray) and k.ndim == 0
if isinstance(k, six.string_types):
return k
if isinstance(k, Iterable) and not is_zero_dim_ndarray:
return '"[%s]"' % (', '.join(map(str, k)))
return k
def on_epoch_begin(self, epoch, logs=None):
"""Routines to be run at the beginning of the epoch."""
self._epoch_start_time = time.time()
def on_epoch_end(self, epoch, logs=None):
"""Collect data at the end of an epoch."""
logs = logs or {}
data = {}
data["epoch"] = epoch + 1
data["max_epoch"] = self.max_epochs
epoch_end_time = time.time()
time_per_epoch = epoch_end_time - self._epoch_start_time
eta = (self.max_epochs - (epoch + 1)) * time_per_epoch
data["time_per_epoch"] = str(timedelta(seconds=time_per_epoch))
data["eta"] = str(timedelta(seconds=eta))
graphical_data = {}
kpi_data = {}
for k, v in logs.items():
if "loss" in k:
key = KEY_MAP[k] if k in KEY_MAP.keys() else k
graphical_data[key] = str(self._handle_value(v))
if "acc" in k:
key = KEY_MAP[k] if k in KEY_MAP.keys() else k
graphical_data[key] = str(self._handle_value(v))
kpi_data[key] = str(self._handle_value(v))
if k == "mAP":
key = KEY_MAP[k] if k in KEY_MAP.keys() else k
graphical_data[key] = str(self._handle_value(v))
kpi_data[key] = str(self._handle_value(v))
if k == "lr":
graphical_data[KEY_MAP[k]] = str(self._handle_value(v))
self.logger.graphical = graphical_data
self.logger.kpi = kpi_data
self.logger.write(data=data, message="Training loop in progress")
def on_train_end(self, logs=None):
"""Callback function run at the end of training."""
self.logger.write(
status_level=Status.RUNNING,
message="Training loop complete."
)
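# --- Hedged usage sketch (not part of the original module) ---
# Minimal example of attaching TAOStatusLogger to a toy Keras training run.
# The results directory (assumed to exist), model and data are placeholders.
if __name__ == '__main__':
    import numpy as np
    from keras.layers import Dense, Input
    from keras.models import Model
    inp = Input(shape=(4,))
    toy_model = Model(inp, Dense(1)(inp))
    toy_model.compile(optimizer='sgd', loss='mse')
    status_cb = TAOStatusLogger('/tmp/results', num_epochs=3, is_master=True)
    toy_model.fit(np.random.rand(8, 4), np.random.rand(8, 1),
                  epochs=3, callbacks=[status_cb])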
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/callbacks/loggers.py |