python_code | repo_name | file_path
---|---|---|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO custom callbacks for keras training pipeline."""
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/callbacks/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ModelEMA."""
import math
from keras.callbacks import Callback
class ModelEMACallback(Callback):
"""Model Exponential Moving Average for keras."""
def __init__(self, model, decay=0.999, init_step=0):
"""Init."""
self.ema = model.get_weights()
self.decay = lambda x: decay * (1 - math.exp(-float(x) / 2000))
self.updates = init_step
def on_batch_end(self, batch, logs=None):
"""On batch end call."""
self.updates += 1
d = self.decay(self.updates)
new_weights = self.model.get_weights()
for w1, w2 in zip(self.ema, new_weights):
w1 *= d
w1 += (1.0 - d) * w2
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/callbacks/model_ema_callback.py |
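A minimal usage sketch (not part of the repo; the toy model, data, and hyperparameters below are illustrative only): the callback is attached to a standard Keras fit() call and keeps an EMA copy of the weights.

import numpy as np
from keras.layers import Dense
from keras.models import Sequential
from nvidia_tao_tf1.cv.common.callbacks.model_ema_callback import ModelEMACallback

# Toy model and data purely for demonstration.
model = Sequential([Dense(4, activation='relu', input_shape=(8,)), Dense(1)])
model.compile(optimizer='sgd', loss='mse')
x = np.random.rand(64, 8).astype(np.float32)
y = np.random.rand(64, 1).astype(np.float32)

# The callback snapshots the initial weights and updates its EMA copy after every batch.
ema_cb = ModelEMACallback(model, decay=0.999, init_step=0)
model.fit(x, y, batch_size=8, epochs=2, callbacks=[ema_cb])

# ema_cb.ema now holds the averaged weights; an evaluation model can adopt them
# with eval_model.set_weights(ema_cb.ema).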
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base metric callback."""
from abc import ABC, abstractmethod
from keras import backend as K
from keras.callbacks import Callback
class BaseMetricCallback(ABC, Callback):
'''
Callback function to calculate a model metric every k epochs.
To be implemented in child classes:
_calc_metric(self, logs): calculates the metric and stores it in logs
_skip_metric(self, logs): writes np.nan (or other placeholder values) for the metrics to logs
'''
def __init__(self, eval_model, metric_interval, last_epoch=None, verbose=1):
'''init function.'''
metric_interval = int(metric_interval)
self.metric_interval = metric_interval if metric_interval > 0 else 1
self.eval_model = eval_model
self.verbose = verbose
self.last_epoch = last_epoch
self.ema = None
@abstractmethod
def _calc_metric(self, logs):
raise NotImplementedError("Method not implemented!")
@abstractmethod
def _skip_metric(self, logs):
raise NotImplementedError("Method not implemented!")
def _get_metric(self, logs):
K.set_learning_phase(0)
# First copy weights from training model
if self.ema:
self.eval_model.set_weights(self.ema)
else:
self.eval_model.set_weights(self.model.get_weights())
self._calc_metric(logs)
K.set_learning_phase(1)
def on_epoch_end(self, epoch, logs):
'''evaluates on epoch end.'''
if (epoch + 1) % self.metric_interval != 0 and (epoch + 1) != self.last_epoch:
self._skip_metric(logs)
else:
self._get_metric(logs)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/callbacks/base_metric_callback.py |
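An illustrative subclass sketch (assumption, not repo code): a concrete metric callback only has to implement _calc_metric and _skip_metric; interval handling, weight copying, and EMA support are inherited from the base class.

import numpy as np
from nvidia_tao_tf1.cv.common.callbacks.base_metric_callback import BaseMetricCallback

class DummyAccuracyCallback(BaseMetricCallback):
    '''Toy callback that records a placeholder accuracy every metric_interval epochs.'''

    def _calc_metric(self, logs):
        # A real implementation would run self.eval_model on validation data here.
        logs['dummy_accuracy'] = 1.0

    def _skip_metric(self, logs):
        # Skipped epochs still need an entry so downstream loggers see a value.
        logs['dummy_accuracy'] = np.nan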
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Automatic class weighting callback."""
from keras import backend as K
from keras.callbacks import Callback
import numpy as np
import tensorflow as tf
class AutoClassWeighting(Callback):
"""
Automatically update the class weighting for anchor-based detection algorithms.
Currently, only merged with YOLOv4.
A detailed demonstration can be found at:
https://confluence.nvidia.com/display/~tylerz/Auto+class+weighting+-
-+trainning+anchor-based+object+detection+model+on+unbalanced+dataset
"""
def __init__(self, train_dataset, loss_ops, alpha=0.9, interval=10):
"""Init function."""
super(AutoClassWeighting, self).__init__()
self.train_dataset = train_dataset
self.classes = train_dataset.classes
self.loss_ops = loss_ops
self.alpha = alpha
self.pred = tf.Variable(0., validate_shape=False,
collections=[tf.GraphKeys.LOCAL_VARIABLES])
self.label = tf.Variable(0., validate_shape=False,
collections=[tf.GraphKeys.LOCAL_VARIABLES])
self.loss_per_class = np.zeros((len(self.classes)), dtype=np.float32)
self.anchor_per_class = np.zeros((len(self.classes)), dtype=np.float32)
self.interval = interval
def on_batch_end(self, batch, logs=None):
"""Compute per anchor loss in every batch."""
# compute the per class loc_loss cls_loss (average by num of assigned anchors)
# input_data
pred = K.get_value(self.pred)
encoded_lab = K.get_value(self.label)
# y_pred_encoded = self.model.predict(batch_x)
batch_loss = K.get_session().run(self.loss_ops[2],
feed_dict={self.loss_ops[0]: encoded_lab,
self.loss_ops[1]: pred})
# loc_loss: [#batch, #anchor]; cls_loss: [#batch, #anchor]
loc_loss, cls_loss = batch_loss
# convert the one-hot vector to index
idx_map = np.tile(np.arange(len(self.classes)),
[loc_loss.shape[0], loc_loss.shape[1], 1])
one_hot_vectors = encoded_lab[:, :, 6:-1]
neg_map = np.full(one_hot_vectors.shape, -1)
cls_idx = np.max(np.where(one_hot_vectors == 1, idx_map, neg_map), axis=-1)
# compute the loss per class
for idx in range(len(self.classes)):
cur_loc_loss = float(0)
cur_cls_loss = float(0)
cur_loc_loss = loc_loss[cls_idx == idx]
if len(cur_loc_loss) <= 0:
continue
num_anchor = cur_loc_loss.shape[0]
cur_loc_loss = np.sum(cur_loc_loss)
cur_cls_loss = np.sum(cls_loss[cls_idx == idx])
self.loss_per_class[idx] += cur_loc_loss + cur_cls_loss
self.anchor_per_class[idx] += num_anchor
def on_epoch_end(self, epoch, logs=None):
"""Compute per anchor per class loss and classes weighting at the end of epoch."""
# compute the per class weights (reciprocal of each loss * maximum loss)
old_weights = np.array(self.train_dataset.encode_fn.class_weights)
# print(f"old_weights: {old_weights}")
self.loss_per_class = self.loss_per_class / old_weights
self.loss_per_class = self.loss_per_class / self.anchor_per_class
# max_loss = np.max(self.loss_per_class)
min_loss = np.min(self.loss_per_class)
new_wts = []
for idx in range(len(self.classes)):
new_w = float(self.loss_per_class[idx]) / min_loss
# print(f"{self.classes[idx]}:{self.loss_per_class[idx]}")
# print(f"{self.classes[idx]}_anchor:{self.anchor_per_class[idx]}")
# logs[f"{self.classes[idx]}_loss"] = self.loss_per_class[idx]
# logs[f"{self.classes[idx]}_anchor"] = self.anchor_per_class[idx]
new_wts.append(new_w)
# Make the first epoch count !
if epoch != 0:
new_wts = old_weights * self.alpha + np.array(new_wts) * (1 - self.alpha)
# print(f"new_weights: {new_wts}")
if epoch % self.interval == 0:
self.train_dataset.encode_fn.update_class_weights(new_wts)
self.loss_per_class = np.zeros((len(self.classes)), dtype=np.float32)
self.anchor_per_class = np.zeros((len(self.classes)), dtype=np.float32)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/callbacks/auto_class_weighting_callback.py |
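A small numeric sketch (assumption; standalone numpy, no repo objects) of the re-weighting rule applied in on_epoch_end: per-anchor loss is normalized by the current weights, the easiest class is used as the reference, and the result is blended with the old weights via EMA.

import numpy as np

old_weights = np.array([1.0, 1.0, 1.0])
loss_per_class = np.array([30.0, 12.0, 6.0])      # accumulated loc + cls loss per class
anchor_per_class = np.array([100.0, 80.0, 40.0])  # number of assigned anchors per class

per_anchor_loss = (loss_per_class / old_weights) / anchor_per_class
new_wts = per_anchor_loss / per_anchor_loss.min()  # hardest class gets the largest weight
alpha = 0.9
smoothed = old_weights * alpha + new_wts * (1 - alpha)  # EMA blend (skipped on epoch 0)
print(smoothed)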
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unified eval and mAP callback."""
from multiprocessing import cpu_count
import sys
from keras import backend as K
from keras.utils.data_utils import OrderedEnqueuer
import numpy as np
from tqdm import trange
from nvidia_tao_tf1.cv.common.callbacks.base_metric_callback import BaseMetricCallback
class DetectionMetricCallback(BaseMetricCallback):
'''
Callback function to calculate model mAP / validation loss every k epochs.
Args:
ap_evaluator: object of class APEvaluator.
built_eval_model: eval model built with additional layers for encoded output AND bbox
output (model requires two outputs!!!)
eval_sequence: Eval data sequence (based on keras sequence) that gives images, labels.
labels is list (batch_size) of tuples (encoded_label, raw_label)
loss_ops: three element tuple or list. [gt_placeholder, pred_placeholder, loss]
eval_model: the training graph part of built_eval_model. Note, this model must share
TF nodes with built_eval_model
metric_interval: calculate model mAP per k epoch
verbose: True to print the per-class AP messages.
'''
def __init__(self, ap_evaluator, built_eval_model, eval_sequence, loss_ops, *args, **kwargs):
"""Init function."""
super().__init__(*args, **kwargs)
self.ap_evaluator = ap_evaluator
self.built_eval_model = built_eval_model
self.classes = eval_sequence.classes
self.enqueuer = OrderedEnqueuer(eval_sequence, use_multiprocessing=False)
self.n_batches = len(eval_sequence)
self.loss_ops = loss_ops
def _skip_metric(self, logs):
for i in self.classes:
logs['AP_' + i] = np.nan
logs['mAP'] = np.nan
logs['validation_loss'] = np.nan
def _calc_metric(self, logs):
total_loss = 0.0
gt_labels = []
pred_labels = []
if self.verbose:
tr = trange(self.n_batches, file=sys.stdout)
tr.set_description('Producing predictions')
else:
tr = range(self.n_batches)
self.enqueuer.start(workers=max(cpu_count() - 1, 1), max_queue_size=20)
output_generator = self.enqueuer.get()
# Loop over all batches.
for _ in tr:
# Generate batch.
batch_X, batch_labs = next(output_generator)
encoded_lab, gt_lab = zip(*batch_labs)
# Predict.
y_pred_encoded, y_pred = self.built_eval_model.predict(batch_X)
batch_loss = K.get_session().run(self.loss_ops[2],
feed_dict={self.loss_ops[0]: np.array(encoded_lab),
self.loss_ops[1]: y_pred_encoded})
total_loss += np.sum(batch_loss) * len(gt_lab)
gt_labels.extend(gt_lab)
for i in range(len(y_pred)):
y_pred_valid = y_pred[i][y_pred[i][:, 1] > self.ap_evaluator.conf_thres]
pred_labels.append(y_pred_valid)
self.enqueuer.stop()
logs['validation_loss'] = total_loss / len(gt_labels)
m_ap, ap = self.ap_evaluator(gt_labels, pred_labels, verbose=self.verbose)
if self.verbose:
print("*******************************")
for i in range(len(ap)):
logs['AP_' + self.classes[i]] = ap[i]
if self.verbose:
print("{:<14}{:<6}{}".format(self.classes[i], 'AP', round(ap[i], 5)))
if self.verbose:
print("{:<14}{:<6}{}".format('', 'mAP', round(m_ap, 5)))
print("*******************************")
print("Validation loss:", logs['validation_loss'])
logs['mAP'] = m_ap
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/callbacks/detection_metric_callback.py |
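A wiring sketch, kept as comments because every argument comes from the detection training pipeline (the variable names below are placeholders, not repo symbols):

# detection_cb = DetectionMetricCallback(
#     ap_evaluator=ap_evaluator,          # APEvaluator instance (provides conf_thres)
#     built_eval_model=built_eval_model,  # model with encoded + decoded bbox outputs
#     eval_sequence=val_sequence,         # keras Sequence yielding (images, labels)
#     loss_ops=(gt_placeholder, pred_placeholder, loss_tensor),
#     eval_model=eval_model,              # shares TF nodes with built_eval_model
#     metric_interval=10,
#     last_epoch=80,
#     verbose=1)
# model.fit_generator(..., callbacks=[detection_cb])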
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataset/__init__.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Label map utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import logging
from google.protobuf import text_format
import numpy as np
from six import string_types
from six.moves import range
import tensorflow.compat.v1 as tf
import nvidia_tao_tf1.cv.common.proto.string_int_label_map_pb2 as string_int_label_map_pb2
_LABEL_OFFSET = 1
def _validate_label_map(label_map):
"""Checks if a label map is valid.
Args:
label_map: StringIntLabelMap to validate.
Raises:
ValueError: if label map is invalid.
"""
for item in label_map.item:
if item.id < 0:
raise ValueError('Label map ids should be >= 0.')
if (item.id == 0 and item.name != 'background' and
item.display_name != 'background'):
raise ValueError('Label map id 0 is reserved for the background label')
def create_category_index(categories):
"""Creates dictionary of COCO compatible categories keyed by category id.
Args:
categories: a list of dicts, each of which has the following keys:
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name
e.g., 'cat', 'dog', 'pizza'.
Returns:
category_index: a dict containing the same entries as categories, but keyed
by the 'id' field of each category.
"""
category_index = {}
for cat in categories:
category_index[cat['id']] = cat
return category_index
def get_max_label_map_index(label_map):
"""Get maximum index in label map.
Args:
label_map: a StringIntLabelMapProto
Returns:
an integer
"""
return max([item.id for item in label_map.item])
def convert_label_map_to_categories(label_map,
max_num_classes,
use_display_name=True):
"""Given label map proto returns categories list compatible with eval.
This function converts label map proto and returns a list of dicts, each of
which has the following keys:
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name
e.g., 'cat', 'dog', 'pizza'.
'keypoints': (optional) a dictionary of keypoint string 'label' to integer
'id'.
We only allow class into the list if its id-label_id_offset is
between 0 (inclusive) and max_num_classes (exclusive).
If there are several items mapping to the same id in the label map,
we will only keep the first one in the categories list.
Args:
label_map: a StringIntLabelMapProto or None. If None, a default categories
list is created with max_num_classes categories.
max_num_classes: maximum number of (consecutive) label indices to include.
use_display_name: (boolean) choose whether to load 'display_name' field as
category name. If False or if the display_name field does not exist, uses
'name' field as category names instead.
Returns:
categories: a list of dictionaries representing all possible categories.
"""
categories = []
list_of_ids_already_added = []
if not label_map:
label_id_offset = 1
for class_id in range(max_num_classes):
categories.append({
'id': class_id + label_id_offset,
'name': 'category_{}'.format(class_id + label_id_offset)
})
return categories
for item in label_map.item:
if not 0 < item.id <= max_num_classes:
logging.info(
'Ignore item %d since it falls outside of requested '
'label range.', item.id)
continue
if use_display_name and item.HasField('display_name'):
name = item.display_name
else:
name = item.name
if item.id not in list_of_ids_already_added:
list_of_ids_already_added.append(item.id)
category = {'id': item.id, 'name': name}
if item.HasField('frequency'):
if item.frequency == string_int_label_map_pb2.LVISFrequency.Value(
'FREQUENT'):
category['frequency'] = 'f'
elif item.frequency == string_int_label_map_pb2.LVISFrequency.Value(
'COMMON'):
category['frequency'] = 'c'
elif item.frequency == string_int_label_map_pb2.LVISFrequency.Value(
'RARE'):
category['frequency'] = 'r'
if item.HasField('instance_count'):
category['instance_count'] = item.instance_count
if item.keypoints:
keypoints = {}
list_of_keypoint_ids = []
for kv in item.keypoints:
if kv.id in list_of_keypoint_ids:
raise ValueError('Duplicate keypoint ids are not allowed. '
'Found {} more than once'.format(kv.id))
keypoints[kv.label] = kv.id
list_of_keypoint_ids.append(kv.id)
category['keypoints'] = keypoints
categories.append(category)
return categories
def load_labelmap(path):
"""Loads label map proto.
Args:
path: path to StringIntLabelMap proto text file.
Returns:
a StringIntLabelMapProto
"""
with tf.io.gfile.GFile(path, 'r') as fid:
label_map_string = fid.read()
label_map = string_int_label_map_pb2.StringIntLabelMap()
try:
text_format.Merge(label_map_string, label_map)
except text_format.ParseError:
label_map.ParseFromString(label_map_string)
_validate_label_map(label_map)
return label_map
def get_label_map_dict(label_map_path_or_proto,
use_display_name=False,
fill_in_gaps_and_background=False):
"""Reads a label map and returns a dictionary of label names to id.
Args:
label_map_path_or_proto: path to StringIntLabelMap proto text file or the
proto itself.
use_display_name: whether to use the label map items' display names as keys.
fill_in_gaps_and_background: whether to fill in gaps and background with
respect to the id field in the proto. The id: 0 is reserved for the
'background' class and will be added if it is missing. All other missing
ids in range(1, max(id)) will be added with a dummy class name
("class_<id>") if they are missing.
Returns:
A dictionary mapping label names to id.
Raises:
ValueError: if fill_in_gaps_and_background and label_map has non-integer or
negative values.
"""
if isinstance(label_map_path_or_proto, string_types):
label_map = load_labelmap(label_map_path_or_proto)
else:
_validate_label_map(label_map_path_or_proto)
label_map = label_map_path_or_proto
label_map_dict = {}
for item in label_map.item:
if use_display_name:
label_map_dict[item.display_name] = item.id
else:
label_map_dict[item.name] = item.id
if fill_in_gaps_and_background:
values = set(label_map_dict.values())
if 0 not in values:
label_map_dict['background'] = 0
if not all(isinstance(value, int) for value in values):
raise ValueError('The values in label map must be integers in order to '
'fill_in_gaps_and_background.')
if not all(value >= 0 for value in values):
raise ValueError('The values in the label map must be positive.')
if len(values) != max(values) + 1:
# there are gaps in the labels, fill in gaps.
for value in range(1, max(values)):
if value not in values:
# TODO(rathodv): Add a prefix 'class_' here once the tool to generate
# teacher annotation adds this prefix in the data.
label_map_dict[str(value)] = value
return label_map_dict
def get_keypoint_label_map_dict(label_map_path_or_proto):
"""Reads a label map and returns a dictionary of keypoint names to ids.
Note that the keypoints belong to different classes will be merged into a
single dictionary. It is expected that there is no duplicated keypoint names
or ids from different classes.
Args:
label_map_path_or_proto: path to StringIntLabelMap proto text file or the
proto itself.
Returns:
A dictionary mapping keypoint names to the keypoint id (not the object id).
Raises:
ValueError: if there are duplicated keypoint names or ids.
"""
if isinstance(label_map_path_or_proto, string_types):
label_map = load_labelmap(label_map_path_or_proto)
else:
label_map = label_map_path_or_proto
label_map_dict = {}
for item in label_map.item:
for kpts in item.keypoints:
if kpts.label in label_map_dict.keys():
raise ValueError('Duplicated keypoint label: %s' % kpts.label)
if kpts.id in label_map_dict.values():
raise ValueError('Duplicated keypoint ID: %d' % kpts.id)
label_map_dict[kpts.label] = kpts.id
return label_map_dict
def get_label_map_hierarchy_lut(label_map_path_or_proto,
include_identity=False):
"""Reads a label map and returns ancestors and descendants in the hierarchy.
The function returns the ancestors and descendants as separate look up tables
(LUT) numpy arrays of shape [max_id, max_id] where lut[i,j] = 1 when there is
a hierarchical relationship between class i and j.
Args:
label_map_path_or_proto: path to StringIntLabelMap proto text file or the
proto itself.
include_identity: Boolean to indicate whether to include a class element
among its ancestors and descendants. Setting this will result in the lut
diagonal being set to 1.
Returns:
ancestors_lut: Look up table with the ancestors.
descendants_lut: Look up table with the descendants.
"""
if isinstance(label_map_path_or_proto, string_types):
label_map = load_labelmap(label_map_path_or_proto)
else:
_validate_label_map(label_map_path_or_proto)
label_map = label_map_path_or_proto
hierarchy_dict = {
'ancestors': collections.defaultdict(list),
'descendants': collections.defaultdict(list)
}
max_id = -1
for item in label_map.item:
max_id = max(max_id, item.id)
for ancestor in item.ancestor_ids:
hierarchy_dict['ancestors'][item.id].append(ancestor)
for descendant in item.descendant_ids:
hierarchy_dict['descendants'][item.id].append(descendant)
def get_graph_relations_tensor(graph_relations):
graph_relations_tensor = np.zeros([max_id, max_id])
for id_val, ids_related in graph_relations.items():
id_val = int(id_val) - _LABEL_OFFSET
for id_related in ids_related:
id_related -= _LABEL_OFFSET
graph_relations_tensor[id_val, id_related] = 1
if include_identity:
graph_relations_tensor += np.eye(max_id)
return graph_relations_tensor
ancestors_lut = get_graph_relations_tensor(hierarchy_dict['ancestors'])
descendants_lut = get_graph_relations_tensor(hierarchy_dict['descendants'])
return ancestors_lut, descendants_lut
def create_categories_from_labelmap(label_map_path, use_display_name=True):
"""Reads a label map and returns categories list compatible with eval.
This function converts label map proto and returns a list of dicts, each of
which has the following keys:
'id': an integer id uniquely identifying this category.
'name': string representing category name e.g., 'cat', 'dog'.
'keypoints': a dictionary of keypoint string label to integer id. It is only
returned when available in label map proto.
Args:
label_map_path: Path to `StringIntLabelMap` proto text file.
use_display_name: (boolean) choose whether to load 'display_name' field
as category name. If False or if the display_name field does not exist,
uses 'name' field as category names instead.
Returns:
categories: a list of dictionaries representing all possible categories.
"""
label_map = load_labelmap(label_map_path)
max_num_classes = max(item.id for item in label_map.item)
return convert_label_map_to_categories(label_map, max_num_classes, use_display_name)
def create_category_index_from_labelmap(label_map_path, use_display_name=True):
"""Reads a label map and returns a category index.
Args:
label_map_path: Path to `StringIntLabelMap` proto text file.
use_display_name: (boolean) choose whether to load 'display_name' field
as category name. If False or if the display_name field does not exist,
uses 'name' field as category names instead.
Returns:
A category index, which is a dictionary that maps integer ids to dicts
containing categories, e.g.
{1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}, ...}
"""
categories = create_categories_from_labelmap(label_map_path, use_display_name)
return create_category_index(categories)
def create_class_agnostic_category_index():
"""Creates a category index with a single `object` class."""
return {1: {'id': 1, 'name': 'object'}}
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataset/label_map_util.py |
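A usage sketch (assumption, not repo code): write a tiny label map to a temporary pbtxt file and query it with the helpers above.

import tempfile

from nvidia_tao_tf1.cv.common.dataset import label_map_util

LABEL_MAP_TEXT = """
item { id: 1 name: 'car' display_name: 'car' }
item { id: 2 name: 'person' display_name: 'person' }
"""
with tempfile.NamedTemporaryFile('w', suffix='.pbtxt', delete=False) as f:
    f.write(LABEL_MAP_TEXT)
    label_map_path = f.name

name_to_id = label_map_util.get_label_map_dict(label_map_path)  # {'car': 1, 'person': 2}
category_index = label_map_util.create_category_index_from_labelmap(label_map_path)
print(name_to_id, category_index[1]['name'])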
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for creating TFRecord data sets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
def int64_feature(value):
"""int64_feature."""
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def int64_list_feature(value):
"""int64_list_feature."""
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def bytes_feature(value):
"""bytes_feature."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def bytes_list_feature(value):
"""bytes_list_feature."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
def float_feature(value):
"""float_feature."""
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def float_list_feature(value):
"""float_list_feature."""
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def read_examples_list(path):
"""Read list of training or validation examples.
The file is assumed to contain a single example per line where the first
token in the line is an identifier that allows us to find the image and
annotation xml for that example.
For example, the line:
xyz 3
would allow us to find files xyz.jpg and xyz.xml (the 3 would be ignored).
Args:
path: absolute path to examples list file.
Returns:
list of example identifiers (strings).
"""
with tf.gfile.GFile(path) as fid:
lines = fid.readlines()
return [line.strip().split(' ')[0] for line in lines]
def recursive_parse_xml_to_dict(xml):
"""Recursively parses XML contents to python dict.
We assume that `object` tags are the only ones that can appear
multiple times at the same level of a tree.
Args:
xml: xml tree obtained by parsing XML file contents using lxml.etree
Returns:
Python dictionary holding XML contents.
"""
if not xml:
return {xml.tag: xml.text}
result = {}
for child in xml:
child_result = recursive_parse_xml_to_dict(child)
if child.tag != 'object':
result[child.tag] = child_result[child.tag]
else:
if child.tag not in result:
result[child.tag] = []
result[child.tag].append(child_result[child.tag])
return {xml.tag: result}
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/dataset/dataset_util.py |
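A usage sketch (assumption; the field names and values below are illustrative): the feature helpers compose a tf.train.Example for a TFRecord writer.

import tensorflow.compat.v1 as tf

from nvidia_tao_tf1.cv.common.dataset import dataset_util

example = tf.train.Example(features=tf.train.Features(feature={
    'image/height': dataset_util.int64_feature(480),
    'image/width': dataset_util.int64_feature(640),
    'image/filename': dataset_util.bytes_feature(b'000001.jpg'),
    'image/object/bbox/xmin': dataset_util.float_list_feature([0.1, 0.4]),
    'image/object/class/label': dataset_util.int64_list_feature([1, 2]),
}))
with tf.python_io.TFRecordWriter('/tmp/example.tfrecord') as writer:
    writer.write(example.SerializeToString())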
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to instantiate and return a clearml task."""
from datetime import datetime
import logging
import os
from clearml import Task
logger = logging.getLogger(__name__)
def get_clearml_task(clearml_config, network_name: str, action: str="train"):
"""Get clearml task.
Args:
clearml_config (protobuf): Configuration element for clearml task.
network_name (str): Name of the network running the training.
Returns
task (clearml.Task): Task object.
"""
time_string = datetime.now().strftime("%d/%y/%m_%H:%M:%S")
task = None
try:
time_now = datetime.now().strftime("%d/%y/%m_%H:%M:%S")
task_name = f"{clearml_config.task}_{time_string}" if clearml_config.task \
else f"{network_name}_{action}_{time_now}"
task = Task.init(
project_name=clearml_config.project,
task_name=task_name,
deferred_init=clearml_config.deferred_init,
reuse_last_task_id=clearml_config.reuse_last_task_id,
tags=list(clearml_config.tags) if clearml_config.tags else None,
continue_last_task=clearml_config.continue_last_task,
)
tao_base_container = os.getenv("TAO_DOCKER", None)
if tao_base_container is not None:
task.set_base_docker(tao_base_container)
return task
except Exception as e:
logger.warning(
"ClearML task init failed with error {}".format(e)
)
logger.warning(
"Training will still continue."
)
return task
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/mlops/clearml.py |
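A usage sketch (assumption): get_clearml_task only needs attribute access on the config, so a SimpleNamespace stands in here for the real protobuf element; the project, tags, and network name are illustrative.

from types import SimpleNamespace

from nvidia_tao_tf1.cv.common.mlops.clearml import get_clearml_task

clearml_config = SimpleNamespace(
    project="TAO Toolkit",
    task="",                    # empty -> task name derived from network_name/action
    deferred_init=False,
    reuse_last_task_id=False,
    continue_last_task=False,
    tags=["tao", "yolo_v4"],
)
task = get_clearml_task(clearml_config, network_name="yolo_v4", action="train")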
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
"""Routines for connecting with Weights and Biases client."""
from datetime import datetime
import logging
import os
import wandb
DEFAULT_WANDB_CONFIG = "~/.netrc"
DEFAULT_TAGS = ["tao-toolkit", "training"]
logger = logging.getLogger(__name__)
_WANDB_INITIALIZED = False
def is_wandb_initialized():
"""Check if wandb has been initialized."""
global _WANDB_INITIALIZED # pylint: disable=W0602,W0603
return _WANDB_INITIALIZED
def check_wandb_logged_in():
"""Check if weights and biases have been logged in."""
wandb_logged_in = False
try:
wandb_api_key = os.getenv("WANDB_API_KEY", None)
if wandb_api_key is not None or os.path.exists(os.path.expanduser(DEFAULT_WANDB_CONFIG)):
wandb_logged_in = wandb.login(key=wandb_api_key)
return wandb_logged_in
except wandb.errors.UsageError:
logger.warning("WandB wasn't logged in.")
return False
def initialize_wandb(project: str = "TAO Toolkit",
entity: str = None,
sync_tensorboard: bool = True,
save_code: bool = False,
notes: str = None,
tags: list = None,
name: str = "train",
config=None,
wandb_logged_in: bool = False,
results_dir: str = os.getcwd()):
"""Function to initialize wandb client with the weights and biases server.
If wandb initialization fails, then the function just catches the exception
and prints an error log with the reason as to why wandb.init() failed.
Args:
project (str): Name of the project to sync data with.
entity (str): Name of the wandb entity.
sync_tensorboard (bool): Boolean flag to synchronize
tensorboard and wandb visualizations.
notes (str): One line description about the wandb job.
tags (list(str)): List of tags about the job.
name (str): Name of the task running.
config (OmegaConf.DictConf or Dict): Configuration element of the task that's being run.
Typically, this is the yaml container generated from the `experiment_spec`
file used to run the job.
wandb_logged_in (bool): Boolean flag to check if wandb was logged in.
results_dir (str): Output directory of the experiment.
Returns:
No explicit returns.
"""
logger.info("Initializing wandb.")
try:
assert wandb_logged_in, (
"WandB client wasn't logged in. Please make sure to set "
"the WANDB_API_KEY env variable or run `wandb login` in "
"over the CLI and copy the ~/.netrc file to the container."
)
start_time = datetime.now()
time_string = start_time.strftime("%d/%y/%m_%H:%M:%S")
wandb_dir = os.path.join(results_dir, "wandb")
if not os.path.exists(wandb_dir):
os.makedirs(wandb_dir)
if tags is None:
tags = DEFAULT_TAGS
wandb_name = f"{name}_{time_string}"
wandb.init(
project=project,
entity=entity,
sync_tensorboard=sync_tensorboard,
save_code=save_code,
name=wandb_name,
notes=notes,
tags=tags,
config=config
)
global _WANDB_INITIALIZED # pylint: disable=W0602,W0603
_WANDB_INITIALIZED = True
except Exception as e:
logger.warning("Wandb logging failed with error %s", e)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/mlops/wandb.py |
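A usage sketch (assumption; the project name, config values, and paths are illustrative): log in first, then initialize the run before training starts.

from nvidia_tao_tf1.cv.common.mlops.wandb import check_wandb_logged_in, initialize_wandb

logged_in = check_wandb_logged_in()
initialize_wandb(
    project="TAO Toolkit",
    name="yolo_v4_train",
    notes="example run",
    config={"batch_size": 8, "epochs": 80},
    wandb_logged_in=logged_in,
    results_dir="/tmp/results",
)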
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"Module containing helpers to initialize third party MLOPS."
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/mlops/__init__.py |
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/models/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IVA RetinaNet model construction wrapper functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nvidia_tao_tf1.core.templates.cspdarknet import CSPDarkNet
from nvidia_tao_tf1.core.templates.cspdarknet_tiny import CSPDarkNetTiny
from nvidia_tao_tf1.core.templates.darknet import DarkNet
from nvidia_tao_tf1.core.templates.efficientnet import EfficientNetB0
from nvidia_tao_tf1.core.templates.googlenet import GoogLeNet
from nvidia_tao_tf1.core.templates.mobilenet import MobileNet, MobileNetV2
from nvidia_tao_tf1.core.templates.resnet import ResNet
from nvidia_tao_tf1.core.templates.squeezenet import SqueezeNet
from nvidia_tao_tf1.core.templates.vgg import VggNet
def get_efficientnet_b0(
input_tensor=None,
data_format='channels_first',
nclasses=1000,
use_bias=False,
kernel_regularizer=None,
bias_regularizer=None,
use_imagenet_head=False,
freeze_bn=False,
freeze_blocks=None,
stride16=False,
):
"""Get an EfficientNet B0 model."""
base_model = EfficientNetB0(
input_tensor=input_tensor,
add_head=use_imagenet_head,
data_format=data_format,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
classes=nclasses,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
stride16=stride16,
activation_type='relu'
)
return base_model
def get_cspdarknet(nlayers=19,
input_tensor=None,
data_format='channels_first',
kernel_regularizer=None,
bias_regularizer=None,
freeze_bn=False,
freeze_blocks=None,
force_relu=False,
activation="leaky_relu"):
"""Wrapper to get CSPDarkNet model from IVA templates."""
base_model = CSPDarkNet(nlayers=nlayers,
input_tensor=input_tensor,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
use_batch_norm=True,
add_head=False,
use_bias=False,
freeze_blocks=freeze_blocks,
freeze_bn=freeze_bn,
force_relu=force_relu,
activation=activation)
return base_model
def get_cspdarknet_tiny(
input_tensor=None,
data_format='channels_first',
kernel_regularizer=None,
bias_regularizer=None,
freeze_bn=False,
freeze_blocks=None,
force_relu=False,
activation="leaky_relu"
):
"""Wrapper to get CSPDarkNetTiny model from IVA templates."""
base_model = CSPDarkNetTiny(
input_tensor=input_tensor,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
use_batch_norm=True,
add_head=False,
use_bias=False,
freeze_blocks=freeze_blocks,
freeze_bn=freeze_bn,
force_relu=force_relu,
activation=activation
)
return base_model
def get_darknet(nlayers=19,
input_tensor=None,
data_format='channels_first',
kernel_regularizer=None,
bias_regularizer=None,
freeze_bn=False,
freeze_blocks=None,
force_relu=False):
"""Wrapper to get DarkNet model from IVA templates."""
base_model = DarkNet(nlayers=nlayers,
input_tensor=input_tensor,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
use_batch_norm=True,
add_head=False,
use_bias=False,
freeze_blocks=freeze_blocks,
freeze_bn=freeze_bn,
force_relu=force_relu)
return base_model
def get_googlenet(input_tensor=None,
data_format='channels_first',
kernel_regularizer=None,
bias_regularizer=None,
use_batch_norm=True,
use_bias=False,
freeze_bn=False,
freeze_blocks=None):
"""Wrapper to get GoogLeNet model from IVA templates."""
base_model = GoogLeNet(inputs=input_tensor,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
use_batch_norm=use_batch_norm,
activation_type='relu',
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
use_bias=use_bias)
return base_model
def get_resnet(nlayers=18,
input_tensor=None,
data_format='channels_first',
kernel_regularizer=None,
bias_regularizer=None,
all_projections=True,
use_batch_norm=True,
use_pooling=False,
use_bias=False,
freeze_bn=False,
freeze_blocks=None):
"""Wrapper to get ResNet model from IVA templates."""
base_model = ResNet(nlayers=nlayers,
input_tensor=input_tensor,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
use_batch_norm=use_batch_norm,
activation_type='relu',
all_projections=all_projections,
use_pooling=use_pooling,
freeze_blocks=freeze_blocks,
freeze_bn=freeze_bn,
use_bias=use_bias)
return base_model
def get_vgg(nlayers=16,
input_tensor=None,
data_format="channels_first",
kernel_regularizer=None,
bias_regularizer=None,
use_batch_norm=True,
use_pooling=False,
use_bias=False,
freeze_bn=False,
freeze_blocks=None):
"""Wrapper to get VGG model from IVA templates."""
base_model = VggNet(nlayers=nlayers,
inputs=input_tensor,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
use_batch_norm=use_batch_norm,
activation_type='relu',
use_pooling=use_pooling,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
use_bias=use_bias)
return base_model
def get_mobilenet(input_tensor=None,
data_format='channels_first',
kernel_regularizer=None,
bias_regularizer=None,
use_batch_norm=True,
use_bias=False,
freeze_bn=False,
freeze_blocks=None,
stride=16):
"""Wrapper to get MobileNet model from IVA templates."""
base_model = MobileNet(inputs=input_tensor,
dropout=0.0,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
use_batch_norm=use_batch_norm,
use_bias=use_bias,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
stride=stride,
add_head=False)
return base_model
def get_mobilenet_v2(input_tensor=None,
data_format='channels_first',
kernel_regularizer=None,
bias_regularizer=None,
use_batch_norm=True,
all_projections=False,
use_bias=False,
freeze_bn=False,
freeze_blocks=None,
stride=16):
"""Wrapper to get MobileNet V2 model from IVA templates."""
base_model = MobileNetV2(inputs=input_tensor,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
all_projections=all_projections,
use_batch_norm=use_batch_norm,
use_bias=use_bias,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
stride=stride,
add_head=False)
return base_model
def get_squeezenet(input_tensor=None,
data_format='channels_first',
dropout=1e-3,
kernel_regularizer=None,
bias_regularizer=None,
freeze_blocks=None):
"""Wrapper to get SqueezeNet model from IVA templates."""
base_model = SqueezeNet(inputs=input_tensor,
dropout=dropout,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
freeze_blocks=freeze_blocks)
return base_model
model_choose = {"resnet": get_resnet,
"vgg": get_vgg,
"googlenet": get_googlenet,
"mobilenet_v1": get_mobilenet,
"mobilenet_v2": get_mobilenet_v2,
"squeezenet": get_squeezenet,
"darknet": get_darknet,
'cspdarknet': get_cspdarknet,
"cspdarknet_tiny": get_cspdarknet_tiny,
"cspdarknet_tiny_3l": get_cspdarknet_tiny,
"efficientnet_b0": get_efficientnet_b0}
def get_backbone(input_tensor,
backbone,
data_format=None,
kernel_regularizer=None,
bias_regularizer=None,
freeze_blocks=None,
**kwargs):
"""Wrapper to chose model defined in iva templates."""
# defining model dictionary
kwa = dict()
if backbone == 'googlenet':
kwa['use_batch_norm'] = kwargs['use_batch_norm']
kwa['use_bias'] = kwargs['use_bias']
kwa['freeze_bn'] = kwargs['freeze_bn']
elif backbone in ['darknet', 'cspdarknet']:
kwa['nlayers'] = kwargs['nlayers']
kwa['freeze_bn'] = kwargs['freeze_bn']
kwa['force_relu'] = kwargs['force_relu']
if backbone == "cspdarknet":
kwa['activation'] = kwargs['activation']
elif backbone == "cspdarknet_tiny":
kwa['freeze_bn'] = kwargs['freeze_bn']
kwa['force_relu'] = kwargs['force_relu']
kwa['activation'] = kwargs['activation']
elif backbone == "cspdarknet_tiny_3l":
kwa['freeze_bn'] = kwargs['freeze_bn']
kwa['force_relu'] = kwargs['force_relu']
kwa['activation'] = kwargs['activation']
elif backbone == 'resnet':
kwa['nlayers'] = kwargs['nlayers']
kwa['use_batch_norm'] = kwargs['use_batch_norm']
kwa['use_pooling'] = kwargs['use_pooling']
kwa['freeze_bn'] = kwargs['freeze_bn']
kwa['use_bias'] = kwargs['use_bias']
kwa['all_projections'] = kwargs['all_projections']
elif backbone == 'vgg':
kwa['nlayers'] = kwargs['nlayers']
kwa['use_batch_norm'] = kwargs['use_batch_norm']
kwa['use_pooling'] = kwargs['use_pooling']
kwa['freeze_bn'] = kwargs['freeze_bn']
kwa['use_bias'] = kwargs['use_bias']
elif backbone == 'mobilenet_v1':
kwa['use_batch_norm'] = kwargs['use_batch_norm']
kwa['use_bias'] = kwargs['use_bias']
kwa['freeze_bn'] = kwargs['freeze_bn']
elif backbone == 'mobilenet_v2':
kwa['use_batch_norm'] = kwargs['use_batch_norm']
kwa['use_bias'] = kwargs['use_bias']
kwa['freeze_bn'] = kwargs['freeze_bn']
kwa['all_projections'] = kwargs['all_projections']
elif backbone == 'squeezenet':
kwa['dropout'] = kwargs['dropout']
elif backbone == "efficientnet_b0":
kwa['use_bias'] = kwargs['use_bias']
kwa['freeze_bn'] = kwargs['freeze_bn']
else:
raise ValueError('Unsupported backbone model: {}'.format(backbone))
model = model_choose[backbone](input_tensor=input_tensor,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
freeze_blocks=freeze_blocks,
**kwa)
return model
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/models/backbones.py |
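A usage sketch (assumption; the input shape, regularizer, and frozen blocks are illustrative): build a ResNet-18 feature extractor through get_backbone. Every keyword consumed by the chosen branch (here 'resnet') must be supplied.

from keras.layers import Input
from keras.regularizers import l2

from nvidia_tao_tf1.cv.common.models.backbones import get_backbone

image_input = Input(shape=(3, 544, 960), name="Input")
backbone = get_backbone(input_tensor=image_input,
                        backbone="resnet",
                        data_format="channels_first",
                        kernel_regularizer=l2(1e-5),
                        bias_regularizer=None,
                        freeze_blocks=[0, 1],
                        nlayers=18,
                        use_batch_norm=True,
                        use_pooling=False,
                        freeze_bn=False,
                        use_bias=False,
                        all_projections=True)
print(backbone.output_shape)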
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility function definitions for file processing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import logging
import os
import shutil
CHECKPOINT_FILENAME = 'checkpoint'
VAL_LOG_FILENAME = 'validation.log'
def save_best_model(save_dir, global_step, current_cost, epoch_based_checkpoint=False, extension="tlt"):
"""Only save the model with lowest current_cost in results directory.
Args:
save_dir (str): The directory where all the model files are saved.
global_step (int): Current global step number.
current_cost (float): Evaluation cost at current step.
"""
log_filename = os.path.join(save_dir, VAL_LOG_FILENAME)
# Rename and keep the model in the first round of validation.
if not os.path.isfile(log_filename):
with open(log_filename, 'w') as f:
f.write('best_model@global_step {} : {}\n'.format(global_step, current_cost))
_shutil_keras_models(
save_dir, global_step,
epoch_based_checkpoint,
extension=extension
)
else:
with open(log_filename, 'r') as f:
lines = f.readlines()
# Get current lowest cost from log file.
lowest_cost = float(lines[0].split()[-1])
# Save model and discard previous ones if current global step gives lowest loss.
if current_cost < lowest_cost:
lines[0] = 'best_model@global_step {} : {}\n'.format(global_step, current_cost)
with open(log_filename, 'w') as f:
for line in lines:
f.write(line)
_shutil_keras_models(
save_dir, global_step,
epoch_based_checkpoint,
extension=extension
)
def _shutil_keras_models(save_dir, global_step, epoch_based_checkpoint=False, extension="tlt"):
"""Shutil copy and move calls to save and delete keras models.
This will delete old backup models and copy the current keras model to 'model.{extension}'.
Also moves the current keras model to 'model.{step|epoch}.backup-{global_step}.{extension}'.
Args:
save_dir (str): The directory where all the model files are saved.
global_step (int): Current global step number.
"""
format_string = "step"
if epoch_based_checkpoint:
format_string = "epoch"
old_backup_files = glob.glob(os.path.join(save_dir, f'model.{format_string}.backup-*'))
for fl in old_backup_files:
os.remove(fl)
logging.debug("rm {}".format(fl))
shutil.copy(os.path.join(save_dir, f'model.{format_string}-') +
str(global_step) + f'.{extension}', os.path.join(save_dir, f'model.{extension}'))
shutil.move(os.path.join(save_dir, f'model.{format_string}-') +
str(global_step) + f'.{extension}',
os.path.join(save_dir, f'model.{format_string}.backup-') +
str(global_step) + f'.{extension}')
logging.debug("cp 'model.keras-{global_step}.{extension}' to 'model.{extension}'".format(
global_step=global_step,
extension=extension))
logging.debug(
"mv 'model.keras-{global_step}.{extension}' to "
"'model.keras.backup-{global_step}.{extension}'".format(
global_step=global_step,
extension=extension
)
)
def clean_checkpoint_dir(save_dir, global_step):
"""Remove extraneous checkpoint files and keras files but keeps last checkpoint files.
The validation log file VAL_LOG_FILENAME must be initialized.
Args:
save_dir (str): The directory where all the model files are saved.
Raises:
ValueError: Checkpoint file has not been created.
"""
_files = glob.glob(os.path.join(save_dir, 'model.epoch-*'))
# Delete model files other than the lowest cost model files.
# Keep backup models.
files_to_delete = [fn for fn in _files if '-' + str(global_step+1) not in fn and
'backup' not in fn and '-' + str(global_step) not in fn]
for fl in files_to_delete:
os.remove(fl)
logging.debug("rm {}".format(fl))
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/utilities/model_file_processing.py |
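A usage sketch (assumption; the results directory, epoch number, and cost are illustrative): after each validation round, keep only the best model and prune stale checkpoints.

from nvidia_tao_tf1.cv.common.utilities.model_file_processing import (
    clean_checkpoint_dir, save_best_model)

results_dir = "/tmp/results"
epoch, val_loss = 10, 0.213
# Expects the trainer to have already written model.epoch-10.hdf5 in results_dir.
save_best_model(results_dir, epoch, val_loss,
                epoch_based_checkpoint=True, extension="hdf5")
clean_checkpoint_dir(results_dir, epoch)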
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DriveIX common utils used across all apps."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import logging
import os
import struct
import sys
import tempfile
from zipfile import BadZipFile, ZipFile
import keras
import onnx
import tensorflow as tf
from tensorflow.compat.v1 import GraphDef
import tf2onnx
from nvidia_tao_tf1.core.export import (
keras_to_onnx,
keras_to_pb,
keras_to_uff
)
from nvidia_tao_tf1.encoding import encoding
ENCRYPTION_OFF = False
# logger = logging.getLogger(__name__)
def encode_from_keras(
keras_model,
output_filename,
enc_key,
only_weights=False,
custom_objects=None
):
"""A simple function to encode a keras model into magnet export format.
Args:
keras_model (keras.models.Model object): The input keras model to be encoded.
output_filename (str): The name of the encoded output file.
enc_key (bytes): Byte text to encode the model.
custom_objects(dict): Custom objects for serialization and deserialization.
Returns:
None
"""
# Make sure that input model is a keras model object.
if not isinstance(keras_model, keras.models.Model):
raise TypeError("The model should be a keras.models.Model object")
os_handle, temp_file_name = tempfile.mkstemp()
os.close(os_handle)
# Create a temporary model file for the keras model.
if only_weights:
keras_model.save_weights(temp_file_name)
else:
keras_model.save(temp_file_name)
# Encode the keras model file.
with open(output_filename, 'wb') as outfile, open(temp_file_name, 'rb') as infile:
encoding.encode(infile, outfile, enc_key)
infile.closed
outfile.closed
# Remove the temporary keras file.
os.remove(temp_file_name)
def get_decoded_filename(input_file_name, enc_key, custom_objects=None):
"""Extract keras model file and get model dtype.
Args:
input_file_name (str): Path to input model file.
enc_key (bytes): Byte text to decode model.
custom_objects(dict): Custom objects for serialization and deserialization.
Returns:
model_dtype: Return the decoded model filename.
"""
if input_file_name.endswith(".hdf5"):
return input_file_name
# Check if input file exists.
if not os.path.isfile(input_file_name):
raise ValueError("Cannot find input file name.")
os_handle, temp_file_name = tempfile.mkstemp()
os.close(os_handle)
with open(temp_file_name, 'wb') as temp_file, open(input_file_name, 'rb') as encoded_file:
encoding.decode(encoded_file, temp_file, enc_key)
encoded_file.closed
temp_file.closed
# Check if the model is valid hdf5
try:
keras.models.load_model(temp_file_name, compile=False, custom_objects=custom_objects)
except IOError:
raise IOError("Invalid decryption. {}. The key used to load the model "
"is incorrect.".format(sys.exc_info()[1]))
except ValueError:
raise ValueError("Invalid decryption. {}. The key used to load the model "
"is incorrect.".format(sys.exc_info()[1]))
return temp_file_name
def decode_to_keras(input_file_name, enc_key,
input_model=None, compile_model=True, by_name=True,
custom_objects=None):
"""A simple function to decode an encrypted file to a keras model.
Args:
input_file_name (str): Path to encoded input file.
enc_key (bytes): Byte text to decode the model.
custom_objects(dict): Custom objects for serialization and deserialization.
Returns:
decrypted_model (keras.models.Model): Returns a decrypted keras model.
"""
# Check if input file exists.
if not os.path.isfile(input_file_name):
raise ValueError("Cannot find input file name.")
if input_file_name.endswith("hdf5"):
if input_model is None:
return keras.models.load_model(input_file_name,
compile=compile_model,
custom_objects=custom_objects)
assert isinstance(input_model, keras.models.Model), (
"Input model not a valid Keras model."
)
input_model.load_weights(input_file_name, by_name=by_name, custom_objects=custom_objects)
return input_model
os_handle, temp_file_name = tempfile.mkstemp()
os.close(os_handle)
with open(temp_file_name, 'wb') as temp_file, open(input_file_name, 'rb') as encoded_file:
encoding.decode(encoded_file, temp_file, enc_key)
encoded_file.closed
temp_file.closed
if input_model is None:
try:
decrypted_model = keras.models.load_model(temp_file_name,
compile=compile_model,
custom_objects=custom_objects)
except IOError:
raise IOError("Invalid decryption. {}. The key used to load the model "
"is incorrect.".format(sys.exc_info()[1]))
except ValueError:
raise ValueError("Invalid decryption. {}".format(sys.exc_info()[1]))
os.remove(temp_file_name)
return decrypted_model
assert isinstance(input_model, keras.models.Model), 'Input model not a valid Keras model.'
try:
input_model.load_weights(temp_file_name, by_name=by_name, custom_objects=custom_objects)
except IOError:
raise IOError("Invalid decryption. {}. The key used to load the model "
"is incorrect.".format(sys.exc_info()[1]))
except ValueError:
raise ValueError("Invalid decryption. {}. The key used to load the model "
"is incorrect.".format(sys.exc_info()[1]))
os.remove(temp_file_name)
return input_model
def model_io(model_path, enc_key=None, custom_objects=None):
"""Simple utility to handle model file based on file extensions.
Args:
pretrained_model_file (str): Path to the model file.
enc_key (str): Key to load tlt file.
custom_objects(dict): Custom objects for serialization and deserialization.
Returns:
model (keras.models.Model): Loaded keras model.
"""
assert os.path.exists(
model_path), "Model not found at {}".format(model_path)
if model_path.endswith('.tlt'):
assert enc_key is not None, "Key must be provided to load the model."
return decode_to_keras(str(model_path),
enc_key,
custom_objects=custom_objects)
if model_path.endswith('.hdf5'):
return keras.models.load_model(str(model_path),
compile=False,
custom_objects=custom_objects)
raise NotImplementedError(
"Invalid model file extension. {}".format(model_path))
def get_step_from_ckzip(path):
"""Gets the step number from a ckzip checkpoint.
Args:
path (str): path to the checkpoint.
Returns:
int: the step number.
"""
return int(os.path.basename(path).split('.')[1].split('-')[1])
def extract_checkpoint_file(tmp_zip_file):
"""Simple function to extract a checkpoint file.
Args:
tmp_zip_file (str): Path to the extracted zip file.
Returns:
tmp_checkpoint_path (str): Path to the extracted checkpoint.
"""
# Set-up the temporary directory.
temp_checkpoint_path = tempfile.mkdtemp()
try:
with ZipFile(tmp_zip_file, 'r') as zip_object:
for member in zip_object.namelist():
zip_object.extract(member, path=temp_checkpoint_path)
except BadZipFile:
raise ValueError(
"The zipfile extracted was corrupt. Please check your key "
"or delete the latest `*.ckzip` and re-run the command."
)
except Exception:
raise IOError(
"The last checkpoint file is not saved properly. "
"Please delete it and rerun the script."
)
return temp_checkpoint_path
def get_tf_ckpt(ckzip_path, enc_key, latest_step):
"""Simple function to extract and get a trainable checkpoint.
Args:
ckzip_path (str): Path to the encrypted checkpoint.
Returns:
tf_ckpt_path (str): Path to the decrypted tf checkpoint
"""
# Set-up the temporary directory.
os_handle, temp_zip_path = tempfile.mkstemp()
os.close(os_handle)
# Decrypt the checkpoint file.
try:
# Try reading a checkpoint file directly.
temp_checkpoint_path = extract_checkpoint_file(ckzip_path)
except ValueError:
# Decrypt and load checkpoints for TAO < 5.0
with open(ckzip_path, 'rb') as encoded_file, open(temp_zip_path, 'wb') as tmp_zip_file:
encoding.decode(encoded_file, tmp_zip_file, bytes(enc_key, 'utf-8'))
encoded_file.closed
tmp_zip_file.closed
# Load zip file and extract members to a tmp_directory.
temp_checkpoint_path = extract_checkpoint_file(temp_zip_path)
# Removing the temporary zip path.
os.remove(temp_zip_path)
return os.path.join(temp_checkpoint_path,
"model.ckpt-{}".format(latest_step))
def get_latest_checkpoint(results_dir, key):
"""Get the latest checkpoint path from a given results directory.
Parses through the directory to look for the latest checkpoint file
and returns the path to this file.
Args:
results_dir (str): Path to the results directory.
key (str): key to decode/encode the model
Returns:
ckpt_path (str): Path to the latest checkpoint.
"""
trainable_ckpts = [get_step_from_ckzip(item)
for item in os.listdir(results_dir) if item.endswith(".ckzip")]
num_ckpts = len(trainable_ckpts)
if num_ckpts == 0:
return None
latest_step = sorted(trainable_ckpts, reverse=True)[0]
latest_checkpoint = os.path.join(results_dir, "model.epoch-{}.ckzip".format(latest_step))
return get_tf_ckpt(latest_checkpoint, key, latest_step)
def get_latest_tlt_model(results_dir, extension=".hdf5"):
"""Get the latest checkpoint path from a given results directory.
Parses through the directory to look for the latest tlt file
and returns the path to this file.
Args:
results_dir (str): Path to the results directory.
Returns:
latest_checkpoint (str): Path to the latest checkpoint.
"""
trainable_ckpts = []
for item in os.listdir(results_dir):
if item.endswith(extension):
try:
step_num = get_step_from_ckzip(item)
trainable_ckpts.append(step_num)
except IndexError:
continue
num_ckpts = len(trainable_ckpts)
if num_ckpts == 0:
return None
latest_step = sorted(trainable_ckpts, reverse=True)[0]
latest_checkpoint = os.path.join(results_dir, "model.epoch-{}{}".format(latest_step, extension))
return latest_checkpoint
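# Illustrative example (directory contents are assumed): if results_dir contains
# "model.epoch-3.hdf5" and "model.epoch-10.hdf5", the parsed steps are [3, 10] and
#   get_latest_tlt_model(results_dir)  # -> "<results_dir>/model.epoch-10.hdf5"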
def load_model(model_path, key=None, custom_objects=None):
"""
    Load a model saved in either .hdf5 or .tlt format.
    Args:
        model_path (str): Path to the model file.
        key (str): Key to decode the model if it is encrypted (.tlt).
        custom_objects(dict): Custom objects for serialization and deserialization.
Returns:
model(keras.models.Model): Returns a keras model.
"""
_, ext = os.path.splitext(model_path)
if ext == '.hdf5':
        # Directly load the model; compile=False because the loss is never required.
model = keras.models.load_model(model_path, compile=False, custom_objects=custom_objects)
model.load_weights(model_path)
elif ext == '.tlt':
os_handle, temp_file_name = tempfile.mkstemp(suffix='.hdf5')
os.close(os_handle)
with open(temp_file_name, 'wb') as temp_file, open(model_path, 'rb') as encoded_file:
encoding.decode(encoded_file, temp_file, str.encode(key))
encoded_file.close()
temp_file.close()
# recursive call
model = load_model(temp_file_name, None, custom_objects)
os.remove(temp_file_name)
else:
raise NotImplementedError("{0} file is not supported!".format(ext))
return model
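# Hedged usage sketch (the path and key below are placeholders, not files shipped with
# this repo): loading an encrypted .tlt model decodes it to a temporary .hdf5 file and
# then loads it through Keras.
#   model = load_model("/workspace/models/pretrained_model.tlt", key="nvidia_tlt")
#   model.summary()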
def load_pretrained_weights(model, pretrained_model_path, key, logger=None):
"""Loads pretrained weights from another model into the specified model.
Args:
model (KerasModel): Model to load the pretrained weights into.
pretrained_model_path (str): Path to the pretrained weights for the model.
key (str): Key to decode/encode the model
logger (obj): object for loggings
"""
loaded_model = model_io(pretrained_model_path, enc_key=key)
loaded_model_layers = [layer.name for layer in loaded_model.layers]
if logger:
logger.info("pretrained_model_path: {}".format(pretrained_model_path))
logger.info("loaded_model_layers: {}".format(loaded_model_layers))
for layer in model.layers:
if layer.name in loaded_model_layers:
pretrained_layer = loaded_model.get_layer(layer.name)
weights_pretrained = pretrained_layer.get_weights()
model_layer = model.get_layer(layer.name)
try:
model_layer.set_weights(weights_pretrained)
except ValueError:
continue
del loaded_model
# Trigger garbage collector to clear memory of the deleted loaded model
gc.collect()
def save_exported_file(model, output_file_name, key, backend='onnx',
output_node_names=None, custom_objects=None,
target_opset=10, logger=None, delete_tmp_file=True):
"""Save the exported model file.
This routine converts a keras model to onnx/uff model
based on the backend the exporter was initialized with.
Args:
model (keras.model.Model): Decoded keras model to be exported.
output_file_name (str): Path to the output file.
key (str): key to decode/encode the model
backend (str): backend engine
output_node_names (str): name of the output node
        target_opset (int): target opset version
        custom_objects (dict): custom objects for model serialization/deserialization
        logger (obj): logger object
    Returns:
        tuple: (output_file_name, in_tensor_names, out_tensor_names).
    """
if backend == "onnx":
in_tensor_names, out_tensor_names, in_tensor_shape = keras_to_onnx(
model, output_file_name, custom_objects=custom_objects, target_opset=target_opset)
elif backend == 'tfonnx':
        # Create frozen graph as .pb file.
os_handle_tf, tmp_tf_file = tempfile.mkstemp(suffix=".pb")
os.close(os_handle_tf)
in_tensor_names, out_tensor_names, in_tensor_shape = keras_to_pb(
model,
tmp_tf_file,
output_node_names,
custom_objects=custom_objects)
if output_node_names is None:
output_node_names = out_tensor_names
in_tensor_names, out_tensor_names = pb_to_onnx(
tmp_tf_file,
output_file_name,
in_tensor_names,
output_node_names,
target_opset,
verbose=False)
elif backend == 'uff':
os_handle, tmp_file_name = tempfile.mkstemp(suffix=".uff")
os.close(os_handle)
in_tensor_names, out_tensor_names, in_tensor_shape = keras_to_uff(
model, output_file_name, None, custom_objects=custom_objects)
else:
raise NotImplementedError("Invalid backend provided. {}".format(backend))
if logger:
logger.info('Output Tensors: {}'.format(out_tensor_names))
logger.info('Input Tensors: {} of shape: {}'.format(in_tensor_names, in_tensor_shape))
return output_file_name, in_tensor_names, out_tensor_names
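# Hedged usage sketch (file name and opset are illustrative assumptions): exporting a
# decoded Keras model to ONNX returns the output path plus the input/output tensor names
# that a downstream engine builder would need.
#   onnx_path, in_tensors, out_tensors = save_exported_file(
#       keras_model, "model.onnx", key="nvidia_tlt", backend="onnx", target_opset=11)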
def change_model_batch_size(model, input_dims, logger=None, custom_objects=None):
"""Change batch size of a model.
Args:
model: input keras model
input_dims (dict): model input name and shape.
logger (obj): object for loggings
custom_objects(dict): Custom objects for model conversion
"""
# replace input shape of first layer
layer_names_list = [layer.name for layer in model.layers]
for layer_name in input_dims.keys():
layer_idx = layer_names_list.index(layer_name)
model._layers[layer_idx].batch_input_shape = input_dims[layer_name]
# rebuild model architecture by exporting and importing via json
new_model = keras.models.model_from_json(model.to_json(), custom_objects=custom_objects)
# new_model.summary() # Disable for TLT release
# # copy weights from old model to new one
for layer in new_model.layers:
try:
layer.set_weights(model.get_layer(name=layer.name).get_weights())
        except ValueError:
            if logger:
                logger.warning("Could not transfer weights for layer {}".format(layer.name))
return new_model
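# Hedged usage sketch (the input layer name and shape are assumptions; use the names
# reported by model.summary()): forcing a static batch size of 1 before export.
#   new_model = change_model_batch_size(model, {"input_1": (1, 3, 544, 960)})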
def pb_to_onnx(
input_filename,
output_filename,
input_node_names,
output_node_names,
target_opset=None,
verbose=False,
):
"""Convert a TensorFlow model to ONNX.
The input model needs to be passed as a frozen Protobuf file.
The exported ONNX model may be parsed and optimized by TensorRT.
Args:
input_filename (str): path to protobuf file.
output_filename (str): file to write exported model to.
input_node_names (list of str): list of model input node names as
returned by model.layers[some_idx].get_output_at(0).name.split(':')[0].
output_node_names (list of str): list of model output node names as
returned by model.layers[some_idx].get_output_at(0).name.split(':')[0].
        target_opset (int): Target opset version to use, default=<default opset for
            the current tf2onnx installation>
Returns:
tuple<in_tensor_name(s), out_tensor_name(s):
in_tensor_name(s): The name(s) of the input nodes. If there is only one name, it will be
returned as a single string, otherwise a list of strings.
out_tensor_name(s): The name(s) of the output nodes. If there is only one name, it will be
returned as a single string, otherwise a list of strings.
"""
graphdef = GraphDef()
with tf.gfile.GFile(input_filename, "rb") as frozen_pb:
graphdef.ParseFromString(frozen_pb.read())
if not isinstance(input_node_names, list):
input_node_names = [input_node_names]
if not isinstance(output_node_names, list):
output_node_names = [output_node_names]
# The ONNX parser requires tensors to be passed in the node_name:port_id format.
# Since we reset the graph below, we assume input and output nodes have a single port.
input_node_names = ["{}:0".format(node_name) for node_name in input_node_names]
output_node_names = ["{}:0".format(node_name) for node_name in output_node_names]
tf.reset_default_graph()
# `tf2onnx.tfonnx.process_tf_graph` prints out layer names when
# folding the layers. Disabling INFO logging for TLT branch.
logging.getLogger("tf2onnx.tfonnx").setLevel(logging.WARNING)
with tf.Graph().as_default() as tf_graph:
tf.import_graph_def(graphdef, name="")
onnx_graph = tf2onnx.tfonnx.process_tf_graph(
tf_graph,
input_names=input_node_names,
output_names=output_node_names,
continue_on_error=True,
verbose=verbose,
opset=target_opset,
)
onnx_graph = tf2onnx.optimizer.optimize_graph(onnx_graph)
model_proto = onnx_graph.make_model("test")
with open(output_filename, "wb") as f:
f.write(model_proto.SerializeToString())
# Reload and check ONNX model.
onnx_model = onnx.load(output_filename)
onnx.checker.check_model(onnx_model)
# Return a string instead of a list if there is only one input or output.
if len(input_node_names) == 1:
input_node_names = input_node_names[0]
if len(output_node_names) == 1:
output_node_names = output_node_names[0]
return input_node_names, output_node_names
def convertKeras2TFONNX(
model,
model_name,
output_node_names=None,
target_opset=10,
custom_objects=None,
logger=None
):
"""Convert keras model to onnx via frozen tensorflow graph.
Args:
model (keras.model.Model): Decoded keras model to be exported.
model_name (str): name of the model file
output_node_names (str): name of the output node
target_opset (int): target opset version
"""
    # Create frozen graph as a .pb file.
output_pb_filename = model_name + '.pb'
in_tensor_names, out_tensor_names, __ = keras_to_pb(
model,
output_pb_filename,
output_node_names,
custom_objects=custom_objects)
if logger:
logger.info('Output Tensors: {}'.format(out_tensor_names))
logger.info('Input Tensors: {}'.format(in_tensor_names))
output_onnx_filename = model_name + '.onnx'
(_, _) = pb_to_onnx(output_pb_filename,
output_onnx_filename,
in_tensor_names,
out_tensor_names,
target_opset)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/utilities/tlt_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Export utils."""
import onnx
import tensorflow as tf
from tensorflow.compat.v1 import GraphDef
import tf2onnx
from nvidia_tao_tf1.core.export._uff import keras_to_pb
def pb_to_onnx(
input_filename,
output_filename,
input_node_names,
output_node_names,
target_opset=None,
):
"""Convert a TensorFlow model to ONNX.
The input model needs to be passed as a frozen Protobuf file.
The exported ONNX model may be parsed and optimized by TensorRT.
Args:
input_filename (str): path to protobuf file.
output_filename (str): file to write exported model to.
input_node_names (list of str): list of model input node names as
returned by model.layers[some_idx].get_output_at(0).name.split(':')[0].
output_node_names (list of str): list of model output node names as
returned by model.layers[some_idx].get_output_at(0).name.split(':')[0].
        target_opset (int): Target opset version to use, default=<default opset for
            the current tf2onnx installation>
Returns:
tuple<in_tensor_name(s), out_tensor_name(s):
in_tensor_name(s): The name(s) of the input nodes. If there is only one name, it will be
returned as a single string, otherwise a list of strings.
out_tensor_name(s): The name(s) of the output nodes. If there is only one name, it will be
returned as a single string, otherwise a list of strings.
"""
graphdef = GraphDef()
with tf.gfile.GFile(input_filename, "rb") as frozen_pb:
graphdef.ParseFromString(frozen_pb.read())
if not isinstance(input_node_names, list):
input_node_names = [input_node_names]
if not isinstance(output_node_names, list):
output_node_names = [output_node_names]
# The ONNX parser requires tensors to be passed in the node_name:port_id format.
# Since we reset the graph below, we assume input and output nodes have a single port.
input_node_names = ["{}:0".format(node_name) for node_name in input_node_names]
output_node_names = ["{}:0".format(node_name) for node_name in output_node_names]
tf.reset_default_graph()
with tf.Graph().as_default() as tf_graph:
tf.import_graph_def(graphdef, name="")
onnx_graph = tf2onnx.tfonnx.process_tf_graph(
tf_graph,
input_names=input_node_names,
output_names=output_node_names,
continue_on_error=True,
verbose=True,
opset=target_opset,
)
onnx_graph = tf2onnx.optimizer.optimize_graph(onnx_graph)
model_proto = onnx_graph.make_model("test")
with open(output_filename, "wb") as f:
f.write(model_proto.SerializeToString())
# Reload and check ONNX model.
onnx_model = onnx.load(output_filename)
onnx.checker.check_model(onnx_model)
# Return a string instead of a list if there is only one input or output.
if len(input_node_names) == 1:
input_node_names = input_node_names[0]
if len(output_node_names) == 1:
output_node_names = output_node_names[0]
return input_node_names, output_node_names
def convertKeras2TFONNX(
model,
model_name,
output_node_names=None,
target_opset=10,
custom_objects=None,
logger=None
):
"""Convert keras model to onnx via frozen tensorflow graph.
Args:
model (keras.model.Model): Decoded keras model to be exported.
model_name (str): name of the model file
output_node_names (str): name of the output node
target_opset (int): target opset version
"""
    # Create frozen graph as a .pb file.
output_pb_filename = model_name + '.pb'
in_tensor_names, out_tensor_names, __ = keras_to_pb(
model,
output_pb_filename,
output_node_names,
custom_objects=custom_objects)
if logger:
logger.info('Output Tensors: {}'.format(out_tensor_names))
logger.info('Input Tensors: {}'.format(in_tensor_names))
output_onnx_filename = model_name + '.onnx'
(_, _) = pb_to_onnx(output_pb_filename,
output_onnx_filename,
in_tensor_names,
out_tensor_names,
target_opset)
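# Hedged usage sketch (the model and name prefix are placeholders): this writes
# "<model_name>.pb" and "<model_name>.onnx" side by side.
#   convertKeras2TFONNX(keras_model, "/tmp/gazenet", target_opset=10)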
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/utilities/export_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility function definitions for error calculations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
def rmse_2D(gt, pred):
"""Compute the two-dimensional rmse using ground truth and prediction results.
Args:
gt (Nx2 array): Array of ground truth for each data point.
pred (Nx2 array): Array of prediction for each data point.
Returns:
mean_diff (1x2 array): Mean error in each dimension.
std_diff (1x2 array): Std error in each dimension.
mean_rmse_xy (float): Mean rmse error.
std_rmse_xy (float): Std rmse error.
"""
mean_diff = np.mean(np.absolute(gt - pred), axis=0)
std_diff = np.std(np.absolute(gt - pred), axis=0)
mean_rmse_xy = np.mean(np.sqrt(np.sum(np.square(gt - pred), axis=1)))
std_rmse_xy = np.std(np.sqrt(np.sum(np.square(gt - pred), axis=1)))
return mean_diff, std_diff, mean_rmse_xy, std_rmse_xy
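# Worked example (values chosen purely for illustration):
#   gt = np.array([[0., 0.], [3., 4.]]); pred = np.zeros((2, 2))
#   rmse_2D(gt, pred)
# gives mean_diff = [1.5, 2.0] and std_diff = [1.5, 2.0]; the per-sample Euclidean
# distances are [0, 5], so mean_rmse_xy = 2.5 and std_rmse_xy = 2.5.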
def rmse_3D(gt, pred):
"""Compute the three-dimensional rmse using ground truth and prediction results.
Args:
gt (Nx3 array): Array of ground truth for each data point.
pred (Nx3 array): Array of prediction for each data point.
Returns:
mean_diff (1x3 array): Mean error in each dimension.
std_diff (1x3 array): Std error in each dimension.
mean_rmse_xyz (float): Mean 3D rmse error.
std_rmse_xyz (float): Std 3D rmse error.
mean_rmse_xy (float): Mean 2D rmse error.
std_rmse_xy (float): Std 2D rmse error.
"""
mean_diff = np.mean(np.absolute(gt - pred), axis=0)
std_diff = np.std(np.absolute(gt - pred), axis=0)
mean_rmse_xyz = np.mean(np.sqrt(np.sum(np.square(gt - pred), axis=1)))
std_rmse_xyz = np.std(np.sqrt(np.sum(np.square(gt - pred), axis=1)))
mean_rmse_xy = np.mean(np.sqrt(np.sum(np.square(gt[:, :2] - pred[:, :2]), axis=1)))
std_rmse_xy = np.std(np.sqrt(np.sum(np.square(gt[:, :2] - pred[:, :2]), axis=1)))
return mean_diff, std_diff, mean_rmse_xyz, std_rmse_xyz, mean_rmse_xy, std_rmse_xy
def compute_error_xyz(results):
"""Compute the final error for xyz model using ground truth and prediction results.
Args:
results (array): List of ground truth and prediction for each data point.
Returns:
final_errors (list): Computed errors.
num_results (int): Number of evaluated samples.
"""
if isinstance(results, list):
results = np.asarray(results)
results /= 10.
# Calculate the rmse error.
mean_diff, std_diff, mean_rmse_xyz, std_rmse_xyz, mean_rmse_xy, std_rmse_xy = \
rmse_3D(results[:, :3], results[:, 3:])
final_errors = [(mean_diff[0], std_diff[0]),
(mean_diff[1], std_diff[1]),
(mean_diff[2], std_diff[2])]
final_errors.append((mean_rmse_xyz, std_rmse_xyz))
final_errors.append((mean_rmse_xy, std_rmse_xy))
num_results = results.shape[0]
return final_errors, num_results
def compute_error_theta_phi(results):
"""Compute the final error for theta-phi model using ground truth and prediction results.
Args:
results (array): List of ground truth and prediction for each data point.
Returns:
final_errors (list): Computed errors.
num_results (int): Number of evaluated samples.
"""
if isinstance(results, list):
results = np.asarray(results)
# Remove samples without theta-phi ground truth.
results = results[results[:, 0] != -1.0]
# 1) Calculate the angular gaze vector error.
gt_gv = np.zeros(shape=(results.shape[0], 3), dtype=results.dtype)
gt_gv[:, 0] = -np.cos(results[:, 0]) * np.sin(results[:, 1])
gt_gv[:, 1] = -np.sin(results[:, 0])
gt_gv[:, 2] = -np.cos(results[:, 0]) * np.cos(results[:, 1])
pr_gv = np.zeros(shape=(results.shape[0], 3), dtype=results.dtype)
pr_gv[:, 0] = -np.cos(results[:, 2]) * np.sin(results[:, 3])
pr_gv[:, 1] = -np.sin(results[:, 2])
pr_gv[:, 2] = -np.cos(results[:, 2]) * np.cos(results[:, 3])
err_cos = np.arccos(np.sum(gt_gv[:, :3] * pr_gv[:, :3], axis=1))
final_errors = [(np.mean(err_cos), np.std(err_cos))]
# 2) Calculate the theta-phi rmse error.
mean_tp_diff, std_tp_diff, mean_rmse_tp, std_rmse_tp = rmse_2D(results[:, :2], results[:, 2:])
final_errors.append((mean_tp_diff[0], std_tp_diff[0]))
final_errors.append((mean_tp_diff[1], std_tp_diff[1]))
final_errors.append((mean_rmse_tp, std_rmse_tp))
num_results = results.shape[0]
return final_errors, num_results
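# Worked example of the angular error above (a single illustrative sample): for a
# ground truth of (theta, phi) = (0, 0) and a prediction of (0, 0.1), the gaze vectors
# are [0, 0, -1] and [-sin(0.1), 0, -cos(0.1)], their dot product is cos(0.1), and
# arccos(cos(0.1)) gives an angular error of 0.1 radian (about 5.7 degrees).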
def compute_error_joint(results):
"""Compute the final error using ground truth and prediction results.
Args:
results (array): List of ground truth and prediction for each data point.
Returns:
final_errors (list): Computed errors.
num_results_xyz (int): Number of evaluated samples for xyz.
num_results_tp (int): Number of evaluated samples for theta-phi.
"""
if isinstance(results, list):
results = np.asarray(results)
# XYZ Error Calculation.
val_xyz = np.concatenate((results[:, 0:3], results[:, 5:8]), axis=1)
final_errors_xyz, num_results_xyz = compute_error_xyz(val_xyz)
# Theta-Phi Error Calculation.
val_tp = np.concatenate((results[:, 3:5], results[:, 8:10]), axis=1)
final_errors_tp, num_results_tp = compute_error_theta_phi(val_tp)
final_errors = final_errors_xyz + final_errors_tp
return final_errors, num_results_xyz, num_results_tp
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/utilities/error_calculation.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility function definitions for writing results to text file."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
def write_to_file_xyz(final_errors, num_results, save_dir, mode):
"""Write the final error results to a text file in result directory.
Args:
final_errors (list): List of tuples of final errors.
num_results (int): Number of samples that the errors are calculated from.
save_dir (str): The directory to output result text file.
mode (str): Current mode in evaluation, can be 'testing' or 'kpi_testing'.
"""
output_filename = os.path.join(save_dir, mode+'_error.txt')
with open(output_filename, 'w') as f:
f.write('#XYZ evaluation (' + str(num_results) + ' samples)' + '\n\n')
f.write('RMSE error [cm]: ' + '\n')
f.write('x, mean: ' + str(round(final_errors[0][0], 2)) +
' std: ' + str(round(final_errors[0][1], 2)) + '\n')
f.write('y, mean: ' + str(round(final_errors[1][0], 2)) +
' std: ' + str(round(final_errors[1][1], 2)) + '\n')
f.write('z, mean: ' + str(round(final_errors[2][0], 2)) +
' std: ' + str(round(final_errors[2][1], 2)) + '\n')
f.write('d_xyz, mean: ' + str(round(final_errors[3][0], 2)) +
' std: ' + str(round(final_errors[3][1], 2)) + '\n')
f.write('d_xy, mean: ' + str(round(final_errors[4][0], 2)) +
' std: ' + str(round(final_errors[4][1], 2)) + '\n')
f.close()
def write_to_file_theta_phi(final_errors, num_results, save_dir, mode):
"""Write the final error results to a text file in result directory.
Args:
final_errors (list): List of tuples of final errors.
num_results (int): Number of samples that the errors are calculated from.
save_dir (str): The directory to output result text file.
mode (str): Current mode in evaluation, can be 'testing' or 'kpi_testing'.
"""
output_filename = os.path.join(save_dir, mode+'_error.txt')
with open(output_filename, 'a+') as f:
f.write('#Theta-Phi evaluation (' + str(num_results) + ' samples)' + '\n\n')
f.write('Cosine gaze vector error: ' + '\n')
f.write('[radian] mean: ' + str(round(final_errors[0][0], 2)) +
' std: ' + str(round(final_errors[0][1], 2)) + '\n')
f.write('[degree] mean: ' + str(round(final_errors[0][0] * 180 / np.pi, 2)) +
' std: ' + str(round(final_errors[0][1] * 180 / np.pi, 2)) + '\n\n\n')
f.write('Euclidean error [radian]: ' + '\n')
f.write('theta, mean: ' + str(round(final_errors[1][0], 2)) +
' std: ' + str(round(final_errors[1][1], 2)) + '\n')
f.write('phi, mean: ' + str(round(final_errors[2][0], 2)) +
' std: ' + str(round(final_errors[2][1], 2)) + '\n')
f.write('d_tp, mean: ' + str(round(final_errors[3][0], 2)) +
' std: ' + str(round(final_errors[3][1], 2)) + '\n\n')
f.write('Euclidean error [degree]: ' + '\n')
f.write('theta, mean: ' + str(round(final_errors[1][0] * 180 / np.pi, 2)) +
' std: ' + str(round(final_errors[1][1] * 180 / np.pi, 2)) + '\n')
f.write('phi, mean: ' + str(round(final_errors[2][0] * 180 / np.pi, 2)) +
' std: ' + str(round(final_errors[2][1] * 180 / np.pi, 2)) + '\n')
f.write('d_tp, mean: ' + str(round(final_errors[3][0] * 180 / np.pi, 2)) +
' std: ' + str(round(final_errors[3][1] * 180 / np.pi, 2)) + '\n\n\n')
f.write('Projected error in cm in screen phy space (at 100 cm): ' + '\n')
user_dist = 100.0
f.write('x, mean:' + str(round(np.tan(final_errors[2][0]) * user_dist, 2)) +
' std: ' + str(round(np.tan(final_errors[2][1]) * user_dist, 2)) + '\n')
f.write('y, mean:' + str(round(np.tan(final_errors[1][0]) * user_dist, 2)) +
' std: ' + str(round(np.tan(final_errors[1][1]) * user_dist, 2)) + '\n')
mean_d_xy = np.sqrt(np.power(np.tan(final_errors[2][0]) * user_dist, 2) +
np.power(np.tan(final_errors[1][0]) * user_dist, 2))
std_d_xy = np.sqrt(np.power(np.tan(final_errors[2][1]) * user_dist, 2) +
np.power(np.tan(final_errors[1][1]) * user_dist, 2))
f.write('d_xy, mean:' + str(round(mean_d_xy, 2)) +
' std: ' + str(round(std_d_xy, 2)) + '\n')
f.close()
def write_to_file_joint(final_errors, num_results_xyz, num_results_tp, save_dir, mode):
"""Write the final error results to a text file in result directory.
Args:
final_errors (list): List of tuples of final errors.
num_results_xyz (int): Number of xyz samples that the errors are calculated from.
num_results_tp (int): Number of tp samples that the errors are calculated from.
save_dir (str): The directory to output result text file.
mode (str): Current mode in evaluation, can be 'testing' or 'kpi_testing'.
"""
write_to_file_xyz(final_errors, num_results_xyz, save_dir, mode)
output_filename = os.path.join(save_dir, mode + '_error.txt')
f = open(output_filename, 'a')
f.write('--------------------------------------' + '\n')
f.write('--------------------------------------' + '\n\n')
f.close()
write_to_file_theta_phi(final_errors[5:], num_results_tp, save_dir, mode)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/utilities/file_writing.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""KPI visualizer that bucketizes users in kpi sets."""
import json
import os
import pandas as pd
varnan = float('nan')
class KpiVisualizer(object):
"""
KPI visualizer collects information of each user through parsing json_datafactory.
The following information will be collected:
set id, user id, image name, if all information of frame are present, left and right
eyes status, left and right eyes occluded, left and right pupils occluded, left and right
iris occluded
"""
def __init__(self, kpi_sets, kpi_bucket_file, path_info):
"""
Initializes kpi visualizer.
Args:
kpi_sets (list of set ids(str)): kpi sets to visualize
kpi_bucket_file (str): a csv file containing users information of each kpi set
path_info (dict of lists(str)): path info for searching sets across multiple cosmos
"""
if kpi_bucket_file is None or kpi_bucket_file == '' or \
'.csv' not in kpi_bucket_file or not os.path.isfile(kpi_bucket_file):
self._kpi_bucket_file = None
else:
self._kpi_bucket_file = kpi_bucket_file
self.csvAttrColumns = ['set_id', 'user_id', 'region_name', 'image_name', 'valid',
'leye_status', 'reye_status', 'leyelid_occl', 'reyelid_occl',
'lpupil_occl', 'rpupil_occl', 'liris_occl', 'riris_occl', 'glasses',
'race', 'gender', 'tv_size', 'cam_position',
'left_eye_glare_status', 'left_pupil_glare_status',
'right_eye_glare_status', 'right_pupil_glare_status']
self.csvTable = pd.DataFrame(columns=self.csvAttrColumns)
self._root = path_info['root_path']
self.gcosmos = path_info['set_directory_path']
self.kpisets = []
for sets_list in kpi_sets:
self.kpisets.append(sets_list.split(' '))
self.csvFile = 'allkpi_dfstates_userattr.csv'
def __call__(self, output_path, write_csv=False):
"""
        Parse the csv file to collect information; if the csv is not present or is
        invalid, parse the data sets to generate a new table.
"""
if self._kpi_bucket_file is None:
print('Invalid csv file entered, regenerating new csv now...')
if len(self.gcosmos) != len(self.kpisets):
                raise ValueError('Expected length of set_directory_path and '
                                 'visualize_set_id to be the same, received: {}, {}'.
format(len(self.gcosmos),
len(self.kpisets)))
self.generate_attrDF(self.gcosmos, self.kpisets)
self.csvTable = self.addAttributes(self.csvTable)
else:
self.csvTable = pd.read_csv(self._kpi_bucket_file)
print('KPI Sanity: {}'.format(len(self.csvTable.index)))
for gcosmos, sets_list in zip(self.gcosmos, self.kpisets):
self.addAdditionalSets(self.csvTable, gcosmos, sets_list)
print('KPI Sanity: {}'.format(len(self.csvTable.index)))
if len(self.csvTable.columns) < len(self.csvAttrColumns):
self.csvTable = self.addAttributes(self.csvTable)
if write_csv:
file_path = os.path.join(output_path, self.csvFile)
self.csvTable.to_csv(path_or_buf=file_path, mode='w+')
print('Attributes csv is generated: ', file_path)
return self.csvTable
def addAdditionalSets(self, df, cosmos_path, kpi_sets):
"""Finds missing data set from attribute file and append to it."""
additionalSets = []
for set_id in kpi_sets:
df_set = df[df['set_id'] == set_id]
if len(df_set.index) == 0:
additionalSets.append(set_id)
print('Appending additional set {} to attributes csv'.format(set_id))
self.generate_attrDF([cosmos_path], [additionalSets])
def addAttributes(self, df):
"""Add attributes to data frame."""
df = df.astype(str)
for index, row in df.iterrows():
for root_dir, kpi_sets in zip(self.gcosmos, self.kpisets):
if row['set_id'] in kpi_sets:
_root_directory = root_dir
attr = self.__getAttributes(_root_directory, row['set_id'], row['user_id'])
df.at[index, 'glasses'] = str(attr['glasses']) if 'glasses' in attr else str('NA')
df.at[index, 'race'] = str(attr['race']) if 'race' in attr else str('NA')
df.at[index, 'gender'] = str(attr['gender']) if 'gender' in attr else str('NA')
df.at[index, 'tv_size'] = str(attr['tv_size']) if 'tv_size' in attr else str('NA')
df.at[index, 'cam_position'] = str(attr['cam_position']) \
if 'cam_position' in attr else str('NA')
return df
def __getAttributes(self, root_dir, setpath, username):
attributes = {}
setpath = os.path.join(root_dir, setpath)
setpath = setpath.replace('postData', 'orgData')
if 'copilot.cosmos10' in setpath:
glasses_ind = 5
elif 'driveix.cosmos639' in setpath:
glasses_ind = 11
for fname in os.listdir(setpath):
if 'summary' in fname and fname.endswith('.csv'):
with open(os.path.join(setpath, fname)) as fp:
for line in fp:
if 'user' in line.lower() and 'Badge No' not in line and username in line:
attributes['hash'] = username
parts = line.strip('\n').split(',')
if 'yes' in parts[glasses_ind].lower() or \
'glasses' in parts[glasses_ind].lower():
attributes['glasses'] = str('Yes')
else:
attributes['glasses'] = str('No')
attributes['gender'] = str('Female') \
if 'female' in line.lower() else str('Male')
attributes['race'] = str(parts[4])
break
camfile = os.path.join(setpath, 'Config', 'setup.csv')
if not os.path.exists(camfile):
camfile = os.path.join(setpath, 'setup.csv')
if os.path.isfile(camfile):
with open(camfile) as fp:
for line in fp:
if "tv_size" not in line:
parts = line.strip('\n').split(',')
attributes['tv_size'] = str(parts[0])
attributes['cam_position'] = str(parts[-2])
else:
# Note that most sets do not have setup.csv to parse!
# print("Camfile not found {}".format(camfile))
attributes['tv_size'] = 'NA'
attributes['cam_position'] = "NA"
return attributes
def generate_attrDF(self, cosmos_path_list, kpi_sets):
"""Generate attributes for data frame."""
for root_dir, set_ids in zip(cosmos_path_list, kpi_sets):
for set_id in set_ids:
print('Generating data frame for {}'.format(set_id))
self.parse_set_jsons(self._root, root_dir, set_id)
def parse_set_jsons(self, root_path, root_dir, set_id):
""""Parse json files in each set."""
if root_path is None:
root_path = ''
json_datafactory = os.path.join(root_path, root_dir, set_id)
json_datafactory = json_datafactory.replace('postData', 'orgData')
if os.path.exists(os.path.join(json_datafactory, 'json_datafactory_v2')):
json_datafactory = os.path.join(json_datafactory, 'json_datafactory_v2')
elif os.path.exists(os.path.join(json_datafactory, 'json_datafactory')):
json_datafactory = os.path.join(json_datafactory, 'json_datafactory')
else:
print('Cannot find json_datafactory_v2 or json_datafactory in {}'
.format(json_datafactory))
# The table returned might be empty now.
# That is ok as long as the visualization script can handle it.
return
jsons = os.listdir(json_datafactory)
for _json in jsons:
if _json.startswith('.') or 'json' not in _json or _json.endswith('.swp'):
continue
table = self.parse_json_file(json_datafactory, _json)
table = pd.DataFrame(table, columns=self.csvAttrColumns)
self.csvTable = pd.concat([self.csvTable, table], ignore_index=True, sort=True)
def parse_json_file(self, json_datafactory, _json):
"""Parse the given json file."""
table = []
parts = _json.split('/')[-1].split('.json')[0].split("_")
if "s400_KPI" not in _json:
setid = parts[0]
userid = parts[1]
else:
setid = "s400_KPI"
userid = _json.split('/')[-1].split("_", 2)[-1].split('.json')[0]
_json_file = os.path.join(json_datafactory, _json)
text = open(_json_file, "r").read()
try:
_json_contents = json.loads(text)
except Exception:
print("Failed to load {}".format(str(_json_file)))
return None
for section in _json_contents:
valid = True
try:
filename_split = section['filename'].split("/")
frame_name = filename_split[-1]
region_name = filename_split[-2]
if not region_name.startswith('region_'):
# Legacy flat structure, no regions.
region_name = ''
except Exception:
print('Failed to read filename, skipping...')
continue
if 'png' not in frame_name and 'jpg' not in frame_name:
print("filename doesn't appear to be an image {}".format(frame_name))
continue
if len(section['annotations']) == 0:
print("Image contains zero annotations {} {}".format(frame_name, _json_file))
continue
if len(section['annotations']) == 1:
print("skipping invalid image {}".format(section['filename']))
valid = False
row = [setid, userid, region_name, frame_name, valid, "invalid", "invalid",
varnan, varnan, varnan, varnan, varnan, varnan]
table.append(row)
continue
for chunk in section['annotations']:
numLandmarks = 104
x = ['0'] * numLandmarks
y = ['0'] * numLandmarks
tags = [0] * numLandmarks
lastidx = 0
if chunk.get('class') is None:
continue
if 'eyes' in chunk['class'].lower():
# Check for eye status. Switch left-right convention
# Data factory labels left from labellers perspective (not users)
reye_status = chunk['l_status']
leye_status = chunk['r_status']
# To support Old labelling. sigh
elif 'eye' in chunk['class'].lower():
leye_status = reye_status = 'open' if 'open' \
in chunk['class'].lower() else 'closed'
# Obtain fiducial information
elif 'fiducialpoints' in chunk['class'].lower():
for point in chunk:
if 'version' in point or 'class' in point or 'Poccluded' in point:
continue
landmark_pt = int(''.join(c for c in str(point) if c.isdigit()))
lastidx = max(landmark_pt, lastidx)
if 'x' in point and landmark_pt < numLandmarks:
x[landmark_pt - 1] = str(int(float(chunk[point])))
if 'y' in point and landmark_pt < numLandmarks:
y[landmark_pt - 1] = str(int(float(chunk[point])))
if 'occ' in str(point).lower() and landmark_pt <= numLandmarks:
tags[landmark_pt - 1] = 1
# Calculate occlusions
del (x[lastidx:])
del (y[lastidx:])
del (tags[lastidx:])
reyelid_occl = tags[36:41].count(1)
leyelid_occl = tags[42:47].count(1)
rpupil_occl = tags[68:72].count(1)
lpupil_occl = tags[72:76].count(1)
if lastidx > 100:
rIris_occlusions = tags[82:91].count(1)
lIris_occlusions = tags[93:102].count(1)
else:
rIris_occlusions = lIris_occlusions = varnan
try:
row = [setid, userid, region_name, frame_name, valid, leye_status, reye_status,
leyelid_occl, reyelid_occl, lpupil_occl, rpupil_occl,
lIris_occlusions, rIris_occlusions, varnan, varnan, varnan,
varnan, varnan, varnan, varnan, varnan, varnan]
table.append(row)
except Exception as e:
print('Error processing: {}'.format([setid, userid, region_name, frame_name,
valid, leye_status, reye_status,
leyelid_occl, reyelid_occl, lpupil_occl, rpupil_occl,
lIris_occlusions, rIris_occlusions]))
print('{}'.format(e))
return table
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/utilities/kpi_visualization.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Prediction visualizer that bucketizes users predictions."""
import math
import os
import shutil
import subprocess
import cv2
import matplotlib
matplotlib.use('agg') # Causes 'E402:module level import not at top of file'.
import matplotlib.pyplot as plt # noqa: E402
import numpy as np # noqa: E402
import pandas as pd # noqa: E402
from nvidia_tao_tf1.cv.common.utilities.path_processing import mkdir_p # noqa: E402
pd.set_option('display.max_columns', None)
class kpi_prediction_linker():
"""Links together the predictions from each frame with KPI set."""
def __init__(self, model_type, kpi_sets, path_info, gaze_origin,
theta_phi_degrees, write_time_instance_info=False):
"""
Initialize kpi_prediction_linker.
        model_type (str): type of model to visualize, can be one of [xyz_model, joint_model,
        theta_phi_model]
kpi sets (list(str)) : kpi sets to visualize
gaze_origin (str): origin point of theta-phi, can be one of:
normal: normal theta-phi used as GT theta-phi from previous pipelines
mid_eyes: mid point of two eyes
left_eye: left eye
right_eye: right eye
delta: when learning delta theta-phi between normal theta-phi and head pose theta-phi
it uses mid_eyes as gaze origin
headnorm_face: when learning face normalized model
it uses mid point of face (mean of leye, reye and mouth corners)
headnorm_leye: when learning left eye normalized model
it uses left eye as gaze origin
headnorm_reye: when learning right eye normalized model
it uses right eye as gaze origin
theta_phi_degrees (bool): Specify this field if theta-phi model was learned in
degrees or radian.
write_time_instance_info (bool): Specify field if writing time instance info is needed.
"""
self._root_path = path_info['root_path']
self.gcosmos = path_info['set_directory_path']
self.model_type = model_type
if self.model_type not in ['xyz', 'joint', 'theta_phi', 'joint_xyz', 'joint_tp']:
raise NotImplementedError(
'A model of type {} is not implemented, please choose '
'from one of the following: xyz, joint, theta_phi'.format(self.model_type)
)
self.theta_phi_degrees = theta_phi_degrees
self.kpi_GT_dir = path_info['ground_truth_folder_name']
self.kpi_GT_file_folder_name = path_info['ground_truth_file_folder_name']
if write_time_instance_info:
self.kpi_GT_file = ['test', 'train', 'validate']
else:
self.kpi_GT_file = ['test']
self.gaze_origin = gaze_origin
self.kpisets = []
for sets_list in kpi_sets:
self.kpisets.append(sets_list.split(' '))
        if not (len(self.gcosmos) == len(self.kpi_GT_dir)
                == len(self.kpi_GT_file_folder_name) == len(self.kpisets)):
            raise Exception('Expected lists of directory paths, lists of GT paths, '
                            'lists of folders and lists of kpi sets '
                            'to have the same length, received '
                            '{}, {}, {}, {}'.format(len(self.gcosmos), len(self.kpi_GT_dir),
                                                    len(self.kpi_GT_file_folder_name),
                                                    len(self.kpisets)))
self.resultCols = ["image_path", "image_name",
"set_id", "user_id", "region_name", "region_label",
# Ground truth.
"screen_pix_x", "screen_pix_y",
"GT_cam_x", "GT_cam_y", "GT_cam_z", "GT_cam_norm",
"GT_theta", "GT_phi",
"center_of_eyes_x", "center_of_eyes_y", "center_of_eyes_z",
"GT_hp_theta", "GT_hp_phi",
# Prediction.
"gaze_cam_mm_x", "gaze_cam_mm_y", "gaze_cam_mm_z", "gaze_cam_norm",
"gaze_theta", "gaze_phi",
# Error.
"cam_err_x", "cam_err_y", "cam_err_z", "cam_err_xy",
"cam_error", "screen_phy_error", "user_dist", "degree_error",
"direct_degree_error", "backwardIntersection", "cone_degree"]
self.dfgaze = None
self.dfunqgaze = None
@staticmethod
def _get_region_names(org_path, set_id):
"""Read the region names from the folders in config."""
config_path = os.path.join(org_path, set_id, 'Config')
configs = os.listdir(config_path)
regions = [
config
for config in configs
if os.path.isdir(os.path.join(config_path, config))
and config.startswith('region_')
]
if not regions:
# On bench collection has no regions.
if 'incar' in set_id:
raise IOError('In car data set should have region_ folders in {}'
.format(config_path))
return ['']
if 'incar' not in set_id:
raise IOError('On bench data set should not have region_ folders in {}'
.format(config_path))
return regions
@staticmethod
def load_screen_parameters(org_path, set_id):
"""Loads R and T parameters."""
config_path = os.path.join(org_path, set_id, 'Config')
screens = {}
for region_name in kpi_prediction_linker._get_region_names(org_path, set_id):
scrpW, scrpH = 1920.0, 1080.0 # Default vals.
# Try to find a config file for the resolution.
resolution = os.path.join(config_path, region_name, 'resolution.txt')
if os.path.isfile(resolution):
with open(resolution) as f:
scrpW = float(f.readline())
scrpH = float(f.readline())
# Check which of board_size or TV_size is available.
board_size = os.path.join(config_path, region_name, 'board_size.txt')
tv_size = os.path.join(config_path, region_name, 'TV_size')
if os.path.isfile(board_size):
if os.path.isfile(tv_size):
raise IOError("Both board_size.txt and TV_size exist in {}"
.format(os.path.join(config_path, region_name)))
size_file = board_size
elif os.path.isfile(tv_size):
size_file = tv_size
else:
raise IOError("Neither board_size.txt nor TV_size exists in {}"
.format(os.path.join(config_path, region_name)))
with open(size_file) as f:
scrmW = float(f.readline())
scrmH = float(f.readline())
screens[region_name] = (scrmW, scrmH, scrpW, scrpH)
return screens
@staticmethod
def load_cam_extrinsics(org_path, set_id):
"""load calibration matrices: R and T."""
config_path = os.path.join(org_path, set_id, 'Config')
extrinsics = {}
for region_name in kpi_prediction_linker._get_region_names(org_path, set_id):
R_file_path = os.path.join(config_path, region_name, 'R.txt')
T_file_path = os.path.join(config_path, region_name, 'T.txt')
R = np.loadtxt(R_file_path, delimiter=',')
T = np.loadtxt(T_file_path)
extrinsics[region_name] = (R, T)
return extrinsics
@staticmethod
def load_region_labels(org_path, set_id):
"""Load a dict (region_name->region_label) for the set."""
config_path = os.path.join(org_path, set_id, 'Config')
labels = {}
for region_name in kpi_prediction_linker._get_region_names(org_path, set_id):
label_path = os.path.join(config_path, region_name, 'region_label.txt')
if os.path.isfile(label_path):
with open(label_path, 'r') as label_file:
label = label_file.read().strip()
else:
label = 'unknown'
labels[region_name] = label
return labels
@staticmethod
    def screenPix2Phy(gaze_pix, scrmW, scrmH, scrpW, scrpH):
"""
Convert gaze point pixel coordinates (x, y) to physical coordinates in mm (x, y).
gaze_pix: pixel positions of gaze
scrmW, scrmH: screen size in mm
scrpW, scrpH: screen size in pixel
"""
gaze_phy = np.zeros(2, dtype=float)
# compute gaze on screen
gaze_phy[0] = gaze_pix[0] * scrmW / scrpW
gaze_phy[1] = gaze_pix[1] * scrmH / scrpH
return gaze_phy
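    # Worked example (screen dimensions are illustrative): on a 1920x1080-pixel screen
    # that is 600 mm x 340 mm, the centre pixel (960, 540) maps to
    # (960 * 600 / 1920, 540 * 340 / 1080) = (300.0, 170.0) mm.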
@staticmethod
def fromCamera2ScreenProj_3inp(gaze, R, T):
"""
Calculates projection from camera space to screen spaces.
gaze_cam = R*gaze_scr + T
gaze_scr = R.transpose()*(gaze_cam - T)
R.transpose() and R.inverse() are equal because R is a square orthogonal matrix
Convert camera coordinates in mm (x, y, z) to screen coordinates in mm (x, y)
"""
# build array for output gaze
gaze_out = np.zeros(3)
gaze_out[0] = gaze[0]
gaze_out[1] = gaze[1]
gaze_out[2] = gaze[2]
# apply calibration results: R and T
Rt = R.transpose()
gaze_out = Rt.dot(gaze_out - T.transpose())
return gaze_out
@staticmethod
def compute_theta_phi_from_unit_vector(x, y, z):
"""
Computes theta phi angles from forward facing gaze vector in the camera space.
Args:
x (float): gaze vector x
y (float): gaze vector y
z (float): gaze vector z
Returns:
theta (float): gaze pitch in radian
phi (float): gaze yaw in radian
"""
theta = np.arcsin(-y)
phi = np.arctan2(-x, -z)
return theta, phi
@staticmethod
def compute_gaze_vector_from_theta_phi(theta, phi):
"""
Computes gaze vector through theta and phi.
Compute forward facing gaze vector (look vector) in the camera space from theta & phi
Args:
theta(float): gaze pitch in radians
phi(float): gaze yaw in radians
Returns:
gaze_vec(array of floats): forward facing gaze vector in the camera space
"""
# forward facing gaze vector in the camera space
gaze_vec = np.zeros(3)
gaze_vec[0] = -np.cos(theta) * np.sin(phi)
gaze_vec[1] = -np.sin(theta)
gaze_vec[2] = -np.cos(theta) * np.cos(phi)
return gaze_vec
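    # Worked example: theta = 0, phi = 0 gives gaze_vec = [0, 0, -1], i.e. looking
    # straight into the camera along -z; feeding that vector back through
    # compute_theta_phi_from_unit_vector(0, 0, -1) recovers (0.0, 0.0), so the two
    # helpers are inverses of each other on unit vectors.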
@staticmethod
def compute_PoR_from_theta_phi(theta, phi, pc_cam_mm, R, T):
"""
Compute the intersection of gaze vector (generated using theta&phi) with the monitor plane.
Args:
theta (float): gaze pitch in radians
phi (float): gaze yaw in radians
pc_cam_mm (float): 3D pupil center coordinates in camera space in mm
R, T (float): camera extrinsics with respect to the monitor plane
Returns:
PoR_x, PoR_y, PoR_z (float): point of regard coordinates in camera space in mm,
or None if there is no intersection between the directed gaze ray and the plane.
"""
# Calculate point of regard given theta and phi angles
screenNormal = R
# Rotation matrix computed from mirror calibration
screenNormal = screenNormal[:, 2]
screenOrigin = T
# Translation vector computed from mirror calibration
gaze_vec = kpi_prediction_linker.compute_gaze_vector_from_theta_phi(theta, phi)
dNormalGazeVec = np.dot(screenNormal, gaze_vec)
d = np.dot(screenNormal, screenOrigin)
dNormalEyeCenter = np.dot(screenNormal, pc_cam_mm)
t = (d - dNormalEyeCenter) / dNormalGazeVec
flagBackwardIntersection = 'No'
if t < 0: # Intersects at a point behind gaze origin.
flagBackwardIntersection = 'Yes'
gaze_vec = np.expand_dims(gaze_vec, axis=1)
intersectPoint = pc_cam_mm + t * gaze_vec
PoR_x = intersectPoint[0]
PoR_y = intersectPoint[1]
PoR_z = intersectPoint[2]
return PoR_x, PoR_y, PoR_z, flagBackwardIntersection
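    # Worked example (identity extrinsics are an assumption made only for illustration):
    # with R = np.eye(3), T = np.zeros(3), a gaze origin at pc_cam_mm = [[0], [0], [800]]
    # and theta = phi = 0, the gaze ray [0, 0, -1] reaches the screen plane z = 0 at
    # t = 800, so the PoR is (0, 0, 0) mm and flagBackwardIntersection is 'No'.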
def getDataFrame(self, predictions, kpi_dataframe):
"""Return final data frame."""
predictions = [pred.split(' ') for pred in predictions]
dfpred = pd.DataFrame([], columns=self.resultCols)
for root_dir, gt_folder, gt_file_folder, \
kpi_sets in zip(self.gcosmos,
self.kpi_GT_dir,
self.kpi_GT_file_folder_name,
self.kpisets):
org_dir = root_dir.replace('postData', 'orgData')
if 'single_eye' == self.gaze_origin:
df = self.createTableFromSingleEyePred(predictions, root_dir, org_dir,
gt_folder, gt_file_folder, kpi_sets)
else:
df = self.createTableFromPred(predictions, root_dir, org_dir,
gt_folder, gt_file_folder, kpi_sets)
dfpred = pd.concat([dfpred, df], ignore_index=True, sort=True)
if kpi_dataframe is None or kpi_dataframe.empty:
dfmerged = dfpred
else:
dfmerged = pd.merge(dfpred,
kpi_dataframe,
on=['set_id', 'user_id', 'region_name', 'image_name'],
how='left')
return dfmerged
@staticmethod
def _get_root_set(set_id):
return set_id.rsplit('-', 1)[0]
@staticmethod
def _get_camera_id(set_id):
split = set_id.rsplit('-', 1)
if len(split) > 1:
return split[1]
# s400_KPI has no cam in the name.
return 'unknown'
def ConvertTableToDF(self, table):
"""Convert input table(list) to pandas data frame."""
df = pd.DataFrame(table, columns=self.resultCols)
df['frame_num'] = df.image_name.map(lambda x: x.split('.')[0])
df['frame_num'] = df.frame_num.map(lambda x: x.split('_')[-1])
df['root_set_id'] = df.set_id.map(self._get_root_set)
df['orig_frame_id'] = df['set_id'] + "_" + df["user_id"] + \
"_" + df.region_name.map(lambda r: r + "_" if r != "" else "") \
+ df["screen_pix_x"] + "_" + df["screen_pix_y"]
df['unique_cap_id'] = df['root_set_id'] + "_" + df["user_id"] + \
"_" + df["screen_pix_x"] + "_" + df["screen_pix_y"]
df.frame_num = df.frame_num.astype(float)
df = df.sort_values(by=['orig_frame_id', 'frame_num'])
df['cam_err_x'] = df['cam_err_x'] / 10.0
df['cam_err_y'] = df['cam_err_y'] / 10.0
df['cam_err_z'] = df['cam_err_z'] / 10.0
df['cam_err_xy'] = df['cam_err_xy'] / 10.0
df['cam_error'] = df['cam_error'] / 10.0 # Convert error to cm.
df['screen_phy_error'] = df['screen_phy_error'] / 10.0 # Convert error to cm.
return df
def createTableFromPred(self, predictions, root_dir, org_dir, gt_folder, gt_file_folder,
kpi_sets):
"""Constructs table(list) through prediction and KPI data frame.
Entries in each line of a joint2 ground truth file:
l[0]: path
l[1]: frame id
l[2:5] face bounding box
l[6:8]: ground truth in camera space
l[9:10] ground truth in screen space (mm)
l[11:13] PnP based head pose angles
l[14:16] Gaze angles theta-phi
l[17:19] Unit norm gaze vector
l[20:22] Head location
l[23:25] Left eye center location
l[26:28] Right eye center location
Entries in joint model:
l[0]: path + frame id
l[1:3]: gt_x, y, z
l[4:5]: gt_theta, phi
l[6:8]: pred_x, y, z
l[9:10] pred_theta, phi
Entries in theta-phi model:
l[0] = path + frame id
l[1:2] = gt_theta, phi
l[3:4] = pred_theta, phi
"""
final_table = []
# Read the prediction file
orig_data = pd.DataFrame(predictions)
if self.model_type == 'xyz':
orig_data.columns = ["frame_id", "GT_x", "GT_y", "GT_z", "cam_x", "cam_y", "cam_z"]
elif self.model_type in ['joint_xyz', 'joint_tp', 'joint']:
orig_data.columns = ["frame_id", "GT_x", "GT_y", "GT_z", "GT_theta", "GT_phi",
"cam_x", "cam_y", "cam_z", "cam_theta", "cam_phi"]
elif self.model_type == 'theta_phi':
orig_data.columns = ["frame_id", "GT_theta", "GT_phi", "cam_theta", "cam_phi"]
gt_pr = orig_data.values
indent = 8
# construct dictionary for predictions
pr_dict = {}
for prediction in gt_pr:
chunks = prediction[0].split('/')
for c in chunks:
if c == '':
continue
if (c[0].lower() == 's' or 'germany' in c)\
and ('kpi' in c or 'KPI' in c or 'gaze' in c):
set_id = c
frame_id = chunks[-1]
region_name = chunks[-2]
user_index = -3
if not region_name.startswith('region_'):
# Legacy flat structure, no regions.
region_name = ''
user_index += 1
if 'MITData_DataFactory' in prediction[0]:
user_index -= 1
user_id = chunks[user_index]
if set_id not in pr_dict:
pr_dict[set_id] = {}
if user_id not in pr_dict[set_id]:
pr_dict[set_id][user_id] = {}
if region_name not in pr_dict[set_id][user_id]:
pr_dict[set_id][user_id][region_name] = {}
pr_dict[set_id][user_id][region_name][frame_id] = [float(x) for x in prediction[1:]]
print('gt_pr.shape: {}'.format(gt_pr.shape))
if self._root_path is None:
self._root_path = ''
base_directory = os.path.join(self._root_path, root_dir)
org_path = os.path.join(self._root_path, org_dir)
for kpi_dir in kpi_sets:
base_dir = os.path.join(base_directory, kpi_dir, gt_folder, gt_file_folder)
screens = self.load_screen_parameters(org_path, kpi_dir)
extrinsics = self.load_cam_extrinsics(org_path, kpi_dir)
region_labels = self.load_region_labels(org_path, kpi_dir)
sample_all = 0
sample = 0
sample_NA = 0
# read the GT file with theta-phi conversions
for kpi_file in self.kpi_GT_file:
data_file = kpi_file + '.txt'
if os.path.exists(os.path.join(base_dir, data_file)):
data_file = os.path.join(base_dir, data_file)
else:
data_file = os.path.join(root_dir, data_file)
if not os.path.exists(data_file):
print('ATTENTION: BASE GT FILE DOES NOT EXIST! \t', data_file)
continue
for line in open(data_file):
gt_info = line.split(' ')
gt_info = gt_info[:6] + gt_info[indent + 6:]
sample_all += 1
region_name = gt_info[0].split('/')[-1]
if not region_name.startswith('region_'): # No regions
set_id = gt_info[0].split('/')[-3]
user_id = gt_info[0].split('/')[-1]
region_name = ''
else:
set_id = gt_info[0].split('/')[-4]
user_id = gt_info[0].split('/')[-2]
frame_id = gt_info[1]
if set_id in pr_dict:
if user_id in pr_dict[set_id]:
if region_name in pr_dict[set_id][user_id]:
if frame_id in pr_dict[set_id][user_id][region_name]:
pr = pr_dict[set_id][user_id][region_name][frame_id]
else:
print('Missing frame id:', frame_id, '\t in ', gt_info[0])
continue
else:
print('Missing region name:', region_name, '\t in ', gt_info[0])
continue
else:
print('Missing user id:', user_id, '\t in ', gt_info[0])
continue
else:
print('Missing set id:', set_id, '\t in ', gt_info[0])
continue
GT_screen_px = np.zeros((2, 1), dtype=float)
GT_screen_px[0] = float(gt_info[9])
GT_screen_px[1] = float(gt_info[10])
# GT on screen plane in mm
GT_screen_mm = self.screenPix2Phy(GT_screen_px, *screens[region_name])
GT_camera_mm = np.zeros((3, 1), dtype=float)
# Read GT values
GT_camera_mm[0] = gt_info[6]
GT_camera_mm[1] = gt_info[7]
GT_camera_mm[2] = gt_info[8]
# Center between two eyes
gaze_origin_cam_mm = np.zeros((3, 1), dtype=float)
gaze_origin_cam_mm[2][0] = 800.0
# Read HP GT values
GT_hp_theta = float(gt_info[25])
GT_hp_phi = float(gt_info[26])
# GT_theta, GT_phi
if self.gaze_origin == 'normal':
GT_theta = float(gt_info[14])
GT_phi = float(gt_info[15])
elif self.gaze_origin == 'mid_eyes':
GT_theta = float(gt_info[27])
GT_phi = float(gt_info[28])
elif self.gaze_origin == 'left_eye':
GT_theta = float(gt_info[29])
GT_phi = float(gt_info[30])
elif self.gaze_origin == 'right_eye':
GT_theta = float(gt_info[31])
GT_phi = float(gt_info[32])
elif self.gaze_origin == 'delta':
GT_theta = float(gt_info[27])
GT_phi = float(gt_info[28])
if GT_theta != -1.0:
GT_theta -= GT_hp_theta
if GT_phi != -1.0:
GT_phi -= GT_hp_phi
elif self.gaze_origin == 'headnorm_face':
GT_theta = float(gt_info[37])
GT_phi = float(gt_info[38])
cnvMat = np.zeros((3, 3), dtype=np.float32)
cnvMat[0][0] = float(gt_info[62])
cnvMat[0][1] = float(gt_info[63])
cnvMat[0][2] = float(gt_info[64])
cnvMat[1][0] = float(gt_info[65])
cnvMat[1][1] = float(gt_info[66])
cnvMat[1][2] = float(gt_info[67])
cnvMat[2][0] = float(gt_info[68])
cnvMat[2][1] = float(gt_info[69])
cnvMat[2][2] = float(gt_info[70])
elif self.gaze_origin == 'headnorm_leye':
GT_theta = float(gt_info[41])
GT_phi = float(gt_info[42])
cnvMat = np.zeros((3, 3), dtype=np.float32)
cnvMat[0][0] = float(gt_info[71])
cnvMat[0][1] = float(gt_info[72])
cnvMat[0][2] = float(gt_info[73])
cnvMat[1][0] = float(gt_info[74])
cnvMat[1][1] = float(gt_info[75])
cnvMat[1][2] = float(gt_info[76])
cnvMat[2][0] = float(gt_info[77])
cnvMat[2][1] = float(gt_info[78])
cnvMat[2][2] = float(gt_info[79])
elif self.gaze_origin == 'headnorm_reye':
GT_theta = float(gt_info[45])
GT_phi = float(gt_info[46])
cnvMat = np.zeros((3, 3), dtype=np.float32)
cnvMat[0][0] = float(gt_info[80])
cnvMat[0][1] = float(gt_info[81])
cnvMat[0][2] = float(gt_info[82])
cnvMat[1][0] = float(gt_info[83])
cnvMat[1][1] = float(gt_info[84])
cnvMat[1][2] = float(gt_info[85])
cnvMat[2][0] = float(gt_info[86])
cnvMat[2][1] = float(gt_info[87])
cnvMat[2][2] = float(gt_info[88])
else:
raise NotImplementedError('Gaze origin {} is not implemented, '
'please choose from one of the following:'
'normal, left_eye, right_eye, mid_eyes, '
'delta, headnorm_face,'
'headnorm_leye, headnorm_reye'
.format(self.gaze_origin))
# Ground truth mid point of two eye centers
GT_mid_ec = np.zeros((3, 1), dtype=float)
GT_mid_ec[0] = float(gt_info[16])
GT_mid_ec[1] = float(gt_info[17])
GT_mid_ec[2] = float(gt_info[18])
# Ground truth left eye center
GT_le_ec = np.zeros((3, 1), dtype=float)
GT_le_ec[0] = float(gt_info[19])
GT_le_ec[1] = float(gt_info[20])
GT_le_ec[2] = float(gt_info[21])
# Ground truth right eye center
GT_re_ec = np.zeros((3, 1), dtype=float)
GT_re_ec[0] = float(gt_info[22])
GT_re_ec[1] = float(gt_info[23])
GT_re_ec[2] = float(gt_info[24])
valid_left_eye = False
if GT_le_ec[0] != -1.0 and GT_le_ec[1] != -1.0 and GT_le_ec[2] != -1.0:
valid_left_eye = True
valid_right_eye = False
if GT_re_ec[0] != -1.0 and GT_re_ec[1] != -1.0 and GT_re_ec[2] != -1.0:
valid_right_eye = True
if valid_left_eye and valid_right_eye:
gaze_origin_cam_mm = GT_mid_ec
elif valid_left_eye is True and valid_right_eye is False:
gaze_origin_cam_mm = GT_le_ec
elif valid_left_eye is False and valid_right_eye is True:
gaze_origin_cam_mm = GT_re_ec
if 'headnorm_face' == self.gaze_origin:
gaze_origin_cam_mm[0] = float(gt_info[89])
gaze_origin_cam_mm[1] = float(gt_info[90])
gaze_origin_cam_mm[2] = float(gt_info[91])
elif 'headnorm_leye' == self.gaze_origin:
gaze_origin_cam_mm = GT_le_ec
elif 'headnorm_reye' == self.gaze_origin:
gaze_origin_cam_mm = GT_re_ec
# Calculate gaze vector
GT_gaze_vec = GT_camera_mm - gaze_origin_cam_mm
GT_gaze_vec = np.squeeze(GT_gaze_vec, axis=1)
GT_gaze_vec_mag = np.sqrt(
GT_gaze_vec[0] ** 2 + GT_gaze_vec[1] ** 2 + GT_gaze_vec[2] ** 2)
GT_gaze_vec = GT_gaze_vec / GT_gaze_vec_mag
d_deg_err_direct = 0
# Read/calculate prediction gaze_cam_mm and gaze_theta, gaze_phi
if self.model_type == 'xyz' or 'joint' in self.model_type:
if int(GT_camera_mm[0]) != int(pr[0]) \
or int(GT_camera_mm[1]) != int(pr[1]) \
or int(GT_camera_mm[2]) != int(pr[2]):
print('GT - Data file mismatch! Sample: ',
sample, GT_camera_mm[0],
pr[0], GT_camera_mm[1], pr[1], GT_camera_mm[2], pr[2])
continue
if self.model_type == 'xyz':
gaze_cam_mm = np.zeros((3, 1), dtype=float)
gaze_cam_mm[0] = pr[3]
gaze_cam_mm[1] = pr[4]
gaze_cam_mm[2] = pr[5]
gaze_theta = -1.0
gaze_phi = -1.0
# 'joint_xyz', 'joint_tp', or 'joint'.
elif 'joint' in self.model_type:
gaze_cam_mm = np.zeros((3, 1), dtype=float)
gaze_cam_mm[0] = pr[5]
gaze_cam_mm[1] = pr[6]
gaze_cam_mm[2] = pr[7]
gaze_theta = pr[8]
gaze_phi = pr[9]
if self.theta_phi_degrees:
gaze_theta = (gaze_theta * math.pi) / 180
gaze_phi = (gaze_phi * math.pi) / 180
if self.gaze_origin == 'delta':
gaze_theta += GT_hp_theta
gaze_phi += GT_hp_phi
elif self.model_type == 'theta_phi':
if abs(GT_theta - float(pr[0])) > 1e-2 or \
abs(GT_phi - float(pr[1])) > 1e-2:
print('GT - Data file mismatch! Sample: ',
sample, GT_theta, pr[0], GT_phi, pr[1])
continue
GT_ngv = self.compute_gaze_vector_from_theta_phi(GT_theta, GT_phi)
gaze_theta = pr[2]
gaze_phi = pr[3]
ngv = self.compute_gaze_vector_from_theta_phi(gaze_theta, gaze_phi)
d_deg_err_direct = np.arccos(np.dot(GT_ngv, ngv)) * 180 / np.pi
if self.theta_phi_degrees:
gaze_theta = (gaze_theta * math.pi) / 180
gaze_phi = (gaze_phi * math.pi) / 180
if self.gaze_origin == 'delta':
gaze_theta += GT_hp_theta
gaze_phi += GT_hp_phi
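# For head-normalized gaze origins, rotate the predicted normalized gaze vector back
# to camera space with the inverse of the conversion matrix before recomputing
# theta/phi in camera coordinates.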
if 'headnorm' in self.gaze_origin:
ngv = self.compute_gaze_vector_from_theta_phi(gaze_theta, gaze_phi)
gv = np.dot(np.linalg.inv(cnvMat), ngv)
gaze_theta, gaze_phi = self.compute_theta_phi_from_unit_vector(
gv[0], gv[1], gv[2])
# Calculate gaze_cam_mm according to the model type.
flagBackwardIntersection = 'No'
if self.model_type in ['theta_phi', 'joint_tp']:
if GT_theta == -1.0 and GT_phi == -1.0:
print('Invalid Theta-Phi sample: {} {} {} {} {}'
.format(sample, set_id, user_id, region_name, frame_id))
continue
gaze_cam_mm = self.compute_PoR_from_theta_phi(
gaze_theta, gaze_phi, gaze_origin_cam_mm,
*extrinsics[region_name])
flagBackwardIntersection = gaze_cam_mm[3]
if flagBackwardIntersection == 'Yes':
print('No forward gaze vector screen plane intersection: '
'{} {} {} {} {}'
.format(sample, set_id, user_id, region_name, frame_id))
sample_NA += 1
gaze_cam_mm = np.array([gaze_cam_mm[0], gaze_cam_mm[1], gaze_cam_mm[2]])
elif self.model_type == 'joint':
# Combine gaze from TP and XYZ portions.
if GT_theta != -1.0 and GT_phi != -1.0:
tp_gaze_cam_mm = self.compute_PoR_from_theta_phi(
gaze_theta, gaze_phi, gaze_origin_cam_mm,
*extrinsics[region_name])
flagBackwardIntersection = tp_gaze_cam_mm[3]
if flagBackwardIntersection == 'No':
tp_gaze_cam_mm = np.array(
[tp_gaze_cam_mm[0], tp_gaze_cam_mm[1], tp_gaze_cam_mm[2]])
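# Fuse the XYZ and theta-phi estimates: average x/y, keep the theta-phi z
# (screen-plane intersection depth).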
gaze_cam_mm = (gaze_cam_mm + tp_gaze_cam_mm) / 2.0
gaze_cam_mm[2] = tp_gaze_cam_mm[2]
else:
print('No forward gaze vector screen plane intersection, '
'but use XYZ portion sample: {} {} {} {} {}'
.format(sample, set_id, user_id, region_name, frame_id))
sample_NA += 1
else:
print('Invalid Theta-Phi but use XYZ portion sample: {} {} {} {} {}'
.format(sample, set_id, user_id, region_name, frame_id))
# else (model_type == 'xyz', 'joint_xyz', or 'joint'): no additional step is needed;
# gaze_cam_mm is used directly as read from the prediction file.
# Mind the indent here: the error computation below must not go under the elif case above.
# d-x, d-y, d-z, d-XY, d-XYZ, and degree error calculation.
x_cam_err = abs(GT_camera_mm[0] - gaze_cam_mm[0])[0]
y_cam_err = abs(GT_camera_mm[1] - gaze_cam_mm[1])[0]
z_cam_err = abs(GT_camera_mm[2] - gaze_cam_mm[2])[0]
GT_xy_vec = np.array([GT_camera_mm[0], GT_camera_mm[1]])
gaze_xy_vec = np.array([gaze_cam_mm[0], gaze_cam_mm[1]])
xy_cam_err = abs(GT_xy_vec - gaze_xy_vec)
d_xy_cam_err = np.linalg.norm(xy_cam_err)
xyz_cam_err = abs(GT_camera_mm - gaze_cam_mm)
d_xyz_cam_err = np.linalg.norm(xyz_cam_err)
gaze_scr_mm = self.fromCamera2ScreenProj_3inp(gaze_cam_mm,
*extrinsics[region_name])
xyz_scr_err = abs(GT_screen_mm - gaze_scr_mm[0:2])
d_xyz_scr_err = np.linalg.norm(xyz_scr_err)
user_dist = np.linalg.norm(gaze_origin_cam_mm - GT_camera_mm)
gaze_vec = gaze_cam_mm - gaze_origin_cam_mm
gaze_vec = np.squeeze(gaze_vec, axis=1)
gaze_vec_mag = np.sqrt(
gaze_vec[0] ** 2 + gaze_vec[1] ** 2 + gaze_vec[2] ** 2)
gaze_vec = gaze_vec / gaze_vec_mag
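# Angular error in degrees between the unit GT and predicted gaze vectors:
# arccos(GT_gaze_vec . gaze_vec) * 180 / pi.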
d_xyz_err_deg = np.arccos(np.dot(GT_gaze_vec, gaze_vec)) * 180 / np.pi
if flagBackwardIntersection == 'Yes':
d_xyz_err_deg = d_deg_err_direct
if abs(d_xyz_err_deg-d_deg_err_direct) > 1.0:
print('Error calculation might be problematic -> direct:' +
str(d_deg_err_direct) + ', post:' + str(d_xyz_err_deg))
# Calculate ref gaze vector for 45-deg cone
ref_gaze_vec = np.zeros((3, 1), dtype=float)
ref_gaze_vec[2] = -1.0
# Calculate angle between GT_gaze_vec and ref_gaze_vec
# for 45-deg cone, this should not be larger than 45/2
d_cone_deg = np.arccos(np.dot(GT_gaze_vec, ref_gaze_vec)) * 180 / np.pi
# if valid_overall_gaze:
row = [gt_info[0], gt_info[1]]
row += [set_id, user_id, region_name, region_labels[region_name]]
row += [str(int(float(gt_info[9]))), str(int(float(gt_info[10])))]
row += GT_camera_mm.flatten().tolist() + [np.linalg.norm(GT_camera_mm)]
row += [GT_theta, GT_phi]
row += gaze_origin_cam_mm.flatten().tolist()
row += [GT_hp_theta, GT_hp_phi]
row += gaze_cam_mm.flatten().tolist() + [np.linalg.norm(gaze_cam_mm)]
row += [gaze_theta, gaze_phi]
row += [x_cam_err, y_cam_err, z_cam_err, d_xy_cam_err]
row += [d_xyz_cam_err, d_xyz_scr_err, user_dist, d_xyz_err_deg,
d_deg_err_direct, flagBackwardIntersection, d_cone_deg]
final_table.append(row)
sample += 1
print('KPI: ' + kpi_dir + ' is completed!')
print('No of existing samples:' + str(sample_all))
print('No of total evaluated samples:' + str(sample))
print('No of no-forward-intersection samples:' + str(sample_NA))
return self.ConvertTableToDF(final_table)
def createTableFromSingleEyePred(self, predictions, root_dir, org_dir, gt_folder,
gt_file_folder, kpi_sets):
"""Constructs table(list) through prediction and KPI data frame.
Entries in each line of a joint2 ground truth file:
l[0]: path
l[1]: frame id
l[2:5] face bounding box
l[6:8]: ground truth in camera space
l[9:10] ground truth in screen space (mm)
l[11:13] PnP based head pose angles
l[14:16] Gaze angles theta-phi
l[17:19] Unit norm gaze vector
l[20:22] Head location
l[23:25] Left eye center location
l[26:28] Right eye center location
"""
final_table = []
# Read the prediction file
orig_data = pd.DataFrame(predictions)
orig_data.columns = ["frame_id", "GT_theta", "GT_phi", "cam_theta", "cam_phi"]
gt_pr = orig_data.values
indent = 8
# construct dictionary for predictions
print('gt_pr.shape: {}'.format(gt_pr.shape))
sample = 0
ind = 0
pr_dict = {}
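# Each frame contributes two consecutive prediction rows: left eye first (eye_id 0),
# then right eye (eye_id 1), so the loop below advances by two rows per sample.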
while ind < gt_pr.shape[0]:
chunks = predictions[ind][0].split('/')
for c in chunks:
if c == '':
continue
if (c[0].lower() == 's' or 'germany' in c)\
and ('kpi' in c or 'KPI' in c or 'gaze' in c):
set_id = c
frame_id = chunks[-1]
region_name = chunks[-2]
user_index = -3
if not region_name.startswith('region_'):
# Legacy flat structure, no regions.
region_name = ''
user_index += 1
if 'MITData_DataFactory' in predictions[ind][0]:
user_index -= 1
user_id = chunks[user_index]
if set_id not in pr_dict:
pr_dict[set_id] = {}
if user_id not in pr_dict[set_id]:
pr_dict[set_id][user_id] = {}
if region_name not in pr_dict[set_id][user_id]:
pr_dict[set_id][user_id][region_name] = {}
if frame_id not in pr_dict[set_id][user_id][region_name]:
pr_dict[set_id][user_id][region_name][frame_id] = {}
eye_id = 0
pr_dict[set_id][user_id][region_name][frame_id][eye_id] = \
[float(x) for x in predictions[ind][1:]]
eye_id = 1
pr_dict[set_id][user_id][region_name][frame_id][eye_id] = \
[float(x) for x in predictions[ind+1][1:]]
ind += 2
sample += 1
print('Parsed samples:', sample)
if self._root_path is None:
self._root_path = ''
base_directory = os.path.join(self._root_path, root_dir)
org_path = os.path.join(self._root_path, org_dir)
for kpi_dir in kpi_sets:
base_dir = os.path.join(base_directory, kpi_dir, gt_folder, gt_file_folder)
print('kpi_dir:', kpi_dir)
print('gt_folder:', gt_folder)
print('gt_file_folder:', gt_file_folder)
print('base_dir:', base_dir)
screens = self.load_screen_parameters(org_path, kpi_dir)
extrinsics = self.load_cam_extrinsics(org_path, kpi_dir)
region_labels = self.load_region_labels(org_path, kpi_dir)
sample_all = 0
sample = 0
sample_NA = 0
# read the GT file with theta-phi conversions
for kpi_file in self.kpi_GT_file:
data_file = kpi_file + '.txt'
if os.path.exists(os.path.join(base_dir, data_file)):
data_file = os.path.join(base_dir, data_file)
else:
data_file = os.path.join(root_dir, data_file)
if not os.path.exists(data_file):
print('ATTENTION: BASE GT FILE DOES NOT EXIST! \t', data_file)
continue
for line in open(data_file):
gt_info = line.split(' ')
gt_info = gt_info[:6] + gt_info[indent + 6:]
sample_all += 1
region_name = gt_info[0].split('/')[-1]
if not region_name.startswith('region_'): # No regions
set_id = gt_info[0].split('/')[-3]
user_id = gt_info[0].split('/')[-1]
region_name = ''
else:
set_id = gt_info[0].split('/')[-4]
user_id = gt_info[0].split('/')[-2]
frame_id = gt_info[1]
if set_id in pr_dict:
if user_id in pr_dict[set_id]:
if region_name in pr_dict[set_id][user_id]:
if frame_id in pr_dict[set_id][user_id][region_name]:
pr_leye = pr_dict[set_id][user_id][region_name][frame_id][0]
pr_reye = pr_dict[set_id][user_id][region_name][frame_id][1]
# According to dataloader take neg of right eye gaze yaw
pr_reye[1] *= -1.0
pr_reye[3] *= -1.0
else:
print('Missing frame id:', frame_id, '\t in ', gt_info[0])
continue
else:
print('Missing region name:', region_name, '\t in ', gt_info[0])
continue
else:
print('Missing user id:', user_id, '\t in ', gt_info[0])
continue
else:
print('Missing set id:', set_id, '\t in ', gt_info[0])
continue
# Left eye CnvMat
cnvMat_leye = np.zeros((3, 3), dtype=np.float32)
cnvMat_leye[0][0] = float(gt_info[71])
cnvMat_leye[0][1] = float(gt_info[72])
cnvMat_leye[0][2] = float(gt_info[73])
cnvMat_leye[1][0] = float(gt_info[74])
cnvMat_leye[1][1] = float(gt_info[75])
cnvMat_leye[1][2] = float(gt_info[76])
cnvMat_leye[2][0] = float(gt_info[77])
cnvMat_leye[2][1] = float(gt_info[78])
cnvMat_leye[2][2] = float(gt_info[79])
# Denormalize left eye gaze
ngv_leye = self.compute_gaze_vector_from_theta_phi(pr_leye[2], pr_leye[3])
gv_leye = np.dot(np.linalg.inv(cnvMat_leye), ngv_leye)
gaze_theta_leye, gaze_phi_leye = self.compute_theta_phi_from_unit_vector(
gv_leye[0], gv_leye[1], gv_leye[2])
# Right eye CnvMat
cnvMat_reye = np.zeros((3, 3), dtype=np.float32)
cnvMat_reye[0][0] = float(gt_info[80])
cnvMat_reye[0][1] = float(gt_info[81])
cnvMat_reye[0][2] = float(gt_info[82])
cnvMat_reye[1][0] = float(gt_info[83])
cnvMat_reye[1][1] = float(gt_info[84])
cnvMat_reye[1][2] = float(gt_info[85])
cnvMat_reye[2][0] = float(gt_info[86])
cnvMat_reye[2][1] = float(gt_info[87])
cnvMat_reye[2][2] = float(gt_info[88])
# Denormalize right eye gaze
ngv_reye = self.compute_gaze_vector_from_theta_phi(pr_reye[2], pr_reye[3])
gv_reye = np.dot(np.linalg.inv(cnvMat_reye), ngv_reye)
gaze_theta_reye, gaze_phi_reye = self.compute_theta_phi_from_unit_vector(
gv_reye[0], gv_reye[1], gv_reye[2])
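# Both per-eye predictions are now de-normalized to camera space; further below the
# two eyes are fused (averaged or single-eye) based on eye-center validity and
# head-pose yaw.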
GT_screen_px = np.zeros((2, 1), dtype=float)
GT_screen_px[0] = float(gt_info[9])
GT_screen_px[1] = float(gt_info[10])
# GT on screen plane in mm
GT_screen_mm = self.screenPix2Phy(GT_screen_px, *screens[region_name])
GT_camera_mm = np.zeros((3, 1), dtype=float)
# Read GT values
GT_camera_mm[0] = gt_info[6]
GT_camera_mm[1] = gt_info[7]
GT_camera_mm[2] = gt_info[8]
# Center between two eyes
gaze_origin_cam_mm = np.zeros((3, 1), dtype=float)
gaze_origin_cam_mm[2][0] = 80.0
# Read HP GT values
GT_hp_theta = float(gt_info[25])
GT_hp_phi = float(gt_info[26])
# Ground truth mid point of two eye centers
GT_mid_ec = np.zeros((3, 1), dtype=float)
GT_mid_ec[0] = float(gt_info[16])
GT_mid_ec[1] = float(gt_info[17])
GT_mid_ec[2] = float(gt_info[18])
# Ground truth left eye center
GT_le_ec = np.zeros((3, 1), dtype=float)
GT_le_ec[0] = float(gt_info[19])
GT_le_ec[1] = float(gt_info[20])
GT_le_ec[2] = float(gt_info[21])
# Ground truth right eye center
GT_re_ec = np.zeros((3, 1), dtype=float)
GT_re_ec[0] = float(gt_info[22])
GT_re_ec[1] = float(gt_info[23])
GT_re_ec[2] = float(gt_info[24])
# Ground truth left eye norm gaze
GT_norm_gaze_theta_leye = float(gt_info[41])
GT_norm_gaze_phi_leye = float(gt_info[42])
# Ground truth right eye norm gaze
GT_norm_gaze_theta_reye = float(gt_info[45])
GT_norm_gaze_phi_reye = float(gt_info[46])
# Discard samples that do not match in GT file and prediction file
if abs(GT_norm_gaze_theta_leye - float(pr_leye[0])) > 1e-2 or \
abs(GT_norm_gaze_phi_leye - float(pr_leye[1])) > 1e-2 or \
abs(GT_norm_gaze_theta_reye - float(pr_reye[0])) > 1e-2 or \
abs(GT_norm_gaze_phi_reye - float(pr_reye[1])) > 1e-2:
print('GT - Data file mismatch! Sample: ',
sample, GT_norm_gaze_theta_leye, pr_leye[0], GT_norm_gaze_phi_leye,
pr_leye[1], GT_norm_gaze_theta_reye, pr_reye[0],
GT_norm_gaze_phi_reye, pr_reye[1])
continue
# Discard samples that are invalid
if (GT_norm_gaze_theta_leye == -1.0 and GT_norm_gaze_phi_leye == -1.0) or \
(GT_norm_gaze_theta_reye == -1.0 and GT_norm_gaze_phi_reye == -1.0):
print('Invalid Theta-Phi sample: {} {} {} {} {}'
.format(sample, set_id, user_id, region_name, frame_id))
continue
# Check validity of left and right eye centers in 3D
valid_left_eye = False
if GT_le_ec[0] != -1.0 and GT_le_ec[1] != -1.0 and GT_le_ec[2] != -1.0:
valid_left_eye = True
valid_right_eye = False
if GT_re_ec[0] != -1.0 and GT_re_ec[1] != -1.0 and GT_re_ec[2] != -1.0:
valid_right_eye = True
# TODO(marar) add more sophisticated fusion algos
# Simple head pose based fusion
if GT_hp_phi > 0.5:
valid_right_eye = False
elif GT_hp_phi < -0.5:
valid_left_eye = False
GT_gaze_theta_norm = (GT_norm_gaze_theta_leye +
GT_norm_gaze_theta_reye) * 0.5
GT_gaze_phi_norm = (GT_norm_gaze_phi_leye + GT_norm_gaze_phi_reye) * 0.5
if valid_left_eye and valid_right_eye:
gaze_origin_cam_mm = GT_mid_ec
gaze_theta = (gaze_theta_leye + gaze_theta_reye) * 0.5
gaze_phi = (gaze_phi_leye + gaze_phi_reye) * 0.5
gaze_theta_norm = (pr_leye[2] + pr_reye[2]) * 0.5
gaze_phi_norm = (pr_leye[3] + pr_reye[3]) * 0.5
elif valid_left_eye is True and valid_right_eye is False:
gaze_origin_cam_mm = GT_le_ec
gaze_theta = gaze_theta_leye
gaze_phi = gaze_phi_leye
gaze_theta_norm = pr_leye[2]
gaze_phi_norm = pr_leye[3]
GT_gaze_theta_norm = GT_norm_gaze_theta_leye
GT_gaze_phi_norm = GT_norm_gaze_phi_leye
elif valid_left_eye is False and valid_right_eye is True:
gaze_origin_cam_mm = GT_re_ec
gaze_theta = gaze_theta_reye
gaze_phi = gaze_phi_reye
gaze_theta_norm = pr_reye[2]
gaze_phi_norm = pr_reye[3]
GT_gaze_theta_norm = GT_norm_gaze_theta_reye
GT_gaze_phi_norm = GT_norm_gaze_phi_reye
ngv = self.compute_gaze_vector_from_theta_phi(gaze_theta_norm, gaze_phi_norm)
GT_ngv = self.compute_gaze_vector_from_theta_phi(GT_gaze_theta_norm,
GT_gaze_phi_norm)
d_deg_err_direct = np.arccos(np.dot(GT_ngv, ngv)) * 180 / np.pi
# Calculate GT gaze vector
GT_gaze_vec = GT_camera_mm - gaze_origin_cam_mm
GT_gaze_vec = np.squeeze(GT_gaze_vec, axis=1)
GT_gaze_vec_mag = np.sqrt(
GT_gaze_vec[0] ** 2 + GT_gaze_vec[1] ** 2 + GT_gaze_vec[2] ** 2)
GT_gaze_vec = GT_gaze_vec / GT_gaze_vec_mag
# Calculate gaze_cam_mm
flagBackwardIntersection = 'No'
gaze_cam_mm = self.compute_PoR_from_theta_phi(
gaze_theta, gaze_phi, gaze_origin_cam_mm,
*extrinsics[region_name])
flagBackwardIntersection = gaze_cam_mm[3]
if flagBackwardIntersection == 'Yes':
print('No forward gaze vector screen plane intersection: '
'{} {} {} {} {}'
.format(sample, set_id, user_id, region_name, frame_id))
sample_NA += 1
gaze_cam_mm = np.array([gaze_cam_mm[0], gaze_cam_mm[1], gaze_cam_mm[2]])
# Mind the indent here, this should not go under elif case above
# d-x, d-y, d-z, d-XY, d-XYZ, and degree error calculation.
x_cam_err = abs(GT_camera_mm[0] - gaze_cam_mm[0])[0]
y_cam_err = abs(GT_camera_mm[1] - gaze_cam_mm[1])[0]
z_cam_err = abs(GT_camera_mm[2] - gaze_cam_mm[2])[0]
GT_xy_vec = np.array([GT_camera_mm[0], GT_camera_mm[1]])
gaze_xy_vec = np.array([gaze_cam_mm[0], gaze_cam_mm[1]])
xy_cam_err = abs(GT_xy_vec - gaze_xy_vec)
d_xy_cam_err = np.linalg.norm(xy_cam_err)
xyz_cam_err = abs(GT_camera_mm - gaze_cam_mm)
d_xyz_cam_err = np.linalg.norm(xyz_cam_err)
gaze_scr_mm = self.fromCamera2ScreenProj_3inp(gaze_cam_mm,
*extrinsics[region_name])
xyz_scr_err = abs(GT_screen_mm - gaze_scr_mm[0:2])
d_xyz_scr_err = np.linalg.norm(xyz_scr_err)
user_dist = np.linalg.norm(gaze_origin_cam_mm - GT_camera_mm)
gaze_vec = gaze_cam_mm - gaze_origin_cam_mm
gaze_vec = np.squeeze(gaze_vec, axis=1)
gaze_vec_mag = np.sqrt(
gaze_vec[0] ** 2 + gaze_vec[1] ** 2 + gaze_vec[2] ** 2)
gaze_vec = gaze_vec / gaze_vec_mag
d_xyz_err_deg = np.arccos(np.dot(GT_gaze_vec, gaze_vec)) * 180 / np.pi
if flagBackwardIntersection == 'Yes':
d_xyz_err_deg = d_deg_err_direct
if abs(d_xyz_err_deg-d_deg_err_direct) > 1.0:
print('Error calculation might be problematic -> direct:' +
str(d_deg_err_direct) + ', post:' + str(d_xyz_err_deg))
# Calculate ref gaze vector for 45-deg cone
ref_gaze_vec = np.zeros((3, 1), dtype=float)
ref_gaze_vec[2] = -1.0
# Calculate angle between GT_gaze_vec and ref_gaze_vec
# for 45-deg cone, this should not be larger than 45/2
d_cone_deg = np.arccos(np.dot(GT_gaze_vec, ref_gaze_vec)) * 180 / np.pi
# if valid_overall_gaze:
row = [gt_info[0], gt_info[1]]
row += [set_id, user_id, region_name, region_labels[region_name]]
row += [str(int(float(gt_info[9]))), str(int(float(gt_info[10])))]
row += GT_camera_mm.flatten().tolist() + [np.linalg.norm(GT_camera_mm)]
row += [GT_gaze_theta_norm, GT_gaze_phi_norm]
row += gaze_origin_cam_mm.flatten().tolist()
row += [GT_hp_theta, GT_hp_phi]
row += gaze_cam_mm.flatten().tolist() + [np.linalg.norm(gaze_cam_mm)]
row += [gaze_theta, gaze_phi]
row += [x_cam_err, y_cam_err, z_cam_err, d_xy_cam_err]
row += [d_xyz_cam_err, d_xyz_scr_err, user_dist, d_xyz_err_deg,
d_deg_err_direct, flagBackwardIntersection, d_cone_deg]
final_table.append(row)
sample += 1
print('KPI: ' + kpi_dir + ' is completed!')
print('No of existing samples:' + str(sample_all))
print('No of total evaluated samples:' + str(sample))
print('No of no-forward-intersection samples:' + str(sample_NA))
return self.ConvertTableToDF(final_table)
class KPIbuckets():
"""KPIbuckets bucketizes predictions once predictions and KPI data frame are linked."""
centercams = ['s400_KPI', 's457-gaze-kpi-2', 's434-kpi-3', 's435-kpi-3',
'germany-2-kpi-2 ', 'germany-right-4-kpi-2', 's464-gaze-kpi-1',
's465-gaze-kpi-1']
badUsers = ['user8_x', 'user9_x', 'user10_x']
badGTs = ['s434-kpi_TNg6PdPzFMmNKuZgr7BbSdvabP5fFjjqJyXBFQQ-od8=_1905_15',
's457-gaze-kpi_ZLvDTe7WQmc84UYZ_960_1065']
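# In-car gaze region boundaries below are expressed in camera-space millimeters.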
car_region_max_x = 700
car_region_min_x = -1420
car_region_max_y = 315
car_region_min_y = -355
car_center_region_max_x = 700
car_center_region_min_x = -540 # only change
car_center_region_max_y = 315
car_center_region_min_y = -355
car_right_region_max_x = -540 # only change
car_right_region_min_x = -1420
car_right_region_max_y = 315
car_right_region_min_y = -355
def __init__(self, df, bad_users):
"""
Initializes KPIbuckets class.
DF columns: see kpi_prediction_linker.resultCols
"""
# Clean L1 and L2
# remove s400_kpi bad users
dfclean = self.removeUsers(df, bad_users)
self.dfclean = dfclean.loc[~(dfclean['unique_cap_id'].isin(self.badGTs))]
self.df_glare = None
# KPI all: all frames.
# This used to be unique frames only (as data collection was not reliable on all frames).
# Change tfrecord_folder_name_kpi to TfRecords_unique_combined instead.
self.dfkpiall = self.dfclean
self.dftop10 = self.filterTopNUsers(self.dfkpiall, N=10)
self.dfbotTen = self.filterBottomNUsers(self.dfkpiall, N=10)
self.hppitch30 = self.filterHP_pitch_30(self.dfclean)
self.hppitch60 = self.filterHP_pitch_60(self.dfclean)
self.hppitch90 = self.filterHP_pitch_90(self.dfclean)
self.hpyaw30 = self.filterHP_yaw_30(self.dfclean)
self.hpyaw60 = self.filterHP_yaw_60(self.dfclean)
self.hpyaw90 = self.filterHP_yaw_90(self.dfclean)
self.hp15 = self.filterHP_15(self.dfclean)
self.hp30 = self.filterHP_30(self.dfclean)
self.hp45 = self.filterHP_45(self.dfclean)
self.hp60 = self.filterHP_60(self.dfclean)
self.coneCenter = self.filter_cone_45_central(self.dfclean)
self.conePeriphery = self.filter_cone_45_periphery(self.dfclean)
self.coneCenterTop10 = self.filter_cone_45_central(self.dftop10)
self.conePeripheryTop10 = self.filter_cone_45_periphery(self.dftop10)
# Global min-max (of ALL3)
# Cam X max: 732 Cam X min: -2288
# Cam Y max: 585 Cam Y min: -1201
# self.max_cam_x = 700
# self.min_cam_x = -2000
# self.max_cam_y = 500
# self.min_cam_y = -850
# Decide min and max cam boundaries by min and max ground truth x, y values
# rounded to nearest hundred.
self.max_cam_x = int((max(df['GT_cam_x'].max(),
700) // 100) * 100) # round to nearest hundred
self.min_cam_x = int((min(df['GT_cam_x'].min(), -2000) // 100) * 100)
self.max_cam_y = int((max(df['GT_cam_y'].max(), 500) // 100) * 100)
self.min_cam_y = int((min(df['GT_cam_y'].min(), -850) // 100) * 100)
print('cam x: {} {}'.format(self.min_cam_x, self.max_cam_x))
print('cam y: {} {}'.format(self.min_cam_y, self.max_cam_y))
# Initialize offsets for each in-car regions in heat map
self.camera_center_x = self.max_cam_x # 700
self.camera_center_y = -self.min_cam_y # 850
self.car_min_x = self.camera_center_x - 700 # 0
self.car_max_x = self.camera_center_x + 1420 # 2120
self.car_min_y = self.camera_center_y - 355 # 495
self.car_max_y = self.camera_center_y + 315 # 1165
self.right_mirror_x = self.camera_center_x + 1305
self.right_mirror_y = self.camera_center_y - 80
self.left_mirror_x = self.camera_center_x - 585
self.left_mirror_y = self.camera_center_y - 80
self.rear_mirror_x = self.camera_center_x + 370
self.rear_mirror_y = self.camera_center_y - 275
self.csd_x = self.camera_center_x + 370
self.csd_y = self.camera_center_y + 115
self.heat_map_columns = 18
self.heat_map_rows = 9
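# The heat map covers [min_cam_x, max_cam_x] x [min_cam_y, max_cam_y] in camera space,
# split into heat_map_columns x heat_map_rows equally sized cells.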
self.heat_map_grid_width = int((self.max_cam_x - self.min_cam_x) / self.heat_map_columns)
self.heat_map_grid_height = int((self.max_cam_y - self.min_cam_y) / self.heat_map_rows)
@staticmethod
def bucketize(df, file_writer, message):
"""Prints and writes the measured error of users in each buckets."""
'''"cam_err_x", "cam_err_y", "cam_err_z", "cam_err_xy",'''
unique_frames = len(KPIbuckets.getUniqueFrames(df))
num_frames = len(df.index)
num_users = len(df.drop_duplicates(['user_id'], keep='first'))
avg_cam_err = df['cam_error'].mean()
stdd_cam_err = df['cam_error'].std()
degree_err = df['degree_error'].mean()
stdd_deg_err = df['degree_error'].std()
x_err = df['cam_err_x'].mean()
stdd_x_err = df['cam_err_x'].std()
y_err = df['cam_err_y'].mean()
stdd_y_err = df['cam_err_y'].std()
z_err = df['cam_err_z'].mean()
stdd_z_err = df['cam_err_z'].std()
xy_err = df['cam_err_xy'].mean()
stdd_xy_err = df['cam_err_xy'].std()
print('Num. frames: {}'.format(num_frames))
print('Unique frames: {}'.format(unique_frames))
print('Num. users: {}'.format(num_users))
print('Avg. Cam Error(cm): {}'.format(avg_cam_err))
print('Stdd. Cam Error(cm): {}'.format(stdd_cam_err))
print('Avg. Degree Error: {}'.format(degree_err))
print('Stdd. Degree Error: {}'.format(stdd_deg_err))
print('Avg. X Y Z Error: {} {} {}'.format(x_err, y_err, z_err))
print('Stdd. X Y Z Error: {} {} {}'.format(stdd_x_err, stdd_y_err, stdd_z_err))
print('Avg. XY Error: {}'.format(xy_err))
print('Stdd. XY Error: {}'.format(stdd_xy_err))
file_writer.write('{}\n'.format(message))
file_writer.write('Num frames: {}\n'.format(num_frames))
file_writer.write('Unique frames: {}\n'.format(unique_frames))
file_writer.write('Num users: {}\n'.format(num_users))
file_writer.write('Avg. Cam Error(cm): {}\n'.format(avg_cam_err))
file_writer.write('Stdd. Cam Error(cm): {}\n'.format(stdd_cam_err))
file_writer.write('Avg. Degree Error: {}\n'.format(degree_err))
file_writer.write('Stdd. Degree Error: {}\n'.format(stdd_deg_err))
file_writer.write('Avg. X Y Z Error: {} {} {}\n'.format(x_err, y_err, z_err))
file_writer.write('Stdd. X Y Z Error: {} {} {}\n'.format(stdd_x_err, stdd_y_err,
stdd_z_err))
file_writer.write('Avg. XY Error: {}\n'.format(xy_err))
file_writer.write('Stdd. XY Error: {}\n'.format(stdd_xy_err))
file_writer.write('\n')
@staticmethod
def getUniqueFrames(df):
"""Returns data frame with unique frames."""
old_sets = ['s400_KPI']
old_set_filter = df["set_id"].isin(old_sets)
df_old = df[old_set_filter]
df_old = df_old.sort_values(by=['orig_frame_id', 'frame_num'])
df_old = df_old.drop_duplicates(['orig_frame_id'], keep='first')
df_new = df[~old_set_filter]
df_new = df_new.sort_values(by=['orig_frame_id', 'frame_num'])
df_new = df_new.drop_duplicates(['orig_frame_id'], keep='last')
df2 = df_old.append(df_new)
return df2
@staticmethod
def filterGetOnlyBackwardIntersection(df):
"""Returns data frame with only backward intersections."""
return df.loc[df['backwardIntersection'] == 'Yes']
@staticmethod
def filterGetOnlyForwardIntersection(df):
"""Returns data frame with only forward intersections."""
return df.loc[df['backwardIntersection'] == 'No']
@staticmethod
def filterNoGlasses(df):
"""Returns data frame with users not wearing glasses."""
return df.loc[df['glasses'] == 'No']
@staticmethod
def filterGlasses(df):
"""Returns data frame with users wearing glasses."""
return df.loc[df['glasses'] == 'Yes']
@staticmethod
def filterOccluded(df):
"""Returns data frame with users having either left or right eye occluded."""
return df[(df['leye_status'] == 'occluded') | (df['reye_status'] == 'occluded')]
@staticmethod
def filterCarRegions(df, region='car', margin=0):
"""Returns data frame with specified car region."""
min_x, min_y, max_x, max_y = 0, 0, 0, 0
if region == 'car':
min_x = KPIbuckets.car_region_min_x
min_y = KPIbuckets.car_region_min_y
max_x = KPIbuckets.car_region_max_x
max_y = KPIbuckets.car_region_max_y
elif region == 'center':
min_x = KPIbuckets.car_center_region_min_x
min_y = KPIbuckets.car_center_region_min_y
max_x = KPIbuckets.car_center_region_max_x
max_y = KPIbuckets.car_center_region_max_y
elif region == 'right':
min_x = KPIbuckets.car_right_region_min_x
min_y = KPIbuckets.car_right_region_min_y
max_x = KPIbuckets.car_right_region_max_x
max_y = KPIbuckets.car_right_region_max_y
return df[(df['GT_cam_x'] > (min_x - margin)) & (df['GT_cam_x'] < (max_x + margin))
& (df['GT_cam_y'] < (max_y + margin)) & (df['GT_cam_y'] > (min_y - margin))]
@staticmethod
def filterRegionNames(df, region_names=None):
"""Splits the dataframe into its regions, keeping only region_names.
If region_names is None, keeps all.
Returns a dict(region_name->df)
"""
if region_names is None:
region_names = df['region_name'].unique()
regions = {}
for region_name in region_names:
regions[region_name] = df.loc[df['region_name'] == region_name]
return regions
@staticmethod
def filterRegionLabels(df, region_labels=None):
"""Splits the dataframe into its regions by label, keeping only region_labels.
If region_labels is None, keeps all.
Returns a dict(region_label->df)
"""
if region_labels is None:
region_labels = df['region_label'].unique()
regions = {}
for region_label in region_labels:
regions[region_label] = df.loc[df['region_label'] == region_label]
return regions
@staticmethod
def filterCameraIds(df, camera_ids=None):
"""Splits the dataframe into cam ids.
If camera_ids is None, keeps all.
Returns a dict(cameras->df)
"""
df_camera_id = df.set_id.map(kpi_prediction_linker._get_camera_id)
if camera_ids is None:
camera_ids = df_camera_id.unique()
cameras = {}
for camera_id in camera_ids:
cameras[camera_id] = df.loc[df_camera_id == camera_id]
return cameras
@staticmethod
def filterCenterCamera(df):
"""Return only frames from the center camera."""
return df[(df['cam_position'] == 'Center')
| (df['cam_position'] == 'Middle Center')
| (df['cam_position'] == 'Center Middle ')
| (df['cam_position'] == 'Center Middle')]
@staticmethod
def filterLeftCenterCamera(df):
"""Return only frames from the left center camera."""
return df[(df['cam_position'] == 'Left')
| (df['cam_position'] == 'Left Center')
| (df['cam_position'] == 'Center Left')]
@staticmethod
def filterBottomCenterCamera(df):
"""Return only frames from the bottom center camera."""
return df[(df['cam_position'] == 'Bottom')
| (df['cam_position'] == 'Bottom Center')]
@staticmethod
def filterTopCenterCamera(df):
"""Return only frames from the top center camera."""
return df[(df['cam_position'] == 'Top')
| (df['cam_position'] == 'Top Center')
| (df['cam_position'] == 'Top Centrr')]
@staticmethod
def filterRightCenterCamera(df):
"""Return only frames from the right center camera."""
return df[(df['cam_position'] == 'Right Center')
| (df['cam_position'] == 'Center Right')
| (df['cam_position'] == 'Right Middle')]
@staticmethod
def filterRightCamera(df):
"""Return only frames from the right camera."""
return df[(df['cam_position'] == 'Far Right Center')
| (df['cam_position'] == 'Bottom Right')
| (df['cam_position'] == 'Left Right')
| (df['cam_position'] == 'Right Right')]
@staticmethod
def filterTopNUsers(df, N=10, metric='degree'):
"""Returns top N users with minimal errors."""
if metric == 'degree':
dftopusers = df.groupby('user_id')['degree_error'].mean().sort_values(). \
nsmallest(N)
elif metric == 'cm':
dftopusers = df.groupby('user_id')['cam_error'].mean().sort_values(). \
nsmallest(N)
topusers = []
for index, _ in dftopusers.items():
topusers.append(index)
return df.loc[df['user_id'].isin(topusers)]
@staticmethod
def filterBottomNUsers(df, N=10):
"""Returns bottom N users with maximal errors."""
dfbotusers = df.groupby('user_id')['degree_error'].mean().sort_values(). \
nlargest(N)
botusers = []
for index, _ in dfbotusers.items():
botusers.append(index)
return df.loc[df['user_id'].isin(botusers)]
@staticmethod
def removeUsers(df, usernames):
"""Returns a data frame with a specified user removed."""
if usernames is not None and usernames != '':
return df[~df['user_id'].isin(usernames)]
return df
@staticmethod
def filterEitherOpenEyes(df):
"""Returns a data frame with users with at least one eye open."""
return df[(df['leye_status'] == 'open') | (df['reye_status'] == 'open')]
@staticmethod
def filterBothOpenEyes(df):
"""Returns a data frame with users with both of their eyes open."""
return df[(df['leye_status'] == 'open') & (df['reye_status'] == 'open')]
@staticmethod
def classifyGlare(df):
"""Classify each frame's user glare status."""
for index, _ in df.iterrows():
left_pupil_glare_status = df.at[index, 'left_pupil_glare_status']
left_eye_glare_status = df.at[index, 'left_eye_glare_status']
right_pupil_glare_status = df.at[index, 'right_pupil_glare_status']
right_eye_glare_status = df.at[index, 'right_eye_glare_status']
left_eye_occlusion = df.at[index, 'leye_status']
right_eye_occlusion = df.at[index, 'reye_status']
left_iris_status = df.at[index, 'left_iris_status']
right_iris_status = df.at[index, 'right_iris_status']
'''
Glare Ranks
1. glare on pupil
2. glare on eye (but not pupil)
3. glint on pupil(but no glare on eye or pupil)
4. glint on eye but not pupil (no glare on eye or pupil)
5. no glare or glint on eye
'''
left_eye_rank = 0
right_eye_rank = 0
bright_pupil = 'normal'
if left_eye_occlusion == 'open':
if left_pupil_glare_status == 'glare_pupil':
left_eye_rank = 1
if left_eye_glare_status == 'glare_eye' and left_pupil_glare_status == 'no_glare':
left_eye_rank = 2
if left_pupil_glare_status == 'glint_pupil' and left_eye_glare_status == 'no_glare':
left_eye_rank = 3
if left_eye_glare_status == 'glint_eye' and left_pupil_glare_status == 'no_glare':
left_eye_rank = 4
if left_eye_glare_status == 'no_glare' and left_pupil_glare_status == 'no_glare':
left_eye_rank = 5
if left_iris_status == 'bright_pupil':
bright_pupil = 'bright_pupil'
if right_eye_occlusion == 'open':
if right_pupil_glare_status == 'glare_pupil':
right_eye_rank = 1
if right_eye_glare_status == 'glare_eye' and right_pupil_glare_status == 'no_glare':
right_eye_rank = 2
if right_pupil_glare_status == 'glint_pupil' and \
right_eye_glare_status == 'no_glare':
right_eye_rank = 3
if right_eye_glare_status == 'glint_eye' and right_pupil_glare_status == 'no_glare':
right_eye_rank = 4
if right_eye_glare_status == 'no_glare' and right_pupil_glare_status == 'no_glare':
right_eye_rank = 5
if right_iris_status == 'bright_pupil':
bright_pupil = 'bright_pupil'
df.at[index, 'glare_status'] = min(left_eye_rank, right_eye_rank)
df.at[index, 'bright_pupil'] = bright_pupil
return df
@staticmethod
def filter_cone_45_central(df):
"""Returns a data frame with samples within 45-deg cone of user."""
return df[df['cone_degree'] <= 22.5]
@staticmethod
def filter_cone_45_periphery(df):
"""Returns a data frame with samples outside 45-deg cone of user."""
return df[df['cone_degree'] > 22.5]
@staticmethod
def filterHP_pitch_30(df):
"""Returns a data frame with users with hp theta smaller than 30 degrees."""
return df[(((0 < df['GT_hp_theta']) & (df['GT_hp_theta'] <= np.pi / 6)) |
((0 > df['GT_hp_theta']) & (df['GT_hp_theta'] >= -np.pi / 6)))]
@staticmethod
def filterHP_pitch_60(df):
"""Returns a data frame with users with hp theta smaller than 30 degrees."""
return df[(((np.pi / 6 < df['GT_hp_theta']) & (df['GT_hp_theta'] <= np.pi / 3)) |
((-np.pi / 6 > df['GT_hp_theta']) & (df['GT_hp_theta'] >= -np.pi / 3)))]
@staticmethod
def filterHP_pitch_90(df):
"""Returns a data frame with users with hp theta smaller than 30 degrees."""
return df[((df['GT_hp_theta'] > np.pi / 3) | (df['GT_hp_theta'] < -np.pi / 3))]
@staticmethod
def filterHP_yaw_30(df):
"""Returns a data frame with users with hp theta smaller than 30 degrees."""
return df[(((0 < df['GT_hp_phi']) & (df['GT_hp_phi'] <= np.pi / 6)) |
((0 > df['GT_hp_phi']) & (df['GT_hp_phi'] >= -np.pi / 6)))]
@staticmethod
def filterHP_yaw_60(df):
"""Returns a data frame with users with hp theta smaller than 30 degrees."""
return df[(((np.pi / 6 < df['GT_hp_phi']) & (df['GT_hp_phi'] <= np.pi / 3)) |
((-np.pi / 6 > df['GT_hp_phi']) & (df['GT_hp_phi'] >= -np.pi / 3)))]
@staticmethod
def filterHP_yaw_90(df):
"""Returns a data frame with users with hp theta smaller than 30 degrees."""
return df[((df['GT_hp_phi'] > np.pi / 3) | (df['GT_hp_phi'] < -np.pi / 3))]
@staticmethod
def filterHP_15(df):
"""Returns a data frame with users with hp theta smaller than 30 degrees."""
return df[((df['GT_hp_phi'] < np.pi / 12) & (df['GT_hp_phi'] > -np.pi / 12) &
(df['GT_hp_theta'] < np.pi / 12) & (df['GT_hp_theta'] > -np.pi / 12))]
@staticmethod
def filterHP_30(df):
"""Returns a data frame with users with hp theta smaller than 30 degrees."""
return df[((df['GT_hp_phi'] < np.pi / 6) & (df['GT_hp_phi'] > -np.pi / 6) &
(df['GT_hp_theta'] < np.pi / 6) & (df['GT_hp_theta'] > -np.pi / 6))]
@staticmethod
def filterHP_45(df):
"""Returns a data frame with users with hp theta smaller than 30 degrees."""
return df[((df['GT_hp_phi'] < np.pi / 4) & (df['GT_hp_phi'] > -np.pi / 4) &
(df['GT_hp_theta'] < np.pi / 4) & (df['GT_hp_theta'] > -np.pi / 4))]
@staticmethod
def filterHP_60(df):
"""Returns a data frame with users with hp theta smaller than 30 degrees."""
return df[((df['GT_hp_phi'] < np.pi / 3) & (df['GT_hp_phi'] > -np.pi / 3) &
(df['GT_hp_theta'] < np.pi / 3) & (df['GT_hp_theta'] > -np.pi / 3))]
def heat_map_visualization(self, df, output_path, file_name):
"""
Produces an error heat map image.
The heatmap produced shows the ground truth's xy-coordinates (in camera coordinate system).
Drawing regions only makes sense with a single camera dataframe and is therefore disabled
on multiple cameras.
"""
def color_scale_list(value):
"""Return the (b,g,r) color from the custom color scale.
For the given value between 0 and 1.
"""
if value < 1.0/16:
return (0, 255, 0) # green -> 0 - 1.5 deg, 0 - 2 cm
if value < 2.0/16:
return (0, 150, 128) # dark-green -> 1.5 -3 deg , 2 - 4 cm
if value < 4.0/16:
return (6, 158, 205) # yellow-green -> 3 - 6, 4 - 8 cm
if value < 8.0/16:
return (0, 178, 255) # orange -> 6 - 12, 8 - 16 cm
if value < 16.0/16:
return (66, 112, 243) # salmon -> 12 - 24, 16 - 32 cm
return (30, 7, 205) # red > 24
ED = 1.5 # min error in degree
DIST = 75 # cm
ECM = DIST * np.tan(ED * np.pi / 180) # min error in cm: 1.9640 cm
img_xyz_cm = np.zeros((self.max_cam_y - self.min_cam_y,
self.max_cam_x - self.min_cam_x, 3), np.uint8)
img_xyz_deg = np.zeros((self.max_cam_y - self.min_cam_y,
self.max_cam_x - self.min_cam_x, 3), np.uint8)
# dict that keeps track of error in each region of heat map
heat_map_error_cm_dict = {}
heat_map_error_degree_dict = {}
# Grids x has columns+1 entries to allow getting the end of the last column
# with heat_map_grids_x[self.heat_map_columns].
heat_map_grids_x = range(0, (self.max_cam_x - self.min_cam_x) + self.heat_map_grid_width,
self.heat_map_grid_width)
heat_map_grids_y = range(0, (self.max_cam_y - self.min_cam_y) + self.heat_map_grid_height,
self.heat_map_grid_height)
for _, row in df.iterrows():
gt_cam_x = row['GT_cam_x']
gt_cam_y = row['GT_cam_y']
gaze_error_mm = row['cam_error']
gaze_error_degree = row['degree_error']
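# Map the GT camera-space point to a heat map cell: the x index is mirrored
# (columns-1 - ...) so the map is drawn as seen from the camera, and both indices
# are clamped to the grid boundaries.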
heat_map_x_index = self.heat_map_columns-1 - min(int(gt_cam_x - self.min_cam_x) //
self.heat_map_grid_width,
self.heat_map_columns-1)
heat_map_y_index = min(int(gt_cam_y - self.min_cam_y) // self.heat_map_grid_height,
self.heat_map_rows-1)
if (heat_map_y_index, heat_map_x_index) in heat_map_error_cm_dict:
heat_map_error_cm_dict[(heat_map_y_index, heat_map_x_index)].append(gaze_error_mm)
else:
heat_map_error_cm_dict[(heat_map_y_index, heat_map_x_index)] = [gaze_error_mm]
if (heat_map_y_index, heat_map_x_index) in heat_map_error_degree_dict:
heat_map_error_degree_dict[(heat_map_y_index,
heat_map_x_index)].append(gaze_error_degree)
else:
heat_map_error_degree_dict[(heat_map_y_index,
heat_map_x_index)] = [gaze_error_degree]
for grid_y in range(self.heat_map_rows):
for grid_x in range(self.heat_map_columns):
if (grid_y, grid_x) in heat_map_error_cm_dict:
count = len(heat_map_error_cm_dict[(grid_y, grid_x)])
cam_xyz_mean_cm = np.mean(heat_map_error_cm_dict[(grid_y, grid_x)])
cam_degree_mean = np.mean(heat_map_error_degree_dict[(grid_y, grid_x)])
else:
continue
if cam_xyz_mean_cm < 0:
continue
xyz_color = color_scale_list(cam_xyz_mean_cm / (16 * ECM))
if cam_degree_mean < 0:
continue
degree_color = color_scale_list(cam_degree_mean / (16 * ED))
# Plot xyz cm error heat map regions.
cv2.rectangle(img_xyz_cm, (int(heat_map_grids_x[grid_x]),
int(heat_map_grids_y[grid_y])),
(int(heat_map_grids_x[grid_x + 1]),
int(heat_map_grids_y[grid_y + 1])),
xyz_color, thickness=-1)
cv2.rectangle(img_xyz_cm, (int(heat_map_grids_x[grid_x]),
int(heat_map_grids_y[grid_y])),
(int(heat_map_grids_x[grid_x + 1]),
int(heat_map_grids_y[grid_y + 1])),
(255, 0, 0), thickness=1)
cv2.putText(img_xyz_cm, 'Frames: {}'.format(str(count)),
(int(heat_map_grids_x[grid_x] + 5),
int(heat_map_grids_y[grid_y] + 2 * 15)),
cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 0, 0))
cv2.putText(img_xyz_cm, 'Err[cm]: {}'.format(round(cam_xyz_mean_cm, 2)),
(int(heat_map_grids_x[grid_x] + 5),
int(heat_map_grids_y[grid_y] + 3 * 15)),
cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 0, 0))
cv2.putText(img_xyz_cm, 'Err[deg]: {}'.format(round(cam_degree_mean, 2)),
(int(heat_map_grids_x[grid_x] + 5),
int(heat_map_grids_y[grid_y] + 4 * 15)),
cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 0, 0))
# Plot degrees error heat map regions.
cv2.rectangle(img_xyz_deg, (int(heat_map_grids_x[grid_x]),
int(heat_map_grids_y[grid_y])),
(int(heat_map_grids_x[grid_x + 1]),
int(heat_map_grids_y[grid_y + 1])),
degree_color, thickness=-1)
cv2.rectangle(img_xyz_deg, (int(heat_map_grids_x[grid_x]),
int(heat_map_grids_y[grid_y])),
(int(heat_map_grids_x[grid_x + 1]),
int(heat_map_grids_y[grid_y + 1])),
(255, 0, 0), thickness=1)
cv2.putText(img_xyz_deg, 'Frames: {}'.format(str(count)),
(int(heat_map_grids_x[grid_x] + 5),
int(heat_map_grids_y[grid_y] + 2 * 15)),
cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 0, 0))
cv2.putText(img_xyz_deg, 'Err[cm]: {}'.format(round(cam_xyz_mean_cm, 2)),
(int(heat_map_grids_x[grid_x] + 5),
int(heat_map_grids_y[grid_y] + 3 * 15)),
cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 0, 0))
cv2.putText(img_xyz_deg, 'Err[deg]: {}'.format(round(cam_degree_mean, 2)),
(int(heat_map_grids_x[grid_x] + 5),
int(heat_map_grids_y[grid_y] + 4 * 15)),
cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 0, 0))
# Draw center camera region.
cv2.circle(img_xyz_cm, (int(self.camera_center_x), int(self.camera_center_y)),
int(10), (255, 255, 255), -1)
cv2.circle(img_xyz_deg, (int(self.camera_center_x), int(self.camera_center_y)),
int(10), (255, 255, 255), -1)
cv2.rectangle(img_xyz_cm, (int(self.camera_center_x - 200),
int(self.camera_center_y - 85)),
(int(self.camera_center_x + 200), int(self.camera_center_y + 85)),
(255, 255, 255), thickness=2)
cv2.rectangle(img_xyz_deg, (int(self.camera_center_x - 200),
int(self.camera_center_y - 85)),
(int(self.camera_center_x + 200), int(self.camera_center_y + 85)),
(255, 255, 255), thickness=2)
cv2.putText(img_xyz_cm, 'Cam', (int(self.camera_center_x - 15),
int(self.camera_center_y - 25)),
cv2.FONT_HERSHEY_SIMPLEX,
0.8, (255, 255, 255))
cv2.putText(img_xyz_deg, 'Cam', (int(self.camera_center_x - 15),
int(self.camera_center_y - 25)),
cv2.FONT_HERSHEY_SIMPLEX,
0.8, (255, 255, 255))
cv2.putText(img_xyz_cm, '(0,0)', (int(self.camera_center_x - 35),
int(self.camera_center_y + 30)),
cv2.FONT_HERSHEY_SIMPLEX,
0.8, (255, 255, 255))
cv2.putText(img_xyz_deg, '(0,0)', (int(self.camera_center_x - 35),
int(self.camera_center_y + 30)),
cv2.FONT_HERSHEY_SIMPLEX,
0.8, (255, 255, 255))
camera_ids = df.set_id.map(kpi_prediction_linker._get_camera_id).unique()
if len(camera_ids) == 1 and camera_ids[0] != 'unknown':
# a heatmap with only a single camera, enable boxes showing the regions
# Draw entire car region.
cv2.rectangle(img_xyz_cm, (int(self.car_min_x), int(self.car_min_y)),
(int(self.car_max_x), int(self.car_max_y)),
(255, 255, 255), thickness=3)
cv2.rectangle(img_xyz_deg, (int(self.car_min_x), int(self.car_min_y)),
(int(self.car_max_x), int(self.car_max_y)),
(255, 255, 255), thickness=3)
# Draw a box around each region.
df_regions = self.filterRegionNames(df)
for region_name, df_region in df_regions.items():
if region_name == "":
region_name = "screen" # No regions -> on bench setup.
# x axis is flipped (compare to heat_map_x_index), max and min hence swapped.
min_x = self.max_cam_x - df_region['GT_cam_x'].max()
max_x = self.max_cam_x - df_region['GT_cam_x'].min()
min_y = df_region['GT_cam_y'].min() - self.min_cam_y
max_y = df_region['GT_cam_y'].max() - self.min_cam_y
center_x = min_x + (max_x-min_x)/2
center_y = min_y + (max_y-min_y)/2
cv2.circle(img_xyz_cm, (int(center_x), int(center_y)),
int(10), (255, 255, 255), -1)
cv2.circle(img_xyz_deg, (int(center_x), int(center_y)),
int(10), (255, 255, 255), -1)
cv2.rectangle(img_xyz_cm, (int(min_x), int(min_y)),
(int(max_x), int(max_y)),
(255, 255, 255), thickness=2)
cv2.rectangle(img_xyz_deg, (int(min_x), int(min_y)),
(int(max_x), int(max_y)),
(255, 255, 255), thickness=2)
cv2.putText(img_xyz_cm, region_name, (int(center_x - 15),
int(center_y - 25)),
cv2.FONT_HERSHEY_SIMPLEX,
0.8, (255, 255, 255))
cv2.putText(img_xyz_deg, region_name, (int(center_x - 15),
int(center_y - 25)),
cv2.FONT_HERSHEY_SIMPLEX,
0.8, (255, 255, 255))
# Remember opencv uses BGR colors, not RGB.
cv2.imwrite(os.path.join(output_path, file_name + '_cm.png'), img_xyz_cm)
cv2.imwrite(os.path.join(output_path, file_name + '_degree.png'), img_xyz_deg)
@staticmethod
def save_images(df, output_path, bucket, num_samples):
"""Outputs sample images from data frame for debug purposes."""
if not os.path.exists(output_path):
os.mkdir(output_path)
bucket_folder = os.path.join(output_path, bucket)
shutil.rmtree(bucket_folder, ignore_errors=True)
os.mkdir(bucket_folder)
num_samples = min(len(df.index), num_samples)
df = df.sample(num_samples)
for index, _ in df.iterrows():
cosmos_path = '/home/copilot.cosmos10/RealTimePipeline/set'
data_path = 'Data'
set_id = df.at[index, 'set_id']
user_id = df.at[index, 'user_id']
frame_id = df.at[index, 'image_name']
if set_id >= 's500' and 'germany' not in set_id:
cosmos_path = '/home/driveix.cosmos639/GazeData/orgData'
data_path = 'pngData'
frame_path = os.path.join(cosmos_path, set_id, data_path, user_id, frame_id)
image_frame = cv2.imread(frame_path)
output_file = '{}_{}_{}'.format(set_id, user_id, frame_id)
cv2.imwrite(os.path.join(bucket_folder, output_file), image_frame)
class PredictionVisualizer(object):
"""Visualizes prediction through combining KPIBuckets and kpi_prediction_linker."""
time_ins_group_cols = ['root_set_id', 'user_id', 'screen_pix_x', 'screen_pix_y', 'frame_num']
combined_eval_cols = ['num_cameras', 'min_cam_cm_err', 'min_cam_deg_err',
'min_GT_cam_norm', 'min_GT_angle_norm']
def __init__(self, model_type, kpi_sets, bad_users,
buckets_to_visualize, path_info,
gaze_origin='normal', tp_degrees=False, time_instance_info_path=None):
"""
Initializes prediction visualizer.
model_type: xyz, joint, theta-phi
kpi_sets: kpi sets to visualize
bad_users: users to be filtered from predictions
buckets_to_visualize: visualize specified user buckets: glasses, no glasses, occluded eye
time_instance_info_path: path to dump time instance info
"""
self._model_type = model_type
self._bad_users = None
if bad_users is not None and bad_users != '':
self._bad_users = bad_users.split(' ')
self._visualize_functions = buckets_to_visualize.split(' ')
self.time_instance_info_path = time_instance_info_path
write_ins = time_instance_info_path is not None
if write_ins:
mkdir_p(time_instance_info_path)
if self._model_type == 'joint':
self._kpi_prediction_linker = [kpi_prediction_linker(model_type='joint_xyz',
kpi_sets=kpi_sets,
path_info=path_info,
gaze_origin=gaze_origin,
theta_phi_degrees=tp_degrees),
kpi_prediction_linker(model_type='joint_tp',
kpi_sets=kpi_sets,
path_info=path_info,
gaze_origin=gaze_origin,
theta_phi_degrees=tp_degrees),
kpi_prediction_linker(model_type='joint',
kpi_sets=kpi_sets,
path_info=path_info,
gaze_origin=gaze_origin,
theta_phi_degrees=tp_degrees,
write_time_instance_info=write_ins)
]
self._output_file = ['kpi_bucketize_joint_xyz.txt',
'kpi_bucketize_joint_tp.txt',
'kpi_bucketize_joint.txt']
self._heat_map_folder = ['heat_map_joint_xyz',
'heat_map_joint_tp',
'heat_map_joint']
self._joint_extension = ['joint_xyz',
'joint_tp',
'joint']
else:
self._kpi_prediction_linker = kpi_prediction_linker(model_type=self._model_type,
kpi_sets=kpi_sets,
path_info=path_info,
gaze_origin=gaze_origin,
theta_phi_degrees=tp_degrees,
write_time_instance_info=write_ins)
self._output_file = 'kpi_bucketize_' + self._model_type + '.txt'
self._heat_map_folder = 'heat_map_' + self._model_type
def __call__(self, predictions, kpi_dataframe, output_path):
"""
Produces bucketized results for predictions and error heat map.
If model type is joint, bucketized results and heat map will be generated
for joint_xyz, joint_tp and joint model.
"""
if kpi_dataframe is None or kpi_dataframe.empty:
print('Got an empty kpi_dataframe, cannot compute all buckets.')
# Remove unavailable buckets and warn about them.
config = set(self._visualize_functions)
new = config - set(['hard', 'easy', 'center-camera', 'left-center-camera',
'bottom-center-camera', 'top-center-camera',
'right-center-camera', 'right-camera', 'glasses', 'no-glasses',
'occluded', 'glare-pupil', 'glare-eye', 'glint-pupil',
'glint-eye', 'no-glare', 'class-0', 'bright-pupil',
'non-bright-pupil'])
diff = config - new
print('Skipping the following buckets: {}'.format(list(diff)))
self._visualize_functions = [e for e in self._visualize_functions if e in new]
if self._model_type == 'joint':
for linker, output_file, heat_map_folder, \
joint_extension in zip(self._kpi_prediction_linker,
self._output_file,
self._heat_map_folder,
self._joint_extension):
self.visualize_kpi(linker, predictions, kpi_dataframe, output_path,
output_file, heat_map_folder, joint_extension)
else:
self.visualize_kpi(self._kpi_prediction_linker, predictions, kpi_dataframe,
output_path, self._output_file, self._heat_map_folder)
@staticmethod
def _add_min_norm(group):
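# Annotates each time-instance group with its minimum camera/degree error and the
# GT norms of the rows that achieve those minima.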
group['min_cam_cm_err'] = group['cam_error'].min()
group['min_cam_deg_err'] = group['degree_error'].min()
group['min_GT_cam_norm'] = group.loc[group['cam_error'].idxmin()]['GT_cam_norm']
group['min_GT_angle_norm'] = group.loc[group['degree_error'].idxmin()]['GT_angle_norm']
return group
def _dump_time_ins_info(self, df_total):
df_total_no_dup = df_total.drop_duplicates(
self.time_ins_group_cols + ['set_id'],
keep='first',
inplace=False)
time_ins_group = df_total_no_dup.groupby(
self.time_ins_group_cols)
print('There are {} time instances.'.format(time_ins_group.ngroups))
# Populate combined_eval_cols fields
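# num_cameras counts how many camera views captured the same time instance; the
# min_* columns keep the best-performing view for that instance.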
df_total_no_dup['num_cameras'] = time_ins_group['frame_num'].transform('count')
df_total_no_dup = time_ins_group.apply(self._add_min_norm)
# Flush <time_instance_path>/<root_set_id>
df_group_root = df_total_no_dup.loc[:, ['root_set_id']]
for root_set_id, _ in df_group_root.groupby(['root_set_id']):
root_set_id_path = os.path.join(self.time_instance_info_path, root_set_id)
if os.path.exists(root_set_id_path):
print('Deleting files of folder {}'.format(root_set_id_path))
subprocess.call('rm -rf ' + root_set_id_path + '/*', shell=True)
df_group_stat = df_total_no_dup.loc[:, self.time_ins_group_cols + self.combined_eval_cols]
for k, group in df_group_stat.groupby(self.time_ins_group_cols):
# Every row in the same group has the same vals for combined_eval_cols
df_group = group.head(1).loc[:, self.combined_eval_cols]
df_group['time_instance'] = '_'.join(map(str, k))
df_group = df_group[['time_instance'] + self.combined_eval_cols]
root_set_id_path = os.path.join(self.time_instance_info_path, k[0])
mkdir_p(root_set_id_path)
user = k[1]
df_group.to_csv(
os.path.join(root_set_id_path, '{}.txt'.format(user)),
header=None, index=None, sep=' ', mode='a')
def visualize_kpi(self, linker, predictions, kpi_dataframe, output_path,
output_file, heat_map_folder, joint_extension='joint'):
"""Generate combined Data frame."""
output_file = os.path.join(output_path, output_file)
sample_image_dir = os.path.join(output_path, 'sample_images')
df_total = linker.getDataFrame(predictions, kpi_dataframe)
# Write csv of the total table.
df_total.to_csv(os.path.join(output_path, 'df_total_{}.csv'.format(joint_extension)))
# Non-joint model types have default joint_extension='joint'
if joint_extension == 'joint' and self.time_instance_info_path:
self._dump_time_ins_info(df_total)
# Create buckets / User analysis using DF above
tx = KPIbuckets(df_total, self._bad_users)
heat_map_dir = os.path.join(output_path, heat_map_folder)
if not os.path.exists(heat_map_dir):
os.mkdir(heat_map_dir)
with open(output_file, 'w') as f:
for fn in self._visualize_functions:
if fn == 'all':
print('\nKPI all frames')
tx.bucketize(df_total, f, 'KPI all frames')
tx.heat_map_visualization(df_total, heat_map_dir, 'kpi_all')
elif fn == 'only-forward':
print('\nKPI only forward intersections')
df_forward = tx.filterGetOnlyForwardIntersection(tx.dfkpiall)
tx.bucketize(df_forward, f, 'KPI only forward intersections')
tx.heat_map_visualization(df_forward, heat_map_dir, 'kpi_forward')
elif fn == 'only-backward':
print('\nKPI only backward intersections')
df_backward = tx.filterGetOnlyBackwardIntersection(tx.dfkpiall)
tx.bucketize(df_backward, f, 'KPI only backward intersections')
tx.heat_map_visualization(df_backward, heat_map_dir, 'kpi_backward')
elif fn == 'unique':
print('\nKPI unique frames')
df_unique = tx.getUniqueFrames(tx.dfkpiall)
tx.bucketize(df_unique, f, 'KPI unique frames')
tx.heat_map_visualization(df_unique, heat_map_dir, 'kpi_unique')
elif fn == 'clean':
print('\nKPI clean')
tx.bucketize(tx.dfclean, f, 'KPI clean')
tx.heat_map_visualization(tx.dfclean, heat_map_dir, 'kpi_clean')
elif fn == 'hard':
print('\nKPI hard')
# KPI hard: preserved frames where the user may have one eye closed (at least one eye open).
dfkpihard = tx.filterEitherOpenEyes(tx.dfkpiall)
tx.bucketize(dfkpihard, f, 'KPI hard')
tx.heat_map_visualization(dfkpihard, heat_map_dir, 'kpi_hard')
elif fn == 'easy':
print('\nKPI easy')
# KPI easy: preserved frames where the user has both eyes open.
dfkpieasy = tx.filterBothOpenEyes(tx.dfkpiall)
tx.bucketize(dfkpieasy, f, 'KPI easy')
tx.heat_map_visualization(dfkpieasy, heat_map_dir, 'kpi_easy')
elif fn == 'car':
print('\nKPI car region')
df_car = tx.filterCarRegions(tx.dfkpiall, region='car')
tx.bucketize(df_car, f, 'KPI car region')
tx.heat_map_visualization(df_car, heat_map_dir, 'kpi_car_region')
elif fn == 'car_center':
print('\nKPI car region center')
df_car_center = tx.filterCarRegions(tx.dfkpiall, region='center')
tx.bucketize(df_car_center, f, 'KPI car center region')
tx.heat_map_visualization(df_car_center, heat_map_dir, 'kpi_car_center')
elif fn == 'car_right':
print('\nKPI car region right')
df_car_right = tx.filterCarRegions(tx.dfkpiall, region='right')
tx.bucketize(df_car_right, f, 'KPI car right region')
tx.heat_map_visualization(df_car_right, heat_map_dir, 'kpi_car_right')
elif fn == 'regions':
print('\nKPI different regions...')
df_regions = tx.filterRegionNames(tx.dfkpiall)
if len(df_regions) == 1 and '' in df_regions:
print('No regions found, skipping regions bucket!')
else:
for region_name, df_region in df_regions.items():
print('\nKPI {}'.format(region_name))
tx.bucketize(df_region, f, 'KPI {}'.format(region_name))
tx.heat_map_visualization(df_region, heat_map_dir,
'kpi_{}'.format(region_name))
elif fn == 'region-labels':
print('\nKPI different region-labels...')
df_regions = tx.filterRegionLabels(tx.dfkpiall)
if len(df_regions) == 1 and 'unknown' in df_regions:
print('No regions found, skipping region-labels bucket!')
else:
for region_label, df_region in df_regions.items():
print('\nKPI {}'.format(region_label))
tx.bucketize(df_region, f, 'KPI {}'.format(region_label))
sane_label = "_".join(region_label.split())
tx.heat_map_visualization(df_region, heat_map_dir,
'kpi_{}'.format(sane_label))
elif fn == 'cameras':
print('\nKPI different cameras...')
df_cameras = tx.filterCameraIds(tx.dfkpiall)
for camera_id, df_camera in df_cameras.items():
print('\nKPI camera {}'.format(camera_id))
tx.bucketize(df_camera, f, 'KPI camera {}'.format(camera_id))
tx.heat_map_visualization(df_camera, heat_map_dir,
'kpi_camera_{}'.format(camera_id))
elif fn == 'center-camera':
print('\nKPI center camera')
df_center = tx.filterCenterCamera(tx.dfkpiall)
tx.bucketize(df_center, f, 'KPI center camera')
tx.heat_map_visualization(df_center, heat_map_dir, 'kpi_center_camera')
elif fn == 'left-center-camera':
print('\nKPI left center camera')
df_left_center_camera = tx.filterLeftCenterCamera(tx.dfkpiall)
tx.bucketize(df_left_center_camera, f, 'KPI left center camera')
tx.heat_map_visualization(df_left_center_camera, heat_map_dir,
'kpi_left_center_camera')
elif fn == 'bottom-center-camera':
print('\nKPI bottom center camera')
df_bot_center_camera = tx.filterBottomCenterCamera(tx.dfkpiall)
tx.bucketize(df_bot_center_camera, f, 'KPI bottom center camera')
tx.heat_map_visualization(df_bot_center_camera, heat_map_dir,
'kpi_bot_center_camera')
elif fn == 'top-center-camera':
print('\nKPI top center camera')
df_top_center_camera = tx.filterTopCenterCamera(tx.dfkpiall)
tx.bucketize(df_top_center_camera, f, 'KPI top center camera')
tx.heat_map_visualization(df_top_center_camera, heat_map_dir,
'kpi_top_center_camera')
elif fn == 'right-center-camera':
print('\nKPI right center camera')
df_right_center_camera = tx.filterRightCenterCamera(tx.dfkpiall)
tx.bucketize(df_right_center_camera, f, 'KPI right center camera')
tx.heat_map_visualization(df_right_center_camera, heat_map_dir,
'kpi_right_center_camera')
elif fn == 'right-camera':
print('\nKPI right camera')
df_right_camera = tx.filterRightCamera(tx.dfkpiall)
tx.bucketize(df_right_camera, f, 'KPI right camera')
tx.heat_map_visualization(df_right_camera, heat_map_dir, 'kpi_right_camera')
elif fn == 'glasses':
print('\nKPI glasses')
dfglasses = tx.filterGlasses(tx.dfkpiall)
tx.bucketize(dfglasses, f, 'KPI glasses')
tx.heat_map_visualization(dfglasses, heat_map_dir, 'kpi_glasses')
elif fn == 'no-glasses':
print('\nKPI no glasses')
dfnoglasses = tx.filterNoGlasses(tx.dfkpiall)
tx.bucketize(dfnoglasses, f, 'KPI no glasses')
tx.heat_map_visualization(dfnoglasses, heat_map_dir, 'kpi_no_glasses')
elif fn == 'occluded':
print('\nKPI either eye occluded')
dfoccl = tx.filterOccluded(tx.dfkpiall)
tx.bucketize(dfoccl, f, 'KPI either eye occluded')
tx.heat_map_visualization(dfoccl, heat_map_dir, 'kpi_occluded')
elif fn == 'topTen':
print('\nKPI top ten')
tx.bucketize(tx.dftop10, f, 'KPI top ten users')
tx.heat_map_visualization(tx.dftop10, heat_map_dir, 'kpi_top_ten')
elif fn == 'botTen':
print('\nKPI bottom ten')
tx.bucketize(tx.dfbotTen, f, 'KPI bottom ten users')
tx.heat_map_visualization(tx.dfbotTen, heat_map_dir, 'kpi_bottom_ten')
elif 'top-' in fn:
fn = fn.replace('top-', '')
nUsers = int(fn)
print('\nKPI top {} users'.format(nUsers))
dfTopN = tx.filterTopNUsers(tx.dfkpiall, nUsers)
tx.bucketize(dfTopN, f, 'KPI top {} users'.format(nUsers))
tx.heat_map_visualization(dfTopN, heat_map_dir, 'kpi_top_{}'.format(nUsers))
plt.clf()
degree_means = []
for users in range(1, nUsers):
df = tx.filterTopNUsers(tx.dfkpiall, users, metric='degree')
degree_means.append(df['degree_error'].mean())
plt.figure(1)
plt.plot(range(1, nUsers), degree_means)
plt.xlabel('KPI top {} users'.format(nUsers))
plt.ylabel('Error (degrees)')
plt.title('Gaze Top {}'.format(nUsers))
plt.savefig(os.path.join(output_path,
"UserErrorAnalysis_top_" + str(nUsers)
+ "_degree_" + joint_extension + ".png"))
plt.clf()
cam_means = []
for users in range(1, nUsers):
df = tx.filterTopNUsers(tx.dfkpiall, users, metric='cm')
cam_means.append(df['cam_error'].mean())
plt.figure(1)
plt.plot(range(1, nUsers), cam_means)
plt.xlabel('KPI top {} users'.format(nUsers))
plt.ylabel('Error (cm)')
plt.title('Gaze Top {}'.format(nUsers))
plt.savefig(os.path.join(output_path,
"UserErrorAnalysis_top_" + str(nUsers)
+ "_cm_" + joint_extension + ".png"))
plt.clf()
with open(os.path.join(output_path,
'top{}_data.txt'.format(nUsers)), 'w+') as t:
t.write('{}\n'.format(degree_means))
t.write('{}\n'.format(cam_means))
elif fn == 'glare-pupil':
if tx.df_glare is None:
tx.df_glare = tx.classifyGlare(tx.dfclean)
print('\nKPI glare pupil')
df_glare_pupil = tx.df_glare[tx.df_glare['glare_status'] == 1]
tx.bucketize(df_glare_pupil, f, 'KPI glare pupil')
tx.heat_map_visualization(df_glare_pupil, heat_map_dir, 'kpi_glare_pupil')
tx.save_images(df_glare_pupil, sample_image_dir, 'glare_pupil', 50)
elif fn == 'glare-eye':
if tx.df_glare is None:
tx.df_glare = tx.classifyGlare(tx.dfclean)
print('\nKPI glare eye')
df_glare_eye = tx.df_glare[tx.df_glare['glare_status'] == 2]
tx.bucketize(df_glare_eye, f, 'KPI glare eye')
tx.heat_map_visualization(df_glare_eye, heat_map_dir, 'kpi_glare_eye')
tx.save_images(df_glare_eye, sample_image_dir, 'glare_eye', 50)
elif fn == 'glint-pupil':
if tx.df_glare is None:
tx.df_glare = tx.classifyGlare(tx.dfclean)
print('\nKPI glint pupil')
df_glint_pupil = tx.df_glare[tx.df_glare['glare_status'] == 3]
tx.bucketize(df_glint_pupil, f, 'KPI glint pupil')
tx.heat_map_visualization(df_glint_pupil, heat_map_dir, 'kpi_glint_pupil')
tx.save_images(df_glint_pupil, sample_image_dir, 'glint_pupil', 50)
elif fn == 'glint-eye':
if tx.df_glare is None:
tx.df_glare = tx.classifyGlare(tx.dfclean)
print('\nKPI glint eye')
df_glint_eye = tx.df_glare[tx.df_glare['glare_status'] == 4]
tx.bucketize(df_glint_eye, f, 'KPI glint eye')
tx.heat_map_visualization(df_glint_eye, heat_map_dir, 'kpi_glint_eye')
tx.save_images(df_glint_eye, sample_image_dir, 'glint_eye', 50)
elif fn == 'no-glare':
if tx.df_glare is None:
tx.df_glare = tx.classifyGlare(tx.dfclean)
print('\nKPI No glare eye')
df_no_glare_eye = tx.df_glare[tx.df_glare['glare_status'] == 5]
tx.bucketize(df_no_glare_eye, f, 'KPI No glare eye')
tx.heat_map_visualization(df_no_glare_eye, heat_map_dir, 'kpi_no_glare_eye')
                    tx.save_images(df_no_glare_eye, sample_image_dir, 'no_glare_eye', 50)
elif fn == 'class-0':
if tx.df_glare is None:
tx.df_glare = tx.classifyGlare(tx.dfclean)
print('\nKPI glare class 0')
df_glare_class_0 = tx.df_glare[tx.df_glare['glare_status'] == 0]
tx.bucketize(df_glare_class_0, f, 'KPI glare class 0')
tx.save_images(df_glare_class_0, sample_image_dir, 'glare_class_0', 50)
elif fn == 'bright-pupil':
if tx.df_glare is None:
tx.df_glare = tx.classifyGlare(tx.dfclean)
print('\nKPI bright pupil')
df_bright_pupil = tx.df_glare[tx.df_glare['bright_pupil'] == 'bright_pupil']
tx.bucketize(df_bright_pupil, f, 'KPI bright pupil')
tx.save_images(df_bright_pupil, sample_image_dir, 'bright_pupil', 50)
elif fn == 'non-bright-pupil':
if tx.df_glare is None:
tx.df_glare = tx.classifyGlare(tx.dfclean)
print('\nKPI non-bright pupil')
df_non_bright_pupil = tx.df_glare[tx.df_glare['bright_pupil'] == 'normal']
tx.bucketize(df_non_bright_pupil, f, 'KPI Non-bright pupil')
elif fn == 'hpPitch30':
print('\nKPI HP Pitch [< 30]')
tx.bucketize(tx.hppitch30, f, 'KPI HP Pitch [< 30]')
tx.heat_map_visualization(tx.hppitch30, heat_map_dir, 'kpi_hp_pitch_30')
elif fn == 'hpPitch60':
print('\nKPI HP Pitch [30-60]')
tx.bucketize(tx.hppitch60, f, 'KPI HP Pitch [30-60]')
tx.heat_map_visualization(tx.hppitch60, heat_map_dir, 'kpi_hp_pitch_60')
elif fn == 'hpPitch90':
print('\nKPI HP Pitch [>60]')
tx.bucketize(tx.hppitch90, f, 'KPI HP Pitch [> 60]')
tx.heat_map_visualization(tx.hppitch90, heat_map_dir, 'kpi_hp_pitch_90')
elif fn == 'hpYaw30':
print('\nKPI HP Yaw [< 30]')
tx.bucketize(tx.hpyaw30, f, 'KPI HP Yaw [< 30]')
tx.heat_map_visualization(tx.hpyaw30, heat_map_dir, 'kpi_hp_yaw_30')
elif fn == 'hpYaw60':
print('\nKPI HP Yaw [30-60]')
tx.bucketize(tx.hpyaw60, f, 'KPI HP Yaw [30-60]')
tx.heat_map_visualization(tx.hpyaw60, heat_map_dir, 'kpi_hp_yaw_60')
elif fn == 'hpYaw90':
print('\nKPI HP Yaw [>60]')
tx.bucketize(tx.hpyaw90, f, 'KPI HP Yaw [> 60]')
tx.heat_map_visualization(tx.hpyaw90, heat_map_dir, 'kpi_hp_yaw_90')
elif fn == 'hp15':
print('\nKPI HP [-15,15]')
tx.bucketize(tx.hp15, f, 'KPI HP [-15,15]')
tx.heat_map_visualization(tx.hp15, heat_map_dir, 'kpi_hp_15')
elif fn == 'hp30':
print('\nKPI HP [-30,30]')
tx.bucketize(tx.hp30, f, 'KPI HP [-30,30]')
tx.heat_map_visualization(tx.hp30, heat_map_dir, 'kpi_hp_30')
elif fn == 'hp45':
print('\nKPI HP [-45,45]')
tx.bucketize(tx.hp45, f, 'KPI HP [-45,45]')
tx.heat_map_visualization(tx.hp45, heat_map_dir, 'kpi_hp_45')
elif fn == 'hp60':
print('\nKPI HP [-60,60]')
tx.bucketize(tx.hp60, f, 'KPI HP [-60,60]')
tx.heat_map_visualization(tx.hp60, heat_map_dir, 'kpi_hp_60')
elif fn == 'cone45_center':
print('\nKPI cone45_center [-22.5,22.5]')
tx.bucketize(tx.coneCenter, f, 'KPI cone45_center [-22.5,22.5]')
tx.heat_map_visualization(tx.coneCenter, heat_map_dir,
'kpi_cone45_center')
elif fn == 'cone45_periphery':
print('\nKPI cone45_periphery')
tx.bucketize(tx.conePeriphery, f, 'KPI cone45_periphery')
tx.heat_map_visualization(tx.conePeriphery, heat_map_dir,
'kpi_cone45_periphery')
elif fn == 'cone45_center_top10':
print('\nKPI cone45_center_top10 [-22.5,22.5]')
tx.bucketize(tx.coneCenterTop10, f, 'KPI cone45_center_top10 [-22.5,22.5]')
tx.heat_map_visualization(tx.coneCenterTop10, heat_map_dir,
'kpi_cone45_center_top10')
elif fn == 'cone45_periphery_top10':
print('\nKPI cone45_periphery_top10')
tx.bucketize(tx.conePeripheryTop10, f, 'KPI cone45_periphery_top10')
tx.heat_map_visualization(tx.conePeripheryTop10, heat_map_dir,
'kpi_cone45_periphery_top10')
f.close()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/utilities/prediction_visualization.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utility functions for DriveIX."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/utilities/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hook for serializing Keras models."""
import glob
import logging
import os
import re
from keras import backend as K
from nvidia_tao_tf1.core.hooks import KerasCheckpointListener
logger = logging.getLogger(__name__)
class EpochModelSerializationListener(KerasCheckpointListener):
"""Adds metadata to serialized keras model."""
def __init__(
self,
checkpoint_dir,
model,
key,
steps_per_epoch=None,
max_to_keep=None,
after_save_callable=None,
prefix="model"):
"""Constructor.
Args:
checkpoint_dir (str): Base directory for the checkpoint files.
model (keras.models.Model): Instance of the model to serialize.
            key (str): A key string to serialize the model during the experiment.
            steps_per_epoch (int): Number of steps per epoch, used to derive the epoch
                number from the global step when a checkpoint is saved.
            max_to_keep (int): Maximum number of serialized checkpoints to keep.
"""
super(EpochModelSerializationListener, self).__init__(
model=model,
checkpoint_dir=checkpoint_dir,
max_to_keep=max_to_keep,
after_save_callable=after_save_callable,
prefix=prefix)
self._key = key
self._steps_per_epoch = steps_per_epoch
def begin(self):
"""Called after starting the session."""
pattern = r"^%s.epoch-(\d+)\.hdf5$" % re.escape(
os.path.join(self._checkpoint_dir, self._prefix)
)
compiled = re.compile(pattern)
def extract_model_number(filename):
s = compiled.findall(filename)
return int(s[0]) if s else -1, filename
filenames = glob.glob(os.path.join(self._checkpoint_dir, "*.hdf5"))
# Weed out filenames that do not match the pattern.
filenames = [
filename for filename in filenames if compiled.match(filename) is not None
]
sorted_filenames = sorted(filenames, key=extract_model_number)
self._latest_checkpoints.extend(sorted_filenames)
def after_save(self, session, global_step_value):
"""Serialize metadata to the tlt file after it has been saved."""
if session:
K.set_session(session)
K.manual_variable_initialization(True)
epoch = int(global_step_value / self._steps_per_epoch)
model_path = os.path.join(self._checkpoint_dir, 'model.epoch-%s.hdf5' % epoch)
self._model.save_model(file_name=model_path)
self._cleanup(model_path)
def end(self, session, global_step_value):
"""Run at the end of the session, reset the old variale initialization setting."""
K.manual_variable_initialization(self._previous_MANUAL_VAR_INIT)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/utilities/serialization_listener.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test GazeNet kpi visualizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nvidia_tao_tf1.cv.common.utilities.kpi_visualization import KpiVisualizer
def test_write_csv():
"""Test GazeNet Kpi Visualization on reading csv with correct number of lines and rows"""
write_csv = True
path_info = {
'root_path': '',
'set_directory_path': ['nvidia_tao_tf1/cv/common/utilities/testdata'],
'ground_truth_folder_name': [''],
'ground_truth_file_folder_name': [''],
}
kpi_bucket_file = ''
visualize_set_id = ['testset']
expected_rows = 1
expected_columns = 22
_kpi_visualizer = KpiVisualizer(visualize_set_id, kpi_bucket_file, path_info)
# kpi_bucket_file will be none on invalid file names.
assert _kpi_visualizer._kpi_bucket_file is None
dfTable = _kpi_visualizer(
output_path='nvidia_tao_tf1/cv/common/utilities/csv',
write_csv=write_csv
)
assert len(dfTable.index) == expected_rows
assert len(dfTable.columns) == expected_columns
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/utilities/test_kpi_visualizer.py |
import os
from keras import backend as K
from keras.layers import Concatenate, Conv2D, Dense, Flatten, Input
from keras.models import Model
from nvidia_tao_tf1.blocks.models import KerasModel
import numpy as np
from nvidia_tao_tf1.cv.common.utilities import tlt_utils
class DummyModel(KerasModel):
"""Dummy model for tests."""
def _build_dummy_model(self, dummy_tensor):
"""Build dummy model.
Args:
dummy_tensor (tensor): Dummy tensor.
Returns:
x_3 (tensor): Model output.
"""
x_1_1 = Conv2D(32,
kernel_size=(3, 3),
strides=(1, 1),
data_format='channels_first',
name='layer-1-1')(dummy_tensor)
x_2_1 = Conv2D(32,
kernel_size=(3, 3),
strides=(1, 1),
data_format='channels_first',
name='layer-2-1')(dummy_tensor)
x_2 = Concatenate(axis=1)([x_1_1, x_2_1])
x_2_flatten = Flatten(data_format='channels_first')(x_2)
x_3 = Dense(10)(x_2_flatten)
return x_3
def build(self, key, dummy_input):
"""Build dummy model.
Args:
key (str): Encode / decode model.
dummy_input (tensor): Input to model.
Returns:
keras model.
"""
model_name = 'DummyNet'
dummy_tensor = Input(tensor=dummy_input, name='dummy_input')
dummy_output = self._build_dummy_model(dummy_tensor)
model = Model(inputs=[dummy_tensor], outputs=[dummy_output], name=model_name)
self._keras_model = model
return self._keras_model
def test_onnx_export(tmpdir):
"""Test onnx export."""
dummy_model = DummyModel()
key = "test"
dummy_input = np.random.randn(1, 3, 72, 72)
dummy_input = K.constant(dummy_input)
model = dummy_model.build(key, dummy_input)
# Test save_exported_file() using onnx as backend.
output_file_name_onnx_backend = os.path.join(tmpdir, 'test_onnx_backend.tlt')
tlt_utils.save_exported_file(
model,
output_file_name=output_file_name_onnx_backend,
key=key,
backend='onnx')
assert os.path.isfile(output_file_name_onnx_backend)
def test_uff_export(tmpdir):
"""Test UFF export."""
dummy_model = DummyModel()
key = "test"
dummy_input = np.random.randn(1, 3, 72, 72)
dummy_input = K.constant(dummy_input)
model = dummy_model.build(key, dummy_input)
# Test save_exported_file() using uff as backend.
output_file_name_uff_backend = os.path.join(tmpdir, 'test_uff_backend.tlt')
tlt_utils.save_exported_file(
model,
output_file_name_uff_backend,
key=key,
backend='uff')
assert os.path.isfile(output_file_name_uff_backend)
def test_tfonnx_export(tmpdir):
"""Test tfonnx export."""
dummy_model = DummyModel()
key = "test"
dummy_input = np.random.randn(1, 3, 72, 72)
dummy_input = K.constant(dummy_input)
model = dummy_model.build(key, dummy_input)
# Test save_exported_file() using tfonnx as backend.
output_file_name_tfonnx_backend = os.path.join(tmpdir, 'test_tfonnx_backend.tlt')
tlt_utils.save_exported_file(
model,
output_file_name_tfonnx_backend,
key=key,
backend='tfonnx')
assert os.path.isfile(output_file_name_tfonnx_backend)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/utilities/test_tlt_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test utility script: error calculation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from nvidia_tao_tf1.cv.common.utilities.error_calculation \
import compute_error_joint, compute_error_theta_phi, compute_error_xyz
def sample_results():
"""Return a sample xyz results list."""
sample = [[313.0, 94.0, -173.0, 300.0, 82.0, -123.0],
[-110.0, -354.0, -51.0, -8.0, -360.0, -63.0],
[-800.0, -166.0, -106.0, -726.0, -132.0, 2.0],
[-523.0, 67.0, -169.0, -478.0, 123.0, -31.0]]
return sample
def sample_results_theta_phi():
"""Return a sample theta-phi results list."""
sample = [[0.0692, 0.5036, 0.1708, 0.5964],
[0.2982, -0.0896, 0.4051, 0.0851],
[0.0403, -0.3247, 0.0627, -0.4986],
[-0.2298, 0.7030, -0.1134, 0.5984]]
return sample
def sample_results_joint():
"""Return a sample joint results list."""
sample = [[-582.28, -145.09, -111.27, 0.0692, 0.5036, -697.39, -238.32, -64.77, 0.1708, 0.5964],
[43.85, -333.97, -56.72, 0.2982, -0.0896, -77.05, -441.13, 10.44, 0.4051, 0.0851],
[258.68, -176.96, -99.21, 0.0403, -0.3247, 445.05, -132.13, 2.12, 0.0627, -0.4986],
[-867.76, 136.02, -189.95, -0.2298, 0.7030, -864.74, 58.92, -170.83, -0.1134, 0.5984]]
return sample
def test_compute_error():
"""Test compute_error function."""
results = sample_results()
final_errors, num_results = compute_error_xyz(results)
exp_num_results = 4
exp_final_errors = [(5.85, 3.3109),
(2.70, 1.97230),
(7.70, 4.90815),
(11.168964, 3.8728855),
(6.8286329, 3.1200596)]
assert exp_num_results == num_results
for i in range(5):
np.testing.assert_almost_equal(exp_final_errors[i][0], final_errors[i][0], 2)
np.testing.assert_almost_equal(exp_final_errors[i][1], final_errors[i][1], 2)
# Test error calculation for theta_phi.
results_tp = sample_results_theta_phi()
final_errors_tp, num_results_tp = compute_error_theta_phi(results_tp)
exp_final_errors = [(0.1658, 0.0218),
(0.0868, 0.0375),
(0.1365, 0.038),
(0.1685, 0.0248)]
assert exp_num_results == num_results_tp
for i in range(4):
np.testing.assert_almost_equal(exp_final_errors[i][0], final_errors_tp[i][0], 2)
np.testing.assert_almost_equal(exp_final_errors[i][1], final_errors_tp[i][1], 2)
# Test error calculation for joint.
results_joint = sample_results_joint()
final_errors_joint, num_results_joint, num_results_tp = compute_error_joint(results_joint)
exp_final_errors = [(10.635, 6.5895),
(8.058, 2.322),
(5.8527, 3.0017),
(15.6632, 4.9776),
(14.4632, 4.2027),
(0.1658, 0.0218),
(0.0868, 0.0375),
(0.1365, 0.038),
(0.1685, 0.0248)]
assert exp_num_results == num_results_joint
assert exp_num_results == num_results_tp
for i in range(9):
np.testing.assert_almost_equal(exp_final_errors[i][0], final_errors_joint[i][0], 2)
np.testing.assert_almost_equal(exp_final_errors[i][1], final_errors_joint[i][1], 2)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/utilities/test_error_calculation.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extract information from Json files."""
import numpy as np
def safeints(x):
"""convert input to int value."""
x = int(x)
x = max(x, 0)
return int(x)
def extract_landmarks_from_json(frame_annotations, num_keypoints):
"""extract landmarks from json file.
Args:
frame_annotations (dict): frame annotations
num_keypoints: number of keypoints to be extracted
Return:
landmarks_2D (array): 2D landmarks points
occlusions (array): occlusion masks
num_landmarks: number of landmarks points
"""
for chunk in frame_annotations:
if 'class' not in chunk:
continue
chunk_class = str(chunk['class']).lower()
landmarks_2D = []
if chunk_class == 'fiducialpoints':
x, y, occlusions = \
extract_fiducial_points(chunk, num_keypoints)
landmarks_2D = np.asarray([x, y], dtype=np.longdouble).T
return landmarks_2D, occlusions
return None, None
def extract_face_bbox_from_json(frame_annotations):
"""extract landmarks from json file.
Args:
frame_annotations (dict): frame annotations
Return:
facex1 (int): top left point x
facey1 (int): top left point y
facex2 (int): bottom right point x
facey2 (int): bottom right point y
"""
for chunk in frame_annotations:
if 'class' not in chunk:
continue
chunk_class = str(chunk['class']).lower()
facex1 = -1
facey1 = -1
facex2 = -1
facey2 = -1
if chunk_class == 'facebbox':
facex1, facey1, facex2, facey2 = extract_from_facebbox(
chunk, facex1, facey1, facex2, facey2)
if -1 in (facex1, facey1, facex2, facey2):
continue # skip img
return facex1, facey1, facex2, facey2
def extract_from_facebbox(chunk, facex1, facey1, facex2, facey2):
"""extract landmarks from json file.
Args:
chunk (dict): frame annotations chunk
facex1 (int): top left point x
facey1 (int): top left point y
facex2 (int): bottom right point x
        facey2 (int): bottom right point y
    Return:
        The updated (facex1, facey1, facex2, facey2) taken from the chunk, or the
        input values unchanged if the chunk lacks the bbox fields.
    """
if (
'face_tight_bboxx' not in chunk or
'face_tight_bboxy' not in chunk or
'face_tight_bboxwidth' not in chunk or
'face_tight_bboxheight' not in chunk
):
return facex1, facey1, facex2, facey2
facex1 = safeints(chunk['face_tight_bboxx'])
facey1 = safeints(chunk['face_tight_bboxy'])
facex2 = safeints(chunk['face_tight_bboxwidth']) + facex1
facey2 = safeints(chunk['face_tight_bboxheight']) + facey1
return facex1, facey1, facex2, facey2
def extract_fiducial_points(chunk, num_keypoints):
"""extract landmarks from json file.
Args:
chunk (dict): frame annotations chunk
num_keypoints: number of keypoints to be extracted
Return:
x (float): 2D landmarks x
y (float): 2D landmarks y
occlusions (array): occlusion masks
num_landmarks: number of landmarks points
"""
x = [-1] * num_keypoints
y = [-1] * num_keypoints
occlusions = [-1] * num_keypoints
num_landmarks = None
for point in (
point for point in chunk if (
'class' not in point and 'version' not in point)):
try:
number = int(''.join(c for c in str(point) if c.isdigit()))
if num_landmarks is None or number > num_landmarks:
num_landmarks = number
if 'x' in str(point).lower() and number <= num_keypoints:
x[number - 1] = str(np.longdouble(chunk[point]))
if 'y' in str(point).lower() and number <= num_keypoints:
y[number - 1] = str(np.longdouble(chunk[point]))
if (
'occ' in str(point).lower() and
number <= num_keypoints and
chunk[point]
):
occlusions[number - 1] = 1
for index in range(num_landmarks):
if occlusions[index] == -1:
occlusions[index] = 0
except Exception:
pass
return x, y, occlusions
def get_square_bbox(bbox_x1, bbox_y1, bbox_x2, bbox_y2, image_width, image_height):
"""get square bounding box.
Args:
bbox_x1 (int): bounding box top left x
bbox_y1 (int): bounding box top left y
bbox_x2 (int): bounding box bottom right x
bbox_y2 (int): bounding box bottom right y
"""
x = bbox_x1
y = bbox_y1
width = bbox_x2 - x
height = bbox_y2 - y
# transform it into a square bbox wrt the longer side
longer_side = max(width, height)
new_width = longer_side
new_height = longer_side
x = int(x - (new_width - width) / 2)
y = int(y - (new_height - height) / 2)
x = min(max(x, 0), image_width)
y = min(max(y, 0), image_height)
new_width = min(new_width, image_width - x)
new_height = min(new_height, image_height - y)
new_width = min(new_width, new_height)
new_height = new_width # make it a square bbox
return x, y, x + new_width, y + new_height
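# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original module).
# get_square_bbox() is pure arithmetic, so it can be exercised directly with
# synthetic numbers; the coordinates and image size below are made up.
if __name__ == '__main__':
    # A 100x60 box inside a 1280x720 frame becomes a 100x100 square box,
    # shifted to stay centered on the original region and clipped to the image.
    square = get_square_bbox(200, 300, 300, 360, 1280, 720)
    print('square bbox (x1, y1, x2, y2):', square)  # -> (200, 280, 300, 380)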
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/utilities/json_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test GazeNet prediction visualizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from nvidia_tao_tf1.cv.common.utilities.kpi_visualization import KpiVisualizer
from nvidia_tao_tf1.cv.common.utilities.prediction_visualization import PredictionVisualizer
def test_xyz_model():
"""Test GazeNet Kpi Visualization on reading csv with correct number of lines and rows"""
write_csv = False
kpi_bucket_file = ''
visualize_set_id = ['testset']
expected_rows = 1
expected_columns = 22
model_type = 'xyz'
bad_users = ''
buckets_to_visualize = 'all clean hard easy car car_center car_right center-camera ' \
'left-center-camera bottom-center-camera top-center-camera ' \
'right-center-camera right-camera glasses no-glasses occluded'
predictions = ['/home/copilot.cosmos10/RealTimePipeline/set/s400_KPI/Data/user5_x/'
'frame0_1194_881_vc0_02.png -463.238555908 94.0375442505 -176.797409058'
' -486.134002686 257.865386963 -141.173324585']
path_info = {
'root_path': '',
'set_directory_path': ['nvidia_tao_tf1/cv/common/utilities/testdata'],
'ground_truth_folder_name': [''],
'ground_truth_file_folder_name': [''],
}
theta_phi_degrees = False
_kpi_visualizer = KpiVisualizer(visualize_set_id, kpi_bucket_file, path_info)
# kpi_bucket_file will be none on invalid file names.
assert _kpi_visualizer._kpi_bucket_file is None
dfTable = _kpi_visualizer(
output_path='nvidia_tao_tf1/cv/common/utilities/csv',
write_csv=write_csv
)
assert len(dfTable.index) == expected_rows
assert len(dfTable.columns) == expected_columns
_prediction_visualizer = PredictionVisualizer(model_type, visualize_set_id,
bad_users, buckets_to_visualize, path_info,
tp_degrees=theta_phi_degrees)
df_all = _prediction_visualizer._kpi_prediction_linker.getDataFrame(predictions, dfTable)
expected_num_users = 1
num_users = len(df_all.drop_duplicates(['user_id'], keep='first'))
assert num_users == expected_num_users
expected_avg_cam_err = 16.921240584744147
avg_cam_err = df_all['cam_error'].mean()
assert math.isclose(
avg_cam_err, expected_avg_cam_err,
abs_tol=0.0, rel_tol=1e-6
)
expected_degree_err = 9.225318552646291
degree_err = df_all['degree_error'].mean()
assert math.isclose(
degree_err, expected_degree_err,
abs_tol=0.0, rel_tol=1e-6
)
def test_joint_model():
"""Test GazeNet Kpi Visualization on reading csv with correct number of lines and rows"""
write_csv = True
kpi_bucket_file = ''
visualize_set_id = ['testset']
expected_rows = 1
expected_columns = 22
model_type = 'joint'
bad_users = ''
buckets_to_visualize = 'all clean hard easy car car_center car_right center-camera ' \
'left-center-camera bottom-center-camera top-center-camera ' \
'right-center-camera right-camera glasses no-glasses occluded'
predictions = ['/home/copilot.cosmos10/RealTimePipeline/set/s400_KPI/'
'Data/user5_x/frame0_1194_881_vc0_02.png -463.238555908'
' 94.0375442505 -176.797409058 -0.224229842424 0.427173137665'
' -645.198913574 29.1056632996 -205.970291138 -0.116438232362 0.543292880058']
path_info = {
'root_path': '',
'set_directory_path': ['nvidia_tao_tf1/cv/common/utilities/testdata'],
'ground_truth_folder_name': [''],
'ground_truth_file_folder_name': [''],
}
theta_phi_degrees = False
_kpi_visualizer = KpiVisualizer(visualize_set_id, kpi_bucket_file, path_info)
# kpi_bucket_file will be none on invalid file names.
assert _kpi_visualizer._kpi_bucket_file is None
dfTable = _kpi_visualizer(
output_path='nvidia_tao_tf1/cv/common/utilities/csv',
write_csv=write_csv
)
assert len(dfTable.index) == expected_rows
assert len(dfTable.columns) == expected_columns
_prediction_visualizer = PredictionVisualizer(model_type, visualize_set_id,
bad_users, buckets_to_visualize, path_info,
tp_degrees=theta_phi_degrees)
for linker in _prediction_visualizer._kpi_prediction_linker:
df_all = linker.getDataFrame(predictions, dfTable)
if linker.model_type == 'joint_xyz':
expected_num_users = 1
num_users = len(df_all.drop_duplicates(['user_id'], keep='first'))
assert num_users == expected_num_users
expected_avg_cam_err = 19.538877238587443
avg_cam_err = df_all['cam_error'].mean()
assert math.isclose(
avg_cam_err, expected_avg_cam_err,
abs_tol=0.0, rel_tol=1e-6
)
expected_degree_err = 9.016554860339454
degree_err = df_all['degree_error'].mean()
assert math.isclose(
degree_err, expected_degree_err,
abs_tol=0.0, rel_tol=1e-6
)
elif linker.model_type == 'joint_tp':
expected_num_users = 1
num_users = len(df_all.drop_duplicates(['user_id'], keep='first'))
assert num_users == expected_num_users
expected_avg_cam_err = 16.202673528241796
avg_cam_err = df_all['cam_error'].mean()
assert math.isclose(
avg_cam_err, expected_avg_cam_err,
abs_tol=0.0, rel_tol=1e-6
)
expected_degree_err = 9.020981051018254
degree_err = df_all['degree_error'].mean()
# @vpraveen: Relaxing this test condition for
# CI failures.
assert math.isclose(
degree_err, expected_degree_err,
rel_tol=1e-5, abs_tol=0.0
)
elif linker.model_type == 'joint':
expected_num_users = 1
num_users = len(df_all.drop_duplicates(['user_id'], keep='first'))
assert num_users == expected_num_users
expected_avg_cam_err = 17.52980743787898
avg_cam_err = df_all['cam_error'].mean()
assert math.isclose(
avg_cam_err, expected_avg_cam_err,
abs_tol=0.0, rel_tol=1e-6
)
expected_degree_err = 9.553054268825521
degree_err = df_all['degree_error'].mean()
assert math.isclose(
degree_err, expected_degree_err,
abs_tol=0.0, rel_tol=1e-6
)
def test_theta_phi_model_radian():
"""Test GazeNet Kpi Visualization on reading csv with correct number of lines and rows"""
write_csv = True
kpi_bucket_file = ''
visualize_set_id = ['testset']
expected_rows = 1
expected_columns = 22
model_type = 'theta_phi'
bad_users = ''
buckets_to_visualize = 'all clean hard easy car car_center car_right center-camera ' \
'left-center-camera bottom-center-camera top-center-camera ' \
'right-center-camera right-camera glasses no-glasses occluded'
predictions = ['/home/copilot.cosmos10/RealTimePipeline/set/s400_KPI/'
'Data/user5_x/frame0_1194_881_vc0_02.png -0.224229842424'
' 0.427173137665 -0.180691748857 0.44389718771']
path_info = {
'root_path': '',
'set_directory_path': ['nvidia_tao_tf1/cv/common/utilities/testdata'],
'ground_truth_folder_name': [''],
'ground_truth_file_folder_name': [''],
}
theta_phi_degrees = False
_kpi_visualizer = KpiVisualizer(visualize_set_id, kpi_bucket_file, path_info)
# kpi_bucket_file will be none on invalid file names.
assert _kpi_visualizer._kpi_bucket_file is None
dfTable = _kpi_visualizer(
output_path='nvidia_tao_tf1/cv/common/utilities/csv',
write_csv=write_csv
)
assert len(dfTable.index) == expected_rows
assert len(dfTable.columns) == expected_columns
_prediction_visualizer = PredictionVisualizer(model_type, visualize_set_id,
bad_users, buckets_to_visualize, path_info,
tp_degrees=theta_phi_degrees)
df_all = _prediction_visualizer._kpi_prediction_linker.getDataFrame(predictions, dfTable)
expected_num_users = 1
num_users = len(df_all.drop_duplicates(['user_id'], keep='first'))
assert num_users == expected_num_users
# @vpraveen: Converting these to isclose and relaxing the
# test condition due to CI failures.
expected_avg_cam_err = 5.047768956137818
avg_cam_err = df_all['cam_error'].mean()
assert math.isclose(
avg_cam_err, expected_avg_cam_err,
abs_tol=0.0, rel_tol=1e-6
)
expected_degree_err = 2.683263685977131
degree_err = df_all['degree_error'].mean()
# @vpraveen: Converting these to isclose and relaxing the
# test condition due to CI failures.
assert math.isclose(
degree_err, expected_degree_err,
abs_tol=0.0, rel_tol=1e-6
)
def test_theta_phi_model_degrees():
"""Test GazeNet Kpi Visualization on reading csv with correct number of lines and rows"""
write_csv = True
kpi_bucket_file = ''
visualize_set_id = ['testset']
expected_rows = 1
expected_columns = 22
model_type = 'theta_phi'
bad_users = ''
buckets_to_visualize = 'all clean hard easy car car_center car_right center-camera ' \
'left-center-camera bottom-center-camera top-center-camera ' \
'right-center-camera right-camera glasses no-glasses occluded'
predictions = ['/home/copilot.cosmos10/RealTimePipeline/set/s400_KPI/'
'Data/user5_x/frame0_1194_881_vc0_02.png -0.224229842424'
' 0.427173137665 -10.3528746023 25.4334353935']
path_info = {
'root_path': '',
'set_directory_path': ['nvidia_tao_tf1/cv/common/utilities/testdata'],
'ground_truth_folder_name': [''],
'ground_truth_file_folder_name': [''],
}
theta_phi_degrees = True
_kpi_visualizer = KpiVisualizer(visualize_set_id, kpi_bucket_file, path_info)
# kpi_bucket_file will be none on invalid file names.
assert _kpi_visualizer._kpi_bucket_file is None
dfTable = _kpi_visualizer(
output_path='nvidia_tao_tf1/cv/common/utilities/csv',
write_csv=write_csv
)
assert len(dfTable.index) == expected_rows
assert len(dfTable.columns) == expected_columns
_prediction_visualizer = PredictionVisualizer(model_type, visualize_set_id,
bad_users, buckets_to_visualize, path_info,
tp_degrees=theta_phi_degrees)
df_all = _prediction_visualizer._kpi_prediction_linker.getDataFrame(predictions, dfTable)
expected_num_users = 1
num_users = len(df_all.drop_duplicates(['user_id'], keep='first'))
assert num_users == expected_num_users
expected_avg_cam_err = 5.04776895621619
avg_cam_err = df_all['cam_error'].mean()
assert math.isclose(
avg_cam_err, expected_avg_cam_err,
abs_tol=0.0, rel_tol=1e-6
)
expected_degree_err = 2.683263686014905
degree_err = df_all['degree_error'].mean()
assert math.isclose(
degree_err, expected_degree_err,
abs_tol=0.0, rel_tol=1e-6
)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/utilities/test_prediction_visualizer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility function definitions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import errno
import json
import os
import yaml
def mkdir_p(new_path):
"""Makedir, making also non-existing parent dirs."""
try:
print(new_path)
os.makedirs(new_path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(new_path):
pass
else:
raise
def get_file_name_noext(filepath):
"""Return file name witout extension."""
return os.path.splitext(os.path.basename(filepath))[0]
def get_file_ext(filepath):
"""Return file extension."""
return os.path.splitext(os.path.basename(filepath))[1]
def check_file_or_directory(path):
"""Check if the input path is a file or a directory."""
    if not os.path.isfile(path) and not os.path.isdir(path):
        raise FileNotFoundError('%s is not a directory or a file.' % path)
def check_file(file_path, extension=None):
"""Check if the input path is a file.
Args:
file_path (str): Full path to the file
extension (str): File extension. If provided,
checks if the extensions match.
Example choices: [`.yaml`, `.json` ...]
"""
if not os.path.isfile(file_path):
raise FileNotFoundError('The file %s does not exist' % file_path)
# Check if the extension is right
if extension is not None and get_file_ext(file_path) != extension:
        raise FileNotFoundError('The file %s is not a %s file' % (file_path, extension))
def check_dir(dir_path):
"""Check if the input path is a directory."""
if not os.path.isdir(dir_path):
raise NotADirectoryError('The directory %s does not exist' % dir_path)
def load_yaml_file(file_path, mode='r'):
"""Load a yaml file.
Args:
file_path (str): path to the yaml file.
mode (str): mode to load the file in. ex. 'r', 'w' etc.
"""
check_file(file_path, '.yaml')
with open(file_path, mode) as yaml_file:
        file_ = yaml.load(yaml_file.read(), Loader=yaml.FullLoader)
return file_
def load_json_file(file_path, mode='r'):
"""Load a json file.
Args:
file_path (str): path to the json file.
mode (str): mode to load the file in. ex. 'r', 'w' etc.
"""
check_file(file_path, '.json')
with open(file_path, mode) as json_file:
file_ = json.load(json_file)
return file_
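# ---------------------------------------------------------------------------
# Usage sketch (illustrative only). The paths below are placeholders and must
# exist on disk with the matching extensions for the loaders to succeed.
#     mkdir_p('/tmp/experiment_dir/results')
#     spec = load_yaml_file('/path/to/experiment_spec.yaml')
#     labels = load_json_file('/path/to/labels.json')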
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/utilities/path_processing.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO module implementing the entrypoint."""
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/entrypoint/__init__.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""TLT command line wrapper to invoke CLI scripts."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import importlib
import logging
import os
import pkgutil
import shlex
import subprocess
import sys
from time import time
import nvidia_tao_tf1.cv.common.no_warning # noqa pylint: disable=W0611
from nvidia_tao_tf1.cv.common.telemetry.nvml_utils import get_device_details
from nvidia_tao_tf1.cv.common.telemetry.telemetry import send_telemetry_data
MULTIGPU_SUPPORTED_TASKS = ["train"]
RELEASE = True
logger = logging.getLogger(__name__)
def get_modules(package):
"""Function to get module supported tasks.
This function lists out the modules in the nvidia_tao_tf1.cv.X.scripts package
where the module subtasks are listed, and walks through it to generate a dictionary
of tasks, parser_function and path to the executable.
    Args:
        package (module): The scripts package (for example, nvidia_tao_tf1.cv.X.scripts)
            whose task modules should be enumerated.
Returns:
modules (dict): Dictionary of modules.
"""
modules = {}
module_path = package.__path__
tasks = [item[1] for item in pkgutil.walk_packages(module_path)]
for task in sorted(tasks, key=str.lower, reverse=True):
module_name = package.__name__ + '.' + task
module = importlib.import_module(module_name)
module_details = {
"module_name": module_name,
"build_parser": getattr(
module,
"build_command_line_parser") if hasattr(
module,
"build_command_line_parser"
) else None,
"runner_path": os.path.abspath(
module.__file__
)
}
modules[task] = module_details
return modules
def build_command_line_parser(package_name, modules=None):
"""Simple function to build command line parsers.
This function scans the dictionary of modules determined by the
get_modules routine and builds a chained parser.
    Args:
        package_name (str): Name of the package, used as the parser program name.
        modules (dict): Dictionary of modules as returned by the get_modules function.
Returns:
parser (argparse.ArgumentParser): An ArgumentParser class with all the
subparser instantiated for chained parsing.
"""
parser = argparse.ArgumentParser(
package_name,
add_help=True,
description="Transfer Learning Toolkit"
)
parser.add_argument(
"--num_processes",
"-np",
type=int,
default=-1,
help=("The number of horovod child processes to be spawned. "
"Default is -1(equal to --gpus)."),
required=False
)
parser.add_argument(
'--gpus',
type=int,
default=1,
help="The number of GPUs to be used for the job.",
required=False,
)
parser.add_argument(
'--gpu_index',
type=int,
nargs="+",
help="The indices of the GPU's to be used.",
default=None
)
parser.add_argument(
"--use_amp",
action="store_true",
default=False,
help="Flag to enable Auto Mixed Precision."
)
parser.add_argument(
'--log_file',
type=str,
default=None,
help="Path to the output log file.",
required=False,
)
parser.add_argument(
"--mpirun-arg",
type=str,
default="-x NCCL_IB_HCA=mlx5_4,mlx5_6,mlx5_8,mlx5_10 -x NCCL_SOCKET_IFNAME=^lo,docker",
help=argparse.SUPPRESS
)
parser.add_argument(
'--multi-node',
action='store_true',
default=False,
help=argparse.SUPPRESS
)
parser.add_argument(
"--launch_cuda_blocking",
action="store_true",
default=False,
help=argparse.SUPPRESS
)
# module subparser for the respective tasks.
module_subparsers = parser.add_subparsers(title="tasks")
for task, details in modules.items():
if not details['build_parser']:
logger.debug("Parser for task {} wasn't built.".format(
task
))
continue
subparser = module_subparsers.add_parser(
task,
parents=[parser],
add_help=False)
subparser = details['build_parser'](subparser)
return parser
def format_command_line_args(args):
"""Format command line args from command line.
Args:
args (dict): Dictionary of parsed command line arguments.
Returns:
formatted_string (str): Formatted command line string.
"""
assert isinstance(args, dict), (
"The command line args should be formatted to a dictionary."
)
formatted_string = ""
for arg, value in args.items():
if arg in ["gpus", "gpu_index", "log_file", "use_amp",
"multi_node", "mpirun_arg", "num_processes",
"launch_cuda_blocking"]:
continue
# Fix arguments that defaults to None, so that they will
# not be converted to string "None". Simply drop args
# that have value None.
# For example, export output_file arg and engine_file arg
# same for "" for cal_image_dir in export.
if value in [None, ""]:
continue
if isinstance(value, bool):
if value:
formatted_string += "--{} ".format(arg)
elif isinstance(value, list):
formatted_string += "--{} {} ".format(
arg, ' '.join(value)
)
else:
formatted_string += "--{} {} ".format(
arg, value
)
return formatted_string
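# Illustrative example (the argument names below are placeholders, not a fixed schema):
# format_command_line_args({'gpus': 2, 'experiment_spec_file': 'spec.txt', 'results_dir': '/out'})
# returns "--experiment_spec_file spec.txt --results_dir /out " -- launcher-only keys
# such as "gpus" and "gpu_index" are dropped before the string is built.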
def check_valid_gpus(num_gpus, gpu_ids):
"""Check if the number of GPU's called and IDs are valid.
This function scans the machine using the nvidia-smi routine to find the
number of GPU's and matches the id's and num_gpu's accordingly.
Once validated, it finally also sets the CUDA_VISIBLE_DEVICES env variable.
Args:
num_gpus (int): Number of GPUs alloted by the user for the job.
gpu_ids (list(int)): List of GPU indices used by the user.
Returns:
No explicit returns
"""
# Ensure the gpu_ids are all different, and sorted
gpu_ids = sorted(list(set(gpu_ids)))
assert num_gpus > 0, "At least 1 GPU required to run any task."
num_gpus_available = str(subprocess.check_output(["nvidia-smi", "-L"])).count("UUID")
max_id = max(gpu_ids)
assert min(gpu_ids) >= 0, (
"GPU ids cannot be negative."
)
assert len(gpu_ids) == num_gpus, (
"The number of GPUs ({}) must be the same as the number of GPU indices"
" ({}) provided.".format(
            num_gpus,
            len(gpu_ids)
)
)
assert max_id < num_gpus_available and num_gpus <= num_gpus_available, (
"Checking for valid GPU ids and num_gpus."
)
cuda_visible_devices = ",".join([str(idx) for idx in gpu_ids])
os.environ['CUDA_VISIBLE_DEVICES'] = cuda_visible_devices
def get_env_variables(use_amp):
"""Simple function to get env variables for the run command."""
env_variable = ""
amp_enable = "TF_ENABLE_AUTO_MIXED_PRECISION=0"
if use_amp:
amp_enable = "TF_ENABLE_AUTO_MIXED_PRECISION=1"
env_variable += amp_enable
return env_variable
def set_gpu_info_single_node(num_gpus, gpu_ids):
"""Set gpu environment variable for single node."""
check_valid_gpus(num_gpus, gpu_ids)
env_variable = ""
visible_devices = os.getenv("CUDA_VISIBLE_DEVICES", None)
if visible_devices is not None:
env_variable = " CUDA_VISIBLE_DEVICES={}".format(
visible_devices
)
return env_variable
def launch_job(package, package_name, cl_args=None):
"""Wrap CLI builders.
This function should be included inside package entrypoint/*.py
import sys
import nvidia_tao_tf1.cv.X.scripts
from nvidia_tao_tf1.cv.common.entrypoint import launch_job
if __name__ == "__main__":
launch_job(nvidia_tao_tf1.cv.X.scripts, "X", sys.argv[1:])
"""
# Configure the logger.
verbosity = "INFO"
if not RELEASE:
verbosity = "DEBUG"
logging.basicConfig(
format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level=verbosity
)
# build modules
modules = get_modules(package)
parser = build_command_line_parser(package_name, modules)
# parse command line arguments to module entrypoint script.
args = vars(parser.parse_args(cl_args))
num_gpus = args["gpus"]
assert num_gpus > 0, "At least 1 GPU required to run any task."
np = args["num_processes"]
# np defaults to num_gpus if < 0
if np < 0:
np = num_gpus
gpu_ids = args["gpu_index"]
use_amp = args['use_amp']
multi_node = args['multi_node']
mpirun_arg = args['mpirun_arg']
launch_cuda_blocking = args["launch_cuda_blocking"]
process_passed = True
if gpu_ids is None:
gpu_ids = range(num_gpus)
log_file = sys.stdout
if args['log_file'] is not None:
log_file = os.path.realpath(args['log_file'])
log_root = os.path.dirname(log_file)
if not os.path.exists(log_root):
os.makedirs(log_root)
# Get the task to be called from the raw command line arguments.
task = None
for arg in sys.argv[1:]:
if arg in list(modules.keys()):
task = arg
break
# Either data parallelism or model parallelism, multi-gpu should only
# apply to training task
if num_gpus > 1:
assert task in MULTIGPU_SUPPORTED_TASKS, (
"Please use only 1 GPU for the task {}. Only the following tasks "
"are supported to run with multiple GPUs, {}".format(
task,
MULTIGPU_SUPPORTED_TASKS)
)
# Check for validity in terms of GPU handling and available resources.
mpi_command = ""
if np > 1:
assert num_gpus > 1, (
"Number of GPUs must be > 1 for data parallelized training(np > 1)."
)
mpi_command = f'mpirun -np {np} --oversubscribe --bind-to none --allow-run-as-root -mca pml ob1 -mca btl ^openib'
if multi_node:
mpi_command += " " + mpirun_arg
if use_amp:
assert task == "train", (
"AMP is currently supported only for training."
)
# Format final command.
env_variables = get_env_variables(use_amp)
if not multi_node:
env_variables += set_gpu_info_single_node(num_gpus, gpu_ids)
formatted_args = format_command_line_args(args)
task_command = "python {}".format(modules[task]["runner_path"])
if launch_cuda_blocking:
task_command = f"CUDA_LAUNCH_BLOCKING=1 {task_command}"
run_command = "{} bash -c '{} {} {}'".format(
mpi_command,
env_variables,
task_command,
formatted_args)
logger.debug("Run command: {}".format(run_command))
start_mark = time()
try:
if isinstance(log_file, str):
with open(log_file, "a") as lf:
subprocess.run(
shlex.split(run_command),
shell=False,
env=os.environ,
stdout=lf,
stderr=lf,
check=True
)
else:
subprocess.run(
shlex.split(run_command),
shell=False,
env=os.environ,
stdout=log_file,
stderr=log_file,
check=True
)
except (KeyboardInterrupt, SystemExit):
print("Command was interrupted.")
process_passed = True
except subprocess.CalledProcessError as e:
if e.output is not None:
print(f"TAO Toolkit task: {task} failed with error:\n{e.output}")
process_passed = False
end_mark = time()
time_lapsed = int(end_mark - start_mark)
try:
gpu_data = []
logger.debug("Gathering GPU data for TAO Toolkit Telemetry.")
for device in get_device_details():
gpu_data.append(device.get_config())
logger.debug("Sending data to the TAO Telemetry server.")
send_telemetry_data(
package_name,
task,
gpu_data,
num_gpus=num_gpus,
time_lapsed=time_lapsed,
pass_status=process_passed
)
except Exception as e:
print("Telemetry data couldn't be sent, but the command ran successfully.")
print(f"[WARNING]: {e}")
pass
if not process_passed:
print("Execution status: FAIL")
sys.exit(-1) # returning non zero return code from the process.
print("Execution status: PASS")
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/entrypoint/entrypoint.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model Parallelism."""
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/model_parallelism/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility to parallelize a Keras model for model parallelism in training."""
import keras
import numpy as np
import tensorflow as tf
def find_segment_idx(layer_idx, layer_splits):
"""Find the segment index for a given layer index."""
idx = 0
while not (layer_splits[idx] <= layer_idx < layer_splits[idx+1]):
idx += 1
assert idx < len(layer_splits) - 1, (
"Segment index should be less than {}".format(len(layer_splits) - 1)
)
return idx
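# Illustrative example: with layer_splits = [0, 10, 20], layer index 4 falls in
# segment 0 (GPU 0) and layer index 15 in segment 1 (GPU 1).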
def model_parallelism(
model,
parallelism,
freeze_bn=False
):
"""Split the model into several parts on multiple GPUs for model parallelism."""
# set training=False for BN layers if freeze_bn=True
# otherwise the freeze_bn flag in model builder will be ineffective
def compose_call(prev_call_method):
def call(self, inputs, training=False):
return prev_call_method(self, inputs, training)
return call
prev_batchnorm_call = keras.layers.normalization.BatchNormalization.call
if freeze_bn:
keras.layers.normalization.BatchNormalization.call = compose_call(
prev_batchnorm_call
)
world_size = len(parallelism)
# in case that model parallelism is not enabled at all...
if world_size == 0:
world_size = 1
parallelism = (1.0,)
p_arr = np.array((0.0,) + parallelism, dtype=np.float32)
cum_p_arr = np.cumsum(p_arr)
# splitting points for each segment of the model
splits = cum_p_arr / cum_p_arr[-1]
layer_splits = np.round(splits * len(model.layers))
layer_idx = 0
_explored_layers = dict()
for l in model.layers:
_explored_layers[l.name] = [False, None]
input_layer = [l for l in model.layers if (type(l) == keras.layers.InputLayer)]
layers_to_explore = input_layer
model_outputs = {}
# Loop until we reach the last layer.
while layers_to_explore:
layer = layers_to_explore.pop(0)
# Skip layers that may be revisited in the graph to prevent duplicates.
if not _explored_layers[layer.name][0]:
# Check if all inbound layers explored for given layer.
if not all([
_explored_layers[l.name][0]
for n in layer._inbound_nodes
for l in n.inbound_layers
]):
continue
outputs = None
# Visit input layer.
if type(layer) == keras.layers.InputLayer:
# Re-use the existing InputLayer.
outputs = layer.output
new_layer = layer
else:
gpu_idx = find_segment_idx(layer_idx, layer_splits)
layer_idx += 1
# pin this layer on a certain GPU
with tf.device("/gpu:{}".format(gpu_idx)):
# Create new layer.
layer_config = layer.get_config()
new_layer = type(layer).from_config(layer_config)
# Add to model.
outputs = []
for node in layer._inbound_nodes:
prev_outputs = []
for idx, l in enumerate(node.inbound_layers):
keras_layer = _explored_layers[l.name][1]
prev_outputs.append(keras_layer.get_output_at(node.node_indices[idx]))
assert prev_outputs, "Expected non-input layer to have inputs."
if len(prev_outputs) == 1:
prev_outputs = prev_outputs[0]
outputs.append(new_layer(prev_outputs))
if len(outputs) == 1:
outputs = outputs[0]
weights = layer.get_weights()
if weights is not None:
new_layer.set_weights(weights)
outbound_nodes = layer._outbound_nodes
if not outbound_nodes:
model_outputs[layer.output.name] = outputs
layers_to_explore.extend([node.outbound_layer for node in outbound_nodes])
# Mark current layer as visited and assign output nodes to the layer.
_explored_layers[layer.name][0] = True
_explored_layers[layer.name][1] = new_layer
else:
continue
output_tensors = [model_outputs[l.name] for l in model.outputs if l.name in model_outputs]
new_model = keras.models.Model(inputs=model.inputs,
outputs=output_tensors,
name=model.name)
# restore the BN call method before return
if freeze_bn:
keras.layers.normalization.BatchNormalization.call = prev_batchnorm_call
return new_model
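# ---------------------------------------------------------------------------
# Usage sketch (illustrative only). It assumes a functional Keras model and two
# visible GPUs; parallelism=(1.0, 1.0) places roughly half of the layers on
# each device. build_keras_model() is a placeholder for any model constructor.
#     model = build_keras_model()
#     parallel_model = model_parallelism(model, parallelism=(1.0, 1.0), freeze_bn=False)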
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/model_parallelism/parallelize_model.py |
# Copyright 2019-2020 NVIDIA Corporation. All rights reserved.
"""Utility class for performing TensorRT image inference."""
import os
import numpy as np
from PIL import Image, ImageDraw
from tqdm import tqdm
from nvidia_tao_tf1.cv.makenet.utils.preprocess_input import preprocess_input
class Inferencer(object):
"""Manages TensorRT objects for model inference."""
def __init__(self, keras_model=None, batch_size=None, trt_engine_path=None,
infer_process_fn=None, class_mapping=None, threshold=0.3,
img_mean=None, keep_aspect_ratio=True, image_depth=8):
"""Initializes Keras / TensorRT objects needed for model inference.
Args:
keras_model (keras model or None): Keras model object for inference
batch_size (int or None): an int if keras_model is present or using dynamic bs engine
trt_engine_path (str or None): TensorRT engine path.
infer_process_fn (Python function): takes in the Inferencer object (self) and the
model prediction, returns list of length batch_size. Each element is of size (n, 6)
where n is the number of boxes and for each box:
class_id, confidence, xmin, ymin, xmax, ymax
class_mapping (dict): a dict mapping class_id to class_name
            threshold (float): confidence threshold to draw/label a bbox.
            img_mean (list or None): per-channel mean values subtracted during preprocessing.
            keep_aspect_ratio (bool): whether to preserve the aspect ratio when resizing.
            image_depth (int): bit depth of input images (8 or 16).
"""
self.infer_process_fn = infer_process_fn
self.class_mapping = class_mapping
if trt_engine_path is not None:
# use TensorRT for inference
# Import TRTInferencer only if it's a TRT Engine.
# Note: import TRTInferencer after fork() or in MPI might fail.
from nvidia_tao_tf1.cv.common.inferencer.trt_inferencer import TRTInferencer
self.trt_inf = TRTInferencer(trt_engine_path, batch_size=batch_size)
self.batch_size = self.trt_inf.max_batch_size
self.model_input_height = self.trt_inf._input_shape[1]
self.model_input_width = self.trt_inf._input_shape[2]
img_channel = self.trt_inf._input_shape[0]
self.pred_fn = self.trt_inf.infer_batch
elif (keras_model is not None) and (batch_size is not None):
# use keras model for inference
self.keras_model = keras_model
self.batch_size = batch_size
img_channel = keras_model.layers[0].output_shape[-3]
self.model_input_width = keras_model.layers[0].output_shape[-1]
self.model_input_height = keras_model.layers[0].output_shape[-2]
self.pred_fn = self.keras_model.predict
else:
raise ValueError("Need one of (keras_model, batch_size) and trt_engine_path.")
if image_depth == 8:
self.model_img_mode = 'RGB' if img_channel == 3 else 'L'
elif image_depth == 16:
# PIL int32 mode for 16-bit images
self.model_img_mode = "I"
else:
raise ValueError(
f"Unsupported image depth: {image_depth}, should be 8 or 16"
)
self.threshold = threshold
assert self.threshold > 0, "Confidence threshold must be bigger than 0.0"
assert self.threshold < 1, "Confidence threshold must be smaller than 1.0"
if image_depth == 8:
self.supported_img_format = ['.jpg', '.jpeg', '.JPG', '.JPEG', '.png', '.PNG']
else:
# Only PNG can support 16-bit depth
self.supported_img_format = ['.png', '.PNG']
self.keep_aspect_ratio = keep_aspect_ratio
self.img_mean = img_mean
def _load_img(self, img_path):
"""load an image and returns the original image and a numpy array for model to consume.
Args:
img_path (str): path to an image
Returns:
img (PIL.Image): PIL image of original image.
ratio (float): resize ratio of original image over processed image
inference_input (array): numpy array for processed image
"""
img = Image.open(img_path)
orig_w, orig_h = img.size
ratio = min(self.model_input_width/float(orig_w), self.model_input_height/float(orig_h))
# do not change aspect ratio
new_w = int(round(orig_w*ratio))
new_h = int(round(orig_h*ratio))
if self.keep_aspect_ratio:
im = img.resize((new_w, new_h), Image.ANTIALIAS)
else:
im = img.resize((self.model_input_width, self.model_input_height), Image.ANTIALIAS)
if im.mode in ('RGBA', 'LA') or \
(im.mode == 'P' and 'transparency' in im.info) and \
self.model_img_mode == 'L':
# Need to convert to RGBA if LA format due to a bug in PIL
im = im.convert('RGBA')
inf_img = Image.new("RGBA", (self.model_input_width, self.model_input_height))
inf_img.paste(im, (0, 0))
inf_img = inf_img.convert(self.model_img_mode)
else:
inf_img = Image.new(
self.model_img_mode,
(self.model_input_width, self.model_input_height)
)
inf_img.paste(im, (0, 0))
inf_img = np.array(inf_img).astype(np.float32)
# Single channel image, either 8-bit or 16-bit
if self.model_img_mode in ['L', 'I']:
inf_img = np.expand_dims(inf_img, axis=2)
inference_input = inf_img.transpose(2, 0, 1) - self.img_mean[0]
else:
inference_input = preprocess_input(inf_img.transpose(2, 0, 1),
img_mean=self.img_mean)
return img, float(orig_w)/new_w, inference_input
def _get_bbox_and_kitti_label_single_img(
self, img, img_ratio, y_decoded,
is_draw_img, is_kitti_export
):
"""helper function to draw bbox on original img and get kitti label on single image.
Note: img will be modified in-place.
"""
kitti_txt = ""
draw = ImageDraw.Draw(img)
color_list = ['Black', 'Red', 'Blue', 'Gold', 'Purple']
for i in y_decoded:
if float(i[1]) < self.threshold:
continue
if self.keep_aspect_ratio:
i[2:6] *= img_ratio
else:
orig_w, orig_h = img.size
ratio_w = float(orig_w) / self.model_input_width
ratio_h = float(orig_h) / self.model_input_height
i[2] *= ratio_w
i[3] *= ratio_h
i[4] *= ratio_w
i[5] *= ratio_h
if is_kitti_export:
kitti_txt += self.class_mapping[int(i[0])] + ' 0 0 0 ' + \
' '.join([str(x) for x in i[2:6]])+' 0 0 0 0 0 0 0 ' + str(i[1])+'\n'
if is_draw_img:
draw.rectangle(
((i[2], i[3]), (i[4], i[5])),
outline=color_list[int(i[0]) % len(color_list)]
)
# txt pad
draw.rectangle(((i[2], i[3]), (i[2] + 100, i[3]+10)),
fill=color_list[int(i[0]) % len(color_list)])
draw.text((i[2], i[3]), "{0}: {1:.2f}".format(self.class_mapping[int(i[0])], i[1]))
return img, kitti_txt
def _predict_batch(self, inf_inputs):
'''function to predict a batch.'''
y_pred = self.pred_fn(np.array(inf_inputs))
y_pred_decoded = self.infer_process_fn(self, y_pred)
return y_pred_decoded
def _inference_single_img(self, img_in_path, img_out_path, label_out_path):
"""inference for a single image.
Args:
img_in_path: the input path for an image
img_out_path: the output path for the image
label_out_path: the output path for the label
"""
if os.path.splitext(img_in_path)[1] not in self.supported_img_format:
raise NotImplementedError(
"only "+' '.join(self.supported_img_format)+' are supported for input.')
img, ratio, inf_input = self._load_img(img_in_path)
y_pred_decoded = self._predict_batch([inf_input])
img, kitti_txt = self._get_bbox_and_kitti_label_single_img(
img, ratio, y_pred_decoded[0],
img_out_path, label_out_path
)
if img_out_path:
if os.path.splitext(img_out_path)[1] not in self.supported_img_format:
raise NotImplementedError(
"only "+' '.join(self.supported_img_format)+' are supported for image output.')
try:
img.save(img_out_path)
except Exception:
img.convert("RGB").save(img_out_path)
if label_out_path:
if os.path.splitext(label_out_path)[1].lower() != '.txt':
raise NotImplementedError("only .txt is supported for label output.")
            with open(label_out_path, 'w') as label_file:
                label_file.write(kitti_txt)
def _inference_folder(self, img_in_path, img_out_path, label_out_path):
"""inference in a folder.
Args:
            img_in_path: the input folder path for images
            img_out_path: the output folder path for the annotated images
            label_out_path: the output folder path for the labels
"""
# Create output directories
if img_out_path and not os.path.exists(img_out_path):
os.mkdir(img_out_path)
if label_out_path and not os.path.exists(label_out_path):
os.mkdir(label_out_path)
image_path_basename = []
for img_path in os.listdir(img_in_path):
base_name, ext = os.path.splitext(img_path)
if ext in self.supported_img_format:
image_path_basename.append((os.path.join(img_in_path, img_path), base_name, ext))
n_batches = (len(image_path_basename) + self.batch_size - 1) // self.batch_size
for batch_idx in tqdm(range(n_batches)):
imgs = []
ratios = []
inf_inputs = []
base_names = []
exts = []
for img_path, base_name, ext in image_path_basename[
batch_idx*self.batch_size:(batch_idx+1)*self.batch_size
]:
base_names.append(base_name)
img, ratio, inf_input = self._load_img(img_path)
imgs.append(img)
ratios.append(ratio)
inf_inputs.append(inf_input)
exts.append(ext)
y_pred_decoded = self._predict_batch(inf_inputs)
for idx, base_name in enumerate(base_names):
img, kitti_txt = self._get_bbox_and_kitti_label_single_img(
imgs[idx], ratios[idx], y_pred_decoded[idx],
img_out_path, label_out_path)
if img_out_path:
img.save(os.path.join(img_out_path, base_name+exts[idx]))
if label_out_path:
                    with open(os.path.join(label_out_path, base_name+'.txt'), 'w') as label_file:
                        label_file.write(kitti_txt)
def infer(self, img_in_path, img_out_path, label_out_path):
"""Wrapper function."""
if not os.path.exists(img_in_path):
raise ValueError("Input path does not exist")
if not (img_out_path or label_out_path):
raise ValueError("At least one of image or label output path should be set")
if os.path.isdir(img_in_path):
self._inference_folder(img_in_path, img_out_path, label_out_path)
else:
self._inference_single_img(img_in_path, img_out_path, label_out_path)
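# Illustrative usage sketch (assumption-based, not part of the original file): it shows
# how the infer() entry point above is typically driven. The helper name and the
# directory layout below are hypothetical placeholders.
def _example_run_inference(inferencer, input_dir, output_dir):
    """Run folder inference with an already constructed instance of the class above."""
    image_out = os.path.join(output_dir, "images_annotated")
    label_out = os.path.join(output_dir, "labels")
    # infer() dispatches to folder or single-image inference based on the input path.
    inferencer.infer(input_dir, image_out, label_out)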
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/inferencer/inferencer.py |
tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/inferencer/__init__.py |
|
# Copyright 2019-2020 NVIDIA Corporation. All rights reserved.
"""Helper functions for loading engine."""
import numpy as np
import pycuda.autoinit # noqa pylint: disable=unused-import
import pycuda.driver as cuda
import tensorrt as trt
class HostDeviceMem(object):
"""Simple helper data class that's a little nice to use than a 2-tuple."""
def __init__(self, host_mem, device_mem):
"""Init function."""
self.host = host_mem
self.device = device_mem
def __str__(self):
"""___str___."""
return "Host:\n" + str(self.host) + "\nDevice:\n" + str(self.device)
def __repr__(self):
"""___repr___."""
return self.__str__()
def do_inference(context, bindings, inputs,
outputs, stream, batch_size=1,
execute_v2=False):
"""Generalization for multiple inputs/outputs.
inputs and outputs are expected to be lists of HostDeviceMem objects.
"""
# Transfer input data to the GPU.
for inp in inputs:
cuda.memcpy_htod_async(inp.device, inp.host, stream)
# Run inference.
if execute_v2:
context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
else:
context.execute_async(batch_size=batch_size, bindings=bindings, stream_handle=stream.handle)
# Transfer predictions back from the GPU.
for out in outputs:
cuda.memcpy_dtoh_async(out.host, out.device, stream)
# Synchronize the stream
stream.synchronize()
# Return only the host outputs.
return [out.host for out in outputs]
def allocate_buffers(engine, context=None):
"""Allocates host and device buffer for TRT engine inference.
    This function is similar to the one in common.py, but
    converts network outputs (which are np.float32) appropriately
    before writing them to the Python buffer. This is needed, since
    TensorRT plugins don't support output type description, and
    in our particular case, we use the NMS plugin as the network output.
Args:
engine (trt.ICudaEngine): TensorRT engine
context (trt.IExecutionContext): Context for dynamic shape engine
Returns:
inputs [HostDeviceMem]: engine input memory
outputs [HostDeviceMem]: engine output memory
bindings [int]: buffer to device bindings
stream (cuda.Stream): cuda stream for engine inference synchronization
"""
inputs = []
outputs = []
bindings = []
stream = cuda.Stream()
    # Current NMS implementation in TRT only supports DataType.FLOAT but
    # it may change in the future, which could break this sample here
    # when using lower precision [e.g. NMS output would not be np.float32
    # anymore, even though this is assumed in binding_to_type]
binding_to_type = {"Input": np.float32, "NMS": np.float32, "NMS_1": np.int32,
"BatchedNMS": np.int32, "BatchedNMS_1": np.float32,
"BatchedNMS_2": np.float32, "BatchedNMS_3": np.float32,
"generate_detections": np.float32,
"mask_head/mask_fcn_logits/BiasAdd": np.float32,
"softmax_1": np.float32,
"input_1": np.float32}
for binding in engine:
if context:
binding_id = engine.get_binding_index(str(binding))
size = trt.volume(context.get_binding_shape(binding_id))
else:
size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
# avoid error when bind to a number (YOLO BatchedNMS)
size = engine.max_batch_size if size == 0 else size
if str(binding) in binding_to_type:
dtype = binding_to_type[str(binding)]
else:
dtype = trt.nptype(engine.get_binding_dtype(binding))
# Allocate host and device buffers
host_mem = cuda.pagelocked_empty(size, dtype)
device_mem = cuda.mem_alloc(host_mem.nbytes)
# Append the device buffer to device bindings.
bindings.append(int(device_mem))
# Append to the appropriate list.
if engine.binding_is_input(binding):
inputs.append(HostDeviceMem(host_mem, device_mem))
else:
outputs.append(HostDeviceMem(host_mem, device_mem))
return inputs, outputs, bindings, stream
def load_engine(trt_runtime, engine_path):
"""Helper funtion to load an exported engine."""
with open(engine_path, 'rb') as f:
engine_data = f.read()
engine = trt_runtime.deserialize_cuda_engine(engine_data)
return engine
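# Illustrative sketch (assumption-based, not used by the library) showing how the helpers
# above fit together. ``engine_path`` and ``batch`` are hypothetical inputs; ``batch`` is
# assumed to be a numpy array already preprocessed and sized to fill the engine's input
# binding exactly.
def _example_end_to_end_inference(engine_path, batch):
    """Load an engine, allocate buffers and run one batch of inference."""
    trt_runtime = trt.Runtime(trt.Logger(trt.Logger.WARNING))
    engine = load_engine(trt_runtime, engine_path)
    context = engine.create_execution_context()
    inputs, outputs, bindings, stream = allocate_buffers(engine)
    # Copy the batch into the pagelocked host buffer of the first input binding.
    np.copyto(inputs[0].host, np.asarray(batch).ravel())
    # Run inference and return the host-side output buffers.
    return do_inference(context, bindings, inputs, outputs, stream,
                        batch_size=engine.max_batch_size)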
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/inferencer/engine.py |
# Copyright 2019-2020 NVIDIA Corporation. All rights reserved.
"""Utility class for performing TensorRT image inference."""
import numpy as np
import tensorrt as trt
from nvidia_tao_tf1.cv.common.inferencer.engine import allocate_buffers, do_inference, load_engine
# TensorRT logger singleton
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
class TRTInferencer(object):
"""Manages TensorRT objects for model inference."""
def __init__(self, trt_engine_path, input_shape=None, batch_size=None):
"""Initializes TensorRT objects needed for model inference.
Args:
            trt_engine_path (str): path to the serialized TensorRT engine to load
input_shape (tuple): (batch, channel, height, width) for dynamic shape engine
batch_size (int): batch size for dynamic shape engine
"""
# We first load all custom plugins shipped with TensorRT,
# some of them will be needed during inference
trt.init_libnvinfer_plugins(TRT_LOGGER, '')
# Initialize runtime needed for loading TensorRT engine from file
self.trt_runtime = trt.Runtime(TRT_LOGGER)
self.trt_engine = load_engine(self.trt_runtime, trt_engine_path)
self.max_batch_size = self.trt_engine.max_batch_size
self.execute_v2 = False
# Execution context is needed for inference
self.context = None
# Allocate memory for multiple usage [e.g. multiple batch inference]
self._input_shape = []
for binding in range(self.trt_engine.num_bindings):
if self.trt_engine.binding_is_input(binding):
self._input_shape = self.trt_engine.get_binding_shape(binding)[-3:]
assert len(self._input_shape) == 3, "Engine doesn't have valid input dimensions"
# set binding_shape for dynamic input
if (input_shape is not None) or (batch_size is not None):
self.context = self.trt_engine.create_execution_context()
if input_shape is not None:
self.context.set_binding_shape(0, input_shape)
self.max_batch_size = input_shape[0]
else:
self.context.set_binding_shape(0, [batch_size] + list(self._input_shape))
self.max_batch_size = batch_size
self.execute_v2 = True
# This allocates memory for network inputs/outputs on both CPU and GPU
self.inputs, self.outputs, self.bindings, self.stream = allocate_buffers(self.trt_engine,
self.context)
if self.context is None:
self.context = self.trt_engine.create_execution_context()
input_volume = trt.volume(self._input_shape)
self.numpy_array = np.zeros((self.max_batch_size, input_volume))
def clear_buffers(self):
"""Simple function to free input, output buffers allocated earlier.
Args:
No explicit arguments. Inputs and outputs are member variables.
Returns:
No explicit returns.
Raises:
ValueError if buffers not found.
"""
# Loop through inputs and free inputs.
for inp in self.inputs:
inp.device.free()
# Loop through outputs and free them.
for out in self.outputs:
out.device.free()
def clear_trt_session(self):
"""Simple function to free destroy tensorrt handlers.
Args:
No explicit arguments. Destroys context, runtime and engine.
Returns:
No explicit returns.
Raises:
ValueError if buffers not found.
"""
if self.trt_runtime:
del self.trt_runtime
if self.context:
del self.context
if self.trt_engine:
del self.trt_engine
if self.stream:
del self.stream
def infer_batch(self, imgs):
"""Infers model on batch of same sized images resized to fit the model.
Args:
            imgs (np.ndarray): batch of preprocessed images that will be packed
                together and fed into the model
"""
# Verify if the supplied batch size is not too big
max_batch_size = self.max_batch_size
actual_batch_size = len(imgs)
if actual_batch_size > max_batch_size:
raise ValueError("image_paths list bigger ({}) than \
engine max batch size ({})".format(actual_batch_size, max_batch_size))
self.numpy_array[:actual_batch_size] = imgs.reshape(actual_batch_size, -1)
# ...copy them into appropriate place into memory...
# (self.inputs was returned earlier by allocate_buffers())
np.copyto(self.inputs[0].host, self.numpy_array.ravel())
# ...fetch model outputs...
results = do_inference(
self.context, bindings=self.bindings, inputs=self.inputs,
outputs=self.outputs, stream=self.stream,
batch_size=max_batch_size,
execute_v2=self.execute_v2)
# ...and return results up to the actual batch size.
return [i.reshape(max_batch_size, -1)[:actual_batch_size] for i in results]
def __del__(self):
"""Clear things up on object deletion."""
# Clear session and buffer
self.clear_trt_session()
self.clear_buffers()
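if __name__ == "__main__":
    # Illustrative smoke test only (assumption-based): the engine path below is a
    # hypothetical placeholder and is assumed to point to an implicit-batch engine.
    demo_inferencer = TRTInferencer("/models/model.engine")
    # Build a random batch matching the engine input shape (CHW) for a quick sanity check.
    demo_shape = [demo_inferencer.max_batch_size] + list(demo_inferencer._input_shape)
    demo_batch = np.random.rand(*demo_shape).astype(np.float32)
    print([out.shape for out in demo_inferencer.infer_batch(demo_batch)])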
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/inferencer/trt_inferencer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities using the NVML library for GPU devices."""
import json
import pynvml
BRAND_NAMES = {
pynvml.NVML_BRAND_UNKNOWN: "Unknown",
pynvml.NVML_BRAND_QUADRO: "Quadro",
pynvml.NVML_BRAND_TESLA: "Tesla",
pynvml.NVML_BRAND_NVS: "NVS",
pynvml.NVML_BRAND_GRID: "Grid",
pynvml.NVML_BRAND_TITAN: "Titan",
pynvml.NVML_BRAND_GEFORCE: "GeForce",
pynvml.NVML_BRAND_NVIDIA_VAPPS: "NVIDIA Virtual Applications",
pynvml.NVML_BRAND_NVIDIA_VPC: "NVIDIA Virtual PC",
pynvml.NVML_BRAND_NVIDIA_VCS: "NVIDIA Virtual Compute Server",
pynvml.NVML_BRAND_NVIDIA_VWS: "NVIDIA RTX Virtual Workstation",
pynvml.NVML_BRAND_NVIDIA_VGAMING: "NVIDIA Cloud Gaming",
pynvml.NVML_BRAND_QUADRO_RTX: "Quadro RTX",
pynvml.NVML_BRAND_NVIDIA_RTX: "NVIDIA RTX",
pynvml.NVML_BRAND_NVIDIA: "NVIDIA",
pynvml.NVML_BRAND_GEFORCE_RTX: "GeForce RTX",
pynvml.NVML_BRAND_TITAN_RTX: "TITAN RTX",
}
class GPUDevice:
"""Data structure to represent a GPU device."""
def __init__(self, pci_bus_id,
device_name,
device_brand,
memory,
cuda_compute_capability):
"""Data structure representing a GPU device.
Args:
pci_bus_id (hex): PCI bus ID of the GPU.
device_name (str): Name of the device GPU.
            device_brand (int): Brand of the GPU.
            memory (int): Total memory of the GPU, in bytes.
            cuda_compute_capability (tuple): CUDA compute capability (major, minor) of the GPU.
"""
self.name = device_name
self.pci_bus_id = pci_bus_id
if device_brand in BRAND_NAMES.keys():
self.brand = BRAND_NAMES[device_brand]
else:
self.brand = None
self.defined = True
self.memory = memory
self.cuda_compute_capability = cuda_compute_capability
def get_config(self):
"""Get json config of the device.
        Returns:
            config_dict (dict): Dictionary containing data about the device.
"""
assert self.defined, "Device wasn't defined."
config_dict = {}
config_dict["name"] = self.name.decode().replace(" ", "-")
config_dict["pci_bus_id"] = self.pci_bus_id.decode("utf-8")
config_dict["brand"] = self.brand
config_dict["memory"] = self.memory
config_dict["cuda_compute_capability"] = self.cuda_compute_capability
return config_dict
def __str__(self):
"""Generate a printable representation of the device."""
config = self.get_config()
data_string = json.dumps(config, indent=2)
return data_string
def pynvml_context(fn):
"""Simple decorator to setup python nvml context.
Args:
        fn: Function pointer.
    Returns:
        output of fn.
"""
def _fn_wrapper(*args, **kwargs):
"""Wrapper setting up nvml context."""
try:
pynvml.nvmlInit()
return fn(*args, **kwargs)
finally:
pynvml.nvmlShutdown()
return _fn_wrapper
@pynvml_context
def get_number_gpus_available():
"""Get the number of GPU's attached to the machine.
Returns:
num_gpus (int): Number of GPUs in the machine.
"""
num_gpus = pynvml.nvmlDeviceGetCount()
return num_gpus
@pynvml_context
def get_device_details():
"""Get details about each device.
Returns:
device_list (list): List of GPUDevice objects.
"""
num_gpus = pynvml.nvmlDeviceGetCount()
device_list = []
assert num_gpus > 0, "Atleast 1 GPU is required for TAO Toolkit to run."
for idx in range(num_gpus):
handle = pynvml.nvmlDeviceGetHandleByIndex(idx)
pci_info = pynvml.nvmlDeviceGetPciInfo(handle)
device_name = pynvml.nvmlDeviceGetName(handle)
brand_name = pynvml.nvmlDeviceGetBrand(handle)
memory = pynvml.nvmlDeviceGetMemoryInfo(handle)
cuda_compute_capability = pynvml.nvmlDeviceGetCudaComputeCapability(handle)
device_list.append(
GPUDevice(
pci_info.busId,
device_name,
brand_name,
memory.total,
cuda_compute_capability
)
)
return device_list
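# Illustrative sketch (assumption-based, not used elsewhere in the codebase) showing how
# the pynvml_context decorator above can wrap additional NVML queries.
@pynvml_context
def get_device_names():
    """Get the names of all GPU devices attached to the machine.
    Returns:
        names (list): List of GPU device names.
    """
    return [
        pynvml.nvmlDeviceGetName(pynvml.nvmlDeviceGetHandleByIndex(idx))
        for idx in range(pynvml.nvmlDeviceGetCount())
    ]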
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/telemetry/nvml_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO utils for gpu devices."""
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/telemetry/__init__.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""Utilties to send data to the TAO Toolkit Telemetry Remote Service."""
import json
import logging
import os
import shutil
import subprocess
import sys
import tarfile
import tempfile
import urllib
import requests
import urllib3
from nvidia_tao_tf1.cv.common.telemetry.nvml_utils import get_device_details
logger = logging.getLogger(__name__)
TELEMETRY_TIMEOUT = int(os.getenv("TELEMETRY_TIMEOUT", "30"))
def get_url_from_variable(variable, default=None):
"""Get the Telemetry Server URL."""
url = os.getenv(variable, default)
return url
def url_exists(url):
"""Check if a URL exists.
Args:
url (str): String to be verified as a URL.
Returns:
        valid (bool): True/False
"""
url_request = urllib.request.Request(url)
url_request.get_method = lambda: 'HEAD'
try:
urllib.request.urlopen(url_request)
return True
except urllib.request.URLError:
return False
def get_certificates():
"""Download the cacert.pem file and return the path.
Returns:
path (str): UNIX path to the certificates.
"""
certificates_url = get_url_from_variable("TAO_CERTIFICATES_URL")
if not url_exists(certificates_url):
raise urllib.request.URLError("Url for the certificates not found.")
tmp_dir = tempfile.mkdtemp()
download_command = "wget {} -P {} --quiet".format(
certificates_url,
tmp_dir
)
try:
subprocess.check_call(
download_command, shell=True, stdout=sys.stdout
)
except subprocess.CalledProcessError:
raise urllib.request.URLError("Download certificates.tar.gz failed.")
tarfile_path = os.path.join(tmp_dir, "certificates.tar.gz")
assert tarfile.is_tarfile(tarfile_path), (
"The downloaded file isn't a tar file."
)
with tarfile.open(name=tarfile_path, mode="r:gz") as tar_file:
filenames = tar_file.getnames()
for memfile in filenames:
member = tar_file.getmember(memfile)
tar_file.extract(member, tmp_dir)
file_list = [item for item in os.listdir(tmp_dir) if item.endswith(".pem")]
assert file_list, (
f"Didn't get pem files. Directory contents {file_list}"
)
return tmp_dir
def send_telemetry_data(network, action, gpu_data, num_gpus=1, time_lapsed=None, pass_status=False):
"""Wrapper to send TAO telemetry data.
Args:
network (str): Name of the network being run.
action (str): Subtask of the network called.
gpu_data (dict): Dictionary containing data about the GPU's in the machine.
num_gpus (int): Number of GPUs used in the job.
time_lapsed (int): Time lapsed.
pass_status (bool): Job passed or failed.
Returns:
No explicit returns.
"""
urllib3.disable_warnings(urllib3.exceptions.SubjectAltNameWarning)
if os.getenv('TELEMETRY_OPT_OUT', "no").lower() in ["no", "false", "0"]:
url = get_url_from_variable("TAO_TELEMETRY_SERVER")
data = {
"version": os.getenv("TAO_TOOLKIT_VERSION", "4.0.0"),
"action": action,
"network": network,
"gpu": [device["name"] for device in gpu_data[:num_gpus]],
"success": pass_status
}
if time_lapsed is not None:
data["time_lapsed"] = time_lapsed
certificate_dir = get_certificates()
cert = ('client-cert.pem', 'client-key.pem')
requests.post(
url,
json=data,
cert=tuple([os.path.join(certificate_dir, item) for item in cert]),
timeout=TELEMETRY_TIMEOUT
)
logger.debug("Telemetry data posted: \n{}".format(
json.dumps(data, indent=4)
))
shutil.rmtree(certificate_dir)
if __name__ == "__main__":
try:
print("Testing telemetry data ping.")
gpu_data = []
for device in get_device_details():
gpu_data.append(device.get_config())
print(device)
send_telemetry_data(
"detectnet_v2",
"train",
gpu_data,
1
)
except Exception as e:
logger.warning(
"Telemetry data failed with error:\n{}".format(e)
)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/telemetry/telemetry.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for TensorRT related operations."""
# TODO: remove EngineBuilder related code
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import sys
import traceback
try:
import tensorrt as trt # noqa pylint: disable=W0611 pylint: disable=W0611
# Get TensorRT version number.
[NV_TENSORRT_MAJOR, NV_TENSORRT_MINOR, NV_TENSORRT_PATCH, _] = [
int(item) for item
in trt.__version__.split(".")
]
trt_available = True
except Exception as e:
logger = logging.getLogger(__name__)
logger.warning(
"Failed to import TensorRT package, exporting TLT to a TensorRT engine "
"will not be available."
)
trt_available = False
# Default TensorRT parameters.
DEFAULT_MAX_WORKSPACE_SIZE = 2 * (1 << 30)
DEFAULT_MAX_BATCH_SIZE = 1
# Define logger.
logger = logging.getLogger(__name__)
def _create_tensorrt_logger(verbose=False):
"""Create a TensorRT logger.
Args:
verbose(bool): Flag to set logger as verbose or not.
Return:
        tensorrt_logger (trt.Logger): TensorRT logger object.
"""
if str(os.getenv('SUPPRES_VERBOSE_LOGGING', '0')) == '1':
# Do not print any warnings in TLT docker
trt_verbosity = trt.Logger.Severity.ERROR
elif verbose:
trt_verbosity = trt.Logger.INFO
else:
trt_verbosity = trt.Logger.WARNING
tensorrt_logger = trt.Logger(trt_verbosity)
return tensorrt_logger
def _set_excluded_layer_precision(network, fp32_layer_names, fp16_layer_names):
"""When generating an INT8 model, it sets excluded layers' precision as fp32 or fp16.
In detail, this function is only used when generating INT8 TensorRT models. It accepts
two lists of layer names: (1). for the layers in fp32_layer_names, their precision will
be set as fp32; (2). for those in fp16_layer_names, their precision will be set as fp16.
Args:
network: TensorRT network object.
fp32_layer_names (list): List of layer names. These layers use fp32.
fp16_layer_names (list): List of layer names. These layers use fp16.
"""
is_mixed_precision = False
use_fp16_mode = False
for i, layer in enumerate(network):
if any(s in layer.name for s in fp32_layer_names):
is_mixed_precision = True
layer.precision = trt.float32
layer.set_output_type(0, trt.float32)
logger.info("fp32 index: %d; name: %s", i, layer.name)
elif any(s in layer.name for s in fp16_layer_names):
is_mixed_precision = True
use_fp16_mode = True
layer.precision = trt.float16
layer.set_output_type(0, trt.float16)
logger.info("fp16 index: %d; name: %s", i, layer.name)
else:
layer.precision = trt.int8
layer.set_output_type(0, trt.int8)
return is_mixed_precision, use_fp16_mode
class EngineBuilder(object):
"""Create a TensorRT engine.
Args:
        filenames (list): List of filenames to load the model from.
        max_batch_size (int): Maximum batch size.
        max_workspace_size (int): Maximum workspace size.
dtype (str): data type ('fp32', 'fp16' or 'int8').
calibrator (:any:`Calibrator`): Calibrator to use for INT8 optimization.
fp32_layer_names (list): List of layer names. These layers use fp32.
fp16_layer_names (list): List of layer names. These layers use fp16.
verbose (bool): Whether to turn on verbose mode.
tensor_scale_dict (dict): Dictionary mapping names to tensor scaling factors.
strict_type(bool): Whether or not to apply strict_type_constraints for INT8 mode.
"""
def __init__(
self,
filenames,
max_batch_size=DEFAULT_MAX_BATCH_SIZE,
max_workspace_size=DEFAULT_MAX_WORKSPACE_SIZE,
dtype="fp32",
calibrator=None,
fp32_layer_names=None,
fp16_layer_names=None,
verbose=False,
tensor_scale_dict=None,
strict_type=False,
):
"""Initialization routine."""
if dtype == "int8":
self._dtype = trt.DataType.INT8
elif dtype == "fp16":
self._dtype = trt.DataType.HALF
elif dtype == "fp32":
self._dtype = trt.DataType.FLOAT
else:
raise ValueError("Unsupported data type: %s" % dtype)
self._strict_type = strict_type
if fp32_layer_names is None:
fp32_layer_names = []
elif dtype != "int8":
raise ValueError(
"FP32 layer precision could be set only when dtype is INT8"
)
if fp16_layer_names is None:
fp16_layer_names = []
elif dtype != "int8":
raise ValueError(
"FP16 layer precision could be set only when dtype is INT8"
)
self._fp32_layer_names = fp32_layer_names
self._fp16_layer_names = fp16_layer_names
self._tensorrt_logger = _create_tensorrt_logger(verbose)
builder = trt.Builder(self._tensorrt_logger)
config = builder.create_builder_config()
trt.init_libnvinfer_plugins(self._tensorrt_logger, "")
if self._dtype == trt.DataType.HALF and not builder.platform_has_fast_fp16:
logger.error("Specified FP16 but not supported on platform.")
raise AttributeError(
"Specified FP16 but not supported on platform.")
if self._dtype == trt.DataType.INT8 and not builder.platform_has_fast_int8:
logger.error("Specified INT8 but not supported on platform.")
raise AttributeError(
"Specified INT8 but not supported on platform.")
if self._dtype == trt.DataType.INT8:
if tensor_scale_dict is None and calibrator is None:
logger.error("Specified INT8 but neither calibrator "
"nor tensor_scale_dict is provided.")
raise AttributeError("Specified INT8 but no calibrator "
"or tensor_scale_dict is provided.")
network = builder.create_network()
self._load_from_files(filenames, network)
builder.max_batch_size = max_batch_size
config.max_workspace_size = max_workspace_size
if self._dtype == trt.DataType.HALF:
config.set_flag(trt.BuilderFlag.FP16)
if self._dtype == trt.DataType.INT8:
config.set_flag(trt.BuilderFlag.INT8)
if tensor_scale_dict is None:
config.int8_calibrator = calibrator
# When use mixed precision, for TensorRT builder:
# strict_type_constraints needs to be True;
# fp16_mode needs to be True if any layer uses fp16 precision.
set_strict_types, set_fp16_mode = \
_set_excluded_layer_precision(
network=network,
fp32_layer_names=self._fp32_layer_names,
fp16_layer_names=self._fp16_layer_names,
)
if set_strict_types:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if set_fp16_mode:
config.set_flag(trt.BuilderFlag.FP16)
else:
# Discrete Volta GPUs don't have int8 tensor cores. So TensorRT might
# not pick int8 implementation over fp16 or even fp32 for V100
# GPUs found on data centers (e.g., AVDC). This will be a discrepancy
# compared to Turing GPUs including d-GPU of DDPX and also Xavier i-GPU
# both of which have int8 accelerators. We set the builder to strict
# mode to avoid picking higher precision implementation even if they are
# faster.
if self._strict_type:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
else:
config.set_flag(trt.BuilderFlag.FP16)
self._set_tensor_dynamic_ranges(
network=network, tensor_scale_dict=tensor_scale_dict
)
engine = builder.build_engine(network, config)
try:
assert engine
except AssertionError:
logger.error("Failed to create engine")
_, _, tb = sys.exc_info()
traceback.print_tb(tb) # Fixed format
tb_info = traceback.extract_tb(tb)
_, line, _, text = tb_info[-1]
raise AssertionError(
"Parsing failed on line {} in statement {}".format(
line, text)
)
self._engine = engine
def _load_from_files(self, filenames, network):
"""Load an engine from files."""
raise NotImplementedError()
@staticmethod
def _set_tensor_dynamic_ranges(network, tensor_scale_dict):
"""Set the scaling factors obtained from quantization-aware training.
Args:
network: TensorRT network object.
tensor_scale_dict (dict): Dictionary mapping names to tensor scaling factors.
"""
tensors_found = []
for idx in range(network.num_inputs):
input_tensor = network.get_input(idx)
if input_tensor.name in tensor_scale_dict:
tensors_found.append(input_tensor.name)
cal_scale = tensor_scale_dict[input_tensor.name]
input_tensor.dynamic_range = (-cal_scale, cal_scale)
for layer in network:
found_all_outputs = True
for idx in range(layer.num_outputs):
output_tensor = layer.get_output(idx)
if output_tensor.name in tensor_scale_dict:
tensors_found.append(output_tensor.name)
cal_scale = tensor_scale_dict[output_tensor.name]
output_tensor.dynamic_range = (-cal_scale, cal_scale)
else:
found_all_outputs = False
if found_all_outputs:
layer.precision = trt.int8
tensors_in_dict = tensor_scale_dict.keys()
assert set(tensors_in_dict) == set(tensors_found), (
"Some of the tensor names specified in tensor "
"scale dictionary was not found in the network."
)
def get_engine(self):
"""Return the engine that was built by the instance."""
return self._engine
class UFFEngineBuilder(EngineBuilder):
"""Create a TensorRT engine from a UFF file.
Args:
filename (str): UFF file to create engine from.
input_node_name (str): Name of the input node.
input_dims (list): Dimensions of the input tensor.
output_node_names (list): Names of the output nodes.
"""
def __init__(
self,
filename,
input_node_name,
input_dims,
output_node_names,
*args,
**kwargs
):
"""Init routine."""
self._input_node_name = input_node_name
if not isinstance(output_node_names, list):
output_node_names = [output_node_names]
self._output_node_names = output_node_names
self._input_dims = input_dims
super(UFFEngineBuilder, self).__init__([filename], *args, **kwargs)
def _load_from_files(self, filenames, network):
filename = filenames[0]
parser = trt.UffParser()
for key, value in self._input_dims.items():
parser.register_input(key, value, trt.UffInputOrder(0))
for name in self._output_node_names:
parser.register_output(name)
try:
assert parser.parse(filename, network, trt.DataType.FLOAT)
except AssertionError:
logger.error("Failed to parse UFF File")
_, _, tb = sys.exc_info()
traceback.print_tb(tb) # Fixed format
tb_info = traceback.extract_tb(tb)
_, line, _, text = tb_info[-1]
raise AssertionError(
"UFF parsing failed on line {} in statement {}".format(line, text)
)
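if __name__ == "__main__":
    # Illustrative sketch only (assumption-based): build an FP16 engine from a UFF file.
    # The file path, node names and input dimensions below are hypothetical placeholders.
    example_builder = UFFEngineBuilder(
        "/models/model.uff",
        "input_1",
        {"input_1": (3, 544, 960)},
        ["output_cov/Sigmoid", "output_bbox/BiasAdd"],
        max_batch_size=4,
        dtype="fp16",
        verbose=True,
    )
    print(example_builder.get_engine())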
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/export/trt_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class to export trained .tlt keras models to etlt file."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import logging
import os
import tempfile
import keras
from keras.utils import CustomObjectScope
import tensorflow as tf
# Import quantization layer processing.
try:
import tensorrt # noqa pylint: disable=W0611 pylint: disable=W0611
from nvidia_tao_tf1.core.export._tensorrt import ONNXEngineBuilder, UFFEngineBuilder
except Exception as e:
logger = logging.getLogger(__name__)
logger.warning(
"Failed to import TensorRT package, exporting TLT to a TensorRT engine "
"will not be available."
)
from nvidia_tao_tf1.core.export._onnx import keras_to_onnx
from nvidia_tao_tf1.core.export._quantized import (
check_for_quantized_layers,
process_quantized_layers,
)
from nvidia_tao_tf1.core.export._uff import keras_to_pb, keras_to_uff
from nvidia_tao_tf1.core.export.app import get_model_input_dtype
from nvidia_tao_tf1.cv.common.export.base_exporter import BaseExporter
from nvidia_tao_tf1.cv.common.export.utils import pb_to_onnx
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.common.types.base_ds_config import BaseDSConfig
from nvidia_tao_tf1.cv.common.utils import (
CUSTOM_OBJS,
get_decoded_filename,
get_model_file_size,
get_num_params,
model_io
)
VALID_BACKEND = ["uff", "onnx"]
logger = logging.getLogger(__name__)
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
SUPPORTED_ONNX_ROUTES = [
"keras2onnx",
"tf2onnx"
]
class KerasExporter(BaseExporter):
"""Base class for exporter."""
def __init__(self, model_path=None,
key=None,
data_type="fp32",
strict_type=False,
backend="uff",
data_format="channels_first",
target_opset=None,
onnx_route="keras2onnx",
**kwargs):
"""Initialize the base exporter.
Args:
model_path (str): Path to the model file.
key (str): Key to load the model.
            data_type (str): TensorRT backend data type.
strict_type(bool): Apply TensorRT strict_type_constraints or not for INT8 mode.
backend (str): TensorRT parser to be used.
data_format (str): Model data format. Default is channels_first.
target_opset (int): Target opset to be used for ONNX file.
onnx_route (str): Package to convert the keras model to ONNX.
Supported routes are "keras2onnx" or "tf2onnx".
Returns:
None.
"""
super(KerasExporter, self).__init__(
model_path=model_path,
data_type=data_type,
strict_type=strict_type,
key=key,
backend=backend,
**kwargs
)
self.target_opset = target_opset
self.data_format = data_format
self._onnx_graph_node_name_dict = None
self._onnx_graph_node_op_dict = None
self.image_mean = None
self.experiment_spec = None
self.model_param_count = None
self.onnx_route = onnx_route
assert self.onnx_route in SUPPORTED_ONNX_ROUTES, (
f"ONNX route {self.onnx_route} not supported in {SUPPORTED_ONNX_ROUTES}"
)
logger.info("Setting the onnx export route to {}".format(
self.onnx_route
))
def set_session(self):
"""Set keras backend session."""
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
keras.backend.set_session(tf.Session(config=config))
def set_keras_backend_dtype(self):
"""Set the keras backend data type."""
keras.backend.set_learning_phase(0)
tmp_keras_file_name = get_decoded_filename(self.model_path,
self.key)
model_input_dtype = get_model_input_dtype(tmp_keras_file_name)
keras.backend.set_floatx(model_input_dtype)
def set_input_output_node_names(self):
"""Set input output node names."""
raise NotImplementedError("This function is not implemented in the base class.")
@staticmethod
def extract_tensor_scale(model, backend):
"""Extract tensor scale from QAT trained model and de-quantize the model."""
model, tensor_scale_dict = process_quantized_layers(
model, backend,
calib_cache=None,
calib_json=None)
print(f"Extracting tensor scale: {tensor_scale_dict}")
return model, tensor_scale_dict
def load_model(self):
"""Simple function to get the keras model."""
keras.backend.clear_session()
keras.backend.set_learning_phase(0)
model = model_io(self.model_path, enc_key=self.key)
# Resetting the keras model session
# When a model is loaded or transformed multiple times
# Keras added _x to the tensor names causing the
# tensor naming to mismatch. Therefore, we
# reset the keras session and reload the model
# so that the tensor names don't mismatch when exported
# to onnx.
print("Checking for quantized layers in the exporter.")
quantized_model = check_for_quantized_layers(model)
print(f"Quantized model: {quantized_model}")
if quantized_model:
print("Extracting quantized scales")
model, self.tensor_scale_dict = self.extract_tensor_scale(
model,
self.backend
)
os_handle, tmp_keras_model = tempfile.mkstemp(suffix=".hdf5")
os.close(os_handle)
with CustomObjectScope(CUSTOM_OBJS):
model.save(tmp_keras_model)
keras.backend.clear_session()
# Creating a new keras session.
self.set_session()
keras.backend.set_learning_phase(0)
with CustomObjectScope(CUSTOM_OBJS):
new_model = keras.models.load_model(tmp_keras_model)
os.remove(tmp_keras_model)
return new_model
return model
def set_backend(self, backend):
"""Set keras backend.
Args:
backend (str): Backend to be used.
                Supported backends are "uff" and "onnx".
"""
if backend not in VALID_BACKEND:
raise NotImplementedError('Invalid backend "{}" called'.format(backend))
self.backend = backend
def _get_onnx_node_by_name(self, onnx_graph, node_name, clean_cache=False):
'''Find onnx graph nodes by node_name.
Args:
onnx_graph: Input onnx graph
node_name: The node name to find
clean_cache: Whether to rebuild the graph node cache
Returns:
onnx_node
It's your responsibility to make sure:
1. The node names in the graph are unique and node_name exists in graph
2. If clean_cache is False, the node name cache you build last time should work
'''
if self._onnx_graph_node_name_dict is None or clean_cache:
self._onnx_graph_node_name_dict = {n.name: n for n in onnx_graph.nodes}
return self._onnx_graph_node_name_dict[node_name]
def _get_onnx_node_by_op(self, onnx_graph, op_name, clean_cache=False):
'''Find onnx graph nodes by op_name.
Args:
onnx_graph: Input onnx graph
op_name: The node op to find
clean_cache: Whether to rebuild the graph node cache
Returns:
List of onnx_nodes
It's your responsibility to make sure:
1. If clean_cache is False, the node name cache you build last time should work
'''
if self._onnx_graph_node_op_dict is None or clean_cache:
self._onnx_graph_node_op_dict = dict()
for n in onnx_graph.nodes:
if n.op in self._onnx_graph_node_op_dict:
self._onnx_graph_node_op_dict[n.op].append(n)
else:
self._onnx_graph_node_op_dict[n.op] = [n]
if op_name in self._onnx_graph_node_op_dict:
return self._onnx_graph_node_op_dict[op_name]
return []
def _fix_onnx_paddings(self, graph):
"""Fix the paddings in onnx graph so it aligns with the Keras patch."""
# third_party/keras/tensorflow_backend.py patched the semantics of
# SAME padding, the onnx model has to align with it.
for node in graph.nodes:
if node.op == "Conv":
# in case of VALID padding, there is no 'pads' attribute
# simply skip it
if node.attrs["auto_pad"] == "VALID":
continue
k = node.attrs['kernel_shape']
g = node.attrs['group']
d = node.attrs['dilations']
# always assume kernel shape is square
effective_k = [1 + (k[ki] - 1) * d[ki] for ki in range(len(d))]
# (pad_w // 2 , pad_h // 2) == (pad_left, pad_top)
keras_paddings = tuple((ek - 1) // 2 for ek in effective_k)
# (pad_left, pad_top, pad_right, pad_bottom)
if g == 1:
# if it is not VALID, then it has to be NOTSET,
# to enable explicit paddings below
node.attrs["auto_pad"] = "NOTSET"
# only apply this patch for non-group convolutions
node.attrs['pads'] = keras_paddings * 2
elif node.op in ["AveragePool", "MaxPool"]:
# skip VALID padding case.
if node.attrs["auto_pad"] == "VALID":
continue
k = node.attrs['kernel_shape']
# (pad_w // 2 , pad_h // 2) == (pad_left, pad_top)
keras_paddings = tuple((ek - 1) // 2 for ek in k)
# force it to be NOTSET to enable explicit paddings below
node.attrs["auto_pad"] = "NOTSET"
# (pad_left, pad_top, pad_right, pad_bottom)
node.attrs['pads'] = keras_paddings * 2
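    # Worked example of the padding math above (illustrative only): for a Conv node with
    # kernel_shape=[3, 3] and dilations=[2, 2], the effective kernel is 1 + (3 - 1) * 2 = 5
    # per axis, so keras_paddings is (2, 2) and the explicit 'pads' attribute becomes
    # (2, 2, 2, 2), i.e. (pad_left, pad_top, pad_right, pad_bottom).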
def save_exported_file(self, model, output_file_name):
"""Save the exported model file.
This routine converts a keras model to onnx/uff model
based on the backend the exporter was initialized with.
Args:
model (keras.models.Model): Keras model to be saved.
output_file_name (str): Path to the output etlt file.
Returns:
            output_file_name (str): Path to the exported model file.
"""
logger.debug("Saving exported model file at: {}.".format(output_file_name))
input_tensor_names = ""
# @vpraveen: commented out the preprocessor kwarg from keras_to_uff.
# todo: @vpraveen and @zhimeng, if required modify modulus code to add
# this.
if self.backend == "uff":
input_tensor_names, _, _ = keras_to_uff(
model,
output_file_name,
output_node_names=self.output_node_names,
custom_objects=CUSTOM_OBJS)
elif self.backend == "onnx":
if self.onnx_route == "keras2onnx":
keras_to_onnx(
model,
output_file_name,
custom_objects=CUSTOM_OBJS
)
else:
os_handle, tmp_pb_file = tempfile.mkstemp(
suffix=".pb"
)
os.close(os_handle)
input_tensor_names, out_tensor_names, _ = keras_to_pb(
model,
tmp_pb_file,
self.output_node_names,
custom_objects=CUSTOM_OBJS
)
if self.output_node_names is None:
self.output_node_names = out_tensor_names
logger.info("Model graph serialized to pb file.")
input_tensor_names, out_tensor_names = pb_to_onnx(
tmp_pb_file,
output_file_name,
input_tensor_names,
self.output_node_names,
self.target_opset,
verbose=False
)
# Forcing this to empty so that the etlt
# aligns with onnx etlt format.
input_tensor_names = ""
else:
raise NotImplementedError(
"Incompatible backend requested {}. Please choose between [onnx, uff]".format(
self.backend
)
)
return output_file_name
def generate_ds_config(self, input_dims, num_classes=None):
"""Generate Deepstream config element for the exported model."""
if input_dims[0] == 1:
color_format = "l"
else:
color_format = "bgr" if self.preprocessing_arguments["flip_channel"] else "rgb"
kwargs = {
"data_format": self.data_format,
"backend": self.backend,
# Setting this to 0 by default because there are more
# detection networks.
"network_type": 0
}
if num_classes:
kwargs["num_classes"] = num_classes
if self.backend == "uff":
kwargs.update({
"input_names": self.input_node_names,
"output_names": self.output_node_names
})
ds_config = BaseDSConfig(
self.preprocessing_arguments["scale"],
self.preprocessing_arguments["means"],
input_dims,
color_format,
self.key,
**kwargs
)
return ds_config
def get_class_labels(self):
"""Get list of class labels to serialize to a labels.txt file."""
return []
def clear_gpus(self):
"""Clear GPU memory before TRT engine building."""
tf.reset_default_graph()
def export(self, output_file_name, backend,
calibration_cache="", data_file_name="",
n_batches=1, batch_size=1, verbose=True,
calibration_images_dir="", save_engine=False,
engine_file_name="", max_workspace_size=1 << 30,
max_batch_size=1, min_batch_size=1, opt_batch_size=1,
force_ptq=False, static_batch_size=-1, gen_ds_config=True,
calib_json_file=""):
"""Simple function to export a model.
        This function first converts a keras graph to a uff/onnx model and then saves it to an
        etlt file. After which, it verifies the parsability of the etlt file by creating a
        TensorRT engine of the desired backend datatype.
Args:
output_file_name (str): Path to the output etlt file.
            backend (str): Backend parser to be used ("uff", "onnx").
calibration_cache (str): Path to the output calibration cache file.
data_file_name (str): Path to the data tensorfile for int8 calibration.
n_batches (int): Number of batches to calibrate model for int8 calibration.
batch_size (int): Number of images per batch.
verbose (bool): Flag to set verbose logging.
calibration_images_dir (str): Path to a directory of images for custom data
to calibrate the model over.
save_engine (bool): Flag to save the engine after training.
engine_file_name (str): Path to the engine file name.
force_ptq (bool): Flag to force post training quantization using TensorRT
for a QAT trained model. This is required iff the inference platform is
a Jetson with a DLA.
static_batch_size(int): Set a static batch size for exported etlt model.
Returns:
No explicit returns.
"""
# set dynamic_batch flag
dynamic_batch = bool(static_batch_size <= 0)
# save static_batch_size for use in load_model() method
self.static_batch_size = static_batch_size
# Set keras session.
self.set_backend(backend)
self.set_input_output_node_names()
self.status_logger.write(
data=None, message=f"Using input nodes: {self.input_node_names}"
)
self.status_logger.write(
data=None, message=f"Using output nodes: {self.output_node_names}"
)
logger.info("Using input nodes: {}".format(self.input_node_names))
logger.info("Using output nodes: {}".format(self.output_node_names))
# tensor_scale_dict is created in the load_model() method
model = self.load_model()
print("Loaded model")
# Update parameter count to the model metadata for TAO studio.
model_metadata = {
"param_count": get_num_params(model)
}
self.save_exported_file(model, output_file_name)
# Add the size of the exported .etlt file for TAO Studio.
model_metadata["size"] = get_model_file_size(output_file_name)
# Get int8 calibrator
calibrator = None
input_dims = self.get_input_dims(data_file_name=data_file_name, model=model)
max_batch_size = max(batch_size, max_batch_size)
logger.debug("Input dims: {}".format(input_dims))
# Clear the backend keras session.
keras.backend.clear_session()
# keras.backend.clear_session() does not entirely free up the memory.
self.clear_gpus()
# @scha: Store calib_json file for QAT models that will be used for TAO-Deploy
# TODO: Remove TRT engine related materials in the future release
if calib_json_file and self.tensor_scale_dict:
logger.debug("Storing scales generated during QAT into JSON")
calib_json_dir = os.path.dirname(calib_json_file)
if not os.path.exists(calib_json_dir):
os.makedirs(calib_json_dir, exist_ok=True)
calib_json_data = {"tensor_scales": {}}
for tensor in self.tensor_scale_dict:
calib_json_data["tensor_scales"][tensor] = float(self.tensor_scale_dict[tensor])
with open(calib_json_file, "w") as outfile:
json.dump(calib_json_data, outfile, indent=4)
if self.data_type == "int8":
# Discard extracted tensor scales if force_ptq is set.
if self.tensor_scale_dict is None or force_ptq:
# no tensor scale, take traditional INT8 calibration approach
# use calibrator to generate calibration cache
calibrator = self.get_calibrator(calibration_cache=calibration_cache,
data_file_name=data_file_name,
n_batches=n_batches,
batch_size=batch_size,
input_dims=input_dims,
calibration_images_dir=calibration_images_dir,
image_mean=self.image_mean)
logger.info("Calibration takes time especially if number of batches is large.")
self.status_logger.write(
data=None,
message="Calibration takes time especially if number of batches is large."
)
else:
# QAT model, take tensor scale approach
# dump tensor scale to calibration cache directly
self.status_logger.write(
data=None,
message="Extracting scales generated during QAT."
)
logger.info("Extracting scales generated during QAT")
self._calibration_cache_from_dict(
self.tensor_scale_dict,
calibration_cache,
)
if gen_ds_config:
self.set_data_preprocessing_parameters(input_dims, image_mean=self.image_mean)
labels = self.get_class_labels()
num_classes = None
output_root = os.path.dirname(output_file_name)
if labels:
num_classes = len(labels)
with open(os.path.join(output_root, "labels.txt"), "w") as lfile:
for label in labels:
lfile.write("{}\n".format(label))
assert lfile.closed, (
"Label file wasn't closed after saving."
)
# Generate DS Config file.
ds_config = self.generate_ds_config(
input_dims,
num_classes=num_classes
)
if not os.path.exists(output_root):
os.makedirs(output_root)
ds_file = os.path.join(output_root, "nvinfer_config.txt")
with open(ds_file, "w") as dsf:
dsf.write(str(ds_config))
assert dsf.closed, (
"Deepstream config file wasn't closed."
)
# Verify with engine generation / run calibration.
if save_engine:
if self.backend == "uff":
# Assuming single input node graph for uff engine creation.
in_tensor_name = self.input_node_names[0]
if not isinstance(input_dims, dict):
input_dims = {in_tensor_name: input_dims}
engine_builder = UFFEngineBuilder(
output_file_name,
in_tensor_name,
input_dims,
self.output_node_names,
max_batch_size=max_batch_size,
max_workspace_size=max_workspace_size,
dtype=self.data_type,
strict_type=self.strict_type,
verbose=verbose,
calibrator=calibrator,
tensor_scale_dict=None if force_ptq else self.tensor_scale_dict)
elif self.backend == "onnx":
engine_builder = ONNXEngineBuilder(
output_file_name,
max_batch_size=max_batch_size,
min_batch_size=min_batch_size,
max_workspace_size=max_workspace_size,
opt_batch_size=opt_batch_size,
dtype=self.data_type,
strict_type=self.strict_type,
verbose=verbose,
calibrator=calibrator,
tensor_scale_dict=None if force_ptq else self.tensor_scale_dict,
dynamic_batch=dynamic_batch)
else:
raise NotImplementedError("Invalid backend.")
trt_engine = engine_builder.get_engine()
with open(engine_file_name, "wb") as outf:
outf.write(trt_engine.serialize())
if trt_engine:
del trt_engine
# If engine is saved, return the file size of the engine
# instead of the model.
if save_engine:
model_metadata["size"] = get_model_file_size(engine_file_name)
self.status_logger.write(
data=model_metadata,
message="Export complete.",
status_level=status_logging.Status.SUCCESS
)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/export/keras_exporter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base calibrator class for TensorRT INT8 Calibration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
# Simple helper class for calibration.
try:
import tensorrt as trt # noqa pylint: disable=W0611 pylint: disable=W0611
trt_available = True
except Exception as e:
logger = logging.getLogger(__name__)
logger.warning(
"Failed to import TensorRT package, exporting TLT to a TensorRT engine "
"will not be available."
)
trt_available = False
logger = logging.getLogger(__name__)
if trt_available:
class BaseCalibrator(trt.IInt8EntropyCalibrator2):
"""Calibrator class."""
def __init__(self, cache_filename,
n_batches,
batch_size,
*args, **kwargs):
"""Init routine.
This inherits from ``trt.IInt8EntropyCalibrator2`` to implement
the calibration interface that TensorRT needs to calibrate the
INT8 quantization factors.
Args:
cache_filename (str): name of calibration file to read/write to.
n_batches (int): number of batches for calibrate for.
batch_size (int): batch size to use for calibration data.
"""
super(BaseCalibrator, self).__init__(*args, **kwargs)
self._data_source = None
self._cache_filename = cache_filename
self._batch_size = batch_size
self._n_batches = n_batches
self._batch_count = 0
self._data_mem = None
def instantiate_data_source(self, data_filename):
"""Simple function to instantiate the data_source of the dataloader.
Args:
data_filename (str): The path to the data file.
Returns:
No explicit returns.
"""
raise NotImplementedError(
"Base calibrator doesn't implement data source instantiation."
)
def get_data_from_source(self):
"""Simple function to get data from the defined data_source."""
raise NotImplementedError(
"Base calibrator doesn't implement yielding data from data source"
)
def get_batch(self, names):
"""Return one batch.
Args:
names (list): list of memory bindings names.
"""
raise NotImplementedError(
"Base calibrator doesn't implement calibrator get_batch()"
)
def get_batch_size(self):
"""Return batch size."""
return self._batch_size
def read_calibration_cache(self):
"""Read calibration from file."""
logger.debug("read_calibration_cache - no-op")
if os.path.isfile(self._cache_filename):
logger.warning("Calibration file exists at {}."
" Reading this cache.".format(self._cache_filename))
with open(self._cache_filename, "rb") as cal_file:
return cal_file.read()
return None
def write_calibration_cache(self, cache):
"""Write calibration to file.
Args:
cache (memoryview): buffer to read calibration data from.
"""
logger.info("Saving calibration cache (size %d) to %s",
len(cache), self._cache_filename)
with open(self._cache_filename, 'wb') as f:
f.write(cache)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/export/base_calibrator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to export a trained model to an ETLT file for deployment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/export/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class to export trained .tlt keras models to etlt file."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import abstractmethod
import json
import logging
import os
import random
import struct
import numpy as np
from PIL import Image
from six.moves import xrange
from tqdm import tqdm
try:
import tensorrt as trt # noqa pylint: disable=W0611 pylint: disable=W0611
from nvidia_tao_tf1.cv.common.export.tensorfile_calibrator import TensorfileCalibrator
from nvidia_tao_tf1.cv.common.export.trt_utils import (
NV_TENSORRT_MAJOR,
NV_TENSORRT_MINOR,
NV_TENSORRT_PATCH
)
trt_available = True
except Exception as e:
logger = logging.getLogger(__name__)
logger.warning(
"Failed to import TensorRT package, exporting TLT to a TensorRT engine "
"will not be available."
)
trt_available = False
from nvidia_tao_tf1.cv.common.export.tensorfile import TensorFile
from nvidia_tao_tf1.cv.common.logging import logging as status_logging
# Define valid backend available for the exporter.
VALID_BACKEND = ["uff", "onnx"]
logger = logging.getLogger(__name__)
class BaseExporter(object):
"""Base class for exporter."""
def __init__(self, model_path=None,
key=None,
data_type="fp32",
strict_type=False,
backend="uff",
**kwargs):
"""Initialize the base exporter.
Args:
model_path (str): Path to the model file.
key (str): Key to load the model.
            data_type (str): TensorRT backend data type.
strict_type(bool): Apply TensorRT strict_type_constraints or not for INT8 mode.
backend (str): TensorRT parser to be used.
Returns:
None.
"""
self.data_type = data_type
self.strict_type = strict_type
self.model_path = model_path
# if key is str, it will be converted to bytes in nvidia_tao_tf1.encoding
self.key = key
self.set_backend(backend)
self.status_logger = status_logging.get_status_logger()
self.tensor_scale_dict = None
self._trt_version_number = NV_TENSORRT_MAJOR * 1000 + NV_TENSORRT_MINOR * 100 + \
NV_TENSORRT_PATCH
self.image_depth = 8
def set_session(self):
"""Set keras backend session."""
raise NotImplementedError("To be implemented by the class being used.")
def set_keras_backend_dtype(self):
"""Set the keras backend data type."""
raise NotImplementedError(
"To be implemented by the class being used.")
@abstractmethod
def set_input_output_node_names(self):
"""Set input output node names."""
raise NotImplementedError(
"This function is not implemented in the base class.")
def extract_tensor_scale(self, model, backend):
"""Extract tensor scale from QAT trained model and de-quantize the model."""
raise NotImplementedError(
"This function is not implemented in the base class.")
@abstractmethod
def load_model(self, backend="uff"):
"""Simple function to get the keras model."""
raise NotImplementedError(
"This function is not implemented in the base class.")
@abstractmethod
def get_class_labels(self, experiment_spec=None):
"""Save the labels file required for deepstream implementation."""
raise NotImplementedError("Base class doesn't implement get_class_labels function.")
def set_backend(self, backend):
"""Set keras backend.
Args:
backend (str): Backend to be used.
                Supported backends are "uff" and "onnx".
"""
if backend not in VALID_BACKEND:
raise NotImplementedError(
'Invalid backend "{}" called'.format(backend))
self.backend = backend
@abstractmethod
def generate_ds_config(self, input_dims, num_classes=None):
"""Generate the deepstream config for the exported model.
Args:
input_dims (tuple): Input dimensions of the model.
Returns:
ds_config (nvidia_tao_tf1.cv.common.types.base_ds_config.BaseDSConfig): Deepstream
config element.
"""
raise NotImplementedError(
"Generate the DeepStream config for the model."
)
@abstractmethod
def save_exported_file(self, model, output_file_name):
"""Save the exported model file.
This routine converts a keras model to onnx/uff model
based on the backend the exporter was initialized with.
Args:
model (keras.models.Model): Keras model to be saved.
output_file_name (str): Path to the output etlt file.
Returns:
tmp_file_name (str): Path to the temporary uff file.
"""
raise NotImplementedError(
"This function is not implemented in the base class.")
def get_calibrator(self,
calibration_cache,
data_file_name,
n_batches,
batch_size,
input_dims,
calibration_images_dir=None,
image_mean=None):
"""Simple function to get an int8 calibrator.
Args:
calibration_cache (str): Path to store the int8 calibration cache file.
            data_file_name (str): Path to the TensorFile. If the tensorfile doesn't exist
                at this path, one is created containing n_batches of either random tensors
                or images drawn from calibration_images_dir, each batch of shape
                (batch_size,) + input_dims.
n_batches (int): Number of batches to calibrate the model over.
batch_size (int): Number of input tensors per batch.
input_dims (tuple): Tuple of input tensor dimensions in CHW order.
calibration_images_dir (str): Path to a directory of images to generate the
data_file from.
image_mean (list): image mean per channel.
Returns:
            calibrator (nvidia_tao_tf1.cv.common.export.tensorfile_calibrator.TensorfileCalibrator):
                An IInt8EntropyCalibrator2-based instance to calibrate the TensorRT engine.
"""
if not os.path.exists(data_file_name):
self.generate_tensor_file(data_file_name,
calibration_images_dir,
input_dims,
n_batches=n_batches,
batch_size=batch_size,
image_mean=image_mean)
calibrator = TensorfileCalibrator(data_file_name,
calibration_cache,
n_batches,
batch_size)
return calibrator
def _calibration_cache_from_dict(self, tensor_scale_dict,
calibration_cache=None,
calib_json=None):
"""Write calibration cache file for QAT model.
This function converts a tensor scale dictionary generated by processing
        QAT models into the format TensorRT expects when reading a
        trt.IInt8EntropyCalibrator2 calibration cache file.
Args:
            tensor_scale_dict (dict): Dictionary mapping tensor names to calibration scale values.
            calibration_cache (str): Path to output calibration cache file.
            calib_json (str): Optional path to also dump the scales as a JSON file.
Returns:
No explicit returns.
"""
if calibration_cache is not None:
cal_cache_str = "TRT-{}-EntropyCalibration2\n".format(
self._trt_version_number)
assert not os.path.exists(calibration_cache), (
"A pre-existing cache file exists. Please delete this "
"file and re-run export."
)
# Converting float numbers to hex representation.
for tensor in tensor_scale_dict:
scaling_factor = tensor_scale_dict[tensor] / 127.0
cal_scale = hex(struct.unpack(
"i", struct.pack("f", scaling_factor))[0])
assert cal_scale.startswith(
"0x"), "Hex number expected to start with 0x."
cal_scale = cal_scale[2:]
cal_cache_str += tensor + ": " + cal_scale + "\n"
with open(calibration_cache, "w") as f:
f.write(cal_cache_str)
if calib_json is not None:
calib_json_data = {"tensor_scales": {}}
for tensor in tensor_scale_dict:
calib_json_data["tensor_scales"][tensor] = float(
tensor_scale_dict[tensor])
with open(calib_json, "w") as outfile:
json.dump(calib_json_data, outfile, indent=4)
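    # Illustrative sketch (values hypothetical): a tensor_scale_dict of {"conv1": 2.54}
    # with TensorRT 8.5.1 would yield a cache file along the lines of
    #   TRT-8501-EntropyCalibration2
    #   conv1: 3ca3d70a
    # where 3ca3d70a is the IEEE-754 float32 encoding of 2.54 / 127.0.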
def set_data_preprocessing_parameters(self, input_dims, image_mean=None):
"""Set data pre-processing parameters for the int8 calibration."""
num_channels = input_dims[0]
if num_channels == 3:
assert self.image_depth == 8, (
f"RGB images only support image depth of 8, got {self.image_depth}"
)
if not image_mean:
means = [103.939, 116.779, 123.68]
else:
assert len(image_mean) == 3, "Image mean should have 3 values for RGB inputs."
means = image_mean
elif num_channels == 1:
if not image_mean:
if self.image_depth == 8:
means = [117.3786]
elif self.image_depth == 16:
means = [117.3786*256.]
else:
raise ValueError(
"Single-channel images only support depth of 8 or 16, "
f"got {self.image_depth}"
)
else:
assert len(image_mean) == 1, "Image mean should have 1 value for grayscale inputs."
means = image_mean
else:
raise NotImplementedError(
"Invalid number of dimensions {}.".format(num_channels))
self.preprocessing_arguments = {"scale": 1.0,
"means": means,
"flip_channel": True}
def generate_tensor_file(self, data_file_name,
calibration_images_dir,
input_dims, n_batches=10,
batch_size=1, image_mean=None):
"""Generate calibration Tensorfile for int8 calibrator.
This function generates a calibration tensorfile from a directory of images, or dumps
n_batches of random numpy arrays of shape (batch_size,) + (input_dims).
Args:
data_file_name (str): Path to the output tensorfile to be saved.
calibration_images_dir (str): Path to the images to generate a tensorfile from.
input_dims (list): Input shape in CHW order.
n_batches (int): Number of batches to be saved.
batch_size (int): Number of images per batch.
image_mean (list): Image mean per channel.
Returns:
No explicit returns.
"""
if not os.path.exists(calibration_images_dir):
logger.info("Generating a tensorfile with random tensor images. This may work well as "
"a profiling tool, however, it may result in inaccurate results at "
"inference. Please generate a tensorfile using the tlt-int8-tensorfile, "
"or provide a custom directory of images for best performance.")
self.generate_random_tensorfile(data_file_name,
input_dims,
n_batches=n_batches,
batch_size=batch_size)
else:
# Preparing the list of images to be saved.
num_images = n_batches * batch_size
valid_image_ext = ['jpg', 'jpeg', 'png']
image_list = [os.path.join(calibration_images_dir, image)
for image in os.listdir(calibration_images_dir)
if image.split('.')[-1] in valid_image_ext]
if len(image_list) < num_images:
                raise ValueError('Not enough calibration images provided:'
' {} < {}'.format(len(image_list), num_images))
image_idx = random.sample(xrange(len(image_list)), num_images)
self.set_data_preprocessing_parameters(input_dims, image_mean)
# Writing out processed dump.
with TensorFile(data_file_name, 'w') as f:
for chunk in tqdm(image_idx[x:x+batch_size] for x in xrange(0, len(image_idx),
batch_size)):
dump_data = self.prepare_chunk(chunk, image_list,
image_width=input_dims[2],
image_height=input_dims[1],
channels=input_dims[0],
batch_size=batch_size,
**self.preprocessing_arguments)
f.write(dump_data)
@staticmethod
def generate_random_tensorfile(data_file_name, input_dims, n_batches=1, batch_size=1):
"""Generate a random tensorfile.
This function generates a random tensorfile containing n_batches of random np.arrays
of dimensions (batch_size,) + (input_dims).
Args:
data_file_name (str): Path to where the data tensorfile will be stored.
input_dims (tuple): Input blob dimensions in CHW order.
n_batches (int): Number of batches to save.
batch_size (int): Number of images per batch.
Return:
No explicit returns.
"""
sample_shape = (batch_size, ) + tuple(input_dims)
with TensorFile(data_file_name, 'w') as f:
for i in tqdm(xrange(n_batches)):
logger.debug("Writing batch: {}".format(i))
dump_sample = np.random.sample(sample_shape)
f.write(dump_sample)
@staticmethod
def prepare_chunk(image_ids, image_list,
image_width=480,
image_height=272,
channels=3,
scale=1.0,
means=None,
flip_channel=False,
batch_size=1):
"""Prepare a single batch of data to dump into a Tensorfile."""
dump_placeholder = np.zeros(
(batch_size, channels, image_height, image_width))
for i in xrange(len(image_ids)):
idx = image_ids[i]
im = Image.open(image_list[idx]).resize((image_width, image_height),
Image.ANTIALIAS)
if channels == 1:
logger.debug("Converting image from RGB to Grayscale")
if im.mode in ('RGBA', 'LA') or (im.mode == 'P' and 'transparency' in im.info):
bg_colour = (255, 255, 255)
# Need to convert to RGBA if LA format due to a bug in PIL
alpha = im.convert('RGBA').split()[-1]
# Create a new background image of our matt color.
# Must be RGBA because paste requires both images have the same format
bg = Image.new("RGBA", im.size, bg_colour + (255,))
bg.paste(im, mask=alpha)
im = im.convert('L')
dump_input = np.asarray(im).astype(np.float32)
dump_input = dump_input[:, :, np.newaxis]
elif channels == 3:
dump_input = np.asarray(im.convert('RGB')).astype(np.float32)
else:
raise NotImplementedError("Unsupported channel dimensions.")
# flip channel: RGB --> BGR
if flip_channel:
dump_input = dump_input[:, :, ::-1]
# means is a list of per-channel means, (H, W, C) - (C)
if means is not None:
dump_input -= np.array(means)
# (H, W, C) --> (C, H, W)
dump_input = dump_input.transpose(2, 0, 1) * scale
dump_placeholder[i, :, :, :] = dump_input
return dump_placeholder
def get_input_dims(self, data_file_name=None, model=None):
"""Simple function to get input layer dimensions.
Args:
data_file_name (str): Path to the calibration tensor file.
model (keras.models.Model): Keras model object.
Returns:
input_dims (list): Input dimensions in CHW order.
"""
if not os.path.exists(data_file_name):
logger.debug(
"Data file doesn't exist. Pulling input dimensions from the network.")
input_dims = self.get_input_dims_from_model(model)
else:
# Read the input dims from the Tensorfile.
logger.debug("Reading input dims from tensorfile.")
with TensorFile(data_file_name, "r") as tfile:
batch = tfile.read()
# Disabling pylint for py3 in this line due to a pylint issue.
# Follow issue: https://github.com/PyCQA/pylint/issues/3139
# and remove when ready.
input_dims = np.array(batch).shape[1:] # pylint: disable=E1136
return input_dims
@staticmethod
def get_input_dims_from_model(model=None):
"""Read input dimensions from the model.
Args:
model (keras.models.Model): Model to get input dimensions from.
Returns:
input_dims (tuple): Input dimensions.
"""
if model is None:
raise IOError("Invalid model object.")
input_dims = model.layers[0].input_shape[1:]
return input_dims
@abstractmethod
def export(self, output_file_name, backend,
calibration_cache="", data_file_name="",
n_batches=1, batch_size=1, verbose=True,
calibration_images_dir="", save_engine=False,
engine_file_name="", max_workspace_size=1 << 30,
max_batch_size=1, force_ptq=False):
"""Simple function to export a model.
        This function first converts the Keras graph to a UFF or ONNX model and saves it
        to an etlt file. It then verifies that the exported file can be parsed by building
        a TensorRT engine with the requested backend data type.
Args:
output_file_name (str): Path to the output etlt file.
            backend (str): Backend parser to be used ("uff" or "onnx").
calibration_cache (str): Path to the output calibration cache file.
data_file_name (str): Path to the data tensorfile for int8 calibration.
n_batches (int): Number of batches to calibrate model for int8 calibration.
batch_size (int): Number of images per batch.
verbose (bool): Flag to set verbose logging.
calibration_images_dir (str): Path to a directory of images for custom data
to calibrate the model over.
save_engine (bool): Flag to save the engine after training.
engine_file_name (str): Path to the engine file name.
force_ptq (bool): Flag to force post training quantization using TensorRT
for a QAT trained model. This is required iff the inference platform is
a Jetson with a DLA.
Returns:
No explicit returns.
"""
raise NotImplementedError("Base Class doesn't implement this method.")
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/export/base_exporter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modulus export APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import logging
import h5py
import numpy as np
"""Logger for data export APIs."""
logger = logging.getLogger(__name__)
class TensorFile(io.RawIOBase):
"""Class to read/write multiple tensors to a file.
The underlying implementation using an HDF5 database
to store data.
Note: this class does not support multiple writers to
the same file.
Args:
filename (str): path to file.
mode (str): mode to open file in.
r Readonly, file must exist
r+ Read/write, file must exist
w Create file, truncate if exists
w- Create file, fail if exists
a Read/write if exists, create otherwise (default)
enforce_same_shape (bool): whether to enforce that all tensors be the same shape.
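
    Example (an illustrative sketch; the file name is hypothetical):

        >>> import numpy as np
        >>> with TensorFile("batches.h5", "w") as f:
        ...     f.write(np.zeros((2, 3, 8, 8), dtype=np.float32))
        >>> with TensorFile("batches.h5", "r") as f:
        ...     batch = f.read()
        >>> batch.shape
        (2, 3, 8, 8)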
"""
DEFAULT_ARRAY_KEY = "_tensorfile_array_key_"
GROUP_NAME_PREFIX = "_tensorfile_array_key_"
def __init__(
self, filename, mode="a", enforce_same_shape=True, *args, **kwargs
): # pylint: disable=W1113
"""Init routine."""
super(TensorFile, self).__init__(*args, **kwargs)
logger.debug("Opening %s with mode=%s", filename, mode)
self._enforce_same_shape = enforce_same_shape
self._mode = mode
# Open or create the HDF5 file.
self._db = h5py.File(filename, mode)
if "count" not in self._db.attrs:
self._db.attrs["count"] = 0
if "r" in mode:
self._cursor = 0
else:
self._cursor = self._db.attrs["count"]
    @classmethod
    def _get_group_name(cls, cursor):
"""Return the name of the H5 dataset to create, given a cursor index."""
return "%s_%d" % (cls.GROUP_NAME_PREFIX, cursor)
def _write_data(self, group, data):
for key, value in data.items():
if isinstance(value, dict):
self._write_data(group.create_group(key), value)
elif isinstance(value, np.ndarray):
if self._enforce_same_shape:
if "shape" not in self._db.attrs:
self._db.attrs["shape"] = value.shape
else:
expected_shape = tuple(
self._db.attrs["shape"].tolist())
if expected_shape != value.shape:
raise ValueError(
"Shape mismatch: %s v.s. %s"
% (str(expected_shape), str(value.shape))
)
group.create_dataset(key, data=value, compression="gzip")
else:
raise ValueError(
"Only np.ndarray or dicts can be written into a TensorFile."
)
def close(self):
"""Close this file."""
self._db.close()
# For python2.
def next(self):
"""Return next element."""
return self.__next__()
# For python3.
def __next__(self):
"""Return next element."""
if self._cursor < self._db.attrs["count"]:
return self.read()
raise StopIteration()
def _read_data(self, group):
if isinstance(group, h5py.Group):
data = {key: self._read_data(value)
for key, value in group.items()}
else:
data = group[()]
return data
def read(self):
"""Read from current cursor.
Return array assigned to current cursor, or ``None`` to indicate
the end of the file.
"""
if not self.readable():
raise IOError("Instance is not readable.")
group_name = self._get_group_name(self._cursor)
if group_name in self._db:
self._cursor += 1
group = self._db[group_name]
data = self._read_data(group)
if list(data.keys()) == [self.DEFAULT_ARRAY_KEY]:
# The only key in this group is the default key.
# Return the numpy array directly.
return data[self.DEFAULT_ARRAY_KEY]
return data
return None
def readable(self):
"""Return whether this instance is readable."""
return self._mode in ["r", "r+", "a"]
def seekable(self):
"""Return whether this instance is seekable."""
return True
def seek(self, n):
"""Move cursor."""
self._cursor = min(n, self._db.attrs["count"])
return self._cursor
def tell(self):
"""Return current cursor index."""
return self._cursor
def truncate(self, n):
"""Truncation is not supported."""
raise IOError("Truncate operation is not supported.")
def writable(self):
"""Return whether this instance is writable."""
return self._mode in ["r+", "w", "w-", "a"]
def write(self, data):
"""Write a Numpy array or a dictionary of numpy arrays into file."""
if not self.writable():
raise IOError("Instance is not writable.")
if isinstance(data, np.ndarray):
data = {self.DEFAULT_ARRAY_KEY: data}
group_name = self._get_group_name(self._cursor)
# Delete existing instance of datasets at this cursor position.
if group_name in self._db:
del self._db[group_name]
group = self._db.create_group(group_name)
self._write_data(group, data)
self._cursor += 1
if self._cursor > self._db.attrs["count"]:
self._db.attrs["count"] = self._cursor
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/export/tensorfile.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for onnx export."""
import logging
import tensorflow.compat.v1 as tf
import tf2onnx
logger = logging.getLogger(__name__)
def pb_to_onnx(
input_filename,
output_filename,
input_node_names,
output_node_names,
target_opset=12,
verbose=False
):
"""Convert a TensorFlow model to ONNX.
The input model needs to be passed as a frozen Protobuf file.
The exported ONNX model may be parsed and optimized by TensorRT.
Args:
input_filename (str): path to protobuf file.
output_filename (str): file to write exported model to.
input_node_names (list of str): list of model input node names as
returned by model.layers[some_idx].get_output_at(0).name.split(':')[0].
output_node_names (list of str): list of model output node names as
returned by model.layers[some_idx].get_output_at(0).name.split(':')[0].
        target_opset (int): Target ONNX opset version to use (defaults to 12).
Returns:
tuple<in_tensor_name(s), out_tensor_name(s):
in_tensor_name(s): The name(s) of the input nodes. If there is only one name, it will be
returned as a single string, otherwise a list of strings.
out_tensor_name(s): The name(s) of the output nodes. If there is only one name, it will be
returned as a single string, otherwise a list of strings.
"""
graphdef = tf.GraphDef()
with tf.gfile.GFile(input_filename, "rb") as frozen_pb:
graphdef.ParseFromString(frozen_pb.read())
if not isinstance(input_node_names, list):
input_node_names = [input_node_names]
if not isinstance(output_node_names, list):
output_node_names = [output_node_names]
# The ONNX parser requires tensors to be passed in the node_name:port_id format.
# Since we reset the graph below, we assume input and output nodes have a single port.
input_node_names = ["{}:0".format(node_name) for node_name in input_node_names]
output_node_names = ["{}:0".format(node_name) for node_name in output_node_names]
logger.info(
"Input node names: {input_node_names}.".format(
input_node_names=input_node_names
)
)
logger.info(
"Output node names: {output_node_names}.".format(
output_node_names=output_node_names
)
)
tf.reset_default_graph()
# `tf2onnx.tfonnx.process_tf_graph` prints out layer names when
# folding the layers. Disabling INFO logging for TLT branch.
# logging.getLogger("tf2onnx.tfonnx").setLevel(logging.WARNING)
with tf.Graph().as_default() as tf_graph:
tf.import_graph_def(graphdef, name="")
onnx_graph = tf2onnx.tfonnx.process_tf_graph(
tf_graph,
input_names=input_node_names,
output_names=output_node_names,
continue_on_error=True,
verbose=verbose,
opset=target_opset,
)
onnx_graph = tf2onnx.optimizer.optimize_graph(onnx_graph)
model_proto = onnx_graph.make_model("test")
with open(output_filename, "wb") as f:
f.write(model_proto.SerializeToString())
# Reload and check ONNX model.
# Temporary disabling the load onnx section.
# onnx_model = onnx.load(output_filename)
# onnx.checker.check_model(onnx_model)
# Return a string instead of a list if there is only one input or output.
if len(input_node_names) == 1:
input_node_names = input_node_names[0]
if len(output_node_names) == 1:
output_node_names = output_node_names[0]
return input_node_names, output_node_names
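# Example call (an illustrative sketch; file and node names below are hypothetical):
#   in_name, out_name = pb_to_onnx(
#       "frozen_model.pb", "model.onnx",
#       input_node_names=["input_1"],
#       output_node_names=["predictions/Softmax"],
#       target_opset=12)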
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/export/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hook for job progress monitoring on clusters."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import timedelta
import logging
import time
import tensorflow.compat.v1 as tf
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
logger = logging.getLogger(__name__)
s_logger = status_logging.get_status_logger()
MONITOR_JSON_FILENAME = "monitor.json"
def write_status_json(
save_path, loss_value, current_epoch, max_epoch, time_per_epoch, ETA, learning_rate
):
"""Write out the data to the status.json file initiated by the experiment for monitoring.
Args:
save_path (str): Path where monitor.json needs to be saved. Basically the
result directory.
loss_value (float): Current value of loss to be recorder in the monitor.
current_epoch (int): Current epoch.
max_epoch (int): Total number of epochs.
time_per_epoch (float): Time per epoch in seconds.
        ETA (float): Estimated remaining time to completion, in seconds.
learning_rate (float): Learning rate tensor.
Returns:
monitor_data (dict): The monitor data as a dict.
"""
monitor_data = {
"epoch": current_epoch,
"max_epoch": max_epoch,
"time_per_epoch": str(timedelta(seconds=time_per_epoch)),
"eta": str(timedelta(seconds=ETA)),
}
# Save the json file.
try:
s_logger.graphical = {
"loss": loss_value,
"learning_rate": learning_rate
}
s_logger.write(
data=monitor_data,
status_level=status_logging.Status.RUNNING)
except IOError:
# We let this pass because we do not want the json file writing to crash the whole job.
pass
    monitor_data.update(
        {"loss": loss_value, "learning_rate": learning_rate}
    )
    return monitor_data
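# The returned dict looks roughly like this (values illustrative):
#   {"epoch": 3, "max_epoch": 120, "time_per_epoch": "0:02:15", "eta": "4:23:45",
#    "loss": 0.0213, "learning_rate": 5e-05}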
class TaskProgressMonitorHook(tf.estimator.SessionRunHook):
"""Log loss and epochs for monitoring progress of cluster jobs.
Writes the current training progress (current loss, current epoch and
maximum epoch) to a json file.
"""
def __init__(self, loggable_tensors, save_path, epochs, steps_per_epoch):
"""Initialization.
Args:
            loggable_tensors (dict): Dictionary of tensors to fetch at each step. Expected
                to contain the keys "loss", "step" and "learning_rate".
save_path (str): Absolute save path.
epochs (int): Number of training epochs.
steps_per_epoch (int): Number of steps per epoch.
"""
# Define the tensors to be fetched at every step.
self._fetches = loggable_tensors
self.save_path = save_path
self.epochs = epochs
self.steps_per_epoch = steps_per_epoch
# Initialize variables for epoch time calculation.
self.time_per_epoch = 0
self._step_start_time = None
# Closest estimate of the start time, in case starting from mid-epoch.
self._epoch_start_time = time.time()
def before_run(self, run_context):
"""Request loss and global step from the session.
Args:
run_context: A `SessionRunContext` object.
Returns:
A `SessionRunArgs` object.
"""
# Record start time for each step. Use the value later, if this step started an epoch.
self._step_start_time = time.time()
# Assign the tensors to be fetched.
return tf.train.SessionRunArgs(self._fetches)
def after_run(self, run_context, run_values):
"""Write the progress to json-file after each epoch.
Args:
run_context: A `SessionRunContext` object.
run_values: A `SessionRunValues` object. Contains the loss value
requested by before_run().
"""
# Get the global step value.
step = run_values.results["step"]
if (step + 1) % self.steps_per_epoch == 0:
# Last step of an epoch is completed.
epoch_end_time = time.time()
self.time_per_epoch = epoch_end_time - self._epoch_start_time
if step % self.steps_per_epoch == 0:
# First step of a new epoch is completed. Store the time when step was started.
self._epoch_start_time = self._step_start_time
loss_value = run_values.results["loss"]
learning_rate = run_values.results["learning_rate"]
current_epoch = int(step // self.steps_per_epoch)
monitor_data = write_status_json(
save_path=self.save_path,
loss_value=float(loss_value),
current_epoch=current_epoch,
max_epoch=self.epochs,
time_per_epoch=self.time_per_epoch,
ETA=(self.epochs - current_epoch) * self.time_per_epoch,
learning_rate=float(learning_rate)
)
logger.info(
"Epoch %d/%d: loss: %0.5f learning rate: %0.5f Time taken: %s ETA: %s"
% (
monitor_data["epoch"],
monitor_data["max_epoch"],
monitor_data["loss"],
monitor_data["learning_rate"],
monitor_data["time_per_epoch"],
monitor_data["eta"],
)
)
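# Usage sketch (paths and tensors below are hypothetical): the hook expects a dict of
# fetchable tensors keyed "loss", "step" and "learning_rate".
#   progress_hook = TaskProgressMonitorHook(
#       loggable_tensors={"loss": loss_op, "step": global_step, "learning_rate": lr_op},
#       save_path="/results", epochs=120, steps_per_epoch=500)
#   with tf.train.MonitoredTrainingSession(hooks=[progress_hook]) as sess:
#       ...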
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/export/task_progress_monitor_hook.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to export a trained TLT model to an ETLT file for deployment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from datetime import datetime as dt
import logging
import os
import keras # noqa pylint: disable=F401, W0611
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
logger = logging.getLogger(__name__)
DEFAULT_MAX_WORKSPACE_SIZE = 2 * (1 << 30)
DEFAULT_MAX_BATCH_SIZE = 1
DEFAULT_OPT_BATCH_SIZE = 1
DEFAULT_MIN_BATCH_SIZE = 1
def build_command_line_parser(parser=None):
"""Simple function to parse arguments."""
if parser is None:
parser = argparse.ArgumentParser(description='Export a TLT model.')
parser.add_argument("-m",
"--model",
help="Path to the model file.",
type=str,
required=True,
default=None)
parser.add_argument("-k",
"--key",
help="Key to load the model.",
type=str,
required=False,
default="")
parser.add_argument("-o",
"--output_file",
type=str,
default=None,
help="Output file. Defaults to $(input_filename).$(backend)")
parser.add_argument("--force_ptq",
action="store_true",
default=False,
# help="Flag to force post training quantization for QAT models.")
help=argparse.SUPPRESS)
# Int8 calibration arguments.
parser.add_argument("--cal_data_file",
default="",
type=str,
# help="Tensorfile to run calibration for int8 optimization.")
help=argparse.SUPPRESS)
parser.add_argument("--cal_image_dir",
default="",
type=str,
# help="Directory of images to run int8 calibration if "
# "data file is unavailable")
help=argparse.SUPPRESS)
parser.add_argument("--cal_json_file",
default="",
type=str,
help="Dictionary containing tensor scale for QAT models.")
parser.add_argument("--data_type",
type=str,
default="fp32",
help=argparse.SUPPRESS,
# help="Data type for the TensorRT export.",
choices=["fp32", "fp16", "int8"])
parser.add_argument("-s",
"--strict_type_constraints",
action="store_true",
default=False,
# help="Apply TensorRT strict_type_constraints or not for INT8 mode.")
help=argparse.SUPPRESS)
parser.add_argument("--gen_ds_config",
action="store_true",
default=False,
help="Generate a template DeepStream related configuration elements. "
"This config file is NOT a complete configuration file and requires "
"the user to update the sample config files in DeepStream with the "
"parameters generated from here.")
parser.add_argument('--cal_cache_file',
default='./cal.bin',
type=str,
# help='Calibration cache file to write to.')
help=argparse.SUPPRESS)
parser.add_argument("--batches",
type=int,
default=10,
# help="Number of batches to calibrate over.")
help=argparse.SUPPRESS)
parser.add_argument("--max_workspace_size",
type=int,
default=DEFAULT_MAX_WORKSPACE_SIZE,
# help="Max size of workspace to be set for TensorRT engine builder.")
help=argparse.SUPPRESS)
parser.add_argument("--max_batch_size",
type=int,
default=DEFAULT_MAX_BATCH_SIZE,
# help="Max batch size for TensorRT engine builder.")
help=argparse.SUPPRESS)
parser.add_argument("--batch_size",
type=int,
default=16,
# help="Number of images per batch.")
help=argparse.SUPPRESS)
parser.add_argument("--min_batch_size",
type=int,
default=DEFAULT_MIN_BATCH_SIZE,
# help="Min batch size for TensorRT engine builder.")
help=argparse.SUPPRESS)
parser.add_argument("--opt_batch_size",
type=int,
default=DEFAULT_OPT_BATCH_SIZE,
# help="Opt batch size for TensorRT engine builder.")
help=argparse.SUPPRESS)
parser.add_argument("--onnx_route",
type=str,
default="keras2onnx",
help=argparse.SUPPRESS)
parser.add_argument("-e",
"--experiment_spec",
type=str,
default=None,
help="Path to the experiment spec file.")
parser.add_argument("--engine_file",
type=str,
default=None,
# help="Path to the exported TRT engine.")
help=argparse.SUPPRESS)
parser.add_argument("--static_batch_size",
type=int,
default=-1,
help=(
"Set a static batch size for exported etlt model. "
"Default is -1(dynamic batch size)."
"This option is only relevant for ONNX based model."
))
parser.add_argument("--target_opset",
type=int,
default=12,
help="Target opset for ONNX models.")
parser.add_argument("--results_dir",
type=str,
default=None,
help="Path to the files where the logs are stored.")
parser.add_argument("-v",
"--verbose",
action="store_true",
default=False,
help="Verbosity of the logger.")
return parser
def parse_command_line(args=None):
"""Simple function to parse command line arguments."""
parser = build_command_line_parser()
return vars(parser.parse_known_args(args)[0])
def run_export(Exporter, args, backend="uff"):
"""Wrapper to run export of tlt models.
Args:
Exporter(object): The exporter class instance.
args (dict): Dictionary of parsed arguments to run export.
backend(str): Exported model backend, either 'uff' or 'onnx'.
Returns:
No explicit returns.
"""
# Parsing command line arguments.
model_path = args['model']
key = args['key']
# Calibrator configuration.
cal_cache_file = args['cal_cache_file']
cal_image_dir = args['cal_image_dir']
cal_data_file = args['cal_data_file']
batch_size = args['batch_size']
n_batches = args['batches']
data_type = args['data_type']
strict_type = args['strict_type_constraints']
output_file = args['output_file']
experiment_spec = args['experiment_spec']
engine_file_name = args['engine_file']
max_workspace_size = args["max_workspace_size"]
max_batch_size = args["max_batch_size"]
static_batch_size = args["static_batch_size"]
target_opset = args["target_opset"]
force_ptq = args["force_ptq"]
gen_ds_config = args["gen_ds_config"]
min_batch_size = args["min_batch_size"]
opt_batch_size = args["opt_batch_size"]
cal_json_file = args.get("cal_json_file", None)
# This parameter is only relevant for classification.
classmap_file = args.get("classmap_json", None)
# Status logger for the UI. By default this will be populated in /workspace/logs.
results_dir = args.get("results_dir", None)
onnx_route = args.get("onnx_route", "keras2onnx")
    # Warn if static_batch_size != -1: the provided static batch size overrides
    # max_batch_size and is also used as the calibration batch size.
if static_batch_size != -1:
logger.warning("If you set static batch size for your ONNX, "
"the calibration batch size will also be the "
"static batch size you provided.")
if results_dir is not None:
if not os.path.exists(results_dir):
os.makedirs(results_dir)
timestamp = int(dt.timestamp(dt.now()))
filename = "status.json"
if results_dir == "/workspace/logs":
filename = f"status_export_{timestamp}.json"
status_file = os.path.join(results_dir, filename)
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=True
)
)
status_logger = status_logging.get_status_logger()
save_engine = False
if engine_file_name is not None:
save_engine = True
log_level = "INFO"
if args['verbose']:
log_level = "DEBUG"
# Configure the logger.
logging.basicConfig(
format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level=log_level)
# Set default output filename if the filename
# isn't provided over the command line.
if output_file is None:
split_name = os.path.splitext(model_path)[0]
output_file = f"{split_name}.{backend}"
    if backend not in output_file:
output_file = f"{output_file}.{backend}"
logger.info("Saving exported model to {}".format(output_file))
# Warn the user if an exported file already exists.
assert not os.path.exists(output_file), "Default output file {} already "\
"exists".format(output_file)
# Make an output directory if necessary.
output_root = os.path.dirname(os.path.realpath(output_file))
if not os.path.exists(output_root):
os.makedirs(output_root)
# Build exporter instance
status_logger.write(message="Building exporter object.")
exporter = Exporter(model_path, key,
backend=backend,
experiment_spec_path=experiment_spec,
data_type=data_type,
strict_type=strict_type,
classmap_file=classmap_file,
target_opset=target_opset,
onnx_route=onnx_route)
exporter.set_session()
exporter.set_keras_backend_dtype()
# Export the model to etlt file and build the TRT engine.
status_logger.write(message="Exporting the model.")
exporter.export(output_file,
backend,
data_file_name=cal_data_file,
calibration_cache=os.path.realpath(cal_cache_file),
n_batches=n_batches,
batch_size=batch_size,
save_engine=save_engine,
engine_file_name=engine_file_name,
calibration_images_dir=cal_image_dir,
calib_json_file=cal_json_file,
max_batch_size=max_batch_size,
min_batch_size=min_batch_size,
opt_batch_size=opt_batch_size,
static_batch_size=static_batch_size,
max_workspace_size=max_workspace_size,
force_ptq=force_ptq,
gen_ds_config=gen_ds_config)
def launch_export(Exporter, args=None, backend="uff"):
"""CLI wrapper to run export.
This function should be included inside package scripts/export.py
# import build_command_line_parser as this is needed by entrypoint
from nvidia_tao_tf1.cv.common.export.app import build_command_line_parser # noqa pylint: disable=W0611
from nvidia_tao_tf1.cv.common.export.app import launch_export
from nvidia_tao_tf1.cv.X.export.X_exporter import XExporter as Exporter
if __name__ == "__main__":
launch_export(Exporter)
"""
args = parse_command_line(args)
run_export(Exporter, args, backend)
def main():
"""Raise deprecation warning."""
raise DeprecationWarning(
"This command has been deprecated in this version of TLT. "
"Please run \n <model> export <cli_args>"
)
if __name__ == "__main__":
main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/export/app.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base calibrator class for TensorRT INT8 Calibration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import numpy as np
import pycuda.autoinit # noqa pylint: disable=unused-import
import pycuda.driver as cuda
# Simple helper class for calibration.
try:
import tensorrt as trt # noqa pylint: disable=W0611 pylint: disable=W0611
from nvidia_tao_tf1.cv.common.export.base_calibrator import BaseCalibrator
trt_available = True
except Exception as e:
logger = logging.getLogger(__name__)
logger.warning(
"Failed to import TensorRT package, exporting TLT to a TensorRT engine "
"will not be available."
)
trt_available = False
from nvidia_tao_tf1.cv.common.export.tensorfile import TensorFile
logger = logging.getLogger(__name__)
if trt_available:
class TensorfileCalibrator(BaseCalibrator):
"""Calibrator class."""
def __init__(self, data_filename, cache_filename,
n_batches, batch_size,
*args, **kwargs):
"""Init routine.
This inherits from ``nvidia_tao_tf1.cv.common.export.base_calibrator.BaseCalibrator``
to implement the calibration interface that TensorRT needs to
calibrate the INT8 quantization factors. The data source here is assumed
            to be a TensorFile as defined in nvidia_tao_tf1.cv.common.export.tensorfile, which
was pre-generated using the dataloader or nvidia_tao_tf1.cv.common.export.app.py
Args:
data_filename (str): ``TensorFile`` data file to use.
cache_filename (str): name of calibration file to read/write to.
n_batches (int): number of batches for calibrate for.
batch_size (int): batch size to use for calibration data.
"""
super(TensorfileCalibrator, self).__init__(
cache_filename,
n_batches, batch_size,
*args, **kwargs
)
self.instantiate_data_source(data_filename)
def instantiate_data_source(self, data_filename):
"""Simple function to instantiate the data_source of the dataloader.
Args:
data_filename (str): The path to the data file.
Returns:
No explicit returns.
"""
if os.path.exists(data_filename):
self._data_source = TensorFile(data_filename, "r")
else:
logger.info(
"A valid data source wasn't provided to the calibrator. "
"The calibrator will attempt to read from a cache file if provided."
)
def get_data_from_source(self):
"""Simple function to get data from the defined data_source."""
batch = np.array(self._data_source.read())
if batch is not None:
# <@vpraveen>: Disabling pylint error check on line below
# because of a python3 linting error. To be reverted when
# pylint/issues/3139 gets fixed.
batch_size = batch.shape[0] # pylint: disable=E1136
if batch_size < self._batch_size:
raise ValueError(
"Batch size yielded from data source {} < requested batch size "
"from calibrator {}".format(batch_size, self._batch_size)
)
batch = batch[:self._batch_size]
else:
raise ValueError(
"Batch wasn't yielded from the data source. You may have run "
"out of batches. Please set the num batches accordingly")
return batch
def get_batch(self, names):
"""Return one batch.
Args:
names (list): list of memory bindings names.
"""
if self._batch_count < self._n_batches:
batch = self.get_data_from_source()
if batch is not None:
if self._data_mem is None:
# 4 bytes per float32.
self._data_mem = cuda.mem_alloc(batch.size * 4)
self._batch_count += 1
# Transfer input data to device.
cuda.memcpy_htod(self._data_mem, np.ascontiguousarray(
batch, dtype=np.float32))
return [int(self._data_mem)]
if self._data_mem is not None:
self._data_mem.free()
return None
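    # Construction sketch (paths are illustrative): the calibrator is typically handed
    # to the TensorRT builder config for INT8 calibration.
    #   calibrator = TensorfileCalibrator("calibration.tensorfile", "cal.bin",
    #                                     n_batches=10, batch_size=8)
    #   builder_config.int8_calibrator = calibrator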
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/export/tensorfile_calibrator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple script to test export tools(tensorrt, uff, graphsurgeon)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import graphsurgeon as gs
import tensorrt as trt
import uff
# Check gs has the create_plugin_node method
def test_gs_create_plugin_node():
n = gs.create_plugin_node(name='roi_pooling_conv_1/CropAndResize_new',
op="CropAndResize",
inputs=['activation_13/Relu', 'proposal'],
crop_height=7,
crop_width=7)
assert n
# Check the TRT version
def test_trt_version():
assert trt.__version__ == '8.5.1.7'
# Check the UFF version
def test_uff_version():
assert uff.__version__ == '0.6.7'
# Check the gs version
def test_gs_version():
assert gs.__version__ == '0.4.1'
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/export/tests/test_export_tools.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logger class for TLT IVA models."""
from abc import abstractmethod
import atexit
from datetime import datetime
import json
import logging
import os
logger = logging.getLogger(__name__)
class Verbosity():
"""Verbosity levels."""
DISABLE = 0
DEBUG = 10
INFO = 20
WARNING = 30
ERROR = 40
CRITICAL = 50
# Defining a log level to name dictionary.
log_level_to_name = {
Verbosity.DISABLE: "DISABLE",
Verbosity.DEBUG: 'DEBUG',
Verbosity.INFO: 'INFO',
Verbosity.WARNING: 'WARNING',
Verbosity.ERROR: 'ERROR',
Verbosity.CRITICAL: 'CRITICAL'
}
class Status():
"""Status levels."""
SUCCESS = 0
FAILURE = 1
STARTED = 2
RUNNING = 3
SKIPPED = 4
status_level_to_name = {
Status.SUCCESS: 'SUCCESS',
Status.FAILURE: 'FAILURE',
Status.STARTED: 'STARTED',
Status.RUNNING: 'RUNNING',
Status.SKIPPED: 'SKIPPED'
}
class BaseLogger(object):
"""File logger class."""
def __init__(self, is_master=False, verbosity=Verbosity.DISABLE):
"""Base logger class."""
self.is_master = is_master
self.verbosity = verbosity
self.categorical = {}
self.graphical = {}
self.kpi = {}
@property
def date(self):
"""Get date from the status."""
date_time = datetime.now()
date_object = date_time.date()
return "{}/{}/{}".format(
date_object.month,
date_object.day,
date_object.year
)
@property
def time(self):
"""Get date from the status."""
date_time = datetime.now()
time_object = date_time.time()
return "{}:{}:{}".format(
time_object.hour,
time_object.minute,
time_object.second
)
@property
def categorical(self):
"""Categorical data to be logged."""
return self._categorical
@categorical.setter
def categorical(self, value: dict):
"""Set categorical data to be logged."""
self._categorical = value
@property
def graphical(self):
"""Graphical data to be logged."""
return self._graphical
@graphical.setter
def graphical(self, value: dict):
"""Set graphical data to be logged."""
self._graphical = value
@property
def kpi(self):
"""Set KPI data."""
return self._kpi
@kpi.setter
def kpi(self, value: dict):
"""Set KPI data."""
self._kpi = value
def flush(self):
"""Flush the logger."""
pass
    def format_data(self, data: dict):
        """Format a dictionary of log data as a printable string."""
        if not isinstance(data, dict):
            return str(data)
        data_string = []
        for key, value in data.items():
            if isinstance(value, dict):
                data_string.append(f"{key}: {self.format_data(value)}")
            else:
                data_string.append(f"{key}: {value}")
        return ", ".join(data_string)
def log(self, level, string):
"""Log the data string."""
if level >= self.verbosity:
logging.log(level, string)
@abstractmethod
def write(self, data=None,
status_level=Status.RUNNING,
verbosity_level=Verbosity.INFO,
message=None):
"""Write data out to the log file."""
if self.verbosity > Verbosity.DISABLE:
if not data:
data = {}
# Define generic data.
data["date"] = self.date
data["time"] = self.time
data["status"] = status_level_to_name.get(status_level, "RUNNING")
data["verbosity"] = log_level_to_name.get(verbosity_level, "INFO")
if message:
data["message"] = message
logging.log(verbosity_level, message)
if self.categorical:
data["categorical"] = self.categorical
if self.graphical:
data["graphical"] = self.graphical
if self.kpi:
data["kpi"] = self.kpi
data_string = self.format_data(data)
if self.is_master:
self.log(verbosity_level, data_string)
self.flush()
class StatusLogger(BaseLogger):
"""Simple logger to save the status file."""
def __init__(self, filename=None,
is_master=False,
verbosity=Verbosity.INFO,
append=True):
"""Logger to write out the status."""
super().__init__(is_master=is_master, verbosity=verbosity)
self.log_path = os.path.realpath(filename)
self.l_file = None
if os.path.exists(self.log_path):
logger.info("Log file already exists at {}".format(self.log_path))
else:
log_dir = os.path.dirname(self.log_path)
if not os.path.exists(log_dir) and is_master:
os.makedirs(log_dir, exist_ok=True)
if is_master:
self.l_file = open(self.log_path, "a" if append else "w")
atexit.register(self.l_file.close)
def log(self, level, string):
"""Log the data string."""
if level >= self.verbosity:
self.l_file.write(string + "\n")
def flush(self):
"Flush contents of the log file."
if self.is_master:
self.l_file.flush()
@staticmethod
def format_data(data):
"""Format the dictionary data."""
if not isinstance(data, dict):
raise TypeError(f"Data must be a dictionary and not type {type(data)}.")
data_string = json.dumps(data)
return data_string
# Define the logger here so it's static.
_STATUS_LOGGER = BaseLogger()
def set_status_logger(status_logger):
"""Set the status logger.
Args:
status_logger: An instance of the logger class.
"""
global _STATUS_LOGGER # pylint: disable=W0603
_STATUS_LOGGER = status_logger
def get_status_logger():
"""Get the status logger."""
global _STATUS_LOGGER # pylint: disable=W0602,W0603
return _STATUS_LOGGER
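# Typical wiring (the path below is illustrative): a task sets a StatusLogger once at
# startup and other modules retrieve it through get_status_logger().
#   set_status_logger(StatusLogger(filename="/results/status.json", is_master=True))
#   s_logger = get_status_logger()
#   s_logger.write(message="Starting training.", status_level=Status.STARTED)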
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/logging/logging.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logger for TLT IVA models."""
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/logging/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base visualizer element defining basic elements."""
from contextlib import contextmanager
import logging
logger = logging.getLogger(__name__)
class Descriptor(object):
"""Descriptor setter and getter for class properties."""
def __init__(self, attr):
"""Constructor for the descriptor."""
self.attr = attr
def __get__(self, instance, owner):
"""Getter of the property.
Checks that the Visualizer is actually built before returning the value.
Args:
instance: Instance of the Visualizer (Will be None, because the
descriptor is called for the class instead of an instance).
owner: The owner class.
"""
if not owner._built:
raise RuntimeError(f"The {type(owner)} wasn't built.")
return getattr(owner, self.attr)
class BaseVisualizer(object):
"""Base visualizer class for TAO-TF."""
    def __init_subclass__(cls, **kwargs):
        """Set default class attributes for visualizer subclasses."""
cls._enabled = False
cls._num_images = 3
cls._built = False
@classmethod
def build(cls, enabled, num_images):
"""Build the visualizer."""
cls._enabled = enabled
cls._num_images = num_images
cls._built = True
enabled = Descriptor("_enabled")
num_images = Descriptor("_num_images")
built = Descriptor("_built")
@classmethod
@contextmanager
def disable(cls):
"""Context manager for temporarily disabling the visualizations."""
if not cls._built:
raise RuntimeError("Visualizer was not built to disabled.")
old_state = cls._enabled
cls._enabled = False
yield
cls._enabled = old_state
@classmethod
def image(cls, tensor_name, tensor_value, value_range=None):
"""Visualizer function to render image."""
raise NotImplementedError("This method is not implemented in the base class.")
@classmethod
def histogram(cls, tensor_name, tensor_value):
"""Visualize histogram for a given tensor."""
raise NotImplementedError("This method hasn't been implemented in the base class.")
@classmethod
def scalar(cls, tensor_name, tensor_value):
"""Render a scalar in the visualizer."""
raise NotImplementedError("This method hasn't been implemented in the base class.")
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/visualizer/base_visualizer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DetectNet V2 model visualization routines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/visualizer/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DetectNet V2 visualization utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import keras
import tensorflow as tf
from nvidia_tao_tf1.cv.common.visualizer.base_visualizer import BaseVisualizer, Descriptor
logger = logging.getLogger(__name__)
class TensorBoardVisualizer(BaseVisualizer):
"""Visualizer implemented as a static class."""
backend = Descriptor("_backend")
@classmethod
def build(cls, enabled, num_images):
"""Build the TensorBoard Visualizer.
Args:
enabled (bool): Boolean value to show that the Visualizer was enabled.
num_images (int): Number of images to be visualized.
"""
cls._backend = "tensorboard"
super().build(
enabled, num_images=num_images
)
@classmethod
def build_from_config(cls, visualizer_config):
"""Build visualizer from config.
Arguments:
visualizer_config (visualizer_config_pb2.VisualizerConfig).
"""
enabled = visualizer_config.enabled
num_images = visualizer_config.num_images
cls.build(
enabled,
num_images
)
@classmethod
def image(cls, name, value, value_range=None,
data_format='channels_first', collections=None):
"""Add a 4D tensor to Tensorboard.
Args:
name (string): Image name.
value: 4D tensor in NCHW or NHWC format.
value_range (float, float): Black and white-point for mapping value to color.
None means use TF automatic scaling
data_format (string): Format of the input values. Must be either 'channels_first' (NCHW)
or 'channels_last' (NHWC).
collections: Optional list of ops.GraphKeys. The collections to add the summary to.
"""
if cls.enabled and cls.num_images > 0:
tensor_name = f"image/{name}"
            # Optionally apply scaling and offsetting and cast to uint8.
# By default rely on Tensorflow normalization.
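            # For example (illustrative), value_range=(-1.0, 1.0) maps -1.0 to 0 and
            # 1.0 to 255 before the cast to uint8.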
if value_range is not None:
# Compute scale and offset such that black-point maps to 0.0
# and white-point maps to 255.0
black, white = value_range
scale = 255.0 / (white - black)
offset = 255.0 * black / (black - white)
value = tf.cast(tf.clip_by_value(value * scale + offset, 0., 255.), tf.uint8)
# Images must be in NHWC format. Convert as needed.
if data_format == 'channels_first':
value = tf.transpose(value, (0, 2, 3, 1))
tf.summary.image(tensor_name, value[:, :, :, :3], cls.num_images,
collections=collections)
@classmethod
def histogram(cls, tensor_name, tensor_value, collections=None):
"""Visualize histogram for a given tensor."""
tensor_name = f"histogram/{tensor_name}"
tf.summary.histogram(name=tensor_name, values=tensor_value, collections=collections)
@classmethod
def keras_model_weight_histogram(cls, keras_model, collections=None):
"""Add model weight histogram to tensorboard summary.
Args:
            keras_model: Keras model whose weights will be summarized.
collections: Optional list of ops.GraphKeys. The collections to add the summary to.
"""
for layer in keras_model.layers:
if isinstance(layer, keras.engine.training.Model):
cls.keras_model_weight_histogram(layer, collections)
if isinstance(layer, keras.layers.convolutional.DepthwiseConv2D):
# Plot histogram of conv layer weight.
cls.histogram(
tensor_name=f'weight/{layer.name}',
tensor_value=layer.depthwise_kernel,
collections=collections
)
elif isinstance(layer, keras.layers.convolutional.Conv2D):
# Plot histogram of conv layer weight.
cls.histogram(
tensor_name=f'weight/{layer.name}',
tensor_value=layer.kernel,
collections=collections
)
elif isinstance(layer, keras.layers.normalization.BatchNormalization):
# Plot histogram of gamma and beta.
cls.histogram(
tensor_name=f'gamma/{layer.name}',
tensor_value=layer.gamma,
collections=collections
)
cls.histogram(
tensor_name=f'beta/{layer.name}',
tensor_value=layer.beta,
collections=collections
)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/visualizer/tensorboard_visualizer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test visualizations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pytest
from nvidia_tao_tf1.cv.common.proto.visualizer_config_pb2 import VisualizerConfig
from nvidia_tao_tf1.cv.common.visualizer.tensorboard_visualizer import \
TensorBoardVisualizer as Visualizer
def test_build_visualizer():
"""Test visualizer config parsing."""
config = VisualizerConfig()
# Default values should pass.
Visualizer.build_from_config(config)
config.enabled = True
config.num_images = 3
Visualizer.build_from_config(config)
assert Visualizer.enabled is True
assert Visualizer.num_images == 3
def test_nonbuilt_visualizer():
"""Test that the visualizer needs to be built."""
Visualizer._built = False
with pytest.raises(RuntimeError):
Visualizer.enabled
def test_singleton_behavior():
"""Test the visualizer context handler for disabling visualizations."""
config = VisualizerConfig()
config.enabled = False
Visualizer.build_from_config(config)
# Disabling a disabled visualizer should keep it disabled.
with Visualizer.disable():
assert not Visualizer.enabled, "Visualizer is enabled with Visualizer().disabled."
assert not Visualizer.enabled, \
"Disabled Visualizer is enabled after returning from disabled state."
# Enable the visualizer and check the Disabling context manager.
config.enabled = True
Visualizer.build_from_config(config)
with Visualizer.disable():
assert not Visualizer.enabled, "Visualizer is enabled with Visualizer.disabled."
assert Visualizer.enabled, "Visualizer is not enabled after returning from disabled state."
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/common/visualizer/tests/test_visualizer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IVA RetinaNet root module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/__init__.py |
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/losses/__init__.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Focal Loss for training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def smooth_L1_loss(y_true, y_pred):
'''
Compute smooth L1 loss, see references.
Arguments:
y_true (nD tensor): A TensorFlow tensor of any shape containing the ground truth data.
In this context, the expected tensor has shape `(batch_size, #boxes, 4)` and
contains the ground truth bounding box coordinates, where the last dimension
contains `(d_cx, d_cy, log_w, log_h)`.
y_pred (nD tensor): A TensorFlow tensor of identical structure to `y_true` containing
the predicted data, in this context the predicted bounding box coordinates.
Returns:
The smooth L1 loss, a nD-1 Tensorflow tensor. In this context a 2D tensor
of shape (batch, n_boxes_total).
References:
https://arxiv.org/abs/1504.08083
'''
absolute_loss = tf.abs(y_true - y_pred)
square_loss = 0.5 * (y_true - y_pred)**2
l1_loss = tf.where(tf.less(absolute_loss, 1.0), square_loss, absolute_loss - 0.5)
return tf.reduce_sum(l1_loss, axis=-1)
def bce_focal_loss(y_true, y_pred, alpha, gamma):
'''
Compute the bce focal loss.
Arguments:
y_true (nD tensor): A TensorFlow tensor of any shape containing the ground truth data.
In this context, the expected tensor has shape (batch_size, #boxes, #classes)
and contains the ground truth bounding box categories.
y_pred (nD tensor): A TensorFlow tensor of identical structure to `y_true` containing
the predicted data, in this context the predicted bounding box categories.
Returns:
The binary cross-entropy focal loss, a nD-1 Tensorflow tensor. In this context a 2D tensor
of shape (batch, n_boxes_total).
'''
# Compute the log loss
bce_loss = -(y_true * tf.log(tf.maximum(y_pred, 1e-18)) +
(1.0-y_true) * tf.log(tf.maximum(1.0-y_pred, 1e-18)))
p_ = (y_true * y_pred) + (1.0-y_true) * (1.0-y_pred)
modulating_factor = tf.pow(1.0 - p_, gamma)
weight_factor = (y_true * alpha + (1.0 - y_true) * (1.0-alpha))
focal_loss = modulating_factor * weight_factor * bce_loss
return tf.reduce_sum(focal_loss, axis=-1)
class FocalLoss:
'''
Focal Loss class.
Focal loss, see https://arxiv.org/abs/1708.02002
'''
def __init__(self,
loc_loss_weight=1.0,
alpha=0.25,
gamma=2.0):
'''Loss init function.'''
self.loc_loss_weight = loc_loss_weight
self.alpha = alpha
self.gamma = gamma
def compute_loss(self, y_true, y_pred):
'''
Compute the loss of the RetinaNet model prediction against the ground truth.
Arguments:
y_true (array): A Numpy array of shape `(batch_size, #boxes, 1 + #classes + 12)`,
where `#boxes` is the total number of boxes that the model predicts
per image. Be careful to make sure that the index of each given
box in `y_true` is the same as the index for the corresponding
box in `y_pred`. The last axis must have length `1 + #classes + 12` and contain
`[class_weights, classes one-hot encoded, 4 gt box coordinate offsets,
8 arbitrary entries]`
in this order, including the background class. The last eight entries of the
last axis are not used by this function and therefore their contents are
irrelevant, they only exist so that `y_true` has the same shape as `y_pred`,
where the last four entries of the last axis contain the anchor box
coordinates, which are needed during inference. Important: Boxes that
you want the cost function to ignore need to have a one-hot
class vector of all zeros.
y_pred (Keras tensor): The model prediction of shape `(batch_size, #boxes, #classes + 12)`,
i.e. the same layout as `y_true` without the leading class-weight entry.
The last axis must contain entries in the format
`[classes one-hot encoded, 4 predicted box offsets, 8 arbitrary entries]`.
Returns:
A scalar, the total multitask loss for classification and localization.
'''
batch_size = tf.shape(y_pred)[0] # Output dtype: tf.int32
# 1: Compute the losses for class and box predictions for every box.
class_weights = y_true[:, :, 0]
classification_loss = tf.dtypes.cast(bce_focal_loss(y_true[:, :, 2:-12],
y_pred[:, :, 1:-12],
self.alpha, self.gamma),
tf.float32)
localization_loss = tf.dtypes.cast(smooth_L1_loss(y_true[:, :, -12:-8],
y_pred[:, :, -12:-8]), tf.float32)
# 2: Compute the classification losses for the positive and negative targets.
# Create masks for the positive and negative ground truth classes.
# Tensor of shape (batch_size, n_boxes)
positives = tf.dtypes.cast(tf.reduce_max(y_true[:, :, 2:-12], axis=-1), tf.float32)
non_neutral = tf.dtypes.cast(tf.reduce_max(y_true[:, :, 1:-12], axis=-1), tf.float32)
# Count the number of positive boxes (classes 1 to n) in y_true across the whole batch.
n_positive = tf.reduce_sum(positives)
class_loss = tf.reduce_sum(classification_loss * non_neutral * class_weights, axis=-1)
# 3: Compute the localization loss for the positive targets.
# We don't compute a localization loss for negative predicted boxes
# (obviously: there are no ground truth boxes they would correspond to).
# Shape (batch_size,)
loc_loss = tf.reduce_sum(localization_loss * positives * class_weights, axis=-1)
# 4: Compute the total loss.
# In case `n_positive == 0`
total_loss = (class_loss + self.loc_loss_weight *
loc_loss) / tf.maximum(1.0, n_positive)
# Keras divides the loss by the batch size, which is not what we want here, because the
# relevant criterion to average our loss over is the number of positive boxes in the batch
# (by which we're dividing in the line above), not the batch size. So in order to revert
# Keras' averaging over the batch size, we'll have to multiply by it.
total_loss = total_loss * tf.dtypes.cast(batch_size, tf.float32)
return total_loss
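# ----------------------------------------------------------------------------
# Quick numeric sanity check (a sketch; the tensor values below are made up
# for illustration). With 2 classes, the y_true layout per anchor is
# [class_weight, 2 one-hot classes, 4 offsets, 8 anchor/variance entries];
# y_pred drops the leading class_weight entry.
if __name__ == '__main__':
    loss_fn = FocalLoss(loc_loss_weight=1.0, alpha=0.25, gamma=2.0)
    y_true = [[[1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.1, 0.1, 0.4, 0.4, 0.1, 0.1, 0.2, 0.2]]]
    # A confident, correct class prediction with perfect offsets -> loss close to 0.
    y_pred_good = [[[0.0, 0.999, 0.0, 0.0, 0.0, 0.0, 0.1, 0.1, 0.4, 0.4, 0.1, 0.1, 0.2, 0.2]]]
    # An unconfident prediction on the same anchor -> larger focal loss.
    y_pred_bad = [[[0.0, 0.3, 0.0, 0.0, 0.0, 0.0, 0.1, 0.1, 0.4, 0.4, 0.1, 0.1, 0.2, 0.2]]]
    with tf.Session() as sess:
        good = sess.run(loss_fn.compute_loss(tf.constant(y_true), tf.constant(y_pred_good)))
        bad = sess.run(loss_fn.compute_loss(tf.constant(y_true), tf.constant(y_pred_bad)))
        print(good, bad)  # expect good < bad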
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/losses/focal_loss.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test focal loss."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nvidia_tao_tf1.cv.retinanet.losses.focal_loss import bce_focal_loss
from nvidia_tao_tf1.cv.retinanet.losses.focal_loss import FocalLoss
from nvidia_tao_tf1.cv.retinanet.losses.focal_loss import smooth_L1_loss
def test_loss_zero():
focal_loss = FocalLoss(1.0, 0.25, 2.0)
y_true = [[[1.0, 1, 0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.1, 0.2, 0.2]]]
y_pred = [[[1, 0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.1, 0.2, 0.2]]]
with tf.Session() as sess:
assert abs(sess.run(focal_loss.compute_loss(tf.constant(y_true),
tf.constant(y_pred)))[0]) < 1e-5
def test_loss_non_zero_loc():
focal_loss = FocalLoss(1.0, 0.25, 2.0)
y_true = [[[1.0, 0, 1, 0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.1, 0.2, 0.2]]]
y_pred = [[[0, 1, 0, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.1, 0.2, 0.2]]]
with tf.Session() as sess:
bce_loss = sess.run(bce_focal_loss(tf.constant(y_true)[:, :, 2:-12],
tf.constant(y_pred)[:, :, 1:-12], 0.25, 2.0))
loc_loss = sess.run(smooth_L1_loss(tf.constant(y_true)[:, :, -12:-8],
tf.constant(y_pred)[:, :, -12:-8]))
total_loss = sess.run(focal_loss.compute_loss(tf.constant(y_true), tf.constant(y_pred)))
assert abs(bce_loss[0]) < 1e-5
assert abs(total_loss[0] - loc_loss[0]) < 1e-5
assert abs(total_loss[0] - 0.00125) < 1e-5
def test_loss_non_zero():
focal_loss = FocalLoss(1.0, 0.25, 2.0)
y_true = [[[1.0, 0, 1, 0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.1, 0.2, 0.2]]]
y_pred = [[[0, 0.3, 0, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.1, 0.2, 0.2]]]
with tf.Session() as sess:
bce_loss = sess.run(bce_focal_loss(tf.constant(y_true)[:, :, 2:-12],
tf.constant(y_pred)[:, :, 1:-12], 0.25, 2.0))
loc_loss = sess.run(smooth_L1_loss(tf.constant(y_true)[:, :, -12:-8],
tf.constant(y_pred)[:, :, -12:-8]))
total_loss = sess.run(focal_loss.compute_loss(tf.constant(y_true), tf.constant(y_pred)))
assert abs(bce_loss[0] - 0.1474866) < 1e-5
assert abs(total_loss[0] - bce_loss[0] - loc_loss[0]) < 1e-5
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/losses/tests/test_losses.py |
"""RetinaNet entry point."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from maglev_sdk.docker_container.entrypoint import main
if __name__ == '__main__':
main('retinanet', 'nvidia_tao_tf1/cv/retinanet/scripts')
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/docker/retinanet.py |
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/dataio/__init__.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TLT RetinaNet data sequence."""
import numpy as np
from nvidia_tao_tf1.cv.common.dataio.detection_data_sequence import DetectionDataSequence
from nvidia_tao_tf1.cv.ssd.builders.data_generator.data_augmentation_chain_original_ssd import \
SSDDataAugmentation
from nvidia_tao_tf1.cv.ssd.builders.data_generator.object_detection_2d_geometric_ops import Resize
class RetinaKittiDataSequence(DetectionDataSequence):
"""RetinaNet data loader for data with KITTI format."""
def __init__(self, dataset_config, *args, **kwargs):
"""Init function."""
super().__init__(dataset_config=dataset_config, *args, **kwargs)
self.output_height = self.augmentation_config.output_height
self.output_width = self.augmentation_config.output_width
# mapping class to 1-based integer
mapping_dict = dataset_config.target_class_mapping
classes = sorted({str(x).lower() for x in mapping_dict.values()})
self.classes = dict(
zip(classes, range(1, len(classes) + 1)))
self.class_mapping = {key.lower(): self.classes[str(val.lower())]
for key, val in mapping_dict.items()}
def set_encoder(self, encode_fn):
'''Set label encoder.'''
self.encode_fn = encode_fn
def _load_gt_label(self, label_path):
"""Load Kitti labels.
Returns:
[class_idx, is_difficult, x_min, y_min, x_max, y_max]
"""
entries = open(label_path, 'r').read().strip().split('\n')
results = []
for entry in entries:
items = entry.strip().split()
if len(items) < 9:
continue
items[0] = items[0].lower()
if items[0] not in self.class_mapping:
continue
label = [self.class_mapping[items[0]], 1 if int(
items[2]) != 0 else 0, *items[4:8]]
results.append([float(x) for x in label])
return np.array(results).reshape(-1, 6)
def _preprocessing(self, image, label, output_img_size):
'''
SSD-style data augmentation is applied during training.
In the evaluation/inference phase, only resizing is performed.
'''
# initial SSD augmentation parameters:
if self.is_training:
augmentation_func = \
SSDDataAugmentation(img_height=self.augmentation_config.output_height,
img_width=self.augmentation_config.output_width,
rc_min=self.augmentation_config.random_crop_min_scale or 0.3,
rc_max=self.augmentation_config.random_crop_max_scale or 1.0,
rc_min_ar=self.augmentation_config.random_crop_min_ar or 0.5,
rc_max_ar=self.augmentation_config.random_crop_max_ar or 2.0,
zo_min=self.augmentation_config.zoom_out_min_scale or 1.0,
zo_max=self.augmentation_config.zoom_out_max_scale or 4.0,
b_delta=self.augmentation_config.brightness,
c_delta=self.augmentation_config.contrast,
s_delta=self.augmentation_config.saturation,
h_delta=self.augmentation_config.hue,
flip_prob=self.augmentation_config.random_flip,
background=(123.68, 116.779, 103.939))
else:
augmentation_func = Resize(height=self.augmentation_config.output_height,
width=self.augmentation_config.output_width)
if self.is_training:
bboxes = label[:, -4:]
cls_id = label[:, 0:1]
label = np.concatenate((cls_id, bboxes), axis=-1)
image, label = augmentation_func(image, label)
else:
bboxes = label[:, -4:]
cls_id = label[:, 0:1]
temp_label = np.concatenate((cls_id, bboxes), axis=-1)
image, temp_label = augmentation_func(image, temp_label)
# Finalize
label[:, -4:] = temp_label[:, -4:]
label = self._filter_invalid_labels(label)
if self.encode_fn is not None:
label = self.encode_fn(label)
return image, label
def _filter_invalid_labels(self, labels):
"""filter out invalid labels.
Args:
labels: size (N, 5) or (N, 6), where bboxes are in absolute pixel coordinates.
Returns:
labels: size (M, 5) or (M, 6), filtered bboxes with clipped coordinates.
"""
# clip
# -4 -3 -2 -1
x_coords = labels[:, [-4, -2]]
x_coords = np.clip(x_coords, 0, self.output_width - 1)
labels[:, [-4, -2]] = x_coords
y_coords = labels[:, [-3, -1]]
y_coords = np.clip(y_coords, 0, self.output_height - 1)
labels[:, [-3, -1]] = y_coords
# exclude invalid boxes
x_cond = labels[:, -2] - labels[:, -4] > 1e-3
y_cond = labels[:, -1] - labels[:, -3] > 1e-3
return labels[x_cond & y_cond]
def _get_single_item(self, idx, output_img_size):
"""Load and process single image and its label."""
image = self._load_gt_image(self.image_paths[self.data_inds[idx]])
label = self._load_gt_label(self.label_paths[self.data_inds[idx]])
return self._preprocessing(image, label, output_img_size)
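# ----------------------------------------------------------------------------
# Minimal usage sketch (assumptions: the spec path below is hypothetical,
# KITTI images/labels exist at the locations named in dataset_config, and the
# constructor takes the same keyword arguments as the COCO loader).
if __name__ == '__main__':
    from nvidia_tao_tf1.cv.retinanet.utils.spec_loader import load_experiment_spec

    spec = load_experiment_spec('/workspace/specs/retinanet_kitti.txt',  # hypothetical path
                                merge_from_default=False)
    loader = RetinaKittiDataSequence(dataset_config=spec.dataset_config,
                                     augmentation_config=spec.augmentation_config,
                                     batch_size=2,
                                     is_training=False,
                                     encode_fn=None)
    images, labels = loader.__getitem__(0)
    print(images.shape, len(labels))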
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/dataio/kitti_loader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TLT detection data sequence."""
import numpy as np
from nvidia_tao_tf1.cv.common.dataio.coco_data_sequence import CocoDataSequence
from nvidia_tao_tf1.cv.ssd.builders.data_generator.data_augmentation_chain_original_ssd import \
SSDDataAugmentation
from nvidia_tao_tf1.cv.ssd.builders.data_generator.object_detection_2d_geometric_ops import Resize
class RetinaCocoDataSequence(CocoDataSequence):
"""RetinaNet data loader for data with COCO format."""
def __init__(self, dataset_config, *args, **kwargs):
"""Class initialization."""
super().__init__(dataset_config=dataset_config, *args, **kwargs)
def _preprocessing(self, image, label, enable_mask=False):
'''
SSD-style data augmentation is applied during training.
In the evaluation/inference phase, only resizing is performed.
'''
# initialize augmentation
if self.is_training:
augmentation_func = \
SSDDataAugmentation(img_height=self.augmentation_config.output_height,
img_width=self.augmentation_config.output_width,
background=(123.68, 116.779, 103.939))
else:
augmentation_func = Resize(height=self.augmentation_config.output_height,
width=self.augmentation_config.output_width)
if self.is_training:
bboxes = label[:, -4:]
cls_id = label[:, 0:1]
label = np.concatenate((cls_id, bboxes), axis=-1)
image, label = augmentation_func(image, label)
else:
bboxes = label[:, -4:]
cls_id = label[:, 0:1]
temp_label = np.concatenate((cls_id, bboxes), axis=-1)
image, temp_label = augmentation_func(image, temp_label)
# Finalize
label[:, -4:] = temp_label[:, -4:]
label = self._filter_invalid_labels(label)
if self.encode_fn is not None:
label = self.encode_fn(label)
return image, label
def _filter_invalid_labels(self, labels):
"""filter out invalid labels.
Args:
labels: size (N, 5) or (N, 6), where bboxes are in absolute pixel coordinates.
Returns:
labels: size (M, 5) or (M, 6), filtered bboxes with clipped coordinates.
"""
# clip
# -4 -3 -2 -1
x_coords = labels[:, [-4, -2]]
x_coords = np.clip(x_coords, 0, self.output_width - 1)
labels[:, [-4, -2]] = x_coords
y_coords = labels[:, [-3, -1]]
y_coords = np.clip(y_coords, 0, self.output_height - 1)
labels[:, [-3, -1]] = y_coords
# exclude invalid boxes
x_cond = labels[:, -2] - labels[:, -4] > 1e-3
y_cond = labels[:, -1] - labels[:, -3] > 1e-3
return labels[x_cond & y_cond]
def _batch_post_processing(self, images, labels):
"""Post processing for a batch."""
images = np.array(images)
# RGB -> BGR, channels_last -> channels_first
images = images[..., [2, 1, 0]].transpose(0, 3, 1, 2)
# subtract imagenet mean
images -= np.array([[[[103.939]], [[116.779]], [[123.68]]]])
# try to make labels a numpy array
is_make_array = True
x_shape = None
for x in labels:
if not isinstance(x, np.ndarray):
is_make_array = False
break
if x_shape is None:
x_shape = x.shape
elif x_shape != x.shape:
is_make_array = False
break
if is_make_array:
labels = np.array(labels)
return images, labels
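# ----------------------------------------------------------------------------
# Self-contained sketch (illustrative values only) of the arithmetic performed
# by `_batch_post_processing`: RGB -> BGR channel reordering, NHWC -> NCHW
# transpose, and ImageNet mean subtraction.
if __name__ == '__main__':
    dummy_batch = np.full((2, 512, 512, 3), 128.0)               # RGB, channels_last
    bgr_chw = dummy_batch[..., [2, 1, 0]].transpose(0, 3, 1, 2)  # BGR, channels_first
    bgr_chw -= np.array([[[[103.939]], [[116.779]], [[123.68]]]])
    print(bgr_chw.shape)  # (2, 3, 512, 512)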
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/dataio/coco_loader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test COCO dataloader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pytest
from nvidia_tao_tf1.cv.retinanet.dataio.coco_loader import RetinaCocoDataSequence
from nvidia_tao_tf1.cv.retinanet.utils.spec_loader import load_experiment_spec
@pytest.mark.skipif(
os.getenv("RUN_ON_CI", "0") == "1",
reason="Cannot be run on CI"
)
def test_coco_dataloader():
bs = 2
training = False
file_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
spec_path = os.path.join(file_path, '../../retinanet/experiment_specs/default_spec.txt')
experiment_spec = load_experiment_spec(spec_path, merge_from_default=False)
dataset_config = experiment_spec.dataset_config
aug_config = experiment_spec.augmentation_config
d = RetinaCocoDataSequence(dataset_config=dataset_config,
augmentation_config=aug_config,
batch_size=bs, is_training=training,
encode_fn=None)
img, _ = d.__getitem__(0)
assert img.shape == (2, 3, 512, 512)
assert len(d.classes) == 80
assert d.n_samples == 5000
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/dataio/tests/test_coco_dl.py |
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/layers/__init__.py
'''
A custom Keras layer to generate anchor boxes.
Copyright (C) 2019 Pierluigi Ferrari
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from __future__ import division
import keras.backend as K
from keras.engine.topology import InputSpec, Layer
import numpy as np
import tensorflow as tf
from nvidia_tao_tf1.cv.ssd.utils.box_utils import np_convert_coordinates as convert_coordinates
class RetinaAnchorBoxes(Layer):
'''
RetinaNet AnchorBoxes layer.
A Keras layer to create an output tensor containing anchor box coordinates
and variances based on the input tensor and the passed arguments.
A set of 2D anchor boxes of different aspect ratios is created for each spatial unit of
the input tensor. The number of anchor boxes created per unit depends on the arguments
`aspect_ratios`, `two_boxes_for_ar1` and `n_anchor_levels`. The boxes
are parameterized by the coordinate tuple `(xmin, ymin, xmax, ymax)`.
The purpose of having this layer in the network is to make the model self-sufficient
at inference time. Since the model is predicting offsets to the anchor boxes
(rather than predicting absolute box coordinates directly), one needs to know the anchor
box coordinates in order to construct the final prediction boxes from the predicted offsets.
If the model's output tensor did not contain the anchor box coordinates, the necessary
information to convert the predicted offsets back to absolute coordinates would be missing
in the model output. The reason why it is necessary to predict offsets to the anchor boxes
rather than to predict absolute box coordinates directly is explained in `README.md`.
Input shape:
4D tensor of shape `(batch, channels, height, width)` if `dim_ordering = 'th'`
or `(batch, height, width, channels)` if `dim_ordering = 'tf'`.
Output shape:
5D tensor of shape `(batch, height, width, n_boxes, 8)`. The last axis contains
the four anchor box coordinates and the four variance values for each box.
'''
def __init__(self,
img_height,
img_width,
this_scale,
next_scale,
aspect_ratios=None,
two_boxes_for_ar1=True,
this_steps=None,
this_offsets=None,
clip_boxes=False,
variances=None,
n_anchor_levels=3,
**kwargs):
'''
Init function.
All arguments need to be set to the same values as in the box encoding process,
otherwise the behavior is undefined.
Arguments:
img_height (int): The height of the input images.
img_width (int): The width of the input images.
this_scale (float): A float in [0, 1], the scaling factor for the size of
the generated anchor boxes as a fraction of the shorter side of the input image.
next_scale (float): A float in [0, 1], the next larger scaling factor. Only
relevant if self.two_boxes_for_ar1 == True`.
aspect_ratios (list, optional): The list of aspect ratios for which default
boxes are to be generated for this layer.
two_boxes_for_ar1 (bool, optional): Only relevant if `aspect_ratios` contains 1.
If `True`, two default boxes will be generated for aspect ratio 1. The first
will be generated using the scaling factor for the respective layer, the second
one will be generated using geometric mean of said scaling factor and next bigger
scaling factor.
clip_boxes (bool, optional): If `True`, clips the anchor box coordinates to stay within
image boundaries.
variances (list, optional): A list of 4 floats >0. The anchor box offset for
each coordinate will be divided by its respective variance value.
'''
if K.backend() != 'tensorflow':
raise TypeError("This layer only supports TF at the moment, but you are using the {}."
.format(K.backend()))
if (this_scale < 0) or (next_scale < 0) or (this_scale > 1):
raise ValueError("`this_scale` must be in [0, 1] and `next_scale` must be >0, \
but `this_scale` == {}, `next_scale` == {}"
.format(this_scale, next_scale))
if len(variances) != 4:
raise ValueError("4 variance values must be pased, but {} values were received."
.format(len(variances)))
variances = np.array(variances)
if np.any(variances <= 0):
raise ValueError("All variances must be >0, but the variances given are {}"
.format(variances))
self.img_height = img_height
self.img_width = img_width
self.this_scale = this_scale
self.next_scale = next_scale
self.aspect_ratios = aspect_ratios
self.two_boxes_for_ar1 = two_boxes_for_ar1
self.this_steps = this_steps
self.this_offsets = this_offsets
self.clip_boxes = clip_boxes
self.variances = variances
self.n_anchor_levels = n_anchor_levels
# Compute the number of boxes per cell
if (1 in aspect_ratios) and two_boxes_for_ar1:
self.n_boxes = n_anchor_levels * (len(aspect_ratios) + 1)
else:
self.n_boxes = n_anchor_levels * len(aspect_ratios)
self.anchor_sizes = np.power(2, np.linspace(0, 1, 1+n_anchor_levels))[:-1]
super(RetinaAnchorBoxes, self).__init__(**kwargs)
def build(self, input_shape):
"""Layer build function."""
self.input_spec = [InputSpec(shape=input_shape)]
size = min(self.img_height, self.img_width)
# Compute the box widths and heights for all aspect ratios
wh_list = []
for scale_augment in self.anchor_sizes: # 2^0, 2^(1/3), 2^(2/3), as used in the paper
for ar in self.aspect_ratios:
if (ar == 1):
# Compute the regular anchor box for aspect ratio 1.
box_height = box_width = self.this_scale * size * scale_augment
wh_list.append((box_width, box_height))
if self.two_boxes_for_ar1:
# Compute one slightly larger version using the geometric mean.
box_height = box_width = np.sqrt(self.this_scale * self.next_scale) \
* size * scale_augment
wh_list.append((box_width, box_height))
else:
box_height = self.this_scale * size * scale_augment / np.sqrt(ar)
box_width = self.this_scale * size * scale_augment * np.sqrt(ar)
wh_list.append((box_width, box_height))
wh_list = np.array(wh_list)
_, _, feature_map_height, feature_map_width = input_shape
# Compute the grid of box center points. They are identical for all aspect ratios.
# Compute the step sizes
if (self.this_steps is None):
step_height = self.img_height / feature_map_height
step_width = self.img_width / feature_map_width
else:
if isinstance(self.this_steps, (list, tuple)) and (len(self.this_steps) == 2):
step_height = self.this_steps[0]
step_width = self.this_steps[1]
elif isinstance(self.this_steps, (int, float)):
step_height = self.this_steps
step_width = self.this_steps
# Compute the offsets.
if (self.this_offsets is None):
offset_height = 0.5
offset_width = 0.5
else:
if isinstance(self.this_offsets, (list, tuple)) and (len(self.this_offsets) == 2):
offset_height = self.this_offsets[0]
offset_width = self.this_offsets[1]
elif isinstance(self.this_offsets, (int, float)):
offset_height = self.this_offsets
offset_width = self.this_offsets
# Now that we have the offsets and step sizes, compute the grid of anchor box center points.
cy = np.linspace(offset_height * step_height,
(offset_height + feature_map_height - 1) * step_height, feature_map_height)
cx = np.linspace(offset_width * step_width,
(offset_width + feature_map_width - 1) * step_width, feature_map_width)
cx_grid, cy_grid = np.meshgrid(cx, cy)
cx_grid = np.expand_dims(cx_grid, -1) # This is necessary for np.tile()
cy_grid = np.expand_dims(cy_grid, -1) # This is necessary for np.tile()
# Create a 4D tensor template of shape `(feature_map_height, feature_map_width, n_boxes, 4)`
# where the last dimension will contain `(cx, cy, w, h)`
boxes_tensor = np.zeros((feature_map_height, feature_map_width, self.n_boxes, 4))
boxes_tensor[:, :, :, 0] = np.tile(cx_grid, (1, 1, self.n_boxes)) # Set cx
boxes_tensor[:, :, :, 1] = np.tile(cy_grid, (1, 1, self.n_boxes)) # Set cy
boxes_tensor[:, :, :, 2] = wh_list[:, 0] # Set w
boxes_tensor[:, :, :, 3] = wh_list[:, 1] # Set h
# Convert `(cx, cy, w, h)` to `(xmin, ymin, xmax, ymax)`
boxes_tensor = convert_coordinates(boxes_tensor, start_index=0,
conversion='centroids2corners')
# If `clip_boxes` is enabled, clip the coordinates to lie within the image boundaries
if self.clip_boxes:
x_coords = boxes_tensor[:, :, :, [0, 2]]
x_coords[x_coords >= self.img_width] = self.img_width - 1
x_coords[x_coords < 0] = 0
boxes_tensor[:, :, :, [0, 2]] = x_coords
y_coords = boxes_tensor[:, :, :, [1, 3]]
y_coords[y_coords >= self.img_height] = self.img_height - 1
y_coords[y_coords < 0] = 0
boxes_tensor[:, :, :, [1, 3]] = y_coords
boxes_tensor[:, :, :, [0, 2]] /= self.img_width
boxes_tensor[:, :, :, [1, 3]] /= self.img_height
# AnchorBox layer will output `(xmin,ymin,xmax,ymax)`. The ground truth is
# `(cx,cy,logw,logh)`. However, we don't need to further convert to centroids here since
# this layer will not carry any gradient backprop. The following command will do the
# conversion if we eventually want it.
# boxes_tensor = convert_coordinates(boxes_tensor,
# start_index=0, conversion='corners2centroids')
# Create a tensor to contain the variances and append it to `boxes_tensor`.
# This tensor has the same shape as `boxes_tensor` and simply contains the same
# 4 variance values for every position in the last axis.
variances_tensor = np.zeros_like(boxes_tensor) # `(height, width, n_boxes, 4)`
variances_tensor += self.variances # Long live broadcasting
# Now `boxes_tensor` becomes a tensor of shape `(height, width, n_boxes, 8)`
boxes_tensor = np.concatenate((boxes_tensor, variances_tensor), axis=-1)
# Below to make tensor 4D.
# (feature_map, n_boxes, 8)
boxes_tensor = boxes_tensor.reshape((-1, self.n_boxes, 8))
# Now prepend one dimension to `boxes_tensor` to account for the batch size and tile it.
# The result will be a 5D tensor of shape `(batch_size, height, width, n_boxes, 8)`
boxes_tensor = np.expand_dims(boxes_tensor, axis=0)
self.boxes_tensor = K.constant(boxes_tensor, dtype='float32')
# (feature_map, n_boxes, 8)
super(RetinaAnchorBoxes, self).build(input_shape)
def call(self, x, mask=None):
'''
Return an anchor box tensor based on the shape of the input tensor.
Note that this tensor does not participate in any graph computations at runtime.
It is being created as a constant once during graph creation and is just being
output along with the rest of the model output during runtime. Because of this,
all logic is implemented as Numpy array operations and it is sufficient to convert
the resulting Numpy array into a Keras tensor at the very end before outputting it.
Arguments:
x (tensor): 4D tensor of shape `(batch, channels, height, width)` if
`dim_ordering = 'th'` or `(batch, height, width, channels)` if
`dim_ordering = 'tf'`. The input for this layer must be the output
of the localization predictor layer.
'''
# Compute box width and height for each aspect ratio
# The shorter side of the image will be used to compute `w` and `h`.
box_tensor_dup = tf.identity(self.boxes_tensor)
with tf.name_scope(None, 'FirstDimTile'):
x_dup = tf.identity(x)
boxes_tensor = K.tile(box_tensor_dup, (K.shape(x_dup)[0], 1, 1, 1))
return boxes_tensor
def compute_output_shape(self, input_shape):
'''Layer output shape function.'''
batch_size, _, feature_map_height, feature_map_width = input_shape
return (batch_size, feature_map_height*feature_map_width, self.n_boxes, 8)
def get_config(self):
'''Layer get_config function.'''
config = {
'img_height': self.img_height,
'img_width': self.img_width,
'this_scale': self.this_scale,
'next_scale': self.next_scale,
'aspect_ratios': list(self.aspect_ratios),
'two_boxes_for_ar1': self.two_boxes_for_ar1,
'clip_boxes': self.clip_boxes,
'variances': list(self.variances),
'n_anchor_levels': self.n_anchor_levels
}
base_config = super(RetinaAnchorBoxes, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
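# ----------------------------------------------------------------------------
# Minimal usage sketch (assumptions: the image size, feature-map size, scales
# and aspect ratios below are illustrative only). The layer consumes a
# channels_first feature map and emits a constant tensor of anchor corners
# plus variances for every spatial cell.
if __name__ == '__main__':
    from keras.layers import Input
    from keras.models import Model

    feature_map = Input(shape=(256, 16, 16))  # (channels, height, width)
    anchors = RetinaAnchorBoxes(img_height=512,
                                img_width=512,
                                this_scale=0.1,
                                next_scale=0.2,
                                aspect_ratios=[1.0, 0.5, 2.0],
                                two_boxes_for_ar1=True,
                                variances=[0.1, 0.1, 0.2, 0.2],
                                n_anchor_levels=3)(feature_map)
    model = Model(feature_map, anchors)
    # 16*16 cells, 3 anchor levels * (3 ratios + 1 extra box for ar=1) = 12 boxes per cell.
    print(model.output_shape)  # (None, 256, 12, 8)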
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/layers/anchor_box_layer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF implementation of RetinaNet output decoder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.engine.topology import InputSpec, Layer
import tensorflow as tf
class DecodeDetections(Layer):
'''
A Keras layer to decode the raw RetinaNet prediction output.
Input shape:
3D tensor of shape `(batch_size, n_boxes, n_classes + 12)`.
Output shape:
3D tensor of shape `(batch_size, top_k, 6)`.
'''
def __init__(self,
confidence_thresh=0.01,
iou_threshold=0.45,
top_k=200,
nms_max_output_size=400,
img_height=None,
img_width=None,
**kwargs):
'''Init function.'''
if (img_height is None) or (img_width is None):
raise ValueError("If relative box coordinates are supposed to be converted to absolute \
coordinates, the decoder needs the image size in order to decode the predictions, but \
`img_height == {}` and `img_width == {}`".format(img_height, img_width))
# We need these members for the config.
self.confidence_thresh = confidence_thresh
self.iou_threshold = iou_threshold
self.top_k = top_k
self.img_height = img_height
self.img_width = img_width
self.nms_max_output_size = nms_max_output_size
super(DecodeDetections, self).__init__(**kwargs)
def build(self, input_shape):
'''Layer build function.'''
self.input_spec = [InputSpec(shape=input_shape)]
super(DecodeDetections, self).build(input_shape)
def call(self, y_pred, mask=None):
'''
Layer call function.
Input shape:
3D tensor of shape `(batch_size, n_boxes, n_classes + 12)`.
Returns:
3D tensor of shape `(batch_size, top_k, 6)`. The second axis is zero-padded
to always yield `top_k` predictions per batch item. The last axis contains
the coordinates for each predicted box in the format
`[class_id, confidence, xmin, ymin, xmax, ymax]`.
'''
# 1. calculate boxes location
scores = y_pred[..., 1:-12]
cx_pred = y_pred[..., -12]
cy_pred = y_pred[..., -11]
w_pred = y_pred[..., -10]
h_pred = y_pred[..., -9]
w_anchor = y_pred[..., -6] - y_pred[..., -8]
h_anchor = y_pred[..., -5] - y_pred[..., -7]
cx_anchor = tf.truediv(y_pred[..., -6] + y_pred[..., -8], 2.0)
cy_anchor = tf.truediv(y_pred[..., -5] + y_pred[..., -7], 2.0)
cx_variance = y_pred[..., -4]
cy_variance = y_pred[..., -3]
variance_w = y_pred[..., -2]
variance_h = y_pred[..., -1]
# Convert anchor box offsets to image offsets.
cx = cx_pred * cx_variance * w_anchor + cx_anchor
cy = cy_pred * cy_variance * h_anchor + cy_anchor
w = tf.exp(w_pred * variance_w) * w_anchor
h = tf.exp(h_pred * variance_h) * h_anchor
# Convert 'centroids' to 'corners'.
xmin = cx - 0.5 * w
ymin = cy - 0.5 * h
xmax = cx + 0.5 * w
ymax = cy + 0.5 * h
xmin = tf.expand_dims(xmin * self.img_width, axis=-1)
ymin = tf.expand_dims(ymin * self.img_height, axis=-1)
xmax = tf.expand_dims(xmax * self.img_width, axis=-1)
ymax = tf.expand_dims(ymax * self.img_height, axis=-1)
# [batch_size, num_boxes, 1, 4]
boxes = tf.stack(values=[xmin, ymin, xmax, ymax], axis=-1)
# 2. apply NMS
nmsed_box, nmsed_score, nmsed_class, _ = tf.image.combined_non_max_suppression(
boxes,
scores,
max_output_size_per_class=self.nms_max_output_size,
max_total_size=self.top_k,
iou_threshold=self.iou_threshold,
score_threshold=self.confidence_thresh,
pad_per_class=False,
clip_boxes=False,
name='batched_nms')
nmsed_class += 1
nmsed_score = tf.expand_dims(nmsed_score, axis=-1)
nmsed_class = tf.expand_dims(nmsed_class, axis=-1)
outputs = tf.concat([nmsed_class, nmsed_score, nmsed_box], axis=-1)
return outputs
def compute_output_shape(self, input_shape):
'''Keras layer compute_output_shape.'''
batch_size, _, _ = input_shape
return (batch_size, self.top_k, 6) # Last axis: (cls_ID, confidence, 4 box coordinates)
def get_config(self):
'''Keras layer get config.'''
config = {
'confidence_thresh': self.confidence_thresh,
'iou_threshold': self.iou_threshold,
'top_k': self.top_k,
'nms_max_output_size': self.nms_max_output_size,
'img_height': self.img_height,
'img_width': self.img_width,
}
base_config = super(DecodeDetections, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
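# ----------------------------------------------------------------------------
# Numpy sketch (made-up numbers, single anchor) of the offset-decoding
# equations used in `call` above:
#   cx = cx_pred * cx_variance * w_anchor + cx_anchor
#   w  = exp(w_pred * variance_w) * w_anchor
# and analogously for cy / h, followed by centroids -> corners conversion.
if __name__ == '__main__':
    import numpy as np
    cx_pred, cy_pred, w_pred, h_pred = 0.1, -0.2, 0.05, 0.0
    xmin_a, ymin_a, xmax_a, ymax_a = 0.25, 0.25, 0.75, 0.75   # anchor corners (relative)
    var_cx, var_cy, var_w, var_h = 0.1, 0.1, 0.2, 0.2
    w_a, h_a = xmax_a - xmin_a, ymax_a - ymin_a
    cx_a, cy_a = (xmin_a + xmax_a) / 2.0, (ymin_a + ymax_a) / 2.0
    cx = cx_pred * var_cx * w_a + cx_a
    cy = cy_pred * var_cy * h_a + cy_a
    w = np.exp(w_pred * var_w) * w_a
    h = np.exp(h_pred * var_h) * h_a
    print([cx - 0.5 * w, cy - 0.5 * h, cx + 0.5 * w, cy + 0.5 * h])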
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/layers/output_decoder_layer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test output decoder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Input
from keras.models import Model
import numpy as np
from nvidia_tao_tf1.cv.retinanet.layers.output_decoder_layer import DecodeDetections
def test_output_decoder_no_compression():
x = Input(shape=(2, 15))
y = DecodeDetections(top_k=2, nms_max_output_size=5, img_height=300, img_width=300)(x)
model = Model(inputs=x, outputs=y)
encoded_val = '''np.array(
[[[ 0. , 1. , 0. , -2.46207404,
-5.01084082, -21.38983255, -20.27411479, 0.25 ,
0.5 , 0.96124919, 0.96124919, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , -1.07137391,
-2.54451304, -3.64782921, -7.11356512, 0.25 ,
0.5 , 0.62225397, 1.24450793, 0.1 ,
0.1 , 0.2 , 0.2 ]]]
)'''
encoded_val = eval(encoded_val)[:, :, :]
expected = '''np.array(
[[[ 1. , 1. , 127.67308, 148.65036, 130.63277,
151.04959],
[ 1. , 0. , 0. , 0. , 0. ,
0. ]]])'''
expected = eval(expected)
pred = model.predict(encoded_val)
assert np.max(abs(pred - expected)) < 1e-5
def test_output_decoder_compression():
x = Input(shape=(10, 15))
y = DecodeDetections(top_k=5, nms_max_output_size=15, img_height=300, img_width=300)(x)
model = Model(inputs=x, outputs=y)
encoded_val = '''np.array(
[[
[ 0. , 1. , 0. , 4.36584869,
0.26784348, -1.88672624, -8.81819805, 0.05 ,
0.5 , 1.24450793, 0.62225397, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 0. , 1. , 0. , 3.56231825,
0.26784348, -1.88672624, -8.81819805, 0.15 ,
0.5 , 1.24450793, 0.62225397, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 0. , 0. , 1. , 2.75878782,
0.26784348, -1.88672624, -8.81819805, 0.25 ,
0.5 , 1.24450793, 0.62225397, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 1.95525739,
0.26784348, -1.88672624, -8.81819805, 0.35 ,
0.5 , 1.24450793, 0.62225397, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 1.15172695,
0.26784348, -1.88672624, -8.81819805, 0.45 ,
0.5 , 1.24450793, 0.62225397, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , 0.34819652,
0.26784348, -1.88672624, -8.81819805, 0.55 ,
0.5 , 1.24450793, 0.62225397, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , -0.45533391,
0.26784348, -1.88672624, -8.81819805, 0.65 ,
0.5 , 1.24450793, 0.62225397, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , -1.25886435,
0.26784348, -1.88672624, -8.81819805, 0.75 ,
0.5 , 1.24450793, 0.62225397, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , -2.06239478,
0.26784348, -1.88672624, -8.81819805, 0.85 ,
0.5 , 1.24450793, 0.62225397, 0.1 ,
0.1 , 0.2 , 0.2 ],
[ 1. , 0. , 0. , -2.86592521,
0.26784348, -1.88672624, -8.81819805, 0.95 ,
0.5 , 1.24450793, 0.62225397, 0.1 ,
0.1 , 0.2 , 0.2 ]]])'''
encoded_val = eval(encoded_val)[:, :, :]
expected = '''np.array(
[[[ 1., 1., 227.77, 166.17693, 473.4848, 172.46394],
[ 2., 1., 204.19827, 166.17693, 408.7723, 172.46394],
[ 1., 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0., 0.]]])'''
expected = eval(expected)
pred = model.predict(encoded_val)
assert np.max(abs(pred - expected)) < 1e-5
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/layers/tests/test_output_decoder_layer.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/retinanet/proto/retinanet_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/retinanet/proto/retinanet_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n8nvidia_tao_tf1/cv/retinanet/proto/retinanet_config.proto\"\xfa\x03\n\x0fRetinaNetConfig\x12\x15\n\raspect_ratios\x18\x01 \x01(\t\x12\x1c\n\x14\x61spect_ratios_global\x18\x02 \x01(\t\x12\x0e\n\x06scales\x18\x03 \x01(\t\x12\x11\n\tmin_scale\x18\x04 \x01(\x02\x12\x11\n\tmax_scale\x18\x05 \x01(\x02\x12\x19\n\x11two_boxes_for_ar1\x18\x06 \x01(\x08\x12\r\n\x05steps\x18\x07 \x01(\t\x12\x12\n\nclip_boxes\x18\x08 \x01(\x08\x12\x11\n\tvariances\x18\t \x01(\t\x12\x0f\n\x07offsets\x18\n \x01(\t\x12\x12\n\nmean_color\x18\x0b \x01(\t\x12\x0c\n\x04\x61rch\x18\x0c \x01(\t\x12\x17\n\x0floss_loc_weight\x18\r \x01(\x02\x12\x18\n\x10\x66ocal_loss_alpha\x18\x0e \x01(\x02\x12\x18\n\x10\x66ocal_loss_gamma\x18\x0f \x01(\x02\x12\x15\n\rfreeze_blocks\x18\x10 \x03(\x02\x12\x11\n\tfreeze_bn\x18\x11 \x01(\x08\x12\x0f\n\x07nlayers\x18\x12 \x01(\r\x12\x11\n\tn_kernels\x18\x13 \x01(\r\x12\x14\n\x0c\x66\x65\x61ture_size\x18\x14 \x01(\r\x12\x16\n\x0epos_iou_thresh\x18\x15 \x01(\x02\x12\x16\n\x0eneg_iou_thresh\x18\x16 \x01(\x02\x12\x17\n\x0fn_anchor_levels\x18\x17 \x01(\rb\x06proto3')
)
_RETINANETCONFIG = _descriptor.Descriptor(
name='RetinaNetConfig',
full_name='RetinaNetConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='aspect_ratios', full_name='RetinaNetConfig.aspect_ratios', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='aspect_ratios_global', full_name='RetinaNetConfig.aspect_ratios_global', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='scales', full_name='RetinaNetConfig.scales', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_scale', full_name='RetinaNetConfig.min_scale', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_scale', full_name='RetinaNetConfig.max_scale', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='two_boxes_for_ar1', full_name='RetinaNetConfig.two_boxes_for_ar1', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='steps', full_name='RetinaNetConfig.steps', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='clip_boxes', full_name='RetinaNetConfig.clip_boxes', index=7,
number=8, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='variances', full_name='RetinaNetConfig.variances', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='offsets', full_name='RetinaNetConfig.offsets', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mean_color', full_name='RetinaNetConfig.mean_color', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='arch', full_name='RetinaNetConfig.arch', index=11,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='loss_loc_weight', full_name='RetinaNetConfig.loss_loc_weight', index=12,
number=13, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='focal_loss_alpha', full_name='RetinaNetConfig.focal_loss_alpha', index=13,
number=14, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='focal_loss_gamma', full_name='RetinaNetConfig.focal_loss_gamma', index=14,
number=15, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='freeze_blocks', full_name='RetinaNetConfig.freeze_blocks', index=15,
number=16, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='freeze_bn', full_name='RetinaNetConfig.freeze_bn', index=16,
number=17, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='nlayers', full_name='RetinaNetConfig.nlayers', index=17,
number=18, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='n_kernels', full_name='RetinaNetConfig.n_kernels', index=18,
number=19, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='feature_size', full_name='RetinaNetConfig.feature_size', index=19,
number=20, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pos_iou_thresh', full_name='RetinaNetConfig.pos_iou_thresh', index=20,
number=21, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='neg_iou_thresh', full_name='RetinaNetConfig.neg_iou_thresh', index=21,
number=22, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='n_anchor_levels', full_name='RetinaNetConfig.n_anchor_levels', index=22,
number=23, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=61,
serialized_end=567,
)
DESCRIPTOR.message_types_by_name['RetinaNetConfig'] = _RETINANETCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
RetinaNetConfig = _reflection.GeneratedProtocolMessageType('RetinaNetConfig', (_message.Message,), dict(
DESCRIPTOR = _RETINANETCONFIG,
__module__ = 'nvidia_tao_tf1.cv.retinanet.proto.retinanet_config_pb2'
# @@protoc_insertion_point(class_scope:RetinaNetConfig)
))
_sym_db.RegisterMessage(RetinaNetConfig)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/proto/retinanet_config_pb2.py |
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/proto/__init__.py
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/retinanet/proto/experiment.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from nvidia_tao_tf1.cv.ssd.proto import augmentation_config_pb2 as nvidia__tao__tf1_dot_cv_dot_ssd_dot_proto_dot_augmentation__config__pb2
from nvidia_tao_tf1.cv.common.proto import detection_sequence_dataset_config_pb2 as nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_detection__sequence__dataset__config__pb2
from nvidia_tao_tf1.cv.common.proto import training_config_pb2 as nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_training__config__pb2
from nvidia_tao_tf1.cv.common.proto import nms_config_pb2 as nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_nms__config__pb2
from nvidia_tao_tf1.cv.common.proto import class_weighting_config_pb2 as nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_class__weighting__config__pb2
from nvidia_tao_tf1.cv.retinanet.proto import eval_config_pb2 as nvidia__tao__tf1_dot_cv_dot_retinanet_dot_proto_dot_eval__config__pb2
from nvidia_tao_tf1.cv.retinanet.proto import retinanet_config_pb2 as nvidia__tao__tf1_dot_cv_dot_retinanet_dot_proto_dot_retinanet__config__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/retinanet/proto/experiment.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n2nvidia_tao_tf1/cv/retinanet/proto/experiment.proto\x1a\x35nvidia_tao_tf1/cv/ssd/proto/augmentation_config.proto\x1a\x46nvidia_tao_tf1/cv/common/proto/detection_sequence_dataset_config.proto\x1a\x34nvidia_tao_tf1/cv/common/proto/training_config.proto\x1a/nvidia_tao_tf1/cv/common/proto/nms_config.proto\x1a;nvidia_tao_tf1/cv/common/proto/class_weighting_config.proto\x1a\x33nvidia_tao_tf1/cv/retinanet/proto/eval_config.proto\x1a\x38nvidia_tao_tf1/cv/retinanet/proto/retinanet_config.proto\"\xca\x02\n\nExperiment\x12\x13\n\x0brandom_seed\x18\x01 \x01(\r\x12&\n\x0e\x64\x61taset_config\x18\x02 \x01(\x0b\x32\x0e.DatasetConfig\x12\x30\n\x13\x61ugmentation_config\x18\x03 \x01(\x0b\x32\x13.AugmentationConfig\x12(\n\x0ftraining_config\x18\x04 \x01(\x0b\x32\x0f.TrainingConfig\x12 \n\x0b\x65val_config\x18\x05 \x01(\x0b\x32\x0b.EvalConfig\x12\x1e\n\nnms_config\x18\x06 \x01(\x0b\x32\n.NMSConfig\x12*\n\x10retinanet_config\x18\x07 \x01(\x0b\x32\x10.RetinaNetConfig\x12\x35\n\x16\x63lass_weighting_config\x18\x08 \x01(\x0b\x32\x15.ClassWeightingConfigb\x06proto3')
,
dependencies=[nvidia__tao__tf1_dot_cv_dot_ssd_dot_proto_dot_augmentation__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_detection__sequence__dataset__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_training__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_nms__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_class__weighting__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_retinanet_dot_proto_dot_eval__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_retinanet_dot_proto_dot_retinanet__config__pb2.DESCRIPTOR,])
_EXPERIMENT = _descriptor.Descriptor(
name='Experiment',
full_name='Experiment',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='random_seed', full_name='Experiment.random_seed', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dataset_config', full_name='Experiment.dataset_config', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='augmentation_config', full_name='Experiment.augmentation_config', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='training_config', full_name='Experiment.training_config', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='eval_config', full_name='Experiment.eval_config', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='nms_config', full_name='Experiment.nms_config', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='retinanet_config', full_name='Experiment.retinanet_config', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='class_weighting_config', full_name='Experiment.class_weighting_config', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=457,
serialized_end=787,
)
_EXPERIMENT.fields_by_name['dataset_config'].message_type = nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_detection__sequence__dataset__config__pb2._DATASETCONFIG
_EXPERIMENT.fields_by_name['augmentation_config'].message_type = nvidia__tao__tf1_dot_cv_dot_ssd_dot_proto_dot_augmentation__config__pb2._AUGMENTATIONCONFIG
_EXPERIMENT.fields_by_name['training_config'].message_type = nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_training__config__pb2._TRAININGCONFIG
_EXPERIMENT.fields_by_name['eval_config'].message_type = nvidia__tao__tf1_dot_cv_dot_retinanet_dot_proto_dot_eval__config__pb2._EVALCONFIG
_EXPERIMENT.fields_by_name['nms_config'].message_type = nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_nms__config__pb2._NMSCONFIG
_EXPERIMENT.fields_by_name['retinanet_config'].message_type = nvidia__tao__tf1_dot_cv_dot_retinanet_dot_proto_dot_retinanet__config__pb2._RETINANETCONFIG
_EXPERIMENT.fields_by_name['class_weighting_config'].message_type = nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_class__weighting__config__pb2._CLASSWEIGHTINGCONFIG
DESCRIPTOR.message_types_by_name['Experiment'] = _EXPERIMENT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Experiment = _reflection.GeneratedProtocolMessageType('Experiment', (_message.Message,), dict(
DESCRIPTOR = _EXPERIMENT,
__module__ = 'nvidia_tao_tf1.cv.retinanet.proto.experiment_pb2'
# @@protoc_insertion_point(class_scope:Experiment)
))
_sym_db.RegisterMessage(Experiment)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/proto/experiment_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/retinanet/proto/eval_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/retinanet/proto/eval_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n3nvidia_tao_tf1/cv/retinanet/proto/eval_config.proto\"\xc6\x01\n\nEvalConfig\x12)\n!validation_period_during_training\x18\x01 \x01(\r\x12\x33\n\x16\x61verage_precision_mode\x18\x02 \x01(\x0e\x32\x13.EvalConfig.AP_MODE\x12\x12\n\nbatch_size\x18\x03 \x01(\r\x12\x1e\n\x16matching_iou_threshold\x18\x04 \x01(\x02\"$\n\x07\x41P_MODE\x12\n\n\x06SAMPLE\x10\x00\x12\r\n\tINTEGRATE\x10\x01\x62\x06proto3')
)
_EVALCONFIG_AP_MODE = _descriptor.EnumDescriptor(
name='AP_MODE',
full_name='EvalConfig.AP_MODE',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='SAMPLE', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INTEGRATE', index=1, number=1,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=218,
serialized_end=254,
)
_sym_db.RegisterEnumDescriptor(_EVALCONFIG_AP_MODE)
_EVALCONFIG = _descriptor.Descriptor(
name='EvalConfig',
full_name='EvalConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='validation_period_during_training', full_name='EvalConfig.validation_period_during_training', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='average_precision_mode', full_name='EvalConfig.average_precision_mode', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batch_size', full_name='EvalConfig.batch_size', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='matching_iou_threshold', full_name='EvalConfig.matching_iou_threshold', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_EVALCONFIG_AP_MODE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=56,
serialized_end=254,
)
_EVALCONFIG.fields_by_name['average_precision_mode'].enum_type = _EVALCONFIG_AP_MODE
_EVALCONFIG_AP_MODE.containing_type = _EVALCONFIG
DESCRIPTOR.message_types_by_name['EvalConfig'] = _EVALCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
EvalConfig = _reflection.GeneratedProtocolMessageType('EvalConfig', (_message.Message,), dict(
DESCRIPTOR = _EVALCONFIG,
__module__ = 'nvidia_tao_tf1.cv.retinanet.proto.eval_config_pb2'
# @@protoc_insertion_point(class_scope:EvalConfig)
))
_sym_db.RegisterMessage(EvalConfig)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/proto/eval_config_pb2.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''build model for training or inference.'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import keras.backend as K
from nvidia_tao_tf1.cv.retinanet.architecture.retinanet import retinanet
from nvidia_tao_tf1.cv.retinanet.utils.helper import eval_str
def build(experiment_spec,
n_classes,
kernel_regularizer=None,
input_tensor=None):
'''
Build a model for training with or without training tensors.
For inference, this function can be used to build a base model, which can be passed into
eval_builder to attach a decode layer.
'''
img_channels = experiment_spec.augmentation_config.output_channel
img_height = experiment_spec.augmentation_config.output_height
img_width = experiment_spec.augmentation_config.output_width
scales = eval_str(experiment_spec.retinanet_config.scales)
aspect_ratios_global = eval_str(experiment_spec.retinanet_config.aspect_ratios_global)
aspect_ratios_per_layer = eval_str(experiment_spec.retinanet_config.aspect_ratios)
steps = eval_str(experiment_spec.retinanet_config.steps)
offsets = eval_str(experiment_spec.retinanet_config.offsets)
variances = eval_str(experiment_spec.retinanet_config.variances)
freeze_blocks = eval_str(experiment_spec.retinanet_config.freeze_blocks)
freeze_bn = eval_str(experiment_spec.retinanet_config.freeze_bn)
nlayers = experiment_spec.retinanet_config.nlayers
arch = experiment_spec.retinanet_config.arch
n_anchor_levels = experiment_spec.retinanet_config.n_anchor_levels or 3
# Config FPN
n_kernels = experiment_spec.retinanet_config.n_kernels
feature_size = experiment_spec.retinanet_config.feature_size
# Enable QAT
use_qat = experiment_spec.training_config.enable_qat
    # Set learning phase to 1 to build the training graph
original_learning_phase = K.learning_phase()
K.set_learning_phase(1)
model_train = retinanet(image_size=(img_channels, img_height, img_width),
n_classes=n_classes,
kernel_regularizer=kernel_regularizer,
freeze_blocks=freeze_blocks,
freeze_bn=freeze_bn,
scales=scales,
min_scale=experiment_spec.retinanet_config.min_scale,
max_scale=experiment_spec.retinanet_config.max_scale,
aspect_ratios_global=aspect_ratios_global,
aspect_ratios_per_layer=aspect_ratios_per_layer,
two_boxes_for_ar1=experiment_spec.retinanet_config.two_boxes_for_ar1,
steps=steps,
n_anchor_levels=n_anchor_levels,
offsets=offsets,
clip_boxes=experiment_spec.retinanet_config.clip_boxes,
variances=variances,
nlayers=nlayers,
arch=arch,
n_kernels=n_kernels,
feature_size=feature_size,
input_tensor=input_tensor,
qat=use_qat)
    # Set learning phase to 0 to build the evaluation graph
K.set_learning_phase(0)
model_eval = retinanet(image_size=(img_channels, img_height, img_width),
n_classes=n_classes,
kernel_regularizer=kernel_regularizer,
freeze_blocks=freeze_blocks,
freeze_bn=freeze_bn,
scales=scales,
min_scale=experiment_spec.retinanet_config.min_scale,
max_scale=experiment_spec.retinanet_config.max_scale,
aspect_ratios_global=aspect_ratios_global,
aspect_ratios_per_layer=aspect_ratios_per_layer,
two_boxes_for_ar1=experiment_spec.retinanet_config.two_boxes_for_ar1,
steps=steps,
n_anchor_levels=n_anchor_levels,
offsets=offsets,
clip_boxes=experiment_spec.retinanet_config.clip_boxes,
variances=variances,
nlayers=nlayers,
arch=arch,
n_kernels=n_kernels,
feature_size=feature_size,
input_tensor=None,
qat=use_qat)
K.set_learning_phase(original_learning_phase)
return model_train, model_eval
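if __name__ == "__main__":
    # Usage sketch added for illustration only; it mirrors the builder unit test in this repo.
    # The default experiment spec (merge_from_default=True) is an assumption here; in practice
    # a user-provided spec file is loaded instead.
    from nvidia_tao_tf1.cv.retinanet.utils.spec_loader import load_experiment_spec
    spec = load_experiment_spec(merge_from_default=True)
    cls_mapping = spec.dataset_config.target_class_mapping
    n_classes = len({str(x) for x in cls_mapping.values()}) + 1  # target classes + background
    model_train, model_eval = build(spec, n_classes)
    print(model_train.get_layer('retinanet_predictions').output_shape)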
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/builders/model_builder.py |
tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/builders/__init__.py |
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''build input dataset for training or evaluation.'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nvidia_tao_tf1.cv.retinanet.dataio.coco_loader import RetinaCocoDataSequence
from nvidia_tao_tf1.cv.retinanet.dataio.kitti_loader import RetinaKittiDataSequence
from nvidia_tao_tf1.cv.ssd.builders.dalipipeline_builder import SSDDALIDataset
def build(experiment_spec,
training=True,
root_path=None,
device_id=0,
shard_id=0,
num_shards=1,
use_dali=False):
'''
    Build the input dataset for training or evaluation.
    Depending on `use_dali`, this returns either a DALI-based dataset sharded across GPUs or a
    keras-sequence based KITTI/COCO data loader, using the train or eval batch size accordingly.
'''
supported_data_loader = {'kitti': RetinaKittiDataSequence,
'coco': RetinaCocoDataSequence}
# train/val batch size
train_bs = experiment_spec.training_config.batch_size_per_gpu
val_bs = experiment_spec.eval_config.batch_size
dl_type = experiment_spec.dataset_config.type or 'kitti'
assert dl_type in list(supported_data_loader.keys()), \
"dataloader type is invalid. Only coco and kitti are supported."
if use_dali:
dataset = SSDDALIDataset(experiment_spec=experiment_spec,
device_id=device_id,
shard_id=shard_id,
num_shards=num_shards)
else:
dataset = supported_data_loader[dl_type](
dataset_config=experiment_spec.dataset_config,
augmentation_config=experiment_spec.augmentation_config,
batch_size=train_bs if training else val_bs,
is_training=training,
encode_fn=None,
root_path=root_path)
return dataset
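if __name__ == "__main__":
    # Usage sketch added for illustration only. Constructing the loaders requires the dataset
    # paths in the spec to actually exist on disk; the default spec used below is merely an
    # assumption to keep the example self-contained.
    from nvidia_tao_tf1.cv.retinanet.utils.spec_loader import load_experiment_spec
    spec = load_experiment_spec(merge_from_default=True)
    train_dataset = build(spec, training=True)
    val_dataset = build(spec, training=False)
    print("Built data loaders:", type(train_dataset).__name__, type(val_dataset).__name__)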
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/builders/input_builder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''build model for evaluation.'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Input
from keras.models import Model
from nvidia_tao_tf1.cv.retinanet.layers.output_decoder_layer import DecodeDetections
def build(training_model,
confidence_thresh=0.05,
iou_threshold=0.5,
top_k=200,
nms_max_output_size=1000,
include_encoded_pred=False):
'''build model for evaluation.'''
im_channel, im_height, im_width = training_model.layers[0].input_shape[1:]
decoded_predictions = DecodeDetections(confidence_thresh=confidence_thresh,
iou_threshold=iou_threshold,
top_k=top_k,
nms_max_output_size=nms_max_output_size,
img_height=im_height,
img_width=im_width,
name='decoded_predictions')
if include_encoded_pred:
model_output = [training_model.layers[-1].output,
decoded_predictions(training_model.layers[-1].output)]
else:
model_output = decoded_predictions(training_model.layers[-1].output)
eval_model = Model(inputs=training_model.layers[1].input,
outputs=model_output)
new_input = Input(shape=(im_channel, im_height, im_width))
eval_model = Model(inputs=new_input, outputs=eval_model(new_input))
return eval_model
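if __name__ == "__main__":
    # Usage sketch added for illustration only: attach the decode layer to a freshly built eval
    # graph. The default spec and untrained weights are assumptions; in the real pipeline the
    # wrapped model comes from a trained checkpoint, and passing include_encoded_pred=True
    # produces the two-output model that the RetinaNet metric callback expects.
    from nvidia_tao_tf1.cv.retinanet.builders import model_builder
    from nvidia_tao_tf1.cv.retinanet.utils.spec_loader import load_experiment_spec
    spec = load_experiment_spec(merge_from_default=True)
    n_classes = len({str(x) for x in spec.dataset_config.target_class_mapping.values()}) + 1
    _, model_eval = model_builder.build(spec, n_classes)
    decoding_model = build(model_eval, confidence_thresh=0.05, iou_threshold=0.5, top_k=200)
    decoding_model.summary()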
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/builders/eval_builder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IVA RetinaNet data loader builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import nvidia_tao_tf1.core
from nvidia_tao_tf1.blocks.multi_source_loader.types.bbox_2d_label import Bbox2DLabel
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.build_dataloader import build_dataloader
from nvidia_tao_tf1.cv.ssd.utils.tensor_utils import get_non_empty_rows_2d_sparse
class DataGenerator:
"""
Data loader class.
DataGenerator can be used in two ways:
1. build groundtruth image and label TF tensors. Those two tensors can be
directly used for training.
2. build a generator that yields image and label numpy arrays. In this case,
a TF session needs to be passed into the class initializer.
"""
def __init__(self,
experiment_spec,
label_encoder=None,
training=True,
sess=None):
"""
Data loader init function.
Arguments:
experiment_spec: The loaded config pb2.
label_encoder (function, optional): If passed in, groundtruth label will be encoded.
training (bool): Return training set or validation set.
sess (TF Session): Required if generator() function needs to be called. Otherwise, just
pass None.
"""
dataset_proto = experiment_spec.dataset_config
dataloader = build_dataloader(
dataset_proto=dataset_proto,
augmentation_proto=experiment_spec.augmentation_config)
if training:
batch_size = experiment_spec.training_config.batch_size_per_gpu
else:
batch_size = experiment_spec.eval_config.batch_size
self.images, self.ground_truth_labels, self.num_samples = \
dataloader.get_dataset_tensors(batch_size, training=training,
enable_augmentation=training)
if self.num_samples == 0:
return
cls_mapping_dict = experiment_spec.dataset_config.target_class_mapping
self.classes = sorted({str(x) for x in cls_mapping_dict.values()})
cls_map = nvidia_tao_tf1.core.processors.LookupTable(
keys=self.classes,
values=list(range(len(self.classes))),
default_value=-1)
cls_map.build()
self.H, self.W = self.images.get_shape().as_list()[2:]
self.label_encoder = label_encoder
# preprocess input.
self.images *= 255.0
num_channels = experiment_spec.augmentation_config.preprocessing.output_image_channel
if num_channels == 3:
perm = tf.constant([2, 1, 0])
self.images = tf.gather(self.images, perm, axis=1)
self.images -= tf.constant([[[[103.939]], [[116.779]], [[123.68]]]])
elif num_channels == 1:
self.images -= 117.3786
else:
raise NotImplementedError(
"Invalid number of input channels {} requested.".format(num_channels)
)
gt_labels = []
if isinstance(self.ground_truth_labels, list):
for l in self.ground_truth_labels:
obj_id = cls_map(l['target/object_class'])
x1 = tf.clip_by_value(tf.cast(tf.round(l['target/coordinates_x1']), tf.int32), 0,
self.W - 1)
x2 = tf.clip_by_value(tf.cast(tf.round(l['target/coordinates_x2']), tf.int32), 0,
self.W - 1)
y1 = tf.clip_by_value(tf.cast(tf.round(l['target/coordinates_y1']), tf.int32), 0,
self.H - 1)
y2 = tf.clip_by_value(tf.cast(tf.round(l['target/coordinates_y2']), tf.int32), 0,
self.H - 1)
# only select valid labels
select = tf.logical_and(tf.not_equal(obj_id, -1),
tf.logical_and(tf.less(x1, x2), tf.less(y1, y2)))
label = tf.stack([obj_id, x1, y1, x2, y2], axis=1)
gt_labels.append(tf.boolean_mask(label, select))
elif isinstance(self.ground_truth_labels, Bbox2DLabel):
source_classes = self.ground_truth_labels.object_class
mapped_classes = tf.SparseTensor(
values=cls_map(source_classes.values),
indices=source_classes.indices,
dense_shape=source_classes.dense_shape)
mapped_labels = self.ground_truth_labels._replace(object_class=mapped_classes)
valid_indices = tf.not_equal(mapped_classes.values, -1)
filtered_labels = mapped_labels.filter(valid_indices)
filtered_obj_ids = tf.sparse.reshape(filtered_labels.object_class, [batch_size, -1, 1])
filtered_coords = tf.sparse.reshape(filtered_labels.vertices.coordinates,
[batch_size, -1, 4])
filtered_coords = tf.sparse.SparseTensor(
values=tf.cast(tf.round(filtered_coords.values), tf.int32),
indices=filtered_coords.indices,
dense_shape=filtered_coords.dense_shape)
labels_all = tf.sparse.concat(axis=-1, sp_inputs=[filtered_obj_ids, filtered_coords])
labels_split = tf.sparse.split(sp_input=labels_all, num_split=batch_size, axis=0)
labels_split = [tf.sparse.reshape(x, [-1, 5]) for x in labels_split]
labels = [tf.sparse.to_dense(get_non_empty_rows_2d_sparse(x)) for x in labels_split]
for l in labels:
obj_id = l[:, 0]
x1 = tf.clip_by_value(l[:, 1], 0, self.W - 1)
x2 = tf.clip_by_value(l[:, 3], 0, self.W - 1)
y1 = tf.clip_by_value(l[:, 2], 0, self.H - 1)
y2 = tf.clip_by_value(l[:, 4], 0, self.H - 1)
# only select valid labels
select = tf.logical_and(tf.not_equal(obj_id, -1),
tf.logical_and(tf.less(x1, x2), tf.less(y1, y2)))
label = tf.stack([obj_id, x1, y1, x2, y2], axis=1)
gt_labels.append(tf.boolean_mask(label, select))
else:
raise TypeError('Input must be either list or Bbox2DLabel instance')
self.gt_labels = gt_labels
self.ground_truth_labels = gt_labels
if self.label_encoder is not None:
self.ground_truth_labels = self.label_encoder(gt_labels)
self.sess = sess
def set_encoder(self, label_encoder):
"""Set a new label encoder for output labels."""
self.ground_truth_labels = label_encoder(self.gt_labels)
def generator(self):
"""Yields img and label numpy arrays."""
if self.sess is None:
raise ValueError('TF session can not be found. Pass a session to the initializer!')
while True:
img, label = self.sess.run([self.images, self.ground_truth_labels])
yield img, label
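if __name__ == "__main__":
    # Usage sketch added for illustration only. The default spec, the TFRecord dataset it points
    # at, and the initializer set below are assumptions; depending on the dataloader version,
    # additional iterator initializers may also have to be run before pulling a batch.
    from nvidia_tao_tf1.cv.retinanet.utils.spec_loader import load_experiment_spec
    spec = load_experiment_spec(merge_from_default=True)
    with tf.Session() as sess:
        data_gen = DataGenerator(spec, label_encoder=None, training=True, sess=sess)
        sess.run(tf.group(tf.global_variables_initializer(),
                          tf.local_variables_initializer(),
                          tf.tables_initializer()))
        images, labels = next(data_gen.generator())
        print("Image batch shape:", images.shape)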
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/builders/data_generator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test model builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import keras.backend as K
from nvidia_tao_tf1.cv.retinanet.builders import model_builder
from nvidia_tao_tf1.cv.retinanet.utils.spec_loader import load_experiment_spec
def test_model_builder():
K.set_learning_phase(0)
experiment_spec = load_experiment_spec(merge_from_default=True)
cls_mapping = experiment_spec.dataset_config.target_class_mapping
classes = sorted({str(x) for x in cls_mapping.values()})
model_train, _ = model_builder.build(experiment_spec, len(classes) + 1, input_tensor=None)
assert model_train.get_layer('retinanet_predictions').output_shape[-2:] == (49104, 16)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/builders/tests/test_model_builder.py |
tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/callbacks/__init__.py |
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unified eval and mAP callback."""
import sys
from keras import backend as K
from keras.utils.data_utils import OrderedEnqueuer
import numpy as np
from tqdm import trange
from nvidia_tao_tf1.cv.common.callbacks.detection_metric_callback import DetectionMetricCallback
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
class RetinaMetricCallback(DetectionMetricCallback):
'''
Callback function to calculate model mAP / validation loss per k epoch.
Args:
ap_evaluator: object of class APEvaluator.
built_eval_model: eval model built with additional layers for encoded output AND bbox
output (model requires two outputs!!!)
eval_sequence: Eval data sequence (based on keras sequence) that gives images, labels.
labels is list (batch_size) of tuples (encoded_label, raw_label)
loss_ops: three element tuple or list. [gt_placeholder, pred_placeholder, loss]
eval_model: the training graph part of built_eval_model. Note, this model must share
TF nodes with built_eval_model
metric_interval: calculate model mAP per k epoch
verbose: True if you want print ap message.
'''
def __init__(self, ap_evaluator, built_eval_model, eval_sequence, loss_ops, *args, **kwargs):
"""Init function."""
super().__init__(ap_evaluator=ap_evaluator,
built_eval_model=built_eval_model,
eval_sequence=eval_sequence,
loss_ops=loss_ops,
*args, **kwargs)
self.ap_evaluator = ap_evaluator
self.built_eval_model = built_eval_model
self.classes = eval_sequence.classes
self.class_mapping = {v : k for k, v in self.classes.items()}
self.enqueuer = OrderedEnqueuer(eval_sequence, use_multiprocessing=False)
self.n_batches = len(eval_sequence)
self.loss_ops = loss_ops
self.output_height = eval_sequence.output_height
self.output_width = eval_sequence.output_width
def _skip_metric(self, logs):
for i in self.classes:
logs['AP_' + i] = np.float64(np.nan)
logs['mAP'] = np.float64(np.nan)
logs['validation_loss'] = np.float64(np.nan)
def _calc_metric(self, logs):
total_loss = 0.0
gt_labels = []
pred_labels = []
if self.verbose:
tr = trange(self.n_batches, file=sys.stdout)
tr.set_description('Producing predictions')
else:
tr = range(self.n_batches)
self.enqueuer.start(workers=1, max_queue_size=10)
output_generator = self.enqueuer.get()
# Loop over all batches.
for _ in tr:
# Generate batch.
batch_X, batch_labs = next(output_generator)
encoded_lab, gt_lab = zip(*batch_labs)
# Predict.
y_pred_encoded, y_pred = self.built_eval_model.predict(batch_X)
batch_loss = K.get_session().run(self.loss_ops[2],
feed_dict={self.loss_ops[0]: np.array(encoded_lab),
self.loss_ops[1]: y_pred_encoded})
total_loss += np.sum(batch_loss) * len(gt_lab)
gt_labels.extend(gt_lab)
for i in range(len(y_pred)):
y_pred_valid = y_pred[i][y_pred[i][:, 1] > self.ap_evaluator.conf_thres]
y_pred_valid[..., 2] = np.clip(y_pred_valid[..., 2].round(), 0.0,
self.output_width)
y_pred_valid[..., 3] = np.clip(y_pred_valid[..., 3].round(), 0.0,
self.output_height)
y_pred_valid[..., 4] = np.clip(y_pred_valid[..., 4].round(), 0.0,
self.output_width)
y_pred_valid[..., 5] = np.clip(y_pred_valid[..., 5].round(), 0.0,
self.output_height)
pred_labels.append(y_pred_valid)
self.enqueuer.stop()
logs['validation_loss'] = total_loss / len(gt_labels)
m_ap, ap = self.ap_evaluator(gt_labels, pred_labels, verbose=self.verbose)
m_ap = np.mean(ap[1:])
if self.verbose:
print("*******************************")
for i in range(len(self.classes)):
logs['AP_' + self.class_mapping[i+1]] = np.float64(ap[i+1])
if self.verbose:
print("{:<14}{:<6}{}".format(
self.class_mapping[i+1], 'AP', round(ap[i+1], 5)))
if self.verbose:
print("{:<14}{:<6}{}".format('', 'mAP', round(m_ap, 5)))
print("*******************************")
print("Validation loss:", logs['validation_loss'])
logs['mAP'] = m_ap
graphical_data = {
"validation loss": round(logs['validation_loss'], 8),
"mean average precision": round(logs['mAP'], 5)
}
s_logger = status_logging.get_status_logger()
if isinstance(s_logger, status_logging.StatusLogger):
s_logger.graphical = graphical_data
s_logger.write(
status_level=status_logging.Status.RUNNING,
message="Evaluation metrics generated."
)
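# Usage sketch (illustration only): this callback is normally wired up by the training script.
# All names below are placeholders for objects built elsewhere (the APEvaluator, the two-output
# model from eval_builder with include_encoded_pred=True, the validation data sequence and the
# loss placeholders/tensor); none of them are defined in this module.
#
#   metric_callback = RetinaMetricCallback(ap_evaluator=ap_evaluator,
#                                          built_eval_model=built_eval_model,
#                                          eval_sequence=val_dataset,
#                                          loss_ops=(gt_ph, pred_ph, loss_tensor),
#                                          eval_model=model_eval,
#                                          metric_interval=1,
#                                          verbose=1)
#   model_train.fit(..., callbacks=[metric_callback])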
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/callbacks/retinanet_metric_callback.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of callbacks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.callbacks import Callback
from nvidia_tao_tf1.cv.retinanet.utils.model_io import save_model
class KerasModelSaver(Callback):
"""Save the encrypted model after every epoch.
Attributes:
filepath: formated string for saving models
ENC_KEY: API key to encrypt the model.
"""
def __init__(self, filepath, key, save_model, verbose=1):
"""Initialization with encryption key."""
self.filepath = filepath
self._ENC_KEY = key
self.verbose = verbose
self.save_model = save_model
def on_epoch_end(self, epoch, logs=None):
"""Called at the end of an epoch."""
self.save_model.set_weights(self.model.get_weights())
fname = self.filepath.format(epoch=epoch + 1)
fname = save_model(self.save_model, fname, str.encode(self._ENC_KEY), '.hdf5')
if self.verbose > 0:
print('\nEpoch %05d: saving model to %s' % (epoch + 1, fname))
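# Usage sketch (illustration only): the saver is attached to Keras fit() so an encrypted .hdf5
# checkpoint is written after every epoch. `train_model`, `eval_graph_model`, the key and the
# output path below are placeholders, not objects defined in this module.
#
#   ckpt_path = "/workspace/output/weights/retinanet_epoch_{epoch:03d}.hdf5"
#   ckpt_saver = KerasModelSaver(ckpt_path, key="<your_key>", save_model=eval_graph_model)
#   train_model.fit(train_data, epochs=num_epochs, callbacks=[ckpt_saver])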
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/callbacks/enc_model_saver.py |
tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/box_coder/__init__.py |
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
RetinaNet label encoder.
Code partially from GitHub (Apache v2 license):
https://github.com/pierluigiferrari/ssd_keras/tree/3ac9adaf3889f1020d74b0eeefea281d5e82f353
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from nvidia_tao_tf1.cv.ssd.box_coder.bounding_box_utils import convert_coordinates, iou
from nvidia_tao_tf1.cv.ssd.box_coder.matching_utils import match_bipartite_greedy, match_multi
class InputEncoder:
'''
Encoder class.
Transforms ground truth labels for object detection in images
(2D bounding box coordinates and class labels) to the format required for
training an SSD or RetinaNet model.
In the process of encoding the ground truth labels, a template of anchor boxes
is being built, which are subsequently matched to the ground truth boxes
via an intersection-over-union threshold criterion.
'''
def __init__(self,
img_height,
img_width,
n_classes,
predictor_sizes,
min_scale=0.1,
max_scale=0.9,
scales=None,
aspect_ratios_global=None,
aspect_ratios_per_layer=None,
two_boxes_for_ar1=True,
steps=None,
n_anchor_levels=3,
offsets=None,
clip_boxes=False,
variances=None,
matching_type='multi',
pos_iou_threshold=0.5,
neg_iou_limit=0.4,
border_pixels='half',
coords='centroids',
normalize_coords=True,
background_id=0,
class_weights=None):
'''
Init encoder.
Arguments:
img_height (int): The height of the input images.
img_width (int): The width of the input images.
n_classes (int): The number of positive classes, e.g. 20 for Pascal VOC, 80 for MS COCO.
predictor_sizes (list): A list of int-tuples of the format `(height, width)`
containing the output heights and widths of the convolutional predictor layers.
min_scale (float, optional): The smallest scaling factor for the size of the anchor
boxes as a fraction of the shorter side of the input images. Note that you should
set the scaling factors such that the resulting anchor box sizes correspond to the
sizes of the objects you are trying to detect. Must be >0.
max_scale (float, optional): The largest scaling factor for the size of the anchor boxes
as a fraction of the shorter side of the input images. All scaling factors between
the smallest and the largest will be linearly interpolated. Note that the second to
last of the linearly interpolated scaling factors will actually be the scaling
factor for the last predictor layer, while the last scaling factor is used for the
second box for aspect ratio 1 in the last predictor layer if `two_boxes_for_ar1` is
`True`. Note that you should set the scaling factors such that the resulting anchor
box sizes correspond to the sizes of the objects you are trying to detect. Must be
greater than or equal to `min_scale`.
scales (list, optional): A list of floats >0 containing scaling factors per
convolutional predictor layer. This list must be one element longer than the number
of predictor layers. The first `k` elements are the scaling factors for the `k`
predictor layers, while the last element is used for the second box for aspect ratio
1 in the last predictor layer if `two_boxes_for_ar1` is `True`. This additional last
scaling factor must be passed either way, even if it is not being used. If a list is
passed, this argument overrides `min_scale` and `max_scale`. All scaling factors
must be greater than zero. Note that you should set the scaling factors such that
the resulting anchor box sizes correspond to the sizes of the objects you are trying
to detect.
aspect_ratios_global (list, optional): The list of aspect ratios for which anchor boxes
are to be generated. This list is valid for all prediction layers. Note that you
should set the aspect ratios such that the resulting anchor box shapes roughly
correspond to the shapes of the objects you are trying to detect.
aspect_ratios_per_layer (list, optional): A list containing one aspect ratio list for
each prediction layer. If a list is passed, it overrides `aspect_ratios_global`.
Note that you should set the aspect ratios such that the resulting anchor box shapes
very roughly correspond to the shapes of the objects you are trying to detect.
two_boxes_for_ar1 (bool, optional): Only relevant for aspect ratios lists that contain
1. Will be ignored otherwise. If `True`, two anchor boxes will be generated for
aspect ratio 1. The first will be generated using the scaling factor for the
respective layer, the second one will be generated using geometric mean of said
scaling factor and next bigger scaling factor.
steps (list, optional): `None` or a list with as many elements as there are predictor
layers. The elements can be either ints/floats or tuples of two ints/floats. These
numbers represent for each predictor layer how many pixels apart the anchor box
center points should be vertically and horizontally along the spatial grid over the
image. If the list contains ints/floats, then that value will be used for both
spatial dimensions. If the list contains tuples of two ints/floats, then they
represent `(step_height, step_width)`. If no steps are provided, then they will be
computed such that the anchor box center points will form an equidistant grid within
the image dimensions.
offsets (list, optional): `None` or a list with as many elements as there are predictor
layers. The elements can be either floats or tuples of two floats. These numbers
represent for each predictor layer how many pixels from the top and left boarders of
the image the top-most and left-most anchor box center points should be as a
fraction of `steps`. The last bit is important: The offsets are not absolute pixel
values, but fractions of the step size specified in the `steps` argument. If the
list contains floats, then that value will be used for both spatial dimensions. If
the list contains tuples of two floats, then they represent
`(vertical_offset, horizontal_offset)`. If no offsets are provided, then they will
default to 0.5 of the step size.
clip_boxes (bool, optional): If `True`, limits the anchor box coordinates to stay within
image boundaries.
variances (list, optional): A list of 4 floats >0. The anchor box offset for each
coordinate will be divided by its respective variance value.
pos_iou_threshold (float, optional): The intersection-over-union similarity threshold
that must be met in order to match a given ground truth box to a given anchor box.
neg_iou_limit (float, optional): The maximum allowed intersection-over-union similarity
of an anchor box with any ground truth box to be labeled a negative
(i.e. background) box. If an anchor box is neither a positive, nor a negative box,
it will be ignored during training.
normalize_coords (bool, optional): If `True`, the encoder uses relative instead of
                absolute coordinates. This means instead of using absolute target coordinates, the
encoder will scale all coordinates to be within [0,1]. This way learning becomes
independent of the input image size.
'''
predictor_sizes = np.array(predictor_sizes)
if predictor_sizes.ndim == 1:
predictor_sizes = np.expand_dims(predictor_sizes, axis=0)
##################################################################################
# Handle exceptions.
##################################################################################
if (min_scale is None or max_scale is None) and scales is None:
raise ValueError("Either `min_scale` and `max_scale` or `scales` need to be specified.")
if scales:
if (len(scales) != predictor_sizes.shape[0] + 1):
# Must be two nested `if` statements since `list` and `bool` can't be combined by &
raise ValueError("It must be either scales is None or len(scales) == \
len(predictor_sizes)+1, but len(scales) == {} and len(predictor_sizes)+1 == {}"
.format(len(scales), len(predictor_sizes)+1))
scales = np.array(scales)
if np.any(scales <= 0):
raise ValueError("All values in `scales` must be greater than 0, but the passed \
list of scales is {}".format(scales))
else:
# If no scales passed, we make sure that `min_scale` and `max_scale` are valid values.
if not 0 < min_scale <= max_scale:
raise ValueError("It must be 0 < min_scale <= max_scale, but it is min_scale = {} \
and max_scale = {}".format(min_scale, max_scale))
if not (aspect_ratios_per_layer is None):
if (len(aspect_ratios_per_layer) != predictor_sizes.shape[0]):
# Must be two nested `if` statements since `list` and `bool` can't be combined by &
raise ValueError("It must be either aspect_ratios_per_layer is None or \
len(aspect_ratios_per_layer) == len(predictor_sizes), but len(aspect_ratios_per_layer) == {} \
and len(predictor_sizes) == {}".format(len(aspect_ratios_per_layer), len(predictor_sizes)))
for aspect_ratios in aspect_ratios_per_layer:
if np.any(np.array(aspect_ratios) <= 0):
raise ValueError("All aspect ratios must be greater than zero.")
else:
if (aspect_ratios_global is None):
raise ValueError("At least one of `aspect_ratios_global` and \
`aspect_ratios_per_layer` must not be `None`.")
if np.any(np.array(aspect_ratios_global) <= 0):
raise ValueError("All aspect ratios must be greater than zero.")
if len(variances) != 4:
            raise ValueError("4 variance values must be passed, but {} values were received."
.format(len(variances)))
variances = np.array(variances)
if np.any(variances <= 0):
raise ValueError("All variances must be >0, but the variances given are {}"
.format(variances))
if (not (steps is None)) and (len(steps) != predictor_sizes.shape[0]):
raise ValueError("You must provide at least one step value per predictor layer.")
if (not (offsets is None)) and (len(offsets) != predictor_sizes.shape[0]):
raise ValueError("You must provide at least one offset value per predictor layer.")
##################################################################################
# Set or compute members.
##################################################################################
self.img_height = float(img_height)
self.img_width = float(img_width)
self.n_classes = n_classes
self.predictor_sizes = predictor_sizes
self.min_scale = min_scale
self.max_scale = max_scale
# If `scales` is None, compute the scaling factors by linearly interpolating between
# `min_scale` and `max_scale`. If an explicit list of `scales` is given, however,
# then it takes precedent over `min_scale` and `max_scale`.
if (scales is None):
self.scales = np.linspace(self.min_scale, self.max_scale, len(self.predictor_sizes)+1)
else:
# If a list of scales is given explicitly, we'll use that instead of computing it from
# `min_scale` and `max_scale`.
self.scales = scales
# If `aspect_ratios_per_layer` is None, then we use the same list of aspect ratios
# `aspect_ratios_global` for all predictor layers. If `aspect_ratios_per_layer` is given,
# however, then it takes precedent over `aspect_ratios_global`.
if (aspect_ratios_per_layer is None):
self.aspect_ratios = [aspect_ratios_global] * predictor_sizes.shape[0]
else:
# If aspect ratios are given per layer, we'll use those.
self.aspect_ratios = aspect_ratios_per_layer
self.two_boxes_for_ar1 = two_boxes_for_ar1
if not (steps is None):
self.steps = steps
else:
self.steps = [None] * predictor_sizes.shape[0]
if not (offsets is None):
self.offsets = offsets
else:
self.offsets = [None] * predictor_sizes.shape[0]
self.clip_boxes = clip_boxes
self.variances = variances
self.matching_type = matching_type
self.pos_iou_threshold = pos_iou_threshold
self.border_pixels = border_pixels
self.coords = coords
self.neg_iou_limit = neg_iou_limit
self.normalize_coords = normalize_coords
self.background_id = background_id
# Compute the number of boxes per spatial location for each predictor layer.
# For example, if a predictor layer has three different aspect ratios, [1.0, 0.5, 2.0], and
# is supposed to predict two boxes of slightly different size for aspect ratio 1.0, then
# that predictor layer predicts a total of four boxes at every spatial location across the
# feature map.
if not (aspect_ratios_per_layer is None):
self.n_boxes = []
for aspect_ratios in aspect_ratios_per_layer:
if (1 in aspect_ratios) & two_boxes_for_ar1:
self.n_boxes.append(len(aspect_ratios) + 1)
else:
self.n_boxes.append(len(aspect_ratios))
else:
if (1 in aspect_ratios_global) & two_boxes_for_ar1:
self.n_boxes = n_anchor_levels * (len(aspect_ratios_global) + 1)
else:
self.n_boxes = n_anchor_levels * len(aspect_ratios_global)
self.n_anchor_levels = n_anchor_levels
self.anchor_sizes = np.power(2, np.linspace(0, 1, 1+n_anchor_levels))[:-1]
##################################################################################
# Compute the anchor boxes for each predictor layer.
##################################################################################
# Compute the anchor boxes for each predictor layer. We only have to do this once
# since the anchor boxes depend only on the model configuration, not on the input data.
# For each predictor layer (i.e. for each scaling factor) the tensors for that layer's
# anchor boxes will have the shape `(feature_map_height, feature_map_width, n_boxes, 4)`.
        # This will store the anchor boxes for each predictor layer.
self.boxes_list = []
# The following lists just store diagnostic information. Sometimes it's handy to have the
# boxes' center points, heights, widths, etc. in a list.
self.wh_list_diag = [] # Box widths and heights for each predictor layer
# Horizontal and vertical distances between any two boxes for each predictor layer
self.steps_diag = []
self.offsets_diag = [] # Offsets for each predictor layer
# Anchor box center points as `(cy, cx)` for each predictor layer
self.centers_diag = []
# Iterate over all predictor layers and compute the anchor boxes for each one.
for i in range(len(self.predictor_sizes)):
out = self.generate_anchor_boxes_for_layer(feature_map_size=self.predictor_sizes[i],
aspect_ratios=self.aspect_ratios[i],
this_scale=self.scales[i],
next_scale=self.scales[i+1],
this_steps=self.steps[i],
this_offsets=self.offsets[i],
diagnostics=True)
boxes, center, wh, step, offset = out
self.boxes_list.append(boxes)
self.wh_list_diag.append(wh)
self.steps_diag.append(step)
self.offsets_diag.append(offset)
self.centers_diag.append(center)
self.y_encoding_template = np.ascontiguousarray(
self.generate_encoding_template(diagnostics=False))
if class_weights is None:
self.class_weights = np.ones(self.n_classes, dtype=np.float32)
else:
self.class_weights = np.array(class_weights)
def generate_anchor_boxes_for_layer(self,
feature_map_size,
aspect_ratios,
this_scale,
next_scale,
this_steps=None,
this_offsets=None,
diagnostics=False):
'''generate anchors per layer.'''
# Compute box width and height for each aspect ratio.
# The shorter side of the image will be used to compute `w` and `h`
# using `scale` and `aspect_ratios`.
size = min(self.img_height, self.img_width)
        # Compute the box widths and heights for all aspect ratios
wh_list = []
for scale_augment in self.anchor_sizes:
# 2^0, 2^(1/3), 2^(2/3), used in the original paper
for ar in aspect_ratios:
if (ar == 1):
# Compute the regular anchor box for aspect ratio 1.
box_height = this_scale * size * scale_augment
box_width = this_scale * size * scale_augment
wh_list.append((box_width, box_height))
if self.two_boxes_for_ar1:
# Compute one slightly larger version using the geometric mean of this scale
# value and the next.
box_height = np.sqrt(this_scale * next_scale) * size * scale_augment
box_width = np.sqrt(this_scale * next_scale) * size * scale_augment
wh_list.append((box_width, box_height))
else:
box_width = this_scale * size * scale_augment * np.sqrt(ar)
box_height = this_scale * size * scale_augment / np.sqrt(ar)
wh_list.append((box_width, box_height))
wh_list = np.array(wh_list)
n_boxes = len(wh_list)
# Compute the grid of box center points. They are identical for all aspect ratios.
# Compute the step sizes,
# i.e. how far apart the anchor box center points will be vertically and horizontally.
if (this_steps is None):
step_height = self.img_height / feature_map_size[0]
step_width = self.img_width / feature_map_size[1]
else:
if isinstance(this_steps, (list, tuple)) and (len(this_steps) == 2):
step_height = this_steps[0]
step_width = this_steps[1]
elif isinstance(this_steps, (int, float)):
step_height = this_steps
step_width = this_steps
# Compute the offsets,
# i.e. at what pixel values the first anchor box center point
# will be from the top and from the left of the image.
if (this_offsets is None):
offset_height = 0.5
offset_width = 0.5
else:
if isinstance(this_offsets, (list, tuple)) and (len(this_offsets) == 2):
offset_height = this_offsets[0]
offset_width = this_offsets[1]
elif isinstance(this_offsets, (int, float)):
offset_height = this_offsets
offset_width = this_offsets
# Now that we have the offsets and step sizes, compute the grid of anchor box center points.
cy = np.linspace(offset_height * step_height,
(offset_height +
feature_map_size[0] - 1) * step_height,
feature_map_size[0])
cx = np.linspace(offset_width * step_width,
(offset_width + feature_map_size[1] - 1) * step_width,
feature_map_size[1])
cx_grid, cy_grid = np.meshgrid(cx, cy)
# This is necessary for np.tile() to do what we want further down
cx_grid = np.expand_dims(cx_grid, -1)
# This is necessary for np.tile() to do what we want further down
cy_grid = np.expand_dims(cy_grid, -1)
# Create a 4D tensor template of shape
# `(feature_map_height, feature_map_width, n_boxes, 4)`
# where the last dimension will contain `(cx, cy, w, h)`
boxes_tensor = np.zeros(
(feature_map_size[0], feature_map_size[1], n_boxes, 4))
boxes_tensor[:, :, :, 0] = np.tile(cx_grid, (1, 1, n_boxes)) # Set cx
boxes_tensor[:, :, :, 1] = np.tile(cy_grid, (1, 1, n_boxes)) # Set cy
boxes_tensor[:, :, :, 2] = wh_list[:, 0] # Set w
boxes_tensor[:, :, :, 3] = wh_list[:, 1] # Set h
# Convert `(cx, cy, w, h)` to `(xmin, ymin, xmax, ymax)`
boxes_tensor = convert_coordinates(
boxes_tensor, start_index=0, conversion='centroids2corners')
# If `clip_boxes` is enabled, clip the coordinates to lie within the image boundaries
if self.clip_boxes:
x_coords = boxes_tensor[:, :, :, [0, 2]]
x_coords[x_coords >= self.img_width] = self.img_width - 1
x_coords[x_coords < 0] = 0
boxes_tensor[:, :, :, [0, 2]] = x_coords
y_coords = boxes_tensor[:, :, :, [1, 3]]
y_coords[y_coords >= self.img_height] = self.img_height - 1
y_coords[y_coords < 0] = 0
boxes_tensor[:, :, :, [1, 3]] = y_coords
# `normalize_coords` is enabled, normalize the coordinates to be within [0,1]
if self.normalize_coords:
boxes_tensor[:, :, :, [0, 2]] /= self.img_width
boxes_tensor[:, :, :, [1, 3]] /= self.img_height
# TODO: Implement box limiting directly for `(cx, cy, w, h)`
# so that we don't have to unnecessarily convert back and forth.
if self.coords == 'centroids':
# Convert `(xmin, ymin, xmax, ymax)` back to `(cx, cy, w, h)`.
boxes_tensor = convert_coordinates(
boxes_tensor, start_index=0, conversion='corners2centroids', border_pixels='half')
elif self.coords == 'minmax':
            # Convert `(xmin, ymin, xmax, ymax)` to `(xmin, xmax, ymin, ymax)`.
boxes_tensor = convert_coordinates(
boxes_tensor, start_index=0, conversion='corners2minmax', border_pixels='half')
if diagnostics:
return boxes_tensor, (cy, cx), wh_list, (step_height, step_width),\
(offset_height, offset_width)
return boxes_tensor
def generate_encoding_template(self, diagnostics=False):
'''
        Produces the per-image encoding template for the ground truth label tensor.
        Arguments:
            diagnostics (bool, optional): If `True`, also return the anchor box diagnostics.
        Returns:
            A Numpy array of shape `(#boxes, 1 + #classes + 12)` holding, per anchor, the class
            weight, one-hot class vector, anchor box coordinates (twice) and variances.
'''
# Tile the anchor boxes for each predictor layer across all batch items.
boxes_batch = []
for boxes in self.boxes_list:
boxes = np.expand_dims(boxes, axis=0)
boxes = np.reshape(boxes, (-1, 4))
boxes_batch.append(boxes)
# Concatenate the anchor tensors from the individual layers to one.
# boxes_tensor = np.concatenate(boxes_batch, axis=1)
boxes_tensor = np.concatenate(boxes_batch, axis=0)
classes_tensor = np.zeros((boxes_tensor.shape[0], self.n_classes))
classes_weights = np.ones((boxes_tensor.shape[0], 1))
# 4: Create a tensor to contain the variances.
variances_tensor = np.zeros_like(boxes_tensor)
variances_tensor += self.variances # Long live broadcasting
self.variances_tensor = variances_tensor
# 4: Concatenate the classes, boxes and variances tensors
y_encoding_template = np.concatenate(
(classes_weights, classes_tensor, boxes_tensor, boxes_tensor, variances_tensor), axis=1)
if diagnostics:
return y_encoding_template, self.centers_diag, \
self.wh_list_diag, self.steps_diag, self.offsets_diag
return y_encoding_template
def __call__(self, ground_truth_labels, diagnostics=False):
'''
Converts ground truth bounding box data into a suitable format to train an SSD model.
Arguments:
            ground_truth_labels (np.ndarray): A 2D Numpy array for a single image with `k` rows
                for the `k` ground truth bounding boxes belonging to that image. The data for
                each ground truth bounding box has the format `(class_id, xmin, ymin, xmax, ymax)`
                (i.e. the 'corners' coordinate format), and `class_id` must be an integer greater
                than 0 for all boxes as class ID 0 is reserved for the background class.
        Returns:
            `y_encoded`, a 2D numpy array of shape `(#boxes, 1 + #classes + 4 + 4 + 4)` that
            serves as the ground truth label tensor for training, where `#boxes` is the total
            number of boxes predicted by the model per image. The first element in the last axis
            is the class weight, followed by the one-hot class vector, the four box coordinate
            offsets, the four anchor box coordinates and the four variances.
'''
class_id = 0
xmin = 1
ymin = 2
xmax = 3
ymax = 4
y_encoded = np.copy(self.y_encoding_template)
##################################################################################
# Match ground truth boxes to anchor boxes.
##################################################################################
# Match the ground truth boxes to the anchor boxes. Every anchor box that does not have
# a ground truth match and for which the maximal IoU overlap with any ground truth box is
# less than or equal to `neg_iou_limit` will be a negative (background) box.
# All boxes are background boxes by default.
# +1 for class weights
y_encoded[:, self.background_id + 1] = 1
# The total number of boxes that the model predicts per batch item
# An identity matrix that we'll use as one-hot class vectors
class_vectors = np.eye(self.n_classes)
# If there is no ground truth for this batch item, there is nothing to match.
if ground_truth_labels.size != 0:
labels = ground_truth_labels.astype(
np.float) # The labels for this batch item
# Check for degenerate ground truth bounding boxes before attempting any computations.
if np.any(labels[:, [xmax]] - labels[:, [xmin]] <= 0) or \
np.any(labels[:, [ymax]] - labels[:, [ymin]] <= 0):
                raise ValueError("Input encoder detected degenerate ground truth bounding "
                                 "boxes: {}, i.e. bounding boxes where xmax <= xmin and/or "
                                 "ymax <= ymin. Degenerate ground truth bounding boxes will "
                                 "lead to NaN during the training.".format(labels))
# Maybe normalize the box coordinates.
if self.normalize_coords:
# Normalize ymin and ymax relative to the image height
labels[:, [ymin, ymax]] /= self.img_height
# Normalize xmin and xmax relative to the image width
labels[:, [xmin, xmax]] /= self.img_width
# Maybe convert the box coordinate format.
if self.coords == 'centroids':
labels = convert_coordinates(
labels, start_index=xmin,
conversion='corners2centroids', border_pixels=self.border_pixels)
elif self.coords == 'minmax':
labels = convert_coordinates(
labels, start_index=xmin, conversion='corners2minmax')
# The one-hot class IDs for the ground truth boxes of this batch item
classes_one_hot = class_vectors[(labels[:, class_id]).astype(np.int)]
classes_weights = self.class_weights[(labels[:, class_id]).astype(np.int)].reshape([-1,
1])
# The one-hot version of the labels for this batch item
labels_one_hot = np.concatenate(
[classes_weights, classes_one_hot, labels[:, [xmin, ymin, xmax, ymax]]], axis=-1)
# Compute the IoU similarities between all anchor boxes
# and all ground truth boxes for this batch item.
# This is a matrix of shape `(num_ground_truth_boxes, num_anchor_boxes)`.
similarities = iou(labels[:, [xmin, ymin, xmax, ymax]], y_encoded[:, -12:-8],
coords=self.coords, mode='outer_product',
border_pixels=self.border_pixels)
# First: Do bipartite matching, i.e. match each ground truth box
# to the one anchor box with the highest IoU.
# This ensures that each ground truth box will have at least one good match.
# For each ground truth box, get the anchor box to match with it.
bipartite_matches = match_bipartite_greedy(
weight_matrix=similarities)
# Write the ground truth data to the matched anchor boxes.
y_encoded[bipartite_matches, :-8] = labels_one_hot
# Write the highest IOU flag
y_encoded[bipartite_matches, -1] = 1024
# Set the columns of the matched anchor boxes to
# zero to indicate that they were matched.
similarities[:, bipartite_matches] = 0
# Second: Maybe do 'multi' matching, where each remaining anchor
# box will be matched to its most similar
# ground truth box with an IoU of at least `pos_iou_threshold`,
# or not matched if there is no
# such ground truth box.
if self.matching_type == 'multi':
# Get all matches that satisfy the IoU threshold.
matches = match_multi(
weight_matrix=similarities, threshold=self.pos_iou_threshold)
# Write the ground truth data to the matched anchor boxes.
y_encoded[matches[1], :-8] = labels_one_hot[matches[0]]
# Set the columns of the matched anchor boxes to
# zero to indicate that they were matched.
similarities[:, matches[1]] = 0
# Third: Now after the matching is done, all negative (background)
# anchor boxes that have
# an IoU of `neg_iou_limit` or more with
            # any ground truth box will be set to neutral,
# i.e. they will no longer be background boxes.
# These anchors are "too close" to a
# ground truth box to be valid background boxes.
max_background_similarities = np.amax(similarities, axis=0)
neutral_boxes = np.nonzero(
max_background_similarities >= self.neg_iou_limit)[0]
# +1 for class weights
y_encoded[neutral_boxes, self.background_id + 1] = 0
##################################################################################
# Convert box coordinates to anchor box offsets.
##################################################################################
if self.coords == 'centroids':
# cx(gt) - cx(anchor), cy(gt) - cy(anchor)
y_encoded[:, [-12, -11]] -= y_encoded[:, [-8, -7]]
# (cx(gt) - cx(anchor)) / w(anchor) / cx_variance,
# (cy(gt) - cy(anchor)) / h(anchor) / cy_variance
# y_encoded[:, [-12, -11]] /= y_encoded[:,
# [-6, -5]] * y_encoded[:, [-4, -3]]
y_encoded[:, [-12, -11]] /= y_encoded[:, [-6, -5]] * self.variances_tensor[:, [-4, -3]]
# w(gt) / w(anchor), h(gt) / h(anchor)
y_encoded[:, [-10, -9]] /= y_encoded[:, [-6, -5]]
# ln(w(gt) / w(anchor)) / w_variance,
# ln(h(gt) / h(anchor)) / h_variance (ln == natural logarithm)
# y_encoded[:, [-10, -9]
# ] = np.log(y_encoded[:, [-10, -9]]) / y_encoded[:, [-2, -1]]
y_encoded[:, [-10, -9]] = \
np.log(y_encoded[:, [-10, -9]]) / self.variances_tensor[:, [-2, -1]]
elif self.coords == 'corners':
# (gt - anchor) for all four coordinates
y_encoded[:, -12:-8] -= y_encoded[:, -8:-4]
# (xmin(gt) - xmin(anchor)) / w(anchor), (xmax(gt) - xmax(anchor)) / w(anchor)
y_encoded[:, [-12, -10]
] /= np.expand_dims(y_encoded[:, -6] - y_encoded[:, -8], axis=-1)
# (ymin(gt) - ymin(anchor)) / h(anchor), (ymax(gt) - ymax(anchor)) / h(anchor)
y_encoded[:, [-11, -9]
] /= np.expand_dims(y_encoded[:, -5] - y_encoded[:, -7], axis=-1)
# (gt - anchor) / size(anchor) / variance for all four coordinates,
# where 'size' refers to w and h respectively
y_encoded[:, -12:-8] /= y_encoded[:, -4:]
elif self.coords == 'minmax':
# (gt - anchor) for all four coordinates
y_encoded[:, -12:-8] -= y_encoded[:, -8:-4]
# (xmin(gt) - xmin(anchor)) / w(anchor), (xmax(gt) - xmax(anchor)) / w(anchor)
y_encoded[:, [-12, -11]
] /= np.expand_dims(y_encoded[:, -7] - y_encoded[:, -8], axis=-1)
# (ymin(gt) - ymin(anchor)) / h(anchor), (ymax(gt) - ymax(anchor)) / h(anchor)
y_encoded[:, [-10, -9]
] /= np.expand_dims(y_encoded[:, -5] - y_encoded[:, -6], axis=-1)
# (gt - anchor) / size(anchor) / variance for all four coordinates,
# where 'size' refers to w and h respectively
y_encoded[:, -12:-8] /= y_encoded[:, -4:]
if diagnostics:
# Here we'll save the matched anchor boxes
# (i.e. anchor boxes that were matched to a ground truth box,
# but keeping the anchor box coordinates).
y_matched_anchors = np.copy(y_encoded)
# Keeping the anchor box coordinates means setting the offsets to zero.
y_matched_anchors[:, -12:-8] = 0
return y_encoded, y_matched_anchors
return y_encoded
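

# ----------------------------------------------------------------------------
# Hedged usage sketch (not part of the original pipeline): exercises the encoder
# above on a toy configuration copied from the unit tests. The predictor sizes,
# scales and aspect ratios below are illustrative values, not a tuned setup.
if __name__ == "__main__":
    encoder = InputEncoder(img_height=300,
                           img_width=300,
                           n_classes=3,
                           predictor_sizes=[(1, 2), (1, 2)],
                           scales=[0.1, 0.88, 1.05],
                           aspect_ratios_per_layer=[[1.0, 2.0, 0.5, 3.0, 1.0 / 3.0],
                                                    [1.0, 2.0, 0.5]],
                           two_boxes_for_ar1=True,
                           variances=[0.1, 0.1, 0.2, 0.2],
                           pos_iou_threshold=0.5,
                           neg_iou_limit=0.5,
                           normalize_coords=True)
    # Ground truth boxes in (class_id, xmin, ymin, xmax, ymax) format.
    labels = np.array([[0, 10, 10, 100, 100], [1, 2, 3, 6, 8]])
    # Each row of the result is one anchor: class weight, one-hot classes,
    # regression offsets, anchor coordinates and variances.
    print(encoder(labels).shape)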
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/box_coder/input_encoder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
RetinaNet label encoder.
Code partially from GitHub (Apache v2 license):
https://github.com/pierluigiferrari/ssd_keras/tree/3ac9adaf3889f1020d74b0eeefea281d5e82f353
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from nvidia_tao_tf1.cv.ssd.utils.box_utils import (
bipartite_match_row,
corners_to_centroids,
iou,
multi_match
)
from nvidia_tao_tf1.cv.ssd.utils.box_utils import np_convert_coordinates
from nvidia_tao_tf1.cv.ssd.utils.tensor_utils import tensor_slice_replace, tensor_strided_replace
class InputEncoderTF:
'''
Encoder class.
Transforms ground truth labels for object detection in images
(2D bounding box coordinates and class labels) to the format required for
training an SSD or RetinaNet model.
In the process of encoding the ground truth labels, a template of anchor boxes
is being built, which are subsequently matched to the ground truth boxes
via an intersection-over-union threshold criterion.
'''
def __init__(self,
img_height,
img_width,
n_classes,
predictor_sizes,
min_scale=0.1,
max_scale=0.9,
scales=None,
aspect_ratios_global=None,
aspect_ratios_per_layer=None,
two_boxes_for_ar1=True,
steps=None,
n_anchor_levels=3,
offsets=None,
clip_boxes=False,
variances=None,
pos_iou_threshold=0.5,
neg_iou_limit=0.3,
normalize_coords=True,
gt_normalized=False,
class_weights=None):
'''
Init encoder.
Arguments:
img_height (int): The height of the input images.
img_width (int): The width of the input images.
n_classes (int): The number of positive classes, e.g. 20 for Pascal VOC, 80 for MS COCO.
predictor_sizes (list): A list of int-tuples of the format `(height, width)`
containing the output heights and widths of the convolutional predictor layers.
min_scale (float, optional): The smallest scaling factor for the size of the anchor
boxes as a fraction of the shorter side of the input images. Note that you should
set the scaling factors such that the resulting anchor box sizes correspond to the
sizes of the objects you are trying to detect. Must be >0.
max_scale (float, optional): The largest scaling factor for the size of the anchor boxes
as a fraction of the shorter side of the input images. All scaling factors between
the smallest and the largest will be linearly interpolated. Note that the second to
last of the linearly interpolated scaling factors will actually be the scaling
factor for the last predictor layer, while the last scaling factor is used for the
second box for aspect ratio 1 in the last predictor layer if `two_boxes_for_ar1` is
`True`. Note that you should set the scaling factors such that the resulting anchor
box sizes correspond to the sizes of the objects you are trying to detect. Must be
greater than or equal to `min_scale`.
scales (list, optional): A list of floats >0 containing scaling factors per
convolutional predictor layer. This list must be one element longer than the number
of predictor layers. The first `k` elements are the scaling factors for the `k`
predictor layers, while the last element is used for the second box for aspect ratio
1 in the last predictor layer if `two_boxes_for_ar1` is `True`. This additional last
scaling factor must be passed either way, even if it is not being used. If a list is
passed, this argument overrides `min_scale` and `max_scale`. All scaling factors
must be greater than zero. Note that you should set the scaling factors such that
the resulting anchor box sizes correspond to the sizes of the objects you are trying
to detect.
aspect_ratios_global (list, optional): The list of aspect ratios for which anchor boxes
are to be generated. This list is valid for all prediction layers. Note that you
should set the aspect ratios such that the resulting anchor box shapes roughly
correspond to the shapes of the objects you are trying to detect.
aspect_ratios_per_layer (list, optional): A list containing one aspect ratio list for
each prediction layer. If a list is passed, it overrides `aspect_ratios_global`.
Note that you should set the aspect ratios such that the resulting anchor box shapes
very roughly correspond to the shapes of the objects you are trying to detect.
two_boxes_for_ar1 (bool, optional): Only relevant for aspect ratios lists that contain
1. Will be ignored otherwise. If `True`, two anchor boxes will be generated for
aspect ratio 1. The first will be generated using the scaling factor for the
respective layer, the second one will be generated using geometric mean of said
scaling factor and next bigger scaling factor.
steps (list, optional): `None` or a list with as many elements as there are predictor
layers. The elements can be either ints/floats or tuples of two ints/floats. These
numbers represent for each predictor layer how many pixels apart the anchor box
center points should be vertically and horizontally along the spatial grid over the
image. If the list contains ints/floats, then that value will be used for both
spatial dimensions. If the list contains tuples of two ints/floats, then they
represent `(step_height, step_width)`. If no steps are provided, then they will be
computed such that the anchor box center points will form an equidistant grid within
the image dimensions.
offsets (list, optional): `None` or a list with as many elements as there are predictor
layers. The elements can be either floats or tuples of two floats. These numbers
                represent for each predictor layer how many pixels from the top and left borders of
the image the top-most and left-most anchor box center points should be as a
fraction of `steps`. The last bit is important: The offsets are not absolute pixel
values, but fractions of the step size specified in the `steps` argument. If the
list contains floats, then that value will be used for both spatial dimensions. If
the list contains tuples of two floats, then they represent
`(vertical_offset, horizontal_offset)`. If no offsets are provided, then they will
default to 0.5 of the step size.
clip_boxes (bool, optional): If `True`, limits the anchor box coordinates to stay within
image boundaries.
variances (list, optional): A list of 4 floats >0. The anchor box offset for each
coordinate will be divided by its respective variance value.
pos_iou_threshold (float, optional): The intersection-over-union similarity threshold
that must be met in order to match a given ground truth box to a given anchor box.
neg_iou_limit (float, optional): The maximum allowed intersection-over-union similarity
of an anchor box with any ground truth box to be labeled a negative
(i.e. background) box. If an anchor box is neither a positive, nor a negative box,
it will be ignored during training.
normalize_coords (bool, optional): If `True`, the encoder uses relative instead of
                absolute coordinates. This means instead of using absolute target coordinates, the
encoder will scale all coordinates to be within [0,1]. This way learning becomes
independent of the input image size.
'''
predictor_sizes = np.array(predictor_sizes)
if predictor_sizes.ndim == 1:
predictor_sizes = np.expand_dims(predictor_sizes, axis=0)
##################################################################################
# Handle exceptions.
##################################################################################
if (min_scale is None or max_scale is None) and scales is None:
raise ValueError("Either `min_scale` and `max_scale` or `scales` need to be specified.")
if scales:
if (len(scales) != predictor_sizes.shape[0] + 1):
# Must be two nested `if` statements since `list` and `bool` can't be combined by &
raise ValueError("It must be either scales is None or len(scales) == \
len(predictor_sizes)+1, but len(scales) == {} and len(predictor_sizes)+1 == {}"
.format(len(scales), len(predictor_sizes)+1))
scales = np.array(scales)
if np.any(scales <= 0):
raise ValueError("All values in `scales` must be greater than 0, but the passed \
list of scales is {}".format(scales))
else:
# If no scales passed, we make sure that `min_scale` and `max_scale` are valid values.
if not 0 < min_scale <= max_scale:
raise ValueError("It must be 0 < min_scale <= max_scale, but it is min_scale = {} \
and max_scale = {}".format(min_scale, max_scale))
if not (aspect_ratios_per_layer is None):
if (len(aspect_ratios_per_layer) != predictor_sizes.shape[0]):
# Must be two nested `if` statements since `list` and `bool` can't be combined by &
raise ValueError("It must be either aspect_ratios_per_layer is None or \
len(aspect_ratios_per_layer) == len(predictor_sizes), but len(aspect_ratios_per_layer) == {} \
and len(predictor_sizes) == {}".format(len(aspect_ratios_per_layer), len(predictor_sizes)))
for aspect_ratios in aspect_ratios_per_layer:
if np.any(np.array(aspect_ratios) <= 0):
raise ValueError("All aspect ratios must be greater than zero.")
else:
if (aspect_ratios_global is None):
raise ValueError("At least one of `aspect_ratios_global` and \
`aspect_ratios_per_layer` must not be `None`.")
if np.any(np.array(aspect_ratios_global) <= 0):
raise ValueError("All aspect ratios must be greater than zero.")
if len(variances) != 4:
raise ValueError("4 variance values must be pased, but {} values were received."
.format(len(variances)))
variances = np.array(variances)
if np.any(variances <= 0):
raise ValueError("All variances must be >0, but the variances given are {}"
.format(variances))
if (not (steps is None)) and (len(steps) != predictor_sizes.shape[0]):
raise ValueError("You must provide at least one step value per predictor layer.")
if (not (offsets is None)) and (len(offsets) != predictor_sizes.shape[0]):
raise ValueError("You must provide at least one offset value per predictor layer.")
##################################################################################
# Set or compute members.
##################################################################################
self.img_height = float(img_height)
self.img_width = float(img_width)
self.n_classes = n_classes
self.predictor_sizes = predictor_sizes
self.min_scale = min_scale
self.max_scale = max_scale
# If `scales` is None, compute the scaling factors by linearly interpolating between
# `min_scale` and `max_scale`. If an explicit list of `scales` is given, however,
        # then it takes precedence over `min_scale` and `max_scale`.
if (scales is None):
self.scales = np.linspace(self.min_scale, self.max_scale, len(self.predictor_sizes)+1)
else:
# If a list of scales is given explicitly, we'll use that instead of computing it from
# `min_scale` and `max_scale`.
self.scales = scales
# If `aspect_ratios_per_layer` is None, then we use the same list of aspect ratios
# `aspect_ratios_global` for all predictor layers. If `aspect_ratios_per_layer` is given,
        # however, then it takes precedence over `aspect_ratios_global`.
if (aspect_ratios_per_layer is None):
self.aspect_ratios = [aspect_ratios_global] * predictor_sizes.shape[0]
else:
# If aspect ratios are given per layer, we'll use those.
self.aspect_ratios = aspect_ratios_per_layer
self.two_boxes_for_ar1 = two_boxes_for_ar1
if not (steps is None):
self.steps = steps
else:
self.steps = [None] * predictor_sizes.shape[0]
if not (offsets is None):
self.offsets = offsets
else:
self.offsets = [None] * predictor_sizes.shape[0]
self.clip_boxes = clip_boxes
self.variances = variances
self.pos_iou_threshold = pos_iou_threshold
self.neg_iou_limit = neg_iou_limit
self.normalize_coords = normalize_coords
self.gt_normalized = gt_normalized
# Compute the number of boxes per spatial location for each predictor layer.
# For example, if a predictor layer has three different aspect ratios, [1.0, 0.5, 2.0], and
# is supposed to predict two boxes of slightly different size for aspect ratio 1.0, then
# that predictor layer predicts a total of four boxes at every spatial location across the
# feature map.
if not (aspect_ratios_per_layer is None):
self.n_boxes = []
for aspect_ratios in aspect_ratios_per_layer:
if (1 in aspect_ratios) & two_boxes_for_ar1:
self.n_boxes.append(len(aspect_ratios) + 1)
else:
self.n_boxes.append(len(aspect_ratios))
else:
if (1 in aspect_ratios_global) & two_boxes_for_ar1:
self.n_boxes = n_anchor_levels * (len(aspect_ratios_global) + 1)
else:
self.n_boxes = n_anchor_levels * len(aspect_ratios_global)
self.n_anchor_levels = n_anchor_levels
self.anchor_sizes = np.power(2, np.linspace(0, 1, 1+n_anchor_levels))[:-1]
##################################################################################
# Compute the anchor boxes for each predictor layer.
##################################################################################
# Compute the anchor boxes for each predictor layer. We only have to do this once
# since the anchor boxes depend only on the model configuration, not on the input data.
# For each predictor layer (i.e. for each scaling factor) the tensors for that layer's
# anchor boxes will have the shape `(feature_map_height, feature_map_width, n_boxes, 4)`.
        boxes_list = []  # This will store the anchor boxes for each predictor layer.
boxes_corner = []
# Iterate over all predictor layers and compute the anchor boxes for each one.
for i in range(len(self.predictor_sizes)):
boxes, box_corner = self.__anchor_layer(feature_map_size=self.predictor_sizes[i],
aspect_ratios=self.aspect_ratios[i],
this_scale=self.scales[i],
next_scale=self.scales[i+1],
this_steps=self.steps[i],
this_offsets=self.offsets[i])
boxes_list.append(boxes)
boxes_corner.append(box_corner)
anchor_box_list_np = np.concatenate([i.reshape((-1, 4)) for i in boxes_corner], axis=0)
self.anchorbox_tensor = tf.convert_to_tensor(anchor_box_list_np, dtype=tf.float32)
self.encoding_template_tensor = tf.convert_to_tensor(self.__encode_template(boxes_list),
dtype=tf.float32)
if class_weights is None:
self.class_weights = np.ones(self.n_classes, dtype=np.float32)
else:
self.class_weights = np.array(class_weights, dtype=np.float32)
self.class_weights = tf.constant(self.class_weights, dtype=tf.float32)
def __anchor_layer(self,
feature_map_size,
aspect_ratios,
this_scale,
next_scale,
this_steps=None,
this_offsets=None):
'''
Generate numpy anchors for each layer.
Computes an array of the spatial positions and sizes of the anchor boxes for one predictor
layer of size `feature_map_size == [feature_map_height, feature_map_width]`.
Arguments:
feature_map_size (tuple): A list or tuple `[feature_map_height, feature_map_width]` with
the spatial dimensions of the feature map for which to generate the anchor boxes.
aspect_ratios (list): A list of floats, the aspect ratios for which anchor boxes are to
be generated. All list elements must be unique.
            this_scale (float): A float in [0, 1], the scaling factor for the size of the generated
anchor boxes as a fraction of the shorter side of the input image.
next_scale (float): A float in [0, 1], the next larger scaling factor. Only relevant if
`self.two_boxes_for_ar1 == True`.
        Returns:
            Two 4D np tensors of shape
            `(feature_map_height, feature_map_width, n_boxes_per_cell, 4)`: the first holds
            `(cx, cy, w, h)` centroid coordinates and the second `(xmin, ymin, xmax, ymax)`
            corner coordinates for each anchor box in each cell of the feature map.
'''
# Compute box width and height for each aspect ratio.
# The shorter side of the image will be used to compute `w` and `h` using `scale` and
# `aspect_ratios`.
size = min(self.img_height, self.img_width)
        # Compute the box widths and heights for all aspect ratios
wh_list = []
for scale_augment in self.anchor_sizes:
# 2^0, 2^(1/3), 2^(2/3), used in the original paper
for ar in aspect_ratios:
if (ar == 1):
# Compute the regular anchor box for aspect ratio 1.
box_height = this_scale * size * scale_augment
box_width = this_scale * size * scale_augment
wh_list.append((box_width, box_height))
if self.two_boxes_for_ar1:
# Compute one slightly larger version using the geometric mean of this scale
# value and the next.
box_height = np.sqrt(this_scale * next_scale) * size * scale_augment
box_width = np.sqrt(this_scale * next_scale) * size * scale_augment
wh_list.append((box_width, box_height))
else:
box_width = this_scale * size * scale_augment * np.sqrt(ar)
box_height = this_scale * size * scale_augment / np.sqrt(ar)
wh_list.append((box_width, box_height))
wh_list = np.array(wh_list)
n_boxes = len(wh_list)
# Compute the grid of box center points. They are identical for all aspect ratios.
# Compute the step sizes, i.e. how far apart the anchor box center points will be vertically
# and horizontally.
if (this_steps is None):
step_height = self.img_height / feature_map_size[0]
step_width = self.img_width / feature_map_size[1]
else:
if isinstance(this_steps, (list, tuple)) and (len(this_steps) == 2):
step_height = this_steps[0]
step_width = this_steps[1]
elif isinstance(this_steps, (int, float)):
step_height = this_steps
step_width = this_steps
# Compute the offsets, i.e. at what pixel values the first anchor box center point will be
# from the top and from the left of the image.
if (this_offsets is None):
offset_height = 0.5
offset_width = 0.5
else:
if isinstance(this_offsets, (list, tuple)) and (len(this_offsets) == 2):
offset_height = this_offsets[0]
offset_width = this_offsets[1]
elif isinstance(this_offsets, (int, float)):
offset_height = this_offsets
offset_width = this_offsets
# Now that we have the offsets and step sizes, compute the grid of anchor box center points.
cy = np.linspace(offset_height * step_height,
(offset_height + feature_map_size[0] - 1) * step_height,
feature_map_size[0])
cx = np.linspace(offset_width * step_width,
(offset_width + feature_map_size[1] - 1) * step_width, feature_map_size[1])
cx_grid, cy_grid = np.meshgrid(cx, cy)
# This is necessary for np.tile() to do what we want further down
cx_grid = np.expand_dims(cx_grid, -1)
# This is necessary for np.tile() to do what we want further down
cy_grid = np.expand_dims(cy_grid, -1)
# Create a 4D tensor template of shape `(feature_map_height, feature_map_width, n_boxes, 4)`
# where the last dimension will contain `(cx, cy, w, h)`
boxes_tensor = np.zeros((feature_map_size[0], feature_map_size[1], n_boxes, 4))
boxes_tensor[:, :, :, 0] = np.tile(cx_grid, (1, 1, n_boxes)) # Set cx
boxes_tensor[:, :, :, 1] = np.tile(cy_grid, (1, 1, n_boxes)) # Set cy
boxes_tensor[:, :, :, 2] = wh_list[:, 0] # Set w
boxes_tensor[:, :, :, 3] = wh_list[:, 1] # Set h
# Convert `(cx, cy, w, h)` to `(xmin, ymin, xmax, ymax)`
boxes_tensor = np_convert_coordinates(boxes_tensor,
start_index=0,
conversion='centroids2corners')
# If `clip_boxes` is enabled, clip the coordinates to lie within the image boundaries
if self.clip_boxes:
x_coords = boxes_tensor[:, :, :, [0, 2]]
x_coords[x_coords >= self.img_width] = self.img_width - 1
x_coords[x_coords < 0] = 0
boxes_tensor[:, :, :, [0, 2]] = x_coords
y_coords = boxes_tensor[:, :, :, [1, 3]]
y_coords[y_coords >= self.img_height] = self.img_height - 1
y_coords[y_coords < 0] = 0
boxes_tensor[:, :, :, [1, 3]] = y_coords
        # If `normalize_coords` is enabled, normalize the coordinates to be within [0,1]
if self.normalize_coords:
boxes_tensor[:, :, :, [0, 2]] /= self.img_width
boxes_tensor[:, :, :, [1, 3]] /= self.img_height
box_tensor_corner = np.array(boxes_tensor)
# Convert `(xmin, ymin, xmax, ymax)` back to `(cx, cy, w, h)`.
boxes_tensor = np_convert_coordinates(boxes_tensor,
start_index=0,
conversion='corners2centroids')
return boxes_tensor, box_tensor_corner
def __encode_template(self, boxes_list):
# Tile the anchor boxes for each predictor layer across all batch items.
boxes_batch = []
for boxes in boxes_list:
boxes = np.reshape(boxes, (-1, 4))
boxes_batch.append(boxes)
# Concatenate the anchor tensors from the individual layers to one.
boxes_tensor = np.concatenate(boxes_batch, axis=0)
# 3: Create a template tensor to hold the one-hot class encodings of shape
# `(batch, #boxes, #classes)`. It will contain all zeros for now, the classes will be set in
# the matching process that follows
classes_tensor = np.zeros((boxes_tensor.shape[0], self.n_classes))
classes_tensor[:, 0] = 1
classes_weights = np.ones((boxes_tensor.shape[0], 1))
# 4: Create a tensor to contain the variances. This tensor has the same shape as
# `boxes_tensor` and simply contains the same 4 variance values for every position in the
# last axis.
variances_tensor = np.zeros_like(boxes_tensor)
variances_tensor += self.variances # Long live broadcasting
self.variances_tensor = tf.convert_to_tensor(variances_tensor, dtype=tf.float32)
# 4: Concatenate the classes, boxes and variances tensors to get our final template for
# y_encoded. We also need another tensor of the shape of `boxes_tensor` as a space filler
# so that `y_encoding_template` has the same shape as the SSD model output tensor. The
# content of this tensor is irrelevant, we'll just use `boxes_tensor` a second time.
# Add class weights
y_encoding_template = np.concatenate((classes_weights, classes_tensor,
boxes_tensor, boxes_tensor, variances_tensor), axis=1)
return y_encoding_template
def __call__(self, ground_truth_labels):
'''
        Convert ground truth bounding box data into a suitable format to train an SSD/RetinaNet model.
Arguments:
ground_truth_labels (list): A python list of length `batch_size` that contains one 2D
Numpy array for each batch image. Each such array has `k` rows for the `k` ground
truth bounding boxes belonging to the respective image, and the data for each ground
truth bounding box has the format `(class_id, xmin, ymin, xmax, ymax)` (i.e. the
'corners' coordinate format), and `class_id` must be an integer greater than 0 for
all boxes as class ID 0 is reserved for the background class.
Returns:
`y_encoded`, a 3D numpy array of shape `(batch_size, #boxes, #classes + 4 + 4 + 4)` that
serves as the ground truth label tensor for training, where `#boxes` is the total number
of boxes predicted by the model per image, and the classes are one-hot-encoded. The four
        elements after the class vectors in the last axis are the box coordinates, the next four
elements after that are just dummy elements, and the last four elements are the
variances.
'''
y_encoded = []
##################################################################################
# Match ground truth boxes to anchor boxes.
##################################################################################
# Match the ground truth boxes to the anchor boxes. Every anchor box that does not have
# a ground truth match and for which the maximal IoU overlap with any ground truth box is
# less than or equal to `neg_iou_limit` will be a negative (background) box.
for gt_label in ground_truth_labels: # For each batch item...
match_y = tf.cond(tf.equal(tf.shape(gt_label)[0], 0),
lambda: self.encoding_template_tensor,
lambda label=gt_label: self.__calc_matched_anchor_gt(label))
y_encoded.append(match_y)
y_encoded = tf.stack(y_encoded, axis=0)
##################################################################################
# Convert box coordinates to anchor box offsets.
##################################################################################
y_encoded = self.__tf_convert_anchor_to_offset(y_encoded)
return y_encoded
def __tf_convert_anchor_to_offset(self, tensor):
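        """Convert matched ground truth box coordinates into anchor box offsets.

        Descriptive note added for clarity: this replaces columns [-12, -8) of `tensor`
        (the matched ground truth centroids) with the standard anchor-relative targets
        ((cx_gt - cx_anchor) / w_anchor / cx_variance,
         (cy_gt - cy_anchor) / h_anchor / cy_variance,
         log(w_gt / w_anchor) / w_variance,
         log(h_gt / h_anchor) / h_variance).
        """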
return tensor_strided_replace(tensor, (-12, -8), tf.concat([
tf.truediv(tensor[..., -12:-10] - tensor[..., -8:-6],
tensor[..., -6:-4] * self.variances_tensor[..., -4:-2]),
tf.truediv(tf.log(tf.truediv(tensor[..., -10:-8], tensor[..., -6:-4])),
self.variances_tensor[..., -2:])
], axis=-1), axis=-1)
def __calc_matched_anchor_gt(self, gt_label):
gt_label = tf.cast(gt_label, tf.float32)
# Maybe normalize the box coordinates.
confs = tf.gather(gt_label, tf.constant(0, dtype=tf.int32), axis=-1)
if self.normalize_coords:
# gt_label is [class_id, xmin, ymin, xmax, ymax]
"""
gt_label = tf.stack([gt_label[:, 0],
tf.truediv(gt_label[:, 1], self.img_width),
tf.truediv(gt_label[:, 2], self.img_height),
tf.truediv(gt_label[:, 3], self.img_width),
tf.truediv(gt_label[:, 4], self.img_height),
], axis=-1)
"""
if not self.gt_normalized:
x_mins = tf.gather(gt_label, tf.constant(1, dtype=tf.int32), axis=-1)
y_mins = tf.gather(gt_label, tf.constant(2, dtype=tf.int32), axis=-1)
x_maxs = tf.gather(gt_label, tf.constant(3, dtype=tf.int32), axis=-1)
y_maxs = tf.gather(gt_label, tf.constant(4, dtype=tf.int32), axis=-1)
gt_label = tf.stack([confs,
tf.truediv(x_mins, self.img_width),
tf.truediv(y_mins, self.img_height),
tf.truediv(x_maxs, self.img_width),
tf.truediv(y_maxs, self.img_height)], axis=-1)
"""
classes_one_hot = tf.one_hot(tf.reshape(tf.cast(gt_label[:, 0], tf.int32), [-1]),
self.n_classes)
"""
classes_one_hot = tf.one_hot(tf.reshape(tf.cast(confs, tf.int32), [-1]),
self.n_classes)
classes_weights = tf.gather(self.class_weights, tf.cast(gt_label[:, 0:1], tf.int32))
# Compute the IoU similarities between all anchor boxes and all ground truth boxes for this
        # batch item. This is a matrix of shape `(num_ground_truth_boxes, num_anchor_boxes)`.
gt_xys = tf.gather(gt_label, tf.range(1, 5), axis=-1)
# similarities = iou(gt_label[..., 1:], self.anchorbox_tensor)
similarities = iou(gt_xys, self.anchorbox_tensor)
# Maybe convert the box coordinate format.
# gt_label = corners_to_centroids(gt_label, start_index=1)
gt_centroid = corners_to_centroids(gt_label, start_index=1)
gt_centroid = tf.gather(gt_centroid, tf.range(1, 5), axis=-1)
# labels_one_hot = tf.concat([classes_one_hot, gt_label[:, 1:]], axis=-1)
labels_one_hot = tf.concat([classes_weights, classes_one_hot, gt_centroid], axis=-1)
# First: Do bipartite matching, i.e. match each ground truth box to the one anchor box with
# the highest IoU.
# This ensures that each ground truth box will have at least one good match.
# For each ground truth box, get the anchor box to match with it.
bipartite_matches = bipartite_match_row(similarities)
# Write the ground truth data to the matched anchor boxes.
y_encoded_cls_box = self.encoding_template_tensor[:, :-8]
match_y = tensor_slice_replace(y_encoded_cls_box,
labels_one_hot,
bipartite_matches,
tf.range(tf.shape(labels_one_hot)[0]))
# Write the highest IOU flag:
end_flag = tf.expand_dims(self.encoding_template_tensor[:, -1], -1)
end_flag_on = tf.fill(tf.shape(end_flag), 1024.0)
end_flag = tensor_slice_replace(end_flag,
end_flag_on,
bipartite_matches,
bipartite_matches)
# Set the columns of the matched anchor boxes to zero to indicate that they were matched.
sim_trans = tf.transpose(similarities)
sim_trans_zero = tf.zeros_like(sim_trans)
sim_trans_replace = tensor_slice_replace(sim_trans,
sim_trans_zero,
bipartite_matches,
bipartite_matches)
similarities = tf.transpose(sim_trans_replace)
# Second: Maybe do 'multi' matching, where each remaining anchor box will be matched to its
# most similar ground truth box with an IoU of at least `pos_iou_threshold`, or not
# matched if there is no such ground truth box.
# Get all matches that satisfy the IoU threshold.
matches = multi_match(similarities, self.pos_iou_threshold)
# Write the ground truth data to the matched anchor boxes.
match_y = tensor_slice_replace(match_y,
labels_one_hot,
matches[1],
matches[0])
# Set the columns of the matched anchor boxes to zero to indicate that they were matched.
sim_trans_replace = tensor_slice_replace(sim_trans_replace,
sim_trans_zero,
matches[1],
matches[1])
similarities = tf.transpose(sim_trans_replace)
        # Third: Now after the matching is done, all negative (background) anchor boxes that have
        # an IoU of `neg_iou_limit` or more with any ground truth box will be set to neutral,
        # i.e. they will no longer be background boxes. These anchors are "too close" to a
        # ground truth box to be valid background boxes.
# +1 on background_id for class_weights
max_background_similarities = tf.reduce_max(similarities, axis=0)
neutral_boxes = tf.reshape(tf.where(max_background_similarities >= self.neg_iou_limit),
[-1])
neutral_boxes = tf.cast(neutral_boxes, tf.int32)
match_y_bg_only = tf.expand_dims(match_y[:, 1], -1)
match_y_bg_only_zero = tf.zeros_like(match_y_bg_only)
match_y_bg_only = tensor_slice_replace(match_y_bg_only,
match_y_bg_only_zero,
neutral_boxes,
neutral_boxes)
match_y = tensor_strided_replace(match_y, (1, 2), match_y_bg_only, axis=-1)
match_y = tf.concat([match_y, self.encoding_template_tensor[:, -8:-1], end_flag], axis=-1)
return match_y
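

# ----------------------------------------------------------------------------
# Hedged usage sketch (illustrative only): InputEncoderTF pre-computes its anchor
# and template tensors at construction time and encodes a list of per-image
# ground truth tensors inside the TF graph. The values below mirror the numpy
# encoder tests and are assumptions, not a recommended configuration.
if __name__ == "__main__":
    encoder = InputEncoderTF(img_height=300,
                             img_width=300,
                             n_classes=3,
                             predictor_sizes=[(1, 2), (1, 2)],
                             scales=[0.1, 0.88, 1.05],
                             aspect_ratios_global=[1.0, 2.0, 0.5],
                             two_boxes_for_ar1=True,
                             variances=[0.1, 0.1, 0.2, 0.2],
                             pos_iou_threshold=0.5,
                             neg_iou_limit=0.3,
                             normalize_coords=True)
    # One (k, 5) tensor of (class_id, xmin, ymin, xmax, ymax) labels per image.
    gt = tf.constant([[1.0, 10.0, 10.0, 100.0, 100.0]])
    # The result is a (batch, n_anchors, 1 + n_classes + 12) encoding tensor.
    print(encoder([gt]).shape)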
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/box_coder/input_encoder_tf.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test input encoder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from nvidia_tao_tf1.cv.retinanet.box_coder.input_encoder import InputEncoder
def test_input_encoder():
encoder = InputEncoder(img_height=300,
img_width=300,
n_classes=3,
predictor_sizes=[(1, 2), (1, 2)],
scales=[0.1, 0.88, 1.05],
aspect_ratios_per_layer=[[1.0, 2.0, 0.5, 3.0, 1.0/3.0],
[1.0, 2.0, 0.5]],
two_boxes_for_ar1=True,
steps=None,
offsets=None,
clip_boxes=False,
variances=[0.1, 0.1, 0.2, 0.2],
pos_iou_threshold=0.5,
neg_iou_limit=0.5,
normalize_coords=True)
gt = np.array([[0, 10, 10, 100, 100], [1, 2, 3, 6, 8]])
assert encoder(gt).shape == (60, 16)
def test_input_encoder_multimatch():
encoder = InputEncoder(img_height=300,
img_width=300,
n_classes=3,
predictor_sizes=[(1, 10), (1, 10)],
scales=[0.1, 0.88, 1.05],
aspect_ratios_per_layer=[[2.0], [2.0]],
two_boxes_for_ar1=True,
steps=None,
offsets=None,
clip_boxes=False,
variances=[0.1, 0.1, 0.2, 0.2],
pos_iou_threshold=0.01,
neg_iou_limit=0.01,
normalize_coords=True)
    gt = np.array([[1, 0, 139, 36, 171],
                   [1, 23, 139, 66, 171],
                   [0, 50, 139, 306, 171]])
assert encoder(gt).shape == (60, 16)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/box_coder/tests/test_input_encoder.py |
tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/utils/__init__.py |
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper function to load model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import tempfile
import keras
from nvidia_tao_tf1.core.utils.path_utils import expand_path
from nvidia_tao_tf1.cv.common.utils import load_keras_model
from nvidia_tao_tf1.cv.retinanet.builders import model_builder
from nvidia_tao_tf1.cv.retinanet.initializers.prior_prob import PriorProbability
from nvidia_tao_tf1.cv.retinanet.layers.anchor_box_layer import RetinaAnchorBoxes
from nvidia_tao_tf1.cv.retinanet.losses.focal_loss import FocalLoss
from nvidia_tao_tf1.encoding import encoding
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level='INFO')
logger = logging.getLogger(__name__)
custom_objs = {'RetinaAnchorBoxes': RetinaAnchorBoxes,
'PriorProbability': PriorProbability}
def get_model_with_input(model, input_layer):
"""Implement a trick to replace input tensor."""
_explored_layers = dict()
for l in model.layers:
_explored_layers[l.name] = [False, None]
layers_to_explore = [l for l in model.layers if (type(l) == keras.layers.InputLayer)]
model_outputs = {}
# Loop until we reach the last layer.
while layers_to_explore:
layer = layers_to_explore.pop(0)
# Skip layers that may be revisited in the graph to prevent duplicates.
if not _explored_layers[layer.name][0]:
# Check if all inbound layers explored for given layer.
if not all([
_explored_layers[l.name][0]
for n in layer._inbound_nodes
for l in n.inbound_layers
]):
continue
outputs = None
# Visit input layer.
if type(layer) == keras.layers.InputLayer:
                # Skip the input layer and use the outside input tensor instead.
_explored_layers[layer.name][0] = True
_explored_layers[layer.name][1] = None
layers_to_explore.extend([node.outbound_layer for
node in layer._outbound_nodes])
continue
else:
# Create new layer.
layer_config = layer.get_config()
with keras.utils.CustomObjectScope({'PriorProbability': PriorProbability}):
new_layer = type(layer).from_config(layer_config)
# Add to model.
outputs = []
for node in layer._inbound_nodes:
prev_outputs = []
for idx, l in enumerate(node.inbound_layers):
if type(l) == keras.layers.InputLayer:
prev_outputs.append(input_layer)
else:
keras_layer = _explored_layers[l.name][1]
_tmp_output = keras_layer.get_output_at(node.node_indices[idx])
prev_outputs.append(_tmp_output)
assert prev_outputs, "Expected non-input layer to have inputs."
if len(prev_outputs) == 1:
prev_outputs = prev_outputs[0]
outputs.append(new_layer(prev_outputs))
if len(outputs) == 1:
outputs = outputs[0]
weights = layer.get_weights()
if weights is not None and type(layer) != keras.layers.Dense:
try:
new_layer.set_weights(weights)
except ValueError:
print("{} is NOT loaded".format(layer.name))
outbound_nodes = layer._outbound_nodes
if not outbound_nodes:
model_outputs[layer.output.name] = outputs
layers_to_explore.extend([node.outbound_layer for node in outbound_nodes])
# Mark current layer as visited and assign output nodes to the layer.
_explored_layers[layer.name][0] = True
_explored_layers[layer.name][1] = new_layer
else:
continue
# Create new keras model object from pruned specifications.
# only use input_image as Model Input.
output_tensors = [model_outputs[l.name] for l in model.outputs if l.name in model_outputs]
new_model = keras.models.Model(inputs=input_layer, outputs=output_tensors, name=model.name)
return new_model
def load_model(model_path, experiment_spec=None, input_tensor=None, key=None):
"""Load a model either in .h5 format, .tlt format or .hdf5 format."""
_, ext = os.path.splitext(model_path)
if ext == '.h5':
# build model and load weights
assert experiment_spec is not None, "To load weights, spec file must be provided"
model = model_builder.build(experiment_spec, model_only=True, input_tensor=input_tensor)
model.load_weights(model_path)
elif ext == '.hdf5':
focalloss = FocalLoss(loc_loss_weight=experiment_spec.retinanet_config.loss_loc_weight,
alpha=experiment_spec.retinanet_config.focal_loss_alpha,
gamma=experiment_spec.retinanet_config.focal_loss_gamma)
custom_objs['compute_loss'] = focalloss.compute_loss
# directly load model, add dummy loss since loss is never required.
if input_tensor is None:
model = load_keras_model(model_path,
custom_objects=custom_objs)
bs, im_channel, im_height, im_width = model.layers[0].input_shape[:]
# make bs implicit for DALI trained model
if bs is not None:
new_input = keras.layers.Input(shape=(im_channel, im_height, im_width),
name="Input")
model = get_model_with_input(model, new_input)
else:
input_layer = keras.layers.Input(tensor=input_tensor, name="Input")
model = load_keras_model(model_path, custom_objects=custom_objs)
model = get_model_with_input(model, input_layer)
elif ext == '.tlt':
os_handle, temp_file_name = tempfile.mkstemp(suffix='.hdf5')
os.close(os_handle)
if isinstance(key, str):
key = key.encode()
with open(temp_file_name, 'wb') as temp_file, open(expand_path(model_path), 'rb') as encoded_file:
encoding.decode(encoded_file, temp_file, key)
encoded_file.close()
temp_file.close()
# recursive call
model = load_model(temp_file_name, experiment_spec, input_tensor, None)
os.remove(temp_file_name)
else:
raise NotImplementedError("{0} file is not supported!".format(ext))
return model
def load_model_as_pretrain(model_path, load_graph, n_classes,
experiment_spec=None, input_tensor=None, key=None,
kernel_regularizer=None, resume_training=False):
"""
Load a model as pretrained weights.
If the model is pruned, just return the model.
Returns:
model_train: model for training
model_eval: model for eval
optimizer: None if not resume_training
"""
model_train, model_eval = model_builder.build(experiment_spec, n_classes,
kernel_regularizer=kernel_regularizer,
input_tensor=input_tensor)
if not model_path:
logger.info("Building model from spec file...")
return model_train, model_eval, None
model_load = load_model(model_path, experiment_spec, None, key)
if resume_training:
logger.info("Resume from checkpoint...")
# using DALI
if input_tensor is not None:
input_layer = keras.layers.Input(tensor=input_tensor, name="Input")
return get_model_with_input(model_load, input_layer), model_load, model_load.optimizer
return model_load, model_load, model_load.optimizer
strict_mode = True
logger.info("Loading model weights...")
for layer in model_load.layers[1:]:
# The layer must match up to retinanet layers.
if layer.name.find('retinanet_') != -1:
strict_mode = False
try:
l_return = model_train.get_layer(layer.name)
except ValueError:
# Some layers are not there
continue
try:
l_return.set_weights(layer.get_weights())
except ValueError:
if strict_mode:
logger.info("Pruned model detected...")
assert load_graph, "Your pretrained model is a pruned graph. \
Please use pruned_model_path to set the path."
# This is a pruned model
model_config = model_load.get_config()
for layer, layer_config in zip(model_load.layers, model_config['layers']):
if hasattr(layer, 'kernel_regularizer'):
layer_config['config']['kernel_regularizer'] = kernel_regularizer
reg_model = keras.models.Model.from_config(model_config,
custom_objects=custom_objs)
reg_model.set_weights(model_load.get_weights())
# if use dali, replace input
if input_tensor is not None:
input_layer = keras.layers.Input(tensor=input_tensor, name="Input")
reg_model = get_model_with_input(reg_model, input_layer)
# Alternative: save reg_model to tmp file
# os_handle, temp_file_name = tempfile.mkstemp(suffix='.hdf5')
# os.close(os_handle)
# reg_model.save(temp_file_name, overwrite=True, include_optimizer=False)
# reg_model = load_model(temp_file_name, experiment_spec,
# input_tensor, None)
return reg_model, model_load, None
return model_train, model_eval, None
def save_model(keras_model, model_path, key, save_format=None):
"""Save a model to either .h5, .tlt or .hdf5 format."""
_, ext = os.path.splitext(model_path)
if (save_format is not None) and (save_format != ext):
# recursive call to save a correct model
return save_model(keras_model, model_path + save_format, key, None)
if ext == '.h5':
keras_model.save_weights(model_path)
elif ext == '.hdf5':
keras_model.save(model_path, overwrite=True, include_optimizer=False)
else:
raise NotImplementedError("{0} file is not supported!".format(ext))
return model_path
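

# ----------------------------------------------------------------------------
# Hedged usage sketch (the paths and the encryption key below are placeholders):
#
#     spec = load_experiment_spec('/path/to/experiment_spec.txt')   # from utils.spec_loader
#     model = load_model('/path/to/model.tlt', experiment_spec=spec, key='<key>')
#     save_model(model, '/path/to/exported_model', key='<key>', save_format='.hdf5')
#
# `.tlt` files are first decrypted into a temporary `.hdf5` file, `.hdf5` files
# are loaded as full models, and `.h5` files are treated as weights only and
# therefore require the spec file to rebuild the graph before loading.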
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/utils/model_io.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of helper functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
def eval_str(s):
"""If s is a string, return the eval results. Else return itself."""
if isinstance(s, six.string_types):
if len(s) > 0:
return eval(s)
return None
return s
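
# Example: eval_str("[0.1, 0.1, 0.2, 0.2]") returns the list [0.1, 0.1, 0.2, 0.2],
# eval_str("") returns None, and non-string inputs are returned unchanged.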
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/utils/helper.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Load an experiment spec file to run RetinaNet training, evaluation, pruning."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
from google.protobuf.text_format import Merge as merge_text_proto
from nvidia_tao_tf1.core.utils.path_utils import expand_path
import nvidia_tao_tf1.cv.retinanet.proto.experiment_pb2 as experiment_pb2
logger = logging.getLogger(__name__)
def load_proto(spec_path, proto_buffer, default_spec_path=None, merge_from_default=True):
"""Load spec from file and merge with given proto_buffer instance.
Args:
spec_path (str): location of a file containing the custom spec proto.
        proto_buffer(pb2): protocol buffer instance to be loaded.
        default_spec_path(str): location of default spec to use if merge_from_default is True.
        merge_from_default (bool): whether to merge the custom spec on top of the default spec;
            if False, spec_path must be set.
Returns:
proto_buffer(pb2): protocol buffer instance updated with spec.
"""
def _load_from_file(filename, pb2):
with open(expand_path(filename), "r") as f:
merge_text_proto(f.read(), pb2)
# Setting this flag false prevents concatenating repeated-fields
if merge_from_default:
assert default_spec_path, \
"default spec path has to be defined if merge_from_default is enabled"
# Load the default spec
_load_from_file(default_spec_path, proto_buffer)
else:
assert spec_path, "spec_path has to be defined, if merge_from_default is disabled"
# Merge a custom proto on top of the default spec, if given
if spec_path:
logger.info("Merging specification from %s", spec_path)
_load_from_file(spec_path, proto_buffer)
return proto_buffer
def load_experiment_spec(spec_path=None, merge_from_default=False):
"""Load experiment spec from a .txt file and return an experiment_pb2.Experiment object.
Args:
spec_path (str): location of a file containing the custom experiment spec proto.
merge_from_default (bool): disable default spec, if False, spec_path must be set.
Returns:
experiment_spec: protocol buffer instance of type experiment_pb2.Experiment.
"""
experiment_spec = experiment_pb2.Experiment()
file_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
default_spec_path = os.path.join(file_path, 'experiment_specs/default_spec.txt')
experiment_spec = load_proto(spec_path, experiment_spec, default_spec_path,
merge_from_default)
# dataset_config
assert len(experiment_spec.dataset_config.target_class_mapping.values()) > 0, \
"Please specify target_class_mapping"
assert len(experiment_spec.dataset_config.data_sources) > 0, "Please specify data sources"
assert len(experiment_spec.dataset_config.validation_data_sources) > 0, \
"Please specify validation data sources"
# augmentation check is in SSD augmentation (data_augmentation_chain_original_ssd.py)
assert experiment_spec.augmentation_config.output_channel in [1, 3], \
"output_channel must be either 1 or 3."
img_mean = experiment_spec.augmentation_config.image_mean
if experiment_spec.augmentation_config.output_channel == 3:
if img_mean:
assert all(c in img_mean for c in ['r', 'g', 'b']) , (
"'r', 'g', 'b' should all be present in image_mean "
"for images with 3 channels."
)
else:
if img_mean:
assert 'l' in img_mean, (
"'l' should be present in image_mean for images "
"with 1 channel."
)
# training config
assert experiment_spec.training_config.batch_size_per_gpu > 0, "batch size must be positive"
assert experiment_spec.training_config.num_epochs > 0, \
"number of training epochs (num_epochs) must be positive"
assert (experiment_spec.training_config.checkpoint_interval or 1) > 0, \
"checkpoint_interval must be positive"
# eval config
assert experiment_spec.eval_config.batch_size > 0, "batch size must be positive"
assert 0.0 < experiment_spec.eval_config.matching_iou_threshold <= 1.0, \
"matching_iou_threshold must be within (0, 1]"
# nms config
assert 0.0 < experiment_spec.nms_config.clustering_iou_threshold <= 1.0, \
"clustering_iou_threshold must be within (0, 1]"
# retinanet config
assert len(eval(experiment_spec.retinanet_config.scales)) == 6, \
"FPN should have 6 scales for configuration."
assert len(eval(experiment_spec.retinanet_config.variances)) == 4, \
"4 values must be specified for variance."
assert 0 < experiment_spec.retinanet_config.focal_loss_alpha < 1, \
"focal_loss_alpha must be within (0, 1)."
assert 0 < experiment_spec.retinanet_config.focal_loss_gamma, \
"focal_loss_gamma must be greater than 0."
assert 0 < experiment_spec.retinanet_config.n_kernels, \
"n_kernels must be greater than 0."
assert 1 < experiment_spec.retinanet_config.feature_size, \
"feature_size must be greater than 1."
assert 0 < experiment_spec.retinanet_config.n_anchor_levels, \
"n_anchor_levels must be greater than 0."
# Validate early_stopping config
if experiment_spec.training_config.HasField("early_stopping"):
es = experiment_spec.training_config.early_stopping
if es.monitor not in ["loss", "validation_loss", "val_loss"]:
raise ValueError(
"Only `loss` and `validation_loss` and `val_loss` are supported monitors"
f", got {es.monitor}"
)
if es.min_delta < 0.:
raise ValueError(
f"`min_delta` should be non-negative, got {es.min_delta}"
)
if es.patience == 0:
raise ValueError(
f"`patience` should be positive, got {es.patience}"
)
return experiment_spec
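

# ----------------------------------------------------------------------------
# Hedged usage sketch: loading the packaged default spec and validating it. A
# custom spec file could be merged on top by passing its path as `spec_path`.
if __name__ == "__main__":
    spec = load_experiment_spec(merge_from_default=True)
    print(spec.retinanet_config.arch, spec.training_config.batch_size_per_gpu)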
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/utils/spec_loader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test spec loader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nvidia_tao_tf1.cv.retinanet.utils.spec_loader import load_experiment_spec
def test_spec_loader():
experiment_spec = load_experiment_spec(merge_from_default=True)
# params
img_channels = experiment_spec.augmentation_config.output_channel
img_height = experiment_spec.augmentation_config.output_height
img_width = experiment_spec.augmentation_config.output_width
freeze_blocks = experiment_spec.retinanet_config.freeze_blocks
freeze_bn = experiment_spec.retinanet_config.freeze_bn
nlayers = experiment_spec.retinanet_config.nlayers
arch = experiment_spec.retinanet_config.arch
assert arch == 'resnet'
assert nlayers == 18
assert freeze_bn
assert freeze_blocks == [0.0, 1.0]
assert img_width == img_height == 512
assert img_channels == 3
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/utils/tests/test_spec_loader.py |
"""IVA RetinaNet Feature Pyramid Generator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import keras
from keras.layers import Conv2D, ReLU, UpSampling2D
from nvidia_tao_tf1.cv.common.models.backbones import get_backbone
fpn_dict = {'vgg16': ('block_3c_relu', 'block_4c_relu', 'block_5c_relu'),
'vgg19': ('block_3d_relu', 'block_4d_relu', 'block_5d_relu'),
'resnet10': ('block_1a_relu', 'block_2a_relu', 'block_4a_relu'),
'resnet18': ('block_1b_relu', 'block_2b_relu', 'block_4b_relu'),
'resnet34': ('block_1c_relu', 'block_2d_relu', 'block_4c_relu'),
'resnet50': ('block_1c_relu', 'block_2d_relu', 'block_4c_relu'),
'resnet101': ('block_1c_relu', 'block_2d_relu', 'block_4c_relu'),
'googlenet': ('activation_3', 'inception_3b_output', 'inception_5b_output'),
'mobilenet_v1': ('conv_pw_relu_3', 'conv_pw_relu_5', 'conv_pw_relu_11'),
'mobilenet_v2': ('re_lu_4', 're_lu_7', 'block_12_add'),
'squeezenet': ('fire4', 'fire8', 'fire9'),
'darknet19': ('b3_conv3_lrelu', 'b4_conv5_lrelu', 'b5_conv5_lrelu'),
'darknet53': ('b3_add8', 'b4_add8', 'b5_add4'),
'efficientnet_b0': ('block4a_expand_activation',
'block6a_expand_activation',
'top_activation')}
class FPN:
"""Class for generating feature pyramid."""
def __init__(self,
input_tensor,
model_name,
**kwargs):
"""Initialize input and backbone."""
self.input_tensor = input_tensor
if model_name in ['vgg', 'resnet', 'darknet']:
self.nlayers = kwargs['nlayers']
self.model_name = model_name + str(self.nlayers)
else:
self.model_name = model_name
self.nlayers = None
self.backbone = get_backbone(input_tensor, model_name, **kwargs)
def generate(self, feature_size, kernel_regularizer):
"""Return a list of feature maps in FPN."""
options = {
'padding' : 'same',
'kernel_initializer' : 'he_normal',
'kernel_regularizer' : kernel_regularizer,
# 'use_bias' : False
}
if 'darknet' in self.model_name:
B1, B2, B3 = fpn_dict[self.model_name]
C3 = self.backbone.get_layer(B1).output
C4 = self.backbone.get_layer(B2).output
C5 = self.backbone.get_layer(B3).output
expand1 = Conv2D(feature_size,
kernel_size=1,
strides=1,
name='expand_conv1',
**options)(C5)
C5 = ReLU(name='expand1_relu')(expand1)
elif 'efficientnet' in self.model_name:
B1, B2, B3 = fpn_dict[self.model_name]
C3 = self.backbone.get_layer(B1).output
C4 = self.backbone.get_layer(B2).output
C5 = self.backbone.get_layer(B3).output
else:
_, B2, B3 = fpn_dict[self.model_name]
C3 = self.backbone.get_layer(B2).output
C4 = self.backbone.get_layer(B3).output
expand1 = Conv2D(feature_size,
kernel_size=3,
strides=2,
name='expand_conv1',
**options)(C4)
C5 = ReLU(name='expand1_relu')(expand1)
# Extra feature maps
P5 = Conv2D(feature_size, kernel_size=1, strides=1,
name='C5_reduced', **options)(C5)
P5_upsampled = UpSampling2D(size=(2, 2),
data_format='channels_first',
name='P5_upsampled')(P5)
P5 = Conv2D(feature_size, kernel_size=3, strides=1,
name='P5', **options)(P5)
P5 = ReLU(name='P5_relu')(P5)
# add P5 elementwise to C4
P4 = Conv2D(feature_size, kernel_size=1, strides=1,
name='C4_reduced', **options)(C4)
P4 = keras.layers.Add(name='P4_merged')([P5_upsampled, P4])
P4_upsampled = UpSampling2D(size=(2, 2),
data_format='channels_first',
name='P4_upsampled')(P4)
P4 = Conv2D(feature_size, kernel_size=3, strides=1,
name='P4', **options)(P4)
P4 = ReLU(name='P4_relu')(P4)
# add P4 elementwise to C3
P3 = Conv2D(feature_size, kernel_size=1, strides=1,
name='C3_reduced', **options)(C3)
P3 = keras.layers.Add(name='P3_merged')([P4_upsampled, P3])
P3 = Conv2D(feature_size, kernel_size=3, strides=1,
name='P3', **options)(P3)
P3 = ReLU(name='P3_relu')(P3)
# "P6 is obtained via a 3x3 stride-2 conv on C5"
P6 = Conv2D(feature_size, kernel_size=3, strides=2,
name='P6', **options)(C5)
# "P7 is computed by applying ReLU followed by a 3x3 stride-2 conv on P6"
P6 = ReLU(name='P6_relu')(P6)
P7 = Conv2D(feature_size, kernel_size=3, strides=2,
name='P7', **options)(P6)
P7 = ReLU(name='P7_relu')(P7)
return P3, P4, P5, P6, P7
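

# ----------------------------------------------------------------------------
# Hedged usage sketch mirroring the unit test: build a ResNet-18 feature pyramid
# on a 512x512 input and print the shapes of the five pyramid levels (P3..P7).
if __name__ == "__main__":
    image_input = keras.layers.Input(shape=(3, 512, 512))
    fpn = FPN(image_input, 'resnet',
              nlayers=18,
              use_batch_norm=True,
              use_pooling=False,
              freeze_bn=False,
              use_bias=False,
              data_format='channels_first',
              dropout=None,
              all_projections=True)
    for level in fpn.generate(feature_size=256, kernel_regularizer=None):
        print(level.get_shape().as_list())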
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/models/fpn.py |
tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/models/__init__.py |
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test retinanet FPN."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Input
import pytest
from nvidia_tao_tf1.cv.retinanet.models.fpn import FPN
@pytest.mark.slow
@pytest.mark.parametrize("bb, nlayers",
[('mobilenet_v2', None),
('resnet', 18),
('resnet', 10),
('resnet', 34),
('resnet', 50),
('resnet', 101),
('vgg', 16),
('vgg', 19),
('mobilenet_v1', 10),
('googlenet', None)])
def test_fpn(bb, nlayers):
input_tensor = Input(shape=(3, 512, 512))
fpn = FPN(input_tensor, bb,
nlayers=nlayers,
use_batch_norm=True,
use_pooling=False,
freeze_bn=False,
use_bias=False,
data_format='channels_first',
dropout=None,
all_projections=True)
feature_map_list = fpn.generate(
feature_size=256,
kernel_regularizer=None)
assert len(feature_map_list) == 5
p1, p2, p3, p4, p5 = feature_map_list
shape1 = p1.get_shape().as_list()
shape2 = p2.get_shape().as_list()
shape3 = p3.get_shape().as_list()
shape4 = p4.get_shape().as_list()
shape5 = p5.get_shape().as_list()
assert len(shape1) == 4
assert shape1[1] == shape2[1] == shape3[1] == shape4[1] == shape5[1]
assert shape1[-1] == 2 * shape2[-1]
assert shape2[-1] == 2 * shape3[-1]
assert shape3[-1] == 2 * shape4[-1]
assert shape4[-1] == 2 * shape5[-1]
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/models/tests/test_fpn.py |
tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/architecture/__init__.py |
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IVA RetinaNet base architecture."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import keras
from keras.layers import Activation, Concatenate, Conv2D, Input, Permute, Reshape
from keras.models import Model
import numpy as np
from nvidia_tao_tf1.core.models.quantize_keras_model import create_quantized_keras_model
from nvidia_tao_tf1.cv.retinanet.initializers.prior_prob import PriorProbability
from nvidia_tao_tf1.cv.retinanet.layers.anchor_box_layer import RetinaAnchorBoxes
from nvidia_tao_tf1.cv.retinanet.models.fpn import FPN
class Subnet(object):
"""Subnet Base Class."""
def __init__(self, name):
"""Init."""
self.name = name
self.subnet = {}
self.regressor = None
def __call__(self, feature_map):
"""Build model graph."""
x = feature_map
for i in range(len(self.subnet)):
x = self.subnet[self.name + '_' + str(i)](x)
return self.regressor(x)
class ClassSubnet(Subnet):
"""Subnet for classification branch."""
def __init__(self, n_anchors, n_classes, prior_prob, n_kernels,
feature_size=256, kernel_regularizer=None, name='class_submodule'):
"""Initialize layers in the subnet."""
self.n_anchors = n_anchors
self.n_classes = n_classes
self.prior_prob = prior_prob
self.n_kernels = n_kernels
self.feature_size = feature_size
self.name = name
options = {
'padding' : 'same',
'data_format' : 'channels_first',
'kernel_initializer' : keras.initializers.normal(mean=0.0, stddev=0.01, seed=None),
'kernel_regularizer' : kernel_regularizer
}
self.subnet = {}
for i in range(n_kernels):
self.subnet[name + '_' + str(i)] = Conv2D(feature_size, (3, 3),
activation='relu',
name='retinanet_class_subn_'+str(i),
bias_initializer='zeros',
**options)
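        # Initializing the classification bias with PriorProbability follows the focal-loss
        # setup from the RetinaNet paper: the bias is chosen so that every anchor starts with
        # a foreground confidence of roughly `prior_prob` (typically 0.01), which keeps the
        # many easy negatives from dominating the loss early in training.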
self.regressor = Conv2D(n_anchors * n_classes, (3, 3),
bias_initializer=PriorProbability(probability=prior_prob),
name='retinanet_conf_regressor',
**options)
class LocSubnet(Subnet):
"""Subnet for localization branch."""
def __init__(self, n_anchors, n_kernels, feature_size=256,
kernel_regularizer=None, name='loc_submodule'):
"""Initialize layers in the subnet."""
self.n_anchors = n_anchors
self.n_kernels = n_kernels
self.feature_size = feature_size
self.name = name
options = {
'kernel_size' : 3,
'strides' : 1,
'padding' : 'same',
'data_format' : 'channels_first',
'kernel_initializer' : keras.initializers.normal(mean=0.0, stddev=0.01, seed=None),
'kernel_regularizer' : kernel_regularizer,
'bias_initializer' : 'zeros'
}
self.subnet = {}
for i in range(n_kernels):
self.subnet[name + '_' + str(i)] = Conv2D(feature_size, activation='relu',
name='retinanet_loc_subn_'+str(i),
**options)
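        # The localization regressor predicts 4 box-offset values per anchor.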
self.regressor = Conv2D(n_anchors * 4,
name='retinanet_loc_regressor',
**options)
def retinanet(image_size,
n_classes,
kernel_regularizer=None,
bias_regularizer=None,
freeze_blocks=None,
freeze_bn=None,
use_batch_norm=True,
use_pooling=False,
use_bias=False,
all_projections=True,
dropout=None,
min_scale=None,
max_scale=None,
scales=None,
aspect_ratios_global=None,
aspect_ratios_per_layer=None,
two_boxes_for_ar1=False,
steps=None,
n_anchor_levels=3,
offsets=None,
clip_boxes=False,
variances=None,
arch="resnet",
nlayers=None,
n_kernels=2,
feature_size=256,
input_tensor=None,
qat=False):
'''
Build a Keras model with RetinaNet architecture, see references.
Arguments:
image_size (tuple): The input image size in the format `(channels, height, width)`.
n_classes (int): The number of positive classes, e.g. 20 for Pascal VOC, 80 for MS COCO.
        kernel_regularizer (keras regularizer, optional): Regularizer applied to all
            convolutional layers.
        bias_regularizer (keras regularizer, optional): Bias regularizer passed to the
            feature extractor.
        freeze_blocks (list, optional): The indices of the frozen subblocks.
        freeze_bn (boolean, optional): Whether to freeze the BN layers.
use_batch_norm (boolean, optional): Whether to use BN in the feature extractor.
use_pooling (boolean, optional): Whether to use max pooling in the feature extractor.
use_bias (boolean, optional): Whether to use bias in conv layers of the feature extractor.
all_projections (boolean, optional): Whether to use projection in some feature extractors.
dropout (float, optional): dropout ratio for squeezenet.
n_kernels (int, optional): number of conv kernels in the submodule.
feature_size (int, optional): number of channels in FPN and submodule.
nlayers (int, optional): number of layers in ResNets or Vggs.
arch (string): name of the feature extractor.
min_scale (float, optional): The smallest scaling factor for the size of the anchor
boxes as a fraction of the shorter side of the input images.
max_scale (float, optional): The largest scaling factor for the size of the anchor boxes
as a fraction of the shorter side of the input images. All scaling factors between
the smallest and the largest will be linearly interpolated. Note that the second to
last of the linearly interpolated scaling factors will actually be the scaling factor
for the last predictor layer, while the last scaling factor is used for the second box
for aspect ratio 1 in the last predictor layer if `two_boxes_for_ar1` is `True`.
scales (list, optional): A list of floats containing scaling factors per convolutional
predictor layer. This list must be one element longer than the number of predictor
layers. The first `k` elements are the scaling factors for the `k` predictor layers,
while the last element is used for the second box for aspect ratio 1 in the last
predictor layer if `two_boxes_for_ar1` is `True`. This additional last scaling factor
must be passed either way, even if it is not being used. If a list is passed, this
argument overrides `min_scale` and `max_scale`. All scaling factors must be greater
than zero.
aspect_ratios_global (list, optional): The list of aspect ratios for which anchor boxes are
to be generated. This list is valid for all prediction layers.
aspect_ratios_per_layer (list, optional): A list containing one aspect ratio list for each
prediction layer. This allows you to set the aspect ratios for each predictor layer
individually. If a list is
passed, it overrides `aspect_ratios_global`.
two_boxes_for_ar1 (bool, optional): Only relevant for aspect ratio lists that contain 1.
Will be ignored otherwise. If `True`, two anchor boxes will be generated for aspect
ratio 1. The first will be generated using the scaling factor for the respective layer,
the second one will be generated using geometric mean of said scaling factor and next
bigger scaling factor.
steps (list, optional): `None` or a list with as many elements as there are pred layers.
The elements can be either ints/floats or tuples of two ints/floats. These numbers
represent for each predictor layer how many pixels apart the anchor box center points
should be vertically and horizontally along the spatial grid over the image. If the
list contains ints/floats, then that value will be used for both spatial dimensions.
If the list contains tuples of two ints/floats, then they represent
`(step_height, step_width)`. If no steps are provided, then they will be computed such
that the anchor box center points will form an equidistant grid within the image
dimensions.
offsets (list, optional): `None` or a list with as many elements as there are predictor
layers. The elements can be either floats or tuples of two floats. These numbers
            represent for each predictor layer how many pixels from the top and left borders of
the image the top-most and left-most anchor box center points should be as a fraction of
`steps`. The last bit is important: The offsets are not absolute pixel values, but
fractions of the step size specified in the `steps` argument. If the list contains
floats, then that value will be used for both spatial dimensions. If the list contains
tuples of two floats, then they represent `(vertical_offset, horizontal_offset)`. If no
offsets are provided, then they will default to 0.5 of the step size.
clip_boxes (bool, optional): If `True`, clips the anchor box coordinates to stay within
image boundaries.
variances (list, optional): A list of 4 floats >0. The anchor box offset for each coordinate
will be divided by its respective variance value.
        n_anchor_levels (int, optional): The number of anchor scales generated per aspect ratio
            at each feature-map location. Defaults to 3.
        input_tensor (tensor, optional): An existing tensor to use as the model input. If `None`,
            a new `Input` layer of shape `image_size` is created.
        qat (bool): If `True`, build a quantization-aware model.
    Returns:
        model: The Keras RetinaNet model.
References:
https://arxiv.org/abs/1708.02002
'''
    n_predictor_layers = 5  # RetinaNet predicts from 5 pyramid levels (P3-P7).
img_channels, img_height, img_width = image_size[0], image_size[1], image_size[2]
############################################################################
# Get a few exceptions out of the way.
############################################################################
if aspect_ratios_global is None and aspect_ratios_per_layer is None:
raise ValueError("`aspect_ratios_global` and `aspect_ratios_per_layer` cannot both be None.\
At least one needs to be specified.")
if aspect_ratios_per_layer:
if len(aspect_ratios_per_layer) != n_predictor_layers:
raise ValueError("It must be either aspect_ratios_per_layer is None or \
len(aspect_ratios_per_layer) == {}, but len(aspect_ratios_per_layer) == {}."
.format(n_predictor_layers, len(aspect_ratios_per_layer)))
if (min_scale is None or max_scale is None) and scales is None:
raise ValueError("Either `min_scale` and `max_scale` or `scales` need to be specified.")
if scales:
if len(scales) != n_predictor_layers+1:
raise ValueError("It must be either scales is None or len(scales) == {}, but \
len(scales) == {}.".format(n_predictor_layers+1, len(scales)))
else: # If no explicit list of scaling factors was passed, compute the list of scaling factors
scales = np.linspace(min_scale, max_scale, n_predictor_layers+1)
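    # Worked example: min_scale=0.1 and max_scale=0.8 with 5 predictor layers yield 6 scales,
    # [0.1, 0.24, 0.38, 0.52, 0.66, 0.8]; the extra last value is only used for the second
    # aspect-ratio-1 box when `two_boxes_for_ar1` is True.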
if len(variances) != 4:
raise ValueError("4 variance values must be passed, but {} values were received."
.format(len(variances)))
variances = np.array(variances)
if np.any(variances <= 0):
raise ValueError("All variances must be >0, but the variances given are {}"
.format(variances))
    if (steps is not None) and (len(steps) != n_predictor_layers):
        raise ValueError("You must provide exactly one step value per predictor layer.")
    if (offsets is not None) and (len(offsets) != n_predictor_layers):
        raise ValueError("You must provide exactly one offset value per predictor layer.")
############################################################################
# Compute the anchor box parameters.
############################################################################
# Set the aspect ratios for each predictor layer. These are needed for the anchor box layers.
if aspect_ratios_per_layer:
aspect_ratios = aspect_ratios_per_layer
else:
aspect_ratios = [aspect_ratios_global] * n_predictor_layers
# If only a global aspect ratio list was passed, then the number of boxes is the same
# for each predictor layer
if (1 in aspect_ratios_global) & two_boxes_for_ar1:
n_boxes = n_anchor_levels * (len(aspect_ratios_global) + 1)
else:
n_boxes = n_anchor_levels * (len(aspect_ratios_global))
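    # Example: aspect_ratios_global=[1, 0.5, 2] with two_boxes_for_ar1=False and
    # n_anchor_levels=3 gives 3 * 3 = 9 anchors per cell; with two_boxes_for_ar1=True it
    # would be 3 * (3 + 1) = 12.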
if steps is None:
steps = [None] * n_predictor_layers
if offsets is None:
offsets = [None] * n_predictor_layers
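    # `None` entries are resolved downstream in RetinaAnchorBoxes: steps default to an
    # equidistant grid over the image and offsets to 0.5 of the step size (see the docstring).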
############################################################################
# Build the network.
############################################################################
if input_tensor is None:
x = Input(shape=(img_channels, img_height, img_width), name="Input")
else:
x = Input(tensor=input_tensor, name="Input")
retinanet_fpn = FPN(x, arch,
nlayers=nlayers,
data_format='channels_first',
use_batch_norm=use_batch_norm,
use_pooling=use_pooling,
use_bias=use_bias,
all_projections=all_projections,
dropout=dropout,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
freeze_blocks=freeze_blocks,
freeze_bn=freeze_bn,
force_relu=False)
if arch in ['vgg', 'resnet']:
assert nlayers > 0, "Please specify the number of layers for VGGNets or ResNets."
feature_map_list = retinanet_fpn.generate(feature_size=feature_size,
kernel_regularizer=kernel_regularizer)
if len(feature_map_list) != 5:
raise ValueError('Need 5 feature maps from base model')
conf_list = []
loc_list = []
anchor_list = []
classification_subnet = ClassSubnet(n_boxes, n_classes, 0.01, n_kernels,
feature_size=feature_size,
kernel_regularizer=kernel_regularizer)
localization_subnet = LocSubnet(n_boxes, n_kernels,
feature_size=feature_size,
kernel_regularizer=kernel_regularizer)
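    # When QAT is enabled, the backbone + FPN subgraph is rebuilt as a quantized model and the
    # pyramid outputs are re-fetched by layer name, so the detection subnets below attach to
    # the quantized feature maps.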
if qat:
raw_model = Model(inputs=x, outputs=feature_map_list)
qat_model = create_quantized_keras_model(raw_model)
feature_map_list = [
qat_model.get_layer('P3_relu').output,
qat_model.get_layer('P4_relu').output,
qat_model.get_layer('P5_relu').output,
qat_model.get_layer('P6_relu').output,
qat_model.get_layer('P7_relu').output]
for idx, feature_map in enumerate(feature_map_list):
conf = classification_subnet(feature_map)
loc = localization_subnet(feature_map)
anchor = RetinaAnchorBoxes(img_height, img_width,
this_scale=scales[idx],
next_scale=scales[idx+1],
aspect_ratios=aspect_ratios[idx],
two_boxes_for_ar1=two_boxes_for_ar1,
this_steps=steps[idx],
this_offsets=offsets[idx],
clip_boxes=clip_boxes,
variances=variances,
n_anchor_levels=n_anchor_levels,
name='retinanet_anchor_'+str(idx))(loc)
conf = Reshape((-1, 1, n_classes), name='conf_reshape_'+str(idx))(Permute((2, 3, 1))(conf))
loc = Reshape((-1, 1, 4), name='loc_reshape_'+str(idx))(Permute((2, 3, 1))(loc))
anchor = Reshape((-1, 1, 8), name='anchor_reshape_'+str(idx))(anchor)
conf_list.append(conf)
loc_list.append(loc)
anchor_list.append(anchor)
# Concatenate the predictions from the different layers
# Axis0 (batch) and axis2 (n_classes or 4, respectively) are identical for all layer predictions
# so we want to concatenate along axis 1, the number of boxes per layer
# Output shape of `mbox_conf`: (batch, n_boxes_total, n_classes)
mbox_conf = Concatenate(axis=1, name='mbox_conf')(conf_list)
# Output shape of `mbox_loc`: (batch, n_boxes_total, 4)
mbox_loc = Concatenate(axis=1, name='mbox_loc')(loc_list)
# Output shape of `mbox_priorbox`: (batch, n_boxes_total, 8)
mbox_priorbox = Concatenate(axis=1, name='mbox_priorbox')(anchor_list)
    # The box coordinate predictions will go into the loss function just the way they are,
    # but for the class predictions we apply a per-class sigmoid activation first
    # (RetinaNet uses sigmoid rather than softmax for classification)
mbox_conf_sigmoid = Activation('sigmoid', name='mbox_conf_sigmoid')(mbox_conf)
# Concatenate the class and box predictions and the anchors to one large predictions vector
# Output shape of `predictions`: (batch, n_boxes_total, n_classes + 4 + 8)
predictions = Concatenate(axis=-1)([mbox_conf_sigmoid, mbox_loc, mbox_priorbox])
predictions = Reshape((-1, n_classes+4+8), name='retinanet_predictions')(predictions)
return Model(inputs=x, outputs=predictions, name='retinanet_'+arch)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/architecture/retinanet.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test retinanet arch builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pytest
from nvidia_tao_tf1.cv.retinanet.architecture.retinanet import retinanet
@pytest.mark.parametrize("arch, nlayers, n_anchor_levels, qat, feature_size",
[('vgg', 16, 1, False, 256),
('resnet', 18, 1, False, 128),
('efficientnet_b0', None, 3, False, 256),
('mobilenet_v1', None, 3, True, 256),
('mobilenet_v2', None, 2, False, 16),
('squeezenet', None, 2, True, 32),
('darknet', 53, 1, False, 64)])
def test_arch(arch, nlayers, n_anchor_levels, qat, feature_size):
model = retinanet(
(3, 512, 512),
20,
arch=arch,
nlayers=nlayers,
kernel_regularizer=None,
freeze_blocks=[0],
freeze_bn=None,
min_scale=0.1,
max_scale=0.8,
scales=None,
aspect_ratios_global=[1, 0.5, 2],
aspect_ratios_per_layer=None,
two_boxes_for_ar1=False,
steps=None,
offsets=None,
clip_boxes=False,
variances=[0.1, 0.1, 0.2, 0.2],
input_tensor=None,
n_anchor_levels=n_anchor_levels,
qat=qat,
feature_size=feature_size)
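    # For a 512x512 input the P3-P7 grids have 64^2 + 32^2 + 16^2 + 8^2 + 4^2 = 5456 cells;
    # with 3 aspect ratios that is 16368 anchors per anchor level, and the last dimension is
    # 20 classes + 4 box offsets + 8 anchor terms = 32.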
assert model.get_layer('retinanet_predictions').output_shape[-2:] == \
(16368 * n_anchor_levels, 32)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/architecture/tests/test_retinanet.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IVA RetinaNet entrypoint scripts."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/scripts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Export a retinanet model."""
# import build_command_line_parser as this is needed by entrypoint
from nvidia_tao_tf1.cv.common.export.app import build_command_line_parser # noqa pylint: disable=W0611
from nvidia_tao_tf1.cv.common.export.app import launch_export
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.retinanet.export.onnx_exporter import RetinaNetOnnxExporter as Exporter
if __name__ == "__main__":
try:
launch_export(Exporter, backend='onnx')
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Export finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Export was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/scripts/export.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert KITTI dataset to DALI TFRecords for RetinaNet TLT model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.ssd.scripts.dataset_convert import ( # noqa pylint: disable=unused-import
build_command_line_parser,
main,
)
if __name__ == "__main__":
try:
main(sys.argv[1:])
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Dataset convert finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Dataset convert was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/retinanet/scripts/dataset_convert.py |