# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the KITTI converter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import inspect
import json
import os
import tempfile
import numpy as np
from PIL import Image
import pytest
import six
from six.moves import range
from six.moves import zip
import tensorflow as tf
from nvidia_tao_tf1.cv.detectnet_v2.dataio.build_converter import build_converter
from nvidia_tao_tf1.cv.detectnet_v2.dataio.kitti_converter_lib import KITTIConverter
import nvidia_tao_tf1.cv.detectnet_v2.proto.dataset_export_config_pb2 as\
dataset_export_config_pb2
import nvidia_tao_tf1.cv.detectnet_v2.proto.kitti_config_pb2 as kitti_config_pb2
def _dataset_export_config(num_partitions=0):
"""Return a KITTI dataset export configuration with given number of partitions."""
dataset_export_config = dataset_export_config_pb2.DatasetExportConfig()
kitti_config = kitti_config_pb2.KITTIConfig()
root_dir = os.path.dirname(os.path.abspath(
inspect.getsourcefile(lambda: None)))
root_dir += "/test_data"
kitti_config.root_directory_path = root_dir
kitti_config.num_partitions = num_partitions
kitti_config.num_shards = 0
kitti_config.partition_mode = "sequence"
kitti_config.image_dir_name = "image_2"
kitti_config.image_extension = ".jpg"
kitti_config.point_clouds_dir = "velodyne"
kitti_config.calibrations_dir = "calib"
kitti_config.kitti_sequence_to_frames_file = generate_sequence_map_file()
get_mock_images(kitti_config)
dataset_export_config.kitti_config.CopyFrom(kitti_config)
return dataset_export_config
def get_mock_images(kitti_config):
"""Generate mock images from the image_ids."""
image_root = os.path.join(
kitti_config.root_directory_path, kitti_config.image_dir_name)
if not os.path.exists(image_root):
os.makedirs(image_root)
image_file = {'000012': (1242, 375),
'000000': (1224, 370),
'000001': (1242, 375)}
for idx, sizes in six.iteritems(image_file):
image_file_name = os.path.join(image_root,
'{}{}'.format(idx, kitti_config.image_extension))
image = Image.new("RGB", sizes)
image.save(image_file_name)
return image_file
def generate_sequence_map_file():
"""Generate a sequence map file for sequence wise partitioning kitti."""
os_handle, temp_file_name = tempfile.mkstemp()
os.close(os_handle)
mock_sequence_to_frames_map = {'0': ['000000', '000001', '000012']}
with open(temp_file_name, 'w') as tfile:
json.dump(mock_sequence_to_frames_map, tfile)
return temp_file_name
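# The generated JSON maps a sequence id to its frame ids, i.e. the file written
# above contains: {"0": ["000000", "000001", "000012"]}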
def _mock_open_image(image_file):
"""Mock image open()."""
    # The images are opened only to read their dimensions, so mock the image size.
mock_image = namedtuple('mock_image', ['size'])
images = {'000012': mock_image((1242, 375)),
'000000': mock_image((1224, 370)),
'000001': mock_image((1242, 375))}
    # Return the mocked image corresponding to the frame_id in the image_file path.
    for frame_id in images.keys():
        if frame_id in image_file:
            return images[frame_id]
    # Fallback for unrecognized paths; note that this returns the namedtuple
    # class itself rather than an instance, a branch the tests never hit.
    return mock_image
def _mock_converter(mocker, tmpdir):
"""Return a KITTI converter with a mocked sequence to frame map."""
output_filename = os.path.join(str(tmpdir), 'kitti_test.tfrecords')
# Mock image open().
mocker.patch.object(Image, 'open', _mock_open_image)
# Instead of using all KITTI labels, use only a few samples.
mock_sequence_to_frames_map = {'0': ['000000', '000001', '000012']}
# Convert a few KITTI labels to TFrecords.
dataset_export_config = _dataset_export_config()
mocker.patch.object(KITTIConverter, '_read_sequence_to_frames_file',
return_value=mock_sequence_to_frames_map)
converter = build_converter(dataset_export_config, output_filename)
converter.labels_dir = ""
return converter, output_filename
@pytest.mark.parametrize("num_partitions,expected_partitions",
[(1, [list(range(1)) + list(range(2))
+ list(range(3)) + list(range(4)) + list(range(5))]),
(2, [list(range(5)) + list(range(3)) +
list(range(1)), list(range(4)) + list(range(2))]),
(3, [list(range(5)) + list(range(2)),
list(range(4)) + list(range(1)), list(range(3))]),
(5, [list(range(num_items)) for num_items in
range(5, 0, -1)])])
def test_partition(mocker, num_partitions, expected_partitions):
"""KITTI partitioning loops sequences starting from the longest one.
Frames corresponding to sequences are added to partitions one-by-one.
"""
dataset_export_config = _dataset_export_config(num_partitions)
# Create a dummy mapping in which num_items maps to a list of length num_items.
mock_sequence_to_frames_map = {num_items: list(range(num_items)) for
num_items in range(1, 6)}
mocker.patch.object(KITTIConverter, '_read_sequence_to_frames_file',
return_value=mock_sequence_to_frames_map)
os_handle, output_filename = tempfile.mkstemp()
os.close(os_handle)
output_filename = os.path.join(str(output_filename), "kitti.testrecords")
# Create a converter and run partitioning.
converter = build_converter(dataset_export_config,
output_filename=output_filename)
partitions = converter._partition()
assert partitions == expected_partitions[::-1] # reverse to match Rumpy
return dataset_export_config
expected_objects = [[b'truck', b'car', b'cyclist', b'dontcare', b'dontcare',
b'dontcare', b'dontcare'],
[b'pedestrian'],
[b'car', b'van', b'dontcare', b'dontcare', b'dontcare']]
expected_truncation = [[0.0, 0.0, -1.0, -1.0, -1.0],
[0.0],
[0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0]]
expected_occlusion = [[0, 0, -1, -1, -1], [0], [0, 0, 3, -1, -1, -1, -1]]
expected_x1 = [[662.20, 448.07, 610.5, 582.969971, 600.359985],
[712.4],
[599.41, 387.63, 676.60, 503.89, 511.35, 532.37, 559.62]]
expected_y1 = [[185.85, 177.14, 179.95, 182.70, 185.59],
[143.0],
[156.40, 181.54, 163.95, 169.71, 174.96, 176.35, 175.83]]
expected_x2 = [[690.21, 481.60, 629.68, 594.78, 608.36],
[810.73],
[629.75, 423.81, 688.98, 590.61, 527.81, 542.68, 575.40]]
expected_y2 = [[205.03, 206.41, 196.31, 191.05, 192.69],
[307.92],
[189.25, 203.12, 193.93, 190.13, 187.45, 185.27, 183.15]]
expected_truncation = expected_truncation[::-1]
expected_occlusion = expected_occlusion[::-1]
expected_x1 = expected_x1[::-1]
expected_y1 = expected_y1[::-1]
expected_x2 = expected_x2[::-1]
expected_y2 = expected_y2[::-1]
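# Note that expected_objects is already listed in the final frame order
# ('000001', '000000', '000012'), whereas the attribute and coordinate lists
# were written in the opposite order; the reversals above bring them into the
# same frame order.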
expected_point_cloud_channels = [[4], [4], [4]]
expected_T_lidar_to_camera1 = \
np.array([1.0, 0.0, 0.1, 4.0, 0.0, 0.0, -1.0, 0.0,
0.0, 1.0, 0.2, 6.0, 0.0, 0.0, 0.0, 1.0]).T
expected_T_lidar_to_camera2 = np.array(
[1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]).T
expected_T_lidar_to_camera3 = np.array(
[0, 0, 1, 0, 0, 1, 0, 6, -1, 0, 0, -4, 0, 0, 0, 1]).T
expected_T_lidar_to_camera = [expected_T_lidar_to_camera1, expected_T_lidar_to_camera2,
expected_T_lidar_to_camera3]
expected_T_lidar_to_camera = expected_T_lidar_to_camera[::-1]
expected_P_lidar_to_image1 = np.array(
[10., 2., 1.4, 57., 0., 3., -9.4, 18., 0., 1., 0.2, 6.]).T
expected_P_lidar_to_image2 = np.array([1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0]).T
expected_P_lidar_to_image3 = np.array(
[0, 0, 2, 3, 0, 2, 0, 12, -1, 0, 0, -4]).T
expected_P_lidar_to_image = [expected_P_lidar_to_image1, expected_P_lidar_to_image2,
expected_P_lidar_to_image3]
expected_P_lidar_to_image = expected_P_lidar_to_image[::-1]
single_class_expected_values = (expected_objects, expected_truncation, expected_occlusion,
expected_x1, expected_y1, expected_x2, expected_y2,
expected_point_cloud_channels, expected_T_lidar_to_camera,
expected_P_lidar_to_image)
expected_objects = [[b'truck', b'car', b'cyclist', b'dontcare', b'dontcare',
b'dontcare', b'dontcare'],
[b'pedestrian'],
[b'car', b'van', b'dontcare', b'dontcare', b'dontcare']]
expected_truncation = [[0.0, 0.0, -1.0, -1.0, -1.0],
[0.0],
[0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0]]
expected_occlusion = [[0, 0, -1, -1, -1], [0], [0, 0, 3, -1, -1, -1, -1]]
expected_x1 = [[662.20, 448.07, 610.5, 582.97, 600.36],
[712.40],
[599.41, 387.63, 676.60, 503.89, 511.35, 532.37, 559.62]]
expected_y1 = [[185.85, 177.14, 179.95, 182.70, 185.59],
[143.0],
[156.4, 181.54, 163.95, 169.71, 174.96, 176.35, 175.83]]
expected_x2 = [[690.21, 481.60, 629.68, 594.78, 608.36],
[810.73],
[629.75, 423.81, 688.98, 590.61, 527.81, 542.68, 575.40]]
expected_y2 = [[205.03, 206.41, 196.31, 191.05, 192.69],
[307.92],
[189.25, 203.12, 193.93, 190.13, 187.45, 185.27, 183.15]]
expected_truncation = expected_truncation[::-1]
expected_occlusion = expected_occlusion[::-1]
expected_x1 = expected_x1[::-1]
expected_y1 = expected_y1[::-1]
expected_x2 = expected_x2[::-1]
expected_y2 = expected_y2[::-1]
multi_class_expected_values = (expected_objects, expected_truncation, expected_occlusion,
expected_x1, expected_y1, expected_x2, expected_y2,
expected_point_cloud_channels, expected_T_lidar_to_camera,
expected_P_lidar_to_image)
@pytest.mark.parametrize("expected_values",
[single_class_expected_values, multi_class_expected_values])
def test_tfrecords_roundtrip(mocker, tmpdir, expected_values):
"""Test converting a few labels to TFRecords and parsing them back to Python."""
converter, output_filename = _mock_converter(mocker, tmpdir)
converter.convert()
tfrecords = tf.python_io.tf_record_iterator(output_filename)
# Common to all test cases
frame_ids = ['000001', '000000', '000012']
expected_point_cloud_ids = [
'velodyne/' + frame_id for frame_id in frame_ids]
expected_frame_ids = ['image_2/' + frame_id for frame_id in frame_ids]
expected_img_widths = [1242, 1224, 1242]
expected_img_heights = [375, 370, 375]
# Specific to each test case
expected_objects, expected_truncation, expected_occlusion, \
expected_x1, expected_y1, expected_x2, expected_y2, \
expected_point_cloud_channels, expected_T_lidar_to_camera, \
expected_P_lidar_to_image = expected_values
for i, record in enumerate(tfrecords):
example = tf.train.Example()
example.ParseFromString(record)
features = example.features.feature
assert features['frame/id'].bytes_list.value[0] == bytes(expected_frame_ids[i], 'utf-8')
assert features['target/object_class'].bytes_list.value[:] == expected_objects[i]
assert features['frame/width'].int64_list.value[0] == expected_img_widths[i]
assert features['frame/height'].int64_list.value[0] == expected_img_heights[i]
assert features['target/truncation'].float_list.value[:] == expected_truncation[i]
assert features['target/occlusion'].int64_list.value[:] == expected_occlusion[i]
bbox_features = ['target/coordinates_' +
x for x in ('x1', 'y1', 'x2', 'y2')]
bbox_values = [expected_x1[i], expected_y1[i],
expected_x2[i], expected_y2[i]]
for feature, expected_value in zip(bbox_features, bbox_values):
np.testing.assert_allclose(
features[feature].float_list.value[:], expected_value)
assert features['point_cloud/id'].bytes_list.value[0].decode() \
== expected_point_cloud_ids[i]
assert features['point_cloud/num_input_channels'].int64_list.value[:] == \
expected_point_cloud_channels[i]
np.testing.assert_allclose(
features['calibration/T_lidar_to_camera'].float_list.value[:],
expected_T_lidar_to_camera[i])
np.testing.assert_allclose(
features['calibration/P_lidar_to_image'].float_list.value[:],
expected_P_lidar_to_image[i])
return converter
def test_count_targets(mocker, tmpdir):
"""Test that count_targets counts objects correctly."""
# Take a few examples from the KITTI dataset.
object_counts = {
'000000': {
b'pedestrian': 1
},
'000001': {
b'car': 1,
b'truck': 1,
b'cyclist': 1,
b'dontcare': 4
},
'000012': {
b'car': 1,
b'van': 1,
b'dontcare': 3
}
}
converter, _ = _mock_converter(mocker, tmpdir)
# Check the counts.
for frame_id, object_count in six.iteritems(object_counts):
example = converter._create_example_proto(frame_id)
returned_count = converter._count_targets(example)
assert returned_count == object_count
return converter
# ---- source: nvidia_tao_tf1/cv/detectnet_v2/dataio/tests/test_kitti_converter_lib.py (repo: tao_tensorflow1_backend-main) ----
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for SampleModifier."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pytest
import six
from six.moves import range
import tensorflow as tf
from nvidia_tao_tf1.cv.detectnet_v2.common.dataio.converter_lib import _bytes_feature
from nvidia_tao_tf1.cv.detectnet_v2.dataio.build_sample_modifier import build_sample_modifier
import nvidia_tao_tf1.cv.detectnet_v2.proto.dataset_export_config_pb2 as\
dataset_export_config_pb2
class TestSampleModifier:
"""Tests for SampleModifier."""
@pytest.mark.parametrize(
"objects_in_sample,source_to_target_class_mapping,filtered",
[([b'automobile', b'van'], {b'automobile': b'car', b'van': b'car'}, True),
# Even though 'cat' is not defined in the mapping, there should be no filtering happening,
# since 'dog' is.
([b'heavy_truck', b'dog', b'cat'], {b'heavy_truck': b'car', b'dog': b'animal'}, False),
         # In this case only 'heavy_truck' is mapped (to 'car'), so the frame
         # should be filtered.
([b'heavy_truck', b'dog', b'cat'], {b'heavy_truck': b'car'}, True)
])
def test_filter_samples(self, objects_in_sample, source_to_target_class_mapping, filtered):
"""Test filtering samples that contain objects only in one class."""
sample_modifier_config = \
dataset_export_config_pb2.DatasetExportConfig.SampleModifierConfig()
# Assign class mapping.
for source_class_name, target_class_name in six.iteritems(source_to_target_class_mapping):
sample_modifier_config.source_to_target_class_mapping[source_class_name] = \
target_class_name
sample_modifier_config.filter_samples_containing_only.extend(['car'])
sample_modifier = build_sample_modifier(sample_modifier_config=sample_modifier_config,
validation_fold=0)
example = tf.train.Example(features=tf.train.Features(feature={
'target/object_class': _bytes_feature(*objects_in_sample),
}))
filtered_samples = sample_modifier._filter_sample(example)
if filtered:
assert filtered_samples is None
else:
assert filtered_samples == example
@pytest.mark.parametrize(
"objects_in_sample,minimum_target_class_imbalance,"
"source_to_target_class_mapping,num_duplicates,num_expected_samples",
# Number of canines / number of cars = 2.0 > 1.0 => Should be duplicated.
[([b'automobile', b'dog', b'dog', b'cat'], 1.0, {b'automobile': b'car',
b'dog': b'canine'}, 1, 2),
        # Number of canines / number of cars = 1.0 => Should not be duplicated.
([b'automobile', b'dog'], 1.0, {b'automobile': b'car', b'dog': b'canine'}, 1, 1),
        # Number of canines / number of cars = 1.0 > 0.5 => Should be duplicated.
([b'automobile', b'dog'], 0.5, {b'automobile': b'car', b'dog': b'canine'}, 2, 3),
        # Number of canines / number of cars = 0.33 < 0.5 => Should not be duplicated.
([b'automobile', b'automobile', b'automobile', b'dog'], 0.5,
{b'automobile': b'car', b'dog': b'canine'}, 1, 1)
])
def test_duplicate_samples(self, objects_in_sample, minimum_target_class_imbalance,
source_to_target_class_mapping, num_duplicates,
num_expected_samples):
"""Test sample duplication.
Test that samples that fulfill the condition
number of rare class / number of dominant class > minimum_imbalance
are duplicated.
"""
sample_modifier_config = \
dataset_export_config_pb2.DatasetExportConfig.SampleModifierConfig()
# Assign class mapping.
for source_class_name, target_class_name in six.iteritems(source_to_target_class_mapping):
sample_modifier_config.source_to_target_class_mapping[source_class_name] = \
target_class_name
sample_modifier_config.dominant_target_classes.extend([b'car'])
for target_class_name in set(source_to_target_class_mapping.values()):
sample_modifier_config.minimum_target_class_imbalance[target_class_name] = \
minimum_target_class_imbalance
sample_modifier_config.minimum_target_class_imbalance[b'car'] = 1.0
sample_modifier_config.num_duplicates = num_duplicates
sample_modifier = build_sample_modifier(sample_modifier_config=sample_modifier_config,
validation_fold=0)
example = tf.train.Example(features=tf.train.Features(feature={
'target/object_class': _bytes_feature(*objects_in_sample),
}))
duplicated_samples = sample_modifier._duplicate_sample(example)
assert duplicated_samples == [example]*num_expected_samples
@pytest.mark.parametrize("in_training_set", [True, False])
def test_in_training_set(self, in_training_set):
"""Test that a sample is modified only if it belongs to the training set."""
# Configure a SampleModifier and create a dummy sample that should be filtered if the
# sample belongs to the training set.
sample_modifier_config = \
dataset_export_config_pb2.DatasetExportConfig.SampleModifierConfig()
# Assign class mapping.
sample_modifier_config.source_to_target_class_mapping[b'cvip'] = b'car'
sample_modifier_config.filter_samples_containing_only.extend([b'car'])
validation_fold = 0
sample_modifier = build_sample_modifier(sample_modifier_config=sample_modifier_config,
validation_fold=validation_fold)
example = tf.train.Example(features=tf.train.Features(feature={
'target/object_class': _bytes_feature(*['cvip', 'cvip']),
}))
validation_fold = validation_fold + 1 if in_training_set else validation_fold
modified_samples = sample_modifier.modify_sample(example, validation_fold)
expected = [] if in_training_set else [example]
assert modified_samples == expected
@pytest.mark.parametrize("objects_in_sample", [[b'car', b'person', b'person'],
[b'car', b'person']])
def test_no_modifications(self, objects_in_sample):
"""Test that no modifications are done if the modification parameters are not set."""
sample_modifier_config = \
dataset_export_config_pb2.DatasetExportConfig.SampleModifierConfig()
sample_modifier = build_sample_modifier(sample_modifier_config=sample_modifier_config,
validation_fold=0)
example = tf.train.Example(features=tf.train.Features(feature={
'target/object_class': _bytes_feature(*objects_in_sample),
}))
modified_samples = sample_modifier.modify_sample(example, sample_modifier.validation_fold)
assert modified_samples == [example]
@pytest.mark.parametrize("objects_in_sample, folds, num_samples, max_training_samples,"
"validation_fold", [([b'car', b'person'], 5, 50, 25, 0),
([b'car', b'person'], 4, 30, 20, None)])
def test_max_num_training_samples(self, objects_in_sample, folds, num_samples,
max_training_samples, validation_fold):
"""Test that no more than max_per_training_fold are retained in each training fold."""
sample_modifier_config = \
dataset_export_config_pb2.DatasetExportConfig.SampleModifierConfig()
sample_modifier_config.max_training_samples = max_training_samples
sample_modifier = build_sample_modifier(sample_modifier_config=sample_modifier_config,
validation_fold=validation_fold,
num_folds=folds)
if validation_fold is None:
expected_num_per_fold = max_training_samples // folds
else:
expected_num_per_fold = max_training_samples // (folds - 1)
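        # E.g. in the first parametrized case (folds=5, validation_fold=0,
        # max_training_samples=25), each of the 4 training folds keeps
        # 25 // 4 = 6 samples; in the second case there is no validation fold,
        # so the 20-sample budget is spread over all 4 folds, 5 samples each.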
validation_fold = sample_modifier.validation_fold
for fold in range(folds):
for sample in range(num_samples // folds):
example = tf.train.Example(features=tf.train.Features(feature={
'target/object_class': _bytes_feature(*objects_in_sample),
}))
modified_samples = sample_modifier.modify_sample(example, fold)
expect_retained = (fold == validation_fold) or (sample < expected_num_per_fold)
expected_samples = [example] if expect_retained else []
assert modified_samples == expected_samples
# ---- source: nvidia_tao_tf1/cv/detectnet_v2/dataio/tests/test_sample_modifier.py (repo: tao_tensorflow1_backend-main) ----
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for exporting .tfrecords based on dataset export config."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import tempfile
from PIL import Image
import six
from six.moves import range
from nvidia_tao_tf1.cv.detectnet_v2.dataio.export import export_tfrecords, TEMP_TFRECORDS_DIR
from nvidia_tao_tf1.cv.detectnet_v2.dataio.kitti_converter_lib import KITTIConverter
import nvidia_tao_tf1.cv.detectnet_v2.proto.dataset_export_config_pb2 as\
dataset_export_config_pb2
import nvidia_tao_tf1.cv.detectnet_v2.proto.experiment_pb2 as experiment_pb2
import nvidia_tao_tf1.cv.detectnet_v2.proto.kitti_config_pb2 as kitti_config_pb2
def get_mock_images(kitti_config):
"""Generate mock images from the image_ids."""
image_root = os.path.join(kitti_config.root_directory_path, kitti_config.image_dir_name)
if not os.path.exists(image_root):
os.makedirs(image_root)
image_file = {'000012': (1242, 375),
'000000': (1224, 370),
'000001': (1242, 375)}
for idx, sizes in six.iteritems(image_file):
image_file_name = os.path.join(image_root,
'{}{}'.format(idx, kitti_config.image_extension))
image = Image.new("RGB", sizes)
image.save(image_file_name)
def generate_sequence_map_file():
"""Generate a sequence map file for sequence wise partitioning kitti."""
os_handle, temp_file_name = tempfile.mkstemp()
os.close(os_handle)
mock_sequence_to_frames_map = {'0': ['000000', '000001', '000012']}
with open(temp_file_name, 'w') as tfile:
json.dump(mock_sequence_to_frames_map, tfile)
return temp_file_name
def test_export_tfrecords(mocker):
"""Create a set of dummy dataset export config and test exporting to .tfrecords."""
kitti_config1 = kitti_config_pb2.KITTIConfig()
kitti_config2 = kitti_config_pb2.KITTIConfig()
export_config1 = dataset_export_config_pb2.DatasetExportConfig()
export_config1.kitti_config.CopyFrom(kitti_config1)
export_config1.image_directory_path = "images0"
export_config1.kitti_config.partition_mode = "sequence"
export_config1.kitti_config.image_dir_name = "image_2"
export_config1.kitti_config.image_extension = ".jpg"
export_config1.kitti_config.point_clouds_dir = "velodyne"
export_config1.kitti_config.calibrations_dir = "calib"
export_config1.kitti_config.kitti_sequence_to_frames_file = generate_sequence_map_file()
get_mock_images(export_config1.kitti_config)
export_config2 = dataset_export_config_pb2.DatasetExportConfig()
export_config2.kitti_config.CopyFrom(kitti_config2)
export_config2.image_directory_path = "images1"
export_config2.kitti_config.partition_mode = "sequence"
export_config2.kitti_config.image_dir_name = "image_2"
export_config2.kitti_config.image_extension = ".jpg"
export_config2.kitti_config.point_clouds_dir = "velodyne"
export_config2.kitti_config.calibrations_dir = "calib"
export_config2.kitti_config.kitti_sequence_to_frames_file = generate_sequence_map_file()
get_mock_images(export_config2.kitti_config)
experiment_config = experiment_pb2.Experiment()
experiment_config.dataset_export_config.extend([export_config1, export_config2])
# Mock these functions that take a lot of time in the constructor.
mocker.patch.object(KITTIConverter, "_read_sequence_to_frames_file", return_value=None)
# Mock the export step.
kitti_converter = mocker.patch.object(KITTIConverter, 'convert')
data_sources = export_tfrecords(experiment_config.dataset_export_config, 0)
tfrecords_paths = [data_source.tfrecords_path for data_source in data_sources]
image_dir_paths = [data_source.image_directory_path for data_source in data_sources]
# Check that the expected path was returned and the converters were called as expected.
assert tfrecords_paths == [TEMP_TFRECORDS_DIR + '/' + str(i) + '*' for i in range(2)]
assert image_dir_paths == ['images' + str(i) for i in range(2)]
assert kitti_converter.call_count == 2
# ---- source: nvidia_tao_tf1/cv/detectnet_v2/dataio/tests/test_export.py (repo: tao_tensorflow1_backend-main) ----
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/detectnet_v2/proto/optimizer_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from nvidia_tao_tf1.cv.detectnet_v2.proto import adam_optimizer_config_pb2 as nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_adam__optimizer__config__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/detectnet_v2/proto/optimizer_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n;nvidia_tao_tf1/cv/detectnet_v2/proto/optimizer_config.proto\x1a@nvidia_tao_tf1/cv/detectnet_v2/proto/adam_optimizer_config.proto\"D\n\x0fOptimizerConfig\x12$\n\x04\x61\x64\x61m\x18\x01 \x01(\x0b\x32\x14.AdamOptimizerConfigH\x00\x42\x0b\n\toptimizerb\x06proto3')
,
dependencies=[nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_adam__optimizer__config__pb2.DESCRIPTOR,])
_OPTIMIZERCONFIG = _descriptor.Descriptor(
name='OptimizerConfig',
full_name='OptimizerConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='adam', full_name='OptimizerConfig.adam', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='optimizer', full_name='OptimizerConfig.optimizer',
index=0, containing_type=None, fields=[]),
],
serialized_start=129,
serialized_end=197,
)
_OPTIMIZERCONFIG.fields_by_name['adam'].message_type = nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_adam__optimizer__config__pb2._ADAMOPTIMIZERCONFIG
_OPTIMIZERCONFIG.oneofs_by_name['optimizer'].fields.append(
_OPTIMIZERCONFIG.fields_by_name['adam'])
_OPTIMIZERCONFIG.fields_by_name['adam'].containing_oneof = _OPTIMIZERCONFIG.oneofs_by_name['optimizer']
DESCRIPTOR.message_types_by_name['OptimizerConfig'] = _OPTIMIZERCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
OptimizerConfig = _reflection.GeneratedProtocolMessageType('OptimizerConfig', (_message.Message,), dict(
DESCRIPTOR = _OPTIMIZERCONFIG,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.optimizer_config_pb2'
# @@protoc_insertion_point(class_scope:OptimizerConfig)
))
_sym_db.RegisterMessage(OptimizerConfig)
# @@protoc_insertion_point(module_scope)
# ---- source: nvidia_tao_tf1/cv/detectnet_v2/proto/optimizer_config_pb2.py (repo: tao_tensorflow1_backend-main) ----
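# Illustrative usage sketch for the generated module above; this is not part of
# the generated file, and the field values are arbitrary placeholders.
from nvidia_tao_tf1.cv.detectnet_v2.proto import optimizer_config_pb2

config = optimizer_config_pb2.OptimizerConfig()
# Writing to the nested 'adam' message selects it in the 'optimizer' oneof.
config.adam.epsilon = 1e-08
config.adam.beta1 = 0.9
config.adam.beta2 = 0.999
assert config.WhichOneof('optimizer') == 'adam'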
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/detectnet_v2/proto/objective_label_filter.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from nvidia_tao_tf1.cv.detectnet_v2.proto import label_filter_pb2 as nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_label__filter__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/detectnet_v2/proto/objective_label_filter.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\nAnvidia_tao_tf1/cv/detectnet_v2/proto/objective_label_filter.proto\x1a\x37nvidia_tao_tf1/cv/detectnet_v2/proto/label_filter.proto\"\x9f\x02\n\x14ObjectiveLabelFilter\x12X\n\x1eobjective_label_filter_configs\x18\x01 \x03(\x0b\x32\x30.ObjectiveLabelFilter.ObjectiveLabelFilterConfig\x12\x17\n\x0fmask_multiplier\x18\x02 \x01(\x02\x12\x1d\n\x15preserve_ground_truth\x18\x03 \x01(\x08\x1au\n\x1aObjectiveLabelFilterConfig\x12\"\n\x0clabel_filter\x18\x01 \x01(\x0b\x32\x0c.LabelFilter\x12\x1a\n\x12target_class_names\x18\x02 \x03(\t\x12\x17\n\x0fobjective_names\x18\x03 \x03(\tb\x06proto3')
,
dependencies=[nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_label__filter__pb2.DESCRIPTOR,])
_OBJECTIVELABELFILTER_OBJECTIVELABELFILTERCONFIG = _descriptor.Descriptor(
name='ObjectiveLabelFilterConfig',
full_name='ObjectiveLabelFilter.ObjectiveLabelFilterConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='label_filter', full_name='ObjectiveLabelFilter.ObjectiveLabelFilterConfig.label_filter', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='target_class_names', full_name='ObjectiveLabelFilter.ObjectiveLabelFilterConfig.target_class_names', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='objective_names', full_name='ObjectiveLabelFilter.ObjectiveLabelFilterConfig.objective_names', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=297,
serialized_end=414,
)
_OBJECTIVELABELFILTER = _descriptor.Descriptor(
name='ObjectiveLabelFilter',
full_name='ObjectiveLabelFilter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='objective_label_filter_configs', full_name='ObjectiveLabelFilter.objective_label_filter_configs', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mask_multiplier', full_name='ObjectiveLabelFilter.mask_multiplier', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='preserve_ground_truth', full_name='ObjectiveLabelFilter.preserve_ground_truth', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_OBJECTIVELABELFILTER_OBJECTIVELABELFILTERCONFIG, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=127,
serialized_end=414,
)
_OBJECTIVELABELFILTER_OBJECTIVELABELFILTERCONFIG.fields_by_name['label_filter'].message_type = nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_label__filter__pb2._LABELFILTER
_OBJECTIVELABELFILTER_OBJECTIVELABELFILTERCONFIG.containing_type = _OBJECTIVELABELFILTER
_OBJECTIVELABELFILTER.fields_by_name['objective_label_filter_configs'].message_type = _OBJECTIVELABELFILTER_OBJECTIVELABELFILTERCONFIG
DESCRIPTOR.message_types_by_name['ObjectiveLabelFilter'] = _OBJECTIVELABELFILTER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ObjectiveLabelFilter = _reflection.GeneratedProtocolMessageType('ObjectiveLabelFilter', (_message.Message,), dict(
ObjectiveLabelFilterConfig = _reflection.GeneratedProtocolMessageType('ObjectiveLabelFilterConfig', (_message.Message,), dict(
DESCRIPTOR = _OBJECTIVELABELFILTER_OBJECTIVELABELFILTERCONFIG,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.objective_label_filter_pb2'
# @@protoc_insertion_point(class_scope:ObjectiveLabelFilter.ObjectiveLabelFilterConfig)
))
,
DESCRIPTOR = _OBJECTIVELABELFILTER,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.objective_label_filter_pb2'
# @@protoc_insertion_point(class_scope:ObjectiveLabelFilter)
))
_sym_db.RegisterMessage(ObjectiveLabelFilter)
_sym_db.RegisterMessage(ObjectiveLabelFilter.ObjectiveLabelFilterConfig)
# @@protoc_insertion_point(module_scope)
# ---- source: nvidia_tao_tf1/cv/detectnet_v2/proto/objective_label_filter_pb2.py (repo: tao_tensorflow1_backend-main) ----
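# Illustrative usage sketch for the generated module above; this is not part of
# the generated file, and the values are arbitrary placeholders.
from nvidia_tao_tf1.cv.detectnet_v2.proto import objective_label_filter_pb2

label_filter = objective_label_filter_pb2.ObjectiveLabelFilter()
label_filter.mask_multiplier = 0.5
label_filter.preserve_ground_truth = True
# Repeated message fields are grown with add(); repeated scalars with extend().
filter_config = label_filter.objective_label_filter_configs.add()
filter_config.target_class_names.extend(['car'])
filter_config.objective_names.extend(['cov'])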
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/detectnet_v2/proto/adam_optimizer_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/detectnet_v2/proto/adam_optimizer_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n@nvidia_tao_tf1/cv/detectnet_v2/proto/adam_optimizer_config.proto\"D\n\x13\x41\x64\x61mOptimizerConfig\x12\x0f\n\x07\x65psilon\x18\x01 \x01(\x02\x12\r\n\x05\x62\x65ta1\x18\x02 \x01(\x02\x12\r\n\x05\x62\x65ta2\x18\x03 \x01(\x02\x62\x06proto3')
)
_ADAMOPTIMIZERCONFIG = _descriptor.Descriptor(
name='AdamOptimizerConfig',
full_name='AdamOptimizerConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='epsilon', full_name='AdamOptimizerConfig.epsilon', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='beta1', full_name='AdamOptimizerConfig.beta1', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='beta2', full_name='AdamOptimizerConfig.beta2', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=68,
serialized_end=136,
)
DESCRIPTOR.message_types_by_name['AdamOptimizerConfig'] = _ADAMOPTIMIZERCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AdamOptimizerConfig = _reflection.GeneratedProtocolMessageType('AdamOptimizerConfig', (_message.Message,), dict(
DESCRIPTOR = _ADAMOPTIMIZERCONFIG,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.adam_optimizer_config_pb2'
# @@protoc_insertion_point(class_scope:AdamOptimizerConfig)
))
_sym_db.RegisterMessage(AdamOptimizerConfig)
# @@protoc_insertion_point(module_scope)
# ---- source: nvidia_tao_tf1/cv/detectnet_v2/proto/adam_optimizer_config_pb2.py (repo: tao_tensorflow1_backend-main) ----
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/detectnet_v2/proto/training_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from nvidia_tao_tf1.cv.detectnet_v2.proto import cost_scaling_config_pb2 as nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_cost__scaling__config__pb2
from nvidia_tao_tf1.cv.detectnet_v2.proto import learning_rate_config_pb2 as nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_learning__rate__config__pb2
from nvidia_tao_tf1.cv.detectnet_v2.proto import optimizer_config_pb2 as nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_optimizer__config__pb2
from nvidia_tao_tf1.cv.detectnet_v2.proto import regularizer_config_pb2 as nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_regularizer__config__pb2
from nvidia_tao_tf1.cv.detectnet_v2.proto import visualizer_config_pb2 as nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_visualizer__config__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/detectnet_v2/proto/training_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n:nvidia_tao_tf1/cv/detectnet_v2/proto/training_config.proto\x1a>nvidia_tao_tf1/cv/detectnet_v2/proto/cost_scaling_config.proto\x1a?nvidia_tao_tf1/cv/detectnet_v2/proto/learning_rate_config.proto\x1a;nvidia_tao_tf1/cv/detectnet_v2/proto/optimizer_config.proto\x1a=nvidia_tao_tf1/cv/detectnet_v2/proto/regularizer_config.proto\x1a<nvidia_tao_tf1/cv/detectnet_v2/proto/visualizer_config.proto\"\xbc\x02\n\x0eTrainingConfig\x12\x1a\n\x12\x62\x61tch_size_per_gpu\x18\x01 \x01(\r\x12\x12\n\nnum_epochs\x18\x02 \x01(\r\x12*\n\rlearning_rate\x18\x03 \x01(\x0b\x32\x13.LearningRateConfig\x12\'\n\x0bregularizer\x18\x04 \x01(\x0b\x32\x12.RegularizerConfig\x12#\n\toptimizer\x18\x05 \x01(\x0b\x32\x10.OptimizerConfig\x12(\n\x0c\x63ost_scaling\x18\x06 \x01(\x0b\x32\x12.CostScalingConfig\x12\x1b\n\x13\x63heckpoint_interval\x18\x07 \x01(\r\x12\x12\n\nenable_qat\x18\x08 \x01(\x08\x12%\n\nvisualizer\x18\t \x01(\x0b\x32\x11.VisualizerConfigb\x06proto3')
,
dependencies=[nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_cost__scaling__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_learning__rate__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_optimizer__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_regularizer__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_visualizer__config__pb2.DESCRIPTOR,])
_TRAININGCONFIG = _descriptor.Descriptor(
name='TrainingConfig',
full_name='TrainingConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='batch_size_per_gpu', full_name='TrainingConfig.batch_size_per_gpu', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_epochs', full_name='TrainingConfig.num_epochs', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='learning_rate', full_name='TrainingConfig.learning_rate', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='regularizer', full_name='TrainingConfig.regularizer', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='optimizer', full_name='TrainingConfig.optimizer', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cost_scaling', full_name='TrainingConfig.cost_scaling', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='checkpoint_interval', full_name='TrainingConfig.checkpoint_interval', index=6,
number=7, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='enable_qat', full_name='TrainingConfig.enable_qat', index=7,
number=8, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='visualizer', full_name='TrainingConfig.visualizer', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=378,
serialized_end=694,
)
_TRAININGCONFIG.fields_by_name['learning_rate'].message_type = nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_learning__rate__config__pb2._LEARNINGRATECONFIG
_TRAININGCONFIG.fields_by_name['regularizer'].message_type = nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_regularizer__config__pb2._REGULARIZERCONFIG
_TRAININGCONFIG.fields_by_name['optimizer'].message_type = nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_optimizer__config__pb2._OPTIMIZERCONFIG
_TRAININGCONFIG.fields_by_name['cost_scaling'].message_type = nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_cost__scaling__config__pb2._COSTSCALINGCONFIG
_TRAININGCONFIG.fields_by_name['visualizer'].message_type = nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_visualizer__config__pb2._VISUALIZERCONFIG
DESCRIPTOR.message_types_by_name['TrainingConfig'] = _TRAININGCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TrainingConfig = _reflection.GeneratedProtocolMessageType('TrainingConfig', (_message.Message,), dict(
DESCRIPTOR = _TRAININGCONFIG,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.training_config_pb2'
# @@protoc_insertion_point(class_scope:TrainingConfig)
))
_sym_db.RegisterMessage(TrainingConfig)
# @@protoc_insertion_point(module_scope)
# ---- source: nvidia_tao_tf1/cv/detectnet_v2/proto/training_config_pb2.py (repo: tao_tensorflow1_backend-main) ----
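# Illustrative usage sketch for the generated module above; this is not part of
# the generated file, and the values are arbitrary placeholders.
from nvidia_tao_tf1.cv.detectnet_v2.proto import training_config_pb2

training = training_config_pb2.TrainingConfig()
training.batch_size_per_gpu = 4
training.num_epochs = 120
training.checkpoint_interval = 10
# Sub-messages (learning rate, regularizer, optimizer, cost scaling,
# visualizer) are populated in place, e.g. the nested Adam settings:
training.optimizer.adam.beta1 = 0.9
training.optimizer.adam.beta2 = 0.999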
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/detectnet_v2/proto/cost_scaling_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/detectnet_v2/proto/cost_scaling_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n>nvidia_tao_tf1/cv/detectnet_v2/proto/cost_scaling_config.proto\"d\n\x11\x43ostScalingConfig\x12\x0f\n\x07\x65nabled\x18\x01 \x01(\x08\x12\x18\n\x10initial_exponent\x18\x02 \x01(\x01\x12\x11\n\tincrement\x18\x03 \x01(\x01\x12\x11\n\tdecrement\x18\x04 \x01(\x01\x62\x06proto3')
)
_COSTSCALINGCONFIG = _descriptor.Descriptor(
name='CostScalingConfig',
full_name='CostScalingConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='enabled', full_name='CostScalingConfig.enabled', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='initial_exponent', full_name='CostScalingConfig.initial_exponent', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='increment', full_name='CostScalingConfig.increment', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='decrement', full_name='CostScalingConfig.decrement', index=3,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=66,
serialized_end=166,
)
DESCRIPTOR.message_types_by_name['CostScalingConfig'] = _COSTSCALINGCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CostScalingConfig = _reflection.GeneratedProtocolMessageType('CostScalingConfig', (_message.Message,), dict(
DESCRIPTOR = _COSTSCALINGCONFIG,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.cost_scaling_config_pb2'
# @@protoc_insertion_point(class_scope:CostScalingConfig)
))
_sym_db.RegisterMessage(CostScalingConfig)
# @@protoc_insertion_point(module_scope)
# ---- source: nvidia_tao_tf1/cv/detectnet_v2/proto/cost_scaling_config_pb2.py (repo: tao_tensorflow1_backend-main) ----
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/detectnet_v2/proto/early_stopping_annealing_schedule_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/detectnet_v2/proto/early_stopping_annealing_schedule_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\nSnvidia_tao_tf1/cv/detectnet_v2/proto/early_stopping_annealing_schedule_config.proto\"\xa9\x01\n$EarlyStoppingAnnealingScheduleConfig\x12\x19\n\x11min_learning_rate\x18\x01 \x01(\x02\x12\x19\n\x11max_learning_rate\x18\x02 \x01(\x02\x12\x19\n\x11soft_start_epochs\x18\x03 \x01(\r\x12\x18\n\x10\x61nnealing_epochs\x18\x04 \x01(\r\x12\x16\n\x0epatience_steps\x18\x05 \x01(\rb\x06proto3')
)
_EARLYSTOPPINGANNEALINGSCHEDULECONFIG = _descriptor.Descriptor(
name='EarlyStoppingAnnealingScheduleConfig',
full_name='EarlyStoppingAnnealingScheduleConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='min_learning_rate', full_name='EarlyStoppingAnnealingScheduleConfig.min_learning_rate', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_learning_rate', full_name='EarlyStoppingAnnealingScheduleConfig.max_learning_rate', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='soft_start_epochs', full_name='EarlyStoppingAnnealingScheduleConfig.soft_start_epochs', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='annealing_epochs', full_name='EarlyStoppingAnnealingScheduleConfig.annealing_epochs', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='patience_steps', full_name='EarlyStoppingAnnealingScheduleConfig.patience_steps', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=88,
serialized_end=257,
)
DESCRIPTOR.message_types_by_name['EarlyStoppingAnnealingScheduleConfig'] = _EARLYSTOPPINGANNEALINGSCHEDULECONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
EarlyStoppingAnnealingScheduleConfig = _reflection.GeneratedProtocolMessageType('EarlyStoppingAnnealingScheduleConfig', (_message.Message,), dict(
DESCRIPTOR = _EARLYSTOPPINGANNEALINGSCHEDULECONFIG,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.early_stopping_annealing_schedule_config_pb2'
# @@protoc_insertion_point(class_scope:EarlyStoppingAnnealingScheduleConfig)
))
_sym_db.RegisterMessage(EarlyStoppingAnnealingScheduleConfig)
# @@protoc_insertion_point(module_scope)
# ---- source: nvidia_tao_tf1/cv/detectnet_v2/proto/early_stopping_annealing_schedule_config_pb2.py (repo: tao_tensorflow1_backend-main) ----
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/detectnet_v2/proto/regularizer_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/detectnet_v2/proto/regularizer_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n=nvidia_tao_tf1/cv/detectnet_v2/proto/regularizer_config.proto\"\x8a\x01\n\x11RegularizerConfig\x12\x33\n\x04type\x18\x01 \x01(\x0e\x32%.RegularizerConfig.RegularizationType\x12\x0e\n\x06weight\x18\x02 \x01(\x02\"0\n\x12RegularizationType\x12\n\n\x06NO_REG\x10\x00\x12\x06\n\x02L1\x10\x01\x12\x06\n\x02L2\x10\x02\x62\x06proto3')
)
_REGULARIZERCONFIG_REGULARIZATIONTYPE = _descriptor.EnumDescriptor(
name='RegularizationType',
full_name='RegularizerConfig.RegularizationType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='NO_REG', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='L1', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='L2', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=156,
serialized_end=204,
)
_sym_db.RegisterEnumDescriptor(_REGULARIZERCONFIG_REGULARIZATIONTYPE)
_REGULARIZERCONFIG = _descriptor.Descriptor(
name='RegularizerConfig',
full_name='RegularizerConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='RegularizerConfig.type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='weight', full_name='RegularizerConfig.weight', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_REGULARIZERCONFIG_REGULARIZATIONTYPE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=66,
serialized_end=204,
)
_REGULARIZERCONFIG.fields_by_name['type'].enum_type = _REGULARIZERCONFIG_REGULARIZATIONTYPE
_REGULARIZERCONFIG_REGULARIZATIONTYPE.containing_type = _REGULARIZERCONFIG
DESCRIPTOR.message_types_by_name['RegularizerConfig'] = _REGULARIZERCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
RegularizerConfig = _reflection.GeneratedProtocolMessageType('RegularizerConfig', (_message.Message,), dict(
DESCRIPTOR = _REGULARIZERCONFIG,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.regularizer_config_pb2'
# @@protoc_insertion_point(class_scope:RegularizerConfig)
))
_sym_db.RegisterMessage(RegularizerConfig)
# @@protoc_insertion_point(module_scope)
# ---- source: nvidia_tao_tf1/cv/detectnet_v2/proto/regularizer_config_pb2.py (repo: tao_tensorflow1_backend-main) ----
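# Illustrative usage sketch for the generated module above; this is not part of
# the generated file, and the weight is an arbitrary placeholder.
from nvidia_tao_tf1.cv.detectnet_v2.proto import regularizer_config_pb2

regularizer = regularizer_config_pb2.RegularizerConfig()
# The RegularizationType enum values (NO_REG, L1, L2) are exposed on the class.
regularizer.type = regularizer_config_pb2.RegularizerConfig.L2
regularizer.weight = 3e-09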
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""Defining protocol buffers for different components of GB."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# ---- source: nvidia_tao_tf1/cv/detectnet_v2/proto/__init__.py (repo: tao_tensorflow1_backend-main) ----
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/detectnet_v2/proto/augmentation_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/detectnet_v2/proto/augmentation_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n>nvidia_tao_tf1/cv/detectnet_v2/proto/augmentation_config.proto\"\xb2\x07\n\x12\x41ugmentationConfig\x12\x38\n\rpreprocessing\x18\x01 \x01(\x0b\x32!.AugmentationConfig.Preprocessing\x12\x45\n\x14spatial_augmentation\x18\x02 \x01(\x0b\x32\'.AugmentationConfig.SpatialAugmentation\x12\x41\n\x12\x63olor_augmentation\x18\x03 \x01(\x0b\x32%.AugmentationConfig.ColorAugmentation\x1a\xe0\x02\n\rPreprocessing\x12\x1a\n\x12output_image_width\x18\x01 \x01(\r\x12\x1b\n\x13output_image_height\x18\x02 \x01(\r\x12\x18\n\x10output_image_min\x18\x0e \x01(\r\x12\x18\n\x10output_image_max\x18\x0f \x01(\r\x12\x1a\n\x12\x65nable_auto_resize\x18\x10 \x01(\x08\x12\x1c\n\x14output_image_channel\x18\r \x01(\r\x12\x11\n\tcrop_left\x18\x04 \x01(\r\x12\x10\n\x08\x63rop_top\x18\x05 \x01(\r\x12\x12\n\ncrop_right\x18\x06 \x01(\r\x12\x13\n\x0b\x63rop_bottom\x18\x07 \x01(\r\x12\x16\n\x0emin_bbox_width\x18\x08 \x01(\x02\x12\x17\n\x0fmin_bbox_height\x18\t \x01(\x02\x12\x13\n\x0bscale_width\x18\n \x01(\x02\x12\x14\n\x0cscale_height\x18\x0b \x01(\x02\x1a\xd5\x01\n\x13SpatialAugmentation\x12\x19\n\x11hflip_probability\x18\x01 \x01(\x02\x12\x19\n\x11vflip_probability\x18\x02 \x01(\x02\x12\x10\n\x08zoom_min\x18\x03 \x01(\x02\x12\x10\n\x08zoom_max\x18\x04 \x01(\x02\x12\x17\n\x0ftranslate_max_x\x18\x05 \x01(\x02\x12\x17\n\x0ftranslate_max_y\x18\x06 \x01(\x02\x12\x16\n\x0erotate_rad_max\x18\x07 \x01(\x02\x12\x1a\n\x12rotate_probability\x18\x08 \x01(\x02\x1a\x9c\x01\n\x11\x43olorAugmentation\x12\x1a\n\x12\x63olor_shift_stddev\x18\x01 \x01(\x02\x12\x18\n\x10hue_rotation_max\x18\x02 \x01(\x02\x12\x1c\n\x14saturation_shift_max\x18\x03 \x01(\x02\x12\x1a\n\x12\x63ontrast_scale_max\x18\x05 \x01(\x02\x12\x17\n\x0f\x63ontrast_center\x18\x08 \x01(\x02\x62\x06proto3')
)
_AUGMENTATIONCONFIG_PREPROCESSING = _descriptor.Descriptor(
name='Preprocessing',
full_name='AugmentationConfig.Preprocessing',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='output_image_width', full_name='AugmentationConfig.Preprocessing.output_image_width', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='output_image_height', full_name='AugmentationConfig.Preprocessing.output_image_height', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='output_image_min', full_name='AugmentationConfig.Preprocessing.output_image_min', index=2,
number=14, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='output_image_max', full_name='AugmentationConfig.Preprocessing.output_image_max', index=3,
number=15, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='enable_auto_resize', full_name='AugmentationConfig.Preprocessing.enable_auto_resize', index=4,
number=16, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='output_image_channel', full_name='AugmentationConfig.Preprocessing.output_image_channel', index=5,
number=13, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='crop_left', full_name='AugmentationConfig.Preprocessing.crop_left', index=6,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='crop_top', full_name='AugmentationConfig.Preprocessing.crop_top', index=7,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='crop_right', full_name='AugmentationConfig.Preprocessing.crop_right', index=8,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='crop_bottom', full_name='AugmentationConfig.Preprocessing.crop_bottom', index=9,
number=7, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_bbox_width', full_name='AugmentationConfig.Preprocessing.min_bbox_width', index=10,
number=8, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_bbox_height', full_name='AugmentationConfig.Preprocessing.min_bbox_height', index=11,
number=9, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='scale_width', full_name='AugmentationConfig.Preprocessing.scale_width', index=12,
number=10, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='scale_height', full_name='AugmentationConfig.Preprocessing.scale_height', index=13,
number=11, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=286,
serialized_end=638,
)
_AUGMENTATIONCONFIG_SPATIALAUGMENTATION = _descriptor.Descriptor(
name='SpatialAugmentation',
full_name='AugmentationConfig.SpatialAugmentation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='hflip_probability', full_name='AugmentationConfig.SpatialAugmentation.hflip_probability', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='vflip_probability', full_name='AugmentationConfig.SpatialAugmentation.vflip_probability', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='zoom_min', full_name='AugmentationConfig.SpatialAugmentation.zoom_min', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='zoom_max', full_name='AugmentationConfig.SpatialAugmentation.zoom_max', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='translate_max_x', full_name='AugmentationConfig.SpatialAugmentation.translate_max_x', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='translate_max_y', full_name='AugmentationConfig.SpatialAugmentation.translate_max_y', index=5,
number=6, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rotate_rad_max', full_name='AugmentationConfig.SpatialAugmentation.rotate_rad_max', index=6,
number=7, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rotate_probability', full_name='AugmentationConfig.SpatialAugmentation.rotate_probability', index=7,
number=8, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=641,
serialized_end=854,
)
_AUGMENTATIONCONFIG_COLORAUGMENTATION = _descriptor.Descriptor(
name='ColorAugmentation',
full_name='AugmentationConfig.ColorAugmentation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='color_shift_stddev', full_name='AugmentationConfig.ColorAugmentation.color_shift_stddev', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hue_rotation_max', full_name='AugmentationConfig.ColorAugmentation.hue_rotation_max', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='saturation_shift_max', full_name='AugmentationConfig.ColorAugmentation.saturation_shift_max', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='contrast_scale_max', full_name='AugmentationConfig.ColorAugmentation.contrast_scale_max', index=3,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='contrast_center', full_name='AugmentationConfig.ColorAugmentation.contrast_center', index=4,
number=8, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=857,
serialized_end=1013,
)
_AUGMENTATIONCONFIG = _descriptor.Descriptor(
name='AugmentationConfig',
full_name='AugmentationConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='preprocessing', full_name='AugmentationConfig.preprocessing', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='spatial_augmentation', full_name='AugmentationConfig.spatial_augmentation', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='color_augmentation', full_name='AugmentationConfig.color_augmentation', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_AUGMENTATIONCONFIG_PREPROCESSING, _AUGMENTATIONCONFIG_SPATIALAUGMENTATION, _AUGMENTATIONCONFIG_COLORAUGMENTATION, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=67,
serialized_end=1013,
)
_AUGMENTATIONCONFIG_PREPROCESSING.containing_type = _AUGMENTATIONCONFIG
_AUGMENTATIONCONFIG_SPATIALAUGMENTATION.containing_type = _AUGMENTATIONCONFIG
_AUGMENTATIONCONFIG_COLORAUGMENTATION.containing_type = _AUGMENTATIONCONFIG
_AUGMENTATIONCONFIG.fields_by_name['preprocessing'].message_type = _AUGMENTATIONCONFIG_PREPROCESSING
_AUGMENTATIONCONFIG.fields_by_name['spatial_augmentation'].message_type = _AUGMENTATIONCONFIG_SPATIALAUGMENTATION
_AUGMENTATIONCONFIG.fields_by_name['color_augmentation'].message_type = _AUGMENTATIONCONFIG_COLORAUGMENTATION
DESCRIPTOR.message_types_by_name['AugmentationConfig'] = _AUGMENTATIONCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AugmentationConfig = _reflection.GeneratedProtocolMessageType('AugmentationConfig', (_message.Message,), dict(
Preprocessing = _reflection.GeneratedProtocolMessageType('Preprocessing', (_message.Message,), dict(
DESCRIPTOR = _AUGMENTATIONCONFIG_PREPROCESSING,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.augmentation_config_pb2'
# @@protoc_insertion_point(class_scope:AugmentationConfig.Preprocessing)
))
,
SpatialAugmentation = _reflection.GeneratedProtocolMessageType('SpatialAugmentation', (_message.Message,), dict(
DESCRIPTOR = _AUGMENTATIONCONFIG_SPATIALAUGMENTATION,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.augmentation_config_pb2'
# @@protoc_insertion_point(class_scope:AugmentationConfig.SpatialAugmentation)
))
,
ColorAugmentation = _reflection.GeneratedProtocolMessageType('ColorAugmentation', (_message.Message,), dict(
DESCRIPTOR = _AUGMENTATIONCONFIG_COLORAUGMENTATION,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.augmentation_config_pb2'
# @@protoc_insertion_point(class_scope:AugmentationConfig.ColorAugmentation)
))
,
DESCRIPTOR = _AUGMENTATIONCONFIG,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.augmentation_config_pb2'
# @@protoc_insertion_point(class_scope:AugmentationConfig)
))
_sym_db.RegisterMessage(AugmentationConfig)
_sym_db.RegisterMessage(AugmentationConfig.Preprocessing)
_sym_db.RegisterMessage(AugmentationConfig.SpatialAugmentation)
_sym_db.RegisterMessage(AugmentationConfig.ColorAugmentation)
# @@protoc_insertion_point(module_scope)
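# --- Illustrative usage sketch (editor addition, not protoc output) ---
# A minimal round-trip through the generated AugmentationConfig message.
# Field names come from the descriptors above; the numeric values are
# arbitrary examples.
if __name__ == "__main__":
    config = AugmentationConfig()
    config.preprocessing.output_image_width = 1248
    config.preprocessing.output_image_height = 384
    config.preprocessing.output_image_channel = 3
    config.spatial_augmentation.hflip_probability = 0.5
    config.spatial_augmentation.zoom_min = 1.0
    config.spatial_augmentation.zoom_max = 1.0
    config.color_augmentation.hue_rotation_max = 25.0
    restored = AugmentationConfig()
    restored.ParseFromString(config.SerializeToString())
    assert restored.preprocessing.output_image_width == 1248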
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/proto/augmentation_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/detectnet_v2/proto/coco_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/detectnet_v2/proto/coco_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n6nvidia_tao_tf1/cv/detectnet_v2/proto/coco_config.proto\"\x86\x01\n\nCOCOConfig\x12\x1b\n\x13root_directory_path\x18\x01 \x01(\t\x12\x15\n\rimg_dir_names\x18\x02 \x03(\t\x12\x18\n\x10\x61nnotation_files\x18\x03 \x03(\t\x12\x16\n\x0enum_partitions\x18\x04 \x01(\r\x12\x12\n\nnum_shards\x18\x05 \x03(\rb\x06proto3')
)
_COCOCONFIG = _descriptor.Descriptor(
name='COCOConfig',
full_name='COCOConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='root_directory_path', full_name='COCOConfig.root_directory_path', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='img_dir_names', full_name='COCOConfig.img_dir_names', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='annotation_files', full_name='COCOConfig.annotation_files', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_partitions', full_name='COCOConfig.num_partitions', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_shards', full_name='COCOConfig.num_shards', index=4,
number=5, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=59,
serialized_end=193,
)
DESCRIPTOR.message_types_by_name['COCOConfig'] = _COCOCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
COCOConfig = _reflection.GeneratedProtocolMessageType('COCOConfig', (_message.Message,), dict(
DESCRIPTOR = _COCOCONFIG,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.coco_config_pb2'
# @@protoc_insertion_point(class_scope:COCOConfig)
))
_sym_db.RegisterMessage(COCOConfig)
# @@protoc_insertion_point(module_scope)
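# --- Illustrative usage sketch (editor addition, not protoc output) ---
# Populating COCOConfig, including its repeated string and uint32 fields.
# The directory and annotation file names below are hypothetical
# placeholders, not paths mandated by this module.
if __name__ == "__main__":
    cfg = COCOConfig()
    cfg.root_directory_path = "/data/coco"
    cfg.img_dir_names.extend(["train2017", "val2017"])
    cfg.annotation_files.extend(["annotations/instances_train2017.json",
                                 "annotations/instances_val2017.json"])
    cfg.num_partitions = 2
    cfg.num_shards.extend([32, 4])
    print(cfg)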
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/proto/coco_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/detectnet_v2/proto/postprocessing_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/detectnet_v2/proto/postprocessing_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n@nvidia_tao_tf1/cv/detectnet_v2/proto/postprocessing_config.proto\"\xfd\x02\n\x10\x43lusteringConfig\x12\x1a\n\x12\x63overage_threshold\x18\x01 \x01(\x02\x12#\n\x1bminimum_bounding_box_height\x18\x02 \x01(\x05\x12\x43\n\x14\x63lustering_algorithm\x18\x03 \x01(\x0e\x32%.ClusteringConfig.ClusteringAlgorithm\x12\x12\n\ndbscan_eps\x18\x04 \x01(\x02\x12\x1a\n\x12\x64\x62scan_min_samples\x18\x05 \x01(\x05\x12\x19\n\x11neighborhood_size\x18\x06 \x01(\x05\x12#\n\x1b\x64\x62scan_confidence_threshold\x18\x07 \x01(\x02\x12\x19\n\x11nms_iou_threshold\x18\x08 \x01(\x02\x12 \n\x18nms_confidence_threshold\x18\t \x01(\x02\"6\n\x13\x43lusteringAlgorithm\x12\n\n\x06\x44\x42SCAN\x10\x00\x12\x07\n\x03NMS\x10\x01\x12\n\n\x06HYBRID\x10\x02\"o\n\x10\x43onfidenceConfig\x12\x1c\n\x14\x63onfidence_threshold\x18\x01 \x01(\x02\x12!\n\x19\x63onfidence_model_filename\x18\x02 \x01(\t\x12\x1a\n\x12normalization_mode\x18\x03 \x01(\t\"\xb5\x02\n\x14PostProcessingConfig\x12I\n\x13target_class_config\x18\x01 \x03(\x0b\x32,.PostProcessingConfig.TargetClassConfigEntry\x1ao\n\x11TargetClassConfig\x12,\n\x11\x63lustering_config\x18\x01 \x01(\x0b\x32\x11.ClusteringConfig\x12,\n\x11\x63onfidence_config\x18\x02 \x01(\x0b\x32\x11.ConfidenceConfig\x1a\x61\n\x16TargetClassConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x36\n\x05value\x18\x02 \x01(\x0b\x32\'.PostProcessingConfig.TargetClassConfig:\x02\x38\x01\x62\x06proto3')
)
_CLUSTERINGCONFIG_CLUSTERINGALGORITHM = _descriptor.EnumDescriptor(
name='ClusteringAlgorithm',
full_name='ClusteringConfig.ClusteringAlgorithm',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='DBSCAN', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NMS', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='HYBRID', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=396,
serialized_end=450,
)
_sym_db.RegisterEnumDescriptor(_CLUSTERINGCONFIG_CLUSTERINGALGORITHM)
_CLUSTERINGCONFIG = _descriptor.Descriptor(
name='ClusteringConfig',
full_name='ClusteringConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='coverage_threshold', full_name='ClusteringConfig.coverage_threshold', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='minimum_bounding_box_height', full_name='ClusteringConfig.minimum_bounding_box_height', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='clustering_algorithm', full_name='ClusteringConfig.clustering_algorithm', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dbscan_eps', full_name='ClusteringConfig.dbscan_eps', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dbscan_min_samples', full_name='ClusteringConfig.dbscan_min_samples', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='neighborhood_size', full_name='ClusteringConfig.neighborhood_size', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dbscan_confidence_threshold', full_name='ClusteringConfig.dbscan_confidence_threshold', index=6,
number=7, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='nms_iou_threshold', full_name='ClusteringConfig.nms_iou_threshold', index=7,
number=8, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='nms_confidence_threshold', full_name='ClusteringConfig.nms_confidence_threshold', index=8,
number=9, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_CLUSTERINGCONFIG_CLUSTERINGALGORITHM,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=69,
serialized_end=450,
)
_CONFIDENCECONFIG = _descriptor.Descriptor(
name='ConfidenceConfig',
full_name='ConfidenceConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='confidence_threshold', full_name='ConfidenceConfig.confidence_threshold', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='confidence_model_filename', full_name='ConfidenceConfig.confidence_model_filename', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='normalization_mode', full_name='ConfidenceConfig.normalization_mode', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=452,
serialized_end=563,
)
_POSTPROCESSINGCONFIG_TARGETCLASSCONFIG = _descriptor.Descriptor(
name='TargetClassConfig',
full_name='PostProcessingConfig.TargetClassConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='clustering_config', full_name='PostProcessingConfig.TargetClassConfig.clustering_config', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='confidence_config', full_name='PostProcessingConfig.TargetClassConfig.confidence_config', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=665,
serialized_end=776,
)
_POSTPROCESSINGCONFIG_TARGETCLASSCONFIGENTRY = _descriptor.Descriptor(
name='TargetClassConfigEntry',
full_name='PostProcessingConfig.TargetClassConfigEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='PostProcessingConfig.TargetClassConfigEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='PostProcessingConfig.TargetClassConfigEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=778,
serialized_end=875,
)
_POSTPROCESSINGCONFIG = _descriptor.Descriptor(
name='PostProcessingConfig',
full_name='PostProcessingConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='target_class_config', full_name='PostProcessingConfig.target_class_config', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_POSTPROCESSINGCONFIG_TARGETCLASSCONFIG, _POSTPROCESSINGCONFIG_TARGETCLASSCONFIGENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=566,
serialized_end=875,
)
_CLUSTERINGCONFIG.fields_by_name['clustering_algorithm'].enum_type = _CLUSTERINGCONFIG_CLUSTERINGALGORITHM
_CLUSTERINGCONFIG_CLUSTERINGALGORITHM.containing_type = _CLUSTERINGCONFIG
_POSTPROCESSINGCONFIG_TARGETCLASSCONFIG.fields_by_name['clustering_config'].message_type = _CLUSTERINGCONFIG
_POSTPROCESSINGCONFIG_TARGETCLASSCONFIG.fields_by_name['confidence_config'].message_type = _CONFIDENCECONFIG
_POSTPROCESSINGCONFIG_TARGETCLASSCONFIG.containing_type = _POSTPROCESSINGCONFIG
_POSTPROCESSINGCONFIG_TARGETCLASSCONFIGENTRY.fields_by_name['value'].message_type = _POSTPROCESSINGCONFIG_TARGETCLASSCONFIG
_POSTPROCESSINGCONFIG_TARGETCLASSCONFIGENTRY.containing_type = _POSTPROCESSINGCONFIG
_POSTPROCESSINGCONFIG.fields_by_name['target_class_config'].message_type = _POSTPROCESSINGCONFIG_TARGETCLASSCONFIGENTRY
DESCRIPTOR.message_types_by_name['ClusteringConfig'] = _CLUSTERINGCONFIG
DESCRIPTOR.message_types_by_name['ConfidenceConfig'] = _CONFIDENCECONFIG
DESCRIPTOR.message_types_by_name['PostProcessingConfig'] = _POSTPROCESSINGCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ClusteringConfig = _reflection.GeneratedProtocolMessageType('ClusteringConfig', (_message.Message,), dict(
DESCRIPTOR = _CLUSTERINGCONFIG,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.postprocessing_config_pb2'
# @@protoc_insertion_point(class_scope:ClusteringConfig)
))
_sym_db.RegisterMessage(ClusteringConfig)
ConfidenceConfig = _reflection.GeneratedProtocolMessageType('ConfidenceConfig', (_message.Message,), dict(
DESCRIPTOR = _CONFIDENCECONFIG,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.postprocessing_config_pb2'
# @@protoc_insertion_point(class_scope:ConfidenceConfig)
))
_sym_db.RegisterMessage(ConfidenceConfig)
PostProcessingConfig = _reflection.GeneratedProtocolMessageType('PostProcessingConfig', (_message.Message,), dict(
TargetClassConfig = _reflection.GeneratedProtocolMessageType('TargetClassConfig', (_message.Message,), dict(
DESCRIPTOR = _POSTPROCESSINGCONFIG_TARGETCLASSCONFIG,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.postprocessing_config_pb2'
# @@protoc_insertion_point(class_scope:PostProcessingConfig.TargetClassConfig)
))
,
TargetClassConfigEntry = _reflection.GeneratedProtocolMessageType('TargetClassConfigEntry', (_message.Message,), dict(
DESCRIPTOR = _POSTPROCESSINGCONFIG_TARGETCLASSCONFIGENTRY,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.postprocessing_config_pb2'
# @@protoc_insertion_point(class_scope:PostProcessingConfig.TargetClassConfigEntry)
))
,
DESCRIPTOR = _POSTPROCESSINGCONFIG,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.postprocessing_config_pb2'
# @@protoc_insertion_point(class_scope:PostProcessingConfig)
))
_sym_db.RegisterMessage(PostProcessingConfig)
_sym_db.RegisterMessage(PostProcessingConfig.TargetClassConfig)
_sym_db.RegisterMessage(PostProcessingConfig.TargetClassConfigEntry)
_POSTPROCESSINGCONFIG_TARGETCLASSCONFIGENTRY._options = None
# @@protoc_insertion_point(module_scope)
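# --- Illustrative usage sketch (editor addition, not protoc output) ---
# Configuring per-class postprocessing through the target_class_config
# map field. "car" is an example key; DBSCAN is one of the enum values
# declared above (DBSCAN, NMS, HYBRID).
if __name__ == "__main__":
    pp = PostProcessingConfig()
    car = pp.target_class_config["car"]
    car.clustering_config.clustering_algorithm = ClusteringConfig.DBSCAN
    car.clustering_config.dbscan_eps = 0.15
    car.clustering_config.dbscan_min_samples = 1
    car.clustering_config.minimum_bounding_box_height = 20
    car.confidence_config.confidence_threshold = 0.9
    print(pp)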
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/proto/postprocessing_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/detectnet_v2/proto/visualizer_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from nvidia_tao_tf1.cv.common.proto import clearml_config_pb2 as nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_clearml__config__pb2
from nvidia_tao_tf1.cv.common.proto import wandb_config_pb2 as nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_wandb__config__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/detectnet_v2/proto/visualizer_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n<nvidia_tao_tf1/cv/detectnet_v2/proto/visualizer_config.proto\x1a\x33nvidia_tao_tf1/cv/common/proto/clearml_config.proto\x1a\x31nvidia_tao_tf1/cv/common/proto/wandb_config.proto\"\xa2\x03\n\x10VisualizerConfig\x12\x0f\n\x07\x65nabled\x18\x01 \x01(\x08\x12\x12\n\nnum_images\x18\x02 \x01(\r\x12 \n\x18scalar_logging_frequency\x18\x03 \x01(\r\x12$\n\x1cinfrequent_logging_frequency\x18\x04 \x01(\r\x12\x45\n\x13target_class_config\x18\x05 \x03(\x0b\x32(.VisualizerConfig.TargetClassConfigEntry\x12\"\n\x0cwandb_config\x18\x06 \x01(\x0b\x32\x0c.WandBConfig\x12&\n\x0e\x63learml_config\x18\x07 \x01(\x0b\x32\x0e.ClearMLConfig\x1a/\n\x11TargetClassConfig\x12\x1a\n\x12\x63overage_threshold\x18\x01 \x01(\x02\x1a]\n\x16TargetClassConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x32\n\x05value\x18\x02 \x01(\x0b\x32#.VisualizerConfig.TargetClassConfig:\x02\x38\x01\x62\x06proto3')
,
dependencies=[nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_clearml__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_wandb__config__pb2.DESCRIPTOR,])
_VISUALIZERCONFIG_TARGETCLASSCONFIG = _descriptor.Descriptor(
name='TargetClassConfig',
full_name='VisualizerConfig.TargetClassConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='coverage_threshold', full_name='VisualizerConfig.TargetClassConfig.coverage_threshold', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=445,
serialized_end=492,
)
_VISUALIZERCONFIG_TARGETCLASSCONFIGENTRY = _descriptor.Descriptor(
name='TargetClassConfigEntry',
full_name='VisualizerConfig.TargetClassConfigEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='VisualizerConfig.TargetClassConfigEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='VisualizerConfig.TargetClassConfigEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=494,
serialized_end=587,
)
_VISUALIZERCONFIG = _descriptor.Descriptor(
name='VisualizerConfig',
full_name='VisualizerConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='enabled', full_name='VisualizerConfig.enabled', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_images', full_name='VisualizerConfig.num_images', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='scalar_logging_frequency', full_name='VisualizerConfig.scalar_logging_frequency', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='infrequent_logging_frequency', full_name='VisualizerConfig.infrequent_logging_frequency', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='target_class_config', full_name='VisualizerConfig.target_class_config', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='wandb_config', full_name='VisualizerConfig.wandb_config', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='clearml_config', full_name='VisualizerConfig.clearml_config', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_VISUALIZERCONFIG_TARGETCLASSCONFIG, _VISUALIZERCONFIG_TARGETCLASSCONFIGENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=169,
serialized_end=587,
)
_VISUALIZERCONFIG_TARGETCLASSCONFIG.containing_type = _VISUALIZERCONFIG
_VISUALIZERCONFIG_TARGETCLASSCONFIGENTRY.fields_by_name['value'].message_type = _VISUALIZERCONFIG_TARGETCLASSCONFIG
_VISUALIZERCONFIG_TARGETCLASSCONFIGENTRY.containing_type = _VISUALIZERCONFIG
_VISUALIZERCONFIG.fields_by_name['target_class_config'].message_type = _VISUALIZERCONFIG_TARGETCLASSCONFIGENTRY
_VISUALIZERCONFIG.fields_by_name['wandb_config'].message_type = nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_wandb__config__pb2._WANDBCONFIG
_VISUALIZERCONFIG.fields_by_name['clearml_config'].message_type = nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_clearml__config__pb2._CLEARMLCONFIG
DESCRIPTOR.message_types_by_name['VisualizerConfig'] = _VISUALIZERCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
VisualizerConfig = _reflection.GeneratedProtocolMessageType('VisualizerConfig', (_message.Message,), dict(
TargetClassConfig = _reflection.GeneratedProtocolMessageType('TargetClassConfig', (_message.Message,), dict(
DESCRIPTOR = _VISUALIZERCONFIG_TARGETCLASSCONFIG,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.visualizer_config_pb2'
# @@protoc_insertion_point(class_scope:VisualizerConfig.TargetClassConfig)
))
,
TargetClassConfigEntry = _reflection.GeneratedProtocolMessageType('TargetClassConfigEntry', (_message.Message,), dict(
DESCRIPTOR = _VISUALIZERCONFIG_TARGETCLASSCONFIGENTRY,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.visualizer_config_pb2'
# @@protoc_insertion_point(class_scope:VisualizerConfig.TargetClassConfigEntry)
))
,
DESCRIPTOR = _VISUALIZERCONFIG,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.visualizer_config_pb2'
# @@protoc_insertion_point(class_scope:VisualizerConfig)
))
_sym_db.RegisterMessage(VisualizerConfig)
_sym_db.RegisterMessage(VisualizerConfig.TargetClassConfig)
_sym_db.RegisterMessage(VisualizerConfig.TargetClassConfigEntry)
_VISUALIZERCONFIG_TARGETCLASSCONFIGENTRY._options = None
# @@protoc_insertion_point(module_scope)
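# --- Illustrative usage sketch (editor addition, not protoc output) ---
# Enabling visualization and setting a per-class coverage threshold via
# the target_class_config map; "car" is an example key and the numbers
# are arbitrary.
if __name__ == "__main__":
    viz = VisualizerConfig()
    viz.enabled = True
    viz.num_images = 3
    viz.scalar_logging_frequency = 10
    viz.target_class_config["car"].coverage_threshold = 0.5
    print(viz)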
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/proto/visualizer_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/detectnet_v2/proto/learning_rate_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from nvidia_tao_tf1.cv.detectnet_v2.proto import soft_start_annealing_schedule_config_pb2 as nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_soft__start__annealing__schedule__config__pb2
from nvidia_tao_tf1.cv.detectnet_v2.proto import early_stopping_annealing_schedule_config_pb2 as nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_early__stopping__annealing__schedule__config__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/detectnet_v2/proto/learning_rate_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n?nvidia_tao_tf1/cv/detectnet_v2/proto/learning_rate_config.proto\x1aOnvidia_tao_tf1/cv/detectnet_v2/proto/soft_start_annealing_schedule_config.proto\x1aSnvidia_tao_tf1/cv/detectnet_v2/proto/early_stopping_annealing_schedule_config.proto\"\xc5\x01\n\x12LearningRateConfig\x12J\n\x1dsoft_start_annealing_schedule\x18\x01 \x01(\x0b\x32!.SoftStartAnnealingScheduleConfigH\x00\x12R\n!early_stopping_annealing_schedule\x18\x02 \x01(\x0b\x32%.EarlyStoppingAnnealingScheduleConfigH\x00\x42\x0f\n\rlearning_rateb\x06proto3')
,
dependencies=[nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_soft__start__annealing__schedule__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_early__stopping__annealing__schedule__config__pb2.DESCRIPTOR,])
_LEARNINGRATECONFIG = _descriptor.Descriptor(
name='LearningRateConfig',
full_name='LearningRateConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='soft_start_annealing_schedule', full_name='LearningRateConfig.soft_start_annealing_schedule', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='early_stopping_annealing_schedule', full_name='LearningRateConfig.early_stopping_annealing_schedule', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='learning_rate', full_name='LearningRateConfig.learning_rate',
index=0, containing_type=None, fields=[]),
],
serialized_start=234,
serialized_end=431,
)
_LEARNINGRATECONFIG.fields_by_name['soft_start_annealing_schedule'].message_type = nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_soft__start__annealing__schedule__config__pb2._SOFTSTARTANNEALINGSCHEDULECONFIG
_LEARNINGRATECONFIG.fields_by_name['early_stopping_annealing_schedule'].message_type = nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_early__stopping__annealing__schedule__config__pb2._EARLYSTOPPINGANNEALINGSCHEDULECONFIG
_LEARNINGRATECONFIG.oneofs_by_name['learning_rate'].fields.append(
_LEARNINGRATECONFIG.fields_by_name['soft_start_annealing_schedule'])
_LEARNINGRATECONFIG.fields_by_name['soft_start_annealing_schedule'].containing_oneof = _LEARNINGRATECONFIG.oneofs_by_name['learning_rate']
_LEARNINGRATECONFIG.oneofs_by_name['learning_rate'].fields.append(
_LEARNINGRATECONFIG.fields_by_name['early_stopping_annealing_schedule'])
_LEARNINGRATECONFIG.fields_by_name['early_stopping_annealing_schedule'].containing_oneof = _LEARNINGRATECONFIG.oneofs_by_name['learning_rate']
DESCRIPTOR.message_types_by_name['LearningRateConfig'] = _LEARNINGRATECONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
LearningRateConfig = _reflection.GeneratedProtocolMessageType('LearningRateConfig', (_message.Message,), dict(
DESCRIPTOR = _LEARNINGRATECONFIG,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.learning_rate_config_pb2'
# @@protoc_insertion_point(class_scope:LearningRateConfig)
))
_sym_db.RegisterMessage(LearningRateConfig)
# @@protoc_insertion_point(module_scope)
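# --- Illustrative usage sketch (editor addition, not protoc output) ---
# The two schedule submessages share the "learning_rate" oneof declared
# above, so setting one automatically clears the other.
if __name__ == "__main__":
    lr = LearningRateConfig()
    lr.soft_start_annealing_schedule.SetInParent()
    assert lr.WhichOneof("learning_rate") == "soft_start_annealing_schedule"
    lr.early_stopping_annealing_schedule.SetInParent()
    assert lr.WhichOneof("learning_rate") == "early_stopping_annealing_schedule"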
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/proto/learning_rate_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/detectnet_v2/proto/inference.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from nvidia_tao_tf1.cv.detectnet_v2.proto import inferencer_config_pb2 as nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_inferencer__config__pb2
from nvidia_tao_tf1.cv.detectnet_v2.proto import postprocessing_config_pb2 as nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_postprocessing__config__pb2
from nvidia_tao_tf1.cv.common.proto import wandb_config_pb2 as nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_wandb__config__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/detectnet_v2/proto/inference.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n4nvidia_tao_tf1/cv/detectnet_v2/proto/inference.proto\x1a<nvidia_tao_tf1/cv/detectnet_v2/proto/inferencer_config.proto\x1a@nvidia_tao_tf1/cv/detectnet_v2/proto/postprocessing_config.proto\x1a\x31nvidia_tao_tf1/cv/common/proto/wandb_config.proto\"\xe1\x01\n\x1a\x43lasswiseBboxHandlerConfig\x12,\n\x11\x63lustering_config\x18\x01 \x01(\x0b\x32\x11.ClusteringConfig\x12\x18\n\x10\x63onfidence_model\x18\x02 \x01(\t\x12\x12\n\noutput_map\x18\x03 \x01(\t\x12\x39\n\nbbox_color\x18\x07 \x01(\x0b\x32%.ClasswiseBboxHandlerConfig.BboxColor\x1a,\n\tBboxColor\x12\t\n\x01R\x18\x01 \x01(\x05\x12\t\n\x01G\x18\x02 \x01(\x05\x12\t\n\x01\x42\x18\x03 \x01(\x05\"\xd4\x02\n\x11\x42\x62oxHandlerConfig\x12\x12\n\nkitti_dump\x18\x01 \x01(\x08\x12\x17\n\x0f\x64isable_overlay\x18\x02 \x01(\x08\x12\x19\n\x11overlay_linewidth\x18\x03 \x01(\x05\x12Y\n\x1d\x63lasswise_bbox_handler_config\x18\x04 \x03(\x0b\x32\x32.BboxHandlerConfig.ClasswiseBboxHandlerConfigEntry\x12\x18\n\x10postproc_classes\x18\x05 \x03(\t\x12\"\n\x0cwandb_config\x18\x06 \x01(\x0b\x32\x0c.WandBConfig\x1a^\n\x1f\x43lasswiseBboxHandlerConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12*\n\x05value\x18\x02 \x01(\x0b\x32\x1b.ClasswiseBboxHandlerConfig:\x02\x38\x01\"j\n\tInference\x12,\n\x11inferencer_config\x18\x01 \x01(\x0b\x32\x11.InferencerConfig\x12/\n\x13\x62\x62ox_handler_config\x18\x02 \x01(\x0b\x32\x12.BboxHandlerConfigb\x06proto3')
,
dependencies=[nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_inferencer__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_postprocessing__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_wandb__config__pb2.DESCRIPTOR,])
_CLASSWISEBBOXHANDLERCONFIG_BBOXCOLOR = _descriptor.Descriptor(
name='BboxColor',
full_name='ClasswiseBboxHandlerConfig.BboxColor',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='R', full_name='ClasswiseBboxHandlerConfig.BboxColor.R', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='G', full_name='ClasswiseBboxHandlerConfig.BboxColor.G', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='B', full_name='ClasswiseBboxHandlerConfig.BboxColor.B', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=417,
serialized_end=461,
)
_CLASSWISEBBOXHANDLERCONFIG = _descriptor.Descriptor(
name='ClasswiseBboxHandlerConfig',
full_name='ClasswiseBboxHandlerConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='clustering_config', full_name='ClasswiseBboxHandlerConfig.clustering_config', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='confidence_model', full_name='ClasswiseBboxHandlerConfig.confidence_model', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='output_map', full_name='ClasswiseBboxHandlerConfig.output_map', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='bbox_color', full_name='ClasswiseBboxHandlerConfig.bbox_color', index=3,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_CLASSWISEBBOXHANDLERCONFIG_BBOXCOLOR, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=236,
serialized_end=461,
)
_BBOXHANDLERCONFIG_CLASSWISEBBOXHANDLERCONFIGENTRY = _descriptor.Descriptor(
name='ClasswiseBboxHandlerConfigEntry',
full_name='BboxHandlerConfig.ClasswiseBboxHandlerConfigEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='BboxHandlerConfig.ClasswiseBboxHandlerConfigEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='BboxHandlerConfig.ClasswiseBboxHandlerConfigEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=710,
serialized_end=804,
)
_BBOXHANDLERCONFIG = _descriptor.Descriptor(
name='BboxHandlerConfig',
full_name='BboxHandlerConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='kitti_dump', full_name='BboxHandlerConfig.kitti_dump', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='disable_overlay', full_name='BboxHandlerConfig.disable_overlay', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='overlay_linewidth', full_name='BboxHandlerConfig.overlay_linewidth', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='classwise_bbox_handler_config', full_name='BboxHandlerConfig.classwise_bbox_handler_config', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='postproc_classes', full_name='BboxHandlerConfig.postproc_classes', index=4,
number=5, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='wandb_config', full_name='BboxHandlerConfig.wandb_config', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_BBOXHANDLERCONFIG_CLASSWISEBBOXHANDLERCONFIGENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=464,
serialized_end=804,
)
_INFERENCE = _descriptor.Descriptor(
name='Inference',
full_name='Inference',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='inferencer_config', full_name='Inference.inferencer_config', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='bbox_handler_config', full_name='Inference.bbox_handler_config', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=806,
serialized_end=912,
)
_CLASSWISEBBOXHANDLERCONFIG_BBOXCOLOR.containing_type = _CLASSWISEBBOXHANDLERCONFIG
_CLASSWISEBBOXHANDLERCONFIG.fields_by_name['clustering_config'].message_type = nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_postprocessing__config__pb2._CLUSTERINGCONFIG
_CLASSWISEBBOXHANDLERCONFIG.fields_by_name['bbox_color'].message_type = _CLASSWISEBBOXHANDLERCONFIG_BBOXCOLOR
_BBOXHANDLERCONFIG_CLASSWISEBBOXHANDLERCONFIGENTRY.fields_by_name['value'].message_type = _CLASSWISEBBOXHANDLERCONFIG
_BBOXHANDLERCONFIG_CLASSWISEBBOXHANDLERCONFIGENTRY.containing_type = _BBOXHANDLERCONFIG
_BBOXHANDLERCONFIG.fields_by_name['classwise_bbox_handler_config'].message_type = _BBOXHANDLERCONFIG_CLASSWISEBBOXHANDLERCONFIGENTRY
_BBOXHANDLERCONFIG.fields_by_name['wandb_config'].message_type = nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_wandb__config__pb2._WANDBCONFIG
_INFERENCE.fields_by_name['inferencer_config'].message_type = nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_inferencer__config__pb2._INFERENCERCONFIG
_INFERENCE.fields_by_name['bbox_handler_config'].message_type = _BBOXHANDLERCONFIG
DESCRIPTOR.message_types_by_name['ClasswiseBboxHandlerConfig'] = _CLASSWISEBBOXHANDLERCONFIG
DESCRIPTOR.message_types_by_name['BboxHandlerConfig'] = _BBOXHANDLERCONFIG
DESCRIPTOR.message_types_by_name['Inference'] = _INFERENCE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ClasswiseBboxHandlerConfig = _reflection.GeneratedProtocolMessageType('ClasswiseBboxHandlerConfig', (_message.Message,), dict(
BboxColor = _reflection.GeneratedProtocolMessageType('BboxColor', (_message.Message,), dict(
DESCRIPTOR = _CLASSWISEBBOXHANDLERCONFIG_BBOXCOLOR,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.inference_pb2'
# @@protoc_insertion_point(class_scope:ClasswiseBboxHandlerConfig.BboxColor)
))
,
DESCRIPTOR = _CLASSWISEBBOXHANDLERCONFIG,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.inference_pb2'
# @@protoc_insertion_point(class_scope:ClasswiseBboxHandlerConfig)
))
_sym_db.RegisterMessage(ClasswiseBboxHandlerConfig)
_sym_db.RegisterMessage(ClasswiseBboxHandlerConfig.BboxColor)
BboxHandlerConfig = _reflection.GeneratedProtocolMessageType('BboxHandlerConfig', (_message.Message,), dict(
ClasswiseBboxHandlerConfigEntry = _reflection.GeneratedProtocolMessageType('ClasswiseBboxHandlerConfigEntry', (_message.Message,), dict(
DESCRIPTOR = _BBOXHANDLERCONFIG_CLASSWISEBBOXHANDLERCONFIGENTRY,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.inference_pb2'
# @@protoc_insertion_point(class_scope:BboxHandlerConfig.ClasswiseBboxHandlerConfigEntry)
))
,
DESCRIPTOR = _BBOXHANDLERCONFIG,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.inference_pb2'
# @@protoc_insertion_point(class_scope:BboxHandlerConfig)
))
_sym_db.RegisterMessage(BboxHandlerConfig)
_sym_db.RegisterMessage(BboxHandlerConfig.ClasswiseBboxHandlerConfigEntry)
Inference = _reflection.GeneratedProtocolMessageType('Inference', (_message.Message,), dict(
DESCRIPTOR = _INFERENCE,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.inference_pb2'
# @@protoc_insertion_point(class_scope:Inference)
))
_sym_db.RegisterMessage(Inference)
_BBOXHANDLERCONFIG_CLASSWISEBBOXHANDLERCONFIGENTRY._options = None
# @@protoc_insertion_point(module_scope)
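# Usage sketch (not emitted by protoc; guarded so plain imports are unaffected).
# Field names below come from the descriptors defined in this module; the class
# key 'car' and value 'person' are illustrative placeholders.
if __name__ == '__main__':
    inference = Inference()
    # Repeated string field on the nested BboxHandlerConfig message.
    inference.bbox_handler_config.postproc_classes.extend(['car', 'person'])
    # 'classwise_bbox_handler_config' is a proto3 map<string, ClasswiseBboxHandlerConfig>;
    # indexing a missing key creates and returns the mapped message in place.
    classwise = inference.bbox_handler_config.classwise_bbox_handler_config['car']
    assert 'car' in inference.bbox_handler_config.classwise_bbox_handler_config
    print(inference)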
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/proto/inference_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/detectnet_v2/proto/bbox_rasterizer_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/detectnet_v2/proto/bbox_rasterizer_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\nAnvidia_tao_tf1/cv/detectnet_v2/proto/bbox_rasterizer_config.proto\"\xe4\x02\n\x14\x42\x62oxRasterizerConfig\x12I\n\x13target_class_config\x18\x01 \x03(\x0b\x32,.BboxRasterizerConfig.TargetClassConfigEntry\x12\x17\n\x0f\x64\x65\x61\x64zone_radius\x18\x02 \x01(\x02\x1a\x84\x01\n\x11TargetClassConfig\x12\x14\n\x0c\x63ov_center_x\x18\x01 \x01(\x02\x12\x14\n\x0c\x63ov_center_y\x18\x02 \x01(\x02\x12\x14\n\x0c\x63ov_radius_x\x18\x03 \x01(\x02\x12\x14\n\x0c\x63ov_radius_y\x18\x04 \x01(\x02\x12\x17\n\x0f\x62\x62ox_min_radius\x18\x05 \x01(\x02\x1a\x61\n\x16TargetClassConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x36\n\x05value\x18\x02 \x01(\x0b\x32\'.BboxRasterizerConfig.TargetClassConfig:\x02\x38\x01\x62\x06proto3')
)
_BBOXRASTERIZERCONFIG_TARGETCLASSCONFIG = _descriptor.Descriptor(
name='TargetClassConfig',
full_name='BboxRasterizerConfig.TargetClassConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='cov_center_x', full_name='BboxRasterizerConfig.TargetClassConfig.cov_center_x', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cov_center_y', full_name='BboxRasterizerConfig.TargetClassConfig.cov_center_y', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cov_radius_x', full_name='BboxRasterizerConfig.TargetClassConfig.cov_radius_x', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cov_radius_y', full_name='BboxRasterizerConfig.TargetClassConfig.cov_radius_y', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='bbox_min_radius', full_name='BboxRasterizerConfig.TargetClassConfig.bbox_min_radius', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=195,
serialized_end=327,
)
_BBOXRASTERIZERCONFIG_TARGETCLASSCONFIGENTRY = _descriptor.Descriptor(
name='TargetClassConfigEntry',
full_name='BboxRasterizerConfig.TargetClassConfigEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='BboxRasterizerConfig.TargetClassConfigEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='BboxRasterizerConfig.TargetClassConfigEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=329,
serialized_end=426,
)
_BBOXRASTERIZERCONFIG = _descriptor.Descriptor(
name='BboxRasterizerConfig',
full_name='BboxRasterizerConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='target_class_config', full_name='BboxRasterizerConfig.target_class_config', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='deadzone_radius', full_name='BboxRasterizerConfig.deadzone_radius', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_BBOXRASTERIZERCONFIG_TARGETCLASSCONFIG, _BBOXRASTERIZERCONFIG_TARGETCLASSCONFIGENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=70,
serialized_end=426,
)
_BBOXRASTERIZERCONFIG_TARGETCLASSCONFIG.containing_type = _BBOXRASTERIZERCONFIG
_BBOXRASTERIZERCONFIG_TARGETCLASSCONFIGENTRY.fields_by_name['value'].message_type = _BBOXRASTERIZERCONFIG_TARGETCLASSCONFIG
_BBOXRASTERIZERCONFIG_TARGETCLASSCONFIGENTRY.containing_type = _BBOXRASTERIZERCONFIG
_BBOXRASTERIZERCONFIG.fields_by_name['target_class_config'].message_type = _BBOXRASTERIZERCONFIG_TARGETCLASSCONFIGENTRY
DESCRIPTOR.message_types_by_name['BboxRasterizerConfig'] = _BBOXRASTERIZERCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
BboxRasterizerConfig = _reflection.GeneratedProtocolMessageType('BboxRasterizerConfig', (_message.Message,), dict(
TargetClassConfig = _reflection.GeneratedProtocolMessageType('TargetClassConfig', (_message.Message,), dict(
DESCRIPTOR = _BBOXRASTERIZERCONFIG_TARGETCLASSCONFIG,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.bbox_rasterizer_config_pb2'
# @@protoc_insertion_point(class_scope:BboxRasterizerConfig.TargetClassConfig)
))
,
TargetClassConfigEntry = _reflection.GeneratedProtocolMessageType('TargetClassConfigEntry', (_message.Message,), dict(
DESCRIPTOR = _BBOXRASTERIZERCONFIG_TARGETCLASSCONFIGENTRY,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.bbox_rasterizer_config_pb2'
# @@protoc_insertion_point(class_scope:BboxRasterizerConfig.TargetClassConfigEntry)
))
,
DESCRIPTOR = _BBOXRASTERIZERCONFIG,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.bbox_rasterizer_config_pb2'
# @@protoc_insertion_point(class_scope:BboxRasterizerConfig)
))
_sym_db.RegisterMessage(BboxRasterizerConfig)
_sym_db.RegisterMessage(BboxRasterizerConfig.TargetClassConfig)
_sym_db.RegisterMessage(BboxRasterizerConfig.TargetClassConfigEntry)
_BBOXRASTERIZERCONFIG_TARGETCLASSCONFIGENTRY._options = None
# @@protoc_insertion_point(module_scope)
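# Usage sketch (not emitted by protoc; guarded so plain imports are unaffected).
# 'car' and the numeric values are illustrative; every field name used here is
# declared in the descriptors above.
if __name__ == '__main__':
    config = BboxRasterizerConfig()
    config.deadzone_radius = 0.67
    # 'target_class_config' is a map<string, TargetClassConfig>; indexing a
    # missing key creates the nested message entry.
    car = config.target_class_config['car']
    car.cov_center_x = 0.5
    car.cov_center_y = 0.5
    car.cov_radius_x = 0.4
    car.cov_radius_y = 0.4
    car.bbox_min_radius = 1.0
    print(config)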
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/proto/bbox_rasterizer_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/detectnet_v2/proto/soft_start_annealing_schedule_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/detectnet_v2/proto/soft_start_annealing_schedule_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\nOnvidia_tao_tf1/cv/detectnet_v2/proto/soft_start_annealing_schedule_config.proto\"\x7f\n SoftStartAnnealingScheduleConfig\x12\x19\n\x11min_learning_rate\x18\x01 \x01(\x02\x12\x19\n\x11max_learning_rate\x18\x02 \x01(\x02\x12\x12\n\nsoft_start\x18\x03 \x01(\x02\x12\x11\n\tannealing\x18\x04 \x01(\x02\x62\x06proto3')
)
_SOFTSTARTANNEALINGSCHEDULECONFIG = _descriptor.Descriptor(
name='SoftStartAnnealingScheduleConfig',
full_name='SoftStartAnnealingScheduleConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='min_learning_rate', full_name='SoftStartAnnealingScheduleConfig.min_learning_rate', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_learning_rate', full_name='SoftStartAnnealingScheduleConfig.max_learning_rate', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='soft_start', full_name='SoftStartAnnealingScheduleConfig.soft_start', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='annealing', full_name='SoftStartAnnealingScheduleConfig.annealing', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=83,
serialized_end=210,
)
DESCRIPTOR.message_types_by_name['SoftStartAnnealingScheduleConfig'] = _SOFTSTARTANNEALINGSCHEDULECONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SoftStartAnnealingScheduleConfig = _reflection.GeneratedProtocolMessageType('SoftStartAnnealingScheduleConfig', (_message.Message,), dict(
DESCRIPTOR = _SOFTSTARTANNEALINGSCHEDULECONFIG,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.soft_start_annealing_schedule_config_pb2'
# @@protoc_insertion_point(class_scope:SoftStartAnnealingScheduleConfig)
))
_sym_db.RegisterMessage(SoftStartAnnealingScheduleConfig)
# @@protoc_insertion_point(module_scope)
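# Usage sketch (not emitted by protoc; guarded so plain imports are unaffected).
# Shows parsing this message from protobuf text format, the representation
# these config protos are typically written in; the values are illustrative.
if __name__ == '__main__':
    from google.protobuf import text_format
    spec = """
    min_learning_rate: 5e-6
    max_learning_rate: 5e-4
    soft_start: 0.1
    annealing: 0.7
    """
    schedule = text_format.Parse(spec, SoftStartAnnealingScheduleConfig())
    assert schedule.max_learning_rate > schedule.min_learning_rate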
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/proto/soft_start_annealing_schedule_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/detectnet_v2/proto/experiment.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from nvidia_tao_tf1.cv.detectnet_v2.proto import augmentation_config_pb2 as nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_augmentation__config__pb2
from nvidia_tao_tf1.cv.detectnet_v2.proto import bbox_rasterizer_config_pb2 as nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_bbox__rasterizer__config__pb2
from nvidia_tao_tf1.cv.detectnet_v2.proto import cost_function_config_pb2 as nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_cost__function__config__pb2
from nvidia_tao_tf1.cv.detectnet_v2.proto import dataset_config_pb2 as nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_dataset__config__pb2
from nvidia_tao_tf1.cv.detectnet_v2.proto import evaluation_config_pb2 as nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_evaluation__config__pb2
from nvidia_tao_tf1.cv.detectnet_v2.proto import model_config_pb2 as nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_model__config__pb2
from nvidia_tao_tf1.cv.detectnet_v2.proto import objective_label_filter_pb2 as nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_objective__label__filter__pb2
from nvidia_tao_tf1.cv.detectnet_v2.proto import postprocessing_config_pb2 as nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_postprocessing__config__pb2
from nvidia_tao_tf1.cv.detectnet_v2.proto import training_config_pb2 as nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_training__config__pb2
from nvidia_tao_tf1.cv.detectnet_v2.proto import dataset_export_config_pb2 as nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_dataset__export__config__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/detectnet_v2/proto/experiment.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n5nvidia_tao_tf1/cv/detectnet_v2/proto/experiment.proto\x1a>nvidia_tao_tf1/cv/detectnet_v2/proto/augmentation_config.proto\x1a\x41nvidia_tao_tf1/cv/detectnet_v2/proto/bbox_rasterizer_config.proto\x1a?nvidia_tao_tf1/cv/detectnet_v2/proto/cost_function_config.proto\x1a\x39nvidia_tao_tf1/cv/detectnet_v2/proto/dataset_config.proto\x1a<nvidia_tao_tf1/cv/detectnet_v2/proto/evaluation_config.proto\x1a\x37nvidia_tao_tf1/cv/detectnet_v2/proto/model_config.proto\x1a\x41nvidia_tao_tf1/cv/detectnet_v2/proto/objective_label_filter.proto\x1a@nvidia_tao_tf1/cv/detectnet_v2/proto/postprocessing_config.proto\x1a:nvidia_tao_tf1/cv/detectnet_v2/proto/training_config.proto\x1a@nvidia_tao_tf1/cv/detectnet_v2/proto/dataset_export_config.proto\"\x83\x04\n\nExperiment\x12\x13\n\x0brandom_seed\x18\x01 \x01(\r\x12&\n\x0e\x64\x61taset_config\x18\x02 \x01(\x0b\x32\x0e.DatasetConfig\x12\x30\n\x13\x61ugmentation_config\x18\x03 \x01(\x0b\x32\x13.AugmentationConfig\x12\x34\n\x15postprocessing_config\x18\x04 \x01(\x0b\x32\x15.PostProcessingConfig\x12\"\n\x0cmodel_config\x18\x05 \x01(\x0b\x32\x0c.ModelConfig\x12,\n\x11\x65valuation_config\x18\x06 \x01(\x0b\x32\x11.EvaluationConfig\x12\x31\n\x14\x63ost_function_config\x18\x08 \x01(\x0b\x32\x13.CostFunctionConfig\x12(\n\x0ftraining_config\x18\t \x01(\x0b\x32\x0f.TrainingConfig\x12\x35\n\x16\x62\x62ox_rasterizer_config\x18\n \x01(\x0b\x32\x15.BboxRasterizerConfig\x12\x35\n\x16loss_mask_label_filter\x18\x0b \x01(\x0b\x32\x15.ObjectiveLabelFilter\x12\x33\n\x15\x64\x61taset_export_config\x18\x0c \x03(\x0b\x32\x14.DatasetExportConfigb\x06proto3')
,
dependencies=[nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_augmentation__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_bbox__rasterizer__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_cost__function__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_dataset__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_evaluation__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_model__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_objective__label__filter__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_postprocessing__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_training__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_dataset__export__config__pb2.DESCRIPTOR,])
_EXPERIMENT = _descriptor.Descriptor(
name='Experiment',
full_name='Experiment',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='random_seed', full_name='Experiment.random_seed', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dataset_config', full_name='Experiment.dataset_config', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='augmentation_config', full_name='Experiment.augmentation_config', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='postprocessing_config', full_name='Experiment.postprocessing_config', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='model_config', full_name='Experiment.model_config', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='evaluation_config', full_name='Experiment.evaluation_config', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cost_function_config', full_name='Experiment.cost_function_config', index=6,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='training_config', full_name='Experiment.training_config', index=7,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='bbox_rasterizer_config', full_name='Experiment.bbox_rasterizer_config', index=8,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='loss_mask_label_filter', full_name='Experiment.loss_mask_label_filter', index=9,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dataset_export_config', full_name='Experiment.dataset_export_config', index=10,
number=12, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=691,
serialized_end=1206,
)
_EXPERIMENT.fields_by_name['dataset_config'].message_type = nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_dataset__config__pb2._DATASETCONFIG
_EXPERIMENT.fields_by_name['augmentation_config'].message_type = nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_augmentation__config__pb2._AUGMENTATIONCONFIG
_EXPERIMENT.fields_by_name['postprocessing_config'].message_type = nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_postprocessing__config__pb2._POSTPROCESSINGCONFIG
_EXPERIMENT.fields_by_name['model_config'].message_type = nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_model__config__pb2._MODELCONFIG
_EXPERIMENT.fields_by_name['evaluation_config'].message_type = nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_evaluation__config__pb2._EVALUATIONCONFIG
_EXPERIMENT.fields_by_name['cost_function_config'].message_type = nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_cost__function__config__pb2._COSTFUNCTIONCONFIG
_EXPERIMENT.fields_by_name['training_config'].message_type = nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_training__config__pb2._TRAININGCONFIG
_EXPERIMENT.fields_by_name['bbox_rasterizer_config'].message_type = nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_bbox__rasterizer__config__pb2._BBOXRASTERIZERCONFIG
_EXPERIMENT.fields_by_name['loss_mask_label_filter'].message_type = nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_objective__label__filter__pb2._OBJECTIVELABELFILTER
_EXPERIMENT.fields_by_name['dataset_export_config'].message_type = nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_dataset__export__config__pb2._DATASETEXPORTCONFIG
DESCRIPTOR.message_types_by_name['Experiment'] = _EXPERIMENT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Experiment = _reflection.GeneratedProtocolMessageType('Experiment', (_message.Message,), dict(
DESCRIPTOR = _EXPERIMENT,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.experiment_pb2'
# @@protoc_insertion_point(class_scope:Experiment)
))
_sym_db.RegisterMessage(Experiment)
# @@protoc_insertion_point(module_scope)
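# Usage sketch (not emitted by protoc; guarded so plain imports are unaffected).
# Assembles a minimal top-level Experiment and round-trips it through text
# format; only fields declared in the descriptor above are touched.
if __name__ == '__main__':
    from google.protobuf import text_format
    experiment = Experiment()
    experiment.random_seed = 42
    # 'dataset_export_config' is a repeated message field (label=3): add()
    # appends and returns a new, empty DatasetExportConfig entry.
    experiment.dataset_export_config.add()
    parsed = text_format.Parse(text_format.MessageToString(experiment), Experiment())
    assert parsed.random_seed == 42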
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/proto/experiment_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/detectnet_v2/proto/label_filter.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/detectnet_v2/proto/label_filter.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n7nvidia_tao_tf1/cv/detectnet_v2/proto/label_filter.proto\"\x88\x04\n\x0bLabelFilter\x12N\n\x1c\x62\x62ox_dimensions_label_filter\x18\x01 \x01(\x0b\x32&.LabelFilter.BboxDimensionsLabelFilterH\x00\x12\x42\n\x16\x62\x62ox_crop_label_filter\x18\x02 \x01(\x0b\x32 .LabelFilter.BboxCropLabelFilterH\x00\x12H\n\x19source_class_label_filter\x18\x03 \x01(\x0b\x32#.LabelFilter.SourceClassLabelFilterH\x00\x1ai\n\x19\x42\x62oxDimensionsLabelFilter\x12\x11\n\tmin_width\x18\x01 \x01(\x02\x12\x12\n\nmin_height\x18\x02 \x01(\x02\x12\x11\n\tmax_width\x18\x03 \x01(\x02\x12\x12\n\nmax_height\x18\x04 \x01(\x02\x1a\x63\n\x13\x42\x62oxCropLabelFilter\x12\x11\n\tcrop_left\x18\x01 \x01(\x02\x12\x12\n\ncrop_right\x18\x02 \x01(\x02\x12\x10\n\x08\x63rop_top\x18\x03 \x01(\x02\x12\x13\n\x0b\x63rop_bottom\x18\x04 \x01(\x02\x1a\x34\n\x16SourceClassLabelFilter\x12\x1a\n\x12source_class_names\x18\x04 \x03(\tB\x15\n\x13label_filter_paramsb\x06proto3')
)
_LABELFILTER_BBOXDIMENSIONSLABELFILTER = _descriptor.Descriptor(
name='BboxDimensionsLabelFilter',
full_name='LabelFilter.BboxDimensionsLabelFilter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='min_width', full_name='LabelFilter.BboxDimensionsLabelFilter.min_width', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_height', full_name='LabelFilter.BboxDimensionsLabelFilter.min_height', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_width', full_name='LabelFilter.BboxDimensionsLabelFilter.max_width', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_height', full_name='LabelFilter.BboxDimensionsLabelFilter.max_height', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=297,
serialized_end=402,
)
_LABELFILTER_BBOXCROPLABELFILTER = _descriptor.Descriptor(
name='BboxCropLabelFilter',
full_name='LabelFilter.BboxCropLabelFilter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='crop_left', full_name='LabelFilter.BboxCropLabelFilter.crop_left', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='crop_right', full_name='LabelFilter.BboxCropLabelFilter.crop_right', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='crop_top', full_name='LabelFilter.BboxCropLabelFilter.crop_top', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='crop_bottom', full_name='LabelFilter.BboxCropLabelFilter.crop_bottom', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=404,
serialized_end=503,
)
_LABELFILTER_SOURCECLASSLABELFILTER = _descriptor.Descriptor(
name='SourceClassLabelFilter',
full_name='LabelFilter.SourceClassLabelFilter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='source_class_names', full_name='LabelFilter.SourceClassLabelFilter.source_class_names', index=0,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=505,
serialized_end=557,
)
_LABELFILTER = _descriptor.Descriptor(
name='LabelFilter',
full_name='LabelFilter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='bbox_dimensions_label_filter', full_name='LabelFilter.bbox_dimensions_label_filter', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='bbox_crop_label_filter', full_name='LabelFilter.bbox_crop_label_filter', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='source_class_label_filter', full_name='LabelFilter.source_class_label_filter', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_LABELFILTER_BBOXDIMENSIONSLABELFILTER, _LABELFILTER_BBOXCROPLABELFILTER, _LABELFILTER_SOURCECLASSLABELFILTER, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='label_filter_params', full_name='LabelFilter.label_filter_params',
index=0, containing_type=None, fields=[]),
],
serialized_start=60,
serialized_end=580,
)
_LABELFILTER_BBOXDIMENSIONSLABELFILTER.containing_type = _LABELFILTER
_LABELFILTER_BBOXCROPLABELFILTER.containing_type = _LABELFILTER
_LABELFILTER_SOURCECLASSLABELFILTER.containing_type = _LABELFILTER
_LABELFILTER.fields_by_name['bbox_dimensions_label_filter'].message_type = _LABELFILTER_BBOXDIMENSIONSLABELFILTER
_LABELFILTER.fields_by_name['bbox_crop_label_filter'].message_type = _LABELFILTER_BBOXCROPLABELFILTER
_LABELFILTER.fields_by_name['source_class_label_filter'].message_type = _LABELFILTER_SOURCECLASSLABELFILTER
_LABELFILTER.oneofs_by_name['label_filter_params'].fields.append(
_LABELFILTER.fields_by_name['bbox_dimensions_label_filter'])
_LABELFILTER.fields_by_name['bbox_dimensions_label_filter'].containing_oneof = _LABELFILTER.oneofs_by_name['label_filter_params']
_LABELFILTER.oneofs_by_name['label_filter_params'].fields.append(
_LABELFILTER.fields_by_name['bbox_crop_label_filter'])
_LABELFILTER.fields_by_name['bbox_crop_label_filter'].containing_oneof = _LABELFILTER.oneofs_by_name['label_filter_params']
_LABELFILTER.oneofs_by_name['label_filter_params'].fields.append(
_LABELFILTER.fields_by_name['source_class_label_filter'])
_LABELFILTER.fields_by_name['source_class_label_filter'].containing_oneof = _LABELFILTER.oneofs_by_name['label_filter_params']
DESCRIPTOR.message_types_by_name['LabelFilter'] = _LABELFILTER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
LabelFilter = _reflection.GeneratedProtocolMessageType('LabelFilter', (_message.Message,), dict(
BboxDimensionsLabelFilter = _reflection.GeneratedProtocolMessageType('BboxDimensionsLabelFilter', (_message.Message,), dict(
DESCRIPTOR = _LABELFILTER_BBOXDIMENSIONSLABELFILTER,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.label_filter_pb2'
# @@protoc_insertion_point(class_scope:LabelFilter.BboxDimensionsLabelFilter)
))
,
BboxCropLabelFilter = _reflection.GeneratedProtocolMessageType('BboxCropLabelFilter', (_message.Message,), dict(
DESCRIPTOR = _LABELFILTER_BBOXCROPLABELFILTER,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.label_filter_pb2'
# @@protoc_insertion_point(class_scope:LabelFilter.BboxCropLabelFilter)
))
,
SourceClassLabelFilter = _reflection.GeneratedProtocolMessageType('SourceClassLabelFilter', (_message.Message,), dict(
DESCRIPTOR = _LABELFILTER_SOURCECLASSLABELFILTER,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.label_filter_pb2'
# @@protoc_insertion_point(class_scope:LabelFilter.SourceClassLabelFilter)
))
,
DESCRIPTOR = _LABELFILTER,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.label_filter_pb2'
# @@protoc_insertion_point(class_scope:LabelFilter)
))
_sym_db.RegisterMessage(LabelFilter)
_sym_db.RegisterMessage(LabelFilter.BboxDimensionsLabelFilter)
_sym_db.RegisterMessage(LabelFilter.BboxCropLabelFilter)
_sym_db.RegisterMessage(LabelFilter.SourceClassLabelFilter)
# @@protoc_insertion_point(module_scope)
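# Usage sketch (not emitted by protoc; guarded so plain imports are unaffected).
# The three nested filter messages belong to the 'label_filter_params' oneof
# wired above, so setting one member clears any other; the class name
# 'automobile' is illustrative.
if __name__ == '__main__':
    label_filter = LabelFilter()
    label_filter.bbox_dimensions_label_filter.min_width = 10.0
    assert label_filter.WhichOneof('label_filter_params') == 'bbox_dimensions_label_filter'
    label_filter.source_class_label_filter.source_class_names.append('automobile')
    # Setting a second member of the oneof replaced the first one.
    assert label_filter.WhichOneof('label_filter_params') == 'source_class_label_filter'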
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/proto/label_filter_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/detectnet_v2/proto/kitti_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/detectnet_v2/proto/kitti_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n7nvidia_tao_tf1/cv/detectnet_v2/proto/kitti_config.proto\"\xa5\x02\n\x0bKITTIConfig\x12\x1b\n\x13root_directory_path\x18\x01 \x01(\t\x12\x16\n\x0eimage_dir_name\x18\x02 \x01(\t\x12\x16\n\x0elabel_dir_name\x18\x03 \x01(\t\x12\x18\n\x10point_clouds_dir\x18\x04 \x01(\t\x12\x18\n\x10\x63\x61librations_dir\x18\x05 \x01(\t\x12%\n\x1dkitti_sequence_to_frames_file\x18\x06 \x01(\t\x12\x17\n\x0fimage_extension\x18\x07 \x01(\t\x12\x16\n\x0enum_partitions\x18\x08 \x01(\r\x12\x12\n\nnum_shards\x18\t \x01(\r\x12\x16\n\x0epartition_mode\x18\n \x01(\t\x12\x11\n\tval_split\x18\x0b \x01(\x02\x62\x06proto3')
)
_KITTICONFIG = _descriptor.Descriptor(
name='KITTIConfig',
full_name='KITTIConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='root_directory_path', full_name='KITTIConfig.root_directory_path', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='image_dir_name', full_name='KITTIConfig.image_dir_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='label_dir_name', full_name='KITTIConfig.label_dir_name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='point_clouds_dir', full_name='KITTIConfig.point_clouds_dir', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='calibrations_dir', full_name='KITTIConfig.calibrations_dir', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='kitti_sequence_to_frames_file', full_name='KITTIConfig.kitti_sequence_to_frames_file', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='image_extension', full_name='KITTIConfig.image_extension', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_partitions', full_name='KITTIConfig.num_partitions', index=7,
number=8, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_shards', full_name='KITTIConfig.num_shards', index=8,
number=9, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='partition_mode', full_name='KITTIConfig.partition_mode', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='val_split', full_name='KITTIConfig.val_split', index=10,
number=11, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=60,
serialized_end=353,
)
DESCRIPTOR.message_types_by_name['KITTIConfig'] = _KITTICONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
KITTIConfig = _reflection.GeneratedProtocolMessageType('KITTIConfig', (_message.Message,), dict(
DESCRIPTOR = _KITTICONFIG,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.kitti_config_pb2'
# @@protoc_insertion_point(class_scope:KITTIConfig)
))
_sym_db.RegisterMessage(KITTIConfig)
# @@protoc_insertion_point(module_scope)
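# Usage sketch (not emitted by protoc; guarded so plain imports are unaffected).
# Demonstrates a binary serialization round-trip of the converter
# configuration; the field values are illustrative.
if __name__ == '__main__':
    kitti = KITTIConfig()
    kitti.image_extension = '.png'
    kitti.num_shards = 10
    kitti.val_split = 0.2
    restored = KITTIConfig.FromString(kitti.SerializeToString())
    assert restored.num_shards == 10 and restored.image_extension == '.png'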
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/proto/kitti_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/detectnet_v2/proto/evaluation_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/detectnet_v2/proto/evaluation_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n<nvidia_tao_tf1/cv/detectnet_v2/proto/evaluation_config.proto\"\xbe\x05\n\x10\x45valuationConfig\x12)\n!validation_period_during_training\x18\x01 \x01(\r\x12\x1e\n\x16\x66irst_validation_epoch\x18\x02 \x01(\r\x12%\n\x1d\x65\x61rly_stopping_patience_steps\x18\x06 \x01(\r\x12i\n&minimum_detection_ground_truth_overlap\x18\x03 \x03(\x0b\x32\x39.EvaluationConfig.MinimumDetectionGroundTruthOverlapEntry\x12I\n\x15\x65valuation_box_config\x18\x04 \x03(\x0b\x32*.EvaluationConfig.EvaluationBoxConfigEntry\x12\x39\n\x16\x61verage_precision_mode\x18\x05 \x01(\x0e\x32\x19.EvaluationConfig.AP_MODE\x1aI\n\'MinimumDetectionGroundTruthOverlapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x02:\x02\x38\x01\x1as\n\x13\x45valuationBoxConfig\x12\x16\n\x0eminimum_height\x18\x01 \x01(\x05\x12\x16\n\x0emaximum_height\x18\x02 \x01(\x05\x12\x15\n\rminimum_width\x18\x03 \x01(\x05\x12\x15\n\rmaximum_width\x18\x04 \x01(\x05\x1a\x61\n\x18\x45valuationBoxConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x34\n\x05value\x18\x02 \x01(\x0b\x32%.EvaluationConfig.EvaluationBoxConfig:\x02\x38\x01\"$\n\x07\x41P_MODE\x12\n\n\x06SAMPLE\x10\x00\x12\r\n\tINTEGRATE\x10\x01\x62\x06proto3')
)
_EVALUATIONCONFIG_AP_MODE = _descriptor.EnumDescriptor(
name='AP_MODE',
full_name='EvaluationConfig.AP_MODE',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='SAMPLE', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INTEGRATE', index=1, number=1,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=731,
serialized_end=767,
)
_sym_db.RegisterEnumDescriptor(_EVALUATIONCONFIG_AP_MODE)
_EVALUATIONCONFIG_MINIMUMDETECTIONGROUNDTRUTHOVERLAPENTRY = _descriptor.Descriptor(
name='MinimumDetectionGroundTruthOverlapEntry',
full_name='EvaluationConfig.MinimumDetectionGroundTruthOverlapEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='EvaluationConfig.MinimumDetectionGroundTruthOverlapEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='EvaluationConfig.MinimumDetectionGroundTruthOverlapEntry.value', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=440,
serialized_end=513,
)
_EVALUATIONCONFIG_EVALUATIONBOXCONFIG = _descriptor.Descriptor(
name='EvaluationBoxConfig',
full_name='EvaluationConfig.EvaluationBoxConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='minimum_height', full_name='EvaluationConfig.EvaluationBoxConfig.minimum_height', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='maximum_height', full_name='EvaluationConfig.EvaluationBoxConfig.maximum_height', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='minimum_width', full_name='EvaluationConfig.EvaluationBoxConfig.minimum_width', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='maximum_width', full_name='EvaluationConfig.EvaluationBoxConfig.maximum_width', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=515,
serialized_end=630,
)
_EVALUATIONCONFIG_EVALUATIONBOXCONFIGENTRY = _descriptor.Descriptor(
name='EvaluationBoxConfigEntry',
full_name='EvaluationConfig.EvaluationBoxConfigEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='EvaluationConfig.EvaluationBoxConfigEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='EvaluationConfig.EvaluationBoxConfigEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=632,
serialized_end=729,
)
_EVALUATIONCONFIG = _descriptor.Descriptor(
name='EvaluationConfig',
full_name='EvaluationConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='validation_period_during_training', full_name='EvaluationConfig.validation_period_during_training', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='first_validation_epoch', full_name='EvaluationConfig.first_validation_epoch', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='early_stopping_patience_steps', full_name='EvaluationConfig.early_stopping_patience_steps', index=2,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='minimum_detection_ground_truth_overlap', full_name='EvaluationConfig.minimum_detection_ground_truth_overlap', index=3,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='evaluation_box_config', full_name='EvaluationConfig.evaluation_box_config', index=4,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='average_precision_mode', full_name='EvaluationConfig.average_precision_mode', index=5,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_EVALUATIONCONFIG_MINIMUMDETECTIONGROUNDTRUTHOVERLAPENTRY, _EVALUATIONCONFIG_EVALUATIONBOXCONFIG, _EVALUATIONCONFIG_EVALUATIONBOXCONFIGENTRY, ],
enum_types=[
_EVALUATIONCONFIG_AP_MODE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=65,
serialized_end=767,
)
_EVALUATIONCONFIG_MINIMUMDETECTIONGROUNDTRUTHOVERLAPENTRY.containing_type = _EVALUATIONCONFIG
_EVALUATIONCONFIG_EVALUATIONBOXCONFIG.containing_type = _EVALUATIONCONFIG
_EVALUATIONCONFIG_EVALUATIONBOXCONFIGENTRY.fields_by_name['value'].message_type = _EVALUATIONCONFIG_EVALUATIONBOXCONFIG
_EVALUATIONCONFIG_EVALUATIONBOXCONFIGENTRY.containing_type = _EVALUATIONCONFIG
_EVALUATIONCONFIG.fields_by_name['minimum_detection_ground_truth_overlap'].message_type = _EVALUATIONCONFIG_MINIMUMDETECTIONGROUNDTRUTHOVERLAPENTRY
_EVALUATIONCONFIG.fields_by_name['evaluation_box_config'].message_type = _EVALUATIONCONFIG_EVALUATIONBOXCONFIGENTRY
_EVALUATIONCONFIG.fields_by_name['average_precision_mode'].enum_type = _EVALUATIONCONFIG_AP_MODE
_EVALUATIONCONFIG_AP_MODE.containing_type = _EVALUATIONCONFIG
DESCRIPTOR.message_types_by_name['EvaluationConfig'] = _EVALUATIONCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
EvaluationConfig = _reflection.GeneratedProtocolMessageType('EvaluationConfig', (_message.Message,), dict(
MinimumDetectionGroundTruthOverlapEntry = _reflection.GeneratedProtocolMessageType('MinimumDetectionGroundTruthOverlapEntry', (_message.Message,), dict(
DESCRIPTOR = _EVALUATIONCONFIG_MINIMUMDETECTIONGROUNDTRUTHOVERLAPENTRY,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.evaluation_config_pb2'
# @@protoc_insertion_point(class_scope:EvaluationConfig.MinimumDetectionGroundTruthOverlapEntry)
))
,
EvaluationBoxConfig = _reflection.GeneratedProtocolMessageType('EvaluationBoxConfig', (_message.Message,), dict(
DESCRIPTOR = _EVALUATIONCONFIG_EVALUATIONBOXCONFIG,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.evaluation_config_pb2'
# @@protoc_insertion_point(class_scope:EvaluationConfig.EvaluationBoxConfig)
))
,
EvaluationBoxConfigEntry = _reflection.GeneratedProtocolMessageType('EvaluationBoxConfigEntry', (_message.Message,), dict(
DESCRIPTOR = _EVALUATIONCONFIG_EVALUATIONBOXCONFIGENTRY,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.evaluation_config_pb2'
# @@protoc_insertion_point(class_scope:EvaluationConfig.EvaluationBoxConfigEntry)
))
,
DESCRIPTOR = _EVALUATIONCONFIG,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.evaluation_config_pb2'
# @@protoc_insertion_point(class_scope:EvaluationConfig)
))
_sym_db.RegisterMessage(EvaluationConfig)
_sym_db.RegisterMessage(EvaluationConfig.MinimumDetectionGroundTruthOverlapEntry)
_sym_db.RegisterMessage(EvaluationConfig.EvaluationBoxConfig)
_sym_db.RegisterMessage(EvaluationConfig.EvaluationBoxConfigEntry)
_EVALUATIONCONFIG_MINIMUMDETECTIONGROUNDTRUTHOVERLAPENTRY._options = None
_EVALUATIONCONFIG_EVALUATIONBOXCONFIGENTRY._options = None
# @@protoc_insertion_point(module_scope)
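# Usage sketch (not emitted by protoc; guarded so plain imports are unaffected).
# Nested AP_MODE enum values are exposed as attributes of EvaluationConfig, and
# both maps declared above are populated by indexing; the class key 'car' and
# the thresholds are illustrative.
if __name__ == '__main__':
    eval_config = EvaluationConfig()
    eval_config.average_precision_mode = EvaluationConfig.INTEGRATE
    # map<string, float>: assignment inserts the per-class IoU threshold.
    eval_config.minimum_detection_ground_truth_overlap['car'] = 0.7
    # map<string, EvaluationBoxConfig>: indexing creates the nested message.
    box = eval_config.evaluation_box_config['car']
    box.minimum_height = 20
    box.maximum_height = 9999
    print(eval_config)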
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/proto/evaluation_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/detectnet_v2/proto/dataset_export_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from nvidia_tao_tf1.cv.detectnet_v2.proto import kitti_config_pb2 as nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_kitti__config__pb2
from nvidia_tao_tf1.cv.detectnet_v2.proto import coco_config_pb2 as nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_coco__config__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/detectnet_v2/proto/dataset_export_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n@nvidia_tao_tf1/cv/detectnet_v2/proto/dataset_export_config.proto\x1a\x37nvidia_tao_tf1/cv/detectnet_v2/proto/kitti_config.proto\x1a\x36nvidia_tao_tf1/cv/detectnet_v2/proto/coco_config.proto\"\xec\x06\n\x13\x44\x61tasetExportConfig\x12\"\n\x0b\x63oco_config\x18\x01 \x01(\x0b\x32\x0b.COCOConfigH\x00\x12$\n\x0ckitti_config\x18\x02 \x01(\x0b\x32\x0c.KITTIConfigH\x00\x12I\n\x16sample_modifier_config\x18\x05 \x01(\x0b\x32).DatasetExportConfig.SampleModifierConfig\x12\x1c\n\x14image_directory_path\x18\x06 \x01(\t\x12J\n\x14target_class_mapping\x18\x07 \x03(\x0b\x32,.DatasetExportConfig.TargetClassMappingEntry\x1a\x83\x04\n\x14SampleModifierConfig\x12&\n\x1e\x66ilter_samples_containing_only\x18\x01 \x03(\t\x12\x1f\n\x17\x64ominant_target_classes\x18\x02 \x03(\t\x12r\n\x1eminimum_target_class_imbalance\x18\x03 \x03(\x0b\x32J.DatasetExportConfig.SampleModifierConfig.MinimumTargetClassImbalanceEntry\x12\x16\n\x0enum_duplicates\x18\x04 \x01(\r\x12\x1c\n\x14max_training_samples\x18\x05 \x01(\r\x12q\n\x1esource_to_target_class_mapping\x18\x06 \x03(\x0b\x32I.DatasetExportConfig.SampleModifierConfig.SourceToTargetClassMappingEntry\x1a\x42\n MinimumTargetClassImbalanceEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x02:\x02\x38\x01\x1a\x41\n\x1fSourceToTargetClassMappingEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x39\n\x17TargetClassMappingEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\x15\n\x13\x63onvert_config_typeb\x06proto3')
,
dependencies=[nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_kitti__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_coco__config__pb2.DESCRIPTOR,])
_DATASETEXPORTCONFIG_SAMPLEMODIFIERCONFIG_MINIMUMTARGETCLASSIMBALANCEENTRY = _descriptor.Descriptor(
name='MinimumTargetClassImbalanceEntry',
full_name='DatasetExportConfig.SampleModifierConfig.MinimumTargetClassImbalanceEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='DatasetExportConfig.SampleModifierConfig.MinimumTargetClassImbalanceEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='DatasetExportConfig.SampleModifierConfig.MinimumTargetClassImbalanceEntry.value', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=843,
serialized_end=909,
)
_DATASETEXPORTCONFIG_SAMPLEMODIFIERCONFIG_SOURCETOTARGETCLASSMAPPINGENTRY = _descriptor.Descriptor(
name='SourceToTargetClassMappingEntry',
full_name='DatasetExportConfig.SampleModifierConfig.SourceToTargetClassMappingEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='DatasetExportConfig.SampleModifierConfig.SourceToTargetClassMappingEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='DatasetExportConfig.SampleModifierConfig.SourceToTargetClassMappingEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=911,
serialized_end=976,
)
_DATASETEXPORTCONFIG_SAMPLEMODIFIERCONFIG = _descriptor.Descriptor(
name='SampleModifierConfig',
full_name='DatasetExportConfig.SampleModifierConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='filter_samples_containing_only', full_name='DatasetExportConfig.SampleModifierConfig.filter_samples_containing_only', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dominant_target_classes', full_name='DatasetExportConfig.SampleModifierConfig.dominant_target_classes', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='minimum_target_class_imbalance', full_name='DatasetExportConfig.SampleModifierConfig.minimum_target_class_imbalance', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_duplicates', full_name='DatasetExportConfig.SampleModifierConfig.num_duplicates', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_training_samples', full_name='DatasetExportConfig.SampleModifierConfig.max_training_samples', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='source_to_target_class_mapping', full_name='DatasetExportConfig.SampleModifierConfig.source_to_target_class_mapping', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_DATASETEXPORTCONFIG_SAMPLEMODIFIERCONFIG_MINIMUMTARGETCLASSIMBALANCEENTRY, _DATASETEXPORTCONFIG_SAMPLEMODIFIERCONFIG_SOURCETOTARGETCLASSMAPPINGENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=461,
serialized_end=976,
)
_DATASETEXPORTCONFIG_TARGETCLASSMAPPINGENTRY = _descriptor.Descriptor(
name='TargetClassMappingEntry',
full_name='DatasetExportConfig.TargetClassMappingEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='DatasetExportConfig.TargetClassMappingEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='DatasetExportConfig.TargetClassMappingEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=978,
serialized_end=1035,
)
_DATASETEXPORTCONFIG = _descriptor.Descriptor(
name='DatasetExportConfig',
full_name='DatasetExportConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='coco_config', full_name='DatasetExportConfig.coco_config', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='kitti_config', full_name='DatasetExportConfig.kitti_config', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sample_modifier_config', full_name='DatasetExportConfig.sample_modifier_config', index=2,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='image_directory_path', full_name='DatasetExportConfig.image_directory_path', index=3,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='target_class_mapping', full_name='DatasetExportConfig.target_class_mapping', index=4,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_DATASETEXPORTCONFIG_SAMPLEMODIFIERCONFIG, _DATASETEXPORTCONFIG_TARGETCLASSMAPPINGENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='convert_config_type', full_name='DatasetExportConfig.convert_config_type',
index=0, containing_type=None, fields=[]),
],
serialized_start=182,
serialized_end=1058,
)
_DATASETEXPORTCONFIG_SAMPLEMODIFIERCONFIG_MINIMUMTARGETCLASSIMBALANCEENTRY.containing_type = _DATASETEXPORTCONFIG_SAMPLEMODIFIERCONFIG
_DATASETEXPORTCONFIG_SAMPLEMODIFIERCONFIG_SOURCETOTARGETCLASSMAPPINGENTRY.containing_type = _DATASETEXPORTCONFIG_SAMPLEMODIFIERCONFIG
_DATASETEXPORTCONFIG_SAMPLEMODIFIERCONFIG.fields_by_name['minimum_target_class_imbalance'].message_type = _DATASETEXPORTCONFIG_SAMPLEMODIFIERCONFIG_MINIMUMTARGETCLASSIMBALANCEENTRY
_DATASETEXPORTCONFIG_SAMPLEMODIFIERCONFIG.fields_by_name['source_to_target_class_mapping'].message_type = _DATASETEXPORTCONFIG_SAMPLEMODIFIERCONFIG_SOURCETOTARGETCLASSMAPPINGENTRY
_DATASETEXPORTCONFIG_SAMPLEMODIFIERCONFIG.containing_type = _DATASETEXPORTCONFIG
_DATASETEXPORTCONFIG_TARGETCLASSMAPPINGENTRY.containing_type = _DATASETEXPORTCONFIG
_DATASETEXPORTCONFIG.fields_by_name['coco_config'].message_type = nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_coco__config__pb2._COCOCONFIG
_DATASETEXPORTCONFIG.fields_by_name['kitti_config'].message_type = nvidia__tao__tf1_dot_cv_dot_detectnet__v2_dot_proto_dot_kitti__config__pb2._KITTICONFIG
_DATASETEXPORTCONFIG.fields_by_name['sample_modifier_config'].message_type = _DATASETEXPORTCONFIG_SAMPLEMODIFIERCONFIG
_DATASETEXPORTCONFIG.fields_by_name['target_class_mapping'].message_type = _DATASETEXPORTCONFIG_TARGETCLASSMAPPINGENTRY
_DATASETEXPORTCONFIG.oneofs_by_name['convert_config_type'].fields.append(
_DATASETEXPORTCONFIG.fields_by_name['coco_config'])
_DATASETEXPORTCONFIG.fields_by_name['coco_config'].containing_oneof = _DATASETEXPORTCONFIG.oneofs_by_name['convert_config_type']
_DATASETEXPORTCONFIG.oneofs_by_name['convert_config_type'].fields.append(
_DATASETEXPORTCONFIG.fields_by_name['kitti_config'])
_DATASETEXPORTCONFIG.fields_by_name['kitti_config'].containing_oneof = _DATASETEXPORTCONFIG.oneofs_by_name['convert_config_type']
DESCRIPTOR.message_types_by_name['DatasetExportConfig'] = _DATASETEXPORTCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DatasetExportConfig = _reflection.GeneratedProtocolMessageType('DatasetExportConfig', (_message.Message,), dict(
SampleModifierConfig = _reflection.GeneratedProtocolMessageType('SampleModifierConfig', (_message.Message,), dict(
MinimumTargetClassImbalanceEntry = _reflection.GeneratedProtocolMessageType('MinimumTargetClassImbalanceEntry', (_message.Message,), dict(
DESCRIPTOR = _DATASETEXPORTCONFIG_SAMPLEMODIFIERCONFIG_MINIMUMTARGETCLASSIMBALANCEENTRY,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.dataset_export_config_pb2'
# @@protoc_insertion_point(class_scope:DatasetExportConfig.SampleModifierConfig.MinimumTargetClassImbalanceEntry)
))
,
SourceToTargetClassMappingEntry = _reflection.GeneratedProtocolMessageType('SourceToTargetClassMappingEntry', (_message.Message,), dict(
DESCRIPTOR = _DATASETEXPORTCONFIG_SAMPLEMODIFIERCONFIG_SOURCETOTARGETCLASSMAPPINGENTRY,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.dataset_export_config_pb2'
# @@protoc_insertion_point(class_scope:DatasetExportConfig.SampleModifierConfig.SourceToTargetClassMappingEntry)
))
,
DESCRIPTOR = _DATASETEXPORTCONFIG_SAMPLEMODIFIERCONFIG,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.dataset_export_config_pb2'
# @@protoc_insertion_point(class_scope:DatasetExportConfig.SampleModifierConfig)
))
,
TargetClassMappingEntry = _reflection.GeneratedProtocolMessageType('TargetClassMappingEntry', (_message.Message,), dict(
DESCRIPTOR = _DATASETEXPORTCONFIG_TARGETCLASSMAPPINGENTRY,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.dataset_export_config_pb2'
# @@protoc_insertion_point(class_scope:DatasetExportConfig.TargetClassMappingEntry)
))
,
DESCRIPTOR = _DATASETEXPORTCONFIG,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.dataset_export_config_pb2'
# @@protoc_insertion_point(class_scope:DatasetExportConfig)
))
_sym_db.RegisterMessage(DatasetExportConfig)
_sym_db.RegisterMessage(DatasetExportConfig.SampleModifierConfig)
_sym_db.RegisterMessage(DatasetExportConfig.SampleModifierConfig.MinimumTargetClassImbalanceEntry)
_sym_db.RegisterMessage(DatasetExportConfig.SampleModifierConfig.SourceToTargetClassMappingEntry)
_sym_db.RegisterMessage(DatasetExportConfig.TargetClassMappingEntry)
_DATASETEXPORTCONFIG_SAMPLEMODIFIERCONFIG_MINIMUMTARGETCLASSIMBALANCEENTRY._options = None
_DATASETEXPORTCONFIG_SAMPLEMODIFIERCONFIG_SOURCETOTARGETCLASSMAPPINGENTRY._options = None
_DATASETEXPORTCONFIG_TARGETCLASSMAPPINGENTRY._options = None
# @@protoc_insertion_point(module_scope)
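# ---------------------------------------------------------------------------
# Usage sketch -- an editorial addition, not part of the protoc output. It
# shows that the generated DatasetExportConfig behaves like any proto3
# message: assigning into `kitti_config` selects the `convert_config_type`
# oneof defined above, and the TargetClassMappingEntry map surfaces as a
# plain dict. All concrete values below are illustrative placeholders.
if __name__ == '__main__':
    config = DatasetExportConfig()
    config.kitti_config.root_directory_path = '/data/kitti'  # picks the oneof branch
    config.target_class_mapping['pedestrian'] = 'person'     # map field acts like a dict
    config.sample_modifier_config.num_duplicates = 2
    assert config.WhichOneof('convert_config_type') == 'kitti_config'
    print(config)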
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/proto/dataset_export_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/detectnet_v2/proto/inferencer_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/detectnet_v2/proto/inferencer_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n<nvidia_tao_tf1/cv/detectnet_v2/proto/inferencer_config.proto\"`\n\x10\x43\x61libratorConfig\x12\x19\n\x11\x63\x61libration_cache\x18\x01 \x01(\t\x12\x1e\n\x16\x63\x61libration_tensorfile\x18\x02 \x01(\t\x12\x11\n\tn_batches\x18\x03 \x01(\x05\"\x1a\n\tTLTConfig\x12\r\n\x05model\x18\x01 \x01(\t\"\xf1\x02\n\x0eTensorRTConfig\x12&\n\x06parser\x18\x01 \x01(\x0e\x32\x16.TensorRTConfig.Parser\x12\x12\n\ncaffemodel\x18\x02 \x01(\t\x12\x10\n\x08prototxt\x18\x03 \x01(\t\x12\x11\n\tuff_model\x18\x04 \x01(\t\x12\x12\n\netlt_model\x18\x05 \x01(\t\x12:\n\x11\x62\x61\x63kend_data_type\x18\x06 \x01(\x0e\x32\x1f.TensorRTConfig.BackendDataType\x12\x13\n\x0bsave_engine\x18\x07 \x01(\x08\x12\x12\n\ntrt_engine\x18\x08 \x01(\t\x12,\n\x11\x63\x61librator_config\x18\t \x01(\x0b\x32\x11.CalibratorConfig\"&\n\x06Parser\x12\x08\n\x04\x45TLT\x10\x00\x12\x07\n\x03UFF\x10\x01\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x02\"/\n\x0f\x42\x61\x63kendDataType\x12\x08\n\x04\x46P32\x10\x00\x12\x08\n\x04\x46P16\x10\x01\x12\x08\n\x04INT8\x10\x02\"\xb2\x02\n\x10InferencerConfig\x12 \n\ntlt_config\x18\x01 \x01(\x0b\x32\n.TLTConfigH\x00\x12*\n\x0ftensorrt_config\x18\x02 \x01(\x0b\x32\x0f.TensorRTConfigH\x00\x12\x13\n\x0binput_nodes\x18\x03 \x03(\t\x12\x14\n\x0coutput_nodes\x18\x04 \x03(\t\x12\x12\n\nbatch_size\x18\x05 \x01(\x05\x12\x14\n\x0cimage_height\x18\x06 \x01(\x05\x12\x13\n\x0bimage_width\x18\x07 \x01(\x05\x12\x16\n\x0eimage_channels\x18\x08 \x01(\x05\x12\x11\n\tgpu_index\x18\t \x01(\x05\x12\x16\n\x0etarget_classes\x18\n \x03(\t\x12\x0e\n\x06stride\x18\x0b \x01(\x05\x42\x13\n\x11model_config_typeb\x06proto3')
)
_TENSORRTCONFIG_PARSER = _descriptor.EnumDescriptor(
name='Parser',
full_name='TensorRTConfig.Parser',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='ETLT', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UFF', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CAFFE', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=473,
serialized_end=511,
)
_sym_db.RegisterEnumDescriptor(_TENSORRTCONFIG_PARSER)
_TENSORRTCONFIG_BACKENDDATATYPE = _descriptor.EnumDescriptor(
name='BackendDataType',
full_name='TensorRTConfig.BackendDataType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='FP32', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FP16', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INT8', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=513,
serialized_end=560,
)
_sym_db.RegisterEnumDescriptor(_TENSORRTCONFIG_BACKENDDATATYPE)
_CALIBRATORCONFIG = _descriptor.Descriptor(
name='CalibratorConfig',
full_name='CalibratorConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='calibration_cache', full_name='CalibratorConfig.calibration_cache', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='calibration_tensorfile', full_name='CalibratorConfig.calibration_tensorfile', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='n_batches', full_name='CalibratorConfig.n_batches', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=64,
serialized_end=160,
)
_TLTCONFIG = _descriptor.Descriptor(
name='TLTConfig',
full_name='TLTConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='model', full_name='TLTConfig.model', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=162,
serialized_end=188,
)
_TENSORRTCONFIG = _descriptor.Descriptor(
name='TensorRTConfig',
full_name='TensorRTConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='parser', full_name='TensorRTConfig.parser', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='caffemodel', full_name='TensorRTConfig.caffemodel', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='prototxt', full_name='TensorRTConfig.prototxt', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='uff_model', full_name='TensorRTConfig.uff_model', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='etlt_model', full_name='TensorRTConfig.etlt_model', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='backend_data_type', full_name='TensorRTConfig.backend_data_type', index=5,
number=6, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='save_engine', full_name='TensorRTConfig.save_engine', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='trt_engine', full_name='TensorRTConfig.trt_engine', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='calibrator_config', full_name='TensorRTConfig.calibrator_config', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_TENSORRTCONFIG_PARSER,
_TENSORRTCONFIG_BACKENDDATATYPE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=191,
serialized_end=560,
)
_INFERENCERCONFIG = _descriptor.Descriptor(
name='InferencerConfig',
full_name='InferencerConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='tlt_config', full_name='InferencerConfig.tlt_config', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tensorrt_config', full_name='InferencerConfig.tensorrt_config', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='input_nodes', full_name='InferencerConfig.input_nodes', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='output_nodes', full_name='InferencerConfig.output_nodes', index=3,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batch_size', full_name='InferencerConfig.batch_size', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='image_height', full_name='InferencerConfig.image_height', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='image_width', full_name='InferencerConfig.image_width', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='image_channels', full_name='InferencerConfig.image_channels', index=7,
number=8, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='gpu_index', full_name='InferencerConfig.gpu_index', index=8,
number=9, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='target_classes', full_name='InferencerConfig.target_classes', index=9,
number=10, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stride', full_name='InferencerConfig.stride', index=10,
number=11, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='model_config_type', full_name='InferencerConfig.model_config_type',
index=0, containing_type=None, fields=[]),
],
serialized_start=563,
serialized_end=869,
)
_TENSORRTCONFIG.fields_by_name['parser'].enum_type = _TENSORRTCONFIG_PARSER
_TENSORRTCONFIG.fields_by_name['backend_data_type'].enum_type = _TENSORRTCONFIG_BACKENDDATATYPE
_TENSORRTCONFIG.fields_by_name['calibrator_config'].message_type = _CALIBRATORCONFIG
_TENSORRTCONFIG_PARSER.containing_type = _TENSORRTCONFIG
_TENSORRTCONFIG_BACKENDDATATYPE.containing_type = _TENSORRTCONFIG
_INFERENCERCONFIG.fields_by_name['tlt_config'].message_type = _TLTCONFIG
_INFERENCERCONFIG.fields_by_name['tensorrt_config'].message_type = _TENSORRTCONFIG
_INFERENCERCONFIG.oneofs_by_name['model_config_type'].fields.append(
_INFERENCERCONFIG.fields_by_name['tlt_config'])
_INFERENCERCONFIG.fields_by_name['tlt_config'].containing_oneof = _INFERENCERCONFIG.oneofs_by_name['model_config_type']
_INFERENCERCONFIG.oneofs_by_name['model_config_type'].fields.append(
_INFERENCERCONFIG.fields_by_name['tensorrt_config'])
_INFERENCERCONFIG.fields_by_name['tensorrt_config'].containing_oneof = _INFERENCERCONFIG.oneofs_by_name['model_config_type']
DESCRIPTOR.message_types_by_name['CalibratorConfig'] = _CALIBRATORCONFIG
DESCRIPTOR.message_types_by_name['TLTConfig'] = _TLTCONFIG
DESCRIPTOR.message_types_by_name['TensorRTConfig'] = _TENSORRTCONFIG
DESCRIPTOR.message_types_by_name['InferencerConfig'] = _INFERENCERCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CalibratorConfig = _reflection.GeneratedProtocolMessageType('CalibratorConfig', (_message.Message,), dict(
DESCRIPTOR = _CALIBRATORCONFIG,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.inferencer_config_pb2'
# @@protoc_insertion_point(class_scope:CalibratorConfig)
))
_sym_db.RegisterMessage(CalibratorConfig)
TLTConfig = _reflection.GeneratedProtocolMessageType('TLTConfig', (_message.Message,), dict(
DESCRIPTOR = _TLTCONFIG,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.inferencer_config_pb2'
# @@protoc_insertion_point(class_scope:TLTConfig)
))
_sym_db.RegisterMessage(TLTConfig)
TensorRTConfig = _reflection.GeneratedProtocolMessageType('TensorRTConfig', (_message.Message,), dict(
DESCRIPTOR = _TENSORRTCONFIG,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.inferencer_config_pb2'
# @@protoc_insertion_point(class_scope:TensorRTConfig)
))
_sym_db.RegisterMessage(TensorRTConfig)
InferencerConfig = _reflection.GeneratedProtocolMessageType('InferencerConfig', (_message.Message,), dict(
DESCRIPTOR = _INFERENCERCONFIG,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.inferencer_config_pb2'
# @@protoc_insertion_point(class_scope:InferencerConfig)
))
_sym_db.RegisterMessage(InferencerConfig)
# @@protoc_insertion_point(module_scope)
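# ---------------------------------------------------------------------------
# Usage sketch -- an editorial addition, not part of the protoc output. Enum
# values such as INT8 are exposed as attributes of the generated
# TensorRTConfig class, and `model_config_type` is a oneof, so touching
# `tensorrt_config` clears any previously set `tlt_config`. All concrete
# values below are illustrative placeholders.
if __name__ == '__main__':
    infer_config = InferencerConfig()
    infer_config.tlt_config.model = '/models/model.tlt'
    infer_config.tensorrt_config.backend_data_type = TensorRTConfig.INT8  # clears tlt_config
    infer_config.tensorrt_config.calibrator_config.n_batches = 8
    infer_config.target_classes.extend(['car', 'pedestrian'])
    assert infer_config.WhichOneof('model_config_type') == 'tensorrt_config'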
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/proto/inferencer_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/detectnet_v2/proto/cost_function_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/detectnet_v2/proto/cost_function_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n?nvidia_tao_tf1/cv/detectnet_v2/proto/cost_function_config.proto\"\x88\x03\n\x12\x43ostFunctionConfig\x12\x37\n\x0etarget_classes\x18\x01 \x03(\x0b\x32\x1f.CostFunctionConfig.TargetClass\x12\x1c\n\x14\x65nable_autoweighting\x18\x02 \x01(\x08\x12\x1c\n\x14max_objective_weight\x18\x03 \x01(\x02\x12\x1c\n\x14min_objective_weight\x18\x04 \x01(\x02\x1a\xde\x01\n\x0bTargetClass\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x63lass_weight\x18\x02 \x01(\x02\x12\"\n\x1a\x63overage_foreground_weight\x18\x03 \x01(\x02\x12=\n\nobjectives\x18\x04 \x03(\x0b\x32).CostFunctionConfig.TargetClass.Objective\x1aH\n\tObjective\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x16\n\x0einitial_weight\x18\x02 \x01(\x02\x12\x15\n\rweight_target\x18\x03 \x01(\x02\x62\x06proto3')
)
_COSTFUNCTIONCONFIG_TARGETCLASS_OBJECTIVE = _descriptor.Descriptor(
name='Objective',
full_name='CostFunctionConfig.TargetClass.Objective',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='CostFunctionConfig.TargetClass.Objective.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='initial_weight', full_name='CostFunctionConfig.TargetClass.Objective.initial_weight', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='weight_target', full_name='CostFunctionConfig.TargetClass.Objective.weight_target', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=388,
serialized_end=460,
)
_COSTFUNCTIONCONFIG_TARGETCLASS = _descriptor.Descriptor(
name='TargetClass',
full_name='CostFunctionConfig.TargetClass',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='CostFunctionConfig.TargetClass.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='class_weight', full_name='CostFunctionConfig.TargetClass.class_weight', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='coverage_foreground_weight', full_name='CostFunctionConfig.TargetClass.coverage_foreground_weight', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='objectives', full_name='CostFunctionConfig.TargetClass.objectives', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_COSTFUNCTIONCONFIG_TARGETCLASS_OBJECTIVE, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=238,
serialized_end=460,
)
_COSTFUNCTIONCONFIG = _descriptor.Descriptor(
name='CostFunctionConfig',
full_name='CostFunctionConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='target_classes', full_name='CostFunctionConfig.target_classes', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='enable_autoweighting', full_name='CostFunctionConfig.enable_autoweighting', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_objective_weight', full_name='CostFunctionConfig.max_objective_weight', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_objective_weight', full_name='CostFunctionConfig.min_objective_weight', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_COSTFUNCTIONCONFIG_TARGETCLASS, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=68,
serialized_end=460,
)
_COSTFUNCTIONCONFIG_TARGETCLASS_OBJECTIVE.containing_type = _COSTFUNCTIONCONFIG_TARGETCLASS
_COSTFUNCTIONCONFIG_TARGETCLASS.fields_by_name['objectives'].message_type = _COSTFUNCTIONCONFIG_TARGETCLASS_OBJECTIVE
_COSTFUNCTIONCONFIG_TARGETCLASS.containing_type = _COSTFUNCTIONCONFIG
_COSTFUNCTIONCONFIG.fields_by_name['target_classes'].message_type = _COSTFUNCTIONCONFIG_TARGETCLASS
DESCRIPTOR.message_types_by_name['CostFunctionConfig'] = _COSTFUNCTIONCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CostFunctionConfig = _reflection.GeneratedProtocolMessageType('CostFunctionConfig', (_message.Message,), dict(
TargetClass = _reflection.GeneratedProtocolMessageType('TargetClass', (_message.Message,), dict(
Objective = _reflection.GeneratedProtocolMessageType('Objective', (_message.Message,), dict(
DESCRIPTOR = _COSTFUNCTIONCONFIG_TARGETCLASS_OBJECTIVE,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.cost_function_config_pb2'
# @@protoc_insertion_point(class_scope:CostFunctionConfig.TargetClass.Objective)
))
,
DESCRIPTOR = _COSTFUNCTIONCONFIG_TARGETCLASS,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.cost_function_config_pb2'
# @@protoc_insertion_point(class_scope:CostFunctionConfig.TargetClass)
))
,
DESCRIPTOR = _COSTFUNCTIONCONFIG,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.cost_function_config_pb2'
# @@protoc_insertion_point(class_scope:CostFunctionConfig)
))
_sym_db.RegisterMessage(CostFunctionConfig)
_sym_db.RegisterMessage(CostFunctionConfig.TargetClass)
_sym_db.RegisterMessage(CostFunctionConfig.TargetClass.Objective)
# @@protoc_insertion_point(module_scope)
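# ---------------------------------------------------------------------------
# Usage sketch -- an editorial addition, not part of the protoc output.
# Nested messages are reachable as attributes of the outer class (e.g.
# CostFunctionConfig.TargetClass), and repeated message fields grow via
# .add(). The class names and weights below are illustrative placeholders.
if __name__ == '__main__':
    cost_config = CostFunctionConfig(enable_autoweighting=True)
    target_class = cost_config.target_classes.add(name='car', class_weight=1.0)
    target_class.objectives.add(name='cov', initial_weight=1.0, weight_target=1.0)
    # Binary serialization round-trips through the generated descriptor.
    round_trip = CostFunctionConfig.FromString(cost_config.SerializeToString())
    assert round_trip == cost_config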
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/proto/cost_function_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/detectnet_v2/proto/model_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/detectnet_v2/proto/model_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n7nvidia_tao_tf1/cv/detectnet_v2/proto/model_config.proto\"\xd9\x07\n\x0bModelConfig\x12\x1d\n\x15pretrained_model_file\x18\x01 \x01(\t\x12 \n\x18\x66reeze_pretrained_layers\x18\x02 \x01(\x08\x12\'\n\x1f\x61llow_loaded_model_modification\x18\x03 \x01(\x08\x12\x12\n\nnum_layers\x18\x04 \x01(\x05\x12\x13\n\x0buse_pooling\x18\x05 \x01(\x08\x12\x16\n\x0euse_batch_norm\x18\x06 \x01(\x08\x12\x14\n\x0c\x64ropout_rate\x18\x07 \x01(\x02\x12+\n\nactivation\x18\x08 \x01(\x0b\x32\x17.ModelConfig.Activation\x12\x30\n\robjective_set\x18\t \x01(\x0b\x32\x19.ModelConfig.ObjectiveSet\x12:\n\x12training_precision\x18\n \x01(\x0b\x32\x1e.ModelConfig.TrainingPrecision\x12\x11\n\tfreeze_bn\x18\x0b \x01(\x08\x12\x15\n\rfreeze_blocks\x18\x0c \x03(\x02\x12\x0c\n\x04\x61rch\x18\r \x01(\t\x12\x12\n\nload_graph\x18\x0e \x01(\x08\x12\x17\n\x0f\x61ll_projections\x18\x0f \x01(\x08\x1a\xb4\x01\n\nActivation\x12\x17\n\x0f\x61\x63tivation_type\x18\x01 \x01(\t\x12P\n\x15\x61\x63tivation_parameters\x18\x02 \x03(\x0b\x32\x31.ModelConfig.Activation.ActivationParametersEntry\x1a;\n\x19\x41\x63tivationParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x02:\x02\x38\x01\x1a=\n\rBboxObjective\x12\r\n\x05input\x18\x01 \x01(\t\x12\r\n\x05scale\x18\x02 \x01(\x02\x12\x0e\n\x06offset\x18\x03 \x01(\x02\x1a\x1d\n\x0c\x43ovObjective\x12\r\n\x05input\x18\x01 \x01(\t\x1a`\n\x0cObjectiveSet\x12(\n\x04\x62\x62ox\x18\x01 \x01(\x0b\x32\x1a.ModelConfig.BboxObjective\x12&\n\x03\x63ov\x18\x02 \x01(\x0b\x32\x19.ModelConfig.CovObjective\x1a\x91\x01\n\x11TrainingPrecision\x12\x44\n\x0e\x62\x61\x63kend_floatx\x18\x01 \x01(\x0e\x32,.ModelConfig.TrainingPrecision.BackendFloatx\"6\n\rBackendFloatx\x12\x0b\n\x07\x46LOAT32\x10\x00\x12\x0b\n\x07\x46LOAT16\x10\x01\x12\x0b\n\x07INVALID\x10\x02\x62\x06proto3')
)
_MODELCONFIG_TRAININGPRECISION_BACKENDFLOATX = _descriptor.EnumDescriptor(
name='BackendFloatx',
full_name='ModelConfig.TrainingPrecision.BackendFloatx',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='FLOAT32', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FLOAT16', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INVALID', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=991,
serialized_end=1045,
)
_sym_db.RegisterEnumDescriptor(_MODELCONFIG_TRAININGPRECISION_BACKENDFLOATX)
_MODELCONFIG_ACTIVATION_ACTIVATIONPARAMETERSENTRY = _descriptor.Descriptor(
name='ActivationParametersEntry',
full_name='ModelConfig.Activation.ActivationParametersEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='ModelConfig.Activation.ActivationParametersEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='ModelConfig.Activation.ActivationParametersEntry.value', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=646,
serialized_end=705,
)
_MODELCONFIG_ACTIVATION = _descriptor.Descriptor(
name='Activation',
full_name='ModelConfig.Activation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='activation_type', full_name='ModelConfig.Activation.activation_type', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='activation_parameters', full_name='ModelConfig.Activation.activation_parameters', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_MODELCONFIG_ACTIVATION_ACTIVATIONPARAMETERSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=525,
serialized_end=705,
)
_MODELCONFIG_BBOXOBJECTIVE = _descriptor.Descriptor(
name='BboxObjective',
full_name='ModelConfig.BboxObjective',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='input', full_name='ModelConfig.BboxObjective.input', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='scale', full_name='ModelConfig.BboxObjective.scale', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='offset', full_name='ModelConfig.BboxObjective.offset', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=707,
serialized_end=768,
)
_MODELCONFIG_COVOBJECTIVE = _descriptor.Descriptor(
name='CovObjective',
full_name='ModelConfig.CovObjective',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='input', full_name='ModelConfig.CovObjective.input', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=770,
serialized_end=799,
)
_MODELCONFIG_OBJECTIVESET = _descriptor.Descriptor(
name='ObjectiveSet',
full_name='ModelConfig.ObjectiveSet',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='bbox', full_name='ModelConfig.ObjectiveSet.bbox', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cov', full_name='ModelConfig.ObjectiveSet.cov', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=801,
serialized_end=897,
)
_MODELCONFIG_TRAININGPRECISION = _descriptor.Descriptor(
name='TrainingPrecision',
full_name='ModelConfig.TrainingPrecision',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='backend_floatx', full_name='ModelConfig.TrainingPrecision.backend_floatx', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_MODELCONFIG_TRAININGPRECISION_BACKENDFLOATX,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=900,
serialized_end=1045,
)
_MODELCONFIG = _descriptor.Descriptor(
name='ModelConfig',
full_name='ModelConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='pretrained_model_file', full_name='ModelConfig.pretrained_model_file', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='freeze_pretrained_layers', full_name='ModelConfig.freeze_pretrained_layers', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='allow_loaded_model_modification', full_name='ModelConfig.allow_loaded_model_modification', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_layers', full_name='ModelConfig.num_layers', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='use_pooling', full_name='ModelConfig.use_pooling', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='use_batch_norm', full_name='ModelConfig.use_batch_norm', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dropout_rate', full_name='ModelConfig.dropout_rate', index=6,
number=7, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='activation', full_name='ModelConfig.activation', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='objective_set', full_name='ModelConfig.objective_set', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='training_precision', full_name='ModelConfig.training_precision', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='freeze_bn', full_name='ModelConfig.freeze_bn', index=10,
number=11, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='freeze_blocks', full_name='ModelConfig.freeze_blocks', index=11,
number=12, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='arch', full_name='ModelConfig.arch', index=12,
number=13, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='load_graph', full_name='ModelConfig.load_graph', index=13,
number=14, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='all_projections', full_name='ModelConfig.all_projections', index=14,
number=15, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_MODELCONFIG_ACTIVATION, _MODELCONFIG_BBOXOBJECTIVE, _MODELCONFIG_COVOBJECTIVE, _MODELCONFIG_OBJECTIVESET, _MODELCONFIG_TRAININGPRECISION, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=60,
serialized_end=1045,
)
_MODELCONFIG_ACTIVATION_ACTIVATIONPARAMETERSENTRY.containing_type = _MODELCONFIG_ACTIVATION
_MODELCONFIG_ACTIVATION.fields_by_name['activation_parameters'].message_type = _MODELCONFIG_ACTIVATION_ACTIVATIONPARAMETERSENTRY
_MODELCONFIG_ACTIVATION.containing_type = _MODELCONFIG
_MODELCONFIG_BBOXOBJECTIVE.containing_type = _MODELCONFIG
_MODELCONFIG_COVOBJECTIVE.containing_type = _MODELCONFIG
_MODELCONFIG_OBJECTIVESET.fields_by_name['bbox'].message_type = _MODELCONFIG_BBOXOBJECTIVE
_MODELCONFIG_OBJECTIVESET.fields_by_name['cov'].message_type = _MODELCONFIG_COVOBJECTIVE
_MODELCONFIG_OBJECTIVESET.containing_type = _MODELCONFIG
_MODELCONFIG_TRAININGPRECISION.fields_by_name['backend_floatx'].enum_type = _MODELCONFIG_TRAININGPRECISION_BACKENDFLOATX
_MODELCONFIG_TRAININGPRECISION.containing_type = _MODELCONFIG
_MODELCONFIG_TRAININGPRECISION_BACKENDFLOATX.containing_type = _MODELCONFIG_TRAININGPRECISION
_MODELCONFIG.fields_by_name['activation'].message_type = _MODELCONFIG_ACTIVATION
_MODELCONFIG.fields_by_name['objective_set'].message_type = _MODELCONFIG_OBJECTIVESET
_MODELCONFIG.fields_by_name['training_precision'].message_type = _MODELCONFIG_TRAININGPRECISION
DESCRIPTOR.message_types_by_name['ModelConfig'] = _MODELCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ModelConfig = _reflection.GeneratedProtocolMessageType('ModelConfig', (_message.Message,), dict(
Activation = _reflection.GeneratedProtocolMessageType('Activation', (_message.Message,), dict(
ActivationParametersEntry = _reflection.GeneratedProtocolMessageType('ActivationParametersEntry', (_message.Message,), dict(
DESCRIPTOR = _MODELCONFIG_ACTIVATION_ACTIVATIONPARAMETERSENTRY,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.model_config_pb2'
# @@protoc_insertion_point(class_scope:ModelConfig.Activation.ActivationParametersEntry)
))
,
DESCRIPTOR = _MODELCONFIG_ACTIVATION,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.model_config_pb2'
# @@protoc_insertion_point(class_scope:ModelConfig.Activation)
))
,
BboxObjective = _reflection.GeneratedProtocolMessageType('BboxObjective', (_message.Message,), dict(
DESCRIPTOR = _MODELCONFIG_BBOXOBJECTIVE,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.model_config_pb2'
# @@protoc_insertion_point(class_scope:ModelConfig.BboxObjective)
))
,
CovObjective = _reflection.GeneratedProtocolMessageType('CovObjective', (_message.Message,), dict(
DESCRIPTOR = _MODELCONFIG_COVOBJECTIVE,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.model_config_pb2'
# @@protoc_insertion_point(class_scope:ModelConfig.CovObjective)
))
,
ObjectiveSet = _reflection.GeneratedProtocolMessageType('ObjectiveSet', (_message.Message,), dict(
DESCRIPTOR = _MODELCONFIG_OBJECTIVESET,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.model_config_pb2'
# @@protoc_insertion_point(class_scope:ModelConfig.ObjectiveSet)
))
,
TrainingPrecision = _reflection.GeneratedProtocolMessageType('TrainingPrecision', (_message.Message,), dict(
DESCRIPTOR = _MODELCONFIG_TRAININGPRECISION,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.model_config_pb2'
# @@protoc_insertion_point(class_scope:ModelConfig.TrainingPrecision)
))
,
DESCRIPTOR = _MODELCONFIG,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.model_config_pb2'
# @@protoc_insertion_point(class_scope:ModelConfig)
))
_sym_db.RegisterMessage(ModelConfig)
_sym_db.RegisterMessage(ModelConfig.Activation)
_sym_db.RegisterMessage(ModelConfig.Activation.ActivationParametersEntry)
_sym_db.RegisterMessage(ModelConfig.BboxObjective)
_sym_db.RegisterMessage(ModelConfig.CovObjective)
_sym_db.RegisterMessage(ModelConfig.ObjectiveSet)
_sym_db.RegisterMessage(ModelConfig.TrainingPrecision)
_MODELCONFIG_ACTIVATION_ACTIVATIONPARAMETERSENTRY._options = None
# @@protoc_insertion_point(module_scope)
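# ---------------------------------------------------------------------------
# Usage sketch -- an editorial addition, not part of the protoc output. The
# ActivationParametersEntry map above makes
# ModelConfig.Activation.activation_parameters behave like a str -> float
# dict, and text_format round-trips the whole message. The concrete values
# are illustrative placeholders.
if __name__ == '__main__':
    from google.protobuf import text_format
    model_config = ModelConfig(arch='resnet', num_layers=18, use_batch_norm=True)
    model_config.activation.activation_type = 'relu'
    model_config.activation.activation_parameters['max_value'] = 6.0
    reparsed = text_format.Parse(text_format.MessageToString(model_config), ModelConfig())
    assert reparsed.activation.activation_parameters['max_value'] == 6.0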
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/proto/model_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/detectnet_v2/proto/dataset_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/detectnet_v2/proto/dataset_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n9nvidia_tao_tf1/cv/detectnet_v2/proto/dataset_config.proto\"Y\n\nDataSource\x12\x16\n\x0etfrecords_path\x18\x01 \x01(\t\x12\x1c\n\x14image_directory_path\x18\x02 \x01(\t\x12\x15\n\rsource_weight\x18\x03 \x01(\x02\"\x99\x04\n\rDatasetConfig\x12!\n\x0c\x64\x61ta_sources\x18\x01 \x03(\x0b\x32\x0b.DataSource\x12\x17\n\x0fimage_extension\x18\x02 \x01(\t\x12\x44\n\x14target_class_mapping\x18\x03 \x03(\x0b\x32&.DatasetConfig.TargetClassMappingEntry\x12\x19\n\x0fvalidation_fold\x18\x04 \x01(\rH\x00\x12-\n\x16validation_data_source\x18\x05 \x01(\x0b\x32\x0b.DataSourceH\x00\x12\x37\n\x0f\x64\x61taloader_mode\x18\x06 \x01(\x0e\x32\x1e.DatasetConfig.DATALOADER_MODE\x12\x33\n\rsampling_mode\x18\x07 \x01(\x0e\x32\x1c.DatasetConfig.SAMPLING_MODE\x1a\x39\n\x17TargetClassMappingEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\";\n\x0f\x44\x41TALOADER_MODE\x12\x0f\n\x0bMULTISOURCE\x10\x00\x12\n\n\x06LEGACY\x10\x01\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x02\"@\n\rSAMPLING_MODE\x12\x10\n\x0cUSER_DEFINED\x10\x00\x12\x10\n\x0cPROPORTIONAL\x10\x01\x12\x0b\n\x07UNIFORM\x10\x02\x42\x14\n\x12\x64\x61taset_split_typeb\x06proto3')
)
_DATASETCONFIG_DATALOADER_MODE = _descriptor.EnumDescriptor(
name='DATALOADER_MODE',
full_name='DatasetConfig.DATALOADER_MODE',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='MULTISOURCE', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LEGACY', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DEFAULT', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=543,
serialized_end=602,
)
_sym_db.RegisterEnumDescriptor(_DATASETCONFIG_DATALOADER_MODE)
_DATASETCONFIG_SAMPLING_MODE = _descriptor.EnumDescriptor(
name='SAMPLING_MODE',
full_name='DatasetConfig.SAMPLING_MODE',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='USER_DEFINED', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PROPORTIONAL', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNIFORM', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=604,
serialized_end=668,
)
_sym_db.RegisterEnumDescriptor(_DATASETCONFIG_SAMPLING_MODE)
_DATASOURCE = _descriptor.Descriptor(
name='DataSource',
full_name='DataSource',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='tfrecords_path', full_name='DataSource.tfrecords_path', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='image_directory_path', full_name='DataSource.image_directory_path', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='source_weight', full_name='DataSource.source_weight', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=61,
serialized_end=150,
)
_DATASETCONFIG_TARGETCLASSMAPPINGENTRY = _descriptor.Descriptor(
name='TargetClassMappingEntry',
full_name='DatasetConfig.TargetClassMappingEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='DatasetConfig.TargetClassMappingEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='DatasetConfig.TargetClassMappingEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=484,
serialized_end=541,
)
_DATASETCONFIG = _descriptor.Descriptor(
name='DatasetConfig',
full_name='DatasetConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='data_sources', full_name='DatasetConfig.data_sources', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='image_extension', full_name='DatasetConfig.image_extension', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='target_class_mapping', full_name='DatasetConfig.target_class_mapping', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='validation_fold', full_name='DatasetConfig.validation_fold', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='validation_data_source', full_name='DatasetConfig.validation_data_source', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dataloader_mode', full_name='DatasetConfig.dataloader_mode', index=5,
number=6, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sampling_mode', full_name='DatasetConfig.sampling_mode', index=6,
number=7, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_DATASETCONFIG_TARGETCLASSMAPPINGENTRY, ],
enum_types=[
_DATASETCONFIG_DATALOADER_MODE,
_DATASETCONFIG_SAMPLING_MODE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='dataset_split_type', full_name='DatasetConfig.dataset_split_type',
index=0, containing_type=None, fields=[]),
],
serialized_start=153,
serialized_end=690,
)
_DATASETCONFIG_TARGETCLASSMAPPINGENTRY.containing_type = _DATASETCONFIG
_DATASETCONFIG.fields_by_name['data_sources'].message_type = _DATASOURCE
_DATASETCONFIG.fields_by_name['target_class_mapping'].message_type = _DATASETCONFIG_TARGETCLASSMAPPINGENTRY
_DATASETCONFIG.fields_by_name['validation_data_source'].message_type = _DATASOURCE
_DATASETCONFIG.fields_by_name['dataloader_mode'].enum_type = _DATASETCONFIG_DATALOADER_MODE
_DATASETCONFIG.fields_by_name['sampling_mode'].enum_type = _DATASETCONFIG_SAMPLING_MODE
_DATASETCONFIG_DATALOADER_MODE.containing_type = _DATASETCONFIG
_DATASETCONFIG_SAMPLING_MODE.containing_type = _DATASETCONFIG
_DATASETCONFIG.oneofs_by_name['dataset_split_type'].fields.append(
_DATASETCONFIG.fields_by_name['validation_fold'])
_DATASETCONFIG.fields_by_name['validation_fold'].containing_oneof = _DATASETCONFIG.oneofs_by_name['dataset_split_type']
_DATASETCONFIG.oneofs_by_name['dataset_split_type'].fields.append(
_DATASETCONFIG.fields_by_name['validation_data_source'])
_DATASETCONFIG.fields_by_name['validation_data_source'].containing_oneof = _DATASETCONFIG.oneofs_by_name['dataset_split_type']
DESCRIPTOR.message_types_by_name['DataSource'] = _DATASOURCE
DESCRIPTOR.message_types_by_name['DatasetConfig'] = _DATASETCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DataSource = _reflection.GeneratedProtocolMessageType('DataSource', (_message.Message,), dict(
DESCRIPTOR = _DATASOURCE,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.dataset_config_pb2'
# @@protoc_insertion_point(class_scope:DataSource)
))
_sym_db.RegisterMessage(DataSource)
DatasetConfig = _reflection.GeneratedProtocolMessageType('DatasetConfig', (_message.Message,), dict(
TargetClassMappingEntry = _reflection.GeneratedProtocolMessageType('TargetClassMappingEntry', (_message.Message,), dict(
DESCRIPTOR = _DATASETCONFIG_TARGETCLASSMAPPINGENTRY,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.dataset_config_pb2'
# @@protoc_insertion_point(class_scope:DatasetConfig.TargetClassMappingEntry)
))
,
DESCRIPTOR = _DATASETCONFIG,
__module__ = 'nvidia_tao_tf1.cv.detectnet_v2.proto.dataset_config_pb2'
# @@protoc_insertion_point(class_scope:DatasetConfig)
))
_sym_db.RegisterMessage(DatasetConfig)
_sym_db.RegisterMessage(DatasetConfig.TargetClassMappingEntry)
_DATASETCONFIG_TARGETCLASSMAPPINGENTRY._options = None
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/proto/dataset_config_pb2.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cost functions used by gridbox."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
EPSILON = 1e-05
GT_BBOX_AREA_CRITERION = 0.001
def weighted_binary_cross_entropy_cost(target, pred, weight, loss_mask):
"""Elementwise weighted BCE cost."""
BCE = -(target * tf.log(pred + EPSILON) + (1.0 - target) * tf.log(1.0 - pred + EPSILON))
weight_vec_for_one = weight * tf.ones_like(target)
weight_vec_for_zero = (1.0 - weight) * tf.ones_like(target)
weights_tensor = tf.where(target > 0.5, weight_vec_for_one, weight_vec_for_zero)
return tf.multiply(loss_mask, weights_tensor * BCE)
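# Worked example of the weighting above (illustrative numbers, not library
# output): with weight = 0.9, a positive cell (target > 0.5) contributes
# 0.9 * BCE while a negative cell contributes (1.0 - 0.9) * BCE = 0.1 * BCE,
# biasing the cost towards foreground cells.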
def weighted_L1_cost(target, pred, weight, loss_mask):
"""Elementwise weighted L1 cost."""
weight = tf.ones_like(target) * weight
dist = tf.abs(pred - target)
return tf.multiply(loss_mask, tf.multiply(weight, dist))
def weighted_circular_L1_cost(target, pred, weight, loss_mask):
"""Element-wise circular L1 loss.
    <pred> and <target> are expected to produce values in the ]-1, 1[ range and to
    represent functions with a period of 2.0 for this loss to be meaningful.
Under those two assumptions, the loss l is defined as:
l = min(2 - |target| - |pred|, |target - pred|)
Args:
target (tf.Tensor): Ground truth tensor.
pred (tf.Tensor): Prediction tensor.
weight (tf.Tensor): Element-wise weight by which to multiply the cost.
loss_mask (tf.Tensor): Element-wise loss mask by which to multiply the cost.
Returns:
circular_L1_cost (tf.Tensor): Element-wise loss representing l in the above formula.
"""
weight = tf.ones_like(target) * weight
abs_pred = tf.abs(pred)
abs_target = tf.abs(target)
circular_L1_cost = tf.minimum(2.0 - abs_pred - abs_target, tf.abs(pred - target))
# Apply weight and loss_mask.
circular_L1_cost = tf.multiply(loss_mask, tf.multiply(weight, circular_L1_cost))
return circular_L1_cost
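# Worked example of the circular property (illustrative numbers): for
# target = 0.9 and pred = -0.9 the plain L1 distance is |0.9 - (-0.9)| = 1.8,
# but since the represented function has period 2.0 the two values are only
# 0.2 apart, and the loss picks min(2.0 - 0.9 - 0.9, 1.8) = 0.2.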
def weighted_GIOU_cost(abs_gt, abs_pred, weight, loss_mask):
"""Element-wise GIOU cost without zero-area bboxes of ground truth.
Args:
abs_gt (tf.Tensor): Ground truth tensors of absolute coordinates in input image space.
abs_pred (tf.Tensor): Prediction tensors of absolute coordinates in input image space.
weight (tf.Tensor): Element-wise weight by which to multiply the cost.
loss_mask (tf.Tensor): Element-wise loss mask by which to multiply the cost.
Returns:
giou_cost_with_removed_zero_gt (tf.Tensor): Element-wise GIOU cost of shape [B, 4, H, W].
"""
abs_pred = tf.unstack(abs_pred, axis=1)
abs_gt = tf.unstack(abs_gt, axis=1)
coords_left_pred, coords_top_pred, coords_right_pred, coords_bottom_pred = abs_pred
coords_left_gt, coords_top_gt, coords_right_gt, coords_bottom_gt = abs_gt
# Calculate element-wise bbox IOU.
x1 = tf.maximum(coords_left_pred, coords_left_gt)
y1 = tf.maximum(coords_top_pred, coords_top_gt)
x2 = tf.minimum(coords_right_pred, coords_right_gt)
y2 = tf.minimum(coords_bottom_pred, coords_bottom_gt)
w = tf.maximum(x2 - x1, 0.0)
h = tf.maximum(y2 - y1, 0.0)
intersection = tf.multiply(w, h)
area_pred = tf.multiply(coords_right_pred - coords_left_pred,
coords_bottom_pred - coords_top_pred)
area_gt = tf.multiply(coords_right_gt - coords_left_gt,
coords_bottom_gt - coords_top_gt)
union = area_pred + area_gt - intersection
iou = tf.divide(intersection, union + EPSILON)
# Calculate element-wise GIOU-cost.
x1c = tf.minimum(coords_left_pred, coords_left_gt)
y1c = tf.minimum(coords_top_pred, coords_top_gt)
x2c = tf.maximum(coords_right_pred, coords_right_gt)
y2c = tf.maximum(coords_bottom_pred, coords_bottom_gt)
area_all = tf.multiply(x2c - x1c, y2c - y1c)
giou = iou - tf.divide(area_all - union, area_all + EPSILON)
giou_cost = 1.0 - giou
    # Remove losses associated with zero-area ground truth bboxes.
zero_tmp = tf.zeros_like(area_gt)
giou_cost_with_removed_zero_gt = \
tf.where(tf.greater(tf.abs(area_gt), GT_BBOX_AREA_CRITERION),
giou_cost, zero_tmp)
    # Expand the GIOU cost to shape [B, 4, H, W].
giou_cost_with_removed_zero_gt = tf.expand_dims(
giou_cost_with_removed_zero_gt, 1)
giou_cost_with_removed_zero_gt = tf.tile(
giou_cost_with_removed_zero_gt, [1, 4, 1, 1])
    # Apply weight and loss_mask to the GIOU cost.
giou_cost_with_removed_zero_gt = tf.multiply(
giou_cost_with_removed_zero_gt, weight)
giou_cost_with_removed_zero_gt = tf.multiply(
giou_cost_with_removed_zero_gt, loss_mask)
return giou_cost_with_removed_zero_gt
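# Minimal self-check sketch (illustrative only, not part of the library API):
# one ground truth box (0, 0, 10, 10) against a prediction (0, 0, 5, 5) on a
# 1x1 grid. IoU = 25/100 = 0.25 and the enclosing box area equals the union
# area, so the GIOU cost is 1.0 - 0.25 = 0.75 in each of the 4 tiled channels.
if __name__ == '__main__':
    import numpy as np
    gt = np.zeros((1, 4, 1, 1), dtype=np.float32)
    pred = np.zeros((1, 4, 1, 1), dtype=np.float32)
    gt[0, :, 0, 0] = [0., 0., 10., 10.]  # left, top, right, bottom.
    pred[0, :, 0, 0] = [0., 0., 5., 5.]
    ones = tf.ones((1, 4, 1, 1), dtype=tf.float32)
    cost = weighted_GIOU_cost(tf.constant(gt), tf.constant(pred), ones, ones)
    with tf.Session() as sess:
        print(sess.run(cost))  # ~0.75 in every channel.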
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/cost_function/cost_functions.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cost auto weight hook."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nvidia_tao_tf1.core import distribution
from nvidia_tao_tf1.cv.detectnet_v2.cost_function.cost_function_parameters import (
build_target_class_list
)
from nvidia_tao_tf1.cv.detectnet_v2.visualization.visualizer import \
DetectNetTBVisualizer as Visualizer
def build_cost_auto_weight_hook(cost_function_config, steps_per_epoch):
"""Build a CostAutoWeightHook based on proto.
Arguments:
cost_function_config: CostFunctionConfig.
steps_per_epoch (int): Number of steps per epoch.
Returns:
A CostAutoWeightHook instance.
"""
if steps_per_epoch <= 0:
raise ValueError("steps_per_epoch must be > 0")
    if not cost_function_config.target_classes:
        raise ValueError("CostFunctionConfig should have at least one class")
for target_class in cost_function_config.target_classes:
        if not target_class.objectives:
            raise ValueError("CostFunctionConfig.target_classes should have at least one "
                             "objective")
return CostAutoWeightHook(build_target_class_list(cost_function_config),
cost_function_config.enable_autoweighting,
cost_function_config.min_objective_weight,
cost_function_config.max_objective_weight,
steps_per_epoch)
class CostAutoWeightHook(tf.estimator.SessionRunHook):
"""Class for computing objective auto weighting and total cost."""
def __init__(self, target_classes, enable_autoweighting,
min_objective_weight, max_objective_weight, steps_per_epoch):
"""__init__ method.
Compute normalized initial values for class and objective weights based on parameters.
Create objective auto weighting variables and update ops. Add update ops to lists for
execution on epoch begin/end callbacks.
Args:
target_classes: A list of TargetClass objects.
enable_autoweighting (bool): Whether auto weighting is enabled.
min_objective_weight (float): Minimum objective cost weight is clamped to this value.
max_objective_weight (float): Maximum objective cost weight is clamped to this value.
steps_per_epoch (int): Number of steps per epoch.
"""
self.target_classes = target_classes
self.enable_autoweighting = enable_autoweighting
self.min_objective_weight = min_objective_weight
self.max_objective_weight = max_objective_weight
self.steps_per_epoch = steps_per_epoch
self.steps_counter = 0
self.debug = False
# Initialize lists of callback update ops.
self.on_epoch_begin_updates = []
self.on_epoch_end_updates = []
# Stored values for testing purposes.
self._before_run_values = []
self._after_run_values = []
self._init_target_class_weights()
self._init_objective_weights()
def _init_target_class_weights(self):
# Compute sum of class weight initial values for normalization.
target_class_weight_sum = 0.
for target_class in self.target_classes:
target_class_weight_sum += target_class.class_weight
# Initialize class weights (currently constant).
self.target_class_weights = {}
for target_class in self.target_classes:
# Normalize initial value.
init_val = target_class.class_weight / target_class_weight_sum
self.target_class_weights[target_class.name] = tf.constant(init_val, dtype=tf.float32)
def _init_objective_weights(self):
# Initialize objective weighting.
self.cost_sums = {}
self.objective_weights = {}
for target_class in self.target_classes:
# Compute objective weight sum for normalization.
objective_weight_sum = 0.
for objective in target_class.objectives:
objective_weight_sum += objective.initial_weight
self.cost_sums[target_class.name] = {}
self.objective_weights[target_class.name] = {}
for objective in target_class.objectives:
# Create cost sum variables.
with tf.variable_scope('cost_sums'):
init = tf.constant(0., dtype=tf.float32)
name = '%s-%s' % (target_class.name, objective.name)
var = tf.get_variable(name, initializer=init, trainable=False)
self.cost_sums[target_class.name][objective.name] = var
# Reset the value at the beginning of every epoch.
self.on_epoch_begin_updates.append(tf.assign(ref=var, value=init))
# Create objective weight variables.
with tf.variable_scope('objective_weights'):
# Normalize initial value.
if objective_weight_sum:
init_val = objective.initial_weight / objective_weight_sum
else:
init_val = 0.0
init = tf.constant(init_val, dtype=tf.float32)
name = '%s-%s' % (target_class.name, objective.name)
var = tf.get_variable(name, initializer=init, trainable=False)
self.objective_weights[target_class.name][objective.name] = var
if self.enable_autoweighting:
# Construct objective weight update op.
# Step 1: compute objective weights and their sum based on objective cost
# means over the last epoch.
weights = {}
sum_weights = 0.
for objective in target_class.objectives:
# Note: cost sums are actually sums of minibatch means, so in principle we
# should divide them by the number of minibatches per epoch, but since we're
# normalizing the weights, division by the number of minibatches cancels out.
# Note 2: for multi-GPU, we need to average over all GPUs in order to keep
# the weights in sync. Each process will compute the same updates, so
# there's no need to broadcast the results. Allreduce computes a sum of
# means so we should divide the result by the number of GPUs, but again
# the division cancels out due to normalization.
obj_mean = self.cost_sums[target_class.name][objective.name]
obj_mean = distribution.get_distributor().allreduce(obj_mean)
# Compute 1/obj_mean. If obj_mean is 0, result is 0.
oo_obj_mean = tf.where(tf.equal(obj_mean, 0.), obj_mean, 1. / obj_mean)
weights[objective.name] = objective.weight_target * oo_obj_mean
sum_weights += weights[objective.name]
# Step 2: compute weight normalizer.
# Note: in case sum_weights is 0, we will retain the old weights so the value of
# nrm doesn't matter.
nrm = tf.where(tf.equal(sum_weights, 0.), 0., 1. / sum_weights)
# Step 3: compute normalized objective weights and schedule weight update op.
for objective in target_class.objectives:
w = weights[objective.name] * nrm
w = tf.maximum(w, self.min_objective_weight)
w = tf.minimum(w, self.max_objective_weight)
# If weight sum is 0, setting objective weight does not make sense ->
# retain old value.
oldw = self.objective_weights[target_class.name][objective.name]
w = tf.where(tf.equal(sum_weights, 0.), oldw, w)
# Schedule objective weight update op to be executed at the end of each epoch.
op = tf.assign(self.objective_weights[target_class.name][objective.name], w)
if self.debug:
op = tf.Print(op, [op], "objective weight gpu %d - %s - %s = " %
(distribution.get_distributor().local_rank(),
target_class.name, objective.name))
self.on_epoch_end_updates.append(op)
def after_create_session(self, session, coord):
"""Called when new TensorFlow session is created.
Args:
session: A TensorFlow Session that has been created.
coord: A Coordinator object which keeps track of all threads.
"""
self.session = session
def before_run(self, run_context):
"""Called before each call to run().
Run epoch begin updates before the first step of each epoch.
Args:
run_context: A SessionRunContext object.
"""
if self.steps_counter == 0:
# Store value for testing purposes.
self._before_run_values = self.session.run(self.on_epoch_begin_updates)
def after_run(self, run_context, run_values):
"""Called after each call to run().
Run epoch end updates after the last step of each epoch.
Args:
run_context: A SessionRunContext object.
run_values: A SessionRunValues object.
"""
self.steps_counter += 1
if self.steps_counter == self.steps_per_epoch:
self.steps_counter = 0
# Store value for testing purposes.
self._after_run_values = self.session.run(self.on_epoch_end_updates)
def cost_combiner_func(self, component_costs):
"""Cost function.
Args:
component_costs: Per target class per objective cost means over a minibatch.
Returns:
Total minibatch cost.
"""
# Target_classes in component_costs must be present in cost_function_parameters.
# Cost_function_parameters must not have additional target_classes.
assert {target_class.name for target_class in self.target_classes} ==\
set(component_costs)
# Compute a weighted sum of component costs.
total_cost = 0.0
costs = {}
for target_class in self.target_classes:
# Objectives in component_costs must be present in cost_function_parameters.
# Cost_function_parameters must not have additional objectives.
assert {objective.name for objective in target_class.objectives} ==\
set(component_costs[target_class.name])
costs[target_class.name] = {}
for objective in target_class.objectives:
# Average cost over minibatch and spatial dimensions.
mean_cost = component_costs[target_class.name][objective.name]
# Accumulate per class per objective cost, and total_cost.
# Control dependency needed since total_cost doesn't depend on the cost sum
# variables and thus they wouldn't get updated otherwise.
op = tf.assign_add(self.cost_sums[target_class.name][objective.name], mean_cost)
with tf.control_dependencies([op]):
cost = mean_cost * self.target_class_weights[target_class.name] *\
self.objective_weights[target_class.name][objective.name]
costs[target_class.name][objective.name] = cost
total_cost += cost
# Compute and visualize percentage of how much each component contributes to the total cost.
if Visualizer.enabled:
for target_class in self.target_classes:
for objective in target_class.objectives:
percentage = 100. * costs[target_class.name][objective.name] / total_cost
tf.summary.scalar('cost_percentage_%s_%s' % (target_class.name, objective.name),
percentage)
tf.summary.scalar('cost_autoweight_%s_%s' % (target_class.name, objective.name),
self.objective_weights[target_class.name][objective.name])
return total_cost
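# Worked example of the auto-weighting update above (illustrative numbers
# only): with weight targets {cov: 2.0, bbox: 1.0} and epoch cost means
# {cov: 0.1, bbox: 0.4}, the raw weights are 2.0 / 0.1 = 20.0 and
# 1.0 / 0.4 = 2.5. Normalizing by their sum (22.5) gives cov ~= 0.889 and
# bbox ~= 0.111, which are then clamped to
# [min_objective_weight, max_objective_weight] before being assigned.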
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/cost_function/cost_auto_weight_hook.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""Defines functions and classes handling gridbox cost functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/cost_function/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cost function config parser."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class Objective(object):
"""Objective parameters."""
def __init__(self, name, initial_weight, weight_target):
"""Constructor.
Args:
name (str): Name of the objective.
initial_weight (float): Initial weight that will be assigned to this objective's cost.
weight_target (float): Target weight that will be assigned to this objective's cost.
Raises:
ValueError: On invalid input args.
"""
if name is None or name == "":
raise ValueError("cost_function_parameters.Objective: name must be set.")
if initial_weight < 0.0:
raise ValueError("cost_function_parameters.Objective: initial_weight must be >= 0.")
if weight_target < 0.0:
raise ValueError("cost_function_parameters.Objective: weight_target must be >= 0.")
self.name = name
self.initial_weight = initial_weight
self.weight_target = weight_target
class TargetClass(object):
"""Target class parameters."""
def __init__(self, name, class_weight, coverage_foreground_weight, objectives):
"""Constructor.
Args:
name (str): Name of the target class.
class_weight (float): Weight assigned to this target class's cost (all objectives
combined).
coverage_foreground_weight (float): Relative weight associated with the cost of cells
where there is a foreground instance (i.e. the presence of what this TargetClass
represents). Value should be in the range ]0., 1.[.
objectives (list): Each item is a cost_function_parameters.Objective instance which
contains the cost configuration options for this target class's objectives.
Raises:
ValueError: On invalid input args.
"""
if name is None or name == "":
raise ValueError("cost_function_parameters.TargetClass: name must be set.")
if class_weight <= 0.0:
raise ValueError("cost_function_parameters.TargetClass: class_weight must be > 0.")
if coverage_foreground_weight <= 0.0 or coverage_foreground_weight >= 1.0:
raise ValueError("cost_function_parameters.TargetClass: coverage_foreground_weight "
"must be in ]0., 1.[.")
# Check that the sum of all objectives' weights for this target class is positive.
initial_weight_sum = sum([objective.initial_weight for objective in objectives])
weight_target_sum = sum([objective.weight_target for objective in objectives])
if initial_weight_sum <= 0.0:
raise ValueError("cost_function_parameters.objectives: Sum of initial_weight values "
"must be > 0.")
if weight_target_sum <= 0.0:
raise ValueError("cost_function_parameters.objectives: Sum of target_weight values "
"must be > 0.")
self.name = name
self.class_weight = class_weight
self.coverage_foreground_weight = coverage_foreground_weight
self.objectives = objectives
def build_target_class_list(cost_function_config):
"""Build a list of TargetClasses based on proto.
Arguments:
cost_function_config: CostFunctionConfig.
Returns:
A list of TargetClass instances.
"""
target_classes = []
for target_class in cost_function_config.target_classes:
objectives = []
for objective in target_class.objectives:
objectives.append(Objective(objective.name,
objective.initial_weight,
objective.weight_target))
target_classes.append(TargetClass(target_class.name,
target_class.class_weight,
target_class.coverage_foreground_weight,
objectives))
return target_classes
def get_target_class_names(cost_function_config):
"""Return a list of target class names.
Args:
cost_function_config (cost_function_pb2.CostFunctionConfig): proto message.
Returns:
List of target class names (str).
"""
return [target_class.name for target_class in cost_function_config.target_classes]
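if __name__ == '__main__':
    # Minimal usage sketch (illustrative values only): build a one-class,
    # one-objective config and parse it with the helpers above.
    from nvidia_tao_tf1.cv.detectnet_v2.proto.cost_function_config_pb2 import (
        CostFunctionConfig
    )
    config = CostFunctionConfig()
    target_class = config.target_classes.add()
    target_class.name = "car"
    target_class.class_weight = 1.0
    target_class.coverage_foreground_weight = 0.05
    objective = target_class.objectives.add()
    objective.name = "cov"
    objective.initial_weight = 1.0
    objective.weight_target = 1.0
    print(get_target_class_names(config))           # ['car']
    print(build_target_class_list(config)[0].name)  # 'car'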
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/cost_function/cost_function_parameters.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test cost functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pytest
from six.moves import range
import tensorflow as tf
from nvidia_tao_tf1.cv.detectnet_v2.cost_function.cost_auto_weight_hook import (
build_cost_auto_weight_hook,
CostAutoWeightHook
)
from nvidia_tao_tf1.cv.detectnet_v2.cost_function.cost_function_parameters import (
build_target_class_list,
get_target_class_names,
Objective,
TargetClass
)
from nvidia_tao_tf1.cv.detectnet_v2.model.utilities import get_class_predictions
from nvidia_tao_tf1.cv.detectnet_v2.objectives.objective_set import build_objective_set
from nvidia_tao_tf1.cv.detectnet_v2.proto.cost_function_config_pb2 import CostFunctionConfig
from nvidia_tao_tf1.cv.detectnet_v2.proto.model_config_pb2 import ModelConfig
from nvidia_tao_tf1.cv.detectnet_v2.proto.visualizer_config_pb2 import VisualizerConfig
from nvidia_tao_tf1.cv.detectnet_v2.visualization.visualizer import \
DetectNetTBVisualizer as Visualizer
verbose = False
def _weighted_BCE_cost(target, pred, weight):
"""Elementwise weighted BCE cost."""
EPSILON = 1e-05
BCE = -(target * np.log(pred + EPSILON) + (1.0 - target) * np.log(1.0 - pred + EPSILON))
weight_vec_for_one = weight * np.ones_like(target)
weight_vec_for_zero = (1.0 - weight) * np.ones_like(target)
weights_tensor = np.where(target > 0.5, weight_vec_for_one, weight_vec_for_zero)
return weights_tensor * BCE
def _weighted_L1_cost(target, pred, weight):
"""Weighted L1 cost."""
weight = np.ones_like(target) * weight
dist = np.abs(pred - target)
return np.multiply(weight, dist)
class TestCostFunction:
"""Test cost functions."""
def _compute_expected_total_cost(self, cost_function_config, target_dict, predictions_dict,
target_class_weights, objective_weights, cost_means):
# Compute expected total cost value.
total_cost = 0.0
for target_class in cost_function_config.target_classes:
# Targets.
cov_target = target_dict[target_class.name]['cov']
cov_norm_target = target_dict[target_class.name]['cov_norm']
bbox_target = target_dict[target_class.name]['bbox']
# Predictions.
cov_pred = predictions_dict[target_class.name]['cov']
bbox_pred = predictions_dict[target_class.name]['bbox']
# Compute costs.
cov_cost = np.mean(_weighted_BCE_cost(cov_target, cov_pred,
target_class.coverage_foreground_weight))
bbox_cost = np.mean(_weighted_L1_cost(bbox_target, bbox_pred, cov_norm_target))
# Sum per target, per objective costs.
cost_means[target_class.name]['cov'] += cov_cost
cost_means[target_class.name]['bbox'] += bbox_cost
# Accumulate total cost.
cov_cost *= objective_weights[target_class.name]['cov']
bbox_cost *= objective_weights[target_class.name]['bbox']
total_cost += target_class_weights[target_class.name] * (cov_cost + bbox_cost)
return total_cost
def _compute_expected_updated_weights(self, cost_function_config, current_weights,
cost_means):
updated_weights = []
for target_class in cost_function_config.target_classes:
# Compute objective weights and their sum.
weights = {}
sum_weights = 0.
for objective in target_class.objectives:
o = cost_means[target_class.name][objective.name]
if o != 0.:
o = objective.weight_target / o
weights[objective.name] = o
sum_weights += o
            # Compute weight normalizer, guarding against division by zero;
            # if sum_weights is 0 the old weights are retained below.
            nrm = 1.0 / sum_weights if sum_weights != 0. else 0.
# Update objective weights.
for objective in target_class.objectives:
                # If the weight sum is 0, setting the objective weight does not make
                # sense -> retain the old value.
if sum_weights == 0.:
w = current_weights[target_class.name][objective.name]
else:
w = max(weights[objective.name] * nrm,
cost_function_config.min_objective_weight)
                    w = min(w, cost_function_config.max_objective_weight)
updated_weights.append(w)
return updated_weights
def _get_model_config(self):
"""Get a valid model config."""
model_config = ModelConfig()
model_config.num_layers = 18
model_config.objective_set.bbox.scale = 35.
model_config.objective_set.bbox.offset = 0.5
model_config.objective_set.cov.MergeFrom(ModelConfig.CovObjective())
return model_config
def _get_cost_function_config(self):
"""Get a valid cost function config."""
cost_function_config = CostFunctionConfig()
cost_function_config.enable_autoweighting = True
cost_function_config.max_objective_weight = 0.9999
cost_function_config.min_objective_weight = 0.0001
return cost_function_config
def test_cost_function(self):
"""Test cost function."""
config = VisualizerConfig()
config.enabled = False
Visualizer.build_from_config(config)
model_config = self._get_model_config()
cost_function_config = self._get_cost_function_config()
# Add 'cat' class.
cat = cost_function_config.target_classes.add()
cat.name = 'cat'
cat.class_weight = 1.
cat.coverage_foreground_weight = .9
# Add 'cov' objective for 'cat'.
cat_cov = cat.objectives.add()
cat_cov.name = 'cov'
cat_cov.initial_weight = 4.
cat_cov.weight_target = 2.
# Add 'bbox' objective for 'cat'.
cat_bbox = cat.objectives.add()
cat_bbox.name = 'bbox'
cat_bbox.initial_weight = 1.
cat_bbox.weight_target = 1.
# Add 'dog' class.
dog = cost_function_config.target_classes.add()
dog.name = 'dog'
dog.class_weight = 3.0
dog.coverage_foreground_weight = 0.5
# Add 'cov' objective for 'dog'.
dog_cov = dog.objectives.add()
dog_cov.name = 'cov'
dog_cov.initial_weight = 1.
dog_cov.weight_target = 1.
# Add 'bbox' objective for 'dog'.
dog_bbox = dog.objectives.add()
dog_bbox.name = 'bbox'
dog_bbox.initial_weight = 3.
dog_bbox.weight_target = 3.
# Expected initial weight values after normalization.
expected_target_class_weights = {'cat': 0.25, 'dog': 0.75}
expected_objective_weights = {'cat': {'cov': 0.8, 'bbox': 0.2},
'dog': {'cov': 0.25, 'bbox': 0.75}}
batch_size = 1
num_classes = len(cost_function_config.target_classes)
grid_height = 4
grid_width = 4
num_epochs = 5
num_batches_per_epoch = 2
cost_auto_weight_hook = build_cost_auto_weight_hook(cost_function_config,
num_batches_per_epoch)
input_height, input_width = 16, 16
output_height, output_width = 1, 1
objective_set = build_objective_set(model_config.objective_set, output_height, output_width,
input_height, input_width)
target_classes = build_target_class_list(cost_function_config)
# Construct ground truth tensor.
target_dict = {}
for target_class in cost_function_config.target_classes:
target_dict[target_class.name] = {}
for objective in objective_set.objectives:
target = np.zeros((batch_size, objective.num_channels, grid_height, grid_width),
dtype=np.float32)
target[:, :, 1:3, 1:3] = 1.0
target_dict[target_class.name][objective.name] = target
# Construct prediction tensors.
cov_pred = np.zeros((batch_size, num_classes, 1, grid_height, grid_width),
dtype=np.float32)
bbox_pred = np.zeros((batch_size, num_classes, 4, grid_height, grid_width),
dtype=np.float32)
with tf.Session() as session:
cost_auto_weight_hook.after_create_session(session, coord=None)
session.run(tf.global_variables_initializer())
# Emulate a few epochs of training.
for epoch in range(num_epochs):
# Begin epoch. This clears auto weighting related variables.
cost_auto_weight_hook.before_run(run_context=None)
cost_sums = cost_auto_weight_hook._before_run_values
# Check that cost_sums are all zeroes.
np.testing.assert_equal(cost_sums, [0., 0., 0., 0.])
# Emulate a few minibatches.
expected_cost_means = {}
for target_class in cost_function_config.target_classes:
expected_cost_means[target_class.name] = {'cov': 0., 'bbox': 0.}
for batch in range(num_batches_per_epoch):
# Emulate network learning: Predictions close in on targets on every iteration.
v = float(epoch*num_batches_per_epoch+batch) / \
float(num_epochs*num_batches_per_epoch-1)
cov_pred[:, :, :, 1:3, 1:3] = v
bbox_pred[:, :, :, 1:3, 1:3] = 11.0 - 10.0 * v
predictions_dict = get_class_predictions(
{'cov': cov_pred, 'bbox': bbox_pred},
[t.name for t in cost_function_config.target_classes])
# Compute minibatch cost. Accumulates objective costs.
def cost_func(y_true, y_pred):
component_costs = objective_set.compute_component_costs(y_true, y_pred,
target_classes)
return cost_auto_weight_hook.cost_combiner_func(component_costs)
total_cost = session.run(cost_func(target_dict, predictions_dict))
# Check that total cost matches expectation.
expected_total_cost = \
self._compute_expected_total_cost(cost_function_config,
target_dict, predictions_dict,
expected_target_class_weights,
expected_objective_weights,
expected_cost_means)
if verbose:
print("epoch %s batch %s total_cost: computed %s expected %s" %
(epoch, batch, total_cost, expected_total_cost))
np.testing.assert_almost_equal(total_cost, expected_total_cost)
# End batch. This computes updated objective weights at the end of an epoch.
cost_auto_weight_hook.after_run(run_context=None, run_values=None)
updated_weights = cost_auto_weight_hook._after_run_values
# Check that updated objective weights match expectation.
expected_updated_weights = \
self._compute_expected_updated_weights(cost_function_config,
expected_objective_weights,
expected_cost_means)
if verbose:
print("epoch %s updated weights: computed %s expected %s" %
(epoch, updated_weights, expected_updated_weights))
np.testing.assert_almost_equal(updated_weights, expected_updated_weights)
# Update weights for the next epoch
expected_objective_weights = {'cat': {'cov': expected_updated_weights[0],
'bbox': expected_updated_weights[1]},
'dog': {'cov': expected_updated_weights[2],
'bbox': expected_updated_weights[3]}}
class TestCostFunctionParameters:
"""Test cost function parameters."""
def test_build_target_class_list(self):
"""Test cost function config parsing."""
config = CostFunctionConfig()
# Default values should generate an empty list.
ret = build_target_class_list(config)
assert ret == []
# Add a class and an objective, but forget to set objective's weight_target.
c = config.target_classes.add()
c.name = "cat"
c.class_weight = 0.5
c.coverage_foreground_weight = 0.75
o = c.objectives.add()
o.name = "mouse"
o.initial_weight = 0.25
with pytest.raises(ValueError):
# o.weight_target is not set, so it will default to 0, which is an illegal value.
build_target_class_list(config)
# This config should pass.
o.weight_target = 1.0
ret = build_target_class_list(config)
assert len(ret) == 1
assert ret[0].name == "cat"
assert ret[0].class_weight == 0.5
assert ret[0].coverage_foreground_weight == 0.75
assert len(ret[0].objectives) == 1
assert ret[0].objectives[0].initial_weight == 0.25
assert ret[0].objectives[0].weight_target == 1.0
# Add a second objective but forget to set its name.
o2 = c.objectives.add()
o2.initial_weight = 0.25
o2.weight_target = 1.0
with pytest.raises(ValueError):
            # o2.name is not set, so it will default to an empty string, which is an
            # illegal value.
build_target_class_list(config)
# Fix the problem and check that the result is ok.
o2.name = "bird"
ret = build_target_class_list(config)
assert len(ret[0].objectives) == 2
def test_get_target_class_names(self):
"""Test cost function config parsing."""
config = CostFunctionConfig()
ret = get_target_class_names(config)
assert not ret
c = config.target_classes.add()
c.name = "cat"
ret = get_target_class_names(config)
assert len(ret) == 1
assert ret[0] == "cat"
c2 = config.target_classes.add()
c2.name = "dog"
ret = get_target_class_names(config)
assert len(ret) == 2
assert ret[0] == "cat"
assert ret[1] == "dog"
def test_build_cost_auto_weight_hook():
"""Test CostAutoWeightHook creation."""
config = CostFunctionConfig()
# Default values should not pass.
with pytest.raises(ValueError):
build_cost_auto_weight_hook(config, 1)
# Add a class.
c = config.target_classes.add()
c.name = "cat"
c.class_weight = 0.5
c.coverage_foreground_weight = 0.75
# No objectives should not pass.
with pytest.raises(ValueError):
build_cost_auto_weight_hook(config, 1)
# Add an objective.
o = c.objectives.add()
o.name = "mouse"
o.initial_weight = 0.25
o.weight_target = 1.0
# A valid config should pass.
ret = build_cost_auto_weight_hook(config, 1)
assert isinstance(ret, CostAutoWeightHook)
with pytest.raises(ValueError):
# steps_per_epoch == 0 should fail.
build_cost_auto_weight_hook(config, 0)
class TestObjective(object):
"""Test cost_function_parameters.Objective."""
@pytest.mark.parametrize(
"name,initial_weight,weight_target",
[("level_3", -1.1, 0.5),
("level_3", 0.5, -1.1),
("", 0.5, 0.6),
(None, 0.1, 0.2)])
def test_objective_value_error(self, name, initial_weight, weight_target):
"""Test that a ValueError is raised on invalid inputs."""
with pytest.raises(ValueError):
Objective(name=name, initial_weight=initial_weight, weight_target=weight_target)
class TestTargetClass(object):
"""Test cost_function_parameters.TargetClass."""
VALID_OBJECTIVES = [Objective(name="number_1", initial_weight=0.5, weight_target=0.5),
Objective(name="number_2", initial_weight=0.2, weight_target=0.1)]
# The sum of initial_weight is 0.0.
INVALID_OBJECTIVES_1 = [Objective(name="number_1", initial_weight=0.0, weight_target=1.0),
Objective(name="number_2", initial_weight=0.0, weight_target=1.0)]
# The sum of weight_target is 0.0.
INVALID_OBJECTIVES_2 = [Objective(name="number_1", initial_weight=1.0, weight_target=0.0),
Objective(name="number_2", initial_weight=1.0, weight_target=0.0)]
@pytest.mark.parametrize(
"name,class_weight,coverage_foreground_weight,objectives",
[
("", 20., 0.1, VALID_OBJECTIVES),
(None, 20., 0.1, VALID_OBJECTIVES),
("cat", -0.5, 0.75, VALID_OBJECTIVES),
("cat", 0.5, -0.5, VALID_OBJECTIVES),
("cat", 0.5, 1.1, VALID_OBJECTIVES),
("cat", 0.5, 0.5, INVALID_OBJECTIVES_1),
("cat", 0.5, 0.5, INVALID_OBJECTIVES_2)
])
def test_target_class_value_error(
self, name, class_weight,
coverage_foreground_weight, objectives):
"""Test that TargetClass raises a ValueError on invalid values."""
with pytest.raises(ValueError):
TargetClass(
name=name,
class_weight=class_weight,
coverage_foreground_weight=coverage_foreground_weight,
objectives=objectives)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/cost_function/tests/test_cost_function.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for parsing training configs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import keras
import tensorflow as tf
from nvidia_tao_tf1.core import distribution
from nvidia_tao_tf1.core.hooks.utils import get_softstart_annealing_learning_rate
from nvidia_tao_tf1.cv.detectnet_v2.proto.regularizer_config_pb2 import RegularizerConfig
from nvidia_tao_tf1.cv.detectnet_v2.training.train_op_generator import TrainOpGenerator
def build_optimizer(optimizer_config, learning_rate):
"""Build an Optimizer.
Arguments:
optimizer_config (optimizer_config_pb2.OptimizerConfig): Configuration for the Optimizer
being built.
learning_rate: Constant or variable learning rate.
"""
# Check the config and create object.
distributor = distribution.get_distributor()
if optimizer_config.HasField("adam"):
adam = optimizer_config.adam
if adam.epsilon <= 0.0:
raise ValueError("AdamOptimizerConfig.epsilon must be > 0")
if adam.beta1 < 0.0 or adam.beta1 >= 1.0:
raise ValueError("AdamOptimizerConfig.beta1 must be >= 0 and < 1")
if adam.beta2 < 0.0 or adam.beta2 >= 1.0:
raise ValueError("AdamOptimizerConfig.beta2 must be >= 0 and < 1")
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
beta1=adam.beta1,
beta2=adam.beta2,
epsilon=adam.epsilon)
else:
raise NotImplementedError("The selected optimizer is not supported.")
# Wrap the optimizer to the Horovod optimizer to ensure synchronous training in the multi-GPU
# case.
optimizer = distributor.distribute_optimizer(optimizer)
return optimizer
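# Illustrative proto sketch (hypothetical values): an OptimizerConfig such as
#     adam { epsilon: 1e-08  beta1: 0.9  beta2: 0.999 }
# passes the checks above, and build_optimizer(optimizer_config, learning_rate)
# returns the corresponding tf.train.AdamOptimizer wrapped by the distributor.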
def build_regularizer(regularizer_config):
"""Build kernel and bias regularizers.
Arguments:
regularizer_config (regularizer_config_pb2.RegularizerConfig): Config for
regularization.
Returns:
kernel_regularizer, bias_regularizer: Keras regularizers created.
"""
# Check the config and create objects.
if regularizer_config.weight < 0.0:
raise ValueError("TrainingConfig.regularization_weight must be >= 0")
if regularizer_config.type == RegularizerConfig.NO_REG:
kernel_regularizer = None
bias_regularizer = None
elif regularizer_config.type == RegularizerConfig.L1:
kernel_regularizer = keras.regularizers.l1(regularizer_config.weight)
bias_regularizer = keras.regularizers.l1(regularizer_config.weight)
elif regularizer_config.type == RegularizerConfig.L2:
kernel_regularizer = keras.regularizers.l2(regularizer_config.weight)
bias_regularizer = keras.regularizers.l2(regularizer_config.weight)
else:
raise NotImplementedError("The selected regularizer is not supported.")
return kernel_regularizer, bias_regularizer
def build_learning_rate_schedule(learning_rate_config, max_steps):
"""Build learning rate schedule.
Args:
learning_rate_config (learning_rate_config_pb2.LearningRateConfig): Configuration for
learning rate.
max_steps (int): Total number of training steps.
Returns:
learning_rate: Learning rate schedule created.
"""
# Check the config and create objects.
global_step = tf.train.get_or_create_global_step()
if learning_rate_config.HasField("soft_start_annealing_schedule"):
params = learning_rate_config.soft_start_annealing_schedule
if params.min_learning_rate <= 0.0:
raise ValueError("SoftStartAnnealingScheduleConfig.min_learning_rate must be > 0")
if params.max_learning_rate <= 0.0:
raise ValueError("SoftStartAnnealingScheduleConfig.max_learning_rate must be > 0")
        if params.soft_start < 0.0 or params.soft_start > 1.0 or \
                params.soft_start > params.annealing:
            raise ValueError("SoftStartAnnealingScheduleConfig.soft_start must be between 0 "
                             "and 1, and less than SoftStartAnnealingScheduleConfig.annealing")
if params.annealing < 0.0 or params.annealing > 1.0:
raise ValueError("SoftStartAnnealingScheduleConfig.annealing must be between 0 and 1")
learning_rate = get_softstart_annealing_learning_rate(
progress=tf.cast(global_step, dtype=tf.float32) / max_steps,
soft_start=params.soft_start,
annealing=params.annealing,
base_lr=params.max_learning_rate,
min_lr=params.min_learning_rate)
else:
raise NotImplementedError("The selected learning rate schedule is not supported.")
return learning_rate
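# Illustrative schedule sketch (hypothetical values): with soft_start = 0.1,
# annealing = 0.7, min_learning_rate = 5e-6 and max_learning_rate = 5e-4, the
# learning rate ramps from the minimum to the maximum over the first 10% of
# max_steps, holds the maximum until 70% of max_steps, then anneals back
# towards the minimum for the remaining steps.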
def build_train_op_generator(cost_scaling_config):
"""Build a class that returns train op with or without cost scaling.
Arguments:
cost_scaling_config (cost_scaling_config_pb2.CostScalingConfig): Configuration for
cost scaling.
"""
if cost_scaling_config.increment < 0.0:
raise ValueError("CostScalingConfig.increment must be >= 0")
if cost_scaling_config.decrement < 0.0:
raise ValueError("CostScalingConfig.decrement must be >= 0")
return TrainOpGenerator(
cost_scaling_config.enabled,
cost_scaling_config.initial_exponent,
cost_scaling_config.increment,
cost_scaling_config.decrement
)
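if __name__ == '__main__':
    # Minimal self-check sketch (illustrative values only): build an L2
    # kernel/bias regularizer pair with weight 1e-4 from a RegularizerConfig
    # proto using build_regularizer above.
    reg_config = RegularizerConfig()
    reg_config.type = RegularizerConfig.L2
    reg_config.weight = 1e-4
    kernel_regularizer, bias_regularizer = build_regularizer(reg_config)
    print(kernel_regularizer.get_config())  # {'l1': 0.0, 'l2': 1e-04} in Keras.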
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/training/training_proto_utilities.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entry point scripts for the gridbox app defined here."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/training/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TrainOpGenerator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from keras import backend as K
import tensorflow as tf
logger = logging.getLogger(__name__)
class TrainOpGenerator(object):
"""TrainOpGenerator class.
TrainOpGenerator contains parameters for dynamic cost scaling required in mixed-precision
training. It creates a TF op that includes the adaptation logic for dynamic cost scaling.
The cost scaling feature can be disabled through parameters and in this case the generator
returns a plain train op by calling optimizer.minimize.
"""
def __init__(self, cost_scaling_enabled, cost_scaling_init,
cost_scaling_inc, cost_scaling_dec):
"""Setup a train op generator.
Args:
cost_scaling_enabled (bool): Enable or disable dynamic cost scaling.
cost_scaling_init (float): Initial value for cost scaling exponent.
cost_scaling_inc (float): Added to scaling exponent if gradients are OK.
cost_scaling_dec (float): Subtracted from scaling exponent if gradients overflow.
"""
# Store the parameters.
self.cost_scaling_enabled = cost_scaling_enabled
self.cost_scaling_init = cost_scaling_init
self.cost_scaling_inc = cost_scaling_inc
self.cost_scaling_dec = cost_scaling_dec
# Sanity check: allow user to train float16 without cost scaling, but give a warning.
if K.floatx() == 'float16' and not self.cost_scaling_enabled:
logger.warning('Cost scaling is disabled while mixed-precision training is enabled.')
def get_train_op(self, optimizer, total_cost, var_list=None):
"""Return a train op with or without cost scaling.
Args:
optimizer (horovod.tensorflow.DistributedOptimizer): TF-compatible optimizer object.
total_cost (float32 tf.Tensor): Scalar cost value used for computing gradients.
var_list (list<tf.Variable>): Variables to update to minimize loss. If None, defaults
to the list of variables collected in the graph under the key
GraphKeys.TRAINABLE_VARIABLES.
"""
if self.cost_scaling_enabled:
return self._get_train_op_with_cost_scaling(optimizer, total_cost, var_list)
return self._get_train_op_without_cost_scaling(optimizer, total_cost, var_list)
def _get_train_op_without_cost_scaling(self, optimizer, total_cost, var_list):
"""Return a train op without cost scaling.
Args:
optimizer (horovod.tensorflow.DistributedOptimizer): TF-compatible optimizer object.
total_cost (float32 tf.Tensor): Scalar cost value used for computing gradients.
var_list (list<tf.Variable>): Variables to update to minimize loss. If None, defaults
to the list of variables collected in the graph under the key
GraphKeys.TRAINABLE_VARIABLES.
"""
global_step = tf.train.get_or_create_global_step()
return optimizer.minimize(loss=total_cost,
global_step=global_step,
var_list=var_list)
def _get_train_op_with_cost_scaling(self, optimizer, total_cost, var_list):
"""Return a train op with cost scaling.
Args:
optimizer (horovod.tensorflow.DistributedOptimizer): TF-compatible optimizer object.
total_cost (float32 tf.Tensor): Scalar cost value used for computing gradients.
var_list (list<tf.Variable>): Variables to update to minimize loss. If None, defaults
to the list of variables collected in the graph under the key
GraphKeys.TRAINABLE_VARIABLES.
"""
# Create a persistent cost scaling exponent.
cost_scaling_exponent = tf.Variable(initial_value=self.cost_scaling_init,
dtype=tf.float32,
name='cost_scaling_exponent',
trainable=False)
# Log the number of discarded gradients.
bad_grad_counter = tf.Variable(initial_value=0,
dtype=tf.int64,
name='bad_grad_counter',
trainable=False)
# Multiply the total cost by 2^X.
cost_multiplier = 2.0 ** cost_scaling_exponent
inverse_multiplier = 1.0 / cost_multiplier
scaled_total_cost = total_cost * cost_multiplier
# Add tensorboard summaries.
tf.summary.scalar('scaled_total_cost', scaled_total_cost)
tf.summary.scalar('cost_scaling_exponent', cost_scaling_exponent)
tf.summary.scalar('bad_grad_counter', bad_grad_counter)
# Get the gradient tensors with the scaled cost.
grads_and_vars = optimizer.compute_gradients(loss=scaled_total_cost,
var_list=var_list)
# Bring the gradient scale back to original (divide by 2^X).
grads_and_vars = [(grad * inverse_multiplier, var)
for grad, var in grads_and_vars if grad is not None]
# Check that gradients are finite.
grad_ok = tf.reduce_all(tf.stack(
[tf.reduce_all(tf.is_finite(grad)) for grad, var in grads_and_vars]))
# When gradients are not OK, apply zeros to maintain Horovod multi-GPU sync.
zero_grads_and_vars = [(tf.zeros_like(var), var) for grad, var in grads_and_vars]
# Get global step.
global_step = tf.train.get_or_create_global_step()
# Create a conditional training op.
train_op = tf.cond(
# Condition is the finiteness of the gradients.
grad_ok,
# Finite gradients -> increase scaling and apply gradients.
lambda: tf.group(tf.assign_add(cost_scaling_exponent, self.cost_scaling_inc),
optimizer.apply_gradients(grads_and_vars=grads_and_vars,
global_step=global_step)),
# Invalid gradients -> decrease scaling and apply zero-gradients.
lambda: tf.group(tf.assign_add(bad_grad_counter, 1),
tf.assign_add(cost_scaling_exponent, -self.cost_scaling_dec),
optimizer.apply_gradients(grads_and_vars=zero_grads_and_vars,
global_step=global_step))
)
return train_op
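# Minimal usage sketch (illustrative only; the toy graph below is not part of
# the library): minimize a one-variable quadratic with dynamic cost scaling
# enabled, mirroring how the trainer would call get_train_op.
if __name__ == '__main__':
    demo_var = tf.Variable(3.0, name='demo_var')
    demo_cost = tf.square(demo_var - 1.0)
    generator = TrainOpGenerator(cost_scaling_enabled=True,
                                 cost_scaling_init=20.0,
                                 cost_scaling_inc=0.005,
                                 cost_scaling_dec=1.0)
    sgd = tf.train.GradientDescentOptimizer(learning_rate=0.1)
    demo_train_op = generator.get_train_op(sgd, demo_cost, var_list=[demo_var])
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for _ in range(10):
            sess.run(demo_train_op)
        print(sess.run(demo_var))  # Moves from 3.0 towards 1.0.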
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/training/train_op_generator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from keras import backend as K
import tensorflow as tf
from nvidia_tao_tf1.core import distribution
from nvidia_tao_tf1.core import hooks as tao_hooks
from nvidia_tao_tf1.core.utils import set_random_seed
from nvidia_tao_tf1.cv.detectnet_v2.common.graph import get_init_ops
def initialize(random_seed, training_precision=None):
"""Initialization.
Args:
random_seed: Random_seed in experiment spec.
training_precision: (TrainingPrecision or None) Proto object with FP16/FP32 parameters or
None. None leaves K.floatx() in its previous setting.
"""
setup_keras_backend(training_precision, is_training=True)
# Set Maglev random seed. Take care to give different seed to each process.
seed = distribution.get_distributor().distributed_seed(random_seed)
set_random_seed(seed)
def setup_keras_backend(training_precision, is_training):
"""Setup Keras-specific backend settings for training or inference.
Args:
training_precision: (TrainingPrecision or None) Proto object with FP16/FP32 parameters or
None. None leaves K.floatx() in its previous setting.
is_training: (bool) If enabled, Keras is set in training mode.
"""
# Learning phase of '1' indicates training mode -- important for operations
# that behave differently at training/test times (e.g. batch normalization)
if is_training:
K.set_learning_phase(1)
else:
K.set_learning_phase(0)
# Set training precision, if given. Otherwise leave K.floatx() in its previous setting.
# K.floatx() determines how Keras creates weights and casts them (Keras default: 'float32').
if training_precision is not None:
if training_precision.backend_floatx == training_precision.FLOAT32:
K.set_floatx('float32')
elif training_precision.backend_floatx == training_precision.FLOAT16:
K.set_floatx('float16')
else:
raise RuntimeError('Invalid training precision selected')
def get_weights_dir(results_dir):
"""Return weights directory.
Args:
results_dir: Base results directory.
Returns:
A directory for saved model and weights.
"""
save_weights_dir = os.path.join(results_dir, 'weights')
if distribution.get_distributor().is_master() and not os.path.exists(save_weights_dir):
os.makedirs(save_weights_dir)
return save_weights_dir
def compute_steps_per_epoch(num_samples, batch_size_per_gpu, logger):
"""Compute steps per epoch based on data set size, minibatch size, and number of GPUs.
Args:
num_samples (int): Number of samples in a data set.
batch_size_per_gpu (int): Minibatch size for a single GPU.
logger: logger instance.
Returns:
Number of steps needed to iterate through the data set once.
"""
steps_per_epoch, remainder = divmod(num_samples, batch_size_per_gpu)
if remainder != 0:
logger.info("Cannot iterate over exactly {} samples with a batch size of {}; "
"each epoch will therefore take one extra step.".format(
num_samples, batch_size_per_gpu))
steps_per_epoch = steps_per_epoch + 1
number_of_processors = distribution.get_distributor().size()
steps_per_epoch, remainder = divmod(steps_per_epoch, number_of_processors)
if remainder != 0:
logger.info("Cannot iterate over exactly {} steps per epoch with {} processors; "
"each processor will therefore take one extra step per epoch.".format(
steps_per_epoch, batch_size_per_gpu))
steps_per_epoch = steps_per_epoch + 1
return steps_per_epoch
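# --- Editor's worked example (not part of the original file) ---
# Hypothetical numbers for the two-stage round-up above: 1000 samples with a batch
# size of 32 give divmod(1000, 32) == (31, 8), rounded up to 32 steps; spreading 32
# steps over 4 processors gives divmod(32, 4) == (8, 0), i.e. 8 steps per processor.
def _steps_per_epoch_worked_example():
    num_samples, batch_size_per_gpu, num_processors = 1000, 32, 4
    steps, remainder = divmod(num_samples, batch_size_per_gpu)
    if remainder != 0:
        steps += 1
    steps, remainder = divmod(steps, num_processors)
    if remainder != 0:
        steps += 1
    assert steps == 8
    return steps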
def compute_summary_logging_frequency(steps_per_epoch_per_gpu, num_logging_points=10):
"""Compute summary logging point frequency.
Args:
steps_per_epoch_per_gpu (int): Number of steps per epoch for single GPU.
num_logging_points (int): Number of logging points per epoch.
Returns:
Summary logging frequency (int).
"""
if num_logging_points > steps_per_epoch_per_gpu:
return 1 # Log every step in epoch.
return steps_per_epoch_per_gpu // num_logging_points
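# --- Editor's illustrative example (not part of the original file) ---
# Hypothetical values: 100 steps per epoch with the default 10 logging points gives
# a summary every 10 steps; with only 5 steps per epoch, every step is logged.
def _logging_frequency_examples():
    assert compute_summary_logging_frequency(100) == 10
    assert compute_summary_logging_frequency(5) == 1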
def get_singular_monitored_session(keras_models, session_config=None,
hooks=None, scaffold=None,
checkpoint_filename=None):
"""Create a SingularMonitoredSession with KerasModelHook.
Args:
keras_models: A single Keras model or list of Keras models.
session_config (tf.ConfigProto): Specifies the session configuration options. Optional.
hooks (list): List of tf.SessionRunHook (or child class) objects. Can be None, in which case
a KerasModelHook is added, which takes care of properly initializing the variables in
a keras model.
scaffold (tf.train.Scaffold): Scaffold object that may contain various pieces needed to
            train a model. Can be None, in which case only local variable initializer ops are added.
        checkpoint_filename (str): Full path to a checkpoint to restore from. Optional; when
            given, Keras weight values are ignored in favor of the checkpoint.
Returns:
A SingularMonitoredSession that initializes the given Keras model.
"""
ignore_keras_values = checkpoint_filename is not None
if hooks is None:
hooks = []
if keras_models is not None:
# KerasModelHook takes care of initializing model variables.
hooks.insert(0, tao_hooks.KerasModelHook(keras_models, ignore_keras_values))
if scaffold is None:
scaffold = tf.train.Scaffold(local_init_op=get_init_ops())
return tf.train.SingularMonitoredSession(hooks=hooks,
scaffold=scaffold,
config=session_config,
checkpoint_filename_with_path=checkpoint_filename)
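# --- Editor's illustrative usage sketch (not part of the original file) ---
# Assumes a trivial Keras model; the KerasModelHook inserted above initializes its
# variables before the first run call, so no explicit initializer op is needed.
def _singular_monitored_session_sketch():
    import keras
    inputs = keras.layers.Input(shape=(4,))
    model = keras.models.Model(inputs, keras.layers.Dense(1)(inputs))
    with get_singular_monitored_session(keras_models=model) as session:
        # Expect an empty result: all variables were initialized by the hook.
        session.run(tf.report_uninitialized_variables())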
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/training/utilities.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for TrainingConfig parsing functions."""
from __future__ import absolute_import
import keras
import pytest
import tensorflow as tf
from nvidia_tao_tf1.cv.detectnet_v2.proto.learning_rate_config_pb2 import LearningRateConfig
from nvidia_tao_tf1.cv.detectnet_v2.proto.optimizer_config_pb2 import OptimizerConfig
from nvidia_tao_tf1.cv.detectnet_v2.proto.regularizer_config_pb2 import RegularizerConfig
from nvidia_tao_tf1.cv.detectnet_v2.training.training_proto_utilities import (
build_learning_rate_schedule,
build_optimizer,
build_regularizer
)
def test_build_optimizer():
"""Test optimizer parsing."""
optimizer_config = OptimizerConfig()
learning_rate = 0.5
# Default values shouldn't pass.
with pytest.raises(NotImplementedError):
build_optimizer(optimizer_config, learning_rate)
# Valid config should work.
optimizer_config.adam.epsilon = 0.01
optimizer_config.adam.beta1 = 0.9
optimizer_config.adam.beta2 = 0.999
ret = build_optimizer(optimizer_config, learning_rate)
assert isinstance(ret, tf.train.AdamOptimizer)
# Test various invalid values.
with pytest.raises(ValueError):
optimizer_config.adam.beta1 = 1.1
build_optimizer(optimizer_config, learning_rate)
with pytest.raises(ValueError):
optimizer_config.adam.beta1 = 0.9
optimizer_config.adam.beta2 = -1.0
build_optimizer(optimizer_config, learning_rate)
with pytest.raises(ValueError):
optimizer_config.adam.beta2 = 0.99
optimizer_config.adam.epsilon = 0.0
build_optimizer(optimizer_config, learning_rate)
def test_build_regularizer():
"""Test regularizer parsing."""
regularizer_config = RegularizerConfig()
weight = 0.001
# Default values should pass (defaults to NO_REG).
ret = build_regularizer(regularizer_config)
assert ret == (None, None)
# Test the other regularization types.
regularizer_config.weight = weight
regularizer_config.type = RegularizerConfig.L1
ret = build_regularizer(regularizer_config)
assert isinstance(ret[0], keras.regularizers.L1L2)
assert isinstance(ret[1], keras.regularizers.L1L2)
assert pytest.approx(ret[0].get_config()['l1']) == weight
assert pytest.approx(ret[0].get_config()['l2']) == 0.0
assert pytest.approx(ret[1].get_config()['l1']) == weight
assert pytest.approx(ret[1].get_config()['l2']) == 0.0
regularizer_config.type = RegularizerConfig.L2
ret = build_regularizer(regularizer_config)
assert isinstance(ret[0], keras.regularizers.L1L2)
assert isinstance(ret[1], keras.regularizers.L1L2)
assert pytest.approx(ret[0].get_config()['l1']) == 0.0
assert pytest.approx(ret[0].get_config()['l2']) == weight
assert pytest.approx(ret[1].get_config()['l1']) == 0.0
assert pytest.approx(ret[1].get_config()['l2']) == weight
# Test invalid values.
with pytest.raises(ValueError):
regularizer_config.weight = -1.0
build_regularizer(regularizer_config)
def test_build_learning_rate_schedule():
"""Test learning rate schedule parsing."""
learning_rate_config = LearningRateConfig()
# Default values should not pass, forcing user to set the config.
with pytest.raises(NotImplementedError):
build_learning_rate_schedule(learning_rate_config, 10)
# Default values should not pass.
params = learning_rate_config.soft_start_annealing_schedule
params.min_learning_rate = 0.1
with pytest.raises(ValueError):
build_learning_rate_schedule(learning_rate_config, 10)
# Setting proper values should pass.
params.max_learning_rate = 1.0
params.soft_start = 0.1
params.annealing = 0.7
ret = build_learning_rate_schedule(learning_rate_config, 10)
assert isinstance(ret, tf.Tensor)
# Test various invalid values.
with pytest.raises(ValueError):
params.min_learning_rate = 0.0
build_learning_rate_schedule(learning_rate_config, 10)
with pytest.raises(ValueError):
params.min_learning_rate = 0.1
params.max_learning_rate = 0.0
build_learning_rate_schedule(learning_rate_config, 10)
with pytest.raises(ValueError):
params.soft_start = 1.0
params.max_learning_rate = 1.0
build_learning_rate_schedule(learning_rate_config, 10)
with pytest.raises(ValueError):
params.soft_start = 0.4
params.annealing = 0.3
build_learning_rate_schedule(learning_rate_config, 10)
with pytest.raises(ValueError):
params.soft_start = 0.4
params.annealing = 1.1
build_learning_rate_schedule(learning_rate_config, 10)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/training/tests/test_training_proto_utilities.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A datastructure holding individual detections after clustering."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
Detection = namedtuple('Detection', [
'class_name', # String, e.g. 'car'.
'bbox', # Float (x1, y1, x2, y2).
'confidence', # Float.
'bbox_variance', # Float. Variance of the bboxes used for this Detection.
'num_raw_bboxes', # Float. Number of bboxes used for this Detection.
'cov', # Float. Average coverage of the object. Optional.
'depth', # Float. Predicted distance of the object. Optional.
'orientation', # Float. Predicted orientation of the object. Optional.
'urgency', # Float. Predicted urgency of the object. Optional.
])
num_optionals = 4
# Default optional fields to None.
Detection.__new__.__defaults__ = (None,) * num_optionals
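# --- Editor's illustrative example (not part of the original file) ---
# Only the first five fields are required; the optional tail defaults to None.
# The values below are hypothetical.
def _detection_example():
    det = Detection(class_name='car', bbox=(0.0, 0.0, 10.0, 10.0),
                    confidence=0.9, bbox_variance=0.1, num_raw_bboxes=4)
    assert det.cov is None and det.urgency is None
    return det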
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/postprocessor/detection.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""Confidence config class that holds parameters for postprocessing confidence."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nvidia_tao_tf1.cv.detectnet_v2.proto.postprocessing_config_pb2 import ConfidenceConfig as \
ConfidenceProto
def build_confidence_config(confidence_config):
"""Build ConfidenceConfig from a proto.
Args:
confidence_config: confidence_config proto message.
Returns:
ConfidenceConfig object.
"""
return ConfidenceConfig(confidence_config.confidence_model_filename,
confidence_config.confidence_threshold)
def build_confidence_proto(confidence_config):
"""Build proto from ConfidenceConfig.
Args:
confidence_config: ConfidenceConfig object.
Returns:
confidence_config: confidence_config proto.
"""
proto = ConfidenceProto()
proto.confidence_model_filename = confidence_config.confidence_model_filename
proto.confidence_threshold = confidence_config.confidence_threshold
return proto
class ConfidenceConfig(object):
"""Hold the parameters for postprocessing confidence."""
def __init__(self, confidence_model_filename, confidence_threshold):
"""Constructor.
Args:
confidence_model_filename (str): Absolute path to the confidence model hdf5.
confidence_threshold (float): Confidence threshold value. Must be >= 0.
Raises:
ValueError: If the input arg is not within the accepted range.
"""
if confidence_threshold < 0.0:
raise ValueError("ConfidenceConfig.confidence_threshold must be >= 0")
self.confidence_model_filename = confidence_model_filename
self.confidence_threshold = confidence_threshold
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/postprocessor/confidence_config.py |
# Copyright (c) 2017 - 2019, NVIDIA CORPORATION. All rights reserved.
"""Post processing handler for TLT gridbox models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/postprocessor/__init__.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""PostProcessingConfig class that holds postprocessing parameters."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from nvidia_tao_tf1.cv.detectnet_v2.postprocessor.clustering_config import build_clustering_config
from nvidia_tao_tf1.cv.detectnet_v2.postprocessor.clustering_config import build_clustering_proto
from nvidia_tao_tf1.cv.detectnet_v2.postprocessor.confidence_config import build_confidence_config
from nvidia_tao_tf1.cv.detectnet_v2.postprocessor.confidence_config import build_confidence_proto
from nvidia_tao_tf1.cv.detectnet_v2.proto.postprocessing_config_pb2 import PostProcessingConfig as\
PostProcessingProto
def build_postprocessing_config(postprocessing_proto):
"""Build PostProcessingConfig from a proto.
Args:
postprocessing_proto: proto.postprocessing_config proto message.
Returns:
configs: A dict of PostProcessingConfig instances indexed by target class name.
"""
configs = {}
for class_name, config in six.iteritems(postprocessing_proto.target_class_config):
clustering_config = build_clustering_config(config.clustering_config)
confidence_config = build_confidence_config(config.confidence_config)
configs[class_name] = PostProcessingConfig(clustering_config, confidence_config)
return configs
class PostProcessingConfig(object):
"""Hold the post-processing parameters for one class."""
def __init__(self, clustering_config, confidence_config):
"""Constructor.
Args:
clustering_config (ClusteringConfig object): Built clustering configuration object.
confidence_config (ConfidenceConfig object): Built confidence configuration object.
"""
self.clustering_config = clustering_config
self.confidence_config = confidence_config
def build_postprocessing_proto(postprocessing_config):
"""Build proto from a PostProcessingConfig dictionary.
Args:
postprocessing_config: A dict of PostProcessingConfig instances indexed by target class
name.
Returns:
postprocessing_proto: proto.postprocessing_config proto message.
"""
proto = PostProcessingProto()
for target_class_name, target_class in six.iteritems(postprocessing_config):
proto.target_class_config[target_class_name].clustering_config.CopyFrom(
build_clustering_proto(target_class.clustering_config))
proto.target_class_config[target_class_name].confidence_config.CopyFrom(
build_confidence_proto(target_class.confidence_config))
return proto
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/postprocessor/postprocessing_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Apply clustering to prediction tensors and create Detection objects."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
from sklearn.cluster import DBSCAN as dbscan
from nvidia_tao_tf1.cv.detectnet_v2.postprocessor.detection import Detection
from nvidia_tao_tf1.cv.detectnet_v2.postprocessor.utilities import get_keep_indices
def cluster_predictions(predictions, postprocessing_config):
"""Cluster bounding boxes from raw predictions, with some other preprocessing options.
Args:
predictions: Nested dictionary of prediction tensors with the structure
{'car': 'bbox': 4D tensor}
postprocessing_config: A dict in which keys are target class names and values
PostProcessingConfig objects.
Returns:
clustered_detections: A dict of list of lists, which contains all detections for each frame.
Keys are target class names.
Raises:
AssertionError: When target_class does not exist in postprocessing_config.
"""
clustered_detections = {}
# Cluster each class separately.
for target_class in predictions:
def flatten_spatial(array):
return array.reshape(array.shape[:-2] + (-1, ))
# Grab coverage and absolute bbox predictions.
prediction = {}
for objective in predictions[target_class]:
prediction[objective] = flatten_spatial(predictions[target_class][objective])
assert prediction[objective].ndim == 3
assert target_class in postprocessing_config
class_clustering_config = postprocessing_config[target_class].clustering_config
clustered_detections[target_class] = cluster_bboxes(
target_class, prediction,
class_clustering_config,
algo=class_clustering_config.clustering_algorithm)
return clustered_detections
def cluster_bboxes(target_class, raw_detections, clustering_config, algo="dbscan"):
"""
Cluster bboxes with a clustering algorithm.
Args:
target_class (str):
raw_detections: dictionary with keys:
bbox: rectangle coordinates in absolute image space, (num_imgs, 4, H*W) array.
cov: weights for the rectangles, (num_imgs, 1, H*W) array.
[other objectives similarly as the above]
clustering_config: ClusteringConfig object.
algo (str): The algorithm to be used for clustering.
choices: "nms", "dbscan".
Returns:
detections_per_image: a list of lists of Detection objects, one list for each input frame.
"""
db = None
if algo == "dbscan":
db = dbscan(
eps=clustering_config.dbscan_eps,
min_samples=max(int(clustering_config.dbscan_min_samples), 1),
metric='precomputed'
)
num_images = len(raw_detections['cov'])
# Initialize output detections to empty lists.
detections_per_image = [[] for _ in range(num_images)]
# Loop images.
for image_idx in range(num_images):
detection_data = threshold_data(
raw_detections,
clustering_config.coverage_threshold,
image_idx
)
# make sure boxes exist after preliminary filtering.
if detection_data is None:
continue
# Cluster boxes based on the clustering algorithm.
if algo == "dbscan":
detections_per_image[image_idx] += cluster_with_dbscan(
detection_data,
db,
target_class,
clustering_config.minimum_bounding_box_height,
threshold=clustering_config.dbscan_confidence_threshold
)
elif algo == "nms":
detections_per_image[image_idx] += cluster_with_nms(
detection_data,
target_class,
clustering_config.minimum_bounding_box_height,
nms_iou_threshold=clustering_config.nms_iou_threshold,
confidence_threshold=clustering_config.nms_confidence_threshold)
else:
raise NotImplementedError(
"Invalid clustering algorithm requested: {}".format(algo)
)
# Sort in descending order of confidence.
detections_per_image = [sorted(image_detections, key=lambda det: -det.confidence)
for image_detections in detections_per_image]
return detections_per_image
def cluster_with_dbscan(detection_data,
db,
target_class,
min_bbox_height,
threshold=0.01):
"""Clustering bboxes with DBSCAN.
Args:
        detection_data (dict): Dictionary of thresholded predictions.
        db (sklearn.cluster.DBSCAN): Scikit-learn DBSCAN object.
        target_class (str): Target class string to compile clustered detections.
        min_bbox_height (float): Minimum height of a bbox to be considered a valid detection.
        threshold (float): Minimum confidence for a clustered detection to be kept.
Returns:
detections_per_image (list): List of clustered detections per image.
"""
detections_per_image = []
# Compute clustering data.
clustering_data = compute_clustering_data(detection_data)
sample_weight_data = detection_data['cov'].flatten()
labeling = db.fit_predict(
X=clustering_data, sample_weight=sample_weight_data)
# Ignore detections which don't belong to any cluster (i.e., noisy samples).
labels = np.unique(labeling[labeling >= 0])
for label in labels:
detection_indices = labeling == label
detection = create_detection(
target_class, detection_data, detection_indices)
# Filter out too small bboxes.
if bbox_height_image(detection.bbox) <= min_bbox_height:
continue
if detection.confidence < threshold:
continue
detections_per_image += [detection]
return detections_per_image
def cluster_with_nms(detection_data,
target_class,
min_bbox_height,
nms_iou_threshold=0.2,
confidence_threshold=0.01):
"""Clustering raw detections with NMS."""
bboxes = detection_data["bbox"]
covs = detection_data["cov"][:, 0]
keep_indices = get_keep_indices(bboxes, covs, min_bbox_height,
Nt=nms_iou_threshold,
threshold=confidence_threshold)
if keep_indices.size == 0:
return []
    filtered_boxes = np.take_along_axis(bboxes, keep_indices, axis=0)
    filtered_coverages = covs[keep_indices]
    assert filtered_boxes.shape[0] == filtered_coverages.shape[0], (
        "The number of boxes and covs after filtering must be the same: "
        "{} != {}".format(filtered_boxes.shape[0], filtered_coverages.shape[0])
    )
    clustered_boxes_per_image = []
    for idx in range(len(filtered_boxes)):
        clustered_boxes_per_image.append(Detection(
            class_name=target_class,
            bbox_variance=None,
            num_raw_bboxes=None,
            bbox=filtered_boxes[idx, :],
            confidence=filtered_coverages[idx][0],
            cov=filtered_coverages[idx][0]))
return clustered_boxes_per_image
def threshold_data(raw_detections, coverage_threshold, image_idx):
"""Threshold output detections based on clustering_config.
Args:
raw_detections (dict): Dictionary of raw predictions.
coverage_threshold (float): Minimum confidence in the cov blob
to filter bboxes.
image_idx (int): Id of the image in the batch being processed.
Returns:
detection_data (dict): Dictionary of thresholded predictions per image.
"""
covs = raw_detections['cov'][image_idx][0]
# Check if the input was empty.
if not covs.size:
return None
# Discard too low coverage detections.
valid_indices = covs > coverage_threshold
if not valid_indices.any():
# Filtered out everything, continue.
return None
# Filter and reshape bbox data so that clustering data can be calculated.
detection_data = {}
for objective in raw_detections:
        detection_data[objective] = raw_detections[objective][image_idx][:, valid_indices].T
return detection_data
def compute_clustering_data(detection_data):
"""
Compute data required by the clustering algorithm.
Args:
detection_data: Values for bbox coordinates in the image plane.
Returns:
clustering_data: Numpy array which contains data for the clustering algorithm to use.
"""
clustering_data = 1.0 - compute_iou(detection_data['bbox'])
return clustering_data
def bbox_height_image(bbox):
"""Height of an bbox in (x1, y1, x2, y2) or LTRB format on image plane."""
return bbox[3] - bbox[1]
def compute_iou(rectangles):
"""Intersection over union (IOU) among a list of rectangles in (x1, y1, x2, y2) format.
Args:
rectangles: numpy array of shape (N, 4), (x1, y1, x2, y2) format, assumes x1 < x2, y1 < y2
Returns:
iou: numpy array of shape (N, N) of the IOU between all pairs of rectangles
"""
# Get coordinates
x1, y1, x2, y2 = rectangles.T
# Form intersection coordinates
intersection_x1 = np.maximum(x1[:, None], x1[None, :])
intersection_y1 = np.maximum(y1[:, None], y1[None, :])
intersection_x2 = np.minimum(x2[:, None], x2[None, :])
intersection_y2 = np.minimum(y2[:, None], y2[None, :])
# Form intersection areas
intersection_width = np.maximum(0, intersection_x2 - intersection_x1)
intersection_height = np.maximum(0, intersection_y2 - intersection_y1)
intersection_area = intersection_width * intersection_height
# Original rectangle areas
areas = (x2 - x1) * (y2 - y1)
# Union area is area_a + area_b - intersection area
union_area = (areas[:, None] + areas[None, :] - intersection_area)
    # Return IOU regularized with a small constant to avoid outputting NaN in pathological
    # cases (area_a = area_b = isect = 0).
iou = intersection_area / (union_area + 1e-5)
return iou
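# --- Editor's worked example (not part of the original file) ---
# Two hypothetical 2x2 boxes overlapping over half their area: intersection 2,
# union 4 + 4 - 2 = 6, so the off-diagonal IOU is ~1/3 (slightly lower because of
# the 1e-5 regularizer in the denominator).
def _compute_iou_example():
    rectangles = np.array([[0.0, 0.0, 2.0, 2.0],
                           [1.0, 0.0, 3.0, 2.0]])
    iou = compute_iou(rectangles)
    assert abs(iou[0, 1] - 1.0 / 3.0) < 1e-3
    return iou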
def _bbox_area_image(bbox):
"""Bounding box area for LTRB image plane bboxes."""
return (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
def mean_angle(angles, weights=None):
"""
Compute the (weighted) average of given angles.
The average is computed taking wrap-around into account. If weights are given, compute a
weighted average.
Args:
angles: The angles in radians
weights: The corresponding weights
Returns: The mean angle
"""
if weights is None:
        # Note: np.arctan2 is invariant to a common positive scaling of both
        # arguments, so the weights need not sum to 1.0.
weights = np.ones_like(angles)
cos_sum = np.sum(np.cos(angles) * weights)
sin_sum = np.sum(np.sin(angles) * weights)
return np.arctan2(sin_sum, cos_sum)
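# --- Editor's worked example (not part of the original file) ---
# Hypothetical angles of roughly +175 and -175 degrees: their circular mean is
# +/-180 degrees, whereas a naive arithmetic mean would incorrectly give 0.
def _mean_angle_example():
    angles = np.array([np.pi - 0.1, -(np.pi - 0.1)])
    assert abs(abs(mean_angle(angles)) - np.pi) < 1e-6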
def create_detection(target_class, detection_data, detection_indices):
"""Create a detection based on grid cell indices which belong to the same cluster.
Confidence of the detection is the sum of coverage values and bbox coordinates are the
weighted mean of the bbox coordinates in the grid cell indices.
Args:
target_class (str):
detection_data: Values for bbox coordinates.
detection_indices: Indices part of this detection.
Returns:
detection: Detection object that defines a detection.
"""
cluster = {}
for objective in detection_data:
cluster[objective] = detection_data[objective][detection_indices]
w = cluster['cov']
n = len(w)
# Sum of coverages and normalized coverages.
aggregated_w = np.sum(w)
w_norm = w / aggregated_w
# Cluster mean.
cluster_mean = {}
for objective in detection_data:
if objective == 'orientation':
cluster_mean[objective] = mean_angle(cluster[objective], w_norm)
elif objective == 'cov':
cluster_mean[objective] = aggregated_w / n
else:
cluster_mean[objective] = np.sum((cluster[objective]*w_norm), axis=0)
# Compute coefficient of variation of the box coords.
bbox_area = _bbox_area_image(cluster_mean['bbox'])
# Clamp to epsilon to avoid division by zero.
epsilon = 0.001
if bbox_area < epsilon:
bbox_area = epsilon
# Calculate weighted bounding box variance normalized by
# bounding box size.
bbox_variance = np.sum(w_norm.reshape((-1, 1)) * (cluster['bbox'] - cluster_mean['bbox']) ** 2,
axis=0)
bbox_variance = np.sqrt(np.mean(bbox_variance) / bbox_area)
detection = Detection(
class_name=target_class,
confidence=aggregated_w,
bbox_variance=bbox_variance,
num_raw_bboxes=n,
**cluster_mean
)
return detection
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/postprocessor/cluster.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""Clustering config class that holds parameters for clustering detections."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nvidia_tao_tf1.cv.detectnet_v2.proto.postprocessing_config_pb2 import ClusteringConfig \
as ClusteringProto
CLUSTERING_ALGORITHM = {
0: "dbscan",
1: "nms",
2: "hybrid"
}
def build_clustering_config(clustering_config):
"""Build ClusteringConfig from a proto.
Args:
clustering_config: clustering_config proto message.
Returns:
ClusteringConfig object.
"""
return ClusteringConfig(clustering_config.coverage_threshold,
clustering_config.dbscan_eps,
clustering_config.dbscan_min_samples,
clustering_config.minimum_bounding_box_height,
clustering_config.clustering_algorithm,
clustering_config.nms_iou_threshold,
                            clustering_config.dbscan_confidence_threshold,
                            clustering_config.nms_confidence_threshold)
def build_clustering_proto(clustering_config):
"""Build proto from ClusteringConfig.
Args:
clustering_config: ClusteringConfig object.
Returns:
clustering_config: clustering_config proto message.
"""
proto = ClusteringProto()
proto.coverage_threshold = clustering_config.coverage_threshold
proto.dbscan_eps = clustering_config.dbscan_eps
proto.dbscan_min_samples = clustering_config.dbscan_min_samples
proto.minimum_bounding_box_height = clustering_config.minimum_bounding_box_height
proto.clustering_algorithm = clustering_config.clustering_algorithm
proto.nms_iou_threshold = clustering_config.nms_iou_threshold
proto.nms_confidence_threshold = clustering_config.nms_confidence_threshold
proto.dbscan_confidence_threshold = clustering_config.dbscan_confidence_threshold
return proto
class ClusteringConfig(object):
"""Hold the parameters for clustering detections."""
def __init__(self, coverage_threshold, dbscan_eps, dbscan_min_samples,
minimum_bounding_box_height, clustering_algorithm,
nms_iou_threshold, dbscan_confidence_threshold,
nms_confidence_threshold):
"""Constructor.
Args:
coverage_threshold (float): Grid cells with coverage lower than this
threshold will be ignored. Valid range [0.0, 1.0].
dbscan_eps (float): DBSCAN eps parameter. The maximum distance between two samples
for them to be considered as in the same neighborhood. Valid range [0.0, 1.0].
dbscan_min_samples (float): DBSCAN min samples parameter. The number of samples (or
total weight) in a neighborhood for a point to be considered as a core point.
This includes the point itself. Must be >= 0.0.
minimum_bounding_box_height (int): Minimum bbox height. Must be >= 0.
clustering_algorithm (clustering_config.clustering_algorithm): The type of clustering
algorithm.
nms_iou_threshold (float): The iou threshold for NMS.
dbscan_confidence_threshold (float): The dbscan confidence threshold.
nms_confidence_threshold (float): The nms confidence threshold.
Raises:
ValueError: If the input arg is not within the accepted range.
"""
if coverage_threshold < 0.0 or coverage_threshold > 1.0:
raise ValueError("ClusteringConfig.coverage_threshold must be in [0.0, 1.0]")
clustering_algorithm = CLUSTERING_ALGORITHM[clustering_algorithm]
if clustering_algorithm not in ["dbscan", "nms"]:
raise NotImplementedError(
"Invalid clustering algorithm: {}".format(clustering_algorithm)
)
if clustering_algorithm == "dbscan":
if dbscan_eps < 0.0 or dbscan_eps > 1.0:
raise ValueError("ClusteringConfig.dbscan_eps must be in [0.0, 1.0]")
if dbscan_min_samples < 0.0:
raise ValueError("ClusteringConfig.dbscan_min_samples must be >= 0.0")
if dbscan_confidence_threshold < 0.0:
raise ValueError("ClusteringConfig.dbscan_confidence_threshold must be >= 0.0")
if minimum_bounding_box_height < 0:
raise ValueError(
"ClusteringConfig.minimum_bounding_box_height must be >= 0"
)
if clustering_algorithm == "nms":
if nms_iou_threshold < 0.0 or nms_iou_threshold > 1.0:
raise ValueError(
"ClusteringConfig.nms_iou_threshold must be in [0.0, 1.0]"
)
if nms_confidence_threshold < 0.0 or nms_confidence_threshold > 1.0:
raise ValueError("ClusteringConfig.nms_confidence_threshold must in [0.0, 1.0]")
self.coverage_threshold = coverage_threshold
self.dbscan_eps = dbscan_eps
self.dbscan_min_samples = dbscan_min_samples
self.minimum_bounding_box_height = minimum_bounding_box_height
self.clustering_algorithm = clustering_algorithm
self.nms_iou_threshold = nms_iou_threshold
self.dbscan_confidence_threshold = dbscan_confidence_threshold
self.nms_confidence_threshold = nms_confidence_threshold
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/postprocessor/clustering_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Postprocess for Detections."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from nvidia_tao_tf1.cv.detectnet_v2.postprocessor.cluster import cluster_predictions
def _bbox_xywh_image(bbox, image_size):
"""Convert bbox from LTRB to normalized XYWH.
Arguments:
bbox: Bbox in LTRB format.
image_size: Range of bbox coordinates.
Returns:
Bbox in XYWH format, normalized to [0,1] range.
"""
x = (bbox[0] + bbox[2]) / 2
y = (bbox[1] + bbox[3]) / 2
w = bbox[2] - bbox[0]
h = bbox[3] - bbox[1]
x /= float(image_size[0])
y /= float(image_size[1])
w /= float(image_size[0])
h /= float(image_size[1])
return x, y, w, h
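# --- Editor's worked example (not part of the original file) ---
# A hypothetical 200x200 box centred at (200, 150) inside a 400x400 frame maps to
# normalized (x, y, w, h) == (0.5, 0.375, 0.5, 0.5).
def _bbox_xywh_example():
    assert _bbox_xywh_image((100.0, 50.0, 300.0, 250.0), (400, 400)) == \
        (0.5, 0.375, 0.5, 0.5)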
def detections_to_confidence_model_input(detections, image_size):
"""Construct an input batch of detections.
Arguments:
detections: A list of Detections.
image_size: Detection bbox resolution as tuple (width, height).
Returns:
A list of confidence model input vectors.
"""
detection_tensors = []
for sample in detections:
for detection in sample:
bbox_x, bbox_y, bbox_width, bbox_height = _bbox_xywh_image(detection.bbox, image_size)
det = [detection.confidence,
detection.bbox_variance,
bbox_height,
bbox_width,
detection.num_raw_bboxes,
bbox_x,
bbox_y]
detection_tensors.append(np.array(det))
return detection_tensors
def _patch_detections(detections, confidences):
"""Reconstruct Detections with the computed confidence values.
Arguments:
detections: A list of list of Detections.
confidences: A list of confidence values, one for each Detection.
Returns:
A list of list of Detections with patched confidence values.
"""
index = 0
updated_detections = []
for sample in detections:
updated_sample = []
for detection in sample:
updated_detection = \
detection._replace(confidence=confidences[index][0])
updated_sample.append(updated_detection)
index = index + 1
updated_detections.append(updated_sample)
return updated_detections
def _filter_by_confidence(detections, confidence_threshold):
"""Filter list of detections by given confidence threshold.
Args:
detections (list): List of list of detections. Each outer list indexes frames, and each
inner list contains the Detection instances for a given frame.
confidence_threshold (float): Confidence threshold to use for filtering.
Returns:
filtered_detections (list): Filtered detections in the same format as detections.
"""
    filtered_detections = [[det for det in detections_list
                            if det.confidence >= confidence_threshold]
                           for detections_list in detections]
return filtered_detections
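# --- Editor's illustrative example (not part of the original file) ---
# One hypothetical frame with two detections; only the detection at or above the
# threshold survives the filter.
def _filter_by_confidence_example():
    from nvidia_tao_tf1.cv.detectnet_v2.postprocessor.detection import Detection
    frame = [Detection(class_name='car', bbox=(0.0, 0.0, 1.0, 1.0), confidence=conf,
                       bbox_variance=0.0, num_raw_bboxes=1) for conf in (0.2, 0.8)]
    kept = _filter_by_confidence([frame], confidence_threshold=0.5)
    assert [det.confidence for det in kept[0]] == [0.8]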
class PostProcessor(object):
"""Hold all the pieces of the DetectNet V2 postprocessing pipeline."""
def __init__(self, postprocessing_config,
confidence_models=None, image_size=None):
"""Constructor.
Args:
postprocessing_config (dict): Each key is a target class name (str), and value a
PostProcessingConfig object.
confidence_models (dict): Each key is a target class name (str), and value a
ConfidenceModel. Can be None.
image_size (tuple): Dimensions of the input to the detector (width, height). If
<confidence_models> are supplied, this must also be supplied.
Raises:
            ValueError: If <confidence_models> are supplied; confidence models are currently
                not supported.
"""
if confidence_models is not None:
raise ValueError("PostProcessor: Confidence Model is currently not supported")
self._postprocessing_config = postprocessing_config
if confidence_models is None:
self._confidence_models = dict()
else:
self._confidence_models = confidence_models
self._image_size = image_size
def cluster_predictions(self, predictions, postprocessing_config=None):
"""Cluster raw predictions into detections.
Args:
predictions (dict): Nested dictionary with structure [target_class_name][objective_name]
and values the corresponding 4-D (N, C, H, W) np.array as produced by the detector.
N is the number of images in a batch, C the number of dimension that objective has
(e.g. 4 coordinates for 'bbox'), and H and W are the spatial dimensions of the
detector's output.
postprocessing_config (dict of PostProcessingConfigs): Dictionary of postprocessing
parameters per class, which, if provided, override existing clustering parameters
for this call only.
Returns:
detections (dict): Keys are target class names, values are lists of lists of Detection
instances. Each outer list indexes frames, each inner list, the detections for that
frame.
"""
if postprocessing_config is None:
postprocessing_config = self._postprocessing_config
detections = cluster_predictions(predictions, postprocessing_config)
return detections
def postprocess_predictions(self, predictions, target_class_names,
postprocessing_config=None, session=None):
"""Cluster predictions into Detections.
Optionally apply confidence models, and filter by confidence.
Args:
predictions (dict): Nested dictionary with structure [target_class_name][objective_name]
and values the corresponding 4-D (N, C, H, W) np.array as produced by the detector.
N is the number of images in a batch, C the number of dimension that objective has
(e.g. 4 coordinates for 'bbox'), and H and W are the spatial dimensions of the
detector's output.
target_class_names (list): A list of target class names.
postprocessing_config (dict of PostProcessingConfigs): Dictionary of postprocessing
parameters per class, which, if provided, override existing clustering parameters
for this call only.
session (tf.Session): A session for confidence model inference. If
`self._confidence_models` is not None, this must also be supplied.
Returns:
detections (dict): Keys are target class names, values are lists of lists of Detection
instances. Each outer list indexes frames, each inner list, the detections for that
frame.
"""
detections = self.cluster_predictions(predictions, postprocessing_config)
if self._confidence_models:
detections = self.apply_confidence_models(
detections=detections,
session=session,
target_class_names=target_class_names)
# Now, filter by confidence.
detections = self.filter_by_confidence(detections)
return detections
def filter_by_confidence(self, detections, confidence_threshold=None):
"""Filter list of detections by given confidence threshold.
Args:
detections (dict): Keys are target class names, values are lists of lists of Detection
instances. Each outer list indexes frames, each inner list, the detections for that
frame.
confidence_threshold (float): Confidence threshold to use for filtering. Can be None.
If not supplied, the one defined in `self._postprocessing_config` is used.
Returns:
filtered_detections (dict): Filtered detections in the same format as <detections>.
"""
        filtered_detections = dict()
        for target_class_name in detections:
            class_threshold = confidence_threshold
            if class_threshold is None:
                # Fall back to the per-class threshold; use a local variable so one
                # class's setting does not leak into the next iteration.
                class_threshold = self._postprocessing_config[target_class_name].\
                    confidence_config.confidence_threshold
            filtered_detections[target_class_name] = _filter_by_confidence(
                detections[target_class_name],
                confidence_threshold=class_threshold)
return filtered_detections
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/postprocessor/postprocessing.py |
# Copyright (c) 2017 - 2019, NVIDIA CORPORATION. All rights reserved.
"""Utilities file containing helper functions to post process raw predictions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import numpy as np
from sklearn.cluster import DBSCAN as dbscan
from nvidia_tao_tf1.cv.detectnet_v2.utilities.constants import Detection
logger = logging.getLogger(__name__)
def cluster_bboxes(raw_detections,
criterion,
eps,
min_samples,
min_weight,
min_height,
confidence_model,
cluster_weights=(1.0, 0.0, 0.0, 0.0, 0.0, 0.0),
image_size=None,
framework='tlt',
clustering_algorithm="dbscan",
confidence_threshold=0.01,
nms_iou_threshold=0.01,
nms_confidence_threshold=0.1):
"""
Cluster the bboxes from the raw feature map to output boxes.
    It works in three steps.
    1. Obtain grid cell indices where coverage > min_weight.
    2. Make a list of all bboxes from the grid cells shortlisted in 1.
    3. Cluster this list of bboxes using a density-based clustering algorithm.
Inputs:
raw_detections : dict with keys:
bbox: rectangle coordinates, (num_imgs, 4, W, H) array
cov: weights for the rectangles, (num_imgs, 1, W, H) array
criterion (str): clustering criterion ('MSE' or 'IOU')
eps (float): threshold for considering two rectangles as one
min_samples (int): minimum cumulative weight for output rectangle
min_weight (float): filter out bboxes with weight smaller than
min_weight prior to clustering
min_height (float): filter out bboxes with height smaller than
min_height after clustering
        cluster_weights (tuple): weighting of different distance components
            (bbox, depth, orientation, bottom_vis, width_vis, orient_markers)
        confidence_model (str): Confidence model kind, one of 'aggregate_cov',
            'mean_cov' or 'mlp' (MLP-based confidence is not currently supported).
image_size (tuple): Size of the image at inference in the format
(image_width, image_height)
framework (str): Framework to run inferences under. (supported = tensorrt, tlt)
clustering_algorithm (str): Algorithm used to cluster the raw predictions.
confidence_threshold (float): The final overlay threshold post clustering.
nms_iou_threshold (float): IOU overlap threshold to be used when running NMS.
Returns:
detections_per_image (list): A list of lists of Detection objects, one list
for each input frame.
"""
db = None
if clustering_algorithm in ["dbscan", "hybrid"]:
db = setup_dbscan_object(eps, min_samples, criterion)
num_images = len(raw_detections['cov'])
if confidence_model == 'mlp':
raise NotImplementedError("MLP confidence thresholding not supported.")
# Initialize output detections to empty lists
# DO NOT DO a=[[]]*num_images --> that means 'a[0] is a[1] == True' !!!
detections_per_image = [[] for _ in range(num_images)]
# Needed when doing keras confidence model.
# keras.backend.get_session().run(tf.initialize_all_variables())
# Loop images
logger.debug("Clustering bboxes")
for image_idx in range(num_images):
# Check if the input was empty.
if raw_detections['cov'][image_idx].size == 0:
continue
bboxes, covs = threshold_bboxes(raw_detections, image_idx, min_weight)
if covs.size == 0:
continue
# Cluster using DBSCAN.
if clustering_algorithm == "dbscan":
logger.debug("Clustering bboxes using dbscan.")
clustered_boxes_per_image = cluster_with_dbscan(bboxes,
covs,
criterion,
db,
confidence_model,
cluster_weights,
min_height,
threshold=confidence_threshold)
# Clustering detections with NMS.
elif clustering_algorithm == "nms":
logger.debug("Clustering using NMS")
clustered_boxes_per_image = cluster_with_nms(bboxes, covs,
min_height,
nms_iou_threshold=nms_iou_threshold,
threshold=nms_confidence_threshold)
elif clustering_algorithm == "hybrid":
logger.debug("Clustering with DBSCAN + NMS")
clustered_boxes_per_image = cluster_with_hybrid(
bboxes, covs,
criterion, db,
confidence_model,
cluster_weights,
min_height,
nms_iou_threshold=nms_iou_threshold,
confidence_threshold=confidence_threshold,
nms_confidence_threshold=nms_confidence_threshold
)
else:
raise NotImplementedError("Clustering with {} algorithm not supported.".
format(clustering_algorithm))
detections_per_image[image_idx].extend(clustered_boxes_per_image)
# Sort in descending order of cumulative weight
detections_per_image = [sorted(dets, key=lambda det: -det.confidence)
for dets in detections_per_image]
# ToDo: @<vpraveen> This is needed when running confidence model in
# keras and the OD inference happens using TensorRT.
# if framework == "tensorrt":
# K.clear_session()
return detections_per_image
def get_keep_indices(dets, covs, min_height,
Nt=0.3, sigma=0.4, threshold=0.01,
method=4):
"""Perform NMS over raw detections.
This function implements clustering using multiple variants of NMS, namely,
Linear, Soft-NMS, D-NMS and NMS. It computes the indexes of the raw detections
that may be preserved post NMS.
Args:
dets (np.ndarray): Array of filtered bboxes.
        covs (np.ndarray): Array of filtered scores (coverages).
min_height (int): Minimum height of the boxes to be retained.
Nt (float): Overlap threshold.
sigma (float): Variance using in the Gaussian soft nms.
threshold (float): Filtering threshold post bbox clustering.
method (int): Variant of nms to be used.
Returns:
keep (np.ndarray): Array of indices of boxes to be retained after clustering.
"""
N = dets.shape[0]
assert len(dets.shape) == 2 and dets.shape[1] == 4, \
"BBox dimensions are invalid, {}.".format(dets.shape)
indexes = np.array([np.arange(N)])
assert len(covs.shape) == 1 and covs.shape[0] == N, \
"Coverage dimensions are invalid. {}".format(covs.shape)
# Convert to t, l, b, r representation for NMS.
l, t, r, b = dets.T
dets = np.asarray([t, l, b, r]).T
dets = np.concatenate((dets, indexes.T), axis=1)
scores = covs
# Compute box areas.
areas = (r - l + 1) * (b - t + 1)
for i in range(N):
# intermediate parameters for later parameters exchange
tBD = dets[i, :].copy()
tscore = scores[i].copy()
tarea = areas[i].copy()
pos = i + 1
if i != N-1:
maxscore = np.max(scores[pos:], axis=0)
maxpos = np.argmax(scores[pos:], axis=0)
else:
maxscore = scores[-1]
maxpos = 0
if tscore < maxscore:
dets[i, :] = dets[maxpos + i + 1, :]
dets[maxpos + i + 1, :] = tBD
tBD = dets[i, :]
scores[i] = scores[maxpos + i + 1]
scores[maxpos + i + 1] = tscore
tscore = scores[i]
areas[i] = areas[maxpos + i + 1]
areas[maxpos + i + 1] = tarea
tarea = areas[i]
# IoU calculate
xx1 = np.maximum(dets[i, 1], dets[pos:, 1])
yy1 = np.maximum(dets[i, 0], dets[pos:, 0])
xx2 = np.minimum(dets[i, 3], dets[pos:, 3])
yy2 = np.minimum(dets[i, 2], dets[pos:, 2])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (areas[i] + areas[pos:] - inter)
# min_overlap_box
x1c = np.minimum(dets[i, 1], dets[pos:, 1])
y1c = np.minimum(dets[i, 0], dets[pos:, 0])
x2c = np.maximum(dets[i, 3], dets[pos:, 3])
y2c = np.maximum(dets[i, 2], dets[pos:, 2])
c1x, c1y = (dets[i, 1] + dets[i, 3]) / 2.0, (dets[i, 0] + dets[i, 2]) / 2.0
c2x, c2y = (dets[pos:, 1] + dets[pos:, 3]) / 2.0, (dets[pos:, 0] + dets[pos:, 2]) / 2.0
centre_dis = ((c1x - c2x) ** 2)+((c1y - c2y) ** 2)
diag = ((x1c - x2c) ** 2)+((y1c - y2c) ** 2)
ovr_dis = ovr - centre_dis/diag
# Four methods: 1.linear 2.gaussian soft NMS 3. D-NMS 4.original NMS
if method == 1:
# linear NMS
weight = np.ones(ovr.shape)
weight[ovr > Nt] = weight[ovr > Nt] - ovr[ovr > Nt]
elif method == 2: # gaussian
# Gaussian Soft NMS
weight = np.exp(-(ovr * ovr) / sigma)
elif method == 3:
# D-NMS
weight = np.ones(ovr.shape)
weight[ovr_dis > Nt] = 0
elif method == 4:
# original NMS
weight = np.ones(ovr.shape)
weight[ovr > Nt] = 0
else:
raise NotImplementedError("NMS variants can only be between [1 - 4] where \n"
"1. linear NMS\n2. Gaussian Soft NMS\n3. D-NMS\n4. "
"Original NMS")
scores[pos:] = weight * scores[pos:]
# Filtering based on confidence threshold.
inds = dets[:, 4][scores > threshold]
keep = inds.astype(int)
keep = np.array([[f] for f in keep])
return keep
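# --- Editor's illustrative example (not part of the original file) ---
# Two hypothetical, heavily overlapping LTRB boxes: with the default hard-NMS
# variant (method=4) the lower-coverage box is suppressed and only index 0 is kept.
# Note that get_keep_indices sorts `dets` and `covs` in place.
def _get_keep_indices_example():
    dets = np.array([[0.0, 0.0, 10.0, 10.0],
                     [1.0, 1.0, 10.0, 10.0]])
    covs = np.array([0.9, 0.5])
    keep = get_keep_indices(dets, covs, min_height=0, Nt=0.3, threshold=0.01)
    assert keep.flatten().tolist() == [0]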
def cluster_with_nms(bboxes, covs, min_height,
nms_iou_threshold=0.01,
threshold=0.01):
"""Cluster raw detections with NMS.
Args:
bboxes (np.ndarray): The raw bbox predictions from the network.
covs (np.ndarray): The raw coverage predictions from the network.
min_height (float): The minimum height to filter out bboxes post clustering.
nms_iou_threshold (float): The overlap threshold to be used when running NMS.
threshold (float): The final confidence threshold to filter out bboxes
after clustering.
Returns:
clustered_boxes_per_images (list): List of clustered and filtered detections.
"""
keep_indices = get_keep_indices(bboxes, covs, min_height,
threshold=threshold,
Nt=nms_iou_threshold)
logger.debug("Keep indices: shape: {}, type: {}".format(keep_indices.shape,
type(keep_indices)))
if keep_indices.size == 0:
return []
    filtered_boxes = np.take_along_axis(bboxes, keep_indices, axis=0)
    filtered_coverages = covs[keep_indices]
    assert filtered_boxes.shape[0] == filtered_coverages.shape[0], \
        "The number of boxes and coverages after filtering must match."
    clustered_boxes_per_image = []
    for idx in range(len(filtered_boxes)):
        clustered_boxes_per_image.append(Detection(
            bbox=filtered_boxes[idx, :],
            confidence=filtered_coverages[idx][0],
            cluster_cv=None,
            num_raw_boxes=None,
            sum_coverages=None,
            max_cov_value=None,
            min_cov_value=None,
            candidate_covs=filtered_coverages[idx],
            candidate_bboxes=filtered_boxes[idx]))
return clustered_boxes_per_image
def cluster_with_dbscan(bboxes, covs, criterion, db,
confidence_model, cluster_weights,
min_height, threshold=0.01):
"""Cluster raw predictions using dbscan.
Args:
        bboxes (np.array): Thresholded raw bbox blob
        covs (np.array): Thresholded raw covs blob
        criterion (str): DBSCAN clustering criterion.
        db: Instantiated dbscan object.
        confidence_model (str): Confidence model kind, 'aggregate_cov' or 'mean_cov'.
        cluster_weights (tuple): weighting of different distance components
            (bbox, depth, orientation, bottom_vis, width_vis, orient_markers)
min_height (float): filter out bboxes with height smaller than
min_height after clustering
threshold (float): Final threshold to filter bboxes post
clustering.
Returns:
detections_per_image.
"""
detections_per_image = []
if criterion[:3] in ['MSE', 'IOU']:
if criterion[:3] == 'MSE':
data = bboxes
labeling = db.fit_predict(X=data, sample_weight=covs)
elif criterion[:3] == 'IOU':
pairwise_dist = \
cluster_weights[0] * (1.0 - iou_vectorized(bboxes))
labeling = db.fit_predict(X=pairwise_dist, sample_weight=covs)
labels = np.unique(labeling[labeling >= 0])
logger.debug("Number of boxes: {}".format(len(labels)))
for label in labels:
w = covs[labeling == label]
aggregated_w = np.sum(w)
w_norm = w / aggregated_w
n = len(w)
w_max = np.max(w)
w_min = np.min(w)
# Mean bounding box
b = bboxes[labeling == label]
mean_bbox = np.sum((b.T*w_norm).T, axis=0)
# Compute coefficient of variation of the box coords
bbox_area = (mean_bbox[2] - mean_bbox[0]) * (mean_bbox[3] - mean_bbox[1])
# Calculate weighted bounding box variance normalized by
# bounding box size
cluster_cv = np.sum(w_norm.reshape((-1, 1)) * (b - mean_bbox) ** 2, axis=0)
cluster_cv = np.sqrt(np.mean(cluster_cv) / bbox_area)
# Update confidence output based on mode of confidence.
if confidence_model == 'aggregate_cov':
confidence = aggregated_w
elif confidence_model == 'mean_cov':
w_mean = aggregated_w / n
confidence = (w_mean - w_min)/(w_max - w_min)
# ToDo <vpraveen>: Remove comment for MLP based confidence thresholding is ready.
# elif confidence_model.kind == 'mlp':
# conf_input = [aggregated_w,
# cluster_cv,
# float(mean_bbox[3] - mean_bbox[1]) / image_size[0],
# float(mean_bbox[2] - mean_bbox[0]) / image_size[1],
# n,
# float(mean_bbox[2] + mean_bbox[0]) / (2 * image_size[0]),
# float(mean_bbox[3] + mean_bbox[1]) / (2 * image_size[1])]
# conf_input = np.array(conf_input, 'float32')
# conf_input.shape = (1, ) + conf_input.shape
# # Predict on confidence model to generate inferences.
# predictions = conf_keras_model.predict(conf_input)
# confidence = predictions[0]
else:
raise NotImplementedError("Unknown confidence kind %s!" %
confidence_model.kind)
# Filter out too small bboxes
if mean_bbox[3] - mean_bbox[1] <= min_height:
continue
if confidence > threshold:
detections_per_image += [Detection(
bbox=mean_bbox,
confidence=confidence,
cluster_cv=cluster_cv,
num_raw_boxes=n,
sum_coverages=aggregated_w,
max_cov_value=w_max,
min_cov_value=w_min,
candidate_covs=w,
candidate_bboxes=b
)]
return detections_per_image
raise NotImplementedError("DBSCAN for this criterion is not implemented. {}".format(criterion))
def threshold_bboxes(raw_detections, image_idx, min_weight):
"""Threshold raw predictions based on coverages.
Args:
raw_detections (dict): Dictionary containing raw detections, cov
and bboxes.
Returns:
bboxes, covs: The filtered numpy array of bboxes and covs.
"""
# Get bbox coverage values, flatten (discard spatial and scale info)
covs = raw_detections['cov'][image_idx].flatten()
valid_indices = covs > min_weight
covs = covs[valid_indices]
# Flatten last three dimensions (discard spatial and scale info)
# assume bbox is always present
bboxes = raw_detections['bbox'][image_idx]
bboxes = bboxes.reshape(bboxes.shape[:1] + (-1,)).T[valid_indices]
return bboxes, covs
def setup_dbscan_object(eps, min_samples, criterion):
"""Instantiate dbscan object for clustering predictions with dbscan.
Args:
eps (float): DBSCAN epsilon value (search distance parameter)
min_samples (int): minimum cumulative weight for output rectangle
criterion (str): clustering criterion ('MSE' or 'IOU')
Returns:
db (dbscan object): DBSCAN object from scikit learn.
"""
min_samples = max(int(min_samples), 1)
if criterion[:3] == 'MSE':
# MSE between coordinates is used as the distance
# If depth and orientation are included, add them as
# additional coordinates
db = dbscan(eps=eps, min_samples=min_samples)
elif criterion[:3] == 'IOU':
# 1.-IOU is used as distance between bboxes
# For depth and orientation, use a normalized difference
# measure
# The final distance metric is a weighted sum of the above
db = dbscan(eps=eps, min_samples=min_samples, metric='precomputed')
else:
raise Exception("cluster_bboxes: Unknown bbox clustering criterion!")
return db
def cluster_with_hybrid(bboxes, covs,
criterion, db,
confidence_model,
cluster_weights,
min_height,
nms_iou_threshold=0.3,
confidence_threshold=0.1,
nms_confidence_threshold=0.1):
"""Cluster raw predictions with DBSCAN + NMS.
Args:
        bboxes (np.array): Thresholded raw bbox blob
        covs (np.array): Thresholded raw covs blob
        criterion (str): DBSCAN clustering criterion.
        db: Instantiated dbscan object.
        confidence_model (str): Confidence model kind, 'aggregate_cov' or 'mean_cov'.
        cluster_weights (tuple): weighting of different distance components
            (bbox, depth, orientation, bottom_vis, width_vis, orient_markers)
        min_height (float): filter out bboxes with height smaller than
            min_height after clustering
        nms_iou_threshold (float): The overlap threshold to be used when running NMS.
        confidence_threshold (float): The confidence threshold to filter out bboxes
after clustering by dbscan.
nms_confidence_threshold (float): The confidence threshold to filter out bboxes
after clustering by NMS.
Returns:
nms_clustered_detection_per_image (list): List of clustered detections
after hybrid clustering.
"""
dbscan_clustered_detections_per_image = cluster_with_dbscan(
bboxes,
covs,
criterion,
db,
confidence_model,
cluster_weights,
min_height,
threshold=confidence_threshold
)
# Extract raw detections from clustered outputs.
nms_candidate_boxes = []
nms_candidate_covs = []
for detections in dbscan_clustered_detections_per_image:
nms_candidate_boxes.extend(detections.candidate_bboxes)
nms_candidate_covs.extend(detections.candidate_covs)
nms_candidate_boxes = np.asarray(nms_candidate_boxes).astype(np.float32)
nms_candidate_covs = np.asarray(nms_candidate_covs).astype(np.float32)
if nms_candidate_covs.size == 0:
return []
# Clustered candidates from dbscan to run NMS.
nms_clustered_detections_per_image = cluster_with_nms(
nms_candidate_boxes,
nms_candidate_covs,
min_height,
nms_iou_threshold=nms_iou_threshold,
threshold=nms_confidence_threshold
)
return nms_clustered_detections_per_image
def iou_vectorized(rects):
"""
Intersection over union among a list of rectangles in LTRB format.
Args:
rects (np.array) : numpy array of shape (N, 4), LTRB format, assumes L<R and T<B
    Returns:
d (np.array) : numpy array of shape (N, N) of the IOU between all pairs of rects
"""
# coordinates
l, t, r, b = rects.T
# form intersection coordinates
isect_l = np.maximum(l[:, None], l[None, :])
isect_t = np.maximum(t[:, None], t[None, :])
isect_r = np.minimum(r[:, None], r[None, :])
isect_b = np.minimum(b[:, None], b[None, :])
# form intersection area
isect_w = np.maximum(0, isect_r - isect_l)
isect_h = np.maximum(0, isect_b - isect_t)
area_isect = isect_w * isect_h
# original rect areas
areas = (r - l) * (b - t)
# Union area is area_a + area_b - intersection area
denom = (areas[:, None] + areas[None, :] - area_isect)
    # Return IOU regularized with .01, to avoid outputting NaN in pathological
    # cases (area_a = area_b = isect = 0)
return area_isect / (denom + .01)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/postprocessor/utilities.py |
# Copyright (c) 2017 - 2019, NVIDIA CORPORATION. All rights reserved.
"""Post processing handler for TLT DetectNet_v2 models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from contextlib import contextmanager
from copy import deepcopy
from functools import partial
import logging
from multiprocessing import Pool
import operator
import os
from time import time
from addict import Dict
import numpy as np
from PIL import ImageDraw
from six.moves import range
import wandb
from nvidia_tao_tf1.cv.common.mlops.wandb import is_wandb_initialized
from nvidia_tao_tf1.cv.detectnet_v2.postprocessor.utilities import cluster_bboxes
from nvidia_tao_tf1.cv.detectnet_v2.utilities.constants import criterion, scales
logger = logging.getLogger(__name__)
CLUSTERING_ALGORITHM = {0: "dbscan",
1: "nms",
2: "hybrid"}
@contextmanager
def pool_context(*args, **kwargs):
"""Simple wrapper to get pool context with close function."""
pool = Pool(*args, **kwargs)
try:
yield pool
finally:
pool.terminate()
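# Usage sketch (illustrative; `work_fn` and `items` are hypothetical names):
#
#     with pool_context(processes=4) as pool:
#         results = pool.map(work_fn, items)
#
# The try/finally above guarantees pool.terminate() runs even if the mapped
# function raises, mirroring Python 3's `with Pool()` behavior while staying
# Python 2 compatible.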
def render_single_image_output(input_tuple, target_classes,
image_overlay, save_kitti,
output_image_root, output_label_root,
class_wise_detections,
linewidth, resized_size,
confidence_model,
box_color,
output_map,
frame_height,
frame_width):
"""Rendering for a single image.
Args:
input_tuple (tuple): Tuple of rendering inputs.
target_classes (list): List of classes to be post-processed.
image_overlay (bool): Flag to set images to overlay.
save_kitti (bool): Flag to dump kitti label files.
output_image_root (str): Path to the directory where rendered images are to be saved.
output_label_root (str): Path to the directory where kitti labels are to be saved.
class_wise_detections (dict): Dictionary of class-wise detections.
linewidth (int): Thickness of bbox pixels.
resized_size (tuple): Size of resized images.
confidence_model (dict): Dictionary of confidence models per class.
box_color (dict): Dictionary of rendering colors for boxes.
output_map (dict): Dictionary of output map classes.
frame_height (int): Inference frame height.
frame_width (int): Inference frame width.
Returns:
No explicit returns.
"""
idx = input_tuple[0]
pil_input = input_tuple[1]
image_name = input_tuple[2]
scaling_factor = tuple(map(operator.truediv, pil_input.size, resized_size))
processed_image = deepcopy(pil_input)
label_list = []
image_file = os.path.join(output_image_root, image_name)
label_file = os.path.join(output_label_root, os.path.splitext(image_name)[0] + '.txt')
draw = ImageDraw.Draw(processed_image)
for keys in target_classes:
key = str(keys)
cluster_key = key
if key not in list(output_map.keys()):
cluster_key = "default"
bbox_list, confidence_list = _get_bbox_and_confs(class_wise_detections[key][idx],
scaling_factor,
cluster_key,
confidence_model,
frame_height,
frame_width)
num_boxes = len(bbox_list)
if num_boxes != 0:
for box in range(len(bbox_list)):
edgecolor = box_color[cluster_key]
x1 = float(bbox_list[box][0])
y1 = float(bbox_list[box][1])
x2 = float(bbox_list[box][2])
y2 = float(bbox_list[box][3])
if cluster_key == "default":
class_name = key
else:
class_name = output_map[key] \
if key in list(output_map.keys()) else key
if image_overlay:
if (x2 - x1) >= 0.0 and (y2 - y1) >= 0.0:
draw.rectangle(((x1, y1), (x2, y2)), outline=edgecolor)
for i in range(linewidth):
draw.rectangle(((x1 - i, y1 - i), (x2 + i, y2 + i)), outline=edgecolor)
draw.text((x1, y1), f"{class_name}:{confidence_list[box]:.3f}")
if save_kitti:
label_tail = " 0.00 0.00 0.00 "\
"0.00 0.00 0.00 0.00 {:.3f}\n".format(confidence_list[box])
label_head = class_name + " 0.00 0 0.00 "
bbox_string = "{:.3f} {:.3f} {:.3f} {:.3f}".format(x1, y1,
x2, y2)
label_string = label_head + bbox_string + label_tail
label_list.append(label_string)
if image_overlay:
processed_image.save(image_file)
if is_wandb_initialized():
wandb_image = wandb.Image(processed_image, os.path.basename(os.path.splitext(image_file)[0]))
wandb.log({"Rendered images": wandb_image})
if save_kitti:
with open(label_file, 'w') as f:
if label_list:
for line in label_list:
f.write(line)
def _get_bbox_and_confs(classwise_detections, scaling_factor,
key, confidence_model, frame_height,
frame_width):
"""Simple function to get bbox and confidence formatted list."""
bbox_list = []
confidence_list = []
for i in range(len(classwise_detections)):
bbox_object = classwise_detections[i]
coords_scaled = _scale_bbox(bbox_object.bbox, scaling_factor,
frame_height, frame_width)
if confidence_model[key] == 'mlp':
confidence = bbox_object.confidence[0]
else:
confidence = bbox_object.confidence
bbox_list.append(coords_scaled)
confidence_list.append(confidence)
return bbox_list, confidence_list
def _scale_bbox(bbox, scaling_factor, frame_height, frame_width):
'''
Scale bbox coordinates back to original image dimensions.
Args:
bbox (list): bbox coordinates in ltrb format.
scaling_factor (tuple): per-axis input image / model inference size ratio.
frame_height (int): inference frame height used for clamping.
frame_width (int): inference frame width used for clamping.
Returns:
bbox_scaled (np.array): array of scaled ltrb coordinates.
'''
# Clipping and clamping coordinates.
x1 = min(max(0.0, float(bbox[0])), frame_width)
y1 = min(max(0.0, float(bbox[1])), frame_height)
x2 = max(min(float(bbox[2]), frame_width), x1)
y2 = max(min(float(bbox[3]), frame_height), y1)
# Rescaling center.
hx, hy = x2 - x1, y2 - y1
cx = x1 + hx/2
cy = y1 + hy/2
# Rescaling height, width
nx, ny = cx * scaling_factor[0], cy * scaling_factor[1]
nhx, nhy = hx * scaling_factor[0], hy * scaling_factor[1]
# Final bbox coordinates.
nx1, nx2 = nx - nhx/2, nx + nhx/2
ny1, ny2 = ny - nhy/2, ny + nhy/2
# Stacked coordinates.
bbox_scaled = np.asarray([nx1, ny1, nx2, ny2])
return bbox_scaled
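# Worked example for _scale_bbox (hypothetical numbers): with a 960x544
# inference frame mapped back to a 1920x1080 source image, scaling_factor is
# (2.0, ~1.985). A bbox [100, 100, 200, 200] keeps its center (150, 150) and
# size (100, 100), which rescale to roughly [200.0, 198.5, 400.0, 397.1] in
# source-image coordinates.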
class BboxHandler(object):
"""Class to handle bbox output from the inference script."""
def __init__(self, spec=None, **kwargs):
"""Setting up Bbox handler."""
self.spec = spec
self.cluster_params = Dict()
self.frame_height = kwargs.get('frame_height', 544)
self.frame_width = kwargs.get('frame_width', 960)
self.bbox_normalizer = kwargs.get('bbox_normalizer', 35)
self.bbox = kwargs.get('bbox', 'ltrb')
self.cluster_params = kwargs.get('cluster_params', None)
self.classwise_cluster_params = kwargs.get("classwise_cluster_params", None)
self.bbox_norm = (self.bbox_normalizer, )*2
self.stride = kwargs.get("stride", 16)
self.train_img_size = kwargs.get('train_img_size', None)
self.save_kitti = kwargs.get('save_kitti', True)
self.image_overlay = kwargs.get('image_overlay', True)
self.extract_crops = kwargs.get('extract_crops', True)
self.target_classes = kwargs.get('target_classes', None)
self.bbox_offset = kwargs.get("bbox_offset", 0.5)
self.clustering_criterion = kwargs.get("criterion", "IOU")
self.postproc_classes = kwargs.get('postproc_classes', self.target_classes)
confidence_threshold = {}
nms_confidence_threshold = {}
for key, value in list(self.classwise_cluster_params.items()):
confidence_threshold[key] = value.clustering_config.dbscan_confidence_threshold
if value.clustering_config.nms_confidence_threshold:
nms_confidence_threshold[key] = value.clustering_config.nms_confidence_threshold
self.state = Dict({
'scales': scales,
'display_classes': self.target_classes,
'min_height': 0,
'criterion': criterion,
'confidence_th': {'car': 0.9, 'person': 0.1, 'truck': 0.1},
'nms_confidence_th': {'car': 0.9, 'person': 0.1, 'truck': 0.1},
'cluster_weights': (1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
})
self.framework = kwargs.get("framework", "tlt")
self.state.confidence_th = confidence_threshold
self.state.nms_confidence_th = nms_confidence_threshold
def bbox_preprocessing(self, input_cluster):
"""Function to perform inplace manipulation of prediction dicts before clustering.
Args:
input_cluster (Dict): prediction dictionary of output cov and bbox per class.
Returns:
input_cluster (Dict): shape manipulated prediction dictionary.
"""
for classes in self.target_classes:
input_cluster[classes]['bbox'] = self.abs_bbox_converter(input_cluster[classes]
['bbox'])
# Stack predictions
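# (Illustrative shapes, assuming a [N, 4, H, W] bbox blob and an
# [N, H, W] cov blob: bbox becomes [N, 4, H, W, 1] and cov becomes
# [N, 1, H, W, 1], i.e. a trailing singleton axis is appended so the
# clustering code can treat both blobs uniformly.)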
for keys in list(input_cluster[classes].keys()):
if 'bbox' in keys:
input_cluster[classes][keys] = \
input_cluster[classes][keys][np.newaxis, :, :, :, :]
input_cluster[classes][keys] = \
np.asarray(input_cluster[classes][keys]).transpose((1, 2, 3, 4, 0))
elif 'cov' in keys:
input_cluster[classes][keys] = input_cluster[classes][keys][np.newaxis,
np.newaxis,
:, :, :]
input_cluster[classes][keys] = \
np.asarray(input_cluster[classes][keys]).transpose((2, 1, 3, 4, 0))
return input_cluster
def abs_bbox_converter(self, bbox):
'''Convert the raw grid cell coordinates to image space coordinates.
Args:
bbox (np.array): BBox coordinates blob per batch with shape [n, 4, h, w].
Returns:
bbox (np.array): BBox coordinates reconstructed from grid cell based coordinates
with the same dimensions.
'''
target_shape = bbox.shape[-2:]
# Define grid cell centers
gc_centers = [(np.arange(s) * self.stride + self.bbox_offset) for s in target_shape]
gc_centers = [s / n for s, n in zip(gc_centers, self.bbox_norm)]
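# E.g. with the defaults stride=16, bbox_offset=0.5 and bbox_norm=(35, 35),
# grid column i maps to the normalized center (16 * i + 0.5) / 35.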
# Mapping cluster output
if self.bbox == 'arxy':
assert not self.train_img_size, \
"ARXY bbox format needs same train and inference image shapes."
# reverse mapping of abs bbox to arxy
area = (bbox[:, 0, :, :] / 10.) ** 2.
width = np.sqrt(area * bbox[:, 1, :, :])
height = np.sqrt(area / bbox[:, 1, :, :])
cen_x = width * bbox[:, 2, :, :] + gc_centers[0][:, np.newaxis]
cen_y = height * bbox[:, 3, :, :] + gc_centers[1]
bbox[:, 0, :, :] = cen_x - width / 2.
bbox[:, 1, :, :] = cen_y - height / 2.
bbox[:, 2, :, :] = cen_x + width / 2.
bbox[:, 3, :, :] = cen_y + height / 2.
bbox[:, 0, :, :] *= self.bbox_norm[0]
bbox[:, 1, :, :] *= self.bbox_norm[1]
bbox[:, 2, :, :] *= self.bbox_norm[0]
bbox[:, 3, :, :] *= self.bbox_norm[1]
elif self.bbox == 'ltrb':
# Convert relative LTRB bboxes to absolute bboxes inplace.
# Input bbox in format (image, bbox_value,
# grid_cell_x, grid_cell_y).
# Output bboxes are given in pixel coordinates in the source resolution.
if not self.train_img_size:
self.train_img_size = self.bbox_norm
# Compute scalers that allow using different resolution in
# inference and training
scale_w = self.bbox_norm[0] / self.train_img_size[0]
scale_h = self.bbox_norm[1] / self.train_img_size[1]
bbox[:, 0, :, :] -= gc_centers[0][:, np.newaxis] * scale_w
bbox[:, 1, :, :] -= gc_centers[1] * scale_h
bbox[:, 2, :, :] += gc_centers[0][:, np.newaxis] * scale_w
bbox[:, 3, :, :] += gc_centers[1] * scale_h
bbox[:, 0, :, :] *= -self.train_img_size[0]
bbox[:, 1, :, :] *= -self.train_img_size[1]
bbox[:, 2, :, :] *= self.train_img_size[0]
bbox[:, 3, :, :] *= self.train_img_size[1]
return bbox
def cluster_detections(self, preds):
"""
Cluster detections and filter based on confidence.
Args:
preds (Dict): Raw predictions, a Dict of Dicts keyed by object type.
Returns:
classwise_detections (Dict): Clustered detections per object type.
"""
# Cluster
classwise_detections = Dict()
clustering_time = 0.
for object_type in preds:
start_time = time()
if object_type not in list(self.classwise_cluster_params.keys()):
logger.info("Object type {} not defined in cluster file. Falling back to default"
"values".format(object_type))
buffer_type = "default"
if buffer_type not in list(self.classwise_cluster_params.keys()):
raise ValueError("If the class-wise cluster params for an object isn't "
"there then please mention a default class.")
else:
buffer_type = object_type
logger.debug("Clustering bboxes {}".format(buffer_type))
classwise_params = self.classwise_cluster_params[buffer_type]
clustering_config = classwise_params.clustering_config
clustering_algorithm = CLUSTERING_ALGORITHM[clustering_config.clustering_algorithm]
nms_iou_threshold = 0.3
if clustering_config.nms_iou_threshold:
nms_iou_threshold = clustering_config.nms_iou_threshold
confidence_threshold = self.state.confidence_th.get(buffer_type, 0.1)
nms_confidence_threshold = self.state.nms_confidence_th.get(buffer_type, 0.1)
detections = cluster_bboxes(preds[object_type],
criterion=self.clustering_criterion,
eps=classwise_params.clustering_config.dbscan_eps + 1e-12,
min_samples=clustering_config.dbscan_min_samples,
min_weight=clustering_config.coverage_threshold,
min_height=clustering_config.minimum_bounding_box_height,
confidence_model=classwise_params.confidence_model,
cluster_weights=self.state.cluster_weights,
image_size=(self.frame_width, self.frame_height),
framework=self.framework,
confidence_threshold=confidence_threshold,
clustering_algorithm=clustering_algorithm,
nms_iou_threshold=nms_iou_threshold,
nms_confidence_threshold=nms_confidence_threshold)
clustering_time += (time() - start_time) / len(preds)
classwise_detections[object_type] = detections
return classwise_detections
def render_outputs(self, _classwise_detections, pil_list,
output_image_root, output_label_root,
chunk_list, resized_size, linewidth=2):
"""Overlay primary detections on original image.
Args:
_classwise_detections (dict): classwise detection outputs from the network
handler.
pil_list (list): list of PIL images on which the detector was run.
output_image_root (str): output directory where the images are
saved after rendering.
output_label_root (str): path to the output directory where the labels
are saved after rendering.
chunk_list (list): image names corresponding to the entries of pil_list.
resized_size (tuple): size of the resized (inference) images.
linewidth (int): thickness of bbox lines in pixels.
Returns:
No explicit returns; rendered images and kitti labels are written to disk.
"""
if self.image_overlay:
if not os.path.exists(output_image_root):
os.makedirs(output_image_root)
if self.save_kitti:
if not os.path.exists(output_label_root):
os.makedirs(output_label_root)
if len(pil_list) != len(chunk_list):
raise ValueError("Cannot render a chunk with unequal number of images and image_names.")
# Setting up picklable arguments.
input_tuples = [(i, pil_list[i], chunk_list[i]) for i in range(len(pil_list))]
# Unpacking cluster params.
box_color = {}
output_map = {}
confidence_model = {}
for key in list(self.classwise_cluster_params.keys()):
confidence_model[key] = None
if self.classwise_cluster_params[key].confidence_model:
confidence_model[key] = self.classwise_cluster_params[key].confidence_model
output_map[key] = None
if self.classwise_cluster_params[key].output_map:
output_map[key] = self.classwise_cluster_params[key].output_map
box_color[key] = (0, 255, 0)
if self.classwise_cluster_params[key].bbox_color:
color = self.classwise_cluster_params[key].bbox_color
box_color[key] = (color.R, color.G, color.B)
# Run rendering across multiple processes.
with pool_context() as pool:
pool.map(partial(render_single_image_output,
target_classes=list(self.postproc_classes),
image_overlay=self.image_overlay,
save_kitti=self.save_kitti,
output_image_root=output_image_root,
output_label_root=output_label_root,
class_wise_detections=_classwise_detections,
linewidth=linewidth,
resized_size=resized_size,
confidence_model=confidence_model,
box_color=box_color,
output_map=output_map,
frame_height=self.frame_height,
frame_width=self.frame_width), input_tuples)
def extract_bboxes(self, class_wise_detections, pil_input, scaling_factor, idx=0):
'''Extract sub-images of primary detections from the primary image.
Args:
class_wise_detections (dict): classwise detection outputs from the network
handler.
pil_input (Pillow object): PIL input image from which crops are extracted.
scaling_factor (tuple): input/model image size ratio used to reconstruct labels
back to image coordinates.
idx (int): batchwise inferencing image id in the batch
Returns:
crops (dict): dictionary mapping each class name to a list of PIL crops
of its detections.
'''
crops = {}
for keys in self.postproc_classes:
key = str(keys)
bbox_list = []
for i in range(len(class_wise_detections[key][idx])):
bbox_list.append(_scale_bbox(class_wise_detections[key][idx][i].bbox,
scaling_factor,
self.frame_height,
self.frame_width))
crop_list = []
if bbox_list:
for box in range(len(bbox_list)):
x1 = float(bbox_list[box][0])
y1 = float(bbox_list[box][1])
x2 = float(bbox_list[box][2])
y2 = float(bbox_list[box][3])
crop = pil_input.crop((x1, y1, x2, y2))
crop_list.append(crop)
crops[key] = crop_list
return crops
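# Usage sketch (hypothetical variable names; assumes `params`, `raw_preds`,
# `pil_image` and `scaling_factor` are already built by the caller):
#
#     handler = BboxHandler(classwise_cluster_params=params, target_classes=["car"])
#     preds = handler.bbox_preprocessing(raw_preds)
#     detections = handler.cluster_detections(preds)
#     crops = handler.extract_bboxes(detections, pil_image, scaling_factor)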
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/postprocessor/bbox_handler.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Detection postprocessing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pytest
from nvidia_tao_tf1.cv.detectnet_v2.postprocessor.detection import Detection
from nvidia_tao_tf1.cv.detectnet_v2.postprocessor.postprocessing import _filter_by_confidence
from nvidia_tao_tf1.cv.detectnet_v2.postprocessor.postprocessing import _patch_detections
from nvidia_tao_tf1.cv.detectnet_v2.postprocessor.postprocessing import PostProcessor
test_detections = [[
Detection(
class_name='car',
bbox=[0., 0., 16., 16.],
confidence=50.0,
bbox_variance=0.,
num_raw_bboxes=1),
Detection(
class_name='pedestrian',
bbox=[16., 16., 32., 32.],
confidence=50.0,
bbox_variance=0.,
num_raw_bboxes=1)
]]
def test_patch_detections():
"""Test _patch_detections."""
confidences = np.array([[0.5], [0.5]])
updated_detections = _patch_detections(test_detections, confidences)
assert updated_detections[0][0].confidence == confidences[0][0]
assert updated_detections[0][1].confidence == confidences[1][0]
def _mocked_cluster_predictions(batch_predictions, clustering_config):
"""Mocked cluster_predictions."""
return {'car': [[batch_predictions[0][0]]], 'pedestrian': [[batch_predictions[0][1]]]}
def test_filter_detections():
"""Test _filter_by_confidence."""
# Generate random data for testing.
test_detections = [[Detection(class_name='car', bbox=[0., 0., 16., 16.], confidence=0.8,
bbox_variance=0., num_raw_bboxes=1),
Detection(class_name='car', bbox=[0., 0., 12., 12.], confidence=0.2,
bbox_variance=0., num_raw_bboxes=1)],
[Detection(class_name='car', bbox=[0., 0., 10., 10.], confidence=0.7,
bbox_variance=0., num_raw_bboxes=1)]]
expected_filtered_detections = [[test_detections[0][0]], test_detections[1]]
filtered_detections = _filter_by_confidence(test_detections, confidence_threshold=0.5)
np.testing.assert_equal(filtered_detections, expected_filtered_detections)
@pytest.fixture(scope='function')
def target_class_names():
return ['car', 'pedestrian']
@pytest.fixture(scope='function')
def postprocessor(mocker, target_class_names):
"""Define a PostProcessor object."""
# Mock clustering.
mocker.patch("nvidia_tao_tf1.cv.detectnet_v2.postprocessor.postprocessing.cluster_predictions",
_mocked_cluster_predictions)
# Mock confidence config.
mock_confidence_config = mocker.MagicMock(confidence_threshold=0.3)
image_size = (32., 32.)
mock_postprocessing_config = \
dict.fromkeys(target_class_names,
mocker.MagicMock(confidence_config=mock_confidence_config))
postprocessor = PostProcessor(
postprocessing_config=mock_postprocessing_config,
confidence_models=None,
image_size=image_size)
return postprocessor
def test_postprocessor(mocker, postprocessor, target_class_names):
"""Test the different steps in the postprocessing pipeline."""
clustered_detections = postprocessor.cluster_predictions(test_detections)
assert clustered_detections['car'][0][0] == test_detections[0][0]
assert clustered_detections['pedestrian'][0][0] == test_detections[0][1]
def test_postprocess_predictions(mocker, postprocessor, target_class_names):
"""Test that a the single postprocess_predictions() call applies all expected steps.
The end result should be the same as that of <clustered_detections_with_confidence> in the
test_processor() function.
"""
final_detections = postprocessor.postprocess_predictions(
predictions=test_detections,
target_class_names=target_class_names,
session=mocker.MagicMock()) # Not required because of the patch.
assert final_detections['car'][0][0].confidence == 50.0
assert final_detections['pedestrian'][0][0].confidence == 50.0
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/postprocessor/tests/test_postprocessing.py |
"""Tests for bbox clustering using nms."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
from google.protobuf.text_format import Merge as merge_text_proto
import numpy as np
import pytest
from nvidia_tao_tf1.cv.detectnet_v2.postprocessor.utilities import (
cluster_with_hybrid,
cluster_with_nms,
setup_dbscan_object
)
from nvidia_tao_tf1.cv.detectnet_v2.proto.inference_pb2 import BboxHandlerConfig
bbox_handler_config = """
kitti_dump: true
disable_overlay: false
overlay_linewidth: 2
classwise_bbox_handler_config{
key: "person"
value: {
confidence_model: "aggregate_cov"
output_map: "person"
bbox_color{
R: 0
G: 255
B: 0
}
clustering_config{
clustering_algorithm: HYBRID
nms_iou_threshold: 0.3
nms_confidence_threshold: 0.2
coverage_threshold: 0.005
dbscan_confidence_threshold: 0.9
dbscan_eps: 0.3
dbscan_min_samples: 1
minimum_bounding_box_height: 4
}
}
}
"""
TEST_CLASS = "person"
cluster_weights = (1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
def traverse_up(file_path, num_levels=3):
"""Traverse root up by num_levels.
Args:
file_path (str): Source path to the file.
num_levels (int): Number of levels to traverse up.
Returns:
file_path (str): Updated path moved up by num_levels.
"""
for _ in range(num_levels):
file_path = os.path.dirname(file_path)
return file_path
detectnet_root = traverse_up(os.path.realpath(__file__))
test_fixture_root = os.path.join(
detectnet_root,
"postprocessor/tests/test_fixtures"
)
labels_dbscan_candidates = os.path.join(
test_fixture_root,
"labels_dbscan_cluster_candidates.txt"
)
labels_nms_output = os.path.join(
test_fixture_root,
"labels_nms_output.txt"
)
labels_raw = os.path.join(
test_fixture_root,
"labels_raw.txt"
)
def read_kitti_labels(label_file):
"""Parse kitti label files.
Args:
label_file (str): Path to the kitti label file.
Returns:
label_list (list): List of label fields parsed from each row.
"""
label_list = []
if not os.path.exists(label_file):
raise ValueError("Labelfile : {} does not exist".format(label_file))
with open(label_file, 'r') as lf:
for row in csv.reader(lf, delimiter=' '):
label_list.append(row)
return label_list
def generate_test_fixture(label_list):
"""Generate a test fixture from kitti labels.
Args:
label_list (list): List of parsed kitti labels.
Returns:
dict: bboxes and coverages for the test class, as float32 numpy arrays.
"""
bboxes = []
coverages = []
for obj in label_list:
if obj[0].lower() == TEST_CLASS:
bboxes.append([float(coord) for coord in obj[4:8]])
coverages.append(float(obj[-1]))
bboxes = np.asarray(bboxes, dtype=np.float32)
coverages = np.asarray(coverages, dtype=np.float32)
return {"bboxes": bboxes, "coverages": coverages}
def load_bbox_handler_config(proto_string):
"""Read bbox handler prototxt."""
bbox_handler_proto = BboxHandlerConfig()
merge_text_proto(proto_string, bbox_handler_proto)
return bbox_handler_proto
test_case_1 = {
"raw_predictions": generate_test_fixture(read_kitti_labels(labels_raw)),
"dbscan_candidates": generate_test_fixture(read_kitti_labels(labels_dbscan_candidates)),
"nms_outputs": generate_test_fixture(read_kitti_labels(labels_nms_output)),
"bbox_handler_spec": load_bbox_handler_config(bbox_handler_config)
}
test_data = [test_case_1]
@pytest.mark.parametrize(
"test_fixt",
test_data,
)
def test_dbscan_nms_hybrid(test_fixt):
"""Test hybrid clustering algorithm for detectnet inferences.
Args:
test_fixt (tuple): Tuple containing a dictionary of test cases.
Returns:
No explicit returns.
"""
# Extract the test fixtures.
b_config = test_fixt["bbox_handler_spec"]
raw_predictions = test_fixt["raw_predictions"]
dbscan_detections = test_fixt["dbscan_candidates"]
classwise_bbox_handler_config = dict(b_config.classwise_bbox_handler_config)
clustering_config = classwise_bbox_handler_config[TEST_CLASS].clustering_config
confidence_model = classwise_bbox_handler_config[TEST_CLASS].confidence_model
eps = clustering_config.dbscan_eps
min_samples = clustering_config.dbscan_min_samples
criterion = "IOU"
# Setup dbscan clustering object.
db = setup_dbscan_object(
eps,
min_samples,
criterion
)
# Cluster bboxes using hybrid clustering.
clustered_detections = cluster_with_hybrid(
bboxes=raw_predictions["bboxes"],
covs=raw_predictions["coverages"],
criterion="IOU",
db=db,
confidence_model=confidence_model,
cluster_weights=cluster_weights,
min_height=clustering_config.minimum_bounding_box_height,
nms_iou_threshold=clustering_config.nms_iou_threshold,
confidence_threshold=clustering_config.dbscan_confidence_threshold,
nms_confidence_threshold=clustering_config.nms_confidence_threshold
)
# Cluster dbscan candidates using NMS.
nms_clustered_boxes_per_image = cluster_with_nms(
dbscan_detections["bboxes"],
dbscan_detections["coverages"],
clustering_config.minimum_bounding_box_height,
nms_iou_threshold=clustering_config.nms_iou_threshold,
threshold=clustering_config.nms_confidence_threshold
)
# Check that the number of clustered bboxes matches the expected NMS output.
assert len(clustered_detections) == len(test_fixt["nms_outputs"]["bboxes"])
assert len(nms_clustered_boxes_per_image) == len(test_fixt["nms_outputs"]["bboxes"])
output_bboxes = []
for detection in clustered_detections:
output_bboxes.append(detection.bbox)
output_bboxes = np.asarray(output_bboxes).astype(np.float32)
assert np.array_equal(output_bboxes, test_fixt["nms_outputs"]["bboxes"])
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/postprocessor/tests/test_hybrid_clustering.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test ClusteringConfig builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from google.protobuf.text_format import Merge as merge_text_proto
import pytest
from nvidia_tao_tf1.cv.detectnet_v2.postprocessor.clustering_config import build_clustering_config
from nvidia_tao_tf1.cv.detectnet_v2.postprocessor.clustering_config import ClusteringConfig
from nvidia_tao_tf1.cv.detectnet_v2.proto.experiment_pb2 import Experiment
@pytest.fixture(scope='function')
def experiment_proto():
experiment_proto = Experiment()
prototxt = """
postprocessing_config {
target_class_config {
key: "car"
value: {
clustering_config {
coverage_threshold: 0.5
dbscan_eps: 0.125
dbscan_min_samples: 1
minimum_bounding_box_height: 4
clustering_algorithm: DBSCAN
}
}
}
target_class_config {
key: "pedestrian"
value: {
clustering_config {
coverage_threshold: 0.25
minimum_bounding_box_height: 2
nms_iou_threshold: 0.40
clustering_algorithm: NMS
}
}
}
}
"""
merge_text_proto(prototxt, experiment_proto)
return experiment_proto
def test_build_clustering_config(experiment_proto):
"""Test that clustering_config gets parsed correctly."""
clustering_config = build_clustering_config(experiment_proto.postprocessing_config.
target_class_config['car'].clustering_config)
assert clustering_config.coverage_threshold == 0.5
assert clustering_config.dbscan_eps == 0.125
assert clustering_config.dbscan_min_samples == 1
assert clustering_config.minimum_bounding_box_height == 4
assert clustering_config.clustering_algorithm == "dbscan"
clustering_config = build_clustering_config(experiment_proto.postprocessing_config.
target_class_config['pedestrian'].clustering_config)
assert clustering_config.coverage_threshold == 0.25
assert clustering_config.minimum_bounding_box_height == 2
assert clustering_config.clustering_algorithm == "nms"
assert clustering_config.nms_iou_threshold
def test_clustering_config_limits():
"""Test that ClusteringConfig constructor raises correct errors."""
# Invalid coverage_threshold.
with pytest.raises(ValueError):
ClusteringConfig(2.0, 0.5, 0.5, 1, 0, 0.4, 0.1, 0.2)
# Invalid dbscan_eps.
with pytest.raises(ValueError):
ClusteringConfig(0.5, 2.0, 0.5, 1, 0, 0.2, 0.1, 0.2)
# Invalid dbscan_min_samples.
with pytest.raises(ValueError):
ClusteringConfig(0.5, 0.5, -1.0, 1, 0, 0.2, 0.1, 0.2)
# Invalid minimum_bounding_box_height.
with pytest.raises(ValueError):
ClusteringConfig(0.5, 0.5, 0.5, -1, 0, 0.2, 0.1, 0.2)
with pytest.raises(ValueError):
ClusteringConfig(0.5, 0.5, -1.0, -1, 1, 1.5, 0.1, 0.2)
with pytest.raises(NotImplementedError):
ClusteringConfig(0.5, 0.5, 0.75, 4, 2, 0.5, 0.1, 0.2)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/postprocessor/tests/test_build_clustering_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test PostProcessingConfig builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from google.protobuf.text_format import Merge as merge_text_proto
import pytest
from nvidia_tao_tf1.cv.detectnet_v2.postprocessor.postprocessing_config import (
build_postprocessing_config
)
from nvidia_tao_tf1.cv.detectnet_v2.proto.experiment_pb2 import Experiment
@pytest.fixture(scope='function')
def experiment_proto():
experiment_proto = Experiment()
prototxt = """
postprocessing_config {
target_class_config {
key: "car"
value: {
clustering_config {
coverage_threshold: 0.5
dbscan_eps: 0.125
dbscan_min_samples: 1
minimum_bounding_box_height: 4
clustering_algorithm: DBSCAN
}
confidence_config {
confidence_threshold: 0.75
confidence_model_filename: "car_mlp.hdf5"
}
}
}
target_class_config {
key: "pedestrian"
value: {
clustering_config {
coverage_threshold: 0.25
dbscan_eps: 0.25
dbscan_min_samples: 1
minimum_bounding_box_height: 2
clustering_algorithm: DBSCAN
}
confidence_config {
confidence_threshold: 0.5
confidence_model_filename: "pedestrian_mlp.hdf5"
}
}
}
}
"""
merge_text_proto(prototxt, experiment_proto)
return experiment_proto
def test_build_postprocessing_config(experiment_proto):
"""Test that postprocessing_config gets parsed correctly."""
postprocessing_config = build_postprocessing_config(experiment_proto.postprocessing_config)
assert 'car' in postprocessing_config
assert 'pedestrian' in postprocessing_config
assert len(postprocessing_config) == 2
assert postprocessing_config['car'].clustering_config.coverage_threshold == 0.5
assert postprocessing_config['car'].clustering_config.dbscan_eps == 0.125
assert postprocessing_config['car'].clustering_config.dbscan_min_samples == 1
assert postprocessing_config['car'].clustering_config.minimum_bounding_box_height == 4
assert postprocessing_config['car'].clustering_config.clustering_algorithm == "dbscan"
assert postprocessing_config['car'].confidence_config.confidence_threshold == 0.75
assert postprocessing_config['car'].confidence_config.confidence_model_filename == \
"car_mlp.hdf5"
assert postprocessing_config['pedestrian'].clustering_config.coverage_threshold == 0.25
assert postprocessing_config['pedestrian'].clustering_config.dbscan_eps == 0.25
assert postprocessing_config['pedestrian'].clustering_config.dbscan_min_samples == 1
assert postprocessing_config['pedestrian'].clustering_config.clustering_algorithm == "dbscan"
assert postprocessing_config['pedestrian'].clustering_config.minimum_bounding_box_height == 2
assert postprocessing_config['pedestrian'].confidence_config.confidence_threshold == 0.5
assert postprocessing_config['pedestrian'].confidence_config.confidence_model_filename == \
"pedestrian_mlp.hdf5"
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/postprocessor/tests/test_build_postprocessing_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for bbox clustering using nms."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pytest
from nvidia_tao_tf1.cv.detectnet_v2.postprocessor.utilities import get_keep_indices
# Defining test inputs.
raw_detections = \
np.asarray([[1016.668, 156.726, 1271.648, 492.481, 0.010],
[1023.230, 158.498, 1270.281, 492.082, 0.016],
[1014.993, 156.877, 1265.633, 492.768, 0.010],
[1029.288, 153.725, 1271.930, 482.015, 0.008],
[1026.280, 156.922, 1270.562, 494.780, 0.255],
[1026.861, 158.151, 1270.836, 494.381, 0.394],
[1026.451, 158.498, 1270.016, 493.303, 0.443],
[1026.041, 159.591, 1268.375, 495.754, 0.211],
[1023.136, 156.885, 1270.836, 496.033, 0.058],
[1031.244, 153.453, 1270.570, 423.659, 0.033],
[1027.758, 156.786, 1269.203, 494.916, 0.358],
[1028.305, 157.608, 1268.793, 496.009, 0.623],
[1027.484, 157.819, 1267.016, 496.153, 0.737],
[1028.373, 157.691, 1266.742, 497.925, 0.491],
[1027.758, 155.935, 1265.785, 496.576, 0.155],
[1030.705, 152.733, 1270.783, 370.858, 0.006],
[1032.277, 155.760, 1270.646, 380.230, 0.132],
[1029.543, 157.464, 1270.373, 417.559, 0.204],
[1029.475, 157.879, 1270.168, 497.502, 0.436],
[1030.568, 158.226, 1268.186, 498.867, 0.523],
[1029.885, 159.456, 1267.707, 502.946, 0.304],
[1027.150, 156.478, 1267.434, 507.840, 0.122],
[1031.465, 150.807, 1272.773, 380.366, 0.025],
[1030.781, 157.397, 1271.133, 382.952, 0.074],
[1031.602, 157.676, 1269.902, 411.189, 0.058],
[1030.508, 157.819, 1268.809, 473.217, 0.048],
[1030.234, 158.234, 1270.176, 503.896, 0.026],
[1028.457, 157.699, 1284.668, 518.425, 0.010],
[1379.579, 244.472, 1634.320, 488.824, 0.046],
[1379.477, 244.955, 1635.141, 491.411, 0.199],
[1379.306, 245.099, 1633.227, 491.011, 0.218],
[1379.613, 245.785, 1634.594, 494.683, 0.012],
[1380.373, 243.548, 1632.414, 492.481, 0.015],
[1379.553, 244.676, 1642.258, 493.167, 0.330],
[1380.168, 244.684, 1638.156, 493.039, 0.691],
[1379.894, 244.895, 1638.976, 492.233, 0.615],
[1379.963, 246.056, 1638.430, 491.833, 0.156],
[1378.672, 243.379, 1638.506, 493.431, 0.015],
[1377.168, 244.676, 1640.830, 493.982, 0.357],
[1379.219, 245.159, 1642.607, 494.125, 0.719],
[1378.877, 244.895, 1642.197, 492.368, 0.740],
[1380.449, 245.378, 1641.855, 492.648, 0.197],
[1379.637, 245.897, 1643.504, 493.574, 0.157],
[1379.363, 246.448, 1642.205, 492.632, 0.360],
[1377.313, 246.591, 1642.684, 490.876, 0.399],
[1378.270, 249.042, 1644.461, 491.562, 0.090],
[1383.062, 248.280, 1651.578, 493.311, 0.014],
[1386.754, 249.170, 1648.434, 492.097, 0.012],
[1243.581, 492.780, 1489.641, 700.320, 0.023],
[1251.066, 495.808, 1486.086, 698.971, 0.127],
[1248.195, 495.612, 1483.625, 706.579, 0.088],
[1247.793, 493.154, 1487.188, 694.892, 0.129],
[1249.160, 497.369, 1486.367, 695.849, 0.360],
[1248.545, 498.191, 1485.547, 697.214, 0.378],
[1248.340, 497.384, 1487.188, 705.093, 0.081],
[1251.014, 496.784, 1486.922, 694.756, 0.106],
[1248.963, 498.183, 1486.785, 695.171, 0.344],
[1248.895, 499.751, 1486.717, 696.672, 0.380],
[1248.689, 499.691, 1488.016, 697.086, 0.082],
[1276.588, 500.550, 1485.699, 694.077, 0.007],
[1257.926, 501.711, 1487.750, 694.085, 0.080],
[1255.738, 502.398, 1488.023, 695.111, 0.106],
[1247.945, 502.405, 1487.682, 694.949, 0.007],
[1270.770, 781.954, 1573.875, 970.867, 0.088],
[1271.658, 785.015, 1575.789, 970.603, 0.170],
[1271.453, 788.416, 1580.164, 1025.168, 0.133],
[1265.582, 782.768, 1573.473, 971.817, 0.136],
[1270.162, 785.694, 1576.480, 970.603, 0.212],
[1273.512, 789.162, 1577.164, 969.593, 0.114],
[1271.195, 785.313, 1573.070, 968.424, 0.132],
[1273.930, 787.322, 1576.352, 966.803, 0.210],
[1275.570, 790.791, 1576.762, 966.472, 0.127],
[1320.695, 786.907, 1572.463, 967.610, 0.091],
[1325.344, 788.544, 1572.599, 965.853, 0.153],
[1312.629, 791.537, 1573.488, 965.861, 0.097],
[1322.207, 792.004, 1572.437, 963.003, 0.016]], dtype=np.float32)
# Defining GT outputs.
filtered_detections = \
    np.asarray([[1378.877, 244.895, 1642.197, 492.368],
                [1027.484, 157.819, 1267.016, 496.153],
                [1248.895, 499.751, 1486.717, 696.672],
                [1270.162, 785.694, 1576.480, 970.603]], dtype=np.float32)
# Formatting test cases.
test_data = [(raw_detections, filtered_detections)]
def run_nms_function(*args, **kwargs):
"""Simple wrapper to set-up a py-fixture to test NMS."""
indices = get_keep_indices(*args, **kwargs)
return indices
@pytest.mark.parametrize(
"raw_detections, filterred_detections",
test_data,
)
def test_nms_clustering(raw_detections, filtered_detections):
"""Simple function to test the NMS clustering function."""
# defining nms constants.
min_height = 4
nms_iou_threshold = 0.3
threshold = 0.2
if raw_detections.size == 0:
raw_bboxes = np.asarray([])
raw_coverages = np.asarray([])
else:
raw_bboxes = raw_detections[:, :4]
raw_coverages = raw_detections[:, 4:].flatten()
indices = run_nms_function(raw_bboxes, raw_coverages, min_height, Nt=nms_iou_threshold,
threshold=threshold)
clustered_boxes = np.take_along_axis(raw_bboxes, indices, axis=0)
assert np.array_equal(clustered_boxes, filtered_detections)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/postprocessor/tests/test_nms_clustering.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for bbox clustering."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import numpy.testing as npt
import pytest
from six.moves import zip
from nvidia_tao_tf1.cv.detectnet_v2.postprocessor.cluster import cluster_predictions
from nvidia_tao_tf1.cv.detectnet_v2.postprocessor.cluster import mean_angle
from nvidia_tao_tf1.cv.detectnet_v2.postprocessor.clustering_config import ClusteringConfig
from nvidia_tao_tf1.cv.detectnet_v2.postprocessor.confidence_config import ConfidenceConfig
from nvidia_tao_tf1.cv.detectnet_v2.postprocessor.detection import Detection
from nvidia_tao_tf1.cv.detectnet_v2.postprocessor.postprocessing_config import PostProcessingConfig
class ClusteringTestCase:
def __init__(self, target_classes, raw_detections, postprocessing_config, outputs):
self.target_classes = target_classes
self.raw_detections = raw_detections
self.postprocessing_config = postprocessing_config
self.outputs = outputs
def create_default_case(shape=(64, 64)):
"""Create default test case to be modified."""
target_classes = ['car']
# (num_images, num_classes, num_outputs, grid_height, grid_width)
bboxes = np.zeros((1, 1, 4) + shape, dtype=np.float32)
cov = np.zeros((1, 1, 1) + shape, dtype=np.float32)
raw_detections = {
'bbox': bboxes,
'cov': cov
}
clustering_config = ClusteringConfig(
coverage_threshold=0.005,
dbscan_eps=0.15,
dbscan_min_samples=1,
minimum_bounding_box_height=4,
clustering_algorithm=0,
nms_iou_threshold=0.4,
dbscan_confidence_threshold=0.1,
nms_confidence_threshold=0.1)
confidence_config = ConfidenceConfig(confidence_model_filename=None,
confidence_threshold=0.0)
car_postprocessing_config = PostProcessingConfig(clustering_config, confidence_config)
postprocessing_config = {}
postprocessing_config['car'] = car_postprocessing_config
outputs = [[]]
default_test_case = ClusteringTestCase(target_classes, raw_detections,
postprocessing_config, outputs)
return default_test_case
# Test cases and ids (for pytest) are compiled into this lists
test_cases = [create_default_case()]
test_ids = ['empty_prediction']
# Test whether averaging of the bounding box coordinates is done right
case = create_default_case()
case.raw_detections['bbox'][0, 0, 0:2, 0:5, 0:5] = 0.
case.raw_detections['bbox'][0, 0, 2:4, 0:5, 0:5] = 16.
case.raw_detections['bbox'][0, 0, 0:2, 5:10, 5:10] = .1
case.raw_detections['bbox'][0, 0, 2:4, 5:10, 5:10] = 16.1
case.raw_detections['cov'][0, 0, 0, :24, :24] = 1
case.outputs = [[
Detection(
class_name='car',
bbox=[0.05, 0.05, 16.05, 16.05],
confidence=50.0,
bbox_variance=0.,
num_raw_bboxes=1)
]]
test_cases += [case]
test_ids += ['bbox_coordinate_averaging']
# Test whether additional outputs (depth) is clustered right
case = create_default_case()
case.raw_detections['bbox'][0, 0, 0:2, 0:5, 0:5] = 0.
case.raw_detections['bbox'][0, 0, 2:4, 0:5, 0:5] = 16.
case.raw_detections['bbox'][0, 0, 0:2, 5:10, 5:10] = .1
case.raw_detections['bbox'][0, 0, 2:4, 5:10, 5:10] = 16.1
case.raw_detections['depth'] = np.zeros_like(case.raw_detections['cov'])
case.raw_detections['depth'][0, 0, 0, 0:5, 0:5] = 10.0
case.raw_detections['depth'][0, 0, 0, 5:10, 5:10] = 20.0
case.raw_detections['cov'][0, 0, 0, :24, :24] = 1
case.outputs = [[
Detection(
class_name='car',
bbox=[0.05, 0.05, 16.05, 16.05],
confidence=50.0,
bbox_variance=0.,
num_raw_bboxes=1,
depth=15.0)
]]
test_cases += [case]
test_ids += ['depth_prediction_averaging']
# Test whether coverage_threshold filters grid cells with low coverage values.
case = create_default_case()
case.raw_detections['bbox'][0, 0, 2, :5, :5] = 16
case.raw_detections['bbox'][0, 0, 3, :5, :5] = 16
case.raw_detections['bbox'][0, 0, 0, 5:10, 5:10] = 16
case.raw_detections['bbox'][0, 0, 1, 5:10, 5:10] = 16
case.raw_detections['bbox'][0, 0, 2, 5:10, 5:10] = 32
case.raw_detections['bbox'][0, 0, 3, 5:10, 5:10] = 32
case.raw_detections['cov'][0, 0, 0, :5, :5] = 0.01
case.raw_detections['cov'][0, 0, 0, 5:10, 5:10] = 1
case.outputs = [[
Detection(
class_name='car',
bbox=[16, 16, 32, 32],
confidence=25.,
bbox_variance=0.,
num_raw_bboxes=1)
]]
case.postprocessing_config['car'].clustering_config.coverage_threshold = 0.1
test_cases += [case]
test_ids += ['coverage_thresholding']
# Test whether minimum_bounding_box_height works
case = create_default_case()
case.raw_detections['bbox'][0, 0, 2, :5, :5] = 5
case.raw_detections['bbox'][0, 0, 3, :5, :5] = 5
case.raw_detections['bbox'][0, 0, 0, 5:10, 5:10] = 5
case.raw_detections['bbox'][0, 0, 1, 5:10, 5:10] = 5
case.raw_detections['bbox'][0, 0, 2, 5:10, 5:10] = 15
case.raw_detections['bbox'][0, 0, 3, 5:10, 5:10] = 15
# Add one bbox which shouldn't be considered because it only has one sample
case.raw_detections['bbox'][0, 0, 0, 11, 11] = 15
case.raw_detections['bbox'][0, 0, 1, 11, 11] = 15
case.raw_detections['bbox'][0, 0, 2, 11, 11] = 25
case.raw_detections['bbox'][0, 0, 3, 11, 11] = 25
case.raw_detections['cov'][0, 0, 0, :11, :11] = 1
case.postprocessing_config['car'].clustering_config.minimum_bounding_box_height = 6
case.outputs = [[
Detection(
class_name='car',
bbox=[5, 5, 15, 15],
confidence=25.,
bbox_variance=0.,
num_raw_bboxes=1)
]]
test_cases += [case]
test_ids += ['minimum_bounding_box_height']
# Test clustering of two classes
case = create_default_case()
case.target_classes = ['car', 'pedestrian']
case.raw_detections['cov'] = np.zeros((1, 2, 1, 64, 64))
case.raw_detections['cov'][0, 0, 0, :10, :10] = 1 # first object
case.raw_detections['cov'][0, 1, 0, 10:20, 10:20] = 1 # second
case.raw_detections['bbox'] = np.zeros((1, 2, 4, 64, 64))
case.raw_detections['bbox'][0, 0, 0:2, 0:5, 0:5] = 0.
case.raw_detections['bbox'][0, 0, 2:4, 0:5, 0:5] = 16.
case.raw_detections['bbox'][0, 0, 0:2, 5:10, 5:10] = 0.
case.raw_detections['bbox'][0, 0, 2:4, 5:10, 5:10] = 16.
case.raw_detections['bbox'][0, 1, 0:2, 10:15, 10:15] = 16.
case.raw_detections['bbox'][0, 1, 2:4, 10:15, 10:15] = 32.
case.raw_detections['bbox'][0, 1, 0:2, 15:20, 15:20] = 16.
case.raw_detections['bbox'][0, 1, 2:4, 15:20, 15:20] = 32.
case.outputs = [[
Detection(
class_name='car',
bbox=[0., 0., 16., 16.],
confidence=50.0,
bbox_variance=0.,
num_raw_bboxes=1),
Detection(
class_name='pedestrian',
bbox=[16., 16., 32., 32.],
confidence=50.0,
bbox_variance=0.,
num_raw_bboxes=1)
]]
# Add clustering parameters for the second class
pedestrian_clustering_config = ClusteringConfig(
coverage_threshold=0.005,
dbscan_eps=0.15,
dbscan_min_samples=1,
minimum_bounding_box_height=4,
clustering_algorithm=0,
nms_iou_threshold=None,
dbscan_confidence_threshold=0.1,
nms_confidence_threshold=0.1)
confidence_config = ConfidenceConfig(confidence_model_filename=None,
confidence_threshold=0.0)
pedestrian_postprocessing_config = PostProcessingConfig(pedestrian_clustering_config,
confidence_config)
case.postprocessing_config['pedestrian'] = pedestrian_postprocessing_config
test_cases += [case]
test_ids += ['two_bounding_boxes']
class TestClustering:
"""Test cluster_predictions."""
@pytest.mark.parametrize('case', test_cases, ids=test_ids)
def test_cluster_detections(self, case):
"""Cluster bboxes and test if they are clustered right."""
target_classes = case.target_classes
predictions = dict()
raw_detections = case.raw_detections
for target_class_idx, target_class in enumerate(target_classes):
predictions[target_class] = {}
for objective in raw_detections:
predictions[target_class][objective] = \
raw_detections[objective][:, target_class_idx, :]
clustered_detections = cluster_predictions(predictions, case.postprocessing_config)
# Loop over all frames for each target class and check that the number of detections
# matches the expected count and that bbox coordinates and confidences agree.
for target_class in target_classes:
for frame_idx, frame_expected_detections in enumerate(case.outputs):
expected_detections = [detection for detection in frame_expected_detections if
detection.class_name == target_class]
detections = clustered_detections[target_class][frame_idx]
assert len(detections) == len(expected_detections)
for detection, expected_detection in zip(detections, expected_detections):
npt.assert_allclose(detection.bbox, expected_detection.bbox, atol=1e-5)
npt.assert_allclose(detection.confidence, expected_detection.confidence)
if expected_detection.depth is not None:
npt.assert_allclose(detection.depth, expected_detection.depth, atol=1e-5)
@pytest.mark.parametrize(
"angles,weights,expected_angle",
[(np.array([0.0, 1.0, 1.5]), None, 0.8513678), # None --> equal weighting.
(np.array([1.2, -0.5, -0.7]), np.array([0.1, 0.2, 0.3]), -0.41795065)
]
)
def test_mean_angle(angles, weights, expected_angle):
"""Test that the weighted average of angles is calculated properly.
Also checks that the periodicity of 2*pi is taken into account.
"""
# First, use given inputs.
calculated_angle = mean_angle(angles=angles, weights=weights)
assert np.allclose(calculated_angle, expected_angle)
# Now, force a periodic shift.
num_periods = np.random.randint(low=1, high=10)
sign = np.random.choice([-1., 1.])
shifted_angles = angles + sign * num_periods * 2. * np.pi
calculated_angle = mean_angle(angles=shifted_angles, weights=weights)
assert np.allclose(calculated_angle, expected_angle)
# Check that the scaling of weights does not matter.
if weights is not None:
# Choose a random scaling factor.
scale = np.random.uniform(low=0.2, high=5.0)
calculated_angle = mean_angle(angles=angles, weights=scale*weights)
assert np.allclose(calculated_angle, expected_angle)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/postprocessor/tests/test_clustering.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IVA checkpoint hook for tlt files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
from zipfile import ZipFile
from nvidia_tao_tf1.core.decorators import override, subclass
from nvidia_tao_tf1.encoding import encoding
import tensorflow as tf
from tensorflow.python.platform import tf_logging as logging
INFREQUENT_SUMMARY_KEY = b'infrequent_summary'
@subclass
class IVACheckpointSaverHook(tf.estimator.CheckpointSaverHook):
"""Saves time files only for every N steps or seconds."""
def __init__(self,
checkpoint_dir,
key=None,
save_secs=None,
save_steps=None,
saver=None,
checkpoint_basename="model.ckpt",
steps_per_epoch=None,
scaffold=None,
listeners=None):
"""Initialize an IVACheckpointSaverHook.
Args:
checkpoint_dir (str): Base directory for the checkpoint files.
key (str): The key to decode the model.
save_secs (int): Save every N secs.
save_steps (int): Save every N steps.
saver (Saver): Object used for saving.
checkpoint_basename (str): Base name for the checkpoint files.
steps_per_epoch (int): Number of training steps per epoch, used to derive
the epoch index for checkpoint file naming.
scaffold (Scaffold): Use to get saver object.
listeners (list of CheckpointSaverListener): Subclass instances.
Used for callbacks that run immediately before or after this hook saves
the checkpoint.
Raises:
ValueError: One of `save_steps` or `save_secs` should be set.
ValueError: At most one of `saver` or `scaffold` should be set.
"""
# Initialize the parent class.
super(IVACheckpointSaverHook, self).__init__(checkpoint_dir,
save_secs=save_secs,
save_steps=save_steps,
saver=saver,
checkpoint_basename=checkpoint_basename,
scaffold=scaffold,
listeners=listeners)
self.key = key
self.steps_per_epoch = steps_per_epoch
@override
def _save(self, session, step):
"""Saves the latest checkpoint, returns should_stop."""
logging.info("Saving checkpoints for step-%d.", step)
# Saving the keras model.
for l in self._listeners:
l.before_save(session, step)
should_stop = False
# Setting up checkpoint saving.
self._save_encrypted_checkpoint(session, step)
for l in self._listeners:
if l.after_save(session, step):
logging.info(
"A CheckpointSaverListener requested that training be stopped. "
"listener: {}".format(l))
should_stop = True
return should_stop
def _save_encrypted_checkpoint(self, session, step):
"""Saves the encrypted checkpoint."""
# Get checkpoint saver and save to tempfile.
saver = self._get_saver()
temp_ckpt_path = tempfile.mkdtemp()
# Template for zip file.
epoch = int(step / self.steps_per_epoch)
ckzip_file = os.path.join(self._checkpoint_dir, 'model.epoch-{}.ckzip'.format(epoch))
# Saving session to the zip file.
saver.save(session, os.path.join(temp_ckpt_path, "model.ckpt"), global_step=epoch)
prev_dir = os.getcwd()
os.chdir(temp_ckpt_path)
# Zip the checkpoint files to one file.
with ZipFile(ckzip_file, 'w') as zip_object:
for ckpt_file in os.listdir(temp_ckpt_path):
zip_object.write(ckpt_file)
# Restore previous execution directory and remove tmp files/directories.
os.chdir(prev_dir)
shutil.rmtree(temp_ckpt_path)
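# Reading a saved .ckzip back (illustrative sketch, not part of this class;
# `saver` and `session` mirror the objects used above):
#
#     with ZipFile("model.epoch-5.ckzip", "r") as zf:
#         zf.extractall("/tmp/ckpt")
#     saver.restore(session, "/tmp/ckpt/model.ckpt-5")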
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/tfhooks/checkpoint_saver_hook.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Useful hooks to the tensorflow session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/tfhooks/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An early stopping hook that watches validation cost."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import tensorflow as tf
from nvidia_tao_tf1.core import distribution
from nvidia_tao_tf1.core.distribution.distribution import hvd
from nvidia_tao_tf1.core.utils import summary_from_value
from nvidia_tao_tf1.cv.detectnet_v2.tfhooks.validation_hook import ValidationHook
class LRAnnealingEarlyStoppingHook(ValidationHook):
"""Watch DetectNetv2 validation loss during training to stop training early.
This integrates with the soft-start annealing learning rate schedule as follows.
The learning rate is ramped up for num_soft_start_epochs from min_learning rate
to max_learning rate. Then, validation loss is computed every validation_period epochs.
If no improvement in loss is observable for num_patience_steps, the learning rate
is annealed back to min_learning_rate over num_annealing_epochs. Then, the validation
loss is monitored again, and after no improvement for num_patience_steps is observed,
training is stopped.
"""
def __init__(
self,
validation_period,
last_epoch,
steps_per_epoch,
results_dir,
first_validation_epoch,
num_validation_steps,
num_patience_steps,
max_learning_rate,
min_learning_rate,
num_soft_start_epochs,
num_annealing_epochs,
validation_cost=None,
):
"""Create a hook object for validating DetectNetv2 during training.
Args:
validation_period: How often (in epochs) the model is validated during training.
last_epoch: Last epoch of training.
steps_per_epoch: Number of steps per epoch.
results_dir: Directory for logging the validation results.
first_validation_epoch: The first validation epoch. Validation happens on epochs
first_validation_epoch + i * validation_period, i=0, ...
num_validation_steps: Number of steps for a single validation run.
num_patience_steps: Number of epochs we tolerate w/o validation loss improvement.
max_learning_rate: Maximum learning rate in the soft-start-annealing learning rate
schedule.
min_learning_rate: Minimum learning rate in the soft-start-annealing learning rate
schedule.
num_soft_start_epochs: Number of epochs over which we soft-start the learning rate.
num_annealing_epochs: Number of epochs over which we anneal the learning rate.
validation_cost (Tensor): Validation cost tensor.
"""
super(LRAnnealingEarlyStoppingHook, self).__init__(
None,
validation_period,
last_epoch,
steps_per_epoch,
results_dir,
first_validation_epoch,
)
if validation_period < 1:
raise ValueError("Early stopping hook requires validation_period >= 1")
if validation_period > num_patience_steps:
raise ValueError(
f"Validation period {validation_period} should be <= "
f"Number of patience steps {num_patience_steps}"
)
if first_validation_epoch < 0:
raise ValueError("Early stopping hook requires first_validation_epoch >= 0")
if min_learning_rate <= 0.0:
raise ValueError(
"Early stopping min_learning_rate must be > 0"
)
if max_learning_rate <= 0.0:
raise ValueError(
"Early stopping max_learning_rate must be > 0"
)
if num_soft_start_epochs < 0.0:
raise ValueError("Early stopping num_soft_start_epochs must be >= 0")
if num_annealing_epochs < 0.0:
raise ValueError(
"Early stopping num_annealing_epochs must be >= 0"
)
if num_patience_steps > last_epoch:
raise ValueError(
f"Number of patience steps {num_patience_steps} "
f"> last_epoch {last_epoch}"
)
self.num_validation_steps = num_validation_steps
self.num_patience_steps = num_patience_steps
self.validation_cost = validation_cost
self.max_learning_rate = max_learning_rate
self.min_learning_rate = min_learning_rate
self.soft_start_steps = int(num_soft_start_epochs * steps_per_epoch)
self.annealing_steps = int(num_annealing_epochs * steps_per_epoch)
self.global_step = tf.compat.v1.train.get_or_create_global_step()
self._session = None
# Learning rate variable.
self.learning_rate = None
# Smallest cost so far.
self._min_cost = None
# Epoch when we observed smallest cost.
self._min_cost_epoch = None
# Kill-flag to request stop training.
self._should_continue = None
# Starting step for current phase (soft-start, or anneal).
self._lr_phase_start_step = None
# Op to set the lr_phase_start_step to the current step.
self._set_lr_phase_start_step_op = None
# Step inside current phase.
self._lr_phase_step = None
# Flag indicating whether we are inside the annealing phase.
self._in_annealing_phase = None
# Op to broadcast state to workers.
self._broadcast_state_op = None
# Op to set flag for stopping training.
self._set_request_stop_op = None
# Op to set start_annealing.
self._set_start_annealing_op = None
# Initialize the variables above.
self._make_control_variables()
        logging.info(
            (
                "Early stopping: first validation epoch {}, {} validation steps, "
                "{} patience steps, {} soft-start steps, {} annealing steps, "
                "{} max_learning_rate, {} min_learning_rate"
).format(
first_validation_epoch,
self.num_validation_steps,
self.num_patience_steps,
self.soft_start_steps,
self.annealing_steps,
self.max_learning_rate,
self.min_learning_rate,
)
)
def _make_control_variables(self):
"""Initialize internal TF control variables."""
with tf.compat.v1.name_scope("EarlyStopping"):
self._should_continue = tf.Variable(True, name="should_continue")
self._lr_phase_start_step = tf.Variable(0, dtype=tf.int64, name="lr_phase_start_step")
self._lr_phase_step = tf.cast(
tf.compat.v1.train.get_or_create_global_step() - self._lr_phase_start_step,
tf.float32
)
self._in_annealing_phase = tf.Variable(False, name="in_annealing_phase")
self._broadcast_state_op = tf.group(
self._should_continue.assign(
hvd().broadcast(
self._should_continue,
distribution.get_distributor()._master_rank,
)
),
self._in_annealing_phase.assign(
hvd().broadcast(
self._in_annealing_phase,
distribution.get_distributor()._master_rank,
)
),
self._lr_phase_start_step.assign(
hvd().broadcast(
self._lr_phase_start_step,
distribution.get_distributor()._master_rank,
)
),
)
self._set_request_stop_op = self._should_continue.assign(False)
self.learning_rate = get_variable_softstart_annealing_learning_rate(
self._lr_phase_step,
self.soft_start_steps,
self.annealing_steps,
self._in_annealing_phase,
self.max_learning_rate,
self.min_learning_rate,
)
self._set_lr_phase_start_step_op = self._lr_phase_start_step.assign(
tf.compat.v1.train.get_or_create_global_step()
)
self._set_start_annealing_op = self._in_annealing_phase.assign(True)
def _start_annealing(self):
"""Helper function to initiate annealing phase."""
self._session.run([self._set_lr_phase_start_step_op, self._set_start_annealing_op])
def after_create_session(self, session, coord):
"""Store session for later use."""
self._session = session
def broadcast_state(self):
"""Broadcast current state."""
self._session.run(self._broadcast_state_op)
def _compute_validation_cost(self):
"""Compute total validation cost using current session."""
total_cost = 0
for _ in range(self.num_validation_steps):
total_cost += self._session.run(self.validation_cost)
return total_cost / self.num_validation_steps
def _validate_master(self, run_context):
"""Run validation on master."""
current_epoch = self.epoch_counter
logging.info(
"Validation at epoch {}/{}".format(self.epoch_counter, self.last_epoch)
)
logging.info(
"Running {} steps to compute validation cost".format(
self.num_validation_steps
)
)
validation_cost = self._compute_validation_cost()
logging.info(
"Validation cost {} at epoch {}".format(validation_cost, current_epoch)
)
# Loss decreased.
if self._min_cost is None or self._min_cost > validation_cost:
self._min_cost = validation_cost
self._min_cost_epoch = current_epoch
logging.info(
"New best validation cost {} at epoch {}".format(
validation_cost, current_epoch
)
)
# Loss did not decrease and we exceeded patience.
elif current_epoch - self._min_cost_epoch >= self.num_patience_steps:
logging.info(
"Validation cost did not improve for {} epochs, which is >= "
"num_patience_steps {}.".format(
current_epoch - self._min_cost_epoch,
self.num_patience_steps
)
)
logging.info(
"Best cost {} at epoch {}. Current epoch {}".format(
self._min_cost, self._min_cost_epoch, current_epoch
)
)
annealing_started = self._session.run(self._in_annealing_phase)
annealing_finished = (
annealing_started
and self._session.run(self._lr_phase_step) > self.annealing_steps
)
# If we are after annealing phase, stop training.
if annealing_started and annealing_finished:
logging.info("Requesting to stop training.")
self._session.run(self._set_request_stop_op)
# If we are before annealing phase, start annealing.
elif not annealing_started:
logging.info(
"Starting to anneal learning rate. Setting new best validation cost to current."
)
self._start_annealing()
self._min_cost = validation_cost
self._min_cost_epoch = current_epoch
else:
logging.info(
"Last best validation cost {} at epoch {}".format(
self._min_cost, self._min_cost_epoch
)
)
summary = summary_from_value("validation_cost", validation_cost)
self.writer.add_summary(summary, current_epoch)
def validate(self, run_context):
"""Called at the end of each epoch to validate the model."""
if distribution.get_distributor().is_master():
self._validate_master(run_context)
# Broadcast new state.
self.broadcast_state()
if not self._session.run(self._should_continue):
logging.info("Requested to stop training.")
run_context.request_stop()
def get_variable_softstart_annealing_learning_rate(
lr_step, soft_start_steps, annealing_steps, start_annealing, base_lr, min_lr
):
"""Return learning rate at current epoch progress.
    When start_annealing is False, the learning rate is ramped up from min_lr to base_lr on a
    logarithmic scale. After soft_start_steps, the learning rate reaches base_lr and is kept
    there until start_annealing becomes True. Then, the learning rate is decreased from
    base_lr to min_lr, again on a logarithmic scale, after which it is kept at min_lr for
    the rest of training.
    Note: start_annealing should not be set to True before soft_start_steps of warm-up to
    base_lr have elapsed, since the annealing phase always starts at base_lr.
Args:
lr_step (tf.Variable): Step number inside the current phase (soft-start, or annealing).
soft_start_steps (int): Number of soft-start steps.
annealing_steps (int): Number of annealing steps.
start_annealing (tf.Variable): Boolean variable indicating whether we are in
soft-start phase (False) or annealing phase (True).
base_lr (float): Maximum learning rate.
min_lr (float): Minimum learning rate.
Returns:
lr: A tensor (scalar float) indicating the learning rate.
"""
# Need this as float32.
lr_step = tf.cast(lr_step, tf.float32)
# Ratio in soft-start phase, going from 0 to 1.
if soft_start_steps > 0:
t_softstart = lr_step / soft_start_steps
else: # Learning rate starts from base_lr.
t_softstart = tf.constant(1.0, dtype=tf.float32)
if annealing_steps > 0:
# Ratio in annealing phase, going from 1 to 0.
t_annealing = 1.0 - lr_step / annealing_steps
else: # Learning rate is never annealed.
t_annealing = tf.constant(1.0, dtype=tf.float32)
    # Ratio is at least 0, even if we run more than annealing_steps.
t_annealing = tf.compat.v1.where(
t_annealing < 0.0, tf.constant(0.0, dtype=tf.float32), t_annealing
)
# Select appropriate schedule.
t = tf.compat.v1.where(start_annealing, t_annealing, t_softstart)
# Limit ratio to max 1.0.
t = tf.compat.v1.where(t > 1.0, tf.constant(1.0, dtype=tf.float32), t)
# Adapt learning rate linearly on log scale between min_lr and base_lr.
lr = tf.exp(tf.math.log(min_lr) + t * (tf.math.log(base_lr) - tf.math.log(min_lr)))
return tf.cast(lr, tf.float32)
def build_early_stopping_hook(
evaluation_config,
steps_per_epoch,
results_dir,
num_validation_steps,
experiment_spec,
validation_cost
):
"""Builder function to create early stopping hook.
Args:
evaluation_config (nvidia_tao_tf1.cv.detectnet_v2.evaluation.EvaluationConfig):
Configuration for evaluation.
steps_per_epoch (int): Total number of training steps per epoch.
results_dir (str): Where to store results and write TensorBoard summaries.
num_validation_steps (int): Number of steps needed for validation.
experiment_spec (nvidia_tao_tf1.cv.detectnet_v2.proto.experiment_pb2):
Experiment spec message.
validation_cost (Tensor): Validation cost tensor. Can be
None for workers, since validation cost is only computed on master.
Returns:
learning_rate: Learning rate schedule created.
"""
learning_rate_config = experiment_spec.training_config.learning_rate
if not learning_rate_config.HasField("early_stopping_annealing_schedule"):
raise ValueError("Early stopping hook is missing "
"learning_rate_config.early_stopping_annealing_schedule")
params = learning_rate_config.early_stopping_annealing_schedule
num_epochs = experiment_spec.training_config.num_epochs
return LRAnnealingEarlyStoppingHook(
validation_period=evaluation_config.validation_period_during_training,
last_epoch=num_epochs,
steps_per_epoch=steps_per_epoch,
results_dir=results_dir,
first_validation_epoch=evaluation_config.first_validation_epoch,
num_validation_steps=num_validation_steps,
num_patience_steps=params.patience_steps,
max_learning_rate=params.max_learning_rate,
min_learning_rate=params.min_learning_rate,
num_soft_start_epochs=params.soft_start_epochs,
num_annealing_epochs=params.annealing_epochs,
validation_cost=validation_cost
)
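# --- Illustrative sketch (not part of the original module) ---
# A minimal NumPy re-implementation of the schedule computed by
# get_variable_softstart_annealing_learning_rate above, handy for inspecting
# the learning-rate curve offline. Plain floats replace the TF tensors, and
# all numbers below are hypothetical.
if __name__ == "__main__":
    import numpy as np
    def softstart_annealing_lr(step, soft_start_steps, annealing_steps,
                               in_annealing, base_lr, min_lr):
        """Return the learning rate for a phase-relative step (NumPy sketch)."""
        if in_annealing:
            t = (1.0 - step / annealing_steps) if annealing_steps > 0 else 1.0
        else:
            t = (step / soft_start_steps) if soft_start_steps > 0 else 1.0
        t = min(max(t, 0.0), 1.0)
        # Interpolate linearly on a log scale between min_lr and base_lr.
        return float(np.exp(np.log(min_lr) + t * (np.log(base_lr) - np.log(min_lr))))
    # Soft-start: the rate ramps from 5e-6 to 5e-4 over 100 steps.
    print([softstart_annealing_lr(s, 100, 50, False, 5e-4, 5e-6) for s in (0, 50, 100)])
    # -> [5e-06, 5e-05 (the log-scale midpoint), 5e-04], up to float rounding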
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/tfhooks/early_stopping_hook.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for setting up DetectNet_v2 training hooks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from nvidia_tao_tf1.core import distribution
import nvidia_tao_tf1.core.hooks
from nvidia_tao_tf1.core.hooks.validation_hook import ValidationHook
from nvidia_tao_tf1.cv.detectnet_v2.tfhooks.checkpoint_saver_hook import IVACheckpointSaverHook
INFREQUENT_SUMMARY_KEY = 'infrequent_summary'
def get_common_training_hooks(log_tensors, log_every_n_secs, checkpoint_n_steps, model, last_step,
checkpoint_dir, scaffold, summary_every_n_steps,
infrequent_summary_every_n_steps,
steps_per_epoch=None, validation_every_n_steps=None,
evaluator=None, model_store_config=None, listeners=None,
max_ckpt_to_keep=5, key=None):
"""Set up commonly used hooks for tensorflow training sessions.
Args:
log_tensors (dict): A dictionary of tensors to print to stdout. The keys of the dict should
be strings, and the values should be tensors.
log_every_n_secs (int): Log the ``log_tensors`` argument every ``n`` seconds.
checkpoint_n_steps (int, list): Perform a tensorflow and Keras checkpoint every ``n`` steps.
model: An instance of ``keras.models.Model`` to be saved with each snapshot.
last_step (int): The step after which the associated session's `should_stop` method should
evaluate to ``True``.
checkpoint_dir: The directory used for saving the graph, summaries and checkpoints. In case
it's ``None``, no checkpoints and model files will be saved and no tensorboard summaries
will be produced.
scaffold: An instance of the same ``tf.train.Scaffold`` that will be passed to the
training session.
        summary_every_n_steps: Save summaries every ``n`` steps. The steps per second will also
            be printed to the console.
infrequent_summary_every_n_steps: Save infrequent summaries every ``n`` steps. This is for
summaries that should be rarely evaluated, like images or histograms. This relates
to summaries marked with the ``INFREQUENT_SUMMARY_KEY`` key.
steps_per_epoch (int): Number of steps per epoch.
validation_every_n_steps (int): Validate every ``n`` steps. Should be specified if evaluator
object is not None.
evaluator: An instance of Evaluator class that performs evaluation (default=None).
model_store_config (dict): a dictionary consisting of the following key/values:
client (:any:`modelstore.Client`): client to use to push model checkpoints.
model_id (str): ID of model to assign model checkpoints to.
param_set_id (str): ID of param set to assign model checkpoint to.
fold (int): fold to assign model checkpoints to.
listeners: A list of CheckpointSaverListener objects (or child classes). Can be None.
If provided, will leave out the default listeners provided otherwise.
max_ckpt_to_keep: Maximum number of model checkpoints to keep.
Returns:
A list of hooks, all inheriting from ``tf.SessionRunHook``.
"""
hooks = [tf.estimator.LoggingTensorHook(tensors=log_tensors, every_n_secs=log_every_n_secs),
tf.estimator.StopAtStepHook(last_step=last_step),
# Setup hook that cleanly stops the session if SIGUSR1 is received.
nvidia_tao_tf1.core.hooks.SignalHandlerHook(), ]
if model is not None:
hooks.append(nvidia_tao_tf1.core.hooks.KerasModelHook(model))
# If we are running in a distributed setting, we need to broadcast the initial variables.
if distribution.get_distributor().is_distributed():
hooks.append(distribution.get_distributor().broadcast_global_variables_hook())
# Save checkpoints only on master to prevent other workers from corrupting them.
if distribution.get_distributor().is_master():
step_counter_hook = tf.estimator.StepCounterHook(
every_n_steps=summary_every_n_steps,
output_dir=checkpoint_dir
)
hooks.append(step_counter_hook)
if checkpoint_dir is not None:
if listeners is None:
listeners = []
if model is not None:
keras_checkpoint_listener = nvidia_tao_tf1.core.hooks.KerasCheckpointListener(
model=model, checkpoint_dir=checkpoint_dir,
max_to_keep=max_ckpt_to_keep)
listeners.insert(0, keras_checkpoint_listener)
if not isinstance(checkpoint_n_steps, list):
checkpoint_n_steps = [checkpoint_n_steps]
for n_steps in checkpoint_n_steps:
checkpoint_hook = IVACheckpointSaverHook(checkpoint_dir=checkpoint_dir,
key=key,
save_steps=n_steps,
listeners=listeners,
steps_per_epoch=steps_per_epoch,
scaffold=scaffold)
hooks.append(checkpoint_hook)
# Set up the frequent and infrequent summary savers.
summary_saver_directory = os.path.join(checkpoint_dir, "events")
if not os.path.exists(summary_saver_directory):
os.makedirs(summary_saver_directory)
if summary_every_n_steps > 0:
summary_saver = tf.estimator.SummarySaverHook(
save_steps=summary_every_n_steps,
scaffold=scaffold,
output_dir=summary_saver_directory
)
hooks.append(summary_saver)
if infrequent_summary_every_n_steps > 0:
infrequent_summary_op = tf.compat.v1.summary.merge_all(key=INFREQUENT_SUMMARY_KEY)
if infrequent_summary_op is None:
raise ValueError('Infrequent summaries requested, but None found.')
infrequent_summary_saver = tf.estimator.SummarySaverHook(
save_steps=infrequent_summary_every_n_steps,
output_dir=summary_saver_directory,
summary_op=infrequent_summary_op)
hooks.append(infrequent_summary_saver)
# Set up evaluator hook after checkpoint saver hook, so that evaluation is performed
# on the latest saved model.
if evaluator is not None:
if validation_every_n_steps is not None:
hooks.append(ValidationHook(evaluator, validation_every_n_steps))
else:
raise ValueError('Specify ``validation_every_n_steps`` if Evaluator is not None')
return hooks
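# --- Illustrative sketch (hypothetical values, not part of the original module) ---
# One way a training script might assemble these hooks. It assumes a
# single-process environment in which the distribution module needs no extra
# setup; checkpoint_dir=None disables checkpointing and summary saving.
if __name__ == "__main__":
    loss = tf.compat.v1.placeholder(tf.float32, name="total_loss")
    hooks = get_common_training_hooks(
        log_tensors={"loss": loss},
        log_every_n_secs=30,
        checkpoint_n_steps=1000,
        model=None,  # no Keras model to snapshot in this sketch
        last_step=10000,
        checkpoint_dir=None,
        scaffold=tf.compat.v1.train.Scaffold(),
        summary_every_n_steps=100,
        infrequent_summary_every_n_steps=0,
        steps_per_epoch=500,
    )
    print([type(h).__name__ for h in hooks])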
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/tfhooks/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hook for job progress monitoring on clusters."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import timedelta
import logging
import time
import tensorflow.compat.v1 as tf
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
logger = logging.getLogger(__name__)
MONITOR_JSON_FILENAME = "monitor.json"
def write_status_json(
save_path, loss_value, current_epoch, max_epoch, time_per_epoch, ETA, learning_rate
):
"""Write out the data to the status.json file initiated by the experiment for monitoring.
Args:
        save_path (str): Path where monitor.json would be saved, i.e. the result
            directory. Currently unused; the data is routed through the status logger.
        loss_value (float): Current value of the loss to be recorded in the monitor.
current_epoch (int): Current epoch.
max_epoch (int): Total number of epochs.
time_per_epoch (float): Time per epoch in seconds.
        ETA (float): Estimated time remaining until the end of training, in seconds.
learning_rate (float): Learning rate tensor.
Returns:
monitor_data (dict): The monitor data as a dict.
"""
monitor_data = {
"epoch": current_epoch,
"max_epoch": max_epoch,
"time_per_epoch": str(timedelta(seconds=time_per_epoch)),
"eta": str(timedelta(seconds=ETA)),
}
s_logger = status_logging.get_status_logger()
# Save the json file.
try:
s_logger.graphical = {
"loss": loss_value,
"learning_rate": learning_rate
}
s_logger.write(
data=monitor_data,
status_level=status_logging.Status.RUNNING)
except IOError:
# We let this pass because we do not want the json file writing to crash the whole job.
pass
# Adding the data back after the graphical data was set to the status logger.
monitor_data["loss"] = loss_value
monitor_data["learning_rate"] = learning_rate
return monitor_data
class TaskProgressMonitorHook(tf.estimator.SessionRunHook):
"""Log loss and epochs for monitoring progress of cluster jobs.
Writes the current training progress (current loss, current epoch and
maximum epoch) to a json file.
"""
def __init__(self, loggable_tensors, save_path, epochs, steps_per_epoch):
"""Initialization.
Args:
            loggable_tensors (dict): Tensors to fetch at every step. Expected keys are
                'loss', 'step', and optionally 'learning_rate'.
save_path (str): Absolute save path.
epochs (int): Number of training epochs.
steps_per_epoch (int): Number of steps per epoch.
"""
# Define the tensors to be fetched at every step.
self._fetches = loggable_tensors
self.save_path = save_path
self.epochs = epochs
self.steps_per_epoch = steps_per_epoch
# Initialize variables for epoch time calculation.
self.time_per_epoch = 0
self._step_start_time = None
# Closest estimate of the start time, in case starting from mid-epoch.
self._epoch_start_time = time.time()
def before_run(self, run_context):
"""Request loss and global step from the session.
Args:
run_context: A `SessionRunContext` object.
Returns:
A `SessionRunArgs` object.
"""
# Record start time for each step. Use the value later, if this step started an epoch.
self._step_start_time = time.time()
# Assign the tensors to be fetched.
return tf.train.SessionRunArgs(self._fetches)
def after_run(self, run_context, run_values):
"""Write the progress to json-file after each epoch.
Args:
run_context: A `SessionRunContext` object.
run_values: A `SessionRunValues` object. Contains the loss value
requested by before_run().
"""
# Get the global step value.
step = run_values.results["step"]
if (step + 1) % self.steps_per_epoch == 0:
# Last step of an epoch is completed.
epoch_end_time = time.time()
self.time_per_epoch = epoch_end_time - self._epoch_start_time
if step % self.steps_per_epoch == 0:
# First step of a new epoch is completed. Store the time when step was started.
self._epoch_start_time = self._step_start_time
loss_value = run_values.results["loss"]
learning_rate = str(run_values.results.get("learning_rate", "Not logged"))
current_epoch = int(step // self.steps_per_epoch)
monitor_data = write_status_json(
save_path=self.save_path,
loss_value=float(loss_value),
current_epoch=current_epoch,
max_epoch=self.epochs,
time_per_epoch=self.time_per_epoch,
ETA=(self.epochs - current_epoch) * self.time_per_epoch,
learning_rate=learning_rate
)
logger.info(
"Epoch %d/%d: loss: %0.5f learning rate: %s Time taken: %s ETA: %s"
% (
monitor_data["epoch"],
monitor_data["max_epoch"],
monitor_data["loss"],
monitor_data["learning_rate"],
monitor_data["time_per_epoch"],
monitor_data["eta"],
)
)
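# --- Illustrative sketch (not part of the original module) ---
# The ETA arithmetic used in after_run, with hypothetical numbers: after
# finishing 2 of 10 epochs at 60 seconds per epoch, 8 epochs remain.
if __name__ == "__main__":
    epochs, current_epoch, time_per_epoch = 10, 2, 60.0
    eta = (epochs - current_epoch) * time_per_epoch
    print(str(timedelta(seconds=time_per_epoch)))  # -> 0:01:00
    print(str(timedelta(seconds=eta)))  # -> 0:08:00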
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/tfhooks/task_progress_monitor_hook.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A base class for a hook to compute model validation during training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from nvidia_tao_tf1.core.utils import summary_from_value
from nvidia_tao_tf1.cv.detectnet_v2.visualization.visualizer import \
DetectNetTBVisualizer as Visualizer
class ValidationHook(tf.estimator.SessionRunHook):
"""ValidationHook to run evaluation for DetectNet V2 Model."""
def __init__(self, evaluator, validation_period, last_epoch,
steps_per_epoch, results_dir, first_validation_epoch=0):
"""Create a hook object for validating a gridbox model during training.
Args:
evaluator: Evaluator object for running evaluation on a trained model.
validation_period: How often (in epochs) the model is validated during training.
last_epoch: Last epoch of training.
steps_per_epoch: Number of steps per epoch.
results_dir: Directory for logging the validation results.
first_validation_epoch: The first validation epoch. Validation happens on epochs
first_validation_epoch + i * validation_period, i=0, ...
"""
self.evaluator = evaluator
self.validation_period = validation_period
self.last_epoch = last_epoch
self.steps_per_epoch = steps_per_epoch
self.steps_counter = 0
self.epoch_counter = 0
self.first_validation_epoch = first_validation_epoch
self._global_step_tensor = tf.compat.v1.train.get_or_create_global_step()
# Use an existing FileWriter.
events_dir = os.path.join(
results_dir, "events"
)
self.writer = tf.summary.FileWriterCache.get(events_dir)
def before_run(self, run_context):
"""Request the value of global step.
Args:
run_context: A `SessionRunContext` object.
Returns:
A `SessionRunArgs` object.
"""
return tf.estimator.SessionRunArgs(self._global_step_tensor)
    def _step(self, global_step_value):
        """Process one training step.
        Args:
            global_step_value (int): Current value of the global step.
        Returns:
            Boolean indicating whether it's time to run validation.
        """
# Global step is zero after the first step, but self.steps_counter
# needs to be one for backward compatibility.
self.steps_counter = global_step_value + 1
# Validate only at the end of the epoch and not in between epochs.
if self.steps_counter % self.steps_per_epoch != 0:
return False
# Calculate the current epoch.
self.epoch_counter = int(self.steps_counter // self.steps_per_epoch)
# Validate at every self.first_validation_epoch + i * self.validation_period epoch
# and at the last epoch.
is_validation_epoch = (self.epoch_counter >= self.first_validation_epoch) and \
((self.epoch_counter - self.first_validation_epoch) % self.validation_period == 0)
return is_validation_epoch or self.epoch_counter == self.last_epoch
def after_run(self, run_context, run_values):
"""Called after each call to run()."""
run_validate = self._step(run_values.results)
if run_validate is True:
self.validate(run_context)
def validate(self, run_context):
"""Called at the end of each epoch to validate the model."""
# TODO(jrasanen) Optionally print metrics_results_with_confidence?
metrics_result, validation_cost, median_inference_time = \
self.evaluator.evaluate(run_context.session)
print("Epoch %d/%d" % (self.epoch_counter, self.last_epoch))
print('=========================')
self.evaluator.print_metrics(metrics_result, validation_cost, median_inference_time)
if Visualizer.enabled:
self._add_to_tensorboard(metrics_result, validation_cost)
def _add_to_tensorboard(self, metrics_result, validation_cost, bucket='mdrt'):
"""Add metrics to tensorboard."""
summary = summary_from_value('validation_cost', validation_cost)
self.writer.add_summary(summary, self.steps_counter)
summary = summary_from_value(
'mean average precision (mAP) (in %)',
metrics_result['mAP']
)
self.writer.add_summary(summary, self.steps_counter)
classwise_ap = metrics_result["average_precisions"]
for class_name, ap in classwise_ap.items():
tensor_name = f'{class_name}_AP (in %)'
summary = summary_from_value(tensor_name, ap)
self.writer.add_summary(summary, self.steps_counter)
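# --- Illustrative sketch (not part of the original module) ---
# The epoch-scheduling rule from _step, reproduced with plain integers:
# validation runs at first_validation_epoch + i * validation_period and at
# the last epoch. The settings below are hypothetical.
if __name__ == "__main__":
    first, period, last = 2, 3, 10
    validated = [epoch for epoch in range(1, last + 1)
                 if (epoch >= first and (epoch - first) % period == 0) or epoch == last]
    print(validated)  # -> [2, 5, 8, 10]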
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/tfhooks/validation_hook.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the TaskProgressMonitorHook."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import time
import mock
import numpy as np
import tensorflow.compat.v1 as tf
from nvidia_tao_tf1.cv.common.logging import logging as status_logging
from nvidia_tao_tf1.cv.detectnet_v2.tfhooks.task_progress_monitor_hook import (
TaskProgressMonitorHook
)
if sys.version_info >= (3, 0):
_BUILTIN_OPEN = "builtins.open"
else:
_BUILTIN_OPEN = "__builtin__.open"
status_logging.set_status_logger(status_logging.StatusLogger(filename="/root", is_master=False))
@mock.patch("time.time")
def test_task_progress_monitor_hook(mock_time):
"""Test that monitor.json is correctly written."""
num_epochs = 2
steps_per_epoch = 3
    # time.time is patched globally, so supply an inexhaustible clock instead of a
    # fixed-length list: __init__, every before_run call and the epoch-boundary
    # after_run calls all consume timestamps, which would exhaust four values.
    fake_times = iter([1000, 1060, 2000, 2180])
    mock_time.side_effect = lambda: next(fake_times, 2180)
loggable_tensors = {}
with tf.device("/cpu:0"):
x = tf.placeholder(1)
y = tf.placeholder(1)
z = tf.placeholder(1)
loggable_tensors["loss"] = x
loggable_tensors["learning_rate"] = y
loggable_tensors["step"] = z
progress_monitor_hook = TaskProgressMonitorHook(
loggable_tensors, "", num_epochs, steps_per_epoch
)
# Input data is a sequence of numbers.
data = np.arange(num_epochs * steps_per_epoch)
learning_rate = np.arange(num_epochs * steps_per_epoch)
expected_time_per_epoch = {0: "0:00:00", 1: "0:01:00"}
expected_ETA = {0: "0:00:00", 1: "0:01:00"}
mock_open = mock.mock_open()
handle = mock_open()
with mock.patch(_BUILTIN_OPEN, mock_open, create=True):
with tf.train.SingularMonitoredSession(hooks=[progress_monitor_hook]) as sess:
for epoch in range(num_epochs):
for step in range(steps_per_epoch):
sess.run([loggable_tensors], feed_dict={
x: data[epoch * steps_per_epoch + step],
y: learning_rate[epoch * steps_per_epoch + step],
z: epoch * steps_per_epoch + step})
                expected_write_data = {
                    "epoch": epoch,
                    "loss": steps_per_epoch * epoch,
                    "max_epoch": num_epochs,
                    "eta": expected_ETA[epoch],
                    "time_per_epoch": expected_time_per_epoch[epoch],
                    "learning_rate": epoch * steps_per_epoch
                }
                # Note: called_once_with is not a real mock assertion, so this
                # acts as a smoke test on the write path only.
                assert handle.write.called_once_with(expected_write_data)
def test_epoch_time():
"""Test that time taken per epoch is calculated correctly."""
num_epochs = 2
steps_per_epoch = 2
x = tf.placeholder(1)
progress_monitor_hook = TaskProgressMonitorHook(
x, "", num_epochs, steps_per_epoch)
expected_time_per_epoch = {0: "0:00:00", 1: "0:00:02"}
expected_ETA = {0: "0:00:00", 1: "0:00:02"}
# Mock run_values argument for after_run()
progress_monitor_hook.begin()
mock_open = mock.mock_open()
handle = mock_open()
with mock.patch(_BUILTIN_OPEN, mock_open, create=True):
global_step = 0
for epoch in range(num_epochs):
for _ in range(steps_per_epoch):
mock_run_values = mock.MagicMock(
results={"loss": 2, "step": global_step, "learning_rate": 0.1}
)
progress_monitor_hook.before_run(None)
time.sleep(1)
progress_monitor_hook.after_run(None, mock_run_values)
                expected_write_data = {
                    "epoch": epoch,
                    "loss": 2,
                    "max_epoch": num_epochs,
                    "eta": expected_ETA[epoch],
                    "time_per_epoch": expected_time_per_epoch[epoch],
                    "learning_rate": 0.1,
                }
                # Note: called_once_with is not a real mock assertion, so this
                # acts as a smoke test on the write path only.
                assert handle.write.called_once_with(expected_write_data)
global_step += 1
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/tfhooks/tests/test_task_progress_monitor_hook.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Objective builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nvidia_tao_tf1.cv.detectnet_v2.objectives.bbox_objective import BboxObjective
from nvidia_tao_tf1.cv.detectnet_v2.objectives.cov_norm_objective import CovNormObjective
from nvidia_tao_tf1.cv.detectnet_v2.objectives.cov_objective import CovObjective
def build_objective(name, output_height, output_width, input_height, input_width, objective_config):
"""Construct objective of desired type.
Args:
        name (str): Objective name, one of 'bbox', 'cov', or 'cov_norm'.
        output_height, output_width (int): Shape of the DNN output tensor.
        input_height, input_width (int): Shape of the DNN input tensor.
        objective_config: Objective configuration proto.
"""
if objective_config:
input_layer_name = objective_config.input
else:
input_layer_name = None
if name == 'bbox':
scale = objective_config.scale
offset = objective_config.offset
objective = BboxObjective(input_layer_name, output_height, output_width,
input_height, input_width, scale, offset, loss_ratios=None)
elif name == 'cov':
objective = CovObjective(input_layer_name, output_height, output_width)
elif name == 'cov_norm':
objective = CovNormObjective(
input_layer_name, output_height, output_width)
else:
raise ValueError("Unknown objective: %s" % name)
return objective
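# --- Illustrative sketch (hypothetical config, not part of the original module) ---
# Selecting an objective by name. The stand-in config below carries only the
# fields that build_objective reads for the 'bbox' case.
if __name__ == "__main__":
    class _FakeBboxConfig(object):
        input = None
        scale = 35.0
        offset = 0.5
    bbox = build_objective('bbox', output_height=34, output_width=60,
                           input_height=544, input_width=960,
                           objective_config=_FakeBboxConfig())
    print(bbox.name, bbox.num_channels)  # -> bbox 4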
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/objectives/build_objective.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bounding box coordinates objective."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import tensorflow as tf
import nvidia_tao_tf1.core as tao_core
from nvidia_tao_tf1.cv.detectnet_v2.cost_function.cost_functions import \
weighted_GIOU_cost, weighted_L1_cost
from nvidia_tao_tf1.cv.detectnet_v2.objectives.base_objective import BaseObjective
from nvidia_tao_tf1.cv.detectnet_v2.visualization.visualizer import \
DetectNetTBVisualizer as Visualizer
BBOX_LOSS_BASE_TYPES = {"L1", "GIOU"}
logger = logging.getLogger(__name__)
class BboxObjective(BaseObjective):
"""Bounding box objective.
BBoxObjective implements the bounding box objective-specific parts of the
following functionalities:
- Rasterization (labels -> tensors)
- Bounding box objective transforms (label domain <-> DNN output domain)
- Cost function
- Bounding box objective-specific visualization
- Spatial transformation of objectives (applying spatial transformation
matrices to predicted tensors)
"""
def __init__(self, input_layer_name, output_height, output_width,
input_height, input_width, scale, offset, loss_ratios=None):
"""Constructor for the bounding box objective.
Args:
input_layer_name (string): Name of the input layer of the Objective head.
If None the last layer of the model will be used.
output_height, output_width: Shape of the DNN output tensor.
input_height, input_width: Shape of the DNN input tensor.
scale (float): Bounding box scaling factor
offset (float): Bounding box offset
loss_ratios (dict(str, float)): Ratios of loss_function.
Keys should be "L1" or "GIOU", values are the ratios of the certain loss,
eg. {"L1": 0.5, "GIOU": 0.5} means 0.5 * L1_loss + 0.5 * GIOU_loss.
"""
super(BboxObjective, self).__init__(
input_layer_name, output_height, output_width)
self.name = 'bbox'
self.num_channels = 4
self.gradient_flag = tao_core.processors.BboxRasterizer.GRADIENT_MODE_PASSTHROUGH
# Bbox objective specific properties
self.input_height = input_height
self.input_width = input_width
self.scale = scale
self.offset = offset
self.loss_ratios = {}
if not loss_ratios:
logger.info("Default L1 loss function will be used.")
self.loss_ratios = {"L1": 1.0}
else:
            for loss_function_name, ratio in loss_ratios.items():
                loss_function_type = loss_function_name.upper()
                if loss_function_type not in BBOX_LOSS_BASE_TYPES:
                    raise ValueError("Bbox loss function '{}' is not supported"
                                     .format(loss_function_name))
                elif ratio <= 0.0:
                    raise ValueError("Ratio of loss {} is {} and should be a positive number."
                                     .format(loss_function_name, ratio))
                else:
                    # Store the normalized (upper-case) name so the comparison
                    # against "L1" in cost() also matches lower-case input.
                    self.loss_ratios[loss_function_type] = ratio
def cost(self, y_true, y_pred, target_class, loss_mask=None):
"""Bounding box cost function.
Args:
y_true: GT tensor dictionary. Contains keys 'bbox' and 'cov_norm'
y_pred: Prediction tensor dictionary. Contains key 'bbox'
target_class: (TargetClass) for which to create the cost
loss_mask: (tf.Tensor) Loss mask to multiply the cost by.
Returns:
cost: TF scalar.
"""
assert 'cov_norm' in y_true
assert 'bbox' in y_true
assert 'bbox' in y_pred
# Compute 'bbox' cost.
bbox_target = y_true['bbox']
bbox_pred = y_pred['bbox']
bbox_weight = y_true['cov_norm']
bbox_loss_mask = 1.0 if loss_mask is None else loss_mask
        # Accumulate the configured loss terms weighted by self.loss_ratios.
        bbox_cost = 0.0
for loss_function_name, ratio in self.loss_ratios.items():
if loss_function_name == "L1":
# Use L1-loss for bbox regression.
cost_item = weighted_L1_cost(bbox_target, bbox_pred,
bbox_weight, bbox_loss_mask)
else:
# Use GIOU-loss for bbox regression.
abs_bbox_target = self._predictions_to_absolute_per_class(
bbox_target)
abs_bbox_pred = self._predictions_to_absolute_per_class(
bbox_pred)
cost_item = weighted_GIOU_cost(abs_bbox_target, abs_bbox_pred,
bbox_weight, bbox_loss_mask)
bbox_cost = bbox_cost + ratio * cost_item
mean_cost = tf.reduce_mean(bbox_cost)
# Visualize cost, target, and prediction.
if Visualizer.enabled:
# Visualize mean losses (scalar) always.
tf.summary.scalar('mean_cost_%s_bbox' %
target_class.name, mean_cost)
# Visualize tensors, if it is enabled in the spec. Use absolute
# scale to avoid Tensorflow automatic scaling. This facilitates
# comparing images as the network trains.
Visualizer.image('%s_bbox_cost' % target_class.name, bbox_cost,
value_range=[-0.125, 0.125],
collections=[tao_core.hooks.utils.INFREQUENT_SUMMARY_KEY])
Visualizer.image('%s_bbox_gt' % target_class.name, bbox_target,
value_range=[-4.0, 4.0],
collections=[tao_core.hooks.utils.INFREQUENT_SUMMARY_KEY])
Visualizer.image('%s_bbox_pred' % target_class.name, bbox_pred,
value_range=[-4.0, 4.0],
collections=[tao_core.hooks.utils.INFREQUENT_SUMMARY_KEY])
if isinstance(loss_mask, tf.Tensor):
Visualizer.image('%s_bbox_loss_mask' % target_class.name, bbox_loss_mask,
collections=[tao_core.hooks.utils.INFREQUENT_SUMMARY_KEY])
return mean_cost
def target_gradient(self, ground_truth_label):
"""Bounding box gradient.
The bounding box gradients (4, one for each box edge) tell the rasterizer
to output the distances to each of the bounding box edges from the output pixel.
Args:
ground_truth_label: dictionary of label attributes. Uses the attribute
on key 'target/output_space_coordinates'
Returns:
The gradients' coefficients.
"""
coordinates = ground_truth_label['target/output_space_coordinates']
# Input coordinates are already in network output "space", i.e. input image pixel
# space divided by stride.
xmin = coordinates[0]
ymin = coordinates[1]
xmax = coordinates[2]
ymax = coordinates[3]
# Scaled width and height of the bounding box
bbox_scale_x = float(self.input_width) / \
(float(self.output_width) * self.scale)
bbox_scale_y = float(self.input_height) / \
(float(self.output_height) * self.scale)
dx = (xmax - xmin) * bbox_scale_x
dy = (ymax - ymin) * bbox_scale_y
# Bounding box gradient offset values for x and y directions
ox = oy = self.offset / self.scale
# Values of the gradient for distance to left edge of the bounding box at
# columns xmin and xmax. The gradient increases linearly from ox at column
# xmin to dx+ox at column xmax.
L = [ox, dx + ox]
# Values of the gradient for distance to top edge of the bounding box at
# rows ymin and ymax. The gradient increases linearly from oy at row
# ymin to dy+oy at row ymax.
T = [oy, dy + oy]
# Values of the gradient for distance to right edge of the bounding box at
# columns xmin and xmax. The gradient decreases linearly from dx-ox at column
# xmin to -ox at column xmax.
R = [dx - ox, -ox]
# Values of the gradient for distance to bottom edge of the bounding box at
# rows ymin and ymax. The gradient decreases linearly from dy-oy at row
# ymin to -oy at row ymax.
B = [dy - oy, -oy]
# Bbox coordinates gradient definitions. Gradient coefficients are of the form
# [x_slope, y_slope, offset], and are computed from values at two endpoints by
# the helper function _gradient_from_endpoints.
#
# The first element in the bbox_coeffs list is the gradient for distance to
# the left edge of the bounding box. That gradient has a value of L[0] at
# the left edge of the bbox (x=xmin) and value of L[1] at the right edge of
# the bbox (x=xmax), with a linear gradient in between. The gradient is vertical,
# i.e. constant for every row, because the y-coordinate of the both endpoints
# is the same (here 0.0, but could be in fact chosen arbitrarily).
# Similarly, the second element contains the gradient coefficients for the
# distance to the top edge of the bounding box. Here the value is T[0] at
# the top edge (y=ymin), increasing linearly to T[1] at the bottom edge (y=ymax).
# The other two gradients are set up similarly.
bbox_coeffs = [_gradient_from_endpoints(xmin, 0., L[0], xmax, 0., L[1]),
_gradient_from_endpoints(0., ymin, T[0], 0., ymax, T[1]),
_gradient_from_endpoints(xmin, 0., R[0], xmax, 0., R[1]),
_gradient_from_endpoints(0., ymin, B[0], 0., ymax, B[1])]
gradient = tf.transpose(bbox_coeffs, (2, 0, 1))
return gradient
def predictions_to_absolute(self, prediction):
"""Convert grid cell center-relative coordinates to absolute coordinates.
Convert the bounding box coordinate prediction to absolute coordinates in
input image plane. Undo scaling and offset done for training.
Absolute bbox coordinate is computed as (example for left edge):
L = x_center + offset - pred[:, :, 0, :, :] * scale
        The output coordinates are further clipped here such that the predictions
        are within the input image boundaries.
Args:
prediction (tensor): shape (batch, class, self.num_channels, height, width)
Returns:
transformed prediction (tensor)
"""
in_h = self.input_height
in_w = self.input_width
out_h = self.output_height
out_w = self.output_width
# Construct a 2D grid of cell x and y coordinates and add offset.
grid_max_x = tf.cast(out_w - 1, tf.float32) * \
float(in_w) / float(out_w)
grid_max_y = tf.cast(out_h - 1, tf.float32) * \
float(in_h) / float(out_h)
grid_x, grid_y = tf.meshgrid(tf.linspace(self.offset, grid_max_x + self.offset, out_w),
tf.linspace(self.offset, grid_max_y + self.offset, out_h))
# Multiply LTRB values by bbox_scale to obtain the same scale as the image.
# Convert from relative to absolute coordinates by adding the grid cell center
        # coordinates. Clip by the image boundary and constrain width and height
        # to be non-negative.
coords = tf.unstack(prediction * self.scale, axis=2)
coordsL = tf.clip_by_value(grid_x - coords[0], 0., in_w)
coordsT = tf.clip_by_value(grid_y - coords[1], 0., in_h)
coordsR = tf.clip_by_value(grid_x + coords[2], coordsL, in_w)
coordsB = tf.clip_by_value(grid_y + coords[3], coordsT, in_h)
coords = tf.stack([coordsL, coordsT, coordsR, coordsB], axis=2)
return coords
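    # Worked example (hypothetical numbers): with a 960x544 input, a 60x34
    # output grid (stride 16), scale=35.0 and offset=0.5, the grid cell at
    # column 10, row 5 has center (160.5, 80.5). A raw prediction of
    # [1.0, 1.0, 1.0, 1.0] for that cell then decodes to
    # L = 160.5 - 35 = 125.5, T = 80.5 - 35 = 45.5,
    # R = 160.5 + 35 = 195.5, B = 80.5 + 35 = 115.5 (before clipping).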
def transform_predictions(self, prediction, matrices=None):
"""Transform bounding box predictions by spatial transformation matrices.
Args:
prediction (tensor): shape (batch, class, self.num_channels, height, width)
matrices: A tensor of 3x3 transformation matrices, shape (batch, 3, 3).
Returns:
transformed prediction (tensor)
"""
if matrices is None:
return prediction
num_classes = int(prediction.shape[1])
height = int(prediction.shape[3])
width = int(prediction.shape[4])
x1 = prediction[:, :, 0]
y1 = prediction[:, :, 1]
x2 = prediction[:, :, 2]
y2 = prediction[:, :, 3]
one = tf.ones_like(x1)
# Construct a batch of top-left and bottom-right bbox coordinate vectors.
# x1-y2 = [n,c,h,w], matrices = [n,3,3].
# Stack top-left and bottom right coordinate vectors into a tensor [n,c,h,w,6].
c = tf.stack([x1, y1, one, x2, y2, one], axis=4)
# Reshape into a batch of vec3s, shape [n,c*h*w*2,3].
c = tf.reshape(c, (-1, num_classes*height*width*2, 3))
# Transform the coordinate vectors by the matrices. This loops over the outmost
# dimension performing n matmuls, each consisting of c*h*w*2 vec3 by mat3x3 multiplies.
c = tf.matmul(c, matrices)
# Reshape back into a tensor of shape [n,c,h,w,6].
c = tf.reshape(c, (-1, num_classes, height, width, 6))
# Unstack the last dimension to arrive at a list of 6 coords of shape [n,c,h,w].
c = tf.unstack(c, axis=4)
# Compute min-max of bbox corners.
x1 = tf.minimum(c[0], c[3])
y1 = tf.minimum(c[1], c[4])
x2 = tf.maximum(c[0], c[3])
y2 = tf.maximum(c[1], c[4])
# Reconstruct bbox coordinate tensor [n,c,4,h,w].
bbox_tensor = tf.stack([x1, y1, x2, y2], axis=2)
return bbox_tensor
def _predictions_to_absolute_per_class(self, relative_coords):
"""Wrap predictions_to_absolute to be adapted to coordinate shape [B, C, H, W].
Args:
relative_coords (tf.Tensor): Tensors of relative coordinates in output feature space.
Returns:
abs_coords (tf.Tensor): Tensors of absolute coordinates in input image space.
"""
original_shape = [-1, 4, self.output_height, self.output_width]
expand_shape = [-1, 1, 4, self.output_height, self.output_width]
relative_coords_expand = tf.reshape(relative_coords, expand_shape)
abs_coords_expand = self.predictions_to_absolute(
relative_coords_expand)
abs_coords = tf.reshape(abs_coords_expand, original_shape)
return abs_coords
def _gradient_from_endpoints(sx, sy, svalue, ex, ey, evalue):
"""Compute gradient coefficients based on values at two points.
Args:
sx: starting point x coordinate
sy: starting point y coordinate
svalue: value at the starting point
ex: ending point x coordinate
ey: ending point y coordinate
evalue: value at the ending point
Returns:
Gradient coefficients (slope_x, slope_y, offset).
"""
# edge = [ex - sx, ey - sy]
# p = [px - sx, py - sy]
# ratio = dot(p, edge) / |edge|^2
# value = (1-ratio) * svalue + ratio * evalue
# ->
# l = 1 / |edge|^2
# ratio = ((ex - sx) * (px - sx) + (ey - sy) * (py - sy)) * l
# ->
# dvalue = (evalue - svalue), dx = (ex - sx), dy = (ey - sy)
# value = dvalue * dx * l * px +
# dvalue * dy * l * py +
# svalue - dvalue * dx * l * sx - dvalue * dy * l * sy
# ->
# A = dvalue * dx * l
# B = dvalue * dy * l
# C = svalue - dvalue * dx * l * sx - dvalue * dy * l * sy
dx = ex - sx
dy = ey - sy
l = dx * dx + dy * dy # noqa: E741
# Avoid division by zero with degenerate bboxes. This effectively clamps the smallest
# allowed bbox to a tenth of a pixel (note that l is edge length squared). Note that
# this is just a safety measure. Dataset converters should have removed degenerate
# bboxes, but augmentation might scale them.
l = tf.maximum(l, 0.01) # noqa: E741
dvalue = (evalue - svalue) / l
dvx = dvalue * dx
dvy = dvalue * dy
offset = svalue - (dvx * sx + dvy * sy)
vec = [dvx, dvy, offset]
return vec
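# --- Illustrative sketch (not part of the original module) ---
# _gradient_from_endpoints returns plane coefficients (A, B, C) such that
# value(px, py) = A * px + B * py + C interpolates linearly between the two
# endpoint values. A midpoint check of a horizontal gradient, assuming eager
# execution is available in this TF build:
if __name__ == "__main__":
    tf.compat.v1.enable_eager_execution()
    # Value 0.0 at (x=2, y=0) rising to 8.0 at (x=10, y=0).
    a, b, c = _gradient_from_endpoints(2., 0., 0., 10., 0., 8.)
    print(float(a * 6.0 + b * 0.0 + c))  # midpoint x=6 -> 4.0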
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/objectives/bbox_objective.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""Define a lightweight class for configuring ObjectiveLabelFilter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class ObjectiveLabelFilterConfig(object):
"""Lightweight class with the information necessary to instantiate a ObjectiveLabelFilter."""
def __init__(self,
label_filter,
objective_names=None,
target_class_names=None):
"""Constructor.
Args:
label_filter: LabelFilter instance.
objective_names (list of str): List of objective names to which this label filter config
should apply. If None, indicates the config should be for all objectives.
target_class_names (list of str): List of target class names to which this label filter
config should apply. If None, indicates the config should be for all target classes.
"""
self.label_filter = label_filter
self.objective_names = set(
objective_names) if objective_names is not None else None
self.target_class_names = \
set(target_class_names) if target_class_names is not None else None
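# --- Illustrative sketch (hypothetical filter, not part of the original module) ---
# The constructor only normalizes the name lists to sets (None meaning "apply
# to all"), so any LabelFilter-like object can stand in here.
if __name__ == "__main__":
    dummy_filter = object()  # stand-in for a real LabelFilter instance
    config = ObjectiveLabelFilterConfig(label_filter=dummy_filter,
                                        objective_names=['cov'],
                                        target_class_names=['car', 'person'])
    print(config.objective_names, config.target_class_names)
    # -> {'cov'} {'car', 'person'} (set ordering may vary)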
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/objectives/objective_label_filter_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Coverage objective."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import nvidia_tao_tf1.core as tao_core
from nvidia_tao_tf1.cv.detectnet_v2.cost_function.cost_functions import (
weighted_binary_cross_entropy_cost
)
from nvidia_tao_tf1.cv.detectnet_v2.objectives.base_objective import BaseObjective
from nvidia_tao_tf1.cv.detectnet_v2.visualization.visualizer import \
DetectNetTBVisualizer as Visualizer
class CovObjective(BaseObjective):
"""Coverage objective.
CovObjective implements the coverage objective-specific parts of the
following functionalities:
- Rasterization (labels -> tensors)
- Cost function
- Objective-specific visualization
"""
def __init__(self, input_layer_name, output_height, output_width):
"""Constructor for the coverage objective.
Args:
input_layer_name (string): Name of the input layer of the Objective head.
If None the last layer of the model will be used.
output_height, output_width: Shape of the DNN output tensor.
"""
super(CovObjective, self).__init__(
input_layer_name, output_height, output_width)
self.name = 'cov'
self.num_channels = 1
self.activation = 'sigmoid'
self.gradient_flag = tao_core.processors.BboxRasterizer.GRADIENT_MODE_MULTIPLY_BY_COVERAGE
def cost(self, y_true, y_pred, target_class, loss_mask=None):
"""Coverage cost function.
Args:
y_true: GT tensor dictionary. Contains key 'cov'
y_pred: Prediction tensor dictionary. Contains key 'cov'
target_class: (TargetClass) for which to create the cost
loss_mask: (tf.Tensor) Loss mask to multiply the cost by.
Returns:
cost: TF scalar.
"""
assert 'cov' in y_true
assert 'cov' in y_pred
cov_target = y_true['cov']
cov_pred = y_pred['cov']
cov_weight = target_class.coverage_foreground_weight
cov_loss_mask = 1.0 if loss_mask is None else loss_mask
cov_cost = weighted_binary_cross_entropy_cost(cov_target, cov_pred,
cov_weight, cov_loss_mask)
mean_cost = tf.reduce_mean(cov_cost)
# Visualize cost, target, and prediction.
if Visualizer.enabled:
# Visualize mean losses (scalar) always.
tf.summary.scalar('mean_cost_%s_cov' %
target_class.name, mean_cost)
# Visualize tensors, if it is enabled in the spec. Use absolute
# scale to avoid Tensorflow automatic scaling. This facilitates
# comparing images as the network trains.
value_range = [0.0, 1.0]
Visualizer.image('%s_cov_cost' % target_class.name, cov_cost,
value_range=value_range,
collections=[tao_core.hooks.utils.INFREQUENT_SUMMARY_KEY])
Visualizer.image('%s_cov_gt' % target_class.name, cov_target,
value_range=value_range,
collections=[tao_core.hooks.utils.INFREQUENT_SUMMARY_KEY])
Visualizer.image('%s_cov_norm' % target_class.name, y_true['cov_norm'],
value_range=value_range,
collections=[tao_core.hooks.utils.INFREQUENT_SUMMARY_KEY])
Visualizer.image('%s_cov_pred' % target_class.name, cov_pred,
value_range=value_range,
collections=[tao_core.hooks.utils.INFREQUENT_SUMMARY_KEY])
if isinstance(loss_mask, tf.Tensor):
Visualizer.image('%s_cov_loss_mask' % target_class.name, cov_loss_mask,
collections=[tao_core.hooks.utils.INFREQUENT_SUMMARY_KEY])
return mean_cost
def target_gradient(self, ground_truth_label):
"""Coverage gradient.
This will make the rasterizer rasterize a constant value 1.0 for each
target bounding box. The constant value is further multiplied by the
coverage value (according to self.gradient_flag).
Args:
ground_truth_label: dictionary of label attributes. Uses the attribute
on key 'target/inv_bbox_area' for getting the number of boxes.
Returns:
gradient (tf.Tensor): The shape is (num_bboxes, 1, 3).
"""
num_boxes = tf.size(ground_truth_label['target/inv_bbox_area'])
zero = tf.zeros(shape=[num_boxes])
one = tf.ones(shape=[num_boxes])
gradient = tf.transpose([[zero, zero, one]], (2, 0, 1))
return gradient
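# --- Illustrative sketch (not part of the original module) ---
# The coverage gradient is the constant plane value 1.0 per target box, i.e.
# coefficients (0, 0, 1). A quick shape check with five hypothetical boxes,
# assuming eager execution is available in this TF build:
if __name__ == "__main__":
    tf.compat.v1.enable_eager_execution()
    objective = CovObjective(input_layer_name=None, output_height=34, output_width=60)
    label = {'target/inv_bbox_area': tf.ones([5])}
    print(objective.target_gradient(label).shape)  # -> (5, 1, 3)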
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/objectives/cov_objective.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model template definitions. One model per file in this directory."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/objectives/__init__.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""Objective label filter class that handles the necessary label filtering logic."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import tensorflow as tf
from nvidia_tao_tf1.blocks.multi_source_loader.types.bbox_2d_label import Bbox2DLabel
from nvidia_tao_tf1.cv.detectnet_v2.label_filter.base_label_filter import (
filter_labels,
get_chained_filters_indices
)
from nvidia_tao_tf1.cv.detectnet_v2.label_filter.source_class_label_filter import (
SourceClassLabelFilter
)
class ObjectiveLabelFilter(object):
"""Holds the necessary <LabelFilter>s to apply to ground truths.
Unlike the LabelFilter classes, which have been stripped of as much model-specific information
as possible, this class holds such information in a 'hierarchy'.
It is for now comprised of two levels: [target_class_name][objective_name], although in the
future it is quite likely an additional [head_name] level will be pre-pended to it.
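    Example of the resulting hierarchy (hypothetical names):
        {'car':    {'cov': [filter_a], 'bbox': [filter_a, filter_b]},
         'person': {'cov': [filter_c]}}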
"""
def __init__(self, objective_label_filter_configs, target_class_to_source_classes_mapping,
learnable_objective_names, mask_multiplier=1.0, preserve_ground_truth=False):
"""Constructor.
Args:
objective_label_filter_configs (list of ObjectiveLabelFilterConfig).
target_class_to_source_classes_mapping (dict): maps from target class name to a list of
source class names.
learnable_objective_names (list of str): List of learnable objective names. These are
the objective names a LabelFilter will be applied to if the
ObjectiveLabelFilterConfig containing it has objective_names set to <NoneType>.
mask_multiplier (float): Indicates the weight to be assigned to the labels resulting
from this set of filters. Default value of 1.0 amounts to a no-op.
preserve_ground_truth (bool): When True, the objective label filter will NOT multiply
                areas which already have nonzero coverage (the definition of a don't-care region).
Default False implies coverage will not affect objective filtering.
"""
self.objective_label_filter_configs = objective_label_filter_configs
self.target_class_to_source_classes_mapping = target_class_to_source_classes_mapping
self.learnable_objective_names = set(learnable_objective_names)
self.mask_multiplier = mask_multiplier
self.preserve_ground_truth = preserve_ground_truth
# Do some sanity checks.
for label_filter_config in self.objective_label_filter_configs:
if label_filter_config.target_class_names is not None:
assert set(label_filter_config.target_class_names) <= \
set(target_class_to_source_classes_mapping.keys()), \
"The filter is configured to act on at least one target class that does not " \
"appear in target_class_to_source_classes_mapping."
# The following will hold the 'hierarchy' as defined in the class docstring above.
self._label_filter_lists = self._get_label_filter_lists()
def _get_label_filter_lists(self):
"""Set up the defined hierarchy and populates it with the necessary LabelFilters.
Returns:
label_filter_lists (dict): maps from [target_class_name][objective_name] to list of
LabelFilter objects.
"""
label_filter_lists = dict()
# Get the "atomic" label filters.
for config in self.objective_label_filter_configs:
# Determine which objective(s) this particular label filter will be used for.
objective_names = self.learnable_objective_names if config.objective_names is None \
else config.objective_names
# Determine which target class(es) this particular label filter will be used for.
if config.target_class_names is None:
# This means the filter should apply to all classes.
target_class_names = list(
self.target_class_to_source_classes_mapping.keys())
else:
target_class_names = config.target_class_names
# Finally, instantiate the LabelFilters.
for target_class_name in target_class_names:
if target_class_name not in label_filter_lists:
# Initialize to empty dict.
label_filter_lists[target_class_name] = dict()
for objective_name in objective_names:
if objective_name not in label_filter_lists[target_class_name]:
# Initialize to empty list.
label_filter_lists[target_class_name][objective_name] = list(
)
# Add the appropriate LabelFilter.
label_filter_lists[target_class_name][objective_name].\
append(config.label_filter)
return label_filter_lists
def _apply_filters_to_labels(self, labels, label_filters,
source_class_label_filter):
"""Helper method to apply filters to a single frame's labels.
For a high-level description of some of the logic implemented here, please refer to
doc/loss_masks.md.
Args:
            labels (dict of Tensors or Bbox2DLabel): Contains the labels for a single frame.
            label_filters (list): Each element is an instance of BaseLabelFilter to apply to
                <labels>.
source_class_label_filter (SourceClassLabelFilter): This will be used in conjunction
with those filters in <label_filters> that are not of type SourceClassLabelFilter.
Returns:
filtered_labels (dict of Tensors): Same format as <frame_labels>, but with
<label_filters> applied to them.
"""
# Initialize indices to False.
if isinstance(labels, dict):
filtered_indices = \
tf.zeros_like(labels['target/object_class'], dtype=tf.bool)
elif isinstance(labels, Bbox2DLabel):
filtered_indices = \
tf.zeros_like(labels.object_class.values, dtype=tf.bool)
        else:
            raise ValueError("Unsupported label type: %s" % type(labels))
# First, get the filters in filter_list that are also SourceClassLabelFilter.
source_class_label_filters = \
[l for l in label_filters if isinstance(l, SourceClassLabelFilter)]
other_label_filters = \
[l for l in label_filters if not isinstance(
l, SourceClassLabelFilter)]
        # Find those labels mapped to target_class_name that satisfy any of the
        # other_label_filters. The assumption here is that, if a user specifies a filter that is
        # not of type SourceClassLabelFilter, then implicitly they would like it to be applied
        # only to those source classes mapped to a given target class. e.g. If a user specifies
        # that targets whose bbox dimensions are in a given range should be selected for the
        # target class 'car', then only those objects that are actually (mapped to) 'car' will
        # have this filter applied to them, hence the logical-and.
if len(other_label_filters) > 0:
filtered_indices = \
tf.logical_and(get_chained_filters_indices(other_label_filters, labels, 'or'),
source_class_label_filter.is_criterion_satisfied(labels))
# Apply the user-specified source class label filters, if necessary. Here, the indices
# satisfying any said source class label filter will be logical-or-ed with the result
# of the previous step. We do not want to logical-and the user-specified source class label
# filters with the one that maps to a given target class, because the assumption is that
# if the user specifies such filters, it is that they only want those.
# Note that the source classes for a source class label filter need not be present in the
# mapping for a given target class for this to work.
if len(source_class_label_filters) > 0:
source_class_filtered_indices = \
get_chained_filters_indices(
source_class_label_filters, labels, 'or')
filtered_indices = \
tf.logical_or(filtered_indices, source_class_filtered_indices)
filtered_labels = filter_labels(labels, filtered_indices)
return filtered_labels
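    # In pseudo-boolean form, the indices kept by _apply_filters_to_labels are:
    #
    #     keep = ((other_1 OR other_2 OR ...) AND mapped_to_target_class)
    #            OR (user_source_1 OR user_source_2 OR ...)
    #
    # where other_* are the non-SourceClassLabelFilter filters, mapped_to_target_class
    # selects labels whose source class maps to the target class at hand, and
    # user_source_* are the user-specified SourceClassLabelFilters. An empty OR
    # evaluates to False.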
def apply_filters(self, batch_labels):
"""Method that users will call to actually do the filtering.
Args:
batch_labels (list of dict of Tensors): contains the labels for a batch of frames.
Each element in the list corresponds to a single frame's labels, and is a dict
containing various label features.
Returns:
filtered_labels_dict (nested dict): for now, has two levels:
[target_class_name][objective_name]. The leaf values are the corresponding filtered
ground truth labels in tf.Tensor form for a batch of frames.
"""
filtered_labels_dict = dict()
for target_class_name, target_class_filters in six.iteritems(self._label_filter_lists):
filtered_labels_dict[target_class_name] = dict()
# Get a filter that will filter labels whose source class names are mapped to
# this target_class_name.
source_class_names = self.target_class_to_source_classes_mapping[target_class_name]
source_class_label_filter = \
SourceClassLabelFilter(source_class_names=source_class_names)
for objective_name, filter_list in six.iteritems(target_class_filters):
# Initialize the list of filtered labels for this combination of
# [target_class_name][objective_name]. Each element will correspond to one frame's
# labels.
if isinstance(batch_labels, list):
filtered_labels = []
for frame_labels in batch_labels:
filtered_labels.append(self._apply_filters_to_labels(
frame_labels, filter_list, source_class_label_filter))
elif isinstance(batch_labels, Bbox2DLabel):
filtered_labels = \
self._apply_filters_to_labels(batch_labels,
filter_list,
source_class_label_filter)
                else:
                    raise ValueError("Unsupported batch label type: %s" % type(batch_labels))
filtered_labels_dict[target_class_name][objective_name] = filtered_labels
return filtered_labels_dict
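    # Usage sketch (illustrative, mirroring the unit tests): with
    # target_class_to_source_classes_mapping {'car': ['automobile']} and 'cov_norm'
    # as the only learnable objective:
    #
    #     filtered = objective_label_filter.apply_filters(batch_labels)
    #     filtered['car']['cov_norm']  # list of one filtered label dict per frame,
    #                                  # or a single filtered Bbox2DLabel when the
    #                                  # input is a Bbox2DLabel for the whole batch.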
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/objectives/objective_label_filter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Normalized coverage objective."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import nvidia_tao_tf1.core as tao_core
from nvidia_tao_tf1.cv.detectnet_v2.objectives.base_objective import BaseObjective
class CovNormObjective(BaseObjective):
"""Normalized coverage objective (not learnable).
CovNormObjective implements the normalized coverage objective-specific part of the
following functionality:
- Rasterization (labels -> tensors)
"""
def __init__(self, input_layer_name, output_height, output_width):
"""Constructor for normalized coverage objective.
Args:
input_layer_name (string): Name of the input layer of the Objective head.
If None the last layer of the model will be used.
output_height, output_width: Shape of the DNN output tensor.
"""
super(CovNormObjective, self).__init__(
input_layer_name, output_height, output_width)
self.name = 'cov_norm'
self.num_channels = 1
self.learnable = False
        # TODO(pjanis): Check the impact of this one; Rumpy uses passthrough here.
        # Intuitively, should we down-weight objective costs at the edges of coverage blobs?
self.gradient_flag = tao_core.processors.BboxRasterizer.GRADIENT_MODE_MULTIPLY_BY_COVERAGE
def target_gradient(self, ground_truth_label):
"""Gradient for rasterizing the normalized coverage tensor.
This will make the rasterizer rasterize a constant value equal to the
inverse of the bounding box area for each target bounding box. The
constant value is further multiplied by the coverage value (according
to self.gradient_flag).
Args:
            ground_truth_label: dictionary of label attributes. Uses the value at
                key 'target/inv_bbox_area'.
Returns:
The gradients' coefficients.
"""
inv_bbox_area = ground_truth_label['target/inv_bbox_area']
num_boxes = tf.size(inv_bbox_area)
zero = tf.zeros(shape=[num_boxes])
gradient = tf.transpose([[zero, zero, inv_bbox_area]], (2, 0, 1))
return gradient
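    # Shape sketch: with N target boxes, [[zero, zero, inv_bbox_area]] stacks to
    # shape (1, 3, N); the transpose with perm (2, 0, 1) yields (N, 1, 3), i.e. one
    # (slope_x=0, slope_y=0, offset=1/area) coefficient triple per box for this
    # objective's single channel, so a constant value 1/area is rasterized inside
    # each box.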
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/objectives/cov_norm_objective.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Objective set class and builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
from nvidia_tao_tf1.cv.detectnet_v2.objectives.build_objective import build_objective
def build_objective_set(objective_set_config,
output_height, output_width,
input_height, input_width):
"""Construct the model output Objectives.
Args:
objective_set_config: The ObjectiveSet configuration proto
output_height, output_width: Shape of the DNN output tensor.
input_height, input_width: Shape of the DNN input tensor.
Returns:
ObjectiveSet
"""
objective_names = list(
objective_set_config.DESCRIPTOR.fields_by_name.keys())
objectives = []
for objective_name in objective_names:
if objective_set_config.HasField(objective_name):
objectives.append(build_objective(objective_name,
output_height,
output_width,
input_height,
input_width,
getattr(objective_set_config, objective_name)))
assert objectives, "Model config needs to contain at least one objective"
# Add the normalized coverage objective
objectives.append(build_objective('cov_norm',
output_height,
output_width,
input_height,
input_width,
None))
return ObjectiveSet(objectives)
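# Usage sketch (illustrative values, mirroring the unit tests):
#
#     from nvidia_tao_tf1.cv.detectnet_v2.proto.model_config_pb2 import ModelConfig
#
#     objective_set_config = ModelConfig.ObjectiveSet()
#     objective_set_config.bbox.scale = 35.0
#     objective_set_config.bbox.offset = 0.5
#     objective_set_config.cov.MergeFrom(ModelConfig.CovObjective())
#     objective_set = build_objective_set(objective_set_config,
#                                         output_height=4, output_width=4,
#                                         input_height=64, input_width=64)
#
# The non-learnable 'cov_norm' objective is always appended on top of the
# configured objectives.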
def get_head_input(model, input_layer_name):
"""Get an output tensor from model based on a layer name search string.
Args:
model (Keras.Model): Model from where to look for the input tensor.
input_layer_name (string): Layer name search string. If empty, last
layer of model is used.
Returns:
The unique tensor whose name contains the input name.
Raises:
AssertionError: When a unique tensor is not found.
"""
if input_layer_name:
input_layers = [l for l in model.layers if input_layer_name in l.name]
assert len(input_layers) == 1, \
"Did not find a unique input matching '%s'. Found %s." % \
(input_layer_name, [l.name for l in input_layers])
input_tensor = input_layers[0].output
else:
# Input layer name was not given, default to last layer of model.
input_tensor = model.layers[-1].output
return input_tensor
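# Illustrative usage sketch for get_head_input. The helper below is hypothetical
# (not part of the original module) and only demonstrates the substring matching
# behavior on a toy Keras model.
def _example_get_head_input():
    """Return the output tensor of the unique layer whose name contains 'drop'."""
    import keras

    inputs = keras.layers.Input(shape=(2, 4, 4))
    features = keras.layers.Dropout(0.0, name='dropout_1')(inputs)
    model = keras.models.Model(inputs=inputs, outputs=features)
    # 'drop' matches exactly one layer ('dropout_1'), so the assertion passes.
    return get_head_input(model, 'drop')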
class ObjectiveSet(object):
"""Class for sets of objectives."""
def __init__(self, objectives):
"""Constructor.
Args:
objectives: (list<Objective>) List of the Objectives.
"""
self.objectives = objectives
# Form list of learnable objectives for convenience
self.learnable_objectives = [o for o in self.objectives if o.learnable]
def compute_component_costs(self, y_true, y_pred, target_classes, loss_masks=None):
"""Per target class per objective cost function.
Args:
y_true: Ground truth images dictionary.
y_pred: Network predictions dictionary.
target_classes: A list of TargetClass instances.
loss_masks (nested dict): [target_class_name][objective_name]. The leaf values are the
corresponding loss masks (tf.Tensor) for a batch of frames.
Returns:
Dictionary of cost components indexed by target class name and objective name.
"""
# Compute cost for each target class and objective.
component_costs = {}
for target_class in target_classes:
assert target_class.name in y_true
assert target_class.name in y_pred
component_costs[target_class.name] = \
self.get_objective_costs(
y_true, y_pred, target_class, loss_masks)
return component_costs
def get_objective_costs(self, y_true, y_pred, target_class, loss_masks=None):
"""Cost per objective for a given target class.
Args:
y_true: Ground truth tensor dictionary.
y_pred: Prediction tensor dictionary.
target_class: (TargetClass) for which to create the cost.
loss_masks (nested dict): [target_class_name][objective_name]. The leaf values are the
corresponding loss masks (tf.Tensor) for a batch of frames.
Returns:
objective_costs: Dictionary of per objective scalar cost tensors.
"""
if loss_masks is None:
loss_masks = dict()
objective_costs = dict()
for objective in self.learnable_objectives:
# TODO(@williamz): Should loss_masks have been pre-populated with 1.0?
if target_class.name in loss_masks and objective.name in loss_masks[target_class.name]:
loss_mask = loss_masks[target_class.name][objective.name]
else:
loss_mask = 1.0
objective_cost = objective.cost(y_true[target_class.name],
y_pred[target_class.name],
target_class,
loss_mask=loss_mask)
objective_costs[objective.name] = objective_cost
return objective_costs
def construct_outputs(self, model, num_classes, data_format,
kernel_regularizer, bias_regularizer):
"""Construct the output heads for predicting the objectives.
For every objective, check whether the model already has a matching output.
In case the output is not found, construct the corresponding DNN head and
return it. In case a matching output is found in the model, return the existing
output (pretrained models may already contain the outputs).
Args:
model: Model to which the outputs are added.
num_classes: The number of model target classes.
data_format: Order of the dimensions. Set to 'channels_first'.
kernel_regularizer: Keras regularizer to be applied to convolution kernels.
bias_regularizer: Keras regularizer to be applied to biases.
Returns:
outputs: List of output tensors for a set of objectives.
"""
outputs = []
for objective in self.learnable_objectives:
# Check if model already has the output.
matching_outputs = [
o for o in model.outputs if objective.name in o.name]
# We should not find multiple tensors whose name matches a single objective.
assert len(matching_outputs) < 2, \
"Ambiguous model output names: %s. Objective name %s." % \
([o.name for o in model.outputs], objective.name)
if matching_outputs:
output = matching_outputs[0]
elif objective.template:
output = objective.dnn_head_from_template(
model=model,
num_classes=num_classes,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer)
else:
input_tensor = get_head_input(
model, objective.input_layer_name)
output = objective.dnn_head(num_classes=num_classes,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer)(input_tensor)
outputs.append(output)
return outputs
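    # Matching convention sketch: heads built by BaseObjective.dnn_head are named
    # "output_" + objective.name, so a pretrained model that already contains e.g.
    # an output named "output_cov" is detected by the substring match above and
    # reused for the 'cov' objective instead of growing a new head.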
def predictions_to_absolute(self, predictions):
"""Convert predictions from model output space to the absolute image space.
Args:
predictions: Dictionary of model output space predictions of shape
(num_samples, num_classes, num_channels, output_height, output_width).
Returns:
absolute_predictions: Dictionary of predictions tensors in the image space.
The shape of the tensors remains unchanged.
"""
absolute_predictions = dict()
for objective in self.learnable_objectives:
prediction = predictions[objective.name]
prediction = objective.predictions_to_absolute(prediction)
absolute_predictions[objective.name] = prediction
return absolute_predictions
def transform_predictions(self, predictions, matrices=None):
"""Transform predictions by applying transformation matrices.
Args:
predictions: Dictionary of predictions of shape
(num_samples, num_classes, num_channels, output_height, output_width).
matrices: A tensor of 3x3 transformation matrices, shape (num_samples, 3, 3).
Matrices are applied to the predictions sample-wise.
Returns:
transformed_predictions: Dictionary of transformed predictions tensor. The shape
of the tensors remains unchanged.
"""
transformed_predictions = dict()
for objective in self.learnable_objectives:
prediction = predictions[objective.name]
prediction = objective.transform_predictions(prediction, matrices)
transformed_predictions[objective.name] = prediction
return transformed_predictions
def generate_ground_truth_tensors(self, bbox_rasterizer, batch_labels):
"""Generate ground truth tensors.
Args:
bbox_rasterizer (BboxRasterizer): Instance of the BboxRasterizer class that will handle
label-to-rasterizer-arg translation and provide the target_gradient() methods with
the necessary inputs, as well as perform the final call to the SDK's rasterizer.
batch_labels (list): Each element is a dict of target features (each a tf.Tensor).
Returns:
target_tensors (dict): [target_class_name][objective_name] rasterized ground truth
tensor.
"""
target_tensors = defaultdict(dict)
if isinstance(batch_labels, list):
# Corresponds to old (DefaultDataloader) path.
# Get necessary info to compute target gradients from based on the labels.
batch_bbox_rasterizer_input = [
bbox_rasterizer.get_target_gradient_info(item) for item in batch_labels
]
batch_gradient_info = [item.gradient_info for item in batch_bbox_rasterizer_input]
else:
# Implicitly assumes here it is a Bbox2DLabel.
# Get necessary info to compute target gradients from based on the labels.
batch_bbox_rasterizer_input = bbox_rasterizer.get_target_gradient_info(batch_labels)
# Retrieve gradient info.
batch_gradient_info = batch_bbox_rasterizer_input.gradient_info
for objective in self.objectives:
# Now compute the target gradients.
if isinstance(batch_labels, list):
batch_gradients = [objective.target_gradient(item) for item in batch_gradient_info]
else:
batch_gradients = objective.target_gradient(batch_gradient_info)
# Call the rasterizer.
target_tensor = \
bbox_rasterizer.rasterize_labels(
batch_bbox_rasterizer_input=batch_bbox_rasterizer_input,
batch_gradients=batch_gradients,
num_gradients=objective.num_channels,
gradient_flag=objective.gradient_flag)
# Slice per-class targets out of the rasterized target tensor
for class_index, target_class_name in enumerate(bbox_rasterizer.target_class_names):
target_tensors[target_class_name][objective.name] = target_tensor[:, class_index]
return target_tensors
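    # Output sketch (illustrative): with target classes ['car', 'person'] and a
    # single-channel 'cov' objective, the returned structure is roughly:
    #
    #     target_tensors['car']['cov']  # tf.Tensor of shape
    #                                   # (batch, num_channels, out_h, out_w)
    #
    # i.e. one rasterized tensor per (target class, objective) pair, sliced from
    # the rasterizer output along the class axis.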
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/objectives/objective_set.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""ObjectiveLabelFilter class builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nvidia_tao_tf1.cv.detectnet_v2.label_filter.build_label_filter import build_label_filter
from nvidia_tao_tf1.cv.detectnet_v2.objectives.objective_label_filter import ObjectiveLabelFilter
from nvidia_tao_tf1.cv.detectnet_v2.objectives.objective_label_filter_config import (
ObjectiveLabelFilterConfig
)
def build_objective_label_filter_config(objective_label_filter_config_proto):
"""Build a ObjectiveLabelFilterConfig from proto.
Args:
objective_label_filter_config_proto:
proto.objective_label_filter.ObjectiveLabelFilter.ObjectiveLabelFilterConfig message.
Returns:
objective_label_filter_config (ObjectiveLabelFilterConfig).
"""
label_filter = build_label_filter(
objective_label_filter_config_proto.label_filter)
if not objective_label_filter_config_proto.target_class_names:
target_class_names = None
else:
target_class_names = objective_label_filter_config_proto.target_class_names
if not objective_label_filter_config_proto.objective_names:
objective_names = None
else:
objective_names = objective_label_filter_config_proto.objective_names
return ObjectiveLabelFilterConfig(
label_filter=label_filter,
objective_names=objective_names,
target_class_names=target_class_names
)
def build_objective_label_filter(objective_label_filter_proto,
target_class_to_source_classes_mapping,
learnable_objective_names):
"""Build a ObjectiveLabelFilter.
Args:
objective_label_filter_proto: proto.objective_label_filter.ObjectiveLabelFilter message.
target_class_to_source_classes_mapping (dict): maps from target class name to a list of
source class names.
learnable_objective_names (list of str): List of learnable objective names. These are
the objective names a LabelFilter will be applied to if the ObjectiveLabelFilterConfig
containing it has objective_names set to <NoneType>.
Returns:
objective_label_filter (ObjectiveLabelFilter).
"""
    objective_label_filter_configs = \
        [build_objective_label_filter_config(config_proto) for config_proto in
         objective_label_filter_proto.objective_label_filter_configs]
mask_multiplier = objective_label_filter_proto.mask_multiplier
preserve_ground_truth = objective_label_filter_proto.preserve_ground_truth
return ObjectiveLabelFilter(
objective_label_filter_configs=objective_label_filter_configs,
target_class_to_source_classes_mapping=target_class_to_source_classes_mapping,
learnable_objective_names=learnable_objective_names,
mask_multiplier=mask_multiplier,
preserve_ground_truth=preserve_ground_truth)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/objectives/build_objective_label_filter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class / API definition of DNN objectives."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABCMeta, abstractmethod
from keras.layers import Conv2D
from keras.layers import Reshape
import six
class BaseObjective(six.with_metaclass(ABCMeta, object)):
"""Objective base class defining the interface to objectives and common methods.
Objectives implement the following functionalities:
- Rasterization (labels -> tensors)
- Objective transforms (label domain <-> DNN output domain)
- DNN output head creation
- Cost function
- Objective-specific visualization
- Spatial transformation of objectives (applying spatial transformation
matrices to predicted tensors)
"""
@abstractmethod
def __init__(self, input_layer_name, output_height, output_width):
"""Interface to initializing an Objective and the base initializer.
Contains the common implementation, concrete classes need to call this.
Args:
input_layer_name (string): Name of the input layer of the Objective head.
If None the last layer of the model will be used.
output_height, output_width: Shape of the DNN output tensor.
"""
self.num_channels = None
self.gradient_flag = None
self.activation = None
self.learnable = True
self.input_layer_name = input_layer_name
self.template = None
self.output_height = output_height
self.output_width = output_width
def dnn_head(self, num_classes, data_format, kernel_regularizer,
bias_regularizer):
"""Function for adding a head to DNN that outputs the prediction tensors.
Applies the predictor head to a tensor, syntax:
output = objective.dnn_head(...)(input_tensor)
Args:
num_classes: (int) Number of classes.
data_format: (string) e.g. 'channels_first'.
kernel_regularizer: Keras regularizer to be applied to convolution kernels.
bias_regularizer: Keras regularizer to be applied to biases.
Returns:
Function for adding the predictor head.
"""
        # TODO: @vpraveen update the naming if the multistride model is implemented.
conv = Conv2D(filters=num_classes*self.num_channels,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation=self.activation,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
name="output_" + self.name)
return conv
def reshape_output(self, x, num_classes):
"""Reshape class index to its own dimension.
Args:
x: The output tensor as produced by self.dnn_head(), shape
(num_classes*num_channels, H, W).
num_classes: (int) Number of classes.
Returns:
Output tensor with shape (num_classes, num_channels, H, W).
"""
shape = (num_classes, self.num_channels,
self.output_height, self.output_width)
reshaped_x = Reshape(shape)(x)
return reshaped_x
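    # Usage sketch (illustrative; 'objective' and 'feature_map' are assumed to
    # exist and are not part of this module):
    #
    #     head = objective.dnn_head(num_classes=3,
    #                               data_format='channels_first',
    #                               kernel_regularizer=None,
    #                               bias_regularizer=None)
    #     raw = head(feature_map)                          # (N, 3 * num_channels, H, W)
    #     out = objective.reshape_output(raw, num_classes=3)
    #                                                      # (N, 3, num_channels, H, W)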
def cost(self, y_true, y_pred, target_class, loss_mask=None):
"""Interface for creating the scalar cost for the Objective.
Non-learnable objectives do not need to implement this method.
Args:
y_true: GT tensor dictionary
y_pred: Prediction tensor dictionary
target_class: (TargetClass) for which to create the cost
loss_mask: (tf.Tensor) Loss mask to multiply the cost by.
Returns:
cost: TF scalar.
"""
pass
@abstractmethod
def target_gradient(self, ground_truth_label):
"""Interface for creating target gradient config for rasterizer.
This function is called separately for each bounding box target.
The gradients are represented by tuples of coefficients c=(slope_x, slope_y, offset).
This enables the rasterizer to rasterize a linear gradient whose value at pixel
(x, y) is x * slope_x + y * slope_y + offset.
The gradient may be multiplied by the coverage values, if the gradient flag is
set accordingly.
Args:
ground_truth_label: dictionary of label attributes
Returns:
The gradients' coefficients.
"""
pass
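    # Shape sketch (illustrative, mirroring CovNormObjective): for a single-channel
    # objective whose target is a constant value per box, the coefficients are
    # (slope_x, slope_y, offset) = (0, 0, value), since the rasterized value at
    # pixel (x, y) is x * 0 + y * 0 + value. With 'values' of shape (num_boxes,):
    #
    #     zeros = tf.zeros_like(values)                    # slope_x, slope_y
    #     gradient = tf.stack([zeros, zeros, values], -1)  # (num_boxes, 3)
    #     return gradient[:, tf.newaxis, :]                # (num_boxes, 1, 3)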
def predictions_to_absolute(self, prediction):
"""Interface / pass through for converting predictions to absolute values.
This function is called for each DNN output prediction tensor. The function
transforms back the predictions to the absolute (dataset domain) values. For
instance for bounding boxes the function converts grid-cell center relative
coords to absolute coords.
The base-class implementation returns the input prediction unmodified.
Args:
prediction (tensor): shape (batch, class, self.num_channels, height, width)
Returns:
transformed prediction (tensor)
"""
return prediction
def transform_predictions(self, prediction, matrices=None):
"""Interface / pass through for transforming predictions spatially.
This may be used for example to undo spatial augmentation effect on
the bounding box, depth, etc predictions.
The base-class implementation returns the input prediction unmodified.
Args:
prediction (tensor): shape (batch, class, self.num_channels, height, width)
matrices: A tensor of 3x3 transformation matrices, shape (batch, 3, 3).
Returns:
transformed prediction (tensor)
"""
return prediction
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/objectives/base_objective.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test objective label filter builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from google.protobuf.text_format import Merge as merge_text_proto
import pytest
from nvidia_tao_tf1.cv.detectnet_v2.label_filter.bbox_dimensions_label_filter import (
BboxDimensionsLabelFilter
)
from nvidia_tao_tf1.cv.detectnet_v2.label_filter.source_class_label_filter import (
SourceClassLabelFilter
)
from nvidia_tao_tf1.cv.detectnet_v2.objectives.build_objective_label_filter import (
build_objective_label_filter
)
import nvidia_tao_tf1.cv.detectnet_v2.proto.objective_label_filter_pb2 as \
objective_label_filter_pb2
# Some dummy learnable_objective_names.
_LEARNABLE_OBJECTIVE_NAMES = ['cov_norm']
class TestObjectiveLabelFilterBuilder(object):
    """Tests for building an ObjectiveLabelFilter from proto."""
@pytest.fixture(scope="function")
def objective_label_filter_proto(self):
"""Generate a proto to build an ObjectiveLabelFilter with."""
objective_label_filter_proto = objective_label_filter_pb2.ObjectiveLabelFilter()
prototxt = """
objective_label_filter_configs {
target_class_names: "car"
target_class_names: "person"
label_filter: {
bbox_dimensions_label_filter: {
min_width: 10.0
min_height: 10.0
max_width: 400.0
max_height: 400.0
}
}
}
objective_label_filter_configs {
target_class_names: "car"
objective_names: "depth"
label_filter: {
source_class_label_filter: {
source_class_names: "automobile"
}
}
}
objective_label_filter_configs {
target_class_names: "car"
objective_names: "depth"
label_filter: {
source_class_label_filter: {
source_class_names: "van"
}
}
}
"""
merge_text_proto(prototxt, objective_label_filter_proto)
return objective_label_filter_proto
@pytest.fixture(scope='function')
def target_class_to_source_classes_mapping(self):
target_class_to_source_classes_mapping = {
'person': ['pedestrian', 'person_group', 'rider'],
'car': ['heavy_truck', 'automobile', 'unclassifiable_vehicle']
}
return target_class_to_source_classes_mapping
def test_objective_label_filter_builder(self,
objective_label_filter_proto,
target_class_to_source_classes_mapping):
"""Test that the builder for ObjectiveLabelFilter instantiates the object correctly.
Args:
objective_label_filter_proto (proto.objective_label_filter_pb2.ObjectiveLabelFilter)
target_class_to_source_classes_mapping (dict): Maps from target class name (str) to
a list of source class names (str).
"""
objective_label_filter = \
build_objective_label_filter(
objective_label_filter_proto=objective_label_filter_proto,
target_class_to_source_classes_mapping=target_class_to_source_classes_mapping,
learnable_objective_names=_LEARNABLE_OBJECTIVE_NAMES)
        # TODO(@williamz): Ideally, we would check that ObjectiveLabelFilter was called with
        # certain args. However, that would be a little convoluted, so we settle for this
        # approach.
label_filter_lists = objective_label_filter._label_filter_lists
# Check that the default mask_multiplier value is correctly set.
assert objective_label_filter.mask_multiplier == 0.0
# Check that correct target class names have corresponding entries.
expected_target_class_names = {'car', 'person'}
assert set(label_filter_lists.keys()) == expected_target_class_names
# Same check for objective names.
assert set(label_filter_lists['car'].keys()) == {'cov_norm', 'depth'}
assert set(label_filter_lists['person'].keys()) == {'cov_norm'}
# Check that there is only one label filter that applies to all objectives ('cov_norm').
for target_class_name in expected_target_class_names:
assert len(label_filter_lists[target_class_name]['cov_norm']) == 1
# Check that it is of the correct type.
assert isinstance(label_filter_lists[target_class_name]['cov_norm'][0],
BboxDimensionsLabelFilter)
# SourceClassLabelFilter is only applied to 'depth' + 'car' combo.
assert 'depth' not in label_filter_lists['person']
        # Even though actually duplicating the filter as in the prototxt above would be
        # redundant, check that there are two filters for this combo.
assert len(label_filter_lists['car']['depth']) == 2
for sub_filter in label_filter_lists['car']['depth']:
# Check that it is of the correct type.
assert isinstance(sub_filter, SourceClassLabelFilter)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/objectives/tests/test_build_objective_label_filter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test loss mask filter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pytest
import tensorflow as tf
from nvidia_tao_tf1.cv.detectnet_v2.label_filter.base_label_filter import BaseLabelFilter
from nvidia_tao_tf1.cv.detectnet_v2.label_filter.bbox_dimensions_label_filter import (
BboxDimensionsLabelFilter
)
from nvidia_tao_tf1.cv.detectnet_v2.label_filter.source_class_label_filter import (
SourceClassLabelFilter
)
from nvidia_tao_tf1.cv.detectnet_v2.objectives.objective_label_filter import ObjectiveLabelFilter
from nvidia_tao_tf1.cv.detectnet_v2.objectives.objective_label_filter_config import (
ObjectiveLabelFilterConfig
)
# Some dummy learnable_objective_names.
_LEARNABLE_OBJECTIVE_NAMES = ['cov_norm']
class TestObjectiveLabelFilter:
    """Tests for ObjectiveLabelFilter."""
def test_objective_label_filter_init_assert(self):
# Get init args.
label_filter_configs = \
[ObjectiveLabelFilterConfig(label_filter=BaseLabelFilter(),
target_class_names=["car"])]
with pytest.raises(AssertionError):
# Since the class mapping is missing "car", it should fail.
ObjectiveLabelFilter(label_filter_configs,
dict(person=["pedestrian",
"sentient_lifeform"]),
_LEARNABLE_OBJECTIVE_NAMES)
# This one should be fine.
ObjectiveLabelFilter(label_filter_configs, dict(car=["panamera"]),
_LEARNABLE_OBJECTIVE_NAMES)
@pytest.mark.parametrize(
"model_label_filter_configs,target_class_to_source_classes_mapping,expected_structure",
[
# Case 1: Both filters apply to everything.
([ObjectiveLabelFilterConfig(BaseLabelFilter()),
ObjectiveLabelFilterConfig(BboxDimensionsLabelFilter())],
dict(person=['person', 'rider'], car=['automobile', 'truck']),
{'person':
{'cov_norm': [BaseLabelFilter, BboxDimensionsLabelFilter]},
'car':
{'cov_norm': [BaseLabelFilter, BboxDimensionsLabelFilter]}
}
), # ----- End case 1
# Case 2: Each filter only applies to a single target class.
([ObjectiveLabelFilterConfig(SourceClassLabelFilter(), target_class_names=['car']),
ObjectiveLabelFilterConfig(BboxDimensionsLabelFilter(),
target_class_names=['person'])],
dict(person=['person', 'rider'], car=['automobile', 'truck']),
{'person':
{'cov_norm': [BboxDimensionsLabelFilter]},
'car':
{'cov_norm': [SourceClassLabelFilter]}
}
), # ----- End case 2
# Case 3: Each filter applies to a different target class and objective.
([ObjectiveLabelFilterConfig(SourceClassLabelFilter(),
target_class_names=['truck'],
objective_names=['bbox']),
ObjectiveLabelFilterConfig(BboxDimensionsLabelFilter(),
target_class_names=['car'],
objective_names=['depth']),
ObjectiveLabelFilterConfig(BaseLabelFilter(),
target_class_names=['person'],
objective_names=['orientation']),
ObjectiveLabelFilterConfig(BboxDimensionsLabelFilter(),
target_class_names=['truck'],
objective_names=['bbox'])],
dict(person=['pedestrian'], car=[
'automobile', 'van'], truck=['otto', 'pacar']),
{'person':
{'orientation': [BaseLabelFilter]},
'car':
{'depth': [BboxDimensionsLabelFilter]},
'truck':
{'bbox': [SourceClassLabelFilter, BboxDimensionsLabelFilter]}
} # ----- End case 3
)
]
)
def test_get_label_filter_lists(self,
model_label_filter_configs,
target_class_to_source_classes_mapping,
expected_structure):
"""Test that the ObjectiveLabelFilter builds an inner hierarchy that is the expected one."""
# Get the ObjectiveLabelFilter.
objective_label_filter = ObjectiveLabelFilter(model_label_filter_configs,
target_class_to_source_classes_mapping,
_LEARNABLE_OBJECTIVE_NAMES)
# Check that the correct 'hierarchy' was built internally.
filter_lists = objective_label_filter._label_filter_lists
assert set(filter_lists.keys()) == set(expected_structure.keys())
# Now inner keys.
for target_class_name in expected_structure:
assert set(filter_lists[target_class_name].keys()) == \
set(expected_structure[target_class_name].keys())
for objective_name in expected_structure[target_class_name]:
# Check that the LabelFilter objects are of the correct instance.
# Note that order matters.
assert all(map(lambda x: isinstance(*x),
zip(filter_lists[target_class_name][objective_name],
expected_structure[target_class_name][objective_name])))
@pytest.mark.parametrize(
"model_label_filter_configs,batch_labels,target_class_to_source_classes_mapping,"
"expected_output",
[
# Case 1: No kwargs for ObjectiveLabelFilterConfig --> should be no-ops.
([ObjectiveLabelFilterConfig(BboxDimensionsLabelFilter()),
ObjectiveLabelFilterConfig(SourceClassLabelFilter())],
[{'target/object_class': ['automobile', 'pedestrian']}, # 1st frame.
{'target/object_class': ['pedestrian']}], # Second frame.
# The following line indicates that the output dict should only have this class.
{'car': ['automobile']},
# Since we supplied no objective_names, it should be for 'cov_norm'.
{'car': {'cov_norm': [{'target/object_class': ['automobile', 'pedestrian']}, # frame1.
{'target/object_class': ['pedestrian']}]} # Second frame.
}),
# -------- End case 1.
# Case 2: Only keep 'person' labels.
([ObjectiveLabelFilterConfig(SourceClassLabelFilter(source_class_names=['pedestrian']),
target_class_names=['person'])],
[{'target/object_class': ['automobile', 'pedestrian']},
{'target/object_class': ['pedestrian']}],
# The following line indicates that the output dict should only have this class.
{'car': ['automobile'], 'person': ['pedestrian']},
# Since we supplied no objective_names, it should be for 'cov_norm'.
{'person': {'cov_norm': [{'target/object_class': ['pedestrian']},
{'target/object_class': ['pedestrian']}]}
}),
# -------- End case 2.
# Case 3:
([ObjectiveLabelFilterConfig(BboxDimensionsLabelFilter(min_width=10.0),
target_class_names=['person']),
ObjectiveLabelFilterConfig(SourceClassLabelFilter(source_class_names=['automobile']),
target_class_names=['car'],
objective_names=['depth'])],
[{'target/object_class': ['automobile', 'pedestrian'],
'target/coordinates_x1': np.array([20.0, 30.0], dtype=np.float32),
          # The 'pedestrian' box should be filtered out for 'person': its width is
          # below min_width.
'target/coordinates_x2': np.array([31.0, 39.9], dtype=np.float32),
'target/coordinates_y1': np.array([23.0, 24.0], dtype=np.float32),
'target/coordinates_y2': np.array([23.1, 24.1], dtype=np.float32),
'target/bbox_coordinates': \
np.array([[20.0, 23.0, 29.0, 23.1], [30.0, 24.0, 39.9, 24.1]], dtype=np.float32)},
{'target/object_class': ['pedestrian'],
# This one is above the min_width so should be kept.
'target/coordinates_x1': np.array([10.0], dtype=np.float32),
'target/coordinates_x2': np.array([20.1], dtype=np.float32),
'target/coordinates_y1': np.array([0.0], dtype=np.float32),
'target/coordinates_y2': np.array([123.0], dtype=np.float32),
'target/bbox_coordinates': np.array([[10.0, 0.0, 20.1, 123.0]],
dtype=np.float32)
}],
# The following line indicates that the output dict should only have this class.
{'car': ['automobile'], 'person': ['pedestrian']},
# Since we supplied no objective_names, it should be for 'cov_norm'.
{'person':
{'cov_norm':
[{'target/object_class': np.array([]).astype(str),
'target/coordinates_x1': np.array([], dtype=np.float32),
'target/coordinates_x2': np.array([], dtype=np.float32),
'target/coordinates_y1': np.array([], dtype=np.float32),
'target/coordinates_y2': np.array([], dtype=np.float32),
'target/bbox_coordinates': np.empty([0, 4], dtype=np.float32)
}, # End first frame.
{'target/object_class': np.array(['pedestrian']),
'target/coordinates_x1': np.array([10.0], dtype=np.float32),
'target/coordinates_x2': np.array([20.1], dtype=np.float32),
'target/coordinates_y1': np.array([0.0], dtype=np.float32),
'target/coordinates_y2': np.array([123.0], dtype=np.float32),
'target/bbox_coordinates': np.array([[10.0, 0.0, 20.1, 123.0]],
dtype=np.float32)}] # End 2nd frame.
}, # End ['person']['cov_norm'].
'car':
{'depth':
[{'target/object_class': np.array(['automobile']),
'target/coordinates_x1': np.array([20.0], dtype=np.float32),
'target/coordinates_x2': np.array([31.0], dtype=np.float32),
'target/coordinates_y1': np.array([23.0], dtype=np.float32),
'target/coordinates_y2': np.array([23.1], dtype=np.float32),
'target/bbox_coordinates': np.array([[20.0, 23.0, 29.0, 23.1]],
dtype=np.float32)
}, # End first frame.
{'target/object_class': np.array([]).astype(str),
'target/coordinates_x1': np.array([], dtype=np.float32),
'target/coordinates_x2': np.array([], dtype=np.float32),
'target/coordinates_y1': np.array([], dtype=np.float32),
'target/coordinates_y2': np.array([], dtype=np.float32),
'target/bbox_coordinates': np.empty([0, 4], dtype=np.float32),
}], # End 2nd frame.
} # End 'depth'.
} # End 'car', end <expected_output>.
), # -------- End case 3.
]
)
def test_apply_filters(
self,
model_label_filter_configs,
batch_labels,
target_class_to_source_classes_mapping,
expected_output):
# First, get the ObjectiveLabelFilter.
objective_label_filter = ObjectiveLabelFilter(model_label_filter_configs,
target_class_to_source_classes_mapping,
_LEARNABLE_OBJECTIVE_NAMES)
_filtered_labels = objective_label_filter.apply_filters(batch_labels)
with tf.compat.v1.Session() as sess:
filtered_labels = sess.run(_filtered_labels)
# Check the filtering matches our expectations.
assert set(filtered_labels.keys()) == set(expected_output.keys())
for target_class_name in filtered_labels:
assert set(filtered_labels[target_class_name].keys()) == \
set(expected_output[target_class_name].keys())
for objective_name in filtered_labels[target_class_name]:
# Check all the frames from the batch are present.
assert len(filtered_labels[target_class_name][objective_name]) == \
len(expected_output[target_class_name][objective_name])
# Now check all the individual frames match our expectations.
for i in range(len(filtered_labels[target_class_name][objective_name])):
filtered_frame = filtered_labels[target_class_name][objective_name][i]
expected_frame = expected_output[target_class_name][objective_name][i]
assert set(filtered_frame.keys()) == set(
expected_frame.keys())
# Check all features are filtered correctly (both order and value).
for feature_name in filtered_frame:
feature_frame = filtered_frame[feature_name]
if feature_frame.dtype.str.startswith("|O"):
feature_frame = feature_frame.astype(str)
assert np.array_equal(feature_frame,
expected_frame[feature_name])
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/objectives/tests/test_objective_label_filter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test ObjectiveSet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import keras
import numpy as np
import pytest
from six.moves import zip
import tensorflow as tf
from nvidia_tao_tf1.cv.detectnet_v2.label_filter.test_label_filter import get_dummy_labels
from nvidia_tao_tf1.cv.detectnet_v2.objectives.objective_set import build_objective_set
from nvidia_tao_tf1.cv.detectnet_v2.proto.cost_function_config_pb2 import CostFunctionConfig
from nvidia_tao_tf1.cv.detectnet_v2.proto.model_config_pb2 import ModelConfig
from nvidia_tao_tf1.cv.detectnet_v2.proto.visualizer_config_pb2 import VisualizerConfig
from nvidia_tao_tf1.cv.detectnet_v2.rasterizers.bbox_rasterizer import BboxRasterizer
from nvidia_tao_tf1.cv.detectnet_v2.rasterizers.bbox_rasterizer_config import BboxRasterizerConfig
from nvidia_tao_tf1.cv.detectnet_v2.visualization.visualizer import \
DetectNetTBVisualizer as Visualizer
@pytest.fixture(scope="module")
def objective_set():
"""Build ObjectiveSet."""
    objective_set_config = ModelConfig.ObjectiveSet()
    objective_set_config.bbox.input = "dropout"
    objective_set_config.bbox.scale = 1
    objective_set_config.bbox.offset = 1
    objective_set_config.cov.MergeFrom(ModelConfig.CovObjective())
    objective_set_config.cov.input = "dropout"
    input_height, input_width = 16, 16
    output_height, output_width = 1, 1
    visualizer_config = VisualizerConfig()
    visualizer_config.enabled = False
    Visualizer.build_from_config(visualizer_config)
    objective_set = build_objective_set(objective_set_config, output_height, output_width,
                                        input_height, input_width)
return objective_set
@pytest.fixture(scope="module")
def bbox_rasterizer():
"""Define a BboxRasterizer to use for the tests."""
bbox_rasterizer_config = BboxRasterizerConfig(deadzone_radius=0.5)
bbox_rasterizer_config['car'] = BboxRasterizerConfig.TargetClassConfig(
cov_center_x=0.5, cov_center_y=0.5, cov_radius_x=0.8, cov_radius_y=0.7, bbox_min_radius=0.5
)
bbox_rasterizer_config['person'] = BboxRasterizerConfig.TargetClassConfig(
cov_center_x=0.5, cov_center_y=0.5, cov_radius_x=0.8, cov_radius_y=0.7, bbox_min_radius=0.5
)
bbox_rasterizer = BboxRasterizer(
input_width=16, input_height=16, output_height=1, output_width=1,
target_class_names=['car', 'person'], bbox_rasterizer_config=bbox_rasterizer_config,
target_class_mapping={'pedestrian': 'person', 'automobile': 'car'})
return bbox_rasterizer
@pytest.fixture()
def dummy_predictions(objective_set):
"""Return dict in which keys are objective names and values valid but dummy predictions."""
output_dims = [
objective.num_channels for objective in objective_set.learnable_objectives]
predictions = {o.name: tf.ones((1, 1, dims, 1, 1)) for o, dims in
zip(objective_set.learnable_objectives, output_dims)}
return predictions
def test_build_objective_set(objective_set):
"""Test building an ObjectiveSet."""
objective_names = {
objective.name for objective in objective_set.objectives}
learnable_objective_names = {objective.name for objective in
objective_set.learnable_objectives}
expected = set(["cov", "bbox"])
assert learnable_objective_names == expected
expected.add("cov_norm")
assert objective_names == expected
def test_get_objective_costs(objective_set):
"""Test computing cost per objective for an ObjectiveSet."""
y_true = {'car': {objective.name: tf.ones(
(1, 1)) for objective in objective_set.objectives}}
# Prediction equals the ground truth. Expected cost is zero.
y_pred = y_true
target_class = CostFunctionConfig.TargetClass()
target_class.name = 'car'
objective_costs = objective_set.get_objective_costs(
y_true, y_pred, target_class)
with tf.Session() as session:
objective_costs = session.run(objective_costs)
for objective in objective_set.learnable_objectives:
assert objective_costs[objective.name] == 0.
def test_construct_outputs(objective_set):
"""Check that outputs are created for each objective and that they are in expected order."""
inputs = keras.layers.Input(shape=(2, 1, 1))
outputs = keras.layers.Dropout(0.0)(inputs)
model = keras.models.Model(inputs=inputs, outputs=outputs)
kernel_regularizer = bias_regularizer = keras.regularizers.l1(1.)
num_classes = 3
outputs = objective_set.construct_outputs(model,
num_classes=num_classes,
data_format='channels_first',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer)
assert len(outputs) == len(objective_set.learnable_objectives)
expected_output_dims = [4, 1, 1]
for objective, output, output_dims in zip(objective_set.learnable_objectives, outputs,
expected_output_dims):
assert objective.name in output.name
assert keras.backend.int_shape(output) == (
None, num_classes * output_dims, 1, 1)
def test_construct_outputs_no_matching_input(objective_set):
"""Check that constructing output fails if there is no matching input is found."""
inputs = keras.layers.Input(shape=(2, 1, 1))
outputs = keras.layers.Dropout(0.0, name='ropout_fail')(inputs)
model = keras.models.Model(inputs=inputs, outputs=outputs)
with pytest.raises(AssertionError):
outputs = objective_set.construct_outputs(model,
num_classes=1,
data_format='channels_first',
kernel_regularizer=None,
bias_regularizer=None)
def test_construct_outputs_multiple_matching_inputs(objective_set):
"""Check that constructing output fails if there are multiple matching inputs."""
inputs = keras.layers.Input(shape=(2, 1, 1))
outputs = keras.layers.Dropout(0.0, name='dropout_1')(inputs)
outputs = keras.layers.Dropout(0.0, name='dropout_2')(outputs)
model = keras.models.Model(inputs=inputs, outputs=outputs)
with pytest.raises(AssertionError):
outputs = objective_set.construct_outputs(model,
num_classes=1,
data_format='channels_first',
kernel_regularizer=None,
bias_regularizer=None)
def test_construct_outputs_default_input(objective_set):
"""Check that constructing output is OK when inputs are not specified."""
inputs = keras.layers.Input(shape=(2, 1, 1))
outputs = keras.layers.Dropout(0.0, name='dropout_1')(inputs)
outputs = keras.layers.Dropout(0.0, name='dropout_2')(outputs)
model = keras.models.Model(inputs=inputs, outputs=outputs)
for objective in objective_set.learnable_objectives:
objective.input_layer_name = ''
outputs = objective_set.construct_outputs(model,
num_classes=1,
data_format='channels_first',
kernel_regularizer=None,
bias_regularizer=None)
def _check_transformed_predictions(objective_set, original_predictions, transformed_predictions,
transformation, additional_inputs):
"""Compare transformed predictions to the result of applying a given transformation in place."""
with tf.Session() as session:
for objective in objective_set.learnable_objectives:
original_prediction = original_predictions[objective.name]
inputs = [original_prediction] + additional_inputs
expected = session.run(getattr(objective, transformation)(*inputs))
transformed_prediction = session.run(
transformed_predictions[objective.name])
np.testing.assert_allclose(transformed_prediction, expected)
def test_predictions_to_absolute(objective_set, dummy_predictions):
"""Test converting all objectives to the absolute coordinate space."""
absolute_predictions = objective_set.predictions_to_absolute(
dummy_predictions)
_check_transformed_predictions(objective_set, dummy_predictions, absolute_predictions,
'predictions_to_absolute', [])
def test_transform_predictions(objective_set, dummy_predictions):
"""Test transforming all predictions."""
num_samples = [int(list(dummy_predictions.values())[0].shape[0])]
matrices = tf.eye(3, batch_shape=num_samples)
transformed_predictions = objective_set.transform_predictions(
dummy_predictions, matrices)
_check_transformed_predictions(objective_set, dummy_predictions, transformed_predictions,
'transform_predictions', [matrices])
def test_generate_ground_truth_tensors(objective_set, bbox_rasterizer):
"""Test that generate_ground_truth_tensors sets up the correct tensors.
Note: this does not test the result of running the rasterization op.
Args:
objective_set (ObjectiveSet): As defined by the module-wide fixture.
bbox_rasterizer (BBoxRasterizer): As defined by the module-wide fixture.
"""
batch_source_class_names = [['car', 'person', 'car'], ['person']]
batch_other_attributes = [dict(), dict()]
# First, define bbox coords for each frame.
batch_other_attributes[0]['target/bbox_coordinates'] = \
np.cast[np.float32](np.random.randn(3, 4))
batch_other_attributes[1]['target/bbox_coordinates'] = \
np.cast[np.float32](np.random.randn(1, 4))
# Then, add depth related field.
batch_other_attributes[0]['target/world_bbox_z'] = \
np.array([12.3, 4.56, 7.89], dtype=np.float32)
batch_other_attributes[1]['target/world_bbox_z'] = np.array(
[10.11], dtype=np.float32)
# Same with orientation.
batch_other_attributes[0]['target/orientation'] = np.array(
[0.1, 0.2, 0.3], dtype=np.float32)
batch_other_attributes[1]['target/orientation'] = np.array(
[0.4], dtype=np.float32)
# Add augmentation matrices.
batch_other_attributes[0]['frame/augmented_to_input_matrices'] = \
np.cast[np.float32](np.random.randn(3, 3))
batch_other_attributes[1]['frame/augmented_to_input_matrices'] = \
np.cast[np.float32](np.random.randn(3, 3))
# Add first order bw-poly coefficients.
batch_other_attributes[0]['frame/bw_poly_coeff1'] = \
np.array([0.0005], dtype=np.float32)
batch_other_attributes[1]['frame/bw_poly_coeff1'] = \
np.array([0.001], dtype=np.float32)
batch_labels = \
[get_dummy_labels(x[0], other_attributes=x[1]) for x in zip(
batch_source_class_names, batch_other_attributes)]
target_tensors = objective_set.generate_ground_truth_tensors(bbox_rasterizer=bbox_rasterizer,
batch_labels=batch_labels)
target_class_names = {'car', 'person'}
assert set(target_tensors.keys()) == target_class_names
for target_class_name in target_class_names:
assert set(target_tensors[target_class_name].keys()) == \
{'cov', 'bbox', 'cov_norm'}
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/objectives/tests/test_objective_set.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Objective tests base class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import zip
import tensorflow as tf
import nvidia_tao_tf1.core
from nvidia_tao_tf1.cv.detectnet_v2.rasterizers.bbox_rasterizer import BboxRasterizer
class TestObjective(object):
"""Objective tests base class."""
output_width = 5
output_height = 6
stride = 16
input_width = output_width * stride
input_height = output_height * stride
def check_label_roundtrip(self, objective, label, expected_values):
"""Test label roundtrip through the objective.
- Start from a label
- Construct the target gradient for it
- Rasterize the target gradient
- Transform the rasterized tensor to absolute coordinates
- Check the result matches the original label.
"""
coords = label['target/output_space_coordinates']
# Form the gradients
gradients = objective.target_gradient(label)
# Rasterize the gradients. Use a small min radius to force
# some output even for degenerate boxes.
num_classes = 1
num_images = 1
        matrices, _, _ = BboxRasterizer.bbox_from_rumpy_params(
            xmin=coords[0],
            ymin=coords[1],
            xmax=coords[2],
            ymax=coords[3],
            cov_radius_x=tf.constant([1.0]),
            cov_radius_y=tf.constant([1.0]),
            bbox_min_radius=tf.constant([1.0]),
            cov_center_x=tf.constant([0.5]),
            cov_center_y=tf.constant([0.5]),
            deadzone_radius=tf.constant([1.0]))
bbox_rasterizer = nvidia_tao_tf1.core.processors.BboxRasterizer()
gradient_flags = [objective.gradient_flag] * objective.num_channels
tensor = bbox_rasterizer(num_images=num_images,
num_classes=num_classes,
num_gradients=objective.num_channels,
image_height=self.output_height,
image_width=self.output_width,
bboxes_per_image=[1],
bbox_class_ids=[0],
bbox_matrices=matrices,
bbox_gradients=gradients,
bbox_coverage_radii=[[1.0, 1.0]],
bbox_flags=[
nvidia_tao_tf1.core.processors.BboxRasterizer.DRAW_MODE_ELLIPSE],
gradient_flags=gradient_flags)
# Give the tensor its shape, otherwise predictions_to_absolute does not work
tensor = tf.reshape(tensor, (num_images, num_classes, objective.num_channels,
self.output_height, self.output_width))
# Transform to absolute coordinates
abs_tensor = objective.predictions_to_absolute(tensor)
# Check all output channels are as expected
abs_tensors_per_channel = tf.unstack(abs_tensor, axis=2)
        # Assuming the ground truth tensor is non-zero only inside the ellipse.
ellipse_mask = tf.not_equal(tensor[:, :, 0], 0.0)
with tf.Session() as session:
for ref_value, test_tensor in zip(expected_values, abs_tensors_per_channel):
# Test only the values within the ellipse
test_values_tensor = tf.boolean_mask(test_tensor, ellipse_mask)
test_values = session.run(test_values_tensor)
# Check that we actually rasterized something
assert test_values.size, "No non-zero target tensor values found"
# Default tolerance is too strict. 1e-6 would already fail
np.testing.assert_allclose(test_values, ref_value, atol=1e-5)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/objectives/tests/test_objective.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test BboxObjective."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pytest
import tensorflow as tf
from nvidia_tao_tf1.cv.detectnet_v2.objectives.build_objective import build_objective
from nvidia_tao_tf1.cv.detectnet_v2.objectives.tests.test_objective import TestObjective
from nvidia_tao_tf1.cv.detectnet_v2.proto.model_config_pb2 import ModelConfig
class TestBboxObjective(TestObjective):
"""Bbox objective tests."""
@pytest.fixture()
def bbox_objective(self):
"""A BboxObjective instance."""
# Build the bbox objective
bbox_config = ModelConfig.BboxObjective()
bbox_config.scale = 35.0
bbox_config.offset = 0.5
bbox_objective = build_objective('bbox', self.output_height, self.output_width,
self.input_height, self.input_width,
objective_config=bbox_config)
return bbox_objective
@pytest.mark.parametrize("label_coords,expected_coords",
[
# Regular bbox, values should not change
([1.1, 21.1, 42.1, 66.666],
[1.1, 21.1, 42.1, 66.666]),
# Overly large, bbox should clip to image boundaries
([-10.1, -30.0, 150.0, 140.0],
[0.0, 0.0, 5*16, 6*16]),
# Negative height, should clip to max vertical coordinate
([-10.1, 30.2, 50.0, 20.2],
[0.0, 30.2, 50.0, 30.2]),
# Negative width, should clip to max horizontal coordinate
([30.2, 50.0, 20.2, 60.0],
[30.2, 50.0, 30.2, 60.0]),
])
def test_bbox_roundtrip(self, bbox_objective, label_coords, expected_coords):
"""Test bbox label roundtrip through the objective.
- Start from a label
- Construct the target gradient for it
- Rasterize the target gradient
- Transform the rasterized tensor to absolute coordinates
- Check the result matches the original label.
"""
# Set up the coordinate tensor and target label
scale_x = self.output_width / self.input_width
scale_y = self.output_height / self.input_height
coords_list = [label_coords[0] * scale_x,
label_coords[1] * scale_y,
label_coords[2] * scale_x,
label_coords[3] * scale_y]
coords = tf.reshape(tf.constant(coords_list), [4, 1])
label = {'target/output_space_coordinates': coords}
self.check_label_roundtrip(bbox_objective, label, expected_coords)
def test_bbox_predictions_to_absolute_coordinates(self, bbox_objective):
"""Test bbox prediction transform to absolute coordinates.
Test that the bbox prediction tensor in gridcell center relative ltrb format
gets transformed to absolute coordinates.
"""
num_classes = 1
output_width = self.output_width
output_height = self.output_height
input_width = self.input_width
input_height = self.input_height
stride = input_height / output_height
bbox_scale = bbox_objective.scale
bbox_offset = bbox_objective.offset
# (batch size, num_classes, channels, height, width)
ltrb = tf.ones((1, num_classes, 4, output_height, output_width))
        # Note that the output absolute coordinates are clipped to the image borders.
absolute_coordinates = bbox_objective.predictions_to_absolute(ltrb)
# compute gridcell center coordinates
x = np.arange(0, output_width, dtype=np.float32) * stride + bbox_offset
x = np.tile(x, (output_height, 1))
y = np.arange(0, output_height, dtype=np.float32) * \
stride + bbox_offset
y = np.transpose(np.tile(y, (output_width, 1)))
# all ltrb values are ones, hence add +/- bbox_scale to the expected values
x1 = x - bbox_scale
y1 = y - bbox_scale
x2 = x + bbox_scale
y2 = y + bbox_scale
# clip
x1 = np.minimum(np.maximum(x1, 0.), input_width)
y1 = np.minimum(np.maximum(y1, 0.), input_height)
x2 = np.minimum(np.maximum(x2, x1), input_width)
y2 = np.minimum(np.maximum(y2, y1), input_height)
expected_absolute_coordinates = np.stack((x1, y1, x2, y2), axis=0)
with tf.Session() as session:
absolute_coordinates_res = session.run([absolute_coordinates])
np.testing.assert_allclose(absolute_coordinates_res[0][0][0],
expected_absolute_coordinates)
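        # Worked example of the expectation above (bbox_scale=35.0 and
        # bbox_offset=0.5 come from the fixture; input_width >= 35.5 is an
        # assumption for illustration): the grid cell in column 0 has center
        # x = 0 * stride + 0.5 = 0.5, so with all ltrb predictions equal to
        # 1.0 the expected absolute coordinates are
        # x1 = max(0.5 - 35.0, 0.0) = 0.0 and
        # x2 = min(0.5 + 35.0, input_width) = 35.5.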
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/objectives/tests/test_bbox_objective.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""Bbox rasterizer config class that holds parameters for BboxRasterizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class BboxRasterizerConfig(dict):
"""Hold the parameters for BboxRasterizer."""
class TargetClassConfig(object):
"""Hold target class specific parameters."""
__slots__ = ["cov_center_x", "cov_center_y", "cov_radius_x", "cov_radius_y",
"bbox_min_radius"]
def __init__(self, cov_center_x, cov_center_y, cov_radius_x, cov_radius_y, bbox_min_radius):
"""Constructor.
Args:
cov_center_x/y (float): The x / y coordinate of the center of the coverage region
relative to the bbox. E.g. If we want the center of the coverage region to be
that of the bbox, the value would be 0.5.
cov_radius_x/y (float): The radius of the coverage region along the x / y axis,
relative to the full extent of the bbox. E.g. If we want the coverage region
to span the entire length of a bbox along a given axis, the value would be 1.0.
bbox_min_radius (float): Minimum radius of the coverage region in output space (not
input pixel space).
Raises:
ValueError: If the input args are not in the accepted ranges.
"""
if cov_center_x < 0.0 or cov_center_x > 1.0:
raise ValueError("BboxRasterizerConfig.TargetClassConfig.cov_center_x must be in "
"[0.0, 1.0]")
if cov_center_y < 0.0 or cov_center_y > 1.0:
raise ValueError("BboxRasterizerConfig.TargetClassConfig.cov_center_y must be in "
"[0.0, 1.0]")
if cov_radius_x <= 0.0:
raise ValueError("BboxRasterizerConfig.TargetClassConfig.cov_radius_x must be > 0")
if cov_radius_y <= 0.0:
raise ValueError("BboxRasterizerConfig.TargetClassConfig.cov_radius_y must be > 0")
if bbox_min_radius <= 0.0:
raise ValueError("BboxRasterizerConfig.TargetClassConfig.bbox_min_radius "
"must be > 0")
self.cov_center_x = cov_center_x
self.cov_center_y = cov_center_y
self.cov_radius_x = cov_radius_x
self.cov_radius_y = cov_radius_y
self.bbox_min_radius = bbox_min_radius
def __init__(self, deadzone_radius):
"""Constructor.
Args:
deadzone_radius (float): Radius of the deadzone to be drawn in between overlapping
coverage regions.
Raises:
ValueError: If the input arg is not within the accepted range.
"""
if deadzone_radius < 0.0 or deadzone_radius > 1.0:
raise ValueError("BboxRasterizerConfig.deadzone_radius must be in [0.0, 1.0]")
self.deadzone_radius = deadzone_radius
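# Minimal usage sketch for this config class; the class names are real, but
# the parameter values are illustrative only (taken from unit-test fixtures,
# not defaults):
#
#   config = BboxRasterizerConfig(deadzone_radius=0.67)
#   config['car'] = BboxRasterizerConfig.TargetClassConfig(
#       cov_center_x=0.5, cov_center_y=0.5,
#       cov_radius_x=1.0, cov_radius_y=1.0,
#       bbox_min_radius=1.0)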
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/rasterizers/bbox_rasterizer_config.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""Build for the BboxRasterizerConfig."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from nvidia_tao_tf1.cv.detectnet_v2.rasterizers.bbox_rasterizer_config import BboxRasterizerConfig
def build_bbox_rasterizer_config(bbox_rasterizer_proto):
"""Build BboxRasterizerConfig from a proto.
Args:
bbox_rasterizer_proto: proto.bbox_rasterizer_config.BboxRasterizerConfig message.
Returns:
bbox_rasterizer_config: BboxRasterizerConfig instance.
"""
bbox_rasterizer_config = BboxRasterizerConfig(bbox_rasterizer_proto.deadzone_radius)
for target_class_name, target_class_config in \
six.iteritems(bbox_rasterizer_proto.target_class_config):
bbox_rasterizer_config[target_class_name] = \
BboxRasterizerConfig.TargetClassConfig(target_class_config.cov_center_x,
target_class_config.cov_center_y,
target_class_config.cov_radius_x,
target_class_config.cov_radius_y,
target_class_config.bbox_min_radius)
return bbox_rasterizer_config
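# Sketch of the proto message this builder consumes, in protobuf text format.
# The field names match what the builder reads above; the values are
# illustrative:
#
#   deadzone_radius: 0.67
#   target_class_config {
#     key: "car"
#     value {
#       cov_center_x: 0.5
#       cov_center_y: 0.5
#       cov_radius_x: 1.0
#       cov_radius_y: 1.0
#       bbox_min_radius: 1.0
#     }
#   }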
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/rasterizers/build_bbox_rasterizer_config.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""Defines functions and classes for translating labels to rasterized ground truth tensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/rasterizers/__init__.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""Bbox rasterizer class that translates labels into ground truth tensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from six.moves import range
import tensorflow as tf
from nvidia_tao_tf1.blocks.multi_source_loader.types.bbox_2d_label import Bbox2DLabel
import nvidia_tao_tf1.core
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.default_dataloader import UNKNOWN_CLASS
from nvidia_tao_tf1.cv.detectnet_v2.label_filter.base_label_filter import filter_labels
class BboxRasterizerInput(object):
"""Encapsulate some of the lower level details needed by BboxRasterizer from the user."""
__slots__ = ["num_bboxes", "bbox_class_ids", "bbox_matrices", "bbox_coverage_radii",
"bbox_flags", "bbox_sort_values", "gradient_info"]
def __init__(self,
num_bboxes,
bbox_class_ids,
bbox_matrices,
bbox_coverage_radii,
bbox_flags,
bbox_sort_values,
gradient_info):
"""Constructor.
Args:
num_bboxes (tf.Tensor): 0-D Tensor with the number of bboxes in this frame.
bbox_class_ids (tf.Tensor): 1-D int32 Tensor indicating of which class each bbox is.
bbox_matrices (tf.Tensor): 3-D float32 Tensor of shape (N, 3, 3) where N is the number
of bboxes in this frame. Each element [i, :, :] is a row major matrix specifying the
shape of the corresponding bbox.
bbox_coverage_radii (tf.Tensor): 2-D float32 Tensor of shape (N, 2). Each element [i, :]
contains the radii (along each dimension x and y) of the coverage region to be drawn
for the corresponding bbox.
            bbox_flags (tf.Tensor): 1-D uint8 tensor. Each element indicates how the corresponding
                bbox's coverage region should be filled. Hardcoded to 'DRAW_MODE_ELLIPSE'.
            bbox_sort_values (tf.Tensor): 1-D float32 tensor of per-bbox sort values used to
                order bboxes within a frame (ascending ymax approximates depth sorting).
gradient_info (dict): Contains output space coordinates, inverse bbox area, and various
other fields needed to calculate the objective-specific target gradients.
"""
self.num_bboxes = num_bboxes
self.bbox_class_ids = bbox_class_ids
self.bbox_matrices = bbox_matrices
self.bbox_coverage_radii = bbox_coverage_radii
self.bbox_flags = bbox_flags
self.bbox_sort_values = bbox_sort_values
self.gradient_info = gradient_info
class BboxRasterizer(object):
"""Takes care of rasterizing labels into ground truth tensors for DetectNet V2 detection."""
def __init__(self, input_width, input_height, output_width, output_height,
target_class_names, bbox_rasterizer_config, target_class_mapping,
output_type=None):
"""Constructor.
Args:
input_width/height (int): Input images' width / height in pixel space.
output_width/height (int): Output rasters' width / height.
target_class_names (list of str): List of target class names for which to generate
rasters.
bbox_rasterizer_config (BboxRasterizerConfig): Maps from target class names to
BboxRasterizerConfig.TargetClassConfig.
Raises:
AssertionError: If certain target classes do not have corresponding parameters.
"""
if not target_class_mapping:
raise ValueError("BboxRasterizer expected a valid class mapping, instead got: {}".
format(target_class_mapping))
self.input_width = input_width
self.input_height = input_height
self.output_width = output_width
self.output_height = output_height
self.target_class_names = target_class_names
self.bbox_rasterizer_config = bbox_rasterizer_config
self.output_type = output_type
self.deadzone_radius = self.bbox_rasterizer_config.deadzone_radius
self._target_class_lookup = nvidia_tao_tf1.core.processors.LookupTable(
keys=list(target_class_mapping.keys()),
values=list(target_class_mapping.values()),
default_value=tf.constant(UNKNOWN_CLASS)
)
# Check that each target class has corresponding rasterization parameters.
for target_class_name in self.target_class_names:
assert target_class_name in self.bbox_rasterizer_config
self._target_class_indices = list(range(len(self.target_class_names)))
# Get strides.
self._scale_x = self.output_width / self.input_width
self._scale_y = self.output_height / self.input_height
# Get lookup tables for rasterization parameters.
self._cov_center_x, self._cov_center_y, self._cov_radius_x, self._cov_radius_y, \
self._bbox_min_radius = self._construct_lookup_tables()
self._rasterizer = nvidia_tao_tf1.core.processors.BboxRasterizer()
def _construct_lookup_tables(self):
"""Construct LUTs for mapping class names into ground truth parameters.
Returns:
cov_center_x/y (list of float): Follows the indexing of self.target_class_names. Each
element corresponds to the x / y coordinate of where the center of the coverage
                region should be drawn, relative to each bounding box (e.g. midpoint is 0.5).
cov_radius_x/y (list of float): Follows the indexing of self.target_class_names. Each
element corresponds to the x / y extent of the coverage region, relative to the
bbox dimensions (e.g. full bbox dimension is 1.0).
bbox_min_radius (list of float): Follows the indexing of self.target_class_names. Each
element corresponds to the minimum radius each coverage region should have.
"""
cov_center_x = []
cov_center_y = []
cov_radius_x = []
cov_radius_y = []
bbox_min_radius = []
# Go in order of self.target_class_names.
for target_class_name in self.target_class_names:
target_class_config = self.bbox_rasterizer_config[target_class_name]
            # Find the matching class config in bbox_rasterizer_config and append its values.
cov_center_x.append(target_class_config.cov_center_x)
cov_center_y.append(target_class_config.cov_center_y)
cov_radius_x.append(target_class_config.cov_radius_x)
cov_radius_y.append(target_class_config.cov_radius_y)
bbox_min_radius.append(target_class_config.bbox_min_radius)
return cov_center_x, cov_center_y, cov_radius_x, cov_radius_y, bbox_min_radius
def _lookup(self, values):
"""Create a lookup function for rasterization parameters.
Args:
values (list): Contains arbitrary elements as constructed by e.g.
self._construct_lookup_tables.
Returns:
(nvidia_tao_tf1.core.processors.LookupTable) Callable with target class name (str) that returns
the corresponding entry in <values>.
"""
return nvidia_tao_tf1.core.processors.LookupTable(keys=self.target_class_names, values=values,
default_value=-1)
@staticmethod
def bbox_from_rumpy_params(
xmin, ymin, xmax, ymax,
cov_center_x, cov_center_y, cov_radius_x, cov_radius_y, bbox_min_radius,
deadzone_radius):
"""Compute bbox matrix and coverage radii based on input coords and Rumpy style parameters.
Args:
The first 4 arguments are all in the model output space.
xmin (1-D tf.Tensor of float): Contains the left-most coordinates of bboxes.
ymin (1-D tf.Tensor of float): Contains the top-most coordinates of bboxes.
xmax / ymax (1-D tf.Tensor of float): Same but right- and bottom-most coordinates.
cov_center_x (1-D tf.Tensor of float): Contains the x-coordinates of the centers of the
coverage regions to be drawn for bboxes. Same indexing as e.g. xmin.
cov_center_y (1-D tf.Tensor of float): Likewise, but for the y-axis.
cov_radius_x (1-D tf.Tensor of float): Contains the radii along the x-axis of the
coverage regions to be drawn for bboxes. Same indexing as e.g. xmin.
cov_radius_y (1-D tf.Tensor of float): Likewise, but for the y-axis.
bbox_min_radius (1-D tf.Tensor of float): Contains the minimum radii for the coverage
regions to be drawn for bboxes. Same indexing as e.g. xmin.
deadzone_radius (float): Radius of the deadzone region to be drawn between bboxes that
have overlapping coverage regions.
Returns:
mat (3-D tf.Tensor of float): A matrix that maps from ground truth image space to the
rasterization space, where transformed coordinates that fall within [-1.0, 1.0]
are inside the deadzone. The shape of this tensor is (N, 3, 3) where N is the
number of elements in <xmin>.
            cov_radius (2-D tf.Tensor of float): An (N, 2) tensor whose values contain the
                ratios of coverage to deadzone radii.
            inv_bbox_area (1-D tf.Tensor of float): Contains the inverse bbox areas, indexed
                in the same way as e.g. <xmin>.
"""
# Center of the coverage region in gt space
# TODO is cov_center always [0.5, 0.5]?
cx = xmin + cov_center_x * (xmax - xmin)
cy = ymin + cov_center_y * (ymax - ymin)
# Ellipse's semi-diameters (i.e. semi-major and semi-minor axes)
# Picking the distance to the closest edge of the bbox as the radius so the generated
# ellipse never spills outside of the bbox, unless possibly when too small.
# Note: this is in abs gt-pixel coordinate space.
sx = tf.where(tf.less(cov_center_x, 0.5), cx - xmin, xmax - cx)
sy = tf.where(tf.less(cov_center_y, 0.5), cy - ymin, ymax - cy)
# Compute coverage radii as fractions of bbox radii
csx = cov_radius_x * sx
csy = cov_radius_y * sy
# Constrain absolute minimum size to avoid numerical problems below. Tenth of a pixel
# should be small enough to allow almost non-visible bboxes if so desired, while large
# enough to avoid problems. Note that this is just a safety measure: bbox_min_radius
# below provides user controlled clamping (but can't guard against zero-sized bboxes),
# and dataset converters should have removed degeneracies (but augmentation might
# produce small bboxes).
csx = tf.maximum(csx, 0.1)
csy = tf.maximum(csy, 0.1)
# Constrain X dimension, keeping aspect ratio
rx = tf.maximum(csx, bbox_min_radius)
ry = tf.where(tf.less(csx, bbox_min_radius), bbox_min_radius * csy / csx, csy)
csx = rx
csy = ry
# Constrain Y dimension, keeping aspect ratio
rx = tf.where(tf.less(csy, bbox_min_radius), bbox_min_radius * csx / csy, csx)
ry = tf.maximum(csy, bbox_min_radius)
csx = rx
csy = ry
# Compute deadzone radii by interpolating between coverage zone and original bbox size
dsx = (1.0 - deadzone_radius) * csx + deadzone_radius * sx
dsy = (1.0 - deadzone_radius) * csy + deadzone_radius * sy
# Constrain deadzone to be larger than coverage zone
dsx = tf.maximum(dsx, csx)
dsy = tf.maximum(dsy, csy)
# Construct a matrix that maps from ground truth image space to rasterization space
# where transformed coordinates that are within [-1,1] range are inside deadzone
oodsx = 1. / dsx
oodsy = 1. / dsy
zero = tf.zeros(shape=[tf.size(xmin)])
one = tf.ones(shape=[tf.size(xmin)])
mat = [[oodsx, zero, zero],
[zero, oodsy, zero],
[-cx*oodsx, -cy*oodsy, one]]
# Convert from matrix of arrays to array of matrices.
mat = tf.transpose(mat, (2, 0, 1))
# Compute the ratio of coverage and deadzone radii
cov_radius = tf.transpose([csx * oodsx, csy * oodsy])
# Compute coverage area based normalization factor to be used for cost function weighting
# Clamp to ensure the value is always <= 1.0
inv_bbox_area = 1. / tf.maximum(csx * csy * 4., 1.)
return mat, cov_radius, inv_bbox_area
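    # Worked example for bbox_from_rumpy_params (mirrors the unit test in
    # tests/test_bbox_rasterizer.py): for a bbox with
    # (xmin, ymin, xmax, ymax) = (1, 2, 3, 4), cov_center = (0.5, 0.5),
    # cov_radius = (0.6, 0.6), bbox_min_radius = 0.5 and deadzone_radius = 1.0,
    # the center is (cx, cy) = (2, 3), the deadzone semi-axes are
    # dsx = dsy = 1.0, and the returned matrix is
    # [[1, 0, 0], [0, 1, 0], [-2, -3, 1]], with cov_radius = [0.6, 0.6] and
    # inv_bbox_area = 1 / (4 * 0.36).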
def _prepare_labels(self, labels):
"""Prepare labels by keeping only those with mapped classes, and then sorting them.
Filter out source classes that are not mapped to any target class.
Args:
labels (variable type):
* If a dict, then it contains various label features for a single frame. Maps from
feature name (str) to tf.Tensor. This corresponds to the old (DefaultDataloader)
path.
* Otherwise, expects a Bbox2DLabel with all the features for a minibatch.
Returns:
output_labels (dict of tf.Tensors): Contains the same label features as ``labels``,
but with unmapped classes filtered out.
class_ids (tf.Tensor): 1-D Tensor containing integer indices corresponding to each
label value's class in ``output_labels``.
num_bboxes (tf.Tensor): 1-D Tensor containing the number of bounding boxes per frame.
"""
output_labels = dict()
if isinstance(labels, dict):
# Filter out unmapped labels.
mapped_labels = dict()
mapped_labels.update(labels)
target_classes = self._target_class_lookup(labels['target/object_class'])
valid_indices = tf.not_equal(target_classes, UNKNOWN_CLASS)
mapped_labels['target/object_class'] = target_classes
mapped_labels = filter_labels(mapped_labels, valid_indices)
object_classes = mapped_labels['target/object_class']
class_ids = self._lookup(self._target_class_indices)(object_classes)
num_bboxes = tf.size(class_ids)
for feature_name, feature_tensor in six.iteritems(mapped_labels):
if feature_name.startswith('target/'):
output_labels[feature_name] = feature_tensor
elif feature_name.startswith('frame/'):
output_labels[feature_name] = mapped_labels[feature_name]
elif isinstance(labels, Bbox2DLabel):
# TODO(@williamz): This feature needs to be ported into ObstacleNet version once
# temporal models become a thing there.
if self.output_type == 'last':
# Filter out labels belonging to other than the last frame.
def _filter_labels(labels):
"""Helper function to filter labels other than the last frame."""
valid_indices = tf.equal(labels.object_class.indices[:, 1],
labels.object_class.dense_shape[1]-1)
filtered_labels = labels.filter(valid_indices)
return filtered_labels
labels = tf.cond(labels.object_class.dense_shape[1] > 1,
lambda: _filter_labels(labels),
lambda: labels)
# Filter out unmapped labels.
source_classes = labels.object_class
mapped_classes = tf.SparseTensor(
values=self._target_class_lookup(source_classes.values),
indices=source_classes.indices,
dense_shape=source_classes.dense_shape)
mapped_labels = labels._replace(object_class=mapped_classes)
valid_indices = tf.not_equal(mapped_classes.values, UNKNOWN_CLASS)
filtered_labels = mapped_labels.filter(valid_indices)
valid_classes = filtered_labels.object_class.values
valid_coords = tf.reshape(filtered_labels.vertices.coordinates.values, [-1, 4])
valid_sparse_indices = filtered_labels.object_class.indices
class_ids = self._lookup(self._target_class_indices)(valid_classes)
if self.output_type == 'all':
num_frames = tf.cast(source_classes.dense_shape[0] *
source_classes.dense_shape[1], dtype=tf.int32)
frame_indices = tf.cast(
valid_sparse_indices[:, 0] *
source_classes.dense_shape[1] + valid_sparse_indices[:, 1], dtype=tf.int32)
elif self.output_type in [None, 'last']:
num_frames = tf.cast(source_classes.dense_shape[0], dtype=tf.int32)
frame_indices = tf.cast(valid_sparse_indices[:, 0], dtype=tf.int32)
else:
raise ValueError("Unsupported output_type: {}".format(self.output_type))
output_labels['target/bbox_coordinates'] = valid_coords
for feature_name in filtered_labels.TARGET_FEATURES:
feature_tensor = getattr(filtered_labels, feature_name)
if feature_name == 'vertices' or \
not isinstance(feature_tensor, tf.SparseTensor):
continue
output_labels['target/' + feature_name] = feature_tensor.values
# Calculate number of bboxes per image.
# NOTE: the minlength arg is required because the above filtering mechanism may have
# led to the last frames in the batch being completely void of labels.
num_bboxes = tf.bincount(frame_indices, minlength=num_frames)
else:
raise ValueError("Unsupported variable type for labels ({}).".format(type(labels)))
return output_labels, class_ids, num_bboxes
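    # Filtering sketch for _prepare_labels (values mirror the unit tests):
    # with target_class_names = ['car', 'person'], a mapping of
    # {'pedestrian': 'person', 'automobile': 'car', 'van': 'car'} and frame
    # classes ['pedestrian', 'automobile', 'van', 'unmapped'], the 'unmapped'
    # label is dropped and class_ids becomes [1, 0, 0].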
def get_target_gradient_info(self, frame_labels):
"""Translate labels.
Computes the information necessary to calculate target gradients.
Args:
frame_labels (dict of tf.Tensors): Contains various label features for a single frame.
Returns:
bbox_rasterizer_input (BboxRasterizerInput): Encapsulate all the lower level arguments
needed by the call to the SDK.
"""
filtered_labels, class_ids, num_bboxes = self._prepare_labels(frame_labels)
object_classes = filtered_labels['target/object_class']
coordinates = filtered_labels['target/bbox_coordinates']
# Find appropriate scaling factors to go from input image pixel space to network output
# / 'rasterization' space, i.e. divide by stride.
xmin = coordinates[:, 0] * self._scale_x
ymin = coordinates[:, 1] * self._scale_y
xmax = coordinates[:, 2] * self._scale_x
ymax = coordinates[:, 3] * self._scale_y
# Compute bbox matrices based on bbox coordinates.
matrices, coverage_radii, inv_bbox_area = \
self.bbox_from_rumpy_params(
xmin=xmin, ymin=ymin, xmax=xmax, ymax=ymax,
cov_center_x=self._lookup(self._cov_center_x)(object_classes),
cov_center_y=self._lookup(self._cov_center_y)(object_classes),
cov_radius_x=self._lookup(self._cov_radius_x)(object_classes),
cov_radius_y=self._lookup(self._cov_radius_y)(object_classes),
bbox_min_radius=self._lookup(self._bbox_min_radius)(object_classes),
deadzone_radius=self.deadzone_radius)
flags = tf.fill([tf.size(xmin)],
tf.cast(nvidia_tao_tf1.core.processors.BboxRasterizer.DRAW_MODE_ELLIPSE,
tf.uint8))
# Sort bboxes by ascending ymax to approximate depth sorting.
sort_value = ymax
gradient_info = dict()
gradient_info.update(filtered_labels)
# Make a label info dictionary for use in gradient construction
gradient_info['target/inv_bbox_area'] = inv_bbox_area
# Update label info with the coordinates to be used for "gradient" calculation.
gradient_info['target/output_space_coordinates'] = tf.stack([xmin, ymin, xmax, ymax])
return BboxRasterizerInput(
num_bboxes=num_bboxes,
bbox_class_ids=class_ids,
bbox_matrices=matrices,
bbox_coverage_radii=coverage_radii,
bbox_flags=flags,
bbox_sort_values=sort_value,
gradient_info=gradient_info)
def rasterize_labels(self,
batch_bbox_rasterizer_input,
batch_gradients,
num_gradients,
gradient_flag):
"""Rasterize a batch of labels for a given Objective.
Args:
batch_bbox_rasterizer_input (list): Each element is a BboxRasterizerInput containing
the information for a frame.
batch_gradients (list): Each element is a 3-D tf.Tensor of type float32. Each tensor is
of shape (N, G, 3) where N is the number of bboxes in the corresponding frame, G
the number of output channels the rasterized tensor will have for this objective.
num_gradients (int): Number of gradients (output channels).
            gradient_flag: One of the gradient modes under nvidia_tao_tf1.core.processors.BboxRasterizer.
Returns:
target_tensor (tf.Tensor): Rasterized ground truth tensor for one single objective.
Shape is (N, C, G, H, W) where C is the number of target classes, and H and W are
respectively the height and width in the model output space.
"""
if isinstance(batch_bbox_rasterizer_input, list):
bboxes_per_image = [item.num_bboxes for item in batch_bbox_rasterizer_input]
# Concatenate the inputs that need it.
bbox_class_ids = tf.concat(
[item.bbox_class_ids for item in batch_bbox_rasterizer_input], axis=0)
bbox_matrices = tf.concat(
[item.bbox_matrices for item in batch_bbox_rasterizer_input], axis=0)
bbox_coverage_radii = tf.concat(
[item.bbox_coverage_radii for item in batch_bbox_rasterizer_input], axis=0)
bbox_flags = tf.concat(
[item.bbox_flags for item in batch_bbox_rasterizer_input], axis=0)
bbox_sort_values = tf.concat(
[item.bbox_sort_values for item in batch_bbox_rasterizer_input], axis=0)
bbox_gradients = tf.concat(batch_gradients, axis=0)
num_images = len(batch_bbox_rasterizer_input)
else:
bboxes_per_image = batch_bbox_rasterizer_input.num_bboxes
bbox_class_ids = batch_bbox_rasterizer_input.bbox_class_ids
bbox_matrices = batch_bbox_rasterizer_input.bbox_matrices
bbox_gradients = batch_gradients
bbox_coverage_radii = batch_bbox_rasterizer_input.bbox_coverage_radii
bbox_flags = batch_bbox_rasterizer_input.bbox_flags
bbox_sort_values = batch_bbox_rasterizer_input.bbox_sort_values
num_images = tf.size(bboxes_per_image)
num_target_classes = len(self.target_class_names)
gradient_flags = [gradient_flag] * num_gradients
target_tensor = \
self._rasterizer(num_images=num_images,
num_classes=num_target_classes,
num_gradients=num_gradients,
image_height=self.output_height,
image_width=self.output_width,
bboxes_per_image=bboxes_per_image,
bbox_class_ids=bbox_class_ids,
bbox_matrices=bbox_matrices,
bbox_gradients=bbox_gradients,
bbox_coverage_radii=bbox_coverage_radii,
bbox_flags=bbox_flags,
bbox_sort_values=bbox_sort_values,
gradient_flags=gradient_flags)
return target_tensor
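    # Output shape sketch for rasterize_labels (mirrors the unit tests): with
    # 2 frames, 2 target classes, a single gradient channel and a 5x3 output
    # raster, the returned tensor has shape (2, 2, 1, 3, 5), i.e.
    # (num_images, num_classes, num_gradients, output_height, output_width).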
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/rasterizers/bbox_rasterizer.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""Loss mask rasterizer class that translates labels to rasterized tensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nvidia_tao_tf1.blocks.multi_source_loader.types.bbox_2d_label import Bbox2DLabel
from nvidia_tao_tf1.core.processors import PolygonRasterizer
import six
import tensorflow as tf
class LossMaskRasterizer(object):
"""Handle the logic of translating labels to ground truth tensors.
Much like the LossMaskFilter object, this class holds model-specific information in a
'hierarchy'.
It is for now comprised of two levels: [target_class_name][objective_name], although in the
future it is quite likely an additional [head_name] level will be pre-pended to it.
"""
def __init__(self, input_width, input_height, output_width, output_height):
"""Constructor.
Args:
input_width/height (int): Model input dimensions.
output_width/height (int): Model output dimensions.
"""
self.input_width = input_width
self.input_height = input_height
self.output_width = output_width
self.output_height = output_height
# Get the rasterizer from the SDK.
self._rasterizer = \
PolygonRasterizer(width=output_width, height=output_height,
one_hot=False, data_format='channels_first')
# Setup some private attributes to use for label-to-rasterizer-input translation.
self._scale_x = self.output_width / self.input_width
self._scale_y = self.output_height / self.input_height
def translate_frame_labels_bbox_2d_label(self, ground_truth_labels):
"""Translate a frame's ground truth labels to the inputs necessary for the rasterizer.
Args:
ground_truth_labels (Bbox2DLabel): Filtered labels, which only incorporates
bboxes matching filters for all frames in a batch.
Returns:
polygon_vertices (tf.Tensor of float): 2-D tensor of shape (N, 2) where entry [n - 1, 0]
corresponds to the n-th vertex's x coordinate, and [n - 1, 1] to its y coordinate.
vertex_counts_per_polygon (tf.Tensor of int): 1-D tensor where each entry holds the
number of vertices for a single polygon. As an example, if entries at indices 0 and
1 are 3 and 4, that means the first 3 entries in <polygon_vertices> describe one
polygon, and the next 4 entries in <polygon_vertices> describe another.
However, in this special case, ALL polygons are bboxes and hence have 4 vertices.
class_ids_per_polygon (tf.Tensor of int): 1-D tensor that has the same length as
<vertex_counts_per_polygon>. Contains the class ID of each corresponding polygon.
In this special case, they are assumed to all belong to the same class.
polygons_per_image (tf.Tensor of int): 1-D tensor that describes how many polygons
there are in this image.
"""
source_classes = ground_truth_labels.object_class
frame_indices = tf.cast(source_classes.indices[:, 0], dtype=tf.int32)
num_frames = tf.cast(source_classes.dense_shape[0], dtype=tf.int32)
# Step 1: we separate coords into x1,y1,x2,y2.
coords = tf.reshape(ground_truth_labels.vertices.coordinates.values, [-1, 4])
x1 = tf.reshape(coords[:, 0] * self._scale_x, [-1])
y1 = tf.reshape(coords[:, 1] * self._scale_y, [-1])
x2 = tf.reshape(coords[:, 2] * self._scale_x, [-1])
y2 = tf.reshape(coords[:, 3] * self._scale_y, [-1])
# Step 2: compose the vertices of polygon.
coordinates_x = tf.stack([x1, x2, x2, x1], axis=0)
coordinates_x = tf.reshape(tf.transpose(coordinates_x, perm=[1, 0]), [-1])
coordinates_y = tf.stack([y1, y1, y2, y2], axis=0)
coordinates_y = tf.reshape(tf.transpose(coordinates_y, perm=[1, 0]), [-1])
polygon_vertices = tf.stack([coordinates_x, coordinates_y], axis=1)
# Step 3: compose vertex counts, and we assume each polygon has 4 vertices.
vertex_counts_per_polygon = tf.cast(tf.ones_like(x1) * 4, dtype=tf.int32)
# Step 4: Compose class ids, and they are all the same class.
class_ids_per_polygon = tf.zeros_like(vertex_counts_per_polygon)
# Step 5: polygons per image.
polygons_per_image = tf.bincount(frame_indices, minlength=num_frames)
return polygon_vertices, vertex_counts_per_polygon, \
class_ids_per_polygon, polygons_per_image
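    # Vertex layout sketch for the method above: each bbox (x1, y1, x2, y2) is
    # expanded into the 4-vertex polygon
    # [(x1, y1), (x2, y1), (x2, y2), (x1, y2)], so every entry of
    # vertex_counts_per_polygon is 4 and every polygon gets class id 0.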
def translate_frame_labels_dict(self, frame_ground_truth_labels):
"""Translate a frame's ground truth labels to the inputs necessary for the rasterizer.
Args:
frame_ground_truth_labels (dict of Tensors): contains the labels for a single frame.
Returns:
polygon_vertices (tf.Tensor of float): 2-D tensor of shape (N, 2) where entry [n - 1, 0]
corresponds to the n-th vertex's x coordinate, and [n - 1, 1] to its y coordinate.
vertex_counts_per_polygon (tf.Tensor of int): 1-D tensor where each entry holds the
number of vertices for a single polygon. As an example, if entries at indices 0 and
1 are 3 and 4, that means the first 3 entries in <polygon_vertices> describe one
polygon, and the next 4 entries in <polygon_vertices> describe another.
However, in this special case, ALL polygons are bboxes and hence have 4 vertices.
class_ids_per_polygon (tf.Tensor of int): 1-D tensor that has the same length as
<vertex_counts_per_polygon>. Contains the class ID of each corresponding polygon.
In this special case, they are assumed to all belong to the same class.
polygons_per_image (tf.Tensor of int): 1-D tensor that describes how many polygons
there are in this image.
"""
        # TODO(@williamz): the hardcoded assumptions here are likely to lead to problems.
# Get polygon coordinates.
coordinates_x = frame_ground_truth_labels['target/coordinates/x'] * self._scale_x
coordinates_y = frame_ground_truth_labels['target/coordinates/y'] * self._scale_y
# Setup vertices as (x1, y1), (x2, y1), (x2, y2), (x1, y2).
polygon_vertices = tf.stack([coordinates_x, coordinates_y], axis=1)
# Intermediate step.
coordinates_per_polygon = tf.bincount(tf.cast(
frame_ground_truth_labels['target/coordinates/index'], dtype=tf.int32))
# All the same class.
class_ids_per_polygon = tf.zeros_like(coordinates_per_polygon)
# reshape is needed here because scalars don't play along nicely with concat ops.
polygons_per_image = tf.reshape(tf.size(coordinates_per_polygon), shape=(1,))
return polygon_vertices, coordinates_per_polygon, class_ids_per_polygon, \
polygons_per_image
def rasterize_labels_bbox_2d_label(self, batch_labels, mask=None, mask_multiplier=1.0):
"""Setup the rasterized loss mask for a given set of ground truth labels.
Args:
batch_labels (Bbox2DLabel): Filtered labels, which only incorporates bboxes matching
filters for all frames in a batch.
mask (Tensor): Where nonzero, the mask_multiplier is ignored (mask multiplier is set
                to the background value, 1.0). Defaults to None, in which case the
                mask_multiplier is never ignored.
mask_multiplier (float): Scalar value that will be assigned to each region in a set
of ground truth labels. Default value of 1.0 means the output is all filled with
ones, essentially meaning all regions of the network's output are treated equally.
Returns:
loss_mask (tf.Tensor): rasterized loss mask corresponding to the input labels.
"""
vertices, vertex_counts, ids, polygons_per_image = \
self.translate_frame_labels_bbox_2d_label(batch_labels)
polygon_raster = self._rasterizer(
polygon_vertices=vertices,
vertex_counts_per_polygon=vertex_counts,
class_ids_per_polygon=ids,
polygons_per_image=polygons_per_image
)
# Outside the input labels, the loss mask should have a value of 1.0 (i.e. the loss will
# be treated as usual in those cells).
ones, zeros = tf.ones_like(polygon_raster), tf.zeros_like(polygon_raster)
# If a mask exists, zero the polygon raster where the mask is nonzero.
if mask is not None:
objective_mask = tf.where(mask > 0., zeros, ones)
polygon_raster *= objective_mask
# Set all foreground values to the value of mask_multiplier.
background = tf.where(polygon_raster > 0., zeros, ones)
loss_mask = background + mask_multiplier * polygon_raster
return loss_mask
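    # Worked example of the masking arithmetic above: in a cell where the
    # polygon raster is 1.0, loss_mask = 0.0 + mask_multiplier * 1.0
    # = mask_multiplier; in a background cell the raster is 0.0, so
    # loss_mask = 1.0 + mask_multiplier * 0.0 = 1.0 and the loss is weighted
    # as usual.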
def rasterize_labels_dict(self, batch_labels, mask=None, mask_multiplier=1.0):
"""Setup the rasterized loss mask for a given set of ground truth labels.
Args:
batch_labels (list of dicts of Tensors): contains the labels for a batch of frames.
mask (Tensor): Where nonzero, the mask_multiplier is ignored (mask multiplier is set
                to the background value, 1.0). Defaults to None, in which case the
                mask_multiplier is never ignored.
mask_multiplier (float): Scalar value that will be assigned to each region in a set
of ground truth labels. Default value of 1.0 means the output is all filled with
ones, essentially meaning all regions of the network's output are treated equally.
Returns:
loss_mask (tf.Tensor): rasterized loss mask corresponding to the input labels.
"""
batch_polygon_vertices = []
batch_vertex_counts_per_polygon = []
batch_class_ids_per_polygon = []
batch_polygons_per_image = []
for frame_labels in batch_labels:
# Get the rasterizer inputs for the new frame.
_polygon_vertices, _vertex_counts_per_polygon, _class_ids_per_polygon, \
_polygons_per_image = self.translate_frame_labels_dict(frame_labels)
# Update the batch's inputs.
batch_polygon_vertices.append(_polygon_vertices)
batch_vertex_counts_per_polygon.append(_vertex_counts_per_polygon)
batch_class_ids_per_polygon.append(_class_ids_per_polygon)
batch_polygons_per_image.append(_polygons_per_image)
# Concatenate them to pass as single tensors to the rasterizer.
polygon_vertices = tf.concat(batch_polygon_vertices, axis=0)
vertex_counts_per_polygon = tf.concat(batch_vertex_counts_per_polygon, axis=0)
class_ids_per_polygon = tf.concat(batch_class_ids_per_polygon, axis=0)
polygons_per_image = tf.concat(batch_polygons_per_image, axis=0)
polygon_raster = self._rasterizer(polygon_vertices=polygon_vertices,
vertex_counts_per_polygon=vertex_counts_per_polygon,
class_ids_per_polygon=class_ids_per_polygon,
polygons_per_image=polygons_per_image)
# Outside the input labels, the loss mask should have a value of 1.0 (i.e. the loss will
# be treated as usual in those cells).
ones, zeros = tf.ones_like(polygon_raster), tf.zeros_like(polygon_raster)
# If a mask exists, zero the polygon raster where the mask is nonzero.
if mask is not None:
objective_mask = tf.where(mask > 0., zeros, ones)
polygon_raster *= objective_mask
# Set all foreground values to the value of mask_multiplier.
background = tf.where(polygon_raster > 0., zeros, ones)
loss_mask = background + mask_multiplier * polygon_raster
return loss_mask
def rasterize_labels(self, batch_labels, mask=None, mask_multiplier=1.0):
"""Setup the rasterized loss mask for a given set of ground truth labels.
Args:
batch_labels (list of dicts of Tensors or Bbox2DLabel): If it were list of dicts of
tensors, it contains the labels for a batch of frames. If it were Bbox2DLabel,
it contains filtered labels for all frames in a batch.
mask (Tensor): Where nonzero, the mask_multiplier is ignored (mask multiplier is set
                to the background value, 1.0). Defaults to None, in which case the
                mask_multiplier is never ignored.
mask_multiplier (float): Scalar value that will be assigned to each region in a set
of ground truth labels. Default value of 1.0 means the output is all filled with
ones, essentially meaning all regions of the network's output are treated equally.
Returns:
loss_mask (tf.Tensor): rasterized loss mask corresponding to the input labels.
"""
loss_mask_tensors = None
if isinstance(batch_labels, list):
loss_mask_tensors = self.rasterize_labels_dict(batch_labels, mask, mask_multiplier)
elif isinstance(batch_labels, Bbox2DLabel):
loss_mask_tensors = self.rasterize_labels_bbox_2d_label(batch_labels, mask,
mask_multiplier)
else:
raise ValueError("Unsupported type.")
return loss_mask_tensors
def __call__(self, loss_mask_batch_labels, ground_truth_tensors=None, mask_multiplier=1.0):
"""Method that users will call to generate necessary loss masks.
Args:
loss_mask_batch_labels (nested dict): for now, has two levels:
[target_class_name][objective_name]. The leaf values are the corresponding filtered
ground truth labels in tf.Tensor for a batch of frames.
mask_multiplier (float): Scalar value that will be assigned to each region in a set
of ground truth labels. Default value of 1.0 means the output is all filled with
ones, essentially meaning all regions of the network's output are treated equally.
Returns:
loss_masks (nested dict): Follows the same hierarchy as the input. Each leaf value
is the loss mask in tf.Tensor form for the corresponding filter.
"""
loss_masks = dict()
for target_class_name in loss_mask_batch_labels:
if target_class_name not in loss_masks:
loss_masks[target_class_name] = dict()
for objective_name, batch_labels in \
six.iteritems(loss_mask_batch_labels[target_class_name]):
ground_truth_mask = ground_truth_tensors[target_class_name]['cov'] \
if ground_truth_tensors is not None else None
loss_masks[target_class_name][objective_name] = \
self.rasterize_labels(batch_labels,
mask=ground_truth_mask,
mask_multiplier=mask_multiplier)
return loss_masks
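# Minimal call sketch for LossMaskRasterizer; the dimensions, hierarchy keys
# and multiplier below are illustrative (in practice the labels come from a
# LossMaskFilter):
#
#   rasterizer = LossMaskRasterizer(input_width=960, input_height=544,
#                                   output_width=60, output_height=34)
#   loss_masks = rasterizer({'car': {'bbox': batch_labels}},
#                           mask_multiplier=0.0)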
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/rasterizers/loss_mask_rasterizer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test loss mask rasterizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
import pytest
import tensorflow as tf
from nvidia_tao_tf1.blocks.multi_source_loader.types import Bbox2DLabel
from nvidia_tao_tf1.blocks.multi_source_loader.types import Coordinates2D
import nvidia_tao_tf1.core as tao_core
from nvidia_tao_tf1.cv.detectnet_v2.rasterizers.bbox_rasterizer import BboxRasterizer
from nvidia_tao_tf1.cv.detectnet_v2.rasterizers.bbox_rasterizer import BboxRasterizerInput
from nvidia_tao_tf1.cv.detectnet_v2.rasterizers.bbox_rasterizer_config import BboxRasterizerConfig
Canvas2D = tao_core.types.Canvas2D
INPUT_HEIGHT = 6
INPUT_WIDTH = 15
class TestBboxRasterizer:
@pytest.fixture(scope='function')
def bbox_rasterizer(self):
"""Instantiate a BboxRasterizer."""
bbox_rasterizer_config = BboxRasterizerConfig(deadzone_radius=0.67)
bbox_rasterizer_config['car'] = \
BboxRasterizerConfig.TargetClassConfig(
cov_center_x=0.5, cov_center_y=0.5, cov_radius_x=1.0, cov_radius_y=1.0,
bbox_min_radius=1.0)
bbox_rasterizer_config['person'] = \
BboxRasterizerConfig.TargetClassConfig(
cov_center_x=0.5, cov_center_y=0.5, cov_radius_x=0.5, cov_radius_y=0.5,
bbox_min_radius=1.0)
bbox_rasterizer = BboxRasterizer(
input_width=INPUT_WIDTH, input_height=INPUT_HEIGHT, output_width=5, output_height=3,
target_class_names=['car', 'person'], bbox_rasterizer_config=bbox_rasterizer_config,
target_class_mapping={'pedestrian': 'person', 'automobile': 'car', 'van': 'car'})
return bbox_rasterizer
def test_bbox_from_rumpy_params(self, bbox_rasterizer):
"""Test that the bbox matrix, coverage radius, and inverse bbox area are correct.
Args:
bbox_rasterizer: BboxRasterizer obtained from above fixture.
"""
xmin, ymin = tf.constant([1.0]), tf.constant([2.0])
xmax, ymax = tf.constant([3.0]), tf.constant([4.0])
cov_center_x, cov_center_y = tf.constant([0.5]), tf.constant([0.5])
cov_radius_x, cov_radius_y = tf.constant([0.6]), tf.constant([0.6])
bbox_min_radius = tf.constant([0.5])
deadzone_radius = 1.0
mat, cov_radius, inv_bbox_area = bbox_rasterizer.bbox_from_rumpy_params(
xmin=xmin, ymin=ymin, xmax=xmax, ymax=ymax,
cov_center_x=cov_center_x, cov_center_y=cov_center_y,
cov_radius_x=cov_radius_x, cov_radius_y=cov_radius_y,
bbox_min_radius=bbox_min_radius, deadzone_radius=deadzone_radius)
with tf.compat.v1.Session() as sess:
mat, cov_radius, inv_bbox_area = sess.run(
[mat, cov_radius, inv_bbox_area])
# Check values are as expected.
assert np.allclose(cov_radius, np.array([0.6, 0.6], dtype=np.float32))
# bbox area = 2 * cov_radius_x * 2 * cov_radius_y in this case.
assert np.allclose(inv_bbox_area, np.array(
[1.0 / (4.0 * 0.36)], dtype=np.float32))
        # These should be the center coordinates * -1.0 / deadzone_radius.
assert np.allclose(mat, np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [-2.0, -3.0, 1.0]],
dtype=np.float32))
def labels(self, dataloader_type, sequence_length, output_type):
"""Create test labels.
Args:
dataloader_type (str): Dataloader type: 'old' or 'common'.
sequence_length (int): Sequence length.
output_type (str): Output type for sequence models: 'last' or 'all'.
Returns:
dict or Bbox2DLabel-namedtuple depending on the dataloader_type.
"""
object_class = tf.constant(
['pedestrian', 'automobile', 'van', 'unmapped'])
bbox_coordinates = tf.constant(
[[7.0, 6.0, 8.0, 9.0],
[2.0, 3.0, 4.0, 5.0],
[0.0, 0.0, 3.0, 4.0],
[1.2, 3.4, 5.6, 7.8]])
world_bbox_z = tf.constant([1.0, 2.0, 3.0, -1.0])
if sequence_length == 1 or output_type == 'last':
sequence_range = [sequence_length-1]
else:
sequence_range = range(sequence_length)
object_class_2 = tf.constant(
['automobile', 'pedestrian', 'unmapped', 'unmapped'])
bbox_coordinates_2 = tf.constant(
[[2.0, 3.0, 4.0, 5.0],
[7.0, 6.0, 8.0, 9.0],
[0.0, 0.0, 3.0, 4.0],
[1.2, 3.4, 5.6, 7.8]])
world_bbox_z_2 = tf.constant([1.0, 2.0, 3.0, -1.0])
object_class = tf.concat([object_class_2, object_class], 0)
bbox_coordinates = tf.concat(
[bbox_coordinates_2, bbox_coordinates], 0)
world_bbox_z = tf.concat([world_bbox_z_2, world_bbox_z], 0)
if dataloader_type == 'old':
labels = {
'target/object_class': object_class,
'target/bbox_coordinates': bbox_coordinates,
'target/world_bbox_z': world_bbox_z}
elif dataloader_type == 'common':
canvas_shape = Canvas2D(height=tf.ones([1, sequence_length, INPUT_HEIGHT]),
width=tf.ones([1, sequence_length, INPUT_WIDTH]))
sparse_coordinates = tf.SparseTensor(
values=tf.reshape(bbox_coordinates, [-1]),
dense_shape=[1, sequence_length, 4, 2, 2],
indices=[[0, s, i, j, k]
for s in sequence_range
for i in range(4)
for j in range(2)
for k in range(2)])
sparse_object_class = tf.SparseTensor(
values=object_class,
dense_shape=[1, sequence_length, 4],
indices=[[0, s, i]
for s in sequence_range
for i in range(4)])
sparse_world_bbox_z = tf.SparseTensor(
values=world_bbox_z,
dense_shape=[1, sequence_length, 4],
indices=[[0, s, i]
for s in sequence_range
for i in range(4)])
# Initialize all fields to empty lists (to signify 'optional' fields).
bbox_2d_label_kwargs = {field_name: []
for field_name in Bbox2DLabel._fields}
bbox_2d_label_kwargs.update({
'frame_id': tf.constant('bogus'),
'object_class': sparse_object_class,
'vertices': Coordinates2D(
coordinates=sparse_coordinates, canvas_shape=canvas_shape),
'world_bbox_z': sparse_world_bbox_z})
labels = Bbox2DLabel(**bbox_2d_label_kwargs)
return labels
@pytest.mark.parametrize(
"dataloader_type,sequence_length,output_type",
[('old', 1, None), ('common', 1, None),
('common', 1, 'last'), ('common', 1, 'all'),
('common', 2, 'last'), ('common', 2, 'all')])
@pytest.mark.parametrize(
"exp_num_bboxes,exp_bbox_class_ids,exp_bbox_matrices,exp_bbox_coverage_radii,"
"exp_bbox_flags,exp_inv_bbox_area,exp_output_space_coordinates,exp_object_class,"
"exp_world_bbox_z",
# Define sequence of two outputs. If sequence_length == 1, only the latter test
# case is used.
[
(
[2, 3],
[[0, 1], [1, 0, 0]],
[np.array([[[1.0, 0.0, 0.0],
[0.0, 0.66666675, 0.0],
[-1.0, -1.3333335, 1.0]],
[[1.0, 0.0, 0.0],
[0.0, 0.26666668, 0.0],
[-2.5, -1.0, 1.0]],
], dtype=np.float32),
np.array([[[1.0, 0.0, 0.0],
[0.0, 0.26666668, 0.0],
[-2.5, -1.0, 1.0]],
[[1.0, 0.0, 0.0],
[0.0, 0.66666675, 0.0],
[-1.0, -1.3333335, 1.0]],
[[1.0, 0.0, 0.0],
[0.0, 0.5, 0.0],
[-0.5, -0.5, 1.0]],
], dtype=np.float32)], # end exp_bbox_matrices
[np.ones((2, 2), dtype=np.float32),
np.ones((3, 2), dtype=np.float32)], # exp_bbox_coverage_radii
[np.ones((2,), dtype=np.uint8),
np.ones((3,), dtype=np.uint8)], # exp_bbox_flags
[np.array([0.16666669, 0.06666667], dtype=np.float32),
# end exp_inv_bbox_area
np.array([0.06666667, 0.16666669, 0.125], dtype=np.float32)],
[np.array([[0.6666667, 2.3333335],
[1.5, 3.],
[1.3333334, 2.6666667],
[2.5, 4.5]], dtype=np.float32),
np.array([[2.3333335, 0.6666667, 0.0],
[3., 1.5, 0.0],
[2.6666667, 1.3333334, 1.0],
[4.5, 2.5, 2.0]],
dtype=np.float32)], # exp_output_space_coordinates
[['car', 'person'],
# exp_object_class. These should now be mapped and filtered.
['person', 'car', 'car']],
[np.array([1.0, 2.0], dtype=np.float32),
np.array([1.0, 2.0, 3.0], dtype=np.float32)], # exp_world_bbox_z
)
]
)
def test_get_target_gradient_info(
self, bbox_rasterizer, dataloader_type, sequence_length, output_type,
exp_num_bboxes, exp_bbox_class_ids, exp_bbox_matrices, exp_bbox_coverage_radii,
exp_bbox_flags, exp_inv_bbox_area, exp_output_space_coordinates, exp_object_class,
exp_world_bbox_z):
"""Test that the inputs for the SDK are correctly computed.
Args:
bbox_rasterizer: BboxRasterizer obtained from above fixture.
dataloader_type (str): Dataloader type: 'old' or 'common'.
sequence_length (int): Sequence length.
output_type (str): Output type for sequence models: 'last' or 'all'.
exp_num_bboxes (int): Expected number of bboxes.
exp_bbox_class_ids (list): Expected class ids (int).
exp_bbox_matrices (np.array): Expected bbox matrices.
exp_bbox_coverage_radii (list): Expected coverage radii (float).
exp_bbox_flags (list): Expected bbox flags.
exp_inv_bbox_area (np.array): Expected inverse bbox areas.
exp_output_space_coordinates (np.array): Expected coordinates of the bboxes in the
model output space.
exp_object_class (list): Expected class names.
exp_world_bbox_z (np.array): Expected depth coordinates.
"""
labels = self.labels(dataloader_type, sequence_length, output_type)
# Expected label sequence length.
exp_sequence_length = sequence_length if output_type == 'all' else 1
bbox_rasterizer.output_type = output_type
_inputs = bbox_rasterizer.get_target_gradient_info(labels)
# Need to initialize lookup tables.
tables_initializer = tf.compat.v1.tables_initializer()
with tf.compat.v1.Session() as sess:
sess.run(tables_initializer)
num_bboxes, bbox_class_ids, bbox_matrices, bbox_coverage_radii, bbox_flags, \
gradient_info = sess.run([_inputs.num_bboxes, _inputs.bbox_class_ids,
_inputs.bbox_matrices, _inputs.bbox_coverage_radii,
_inputs.bbox_flags, _inputs.gradient_info])
assert (num_bboxes == np.array(
exp_num_bboxes[-exp_sequence_length:])).all()
assert (bbox_class_ids == np.array(list(itertools.chain.from_iterable(
exp_bbox_class_ids[-exp_sequence_length:])))).all()
assert np.allclose(bbox_matrices,
np.concatenate(exp_bbox_matrices[-exp_sequence_length:], axis=0))
assert np.allclose(bbox_coverage_radii,
np.concatenate(exp_bbox_coverage_radii[-exp_sequence_length:], axis=0))
assert (bbox_flags == np.concatenate(
exp_bbox_flags[-exp_sequence_length:], axis=0)).all()
assert np.allclose(gradient_info['target/inv_bbox_area'],
np.concatenate(exp_inv_bbox_area[-exp_sequence_length:], axis=0))
assert np.allclose(gradient_info['target/output_space_coordinates'],
np.concatenate(exp_output_space_coordinates[-exp_sequence_length:],
axis=1))
assert gradient_info['target/object_class'].astype(str).tolist() == \
list(itertools.chain.from_iterable(
exp_object_class[-exp_sequence_length:]))
assert np.allclose(gradient_info['target/world_bbox_z'],
np.concatenate(exp_world_bbox_z[-exp_sequence_length:], axis=0))
@pytest.fixture(scope='function', params=['old', 'common'])
def rasterize_labels_input(self, request):
"""Prepare inputs to the rasterize_labels() method."""
if request.param == 'old':
batch_bbox_rasterizer_input, batch_gradients = [], []
batch_bbox_rasterizer_input.append(
BboxRasterizerInput(
num_bboxes=tf.constant(1),
bbox_class_ids=tf.constant([1]), # person
bbox_matrices=tf.constant(
[[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [-2.0, -3.0, 1.0]]]),
bbox_coverage_radii=tf.constant([[0.5, 0.5]]),
bbox_flags=tf.fill(
[1], tf.cast(tao_core.processors.BboxRasterizer.DRAW_MODE_ELLIPSE,
tf.uint8)),
bbox_sort_values=tf.constant([0.]),
# Not needed since we are bypassing ObjectiveSet.
gradient_info=[]
))
# Like cov objective.
batch_gradients.append(tf.constant([[[0., 0., 1.]]]))
batch_bbox_rasterizer_input.append(
BboxRasterizerInput(
num_bboxes=tf.constant(1),
bbox_class_ids=tf.constant([0]), # car
bbox_matrices=tf.constant(
[[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [-3.0, -2.0, 1.0]]]),
bbox_coverage_radii=tf.constant([[0.5, 0.5]]),
bbox_flags=tf.fill(
[1], tf.cast(tao_core.processors.BboxRasterizer.DRAW_MODE_ELLIPSE,
tf.uint8)),
bbox_sort_values=tf.constant([0.]),
# Not needed since we are bypassing ObjectiveSet.
gradient_info=[]
))
# Like cov objective.
batch_gradients.append(tf.constant([[[0., 0., 1.]]]))
else:
batch_bbox_rasterizer_input = BboxRasterizerInput(
num_bboxes=tf.constant([1, 1]),
bbox_class_ids=tf.constant([1, 0]), # person, car.
bbox_matrices=tf.constant(
[[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [-2.0, -3.0, 1.0]],
[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [-3.0, -2.0, 1.0]]]),
bbox_coverage_radii=tf.constant([[0.5, 0.5], [0.5, 0.5]]),
bbox_flags=tf.fill([2], tf.cast(tao_core.processors.BboxRasterizer.DRAW_MODE_ELLIPSE,
tf.uint8)),
bbox_sort_values=tf.constant([0., 0.]),
gradient_info=[])
batch_gradients = tf.constant([[[0., 0., 1.]], [[0., 0., 1.]]])
return batch_bbox_rasterizer_input, batch_gradients
def test_rasterize_labels(self, bbox_rasterizer, rasterize_labels_input):
"""Test the rasterize_labels method."""
batch_bbox_rasterizer_input, batch_gradients = rasterize_labels_input
rasterized_tensors = bbox_rasterizer.rasterize_labels(
batch_bbox_rasterizer_input=batch_bbox_rasterizer_input,
batch_gradients=batch_gradients,
num_gradients=1,
gradient_flag=tao_core.processors.BboxRasterizer.GRADIENT_MODE_MULTIPLY_BY_COVERAGE,
)
expected_raster = np.zeros((2, 2, 1, 3, 5), dtype=np.float32)
# Only a few output indices are non zero, and all equal to 0.1875.
expected_value = 0.1875
expected_raster[0, 1, 0, 2, 1:3] = expected_value
expected_raster[1, 0, 0, 1:3, 2:4] = expected_value
with tf.compat.v1.Session() as sess:
raster = sess.run(rasterized_tensors)
assert np.allclose(raster, expected_raster)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/rasterizers/tests/test_bbox_rasterizer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test loss mask rasterizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pytest
import tensorflow as tf
from nvidia_tao_tf1.blocks.multi_source_loader import types
import nvidia_tao_tf1.core
from nvidia_tao_tf1.cv.detectnet_v2.rasterizers.loss_mask_rasterizer import LossMaskRasterizer
Canvas2D = nvidia_tao_tf1.core.types.Canvas2D
def _get_batch_vertices(batch_x, batch_y, batch_index):
"""Generate a set of labels for a batch of frames.
Args:
batch_x/batch_y/batch_index (list of lists): Outer list is for each frame, inner lists
contain the coordinates of vertices and their polygon indices in that frame.
Returns:
batch_labels (list): Each element is a ground truth labels dict.
"""
# Check they have the same number of 'frames'.
assert len(batch_x) == len(batch_y)
batch_labels = []
for frame_idx in range(len(batch_x)):
coordinates_x = batch_x[frame_idx]
coordinates_y = batch_y[frame_idx]
coordinates_index = batch_index[frame_idx]
# Check the coordinate lists have the same number of elements.
assert len(coordinates_x) == len(
coordinates_y) == len(coordinates_index)
_coordinates_x = tf.constant(coordinates_x, dtype=tf.float32)
_coordinates_y = tf.constant(coordinates_y, dtype=tf.float32)
_coordinates_index = tf.constant(coordinates_index, dtype=tf.int64)
batch_labels.append({
'target/coordinates/x': _coordinates_x,
'target/coordinates/y': _coordinates_y,
'target/coordinates/index': _coordinates_index
})
return batch_labels
def _get_bbox_2d_labels():
"""Bbox2DLabel for test preparation."""
frame_indices = [0, 1, 2, 3, 3]
object_class = tf.constant(
['pedestrian', 'unmapped', 'automobile', 'truck', 'truck'])
bbox_coordinates = tf.constant(
[7.0, 6.0, 8.0, 9.0,
2.0, 3.0, 4.0, 5.0,
0.0, 0.0, 3.0, 4.0,
1.2, 3.4, 5.6, 7.8,
4.0, 4.0, 10.0, 10.0])
world_bbox_z = tf.constant([1.0, 2.0, 3.0, -1.0, -2.0])
front = tf.constant([0.5, 1.0, -0.5, -1.0, 0.5])
back = tf.constant([-1.0, 0.0, 0.0, 0.63, -1.0])
canvas_shape = Canvas2D(height=tf.ones([1, 12]), width=tf.ones([1, 12]))
sparse_coordinates = tf.SparseTensor(
values=bbox_coordinates,
dense_shape=[5, 5, 2, 2],
indices=[[f, 0, j, k]
for f in frame_indices
for j in range(2)
for k in range(2)])
sparse_object_class = tf.SparseTensor(
values=object_class,
dense_shape=[5, 5, 1],
indices=[[f, 0, 0]
for f in frame_indices])
sparse_world_bbox_z = tf.SparseTensor(
values=world_bbox_z,
dense_shape=[5, 5, 1],
indices=[[f, 0, 0]
for f in frame_indices])
sparse_front = tf.SparseTensor(
values=front,
dense_shape=[5, 5, 1],
indices=[[f, 0, 0]
for f in frame_indices])
sparse_back = tf.SparseTensor(
values=back,
dense_shape=[5, 5, 1],
indices=[[f, 0, 0]
for f in frame_indices])
source_weight = [tf.constant(2.0, tf.float32)]
# Initialize all fields to empty lists (to signify 'optional' fields).
bbox_2d_label_kwargs = {field_name: []
for field_name in types.Bbox2DLabel._fields}
bbox_2d_label_kwargs.update({
'frame_id': tf.constant('bogus'),
'object_class': sparse_object_class,
'vertices': types.Coordinates2D(
coordinates=sparse_coordinates, canvas_shape=canvas_shape),
'world_bbox_z': sparse_world_bbox_z,
'front': sparse_front,
'back': sparse_back,
'source_weight': source_weight})
return types.Bbox2DLabel(**bbox_2d_label_kwargs)
class TestLossMaskRasterizer:
def test_loss_mask_rasterizer_setup(self):
"""Test that the LossMaskRasterizer setup follows the input hierarchy."""
# Instantiate a LossMaskRasterizer.
loss_mask_rasterizer = LossMaskRasterizer(
input_width=1,
input_height=2,
output_width=3,
output_height=4
)
# Get some dummy labels for old data format.
batch_x = [[1., 7., 7., 1., 2., 8., 8., 2., 3., 9., 9., 3.]]
batch_y = [[4., 4., 10., 10., 5., 5., 11., 11., 6., 6., 12., 12.]]
batch_idx = [[0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2]]
loss_mask_batch_labels = \
{'depth': # Outer level is 'objective'.
{'person': # Second level is 'target class'.
_get_batch_vertices(batch_x, batch_y, batch_idx),
'car': _get_batch_vertices(batch_x, batch_y, batch_idx)},
'bbox': {'road_sign': _get_batch_vertices(batch_x, batch_y, batch_idx)}}
loss_mask_tensors = loss_mask_rasterizer(loss_mask_batch_labels)
# Check that the output keeps the same 'hierarchy' on rasterizing labels of old data.
assert set(loss_mask_batch_labels.keys()) == set(
loss_mask_tensors.keys())
for objective_name in loss_mask_batch_labels:
assert set(loss_mask_batch_labels[objective_name].keys()) == \
set(loss_mask_tensors[objective_name].keys())
# Re-instantiate the rasterizer for larger input and output size.
loss_mask_rasterizer2 = LossMaskRasterizer(
input_width=20,
input_height=22,
output_width=10,
output_height=11
)
# Get dummy labels for bbox2d_label.
loss_mask_batch_labels2 = \
{'depth': # Outer level is 'objective'.
{'person': # Second level is 'target class'.
_get_bbox_2d_labels(),
'car': _get_bbox_2d_labels()},
'bbox': {'road_sign': _get_bbox_2d_labels()}}
loss_mask_tensors2 = loss_mask_rasterizer2(loss_mask_batch_labels2)
# Check that the output keeps the same 'hierarchy' on rasterizing labels of bbox2d_label.
        assert set(loss_mask_batch_labels2.keys()) == set(
            loss_mask_tensors2.keys())
        for objective_name in loss_mask_batch_labels2:
            assert set(loss_mask_batch_labels2[objective_name].keys()) == \
                set(loss_mask_tensors2[objective_name].keys())
def _get_expected_rasterizer_args(self, coords_x, coords_y, coords_idx):
"""Helper function that generates the expected inputs to the rasterizer.
Args:
coords_x/coords_y/coords_idx (list): Contain the coordinates and index of polygons in a
frame.
Returns:
            polygon_vertices: List of [x, y] coordinates of all polygon vertices.
            vertex_counts_per_polygon: Number of vertices in each polygon.
            class_ids_per_polygon: Class id for each polygon (all zeros here).
            polygons_per_image: Number of polygons in each image.
"""
polygon_vertices = []
        for i in range(len(coords_x)):
            polygon_vertices.append([coords_x[i], coords_y[i]])
vertex_counts_per_polygon = np.bincount(coords_idx)
polygons_per_image = [len(vertex_counts_per_polygon)]
class_ids_per_polygon = [0] * len(vertex_counts_per_polygon)
return polygon_vertices, vertex_counts_per_polygon, class_ids_per_polygon, \
polygons_per_image
@pytest.mark.parametrize(
"input_width,input_height,output_width,output_height,batch_x,batch_y,batch_idx",
[
# First test case is without scaling.
(10, 10, 10, 10,
[1., 7., 7., 1., 2., 8., 8., 2., 3.,
9., 9., 3.], # batch x-coordinates
[4., 4., 10., 10., 5., 5., 11., 11., 6.,
6., 12., 12.], # batch y-coordinates
# batch coordinate indices
[0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2],
),
# Now scale by half along both dimensions.
(10, 10, 5, 5,
[1., 7., 7., 1., 2., 8., 8., 2., 3.,
9., 9., 3.], # batch x-coordinates
[4., 4., 10., 10., 5., 5., 11., 11., 6.,
6., 12., 12.], # batch y-coordinates
# batch coordinate indices
[0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2],
),
# Now scale by 2.2 along both dimensions.
(10, 10, 22, 22,
[1., 7., 7., 1., 2., 8., 8., 2., 3.,
9., 9., 3.], # batch x-coordinates
[4., 4., 10., 10., 5., 5., 11., 11., 6.,
6., 12., 12.], # batch y-coordinates
# batch coordinate indices
[0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2],
),
            # Now scale by different factors along each dimension.
(10, 10, 22, 8,
[1., 7., 7., 1., 2., 8., 8., 2., 3.,
9., 9., 3.], # batch x-coordinates
[4., 4., 10., 10., 5., 5., 11., 11., 6.,
6., 12., 12.], # batch y-coordinates
# batch coordinate indices
[0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2],
)
]
)
def test_loss_mask_label_translation_dict(self,
input_width,
input_height,
output_width,
output_height,
batch_x, batch_y, batch_idx):
"""Test that the args passed to the PolygonRasterizer are sane.
Args:
input_width/height (int): Input image dimensions.
output_width/height (int): Output raster dimensions.
            batch_x/batch_y/batch_idx (list): Coordinates of the polygon vertices and their
                polygon indices in a frame.
"""
loss_mask_rasterizer = LossMaskRasterizer(
input_width=input_width,
input_height=input_height,
output_width=output_width,
output_height=output_height
)
# Get some dummy labels for a single frame.
loss_mask_frame_labels = _get_batch_vertices(
[batch_x], [batch_y], [batch_idx])[0]
scale_x = output_width / input_width
scale_y = output_height / input_height
scaled_x = [_x * scale_x for _x in batch_x]
scaled_y = [_y * scale_y for _y in batch_y]
expected_args = \
self._get_expected_rasterizer_args(scaled_x, scaled_y, batch_idx)
computed_args = loss_mask_rasterizer.translate_frame_labels_dict(
loss_mask_frame_labels)
# Check that the translation works as expected.
with tf.compat.v1.Session() as sess:
for i in range(len(expected_args)):
np.testing.assert_allclose(
np.array(expected_args[i]), sess.run(computed_args[i]))
@pytest.mark.parametrize(
"input_width,input_height,output_width,output_height,exp_vertx,exp_verty,exp_polygon_num",
[
# First test case is without scaling.
(11, 13, 11, 13,
[7., 8., 8., 7., 2., 4., 4., 2., 0., 3., 3.,
0., 1.2, 5.6, 5.6, 1.2, 4., 10., 10., 4.],
[6., 6., 9., 9., 3., 3., 5., 5., 0., 0., 4.,
4., 3.4, 3.4, 7.8, 7.8, 4., 4., 10., 10.],
[1, 1, 1, 2, 0],
),
            # Now scale by 2 along both dimensions (output_width / input_width = 22 / 11 = 2).
(11, 11, 22, 22,
[14., 16., 16., 14., 4., 8., 8., 4., 0., 6., 6., 0., 2.4, 11.2, 11.2, 2.4, 8.,
20., 20., 8.],
[12., 12., 18., 18., 6., 6., 10., 10., 0., 0., 8., 8., 6.8, 6.8, 15.6, 15.6,
8., 8., 20., 20.],
[1, 1, 1, 2, 0],
),
]
)
def test_loss_mask_label_translation_bbox_2d_label(self,
input_width,
input_height,
output_width,
output_height,
exp_vertx,
exp_verty,
exp_polygon_num):
"""Test that the args passed to the PolygonRasterizer are sane.
Args:
input_width/height (int): Input image dimensions.
output_width/height (int): Output raster dimensions.
            exp_vertx (list): Expected x-coordinates of the polygon vertices.
            exp_verty (list): Expected y-coordinates of the polygon vertices.
            exp_polygon_num (list): Expected number of polygons for each frame.
"""
loss_mask_rasterizer = LossMaskRasterizer(
input_width=input_width,
input_height=input_height,
output_width=output_width,
output_height=output_height
)
# Get some dummy labels for a single frame.
loss_mask_frame_labels = _get_bbox_2d_labels()
polygon_vertices, vertex_counts_per_polygon, class_ids_per_polygon, polygons_per_image = \
loss_mask_rasterizer.translate_frame_labels_bbox_2d_label(
loss_mask_frame_labels)
# Check that the translation works as expected.
with tf.compat.v1.Session() as sess:
polygon_vertices_output = sess.run(polygon_vertices)
np.testing.assert_allclose(
polygon_vertices_output[:, 0], np.array(exp_vertx))
np.testing.assert_allclose(
polygon_vertices_output[:, 1], np.array(exp_verty))
vertex_counts_output = sess.run(vertex_counts_per_polygon)
np.testing.assert_equal(
vertex_counts_output, np.array([4] * len(exp_polygon_num)))
class_ids_output = sess.run(class_ids_per_polygon)
np.testing.assert_equal(
class_ids_output, np.array([0] * len(exp_polygon_num)))
polygons_per_image_output = sess.run(polygons_per_image)
np.testing.assert_equal(
polygons_per_image_output, np.array(exp_polygon_num))
# TODO(@williamz): Could consider saving the rasters like in maglev/processors/
# test_bbox_rasterizer_ref?
@pytest.mark.parametrize(
"input_width,input_height,output_width,output_height,mask_multiplier,"
"batch_x,batch_y,batch_idx,expected_mask",
[
# Case 1: First, use a single frame.
(10, 10, 5, 5, 0.0,
# batch x-coordinates
[[1., 7., 7., 1., 2., 8., 8., 2., 3., 9., 9., 3.]],
# batch y-coordinates
[[4., 4., 10., 10., 5., 5., 11., 11., 6., 6., 12., 12.]],
# batch coordinate indices
[[0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2]],
np.array([[[[1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1.],
[0., 0., 0., 0., 1.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]]]], dtype=np.float32)
), # ----- End case 1.
            # Case 2: two frames; all boxes in the second frame are out of bounds.
(10, 10, 5, 5, 1.5,
[[1., 7., 7., 1., 2., 8., 8., 2., 3., 9., 9., 3.],
# batch x-coordinates
[13., 19., 19., 13., 14., 20., 20., 14., 15., 21., 21., 15.]],
[[4., 4., 10., 10., 5., 5., 11., 11., 6., 6., 12., 12.],
# batch y-coordinates
[16., 16., 22., 22., 17., 17., 23., 23., 18., 18., 24., 24.]],
[[0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2],
[0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2]], # batch coordinate indices
np.array([[[[1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1.],
# Reminder: 1.5 is the multiplier for this case.
[1.5, 1.5, 1.5, 1.5, 1.],
[1.5, 1.5, 1.5, 1.5, 1.5],
[1.5, 1.5, 1.5, 1.5, 1.5]]], # end of first frame.
# Nothing in second frame.
[np.ones((5, 5)).tolist()]
], dtype=np.float32)
), # ----- End case 2.
# Case 3: Test empty frame.
(10, 10, 5, 5, 2.0, # Should not matter.
[[]], [[]], [[]], # Empty batch_x/y/coordinates.
np.ones((5, 5), dtype=np.float32).reshape(1, 1, 5, 5)
) # ----- End case 3.
]
)
def test_loss_mask_rasters_dict(self,
input_width,
input_height,
output_width,
output_height,
mask_multiplier,
batch_x, batch_y, batch_idx,
expected_mask):
"""Test that the masks produced by the LossMaskRasterizer are sane.
Args:
input_width/height (int): Input image dimensions.
output_width/height (int): Output raster dimensions.
mask_multiplier (float): Value that should be present in the loss masks.
            batch_x/batch_y/batch_idx (list of lists): Outer list is for each frame, inner lists
                contain the coordinates of bboxes and their polygon indices in that frame.
            expected_mask (np.array): Array of shape
                [len(batch_x), 1, output_height, output_width], which is the 'golden' truth
                against which the raster will be compared.
"""
loss_mask_rasterizer = LossMaskRasterizer(
input_width=input_width,
input_height=input_height,
output_width=output_width,
output_height=output_height,
)
# Get some dummy labels.
loss_mask_batch_labels = \
{'car': {'bbox': _get_batch_vertices(batch_x, batch_y, batch_idx)}}
loss_mask_tensor_dict = loss_mask_rasterizer(loss_mask_batch_labels,
mask_multiplier=mask_multiplier)
# Run the rasterization.
with tf.compat.v1.Session() as sess:
loss_mask_rasters = sess.run(loss_mask_tensor_dict)
# Check dict structure.
assert set(loss_mask_tensor_dict.keys()) == set(
loss_mask_batch_labels.keys())
for target_class_name in loss_mask_tensor_dict:
assert set(loss_mask_tensor_dict[target_class_name].keys()) == \
set(loss_mask_batch_labels[target_class_name].keys())
for obj_name in loss_mask_tensor_dict[target_class_name]:
# Compare with golden value.
np.testing.assert_allclose(loss_mask_rasters[target_class_name][obj_name],
expected_mask)
def test_loss_mask_rasters_bbox_2d_label(self):
"""Test that LossMaskRasterizer works correctly with bbox 2d labels."""
loss_mask_rasterizer = LossMaskRasterizer(
input_width=13,
input_height=11,
output_width=13,
output_height=11)
# Get all labels.
all_labels = _get_bbox_2d_labels()
# Empty groundtruth.
empty_loss_mask_tensor = np.ones(shape=(11, 13), dtype=np.float32)
# Case 1: activate bbox 4 and 5.
gt_rast_tensor1 = [[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 2.0, 2.0, 2.0, 2.0, 2.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
2.0, 2.0, 2.0, 1.0, 1.0, 1.0],
[1.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
2.0, 2.0, 2.0, 1.0, 1.0, 1.0],
[1.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
2.0, 2.0, 2.0, 1.0, 1.0, 1.0],
[1.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
2.0, 2.0, 2.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0,
2.0, 2.0, 2.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0,
2.0, 2.0, 2.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]]
mask1 = tf.constant(
np.array([False, False, False, True, True]), dtype=tf.bool)
filtered_labels1 = all_labels.filter(mask1)
loss_mask_batch_labels1 = \
{'car': {'bbox': filtered_labels1}}
loss_mask_tensor_dict1 = loss_mask_rasterizer(loss_mask_batch_labels1,
mask_multiplier=2.0)
with tf.compat.v1.Session() as sess:
output_loss_mask_tensor_dict1 = sess.run(loss_mask_tensor_dict1)
output_loss_mask_tensor = output_loss_mask_tensor_dict1['car']['bbox']
np.testing.assert_allclose(np.squeeze(output_loss_mask_tensor[0, :, :]),
empty_loss_mask_tensor)
np.testing.assert_allclose(np.squeeze(output_loss_mask_tensor[1, :, :]),
empty_loss_mask_tensor)
np.testing.assert_allclose(np.squeeze(output_loss_mask_tensor[2, :, :]),
empty_loss_mask_tensor)
np.testing.assert_allclose(np.squeeze(output_loss_mask_tensor[3, :, :]),
gt_rast_tensor1)
np.testing.assert_allclose(np.squeeze(output_loss_mask_tensor[4, :, :]),
empty_loss_mask_tensor)
# Case 2: activate bbox 3,4,5.
gt_rast_tensor2 = np.ones(shape=(11, 13), dtype=np.float32)
gt_rast_tensor2[0:4, 0:3] = 0.0
gt_rast_tensor3 = [[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]]
mask2 = tf.constant(
np.array([False, False, True, True, True]), dtype=tf.bool)
filtered_labels2 = all_labels.filter(mask2)
loss_mask_batch_labels2 = \
{'car': {'bbox': filtered_labels2}}
loss_mask_tensor_dict2 = loss_mask_rasterizer(loss_mask_batch_labels2,
mask_multiplier=0.0)
with tf.compat.v1.Session() as sess:
output_loss_mask_tensor_dict2 = sess.run(loss_mask_tensor_dict2)
output_loss_mask_tensor = output_loss_mask_tensor_dict2['car']['bbox']
np.testing.assert_allclose(np.squeeze(output_loss_mask_tensor[0, :, :]),
empty_loss_mask_tensor)
np.testing.assert_allclose(np.squeeze(output_loss_mask_tensor[1, :, :]),
empty_loss_mask_tensor)
np.testing.assert_allclose(np.squeeze(output_loss_mask_tensor[2, :, :]),
gt_rast_tensor2)
np.testing.assert_allclose(np.squeeze(output_loss_mask_tensor[3, :, :]),
gt_rast_tensor3)
np.testing.assert_allclose(np.squeeze(output_loss_mask_tensor[4, :, :]),
empty_loss_mask_tensor)
def test_loss_mask_raster_with_groundtruth_mask_dict(self):
"""Test that masks produced by LossMaskRasterizer when not ignoring groundtruth are sane."""
loss_mask_rasterizer = LossMaskRasterizer(
input_width=4, input_height=4, output_width=4, output_height=4)
batch_x, batch_y, batch_idx = \
[[1., 4., 4., 1.]], [[1., 1., 4., 4.]], [[0, 0, 0, 0]]
loss_mask_batch_labels = \
{'car': {'bbox': _get_batch_vertices(batch_x, batch_y, batch_idx)}}
car_cov = [
[0., 0., 0., 0.],
[0., 0., 1., 0.],
[0., 1., 1., 1.],
[0., 0., 1., 0.],
]
ground_truth_tensors = \
{'car': {'cov': tf.constant(
car_cov, dtype=tf.float32, shape=(1, 1, 4, 4))}}
expected_mask = [[[
[1., 1., 1., 1.],
[1., 2., 1., 2.],
[1., 1., 1., 1.],
[1., 2., 1., 2.],
]]]
loss_mask_tensor_dict = loss_mask_rasterizer(loss_mask_batch_labels,
ground_truth_tensors=ground_truth_tensors,
mask_multiplier=2.)
with tf.compat.v1.Session() as sess:
loss_mask_rasters = sess.run(loss_mask_tensor_dict)
np.testing.assert_allclose(
loss_mask_rasters['car']['bbox'], expected_mask)
def test_loss_mask_raster_with_groundtruth_mask_bbox_2d_label(self):
"""Test groundtruth mask works well for rasterizer with bbox_2d_label type."""
loss_mask_rasterizer = LossMaskRasterizer(
input_width=13,
input_height=11,
output_width=13,
output_height=11)
# Get all labels.
all_labels = _get_bbox_2d_labels()
# Empty groundtruth.
empty_loss_mask_tensor = np.ones(shape=(11, 13), dtype=np.float32)
# Final rasterized groundtruth with mask.
gt_rast1 = np.ones(shape=(11, 13), dtype=np.float32)
gt_rast1[3:5, 2:4] = 2.0
gt_rast1[3, 2] = 1.0
# Ground truth mask.
car_cov = np.zeros(shape=(5, 11, 13), dtype=np.float32)
car_cov[1, 0:4, 0:3] = 1.0
ground_truth_tensors = \
{'car': {'cov': tf.constant(
car_cov, dtype=tf.float32, shape=(5, 1, 11, 13))}}
# Only select bbox 1 for rasterization.
bbox_indices = tf.constant(
np.array([False, True, False, False, False]), dtype=tf.bool)
filtered_labels = all_labels.filter(bbox_indices)
loss_mask_batch_labels = \
{'car': {'cov': filtered_labels}}
loss_mask_tensor_dict = loss_mask_rasterizer(loss_mask_batch_labels,
ground_truth_tensors=ground_truth_tensors,
mask_multiplier=2.0)
with tf.compat.v1.Session() as sess:
output_loss_mask_tensor_dict = sess.run(loss_mask_tensor_dict)
output_loss_mask_tensor = output_loss_mask_tensor_dict['car']['cov']
np.testing.assert_allclose(np.squeeze(output_loss_mask_tensor[0, :, :]),
empty_loss_mask_tensor)
np.testing.assert_allclose(np.squeeze(output_loss_mask_tensor[1, :, :]),
gt_rast1)
np.testing.assert_allclose(np.squeeze(output_loss_mask_tensor[2, :, :]),
empty_loss_mask_tensor)
np.testing.assert_allclose(np.squeeze(output_loss_mask_tensor[3, :, :]),
empty_loss_mask_tensor)
np.testing.assert_allclose(np.squeeze(output_loss_mask_tensor[4, :, :]),
empty_loss_mask_tensor)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/rasterizers/tests/test_loss_mask_rasterizer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test BboxRasterizerConfig builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from unittest.mock import patch
from google.protobuf.text_format import Merge as merge_text_proto
import pytest
import nvidia_tao_tf1.cv.detectnet_v2.proto.bbox_rasterizer_config_pb2 as \
bbox_rasterizer_config_pb2
from nvidia_tao_tf1.cv.detectnet_v2.rasterizers.build_bbox_rasterizer_config import (
build_bbox_rasterizer_config
)
@pytest.fixture(scope='function')
def bbox_rasterizer_proto():
bbox_rasterizer_proto = bbox_rasterizer_config_pb2.BboxRasterizerConfig()
prototxt = """
target_class_config {
key: "animal"
value: {
cov_center_x: 0.125,
cov_center_y: 0.25,
cov_radius_x: 0.375,
cov_radius_y: 0.5,
bbox_min_radius: 0.625
}
}
target_class_config {
key: "traffic_cone"
value: {
cov_center_x: 0.75,
cov_center_y: 0.875,
cov_radius_x: 1.0,
cov_radius_y: 1.125,
bbox_min_radius: 1.25
}
}
deadzone_radius: 1.0
"""
merge_text_proto(prototxt, bbox_rasterizer_proto)
return bbox_rasterizer_proto
def test_build_bbox_rasterizer_config_keys(bbox_rasterizer_proto):
"""Test that build_bbox_rasterizer_config has the correct keys."""
bbox_rasterizer_config = build_bbox_rasterizer_config(bbox_rasterizer_proto)
assert set(bbox_rasterizer_config.keys()) == {'animal', 'traffic_cone'}
@patch(
"nvidia_tao_tf1.cv.detectnet_v2.rasterizers.build_bbox_rasterizer_config.BboxRasterizerConfig"
)
def test_build_bbox_rasterizer_config_values(MockedBboxRasterizerConfig, bbox_rasterizer_proto):
"""Test that build_bbox_rasterizer_config translates a proto correctly."""
build_bbox_rasterizer_config(bbox_rasterizer_proto)
    # Check it was called with the expected deadzone_radius value.
MockedBboxRasterizerConfig.assert_called_with(1.0)
# Now check the subclasses.
# Check for "animal".
# NOTE: these numbers are chosen to go around Python's default float being double precision.
MockedBboxRasterizerConfig.TargetClassConfig.assert_any_call(
0.125, 0.25, 0.375, 0.5, 0.625
)
# Check for "traffic_cone".
MockedBboxRasterizerConfig.TargetClassConfig.assert_any_call(
0.75, 0.875, 1.0, 1.125, 1.25
)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/rasterizers/tests/test_build_bbox_rasterizer_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test BboxRasterizerConfig."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pytest
from nvidia_tao_tf1.cv.detectnet_v2.rasterizers.bbox_rasterizer_config import BboxRasterizerConfig
@pytest.mark.parametrize(
"cov_center_x,cov_center_y,cov_radius_x,cov_radius_y,bbox_min_radius,should_raise",
[
(-0.1, 0.1, 0.2, 0.3, 0.4, True),
(0.5, -0.2, 0.6, 0.7, 0.8, True),
(0.9, 0.11, -0.3, 0.12, 0.13, True),
(0.14, 0.15, 0.16, -0.4, 0.17, True),
(0.18, 0.19, 0.20, 0.21, -0.5, True),
(1.1, 0.22, 0.23, 0.24, 0.25, True),
(0.26, 1.2, 0.27, 0.28, 0.29, True),
(0.30, 0.31, 1.3, 0.32, 0.33, False),
(0.34, 0.35, 0.36, 1.4, 0.37, False),
(0.38, 0.39, 0.40, 0.41, 1.5, False)
]
)
def test_target_class_config_init_ranges(cov_center_x, cov_center_y, cov_radius_x, cov_radius_y,
bbox_min_radius, should_raise):
"""Test that BboxRasterizerConfig.TargetClassConfig raises ValueError on invalid values.
Args:
The first 5 are the same as for BboxRasterizerConfig.TargetClassConfig.__init__().
should_raise (bool): Whether or not the __init__() should raise a ValueError.
"""
if should_raise:
with pytest.raises(ValueError):
BboxRasterizerConfig.TargetClassConfig(cov_center_x, cov_center_y, cov_radius_x,
cov_radius_y, bbox_min_radius)
else:
BboxRasterizerConfig.TargetClassConfig(cov_center_x, cov_center_y, cov_radius_x,
cov_radius_y, bbox_min_radius)
@pytest.mark.parametrize(
"deadzone_radius,should_raise",
[(-0.1, True), (0.1, False), (1.1, True), (0.9, False)]
)
def test_bbox_rasterizer_config_init_range(deadzone_radius, should_raise):
"""Test that BboxRasterizerConfig raises ValueError on invalid values.
Args:
The first one is the same as for BboxRasterizerConfig.__init__().
should_raise (bool): Whether or not the __init__() should raise a ValueError.
"""
if should_raise:
with pytest.raises(ValueError):
BboxRasterizerConfig(deadzone_radius)
else:
BboxRasterizerConfig(deadzone_radius)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/rasterizers/tests/test_bbox_rasterizer_config.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""Barebones timer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import timedelta
from time import time
from decorator import decorator
from nvidia_tao_tf1.core import distribution
class time_function(object):
"""Decorator that prints the runtime of a wrapped function."""
def __init__(self, prefix=""):
"""Constructor.
Args:
            prefix (str): Prefix to prepend to the timed function's name in the printout.
                Defaults to no prefix (empty string). This can be e.g. a module's name, or a
                helpful descriptive message.
"""
self._prefix = prefix
self._is_master = distribution.get_distributor().is_master()
def __call__(self, fn):
"""Wrap the call to the function.
Args:
fn (function): Function to be wrapped.
Returns:
wrapped_fn (function): Wrapped function.
"""
@decorator
def wrapped_fn(fn, *args, **kwargs):
if self._is_master:
# Only time if in master process.
start = time()
# Run function as usual.
return_args = fn(*args, **kwargs)
if self._is_master:
time_taken = timedelta(seconds=(time() - start))
print("Time taken to run %s: %s." %
(self._prefix + ":" + fn.__name__, time_taken))
return return_args
return wrapped_fn(fn)
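# Illustrative usage sketch of the decorator above ("my_module" is a
# hypothetical prefix):
#
#     @time_function(prefix="my_module")
#     def train():
#         ...
#
# On the master process this prints, e.g.:
# "Time taken to run my_module:train: 0:00:01.500000."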
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/utilities/timer.py |
# Copyright (c) 2017 - 2019, NVIDIA CORPORATION. All rights reserved.
"""Defining all the constants and magic numbers that the iva gridbox module uses."""
from collections import namedtuple
# Global Variables
# Setting output color book
color = {
'car': 'green',
'road_sign': 'cyan',
'bicycle': 'yellow',
'person': 'magenta',
'heavy_truck': 'blue',
'truck': 'red',
'face': 'white'
}
# Setting output label color map for kitti dumps
output_map = {
'car': 'automobile',
'person': 'person',
'bicycle': 'bicycle',
'road_sign': 'road_sign',
'face': 'face'
}
output_map_sec = {
'car': 'automobile',
'person': 'person',
'bicycle': 'bicycle',
'road_sign': 'road_sign',
'face': 'face'
}
# Clustering parameters
scales = [(1.0, 'cc')]
offset = (0, 0)
train_img_size = (960, 544)
criterion = 'IOU'
DEBUG = False
EPSILON = 1e-05
# Global variable for accepted image extensions
valid_image_ext = ['.jpg', '.png', '.jpeg', '.ppm']
Detection = namedtuple('Detection', [
# Bounding box of the detection in the LTRB format: [left, top, right, bottom]
'bbox',
# Confidence of detection
'confidence',
# Weighted variance of the bounding boxes in this cluster, normalized for the size of the box
'cluster_cv',
# Number of raw bounding boxes that went into this cluster
'num_raw_boxes',
    # Sum of the raw bounding boxes' coverage values in this cluster
'sum_coverages',
# Maximum coverage value among bboxes
'max_cov_value',
    # Minimum coverage value among bboxes
'min_cov_value',
# Candidate coverages.
'candidate_covs',
# Candidate bbox coordinates.
'candidate_bboxes'
])
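# Illustrative sketch of constructing a Detection (all values below are made up):
#
#     detection = Detection(bbox=[100., 50., 200., 150.], confidence=0.9,
#                           cluster_cv=0.01, num_raw_boxes=5, sum_coverages=3.2,
#                           max_cov_value=0.8, min_cov_value=0.4,
#                           candidate_covs=None, candidate_bboxes=None)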
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/utilities/constants.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""All constants and magic numbers used in the gridbox modules is defined here."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/utilities/__init__.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""Common routines for DetectNet V2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/common/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test helper functions for data conversion."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nvidia_tao_tf1.cv.detectnet_v2.common.dataio.converter_lib import _partition, _shard
def test_single_partition():
"""Partition a list of sequences into a single partition."""
sequences = [[0, 1, 2], [3, 4]]
assert _partition(sequences, 0, 1) == [[0, 1, 2, 3, 4]]
assert _partition(sequences, 1, 1) == [[0, 1, 2, 3, 4]]
def test_two_partitions():
"""Partition a list of sequences into 2 partitions."""
sequences = [[0, 1], [2, 3, 4], [5]]
assert _partition(sequences, 2, 1) == [[2, 3, 4], [0, 1, 5]]
def test_three_partitions():
"""Partition a list of sequences into 3 partitions."""
sequences = [[0, 1], [2, 3, 4], [5], [6, 7, 8, 9]]
assert _partition(sequences, 3, 1) == [[6, 7, 8, 9], [2, 3, 4], [0, 1, 5]]
def test_partitions_divisor():
"""Partition a list of sequences into 2 partitions."""
sequences = [[0, 1], [2, 3, 4], [5]]
assert _partition(sequences, 2, 2) == [[2, 3], [0, 1]]
def test_sharding():
"""Shard a list of partitions."""
partitions = [[0, 1, 2], [3, 4]]
assert _shard(partitions, 2) == [[[0], [1, 2]], [[3], [4]]]
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/common/dataio/converter_lib_test.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""Common dataio."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/common/dataio/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for converting datasets to .tfrecords."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pprint
import random
import six
from six.moves import range
import tensorflow as tf
def _convert_unicode_to_str(item):
if isinstance(item, six.text_type):
return item.encode('ascii', 'ignore')
return item
def _bytes_feature(*values):
# Convert unicode data to string for saving to TFRecords.
values = [_convert_unicode_to_str(value) for value in values]
return tf.train.Feature(bytes_list=tf.train.BytesList(value=values))
def _float_feature(*values):
return tf.train.Feature(float_list=tf.train.FloatList(value=values))
def _int64_feature(*values):
return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
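# Illustrative sketch of combining the feature helpers above into a
# tf.train.Example (the feature names below are made up for demonstration):
#
#     example = tf.train.Example(features=tf.train.Features(feature={
#         'frame/id': _bytes_feature('000000'),
#         'frame/width': _int64_feature(1242),
#         'target/coordinates_x': _float_feature(1.0, 7.0, 7.0, 1.0),
#     }))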
def _partition(sequences, num_partitions, divisor, uneven=False):
"""Partition a list of sequences to approximately equal lengths."""
num_partitions = max(num_partitions, 1) # 0 means 1 partition.
    # Sort the sequences by length (ascending) so that the longest sequence is popped first.
sequences_by_length = sorted(sequences, key=len)
partitions = [[] for _ in range(num_partitions)]
while sequences_by_length:
longest_sequence = sequences_by_length.pop()
# Add the longest_sequence to the shortest partition.
smallest_partition = min(partitions, key=len)
smallest_partition.extend(longest_sequence)
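    # Trim each partition so that its length is divisible by divisor.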
for partition in partitions:
for _ in range(len(partition) % divisor):
partition.pop()
if num_partitions > 1 and uneven:
if len(partitions) != num_partitions:
raise RuntimeError('Check the number of partitions.')
# Flatten the first num_partitions - 1 into one list.
flat_list = [item for l in partitions[0: num_partitions - 1] for item in l]
# Allocate the first k-1th as the 0th partition and the kth as the 1st partition.
partitions = [flat_list, partitions[-1]]
validation_sequence_stats = dict()
for frame in partitions[-1]:
if 'sequence' in list(frame.keys()):
sequence_name = frame['sequence']['name']
else:
sequence_name = frame['sequence_name']
if sequence_name is None:
raise RuntimeError('Sequence name is None.')
if sequence_name in list(validation_sequence_stats.keys()):
validation_sequence_stats[sequence_name] += 1
else:
validation_sequence_stats[sequence_name] = 1
pp = pprint.PrettyPrinter(indent=4)
print('%d training frames ' % (len(partitions[0])))
print('%d validation frames' % (len(partitions[-1])))
print('Validation sequence stats:')
print('Sequence name: #frame')
pp.pprint(validation_sequence_stats)
return partitions
def _shard(partitions, num_shards):
"""Shard each partition."""
num_shards = max(num_shards, 1) # 0 means 1 shard.
shards = []
for partition in partitions:
result = []
if len(partition) == 0:
continue
shard_size = len(partition) // num_shards
for i in range(num_shards):
begin = i * shard_size
end = (i + 1) * shard_size if i + 1 < num_shards else len(partition)
result.append(partition[begin:end])
shards.append(result)
return shards
def _shuffle(partitions):
"""Shuffle each partition independently."""
for partition in partitions:
random.shuffle(partition)
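# Illustrative end-to-end sketch of the helpers above ("sequences" is a
# hypothetical list of per-sequence frame lists):
#
#     partitions = _partition(sequences, num_partitions=2, divisor=1)
#     shards = _shard(partitions, num_shards=2)
#     _shuffle(partitions)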
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/common/dataio/converter_lib.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Graph package containing all graph-related operations.
It implicitly assumes that TensorFlow graphs are being used.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nvidia_tao_tf1.cv.detectnet_v2.common.graph.initializers import get_init_ops
__all__ = ('get_init_ops', )
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/common/graph/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensorflow Graph initializer functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def get_init_ops():
"""Return all ops required for initialization."""
return tf.group(
tf.compat.v1.local_variables_initializer(),
tf.compat.v1.tables_initializer(),
*tf.compat.v1.get_collection('iterator_init')
)
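# Minimal usage sketch (assumes a TF1-style graph has already been built):
#
#     with tf.compat.v1.Session() as session:
#         session.run(get_init_ops())
#         ...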
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/common/graph/initializers.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""Timing related test utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
TIME_DELTA = 1.5
class FakeTime(object):
"""Can be used to replace to built-in time function."""
_NUM_CALLS = 0
@classmethod
def time(cls):
"""Time method."""
new_timestamp = cls._NUM_CALLS * TIME_DELTA
# Next time this is called, returns (_NUM_CALLS + 1) * TIME_DELTA.
cls._NUM_CALLS += 1
return new_timestamp
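# Illustrative usage sketch with pytest's monkeypatch fixture; the patched
# attribute path ("some_module.time") is hypothetical and must point at the
# consumer's own reference to time():
#
#     def test_timed_code(monkeypatch):
#         monkeypatch.setattr('some_module.time', FakeTime.time)
#         # Successive calls to time() inside some_module now advance by TIME_DELTA.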
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/common/tests/utilities/timing.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""IVA gridbox entrypoint scripts."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/scripts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Export a detectnet_v2 model."""
# Import build_command_line_parser, as it is needed by the entrypoint.
from nvidia_tao_tf1.cv.common.export.app import build_command_line_parser # noqa pylint: disable=W0611
from nvidia_tao_tf1.cv.common.export.app import launch_export
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.detectnet_v2.export.exporter import DetectNetExporter as Exporter
if __name__ == "__main__":
try:
launch_export(Exporter, backend="onnx")
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Export finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Export was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/scripts/export.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for dumping dataset tensors to TensorFile for int8 calibration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import os
import sys
import tensorflow as tf
from tqdm import trange
from nvidia_tao_tf1.core.export.data import TensorFile
from nvidia_tao_tf1.cv.detectnet_v2.common.graph import get_init_ops
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.build_dataloader import build_dataloader
from nvidia_tao_tf1.cv.detectnet_v2.spec_handler.spec_loader import load_experiment_spec
from nvidia_tao_tf1.cv.detectnet_v2.training.utilities import initialize
from nvidia_tao_tf1.cv.detectnet_v2.utilities.timer import time_function
logger = logging.getLogger(__name__)
def dump_dataset_images_to_tensorfile(experiment_spec, output_path, training, max_batches):
"""Dump dataset images to a nvidia_tao_tf1.core.data.TensorFile object and store it to disk.
The file can be used as an input to e.g. INT8 calibration.
Args:
experiment_spec: experiment_pb2.Experiment object containing experiment parameters.
output_path (str): Path for the TensorFile to be created.
training (bool): Whether to dump images from the training or validation set.
max_batches (int): Maximum number of minibatches to dump.
Returns:
        tensor_file: nvidia_tao_tf1.core.export.data.TensorFile object.
"""
dataset_config = experiment_spec.dataset_config
augmentation_config = experiment_spec.augmentation_config
batch_size = experiment_spec.training_config.batch_size_per_gpu
dataloader = build_dataloader(dataset_config, augmentation_config)
images, _, num_samples = dataloader.get_dataset_tensors(batch_size,
training=training,
enable_augmentation=False,
repeat=True)
batches_in_dataset = num_samples // batch_size
# If max_batches is not supplied, then dump the whole dataset.
max_batches = batches_in_dataset if max_batches == -1 else max_batches
if max_batches > batches_in_dataset:
raise ValueError("The dataset contains %d minibatches, while the requested amount is %d." %
(batches_in_dataset, max_batches))
tensor_file = dump_to_tensorfile(images, output_path, max_batches)
return tensor_file
def dump_to_tensorfile(tensor, output_path, max_batches):
"""Dump iterable tensor to a TensorFile.
Args:
tensor: Tensor that can be iterated over.
output_path: Path for the TensorFile to be created.
max_batches: Maximum number of minibatches to dump.
Returns:
tensor_file: TensorFile object.
"""
output_root = os.path.dirname(output_path)
if not os.path.exists(output_root):
os.makedirs(output_root)
else:
if os.path.exists(output_path):
raise ValueError("A previously generated tensorfile already exists in the output path."
" Please delete this file before writing a new one.")
tensor_file = TensorFile(output_path, 'w')
tr = trange(max_batches, file=sys.stdout)
tr.set_description("Writing calibration tensorfile")
with tf.Session() as session:
session.run(get_init_ops())
for _ in tr:
batch_tensors = session.run(tensor)
tensor_file.write(batch_tensors)
return tensor_file
def build_command_line_parser(parser=None):
"""Simple function to build a command line parser."""
if parser is None:
parser = argparse.ArgumentParser(
prog="calibration_tensorfile",
description="Tool to generate random batches of train/val data for calibration."
)
parser.add_argument(
'-e',
'--experiment_spec_file',
type=str,
help='Absolute path to the experiment spec file.'
)
parser.add_argument(
'-o',
'--output_path',
type=str,
help='Path to the TensorFile that will be created.'
)
parser.add_argument(
'-m',
'--max_batches',
type=int,
default=-1,
help='Maximum number of minibatches to dump. The default is to dump the whole dataset.'
)
parser.add_argument(
'-v',
'--verbose',
action='store_true',
help='Set verbosity level for the logger.'
)
parser.add_argument(
'--use_validation_set',
action='store_true',
help='If set, then validation images are dumped. Otherwise, training images are dumped.'
)
return parser
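# Example invocation (illustrative; paths are placeholders):
#
#     calibration_tensorfile -e /path/to/experiment_spec.txt \
#         -o /path/to/calibration.tensorfile -m 40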
def parse_command_line_arguments(cl_args=None):
"""Parse command line arguments."""
parser = build_command_line_parser()
return parser.parse_args(cl_args)
@time_function(__name__)
def main(args=None):
"""Run the dataset dump."""
args = parse_command_line_arguments(args)
# Set up logger verbosity.
verbosity = 'INFO'
if args.verbose:
verbosity = 'DEBUG'
# Configure the logger.
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level=verbosity)
logger.info(
"This method is soon to be deprecated. Please use the -e option in the export command "
"to instantiate the dataloader and generate samples for calibration from the "
"training dataloader."
)
experiment_spec = load_experiment_spec(args.experiment_spec_file, merge_from_default=False,
validation_schema="train_val")
training = not args.use_validation_set
output_path = args.output_path
max_batches = args.max_batches
# Set seed. Training precision left untouched as it is irrelevant here.
initialize(random_seed=experiment_spec.random_seed, training_precision=None)
tensorfile = dump_dataset_images_to_tensorfile(experiment_spec,
output_path, training,
max_batches)
tensorfile.close()
if __name__ == "__main__":
try:
main()
except Exception as e:
if type(e) == tf.errors.ResourceExhaustedError:
logger.error(
"Ran out of GPU memory, please lower the batch size, use a smaller input "
"resolution, or use a smaller backbone."
)
exit(1)
else:
# throw out the error as-is if they are not OOM error
raise e
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/scripts/calibration_tensorfile.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command line interface for converting detection datasets to TFRecords."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import os
from google.protobuf.text_format import Merge as merge_text_proto
import tensorflow as tf
from nvidia_tao_tf1.core.utils.path_utils import expand_path
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.detectnet_v2.dataio.build_converter import build_converter
import nvidia_tao_tf1.cv.detectnet_v2.proto.dataset_export_config_pb2 as dataset_export_config_pb2
logger = logging.getLogger(__name__)
def build_command_line_parser(parser=None):
"""Build command line parser for dataset_convert."""
if parser is None:
parser = argparse.ArgumentParser(
prog='dataset_converter',
description='Convert object detection datasets to TFRecords.'
)
parser.add_argument(
'-d',
'--dataset_export_spec',
required=True,
help='Path to the detection dataset spec containing config for exporting .tfrecords.')
parser.add_argument(
'-o',
'--output_filename',
required=True,
help='Output file name.')
parser.add_argument(
'-f',
'--validation_fold',
type=int,
default=None,
help='Indicate the validation fold in 0-based indexing. \
This is required when modifying the training set but otherwise optional.')
parser.add_argument(
'-v',
'--verbose',
action='store_true',
help="Flag to get detailed logs during the conversion process."
)
parser.add_argument(
"-r",
"--results_dir",
type=str,
default=None,
help="Path to the results directory"
)
return parser
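# Example invocation (illustrative; paths are placeholders):
#
#     dataset_converter -d /path/to/dataset_export_spec.txt \
#         -o /path/to/tfrecords/kitti_trainval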
def parse_command_line_args(cl_args=None):
"""Parse sys.argv arguments from commandline.
Args:
cl_args: List of command line arguments.
Returns:
args: list of parsed arguments.
"""
parser = build_command_line_parser()
args = parser.parse_args(cl_args)
return args
def main(args=None):
"""
Convert an object detection dataset to TFRecords.
Args:
args(list): list of arguments to be parsed if called from another module.
"""
args = parse_command_line_args(cl_args=args)
verbosity = 'INFO'
if args.verbose:
verbosity = 'DEBUG'
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=verbosity)
# Defining the results directory.
if args.results_dir is not None:
results_dir = expand_path(args.results_dir)
if not os.path.exists(results_dir):
os.makedirs(results_dir)
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=True,
verbosity=logger.getEffectiveLevel(),
append=False
)
)
status_logging.get_status_logger().write(
data=None,
message="Starting Object Detection Dataset Convert.",
status_level=status_logging.Status.STARTED
)
# Load config from the proto file.
dataset_export_config = dataset_export_config_pb2.DatasetExportConfig()
with open(expand_path(args.dataset_export_spec), "r") as f:
merge_text_proto(f.read(), dataset_export_config)
converter = build_converter(dataset_export_config, args.output_filename, args.validation_fold)
converter.convert()
if __name__ == '__main__':
try:
main()
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Dataset convert finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Dataset convert was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
if type(e) == tf.errors.ResourceExhaustedError:
logger = logging.getLogger(__name__)
logger.error(
"Ran out of GPU memory, please lower the batch size, use a smaller input "
"resolution, or use a smaller backbone."
)
status_logging.get_status_logger().write(
message="Ran out of GPU memory, please lower the batch size, use a smaller input "
"resolution, or use a smaller backbone.",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
exit(1)
else:
# throw out the error as-is if they are not OOM error
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/scripts/dataset_convert.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Perform continuous training for gridbox object detection networks on a tfrecords dataset.
This script performs training only; there is no validation or inference here.
Use separate scripts for those purposes.
Short code breakdown:
(1) Set up processors (yielding tfrecords batches, data decoding, ground-truth generation, ...).
(2) Hook up the data pipe and processors to a DNN, e.g. a ResNet18 or VGG16 template.
(3) Set up losses, metrics, hooks.
(4) Perform training steps.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import os
import time
from google.protobuf.json_format import MessageToDict
import tensorflow as tf
import wandb
from nvidia_tao_tf1.blocks.multi_source_loader.types.bbox_2d_label import Bbox2DLabel
import nvidia_tao_tf1.core
from nvidia_tao_tf1.core import distribution
from nvidia_tao_tf1.core.hooks.sample_counter_hook import SampleCounterHook
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.common.mlops.clearml import get_clearml_task
from nvidia_tao_tf1.cv.common.mlops.wandb import check_wandb_logged_in, initialize_wandb
from nvidia_tao_tf1.cv.common.utils import get_model_file_size
from nvidia_tao_tf1.cv.common.utilities.serialization_listener import (
EpochModelSerializationListener
)
from nvidia_tao_tf1.cv.detectnet_v2.common.graph import get_init_ops
from nvidia_tao_tf1.cv.detectnet_v2.cost_function.cost_auto_weight_hook import (
build_cost_auto_weight_hook
)
from nvidia_tao_tf1.cv.detectnet_v2.cost_function.cost_function_parameters import (
build_target_class_list
)
from nvidia_tao_tf1.cv.detectnet_v2.cost_function.cost_function_parameters import (
get_target_class_names
)
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.build_dataloader import build_dataloader
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.build_dataloader import select_dataset_proto
from nvidia_tao_tf1.cv.detectnet_v2.evaluation.evaluation import Evaluator
from nvidia_tao_tf1.cv.detectnet_v2.evaluation.evaluation_config import build_evaluation_config
from nvidia_tao_tf1.cv.detectnet_v2.model.build_model import build_model
from nvidia_tao_tf1.cv.detectnet_v2.model.build_model import get_base_model_config
from nvidia_tao_tf1.cv.detectnet_v2.model.build_model import select_model_proto
from nvidia_tao_tf1.cv.detectnet_v2.model.utilities import get_pretrained_model_path, get_tf_ckpt
from nvidia_tao_tf1.cv.detectnet_v2.objectives.build_objective_label_filter import (
build_objective_label_filter
)
from nvidia_tao_tf1.cv.detectnet_v2.postprocessor.postprocessing_config import (
build_postprocessing_config
)
from nvidia_tao_tf1.cv.detectnet_v2.rasterizers.bbox_rasterizer import BboxRasterizer
from nvidia_tao_tf1.cv.detectnet_v2.rasterizers.build_bbox_rasterizer_config import (
build_bbox_rasterizer_config
)
from nvidia_tao_tf1.cv.detectnet_v2.rasterizers.loss_mask_rasterizer import LossMaskRasterizer
from nvidia_tao_tf1.cv.detectnet_v2.spec_handler.spec_loader import load_experiment_spec
from nvidia_tao_tf1.cv.detectnet_v2.tfhooks.early_stopping_hook import build_early_stopping_hook
from nvidia_tao_tf1.cv.detectnet_v2.tfhooks.task_progress_monitor_hook import (
TaskProgressMonitorHook
)
from nvidia_tao_tf1.cv.detectnet_v2.tfhooks.utils import get_common_training_hooks
from nvidia_tao_tf1.cv.detectnet_v2.tfhooks.validation_hook import ValidationHook
from nvidia_tao_tf1.cv.detectnet_v2.training.training_proto_utilities import (
build_learning_rate_schedule,
build_optimizer,
build_regularizer,
build_train_op_generator
)
from nvidia_tao_tf1.cv.detectnet_v2.training.utilities import compute_steps_per_epoch
from nvidia_tao_tf1.cv.detectnet_v2.training.utilities import compute_summary_logging_frequency
from nvidia_tao_tf1.cv.detectnet_v2.training.utilities import get_singular_monitored_session
from nvidia_tao_tf1.cv.detectnet_v2.training.utilities import get_weights_dir
from nvidia_tao_tf1.cv.detectnet_v2.training.utilities import initialize
from nvidia_tao_tf1.cv.detectnet_v2.utilities.timer import time_function
from nvidia_tao_tf1.cv.detectnet_v2.visualization.visualizer import \
DetectNetTBVisualizer as Visualizer
logger = logging.getLogger(__name__)
loggable_tensors = {}
def run_training_loop(experiment_spec, results_dir, gridbox_model, hooks, steps_per_epoch,
output_model_file_name, maglev_experiment, model_version_labels,
visualizer_config, key):
"""Train the model.
Args:
experiment_spec (experiment_pb2.Experiment): Experiment spec.
results_dir (str): Path to a folder where various training outputs will be written.
gridbox_model (Gridbox): Network to train.
hooks (list): A list of hooks.
steps_per_epoch (int): Number of steps per epoch.
output_model_file_name (str): Name of a model to be saved after training.
maglev_experiment (maglev.platform.experiment.Experiment): Maglev Experiment object.
model_version_labels (dict): Labels to attach to the created ModelVersions.
visualizer_config (VisualizerConfigProto): Configuration element for the visualizer.
key (str): A key to load and save models.
"""
# Get all the objects necessary for the SingularMonitoredSession
status_logging.get_status_logger().write(data=None, message="Running training loop.")
num_epochs = experiment_spec.training_config.num_epochs
num_training_steps = steps_per_epoch * num_epochs
# Setting default checkpoint interval.
checkpoint_interval = 10
if experiment_spec.training_config.checkpoint_interval:
checkpoint_interval = experiment_spec.training_config.checkpoint_interval
logger.info("Checkpoint interval: {}".format(checkpoint_interval))
global_step = tf.train.get_or_create_global_step()
distributor = distribution.get_distributor()
config = distributor.get_config()
is_master = distributor.is_master()
# Number of points per epoch to log scalars.
num_logging_points = visualizer_config.scalar_logging_frequency if \
visualizer_config.scalar_logging_frequency else 10
if num_logging_points > steps_per_epoch:
validation_message = f"Number of logging points {num_logging_points} "\
f"must be <= than the number of steps per epoch {steps_per_epoch}."
status_logging.get_status_logger().write(
message=validation_message,
status_level=status_logging.Status.FAILURE
)
raise ValueError(validation_message)
# Compute logging frequency based on user defined number of logging points.
summary_every_n_steps = compute_summary_logging_frequency(
steps_per_epoch,
num_logging_points=num_logging_points
)
# Infrequent logging frequency in epochs
if Visualizer.enabled:
infrequent_logging_frequency = visualizer_config.infrequent_logging_frequency if \
visualizer_config.infrequent_logging_frequency else 1
if infrequent_logging_frequency > num_epochs:
validation_message = f"Infrequent logging frequency {infrequent_logging_frequency} "\
f"must be lesser than the total number of epochs {num_epochs}."
status_logging.get_status_logger().write(
message=validation_message,
status_level=status_logging.Status.FAILURE
)
raise ValueError(validation_message)
infrequent_summary_every_n_steps = steps_per_epoch * infrequent_logging_frequency
else:
infrequent_summary_every_n_steps = 0
logger.info(
"Scalars logged at every {summary_every_n_steps} steps".format(
summary_every_n_steps=summary_every_n_steps
)
)
logger.info(
"Images logged at every {infrequent_summary_every_n_steps} steps".format(
infrequent_summary_every_n_steps=infrequent_summary_every_n_steps
)
)
scaffold = tf.compat.v1.train.Scaffold(local_init_op=get_init_ops())
# Get a listener that will serialize the metadata upon each checkpoint saving call.
serialization_listener = EpochModelSerializationListener(
checkpoint_dir=results_dir,
model=gridbox_model,
key=key,
steps_per_epoch=steps_per_epoch,
max_to_keep=None)
listeners = [serialization_listener]
loggable_tensors.update({
'epoch': global_step / steps_per_epoch,
'step': global_step,
'loss': gridbox_model.get_total_cost()})
training_hooks = get_common_training_hooks(
log_tensors=loggable_tensors,
log_every_n_secs=5,
checkpoint_n_steps=steps_per_epoch * checkpoint_interval,
model=None,
last_step=num_training_steps,
checkpoint_dir=results_dir,
steps_per_epoch=steps_per_epoch,
scaffold=scaffold,
summary_every_n_steps=summary_every_n_steps,
infrequent_summary_every_n_steps=infrequent_summary_every_n_steps,
listeners=listeners,
key=key)
training_hooks.extend(hooks)
# Add task progress monitoring hook to the master process.
if is_master:
training_hooks.append(TaskProgressMonitorHook(loggable_tensors,
results_dir,
num_epochs,
steps_per_epoch))
total_batch_size = experiment_spec.training_config.batch_size_per_gpu * \
distributor.size()
training_hooks.append(SampleCounterHook(batch_size=total_batch_size, name="Train"))
checkpoint_filename = get_latest_checkpoint(results_dir, key)
with get_singular_monitored_session(keras_models=gridbox_model.get_keras_training_model(),
session_config=config,
hooks=training_hooks,
scaffold=scaffold,
checkpoint_filename=checkpoint_filename) as session:
try:
while not session.should_stop():
session.run([gridbox_model.get_train_op()])
status_logging.get_status_logger().write(
data=None,
message="Training loop completed."
)
except (KeyboardInterrupt, SystemExit) as e:
logger.info("Training was interrupted.")
status_logging.get_status_logger().write(
data={"Error": "{}".format(e)},
message="Training was interrupted"
)
finally:
# Saves the last best model before the graph is finalized.
save_model(gridbox_model, output_model_file_name, key=key)
def get_latest_checkpoint(results_dir, key):
"""Get the latest checkpoint path from a given results directory.
Parses through the directory to look for the latest checkpoint file
and returns the path to this file.
    Args:
        results_dir (str): Path to the results directory.
        key (str): Key used to decrypt the checkpoint.
    Returns:
        ckpt_path (str): Path to the latest checkpoint, or None if no checkpoint is found.
"""
trainable_ckpts = [int(item.split('.')[1].split('-')[1]) for item in os.listdir(results_dir)
if item.endswith(".ckzip")]
num_ckpts = len(trainable_ckpts)
if num_ckpts == 0:
return None
latest_step = sorted(trainable_ckpts, reverse=True)[0]
latest_checkpoint = os.path.join(results_dir, "model.epoch-{}.ckzip".format(latest_step))
return get_tf_ckpt(latest_checkpoint, key, latest_step)
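# Illustrative sketch (not executed during training): the parsing above maps
# checkpoint file names to epoch numbers, e.g.
#   "model.epoch-7.ckzip".split('.')[1].split('-')[1]  ->  "7"
# so a results directory holding epochs {3, 7, 10} resolves to
# "model.epoch-10.ckzip" before decryption via get_tf_ckpt.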
def save_model(gridbox_model, output_model_file_name, key):
"""Save final Helnet model to disk and create a ModelVersion if we are in a workflow.
Args:
gridbox_model (GridboxModel): Final gridbox detector model.
output_model_file_name: Name of a model to be saved.
key (str): A key to save and load models in tlt format.
"""
# Master process saves the model to disk. This saves the final model even if checkpointer
# hook was not enabled.
status_logging.get_status_logger().write(
data=None,
message="Saving trained model."
)
if distribution.get_distributor().is_master():
gridbox_model.save_model(file_name=output_model_file_name,
enc_key=key)
s_logger = status_logging.get_status_logger()
s_logger.kpi = {
"size": get_model_file_size(output_model_file_name),
"param_count": gridbox_model.num_params
}
s_logger.write(
message="Model saved."
)
def build_rasterizers(experiment_spec, input_width, input_height, output_width, output_height):
"""Build bbox and loss mask rasterizers.
Args:
experiment_spec (experiment_pb2.Experiment): Experiment spec.
input_width/height (int): Model input size.
output_width/height (int): Model output size.
Returns:
bbox_rasterizer (BboxRasterizer): A rasterizer for ground truths.
loss_mask_rasterizer (LossMaskRasterizer): A rasterizer for loss masks.
"""
# Build a BboxRasterizer with which to generate ground truth tensors.
status_logging.get_status_logger().write(data=None, message="Building rasterizer.")
target_class_names = get_target_class_names(experiment_spec.cost_function_config)
target_class_mapping = dict(experiment_spec.dataset_config.target_class_mapping)
bbox_rasterizer_config = build_bbox_rasterizer_config(experiment_spec.bbox_rasterizer_config)
bbox_rasterizer = BboxRasterizer(input_width=input_width,
input_height=input_height,
output_width=output_width,
output_height=output_height,
target_class_names=target_class_names,
bbox_rasterizer_config=bbox_rasterizer_config,
target_class_mapping=target_class_mapping)
# Build a LossMaskRasterizer with which to generate loss masks.
loss_mask_rasterizer = LossMaskRasterizer(input_width=input_width,
input_height=input_height,
output_width=output_width,
output_height=output_height)
status_logging.get_status_logger().write(data=None, message="Rasterizers built.")
return bbox_rasterizer, loss_mask_rasterizer
def rasterize_source_weight(batch_labels):
"""Method that users will call to generate source_weight tensors.
Args:
batch_labels (nested dict or BBox2DLabel): If nested dict, has two levels:
[target_class_name][objective_name]. The leaf values are the corresponding filtered
ground truth labels in tf.Tensor for a batch of frames.
If BBox2DLabel, it incorporates labels for all frames.
Returns:
source_weight_tensor (Tensor): source weight tensor with shape [N,], where N is the
batch size. It should be expanded to [N,1,1...] before it is computed in loss
function.
"""
source_weight_tensor = None
# Step1_0: we try to get source_weight_tensors with shape [N,].
if isinstance(batch_labels, list):
source_weight_tensor_arrs = []
for gt_label in batch_labels:
# Have to reshape the "source_weight" tensor to [1], so that tf.concat could work.
if "source_weight" in gt_label:
source_weight_tensor_arrs.append(tf.reshape(gt_label["source_weight"], [1]))
else:
return source_weight_tensor
source_weight_tensor = tf.concat(source_weight_tensor_arrs, axis=0)
elif isinstance(batch_labels, Bbox2DLabel):
# source_weight_tensor is in the shape [N,].
source_weight_tensor = tf.squeeze(batch_labels.source_weight)
else:
raise TypeError("Only dict or BBox2dLabel could be handled by sw rasterize")
# TODO(ashen): whether we need below normalization methods:
# Reciprocal of mean value of source_weight tensor, used for normalization
# Step1_1: source_weight_mean_norm = 1.0 / tf.reduce_mean(source_weight_base_tensor)
# Step1_2: source_weight_tensor = source_weight_tensor * source_weight_mean_norm
return source_weight_tensor
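# Minimal shape sketch (hypothetical values): for a batch of 4 frames whose
# label dicts each carry a scalar "source_weight", the tf.concat above yields a
# [4] tensor such as [1.0, 1.0, 0.5, 2.0]; a Bbox2DLabel source_weight of shape
# [4, 1] is squeezed to the same [4] shape.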
def merge_source_weight_to_loss_mask(source_weight_tensor, loss_masks, ground_truth_tensors):
"""Merge source weight tensors into loss masks.
Args:
source_weight_tensor (Tensor): source weight tensor with shape [N,]
loss_masks (Nested dict): dict with 2 levels:
[target_class_name][objective_name]. The leaf values are the loss_mask
tensors. Also the dict could be empty.
ground_truth_tensors (Nested dict): the ground truth dictionary to contain
ground_truth tensors.
Returns:
loss_masks (Nested dict): Modified loss_masks dictionary to incorporate
source weight tensors.
"""
if source_weight_tensor is None or source_weight_tensor.shape.ndims != 1:
return loss_masks
for class_name in ground_truth_tensors.keys():
if class_name not in loss_masks.keys():
loss_masks[class_name] = dict()
for objective_name in ground_truth_tensors[class_name].keys():
# We expand the source_weight_tensor to be [N,1,1,...], which is like
# ground_truth_tensors[class_name][objective_name].
gt_tensor = ground_truth_tensors[class_name][objective_name]
# Step1: broadcast from [N,] to [1,1...,N].
exp_source_weight_tensor = tf.broadcast_to(source_weight_tensor,
shape=[1] * (gt_tensor.shape.ndims - 1)
+ [source_weight_tensor.shape[0]])
# Step2: transpose to get the tensor with shape [N,1,1,..].
exp_source_weight_tensor = tf.transpose(exp_source_weight_tensor)
if objective_name in loss_masks[class_name]:
# If loss_mask exists, we merge it with source_weight_tensor.
loss_mask_tensor = loss_masks[class_name][objective_name]
# Assign merged loss mask tensors.
loss_masks[class_name][objective_name] = tf.multiply(loss_mask_tensor,
exp_source_weight_tensor)
else:
# If loss_mask does not exist, we directly assign it to be source_weight_tensor.
loss_masks[class_name][objective_name] = exp_source_weight_tensor
return loss_masks
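# Shape walk-through (assuming a [N, C, H, W] ground truth tensor):
# tf.broadcast_to maps the [N] source weight tensor to [1, 1, 1, N], and the
# default tf.transpose reverses the axes to [N, 1, 1, 1], which then broadcasts
# elementwise against the [N, C, H, W] loss mask inside tf.multiply.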
def rasterize_tensors(gridbox_model, loss_mask_label_filter, bbox_rasterizer, loss_mask_rasterizer,
ground_truth_labels):
"""Rasterize ground truth and loss mask tensors.
Args:
gridbox_model (HelnetGridbox): A HelnetGridbox instance.
loss_mask_label_filter (ObjectiveLabelFilter): A label filter for loss masks.
bbox_rasterizer (BboxRasterizer): A rasterizer for ground truths.
loss_mask_rasterizer (LossMaskRasterizer): A rasterizer for loss masks.
ground_truth_labels (list): Each element is a dict of target features (each a tf.Tensor).
Returns:
ground_truth_tensors (dict): [target_class_name][objective_name] rasterizer ground truth
tensor.
loss_masks (tf.Tensor): rasterized loss mask corresponding to the input labels.
"""
status_logging.get_status_logger().write(data=None, message="Rasterizing tensors.")
# Get ground truth tensors.
ground_truth_tensors = \
gridbox_model.generate_ground_truth_tensors(bbox_rasterizer=bbox_rasterizer,
batch_labels=ground_truth_labels)
# Get the loss mask labels.
loss_mask_labels = loss_mask_label_filter.apply_filters(ground_truth_labels)
ground_truth_mask = ground_truth_tensors if loss_mask_label_filter.preserve_ground_truth else \
None
# Get the loss masks.
loss_masks = loss_mask_rasterizer(
loss_mask_batch_labels=loss_mask_labels,
ground_truth_tensors=ground_truth_mask,
mask_multiplier=loss_mask_label_filter.mask_multiplier)
source_weight_tensor = rasterize_source_weight(ground_truth_labels)
# Merge source_weight_tensors with loss_masks
loss_masks = merge_source_weight_to_loss_mask(source_weight_tensor,
loss_masks,
ground_truth_tensors)
status_logging.get_status_logger().write(data=None, message="Tensors rasterized.")
return ground_truth_tensors, loss_masks
def build_gridbox_model(experiment_spec, input_shape, model_file_name=None, key=None):
"""Instantiate a HelnetGridbox or a child class, e.g. a HelnetGRUGridbox.
Args:
experiment_spec (experiment_pb2.Experiment): Experiment spec.
input_shape (tuple): Model input shape as a CHW tuple. Not used if
model_file_name is not None.
        model_file_name (str): Model file to load, or None if a new model should be created.
key (str): A key to load and save tlt models.
Returns:
A HelnetGridbox or a child class instance, e.g. a HelnetGRUGridbox.
"""
status_logging.get_status_logger().write(data=None, message="Building DetectNet V2 model")
target_class_names = get_target_class_names(experiment_spec.cost_function_config)
# Select the model config, which might have ModelConfig / TemporalModelConfig type.
model_config = select_model_proto(experiment_spec)
enable_qat = experiment_spec.training_config.enable_qat
gridbox_model = build_model(m_config=model_config,
target_class_names=target_class_names,
enable_qat=enable_qat)
# Set up regularization.
kernel_regularizer, bias_regularizer = build_regularizer(
experiment_spec.training_config.regularizer)
if not model_config.load_graph:
# Construct model if the pretrained model is not pruned.
gridbox_model.construct_model(input_shape=input_shape,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
pretrained_weights_file=model_file_name,
enc_key=key)
    else:
        # Load the model together with its graph structure for pruned models.
        assert model_config.pretrained_model_file is not None, "Please provide a pretrained "\
            "model when the load_graph flag is set."
gridbox_model.load_model_weights(model_file_name, enc_key=key)
gridbox_model.update_regularizers(kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer)
# If the model is loaded from a file, we need to make sure that
# model contains all the objectives as defined in the spec file.
gridbox_model.add_missing_outputs(kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer)
gridbox_model.print_model_summary()
status_logging.get_status_logger().write(data=None, message="DetectNet V2 model built.")
return gridbox_model
def build_training_graph(experiment_spec,
gridbox_model,
loss_mask_label_filter,
bbox_rasterizer,
loss_mask_rasterizer,
dataloader,
learning_rate,
cost_combiner_func):
"""Build training graph.
Args:
experiment_spec (experiment_pb2.Experiment): Experiment spec.
gridbox_model (HelnetGridbox): A HelnetGridbox instance.
loss_mask_label_filter (ObjectiveLabelFilter): A label filter for loss masks.
bbox_rasterizer (BboxRasterizer): A rasterizer for ground truths.
loss_mask_rasterizer (LossMaskRasterizer): A rasterizer for loss masks.
dataloader (Dataloader): A dataloader instance (eg. DefaultDataloader).
learning_rate (tf.Variable): Learning rate variable.
        cost_combiner_func: A function that takes in a dictionary of objective costs
            and returns the total cost as a weighted sum of the objective costs.
"""
status_logging.get_status_logger().write(
data=None,
message="Building training graph."
)
# Get training image and label tensors from dataset.
batch_size = experiment_spec.training_config.batch_size_per_gpu
training_images, training_ground_truth_labels, num_training_samples = \
dataloader.get_dataset_tensors(batch_size, training=True, enable_augmentation=True)
logger.info("Found %d samples in training set", num_training_samples)
    # Add input images to Tensorboard. Specify value range to avoid Tensorflow automatic scaling.
Visualizer.image('images', training_images, value_range=[0.0, 1.0],
collections=[nvidia_tao_tf1.core.hooks.utils.INFREQUENT_SUMMARY_KEY])
# Rasterize ground truth and loss mask tensors.
training_ground_truth_tensors, training_loss_masks =\
rasterize_tensors(gridbox_model, loss_mask_label_filter, bbox_rasterizer,
loss_mask_rasterizer,
training_ground_truth_labels)
# Set up optimizer.
optimizer = build_optimizer(experiment_spec.training_config.optimizer, learning_rate)
# Build training graph.
train_op_generator = build_train_op_generator(experiment_spec.training_config.cost_scaling)
target_classes = build_target_class_list(experiment_spec.cost_function_config)
gridbox_model.build_training_graph(training_images, training_ground_truth_tensors,
optimizer, target_classes, cost_combiner_func,
train_op_generator, training_loss_masks)
gridbox_model.visualize_predictions()
status_logging.get_status_logger().write(data=None, message="Training graph built.")
def build_validation_graph(experiment_spec,
gridbox_model,
loss_mask_label_filter,
bbox_rasterizer,
loss_mask_rasterizer,
dataloader,
num_validation_steps,
cost_combiner_func):
"""Build validation graph.
Args:
experiment_spec (experiment_pb2.Experiment): Experiment spec.
gridbox_model (HelnetGridbox): A HelnetGridbox instance.
loss_mask_label_filter (ObjectiveLabelFilter): A label filter for loss masks.
bbox_rasterizer (BboxRasterizer): A rasterizer for ground truths.
loss_mask_rasterizer (LossMaskRasterizer): A rasterizer for loss masks.
dataloader (Dataloader): A dataloader instance (eg. DefaultDataloader).
num_validation_steps (int): Number of validation steps.
        cost_combiner_func: A function that takes in a dictionary of objective costs
            and returns the total cost as a weighted sum of the objective costs.
Returns:
Evaluator instance.
"""
status_logging.get_status_logger().write(data=None, message="Building validation graph.")
# Get validation image and label tensors from dataset.
batch_size = experiment_spec.training_config.batch_size_per_gpu
validation_images, validation_ground_truth_labels, num_validation_samples = \
dataloader.get_dataset_tensors(batch_size, training=False, enable_augmentation=False)
logger.info("Found %d samples in validation set", num_validation_samples)
assert num_validation_samples > 0,\
"Validation period is not 0, but no validation data found. "\
"Either turn off validation by setting `validation_period = 0` or specify correct "\
"path/fold for validation data."
# Rasterize ground truth and loss mask tensors.
validation_ground_truth_tensors, validation_loss_masks =\
rasterize_tensors(gridbox_model, loss_mask_label_filter, bbox_rasterizer,
loss_mask_rasterizer, validation_ground_truth_labels)
# Build validation graph.
target_classes = build_target_class_list(experiment_spec.cost_function_config)
gridbox_model.build_validation_graph(validation_images, validation_ground_truth_tensors,
target_classes,
cost_combiner_func, validation_loss_masks)
postprocessing_config = build_postprocessing_config(experiment_spec.postprocessing_config)
evaluation_config = build_evaluation_config(experiment_spec.evaluation_config,
gridbox_model.target_class_names)
confidence_models = None
evaluator = Evaluator(postprocessing_config=postprocessing_config,
evaluation_config=evaluation_config,
gridbox_model=gridbox_model,
images=validation_images,
ground_truth_labels=validation_ground_truth_labels,
steps=num_validation_steps,
confidence_models=confidence_models)
status_logging.get_status_logger().write(data=None, message="Validation graph built.")
return evaluator
def train_gridbox(results_dir, experiment_spec, output_model_file_name,
input_model_file_name=None, maglev_experiment=None, model_version_labels=None,
key=None):
"""Construct, train, and save a gridbox_model gridbox model.
Args:
results_dir (str): Path to a folder where various training outputs will be written.
If the folder does not already exist, it will be created.
experiment_spec (experiment_pb2.Experiment): Experiment spec.
output_model_file_name (str): Name of a model to be saved after training.
input_model_file_name: Name of a model file to load, or None if a model should be
created from scratch.
maglev_experiment (maglev.platform.experiment.Experiment): Maglev Experiment object.
        model_version_labels (dict): Labels to attach to the created ModelVersions.
        key (str): A key to load and save models.
    """
# Extract core model config, which might be wrapped inside a TemporalModelConfig.
status_logging.get_status_logger().write(data=None, message="Training gridbox model.")
model_config = get_base_model_config(experiment_spec)
# Initialization of distributed seed, training precision and learning phase.
initialize(experiment_spec.random_seed, model_config.training_precision)
is_master = distribution.get_distributor().is_master()
# TODO: vpraveen <test without visualizer>
# Set up visualization.
visualizer_config = experiment_spec.training_config.visualizer
# Disable visualization for other than the master process.
if not is_master:
visualizer_config.enabled = False
Visualizer.build_from_config(visualizer_config)
dataset_proto = select_dataset_proto(experiment_spec)
# Build a dataloader.
dataloader = build_dataloader(dataset_proto=dataset_proto,
augmentation_proto=experiment_spec.augmentation_config)
# Compute steps per training epoch, and number of training and validation steps.
num_training_samples = dataloader.get_num_samples(training=True)
num_validation_samples = dataloader.get_num_samples(training=False)
batch_size = experiment_spec.training_config.batch_size_per_gpu
steps_per_epoch = compute_steps_per_epoch(num_training_samples, batch_size, logger)
num_training_steps = steps_per_epoch * experiment_spec.training_config.num_epochs
num_validation_steps = num_validation_samples // batch_size
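    # Example with assumed dataset sizes: 1000 validation samples at
    # batch_size_per_gpu=16 give 1000 // 16 = 62 validation steps; the trailing
    # partial batch of 8 samples is dropped by the integer division.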
# Set up cost auto weighter hook.
cost_auto_weight_hook = build_cost_auto_weight_hook(experiment_spec.cost_function_config,
steps_per_epoch)
hooks = [cost_auto_weight_hook]
# Construct a model.
gridbox_model = build_gridbox_model(experiment_spec=experiment_spec,
input_shape=dataloader.get_data_tensor_shape(),
model_file_name=input_model_file_name,
key=key)
# Build ground truth and loss mask rasterizers.
bbox_rasterizer, loss_mask_rasterizer =\
build_rasterizers(experiment_spec,
gridbox_model.input_width, gridbox_model.input_height,
gridbox_model.output_width, gridbox_model.output_height)
# Build an ObjectiveLabelFilter for loss mask generation.
loss_mask_label_filter = build_objective_label_filter(
objective_label_filter_proto=experiment_spec.loss_mask_label_filter,
target_class_to_source_classes_mapping=dataloader.target_class_to_source_classes_mapping,
learnable_objective_names=[x.name for x in gridbox_model.objective_set.learnable_objectives]
)
# Set up validation.
evaluation_config = build_evaluation_config(experiment_spec.evaluation_config,
gridbox_model.target_class_names)
validation_period = evaluation_config.validation_period_during_training
use_early_stopping = (experiment_spec.training_config.
learning_rate.HasField("early_stopping_annealing_schedule"))
learning_rate = None
early_stopping_hook = None
# Build learning rate and hook for early stopping.
if use_early_stopping:
learning_rate, hook = build_early_stopping_annealing_schedule(evaluation_config,
steps_per_epoch,
num_validation_steps,
results_dir,
experiment_spec,
None)
early_stopping_hook = hook
hooks.append(hook)
# Default learning rate.
else:
learning_rate = build_learning_rate_schedule(experiment_spec.training_config.learning_rate,
num_training_steps)
loggable_tensors.update({
"learning_rate": learning_rate
})
tf.summary.scalar("learning_rate", learning_rate)
# Build training graph.
build_training_graph(experiment_spec,
gridbox_model,
loss_mask_label_filter,
bbox_rasterizer,
loss_mask_rasterizer,
dataloader,
learning_rate,
cost_auto_weight_hook.cost_combiner_func)
if is_master and validation_period > 0:
evaluator = build_validation_graph(experiment_spec,
gridbox_model,
loss_mask_label_filter,
bbox_rasterizer,
loss_mask_rasterizer,
dataloader,
num_validation_steps,
cost_auto_weight_hook.cost_combiner_func)
num_epochs = experiment_spec.training_config.num_epochs
first_validation_epoch = evaluation_config.first_validation_epoch
# This logic is the only one that currently seems to work for early stopping:
# - Can't build validation graph before training graph (if we only build
# validation graph on master, horovod complains about missing broadcasts,
# but if we build validation graph on all nodes, we get a lot of errors at
# end of training, complaining some variables didn't get used).
# - Need the learning rate to build training graph, so need to build stopping
# hook before building training graph
# - Need validation cost tensor for stopping hook, so need the validation graph
# to build stopping hook
if use_early_stopping:
early_stopping_hook.validation_cost = gridbox_model.validation_cost
else:
validation_hook = ValidationHook(evaluator, validation_period, num_epochs,
steps_per_epoch, results_dir, first_validation_epoch)
hooks.append(validation_hook)
# Train the model.
run_training_loop(experiment_spec, results_dir, gridbox_model, hooks, steps_per_epoch,
output_model_file_name, maglev_experiment, model_version_labels,
visualizer_config, key)
status_logging.get_status_logger().write(data=None, message="Training op complete.")
def build_early_stopping_annealing_schedule(evaluation_config, steps_per_epoch,
num_validation_steps, results_dir, experiment_spec,
validation_cost):
"""Build early stopping annealing hook and learning rate.
Args:
evaluation_config (nvidia_tao_tf1.cv.detectnet_v2.evaluation.EvaluationConfig):
Configuration for evaluation.
steps_per_epoch (int): Number of steps per epoch.
num_validation_steps (int): Number of steps needed for a pass over validation data.
results_dir (str): Directory for results. Will be used to write tensorboard logs.
experiment_spec (nvidia_tao_tf1.cv.detectnet_v2.proto.experiment_pb2):
Experiment spec message.
validation_cost (Tensor): Validation cost tensor. Can be None for workers, since
validation cost is only computed on master.
"""
stopping_hook = build_early_stopping_hook(evaluation_config, steps_per_epoch,
os.path.join(results_dir, 'val'),
num_validation_steps, experiment_spec,
validation_cost=validation_cost)
return stopping_hook.learning_rate, stopping_hook
def run_experiment(config_path, results_dir, pretrained_model_file=None, model_name="model",
override_spec_path=None, model_version_labels=None, key=None,
wandb_logged_in=False):
"""
Launch experiment that trains the model.
NOTE: Do not change the argument names without verifying that cluster submission works.
Args:
config_path (list): List containing path to a text file containing a complete experiment
configuration and possibly a path to a .yml file containing override parameter values.
results_dir (str): Path to a folder where various training outputs will be written.
If the folder does not already exist, it will be created.
        pretrained_model_file (str): Optional path to a pretrained model file. This may be
            invoked from the CLI if needed. For now, we have disabled support to maintain
            consistency across all magnet apps.
model_name (str): Model name to be used as a part of model file name.
override_spec_path (str): Absolute path to yaml file which is used to overwrite some of the
experiment spec parameters.
model_version_labels (dict): Labels to attach to the created ModelVersions.
key (str): Key to save and load models from tlt.
        wandb_logged_in (bool): Flag indicating whether wandb login has succeeded.
"""
model_path = get_weights_dir(results_dir)
# Load experiment spec.
if config_path is not None:
# Create an experiment_pb2.Experiment object from the input file.
logger.info("Loading experiment spec at %s.", config_path)
# The spec in experiment_spec_path has to be complete.
# Default spec is not merged into experiment_spec.
experiment_spec = load_experiment_spec(
config_path, merge_from_default=False, validation_schema="train_val"
)
else:
logger.info("Loading default KITTI single class experiment spec.")
experiment_spec = load_experiment_spec()
# TODO: vpraveen <test without visualizer>
# Set up visualization.
is_master = distribution.get_distributor().is_master()
visualizer_config = experiment_spec.training_config.visualizer
# Disable visualization for other than the master process.
if is_master:
# Setup wandb initializer.
if visualizer_config.HasField("wandb_config"):
wandb_config = visualizer_config.wandb_config
wandb_name = f"{wandb_config.name}" if wandb_config.name \
else f"{model_name}"
wandb_stream_config = MessageToDict(
experiment_spec,
preserving_proto_field_name=True,
including_default_value_fields=True
)
initialize_wandb(
project=wandb_config.project if wandb_config.project else None,
entity=wandb_config.entity if wandb_config.entity else None,
config=wandb_stream_config,
notes=wandb_config.notes if wandb_config.notes else None,
tags=wandb_config.tags if wandb_config.tags else None,
sync_tensorboard=True,
save_code=False,
results_dir=results_dir,
wandb_logged_in=wandb_logged_in,
name=wandb_name
)
if visualizer_config.HasField("clearml_config"):
logger.info("Integrating with clearml.")
clearml_config = visualizer_config.clearml_config
get_clearml_task(clearml_config, "detectnet_v2")
else:
visualizer_config.enabled = False
Visualizer.build_from_config(visualizer_config)
# If hyperopt is used, sample hyperparameters and apply them to spec.
# @TODO: disabling hyperopt for this release.
# experiment_spec, maglev_experiment = sample_hyperparameters_and_apply_to_spec(experiment_spec)
maglev_experiment = None
model_file = os.path.join(model_path, '%s.hdf5' % model_name)
# Extract core model config, which might be wrapped inside a TemporalModelConfig.
model_config = get_base_model_config(experiment_spec)
# Pretrained model can be provided either through CLI or spec. Expand and validate the path.
assert not (pretrained_model_file and model_config.pretrained_model_file), \
"Provide only one pretrained model file."
pretrained_model_file = pretrained_model_file or model_config.pretrained_model_file
input_model_file_name = get_pretrained_model_path(pretrained_model_file)
output_model_file_name = model_file
# Dump experiment spec to result directory.
if distribution.get_distributor().is_master():
with open(os.path.join(results_dir, 'experiment_spec.txt'), 'w') as f:
f.write(str(experiment_spec))
# Train a model.
train_gridbox(results_dir, experiment_spec, output_model_file_name, input_model_file_name,
maglev_experiment, model_version_labels, key=key)
status_logging.get_status_logger().write(
data=None,
status_level=status_logging.Status.SUCCESS,
message="DetectNet_v2 training job complete."
)
def build_command_line_parser(parser=None):
"""
Parse command-line flags passed to the training script.
    Args:
        parser (argparse.ArgumentParser): Existing parser to extend, or None to create a new one.
    Returns:
        parser (argparse.ArgumentParser): Parser with the training options added.
"""
if parser is None:
parser = argparse.ArgumentParser(prog='train', description='Train a DetectNet_v2 model.')
default_experiment_path = os.path.join(os.path.expanduser('~'), 'experiments',
time.strftime("drivenet_%Y%m%d_%H%M%S"))
parser.add_argument(
'-e',
'--experiment_spec_file',
type=str,
default=None,
help='Path to spec file. Absolute path or relative to working directory. \
If not specified, default spec from spec_loader.py is used.'
)
parser.add_argument(
'-r',
'--results_dir',
type=str,
default=default_experiment_path,
help='Path to a folder where experiment outputs should be written.'
)
parser.add_argument(
'-n',
'--model_name',
type=str,
default='model',
help='Name of the model file. If not given, then defaults to model.hdf5.'
)
parser.add_argument(
'-v',
'--verbose',
action='store_true',
help='Set verbosity level for the logger.'
)
parser.add_argument(
'-k',
'--key',
default="",
type=str,
required=False,
help='The key to load pretrained weights and save intermediate snapshots and final model.'
)
parser.add_argument(
'--enable_determinism',
action="store_true",
help="Flag to enable deterministic training.",
default=False
)
return parser
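# Example invocation (paths and key are illustrative; the script can also be
# launched through the TAO entrypoint):
#   python train.py -e /workspace/specs/train_spec.txt \
#       -r /workspace/results -n resnet18_detector -k $ENCRYPTION_KEY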
def parse_command_line_args(cl_args=None):
"""Parser command line arguments to the trainer.
Args:
cl_args(sys.argv[1:]): Arg from the command line.
Returns:
args: Parsed arguments using argparse.
"""
parser = build_command_line_parser(parser=None)
args = parser.parse_args(cl_args)
return args
def enable_deterministic_training():
"""Define relevant trainer environment variables."""
os.environ["TF_CUDNN_DETERMINISTIC"] = "1"
os.environ["TF_DETERMINISTIC_OPS"] = "1"
os.environ["HOROVOD_FUSION_THRESHOLD"] = "0"
@time_function(__name__)
def main(args=None):
"""Run the training process."""
args = parse_command_line_args(args)
# Set up logger verbosity.
verbosity = 'INFO'
if args.verbose:
verbosity = 'DEBUG'
# Configure the logger.
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level=verbosity)
# Setting results dir to realpath if the user
# doesn't provide an absolute path.
results_dir = args.results_dir
if not os.path.isabs(results_dir):
results_dir = os.path.realpath(results_dir)
wandb_logged_in = False
# Enable Horovod distributor for multi-GPU training.
distribution.set_distributor(distribution.HorovodDistributor())
is_master = distribution.get_distributor().is_master()
try:
if is_master:
if not os.path.exists(results_dir):
os.makedirs(results_dir)
wandb_logged_in = check_wandb_logged_in()
# Writing out status file for TLT.
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=is_master,
verbosity=logger.getEffectiveLevel(),
append=True
)
)
status_logging.get_status_logger().write(
data=None,
status_level=status_logging.Status.STARTED,
message="Starting DetectNet_v2 Training job"
)
if args.enable_determinism:
logger.info("Enabling deterministic training.")
enable_deterministic_training()
run_experiment(
config_path=args.experiment_spec_file,
results_dir=results_dir,
model_name=args.model_name,
key=args.key,
wandb_logged_in=wandb_logged_in
)
except (KeyboardInterrupt, SystemExit) as e:
logger.info("Training was interrupted.")
status_logging.get_status_logger().write(
data={"Error": "{}".format(e)},
message="Training was interrupted",
status_level=status_logging.Status.FAILURE
)
finally:
if distribution.get_distributor().is_master():
if wandb_logged_in:
wandb.finish()
if __name__ == "__main__":
try:
main()
except Exception as e:
        if isinstance(e, tf.errors.ResourceExhaustedError):
logger = logging.getLogger(__name__)
logger.error(
"Ran out of GPU memory, please lower the batch size, use a smaller input "
"resolution, or use a smaller backbone."
)
status_logging.get_status_logger().write(
message="Ran out of GPU memory, please lower the batch size, use a smaller input "
"resolution, or use a smaller backbone.",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
exit(1)
else:
            # Re-raise the error as-is if it is not an OOM error.
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/scripts/train.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Prune a detectnet_v2 model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.common.magnet_prune import ( # noqa pylint: disable=unused-import
build_command_line_parser,
main
)
if __name__ == "__main__":
try:
main()
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Pruning finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Pruning was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/detectnet_v2/scripts/prune.py |